/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
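
#if 0
/* Illustrative sketch (not part of GCC): how HWI_SIGN_EXTEND is meant
   to be used.  A double-word constant is kept as a (low, high) pair;
   when a single-word value is widened to such a pair, the high word
   must be the sign extension of the low word.  Standalone C, assuming
   HOST_WIDE_INT is long on this host.  */
#include <stdio.h>
typedef long HOST_WIDE_INT;
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))

int
main (void)
{
  HOST_WIDE_INT lo1 = 42, lo2 = -42;
  /* 42 widens to the pair (42, 0); -42 widens to (-42, -1).  */
  printf ("(%ld, %ld)\n", lo1, HWI_SIGN_EXTEND (lo1));
  printf ("(%ld, %ld)\n", lo2, HWI_SIGN_EXTEND (lo2));
  return 0;
}
#endif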
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int, unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
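
#if 0
/* Illustrative sketch (not part of GCC): why neg_const_int must
   truncate.  In an 8-bit mode the most negative value is -128; its
   mathematical negation (+128) does not fit, and truncating back to
   8 bits wraps it to -128 again, which is what gen_int_mode does for
   the real rtx case.  Standalone C; the wrap-around on conversion is
   what GCC and other two's-complement implementations do.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  int8_t most_negative = -128;
  /* Negate in a wider type, then truncate to the 8-bit "mode".  */
  int8_t truncated = (int8_t) (- (int) most_negative);
  printf ("-(-128) truncated to 8 bits = %d\n", truncated);  /* -128 */
  return 0;
}
#endif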
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
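
#if 0
/* Illustrative sketch (not part of GCC): the value mode_signbit_p
   tests for.  For a mode of WIDTH bits the sign-bit constant is
   1 << (WIDTH - 1), e.g. 0x80 for 8 bits and 0x8000 for 16 bits.
   Standalone C mirroring the masking and final comparison above.  */
#include <stdio.h>

static int
is_signbit (unsigned long val, unsigned int width)
{
  if (width < sizeof (unsigned long) * 8)
    val &= (1UL << width) - 1;
  return val == 1UL << (width - 1);
}

int
main (void)
{
  printf ("%d %d %d\n",
	  is_signbit (0x80, 8),      /* 1 */
	  is_signbit (0x8000, 16),   /* 1 */
	  is_signbit (0x4000, 16));  /* 0 */
  return 0;
}
#endif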
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
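
#if 0
/* Illustrative sketch (not part of GCC proper): typical use of
   simplify_gen_binary from an optimization pass.  If the operands
   fold, a constant comes back; otherwise a canonically ordered
   (code mode op0 op1) rtx is built.  SImode and GEN_INT are the usual
   RTL APIs; the function name is hypothetical.  */
static rtx
example_add_four (rtx reg)
{
  /* Builds (plus:SI reg (const_int 4)), or folds it to a CONST_INT
     when REG is itself a constant.  */
  return simplify_gen_binary (PLUS, SImode, reg, GEN_INT (4));
}
#endif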
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
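
#if 0
/* Illustrative sketch (not part of GCC proper): how callers use
   avoid_constant_pool_reference.  Operands are "seen through" to the
   pool constant before attempting constant folding, exactly as
   simplify_binary_operation does with trueop0/trueop1 further below.
   The function name is hypothetical.  */
static rtx
example_true_operand (rtx op)
{
  /* TRUEOP is the pool constant if OP was a MEM into the constant
     pool, otherwise OP itself.  */
  rtx trueop = avoid_constant_pool_reference (op);
  return trueop;
}
#endif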
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp = 0, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !host_integerp (toffset, 0)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += TREE_INT_CST_LOW (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += INTVAL (MEM_OFFSET (x));

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
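
#if 0
/* Illustrative sketch (not part of GCC proper): the callback form of
   simplify_replace_fn_rtx.  The callback is invoked on every
   subexpression; returning non-NULL substitutes that value, returning
   NULL_RTX lets the walk continue.  This hypothetical example replaces
   every occurrence of one register with a given value.  */
static rtx
replace_reg_cb (rtx x, const_rtx old_rtx ATTRIBUTE_UNUSED, void *data)
{
  rtx *pair = (rtx *) data;	/* pair[0] = reg, pair[1] = value.  */
  if (REG_P (x) && REGNO (x) == REGNO (pair[0]))
    return pair[1];
  return NULL_RTX;
}

static rtx
example_substitute (rtx expr, rtx reg, rtx value)
{
  rtx pair[2] = { reg, value };
  return simplify_replace_fn_rtx (expr, NULL_RTX, replace_reg_cb, pair);
}
#endif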
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_INT_P (XEXP (op, 1))
	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_BITSIZE (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
	   || GET_CODE (op) == ZERO_EXTEND)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
	 (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
	 (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
	  && subreg_lowpart_p (op))
	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
				   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  Note that this is also
	 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
	 modes we just have to apply a different definition for
	 truncation.  But don't do this for an (LSHIFTRT (MULT ...))
	 since this will cause problems with the umulXi3_highpart
	 patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op)))
	   ? (num_sign_bit_copies (op, GET_MODE (op))
	      > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
				- GET_MODE_BITSIZE (mode)))
	   : truncated_to_mode (mode, op))
	  && ! (GET_CODE (op) == LSHIFTRT
		&& GET_CODE (XEXP (op, 0)) == MULT))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned)significand_size (GET_MODE (op))
		      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.
	 */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || ((GET_MODE_BITSIZE (GET_MODE (op))
	       <= HOST_BITS_PER_WIDE_INT)
	      && ((nonzero_bits (op, GET_MODE (op))
		   & ((unsigned HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
		  == 0)))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
	return gen_rtx_NEG (mode, op);
      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					 ? SIGN_EXTEND : ZERO_EXTEND,
					 mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
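
#if 0
/* Illustrative sketch (not part of GCC): the arithmetic behind the
   NEG-of-comparison rule in the NEG case above.  With
   STORE_FLAG_VALUE == 1, (neg (lt x 0)) == (ashiftrt x (width-1)):
   both yield -1 when x is negative and 0 otherwise.  Standalone C
   using a 32-bit int and an arithmetic right shift of negative
   values, which is implementation-defined in ISO C but is what every
   host GCC supports does.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  int32_t xs[3] = { -5, 0, 7 };
  int i;
  for (i = 0; i < 3; i++)
    {
      int32_t neg_lt = -(xs[i] < 0);
      int32_t sar = xs[i] >> 31;
      printf ("x=%d: -(x<0)=%d x>>31=%d\n", xs[i], neg_lt, sar);
    }
  return 0;
}
#endif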
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
	}
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  arg0 &= GET_MODE_MASK (mode);
	  val = ffs_hwi (arg0);
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = ctz_hwi (arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    val = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;
		byte = (arg0 >> s) & 0xff;
		val |= byte << d;
	      }
	  }
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
			   << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((unsigned HOST_WIDE_INT) (-1)
			   << GET_MODE_BITSIZE (op_mode));
	      if (val & ((unsigned HOST_WIDE_INT) 1
			 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val
		  -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	case SS_NEG:
	case US_NEG:
	case SS_ABS:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 != 0)
	    lv = ffs_hwi (l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
	  else
	    lv = 0;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = ctz_hwi (l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    hv = 0;
	    lv = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;

		if (s < HOST_BITS_PER_WIDE_INT)
		  byte = (l1 >> s) & 0xff;
		else
		  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

		if (d < HOST_BITS_PER_WIDE_INT)
		  lv |= byte << d;
		else
		  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
	      }
	  }
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((unsigned HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (unsigned HOST_WIDE_INT) (-1)
		   << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2 * HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
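
#if 0
/* Illustrative sketch (not part of GCC): the byte-reversal loop used
   for the BSWAP cases above, extracted into standalone C.  Each byte
   at offset S from the low end moves to offset WIDTH - S - 8.  */
#include <stdio.h>
#include <stdint.h>

static uint32_t
bswap32 (uint32_t arg0)
{
  unsigned int width = 32, s;
  uint32_t val = 0;
  for (s = 0; s < width; s += 8)
    {
      unsigned int d = width - s - 8;
      uint32_t byte = (arg0 >> s) & 0xff;
      val |= byte << d;
    }
  return val;
}

int
main (void)
{
  printf ("%08x\n", bswap32 (0x11223344));  /* 44332211 */
  return 0;
}
#endif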
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
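
#if 0
/* Illustrative sketch (not part of GCC proper): the difference between
   simplify_binary_operation and simplify_gen_binary.  The former
   returns NULL_RTX when nothing folds; the latter always returns an
   rtx, building (code mode op0 op1) as a fallback.  Hypothetical
   fragment, assuming the usual RTL API is in scope.  */
static rtx
example_fold_or_build (rtx a, rtx b)
{
  rtx tem = simplify_binary_operation (PLUS, SImode, a, b);
  if (tem)
    return tem;		/* Folded, e.g. when both are CONST_INTs.  */
  return simplify_gen_binary (PLUS, SImode, a, b);
}
#endif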
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  double_int coeff0, coeff1;
	  rtx lhs = op0, rhs = op1;

	  coeff0 = double_int_one;
	  coeff1 = double_int_one;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = double_int_minus_one;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = double_int_minus_one;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff1 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      double_int val;
	      bool speed = optimize_function_for_speed_p (cfun);

	      val = double_int_add (coeff0, coeff1);
	      coeff = immed_double_int_const (val, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
		? tem : 0;
	    }
	}

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1)
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == XOR
	  && (CONST_INT_P (XEXP (op0, 1))
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  double_int coeff0, negcoeff1;
	  rtx lhs = op0, rhs = op1;

	  coeff0 = double_int_one;
	  negcoeff1 = double_int_minus_one;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = double_int_minus_one;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = double_int_one;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      negcoeff1 = double_int_setbit (double_int_zero,
					     INTVAL (XEXP (rhs, 1)));
	      negcoeff1 = double_int_neg (negcoeff1);
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      double_int val;
	      bool speed = optimize_function_for_speed_p (cfun);

	      val = double_int_add (coeff0, negcoeff1);
	      coeff = immed_double_int_const (val, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
		? tem : 0;
	    }
	}

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_INT_P (op1)
	      || GET_CODE (op1) == CONST_DOUBLE))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;
2173 if (trueop1 == constm1_rtx)
2174 return simplify_gen_unary (NEG, mode, op0, mode);
2176 if (GET_CODE (op0) == NEG)
2178 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2179 /* If op1 is a MULT as well and simplify_unary_operation
2180 just moved the NEG to the second operand, simplify_gen_binary
2181 below could through simplify_associative_operation move
2182 the NEG around again and recurse endlessly. */
2184 && GET_CODE (op1) == MULT
2185 && GET_CODE (temp) == MULT
2186 && XEXP (op1, 0) == XEXP (temp, 0)
2187 && GET_CODE (XEXP (temp, 1)) == NEG
2188 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2191 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2193 if (GET_CODE (op1) == NEG)
2195 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2196 /* If op0 is a MULT as well and simplify_unary_operation
2197 just moved the NEG to the second operand, simplify_gen_binary
2198 below could through simplify_associative_operation move
2199 the NEG around again and recurse endlessly. */
2201 && GET_CODE (op0) == MULT
2202 && GET_CODE (temp) == MULT
2203 && XEXP (op0, 0) == XEXP (temp, 0)
2204 && GET_CODE (XEXP (temp, 1)) == NEG
2205 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2208 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2211 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2212 x is NaN, since x * 0 is then also NaN. Nor is it valid
2213 when the mode has signed zeros, since multiplying a negative
2214 number by 0 will give -0, not 0. */
2215 if (!HONOR_NANS (mode)
2216 && !HONOR_SIGNED_ZEROS (mode)
2217 && trueop1 == CONST0_RTX (mode)
2218 && ! side_effects_p (op0))
2221 /* In IEEE floating point, x*1 is not equivalent to x for
2223 if (!HONOR_SNANS (mode)
2224 && trueop1 == CONST1_RTX (mode))
2227 /* Convert multiply by constant power of two into shift unless
2228 we are still generating RTL. This test is a kludge. */
2229 if (CONST_INT_P (trueop1)
2230 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2231 /* If the mode is larger than the host word size, and the
2232 uppermost bit is set, then this isn't a power of two due
2233 to implicit sign extension. */
2234 && (width <= HOST_BITS_PER_WIDE_INT
2235 || val != HOST_BITS_PER_WIDE_INT - 1))
2236 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
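/* Illustrative example (editorial, assuming a 32-bit SImode):
   (mult:SI x (const_int 8)) has exact_log2 (8) == 3, so it is
   rewritten as (ashift:SI x (const_int 3)).  */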
2238 /* Likewise for multipliers wider than a word. */
2239 if (GET_CODE (trueop1) == CONST_DOUBLE
2240 && (GET_MODE (trueop1) == VOIDmode
2241 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2242 && GET_MODE (op0) == mode
2243 && CONST_DOUBLE_LOW (trueop1) == 0
2244 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2245 return simplify_gen_binary (ASHIFT, mode, op0,
2246 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2248 /* x*2 is x+x and x*(-1) is -x */
2249 if (GET_CODE (trueop1) == CONST_DOUBLE
2250 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2251 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2252 && GET_MODE (op0) == mode)
2255 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2257 if (REAL_VALUES_EQUAL (d, dconst2))
2258 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2260 if (!HONOR_SNANS (mode)
2261 && REAL_VALUES_EQUAL (d, dconstm1))
2262 return simplify_gen_unary (NEG, mode, op0, mode);
2265 /* Optimize -x * -x as x * x. */
2266 if (FLOAT_MODE_P (mode)
2267 && GET_CODE (op0) == NEG
2268 && GET_CODE (op1) == NEG
2269 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2270 && !side_effects_p (XEXP (op0, 0)))
2271 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2273 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2274 if (SCALAR_FLOAT_MODE_P (mode)
2275 && GET_CODE (op0) == ABS
2276 && GET_CODE (op1) == ABS
2277 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2278 && !side_effects_p (XEXP (op0, 0)))
2279 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2281 /* Reassociate multiplication, but for floating point MULTs
2282 only when the user specifies unsafe math optimizations. */
2283 if (! FLOAT_MODE_P (mode)
2284 || flag_unsafe_math_optimizations)
2286 tem = simplify_associative_operation (code, mode, op0, op1);
2293 if (trueop1 == CONST0_RTX (mode))
2295 if (CONST_INT_P (trueop1)
2296 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2297 == GET_MODE_MASK (mode)))
2299 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2301 /* A | (~A) -> -1 */
2302 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2303 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2304 && ! side_effects_p (op0)
2305 && SCALAR_INT_MODE_P (mode))
2308 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2309 if (CONST_INT_P (op1)
2310 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2311 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
2314 /* Canonicalize (X & C1) | C2. */
2315 if (GET_CODE (op0) == AND
2316 && CONST_INT_P (trueop1)
2317 && CONST_INT_P (XEXP (op0, 1)))
2319 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2320 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2321 HOST_WIDE_INT c2 = INTVAL (trueop1);
2323 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2325 && !side_effects_p (XEXP (op0, 0)))
2328 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2329 if (((c1|c2) & mask) == mask)
2330 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2332 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2333 if (((c1 & ~c2) & mask) != (c1 & mask))
2335 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2336 gen_int_mode (c1 & ~c2, mode));
2337 return simplify_gen_binary (IOR, mode, tem, op1);
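/* Illustrative example (editorial): with C1 == 0xff and C2 == 0x0f,
   the low four bits of C1 are already provided by C2, so
   (ior (and x (const_int 0xff)) (const_int 0x0f)) is rewritten as
   (ior (and x (const_int 0xf0)) (const_int 0x0f)).  */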
2341 /* Convert (A & B) | A to A. */
2342 if (GET_CODE (op0) == AND
2343 && (rtx_equal_p (XEXP (op0, 0), op1)
2344 || rtx_equal_p (XEXP (op0, 1), op1))
2345 && ! side_effects_p (XEXP (op0, 0))
2346 && ! side_effects_p (XEXP (op0, 1)))
2349 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2350 mode size to (rotate A CX). */
2352 if (GET_CODE (op1) == ASHIFT
2353 || GET_CODE (op1) == SUBREG)
2364 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2365 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2366 && CONST_INT_P (XEXP (opleft, 1))
2367 && CONST_INT_P (XEXP (opright, 1))
2368 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2369 == GET_MODE_BITSIZE (mode)))
2370 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
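/* Illustrative example (editorial, assuming 32-bit SImode):
   (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8))) has
   24 + 8 == GET_MODE_BITSIZE (SImode), so it is recognized as
   (rotate x (const_int 24)).  */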
2372 /* Same, but for ashift that has been "simplified" to a wider mode
2373 by simplify_shift_const. */
2375 if (GET_CODE (opleft) == SUBREG
2376 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2377 && GET_CODE (opright) == LSHIFTRT
2378 && GET_CODE (XEXP (opright, 0)) == SUBREG
2379 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2380 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2381 && (GET_MODE_SIZE (GET_MODE (opleft))
2382 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2383 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2384 SUBREG_REG (XEXP (opright, 0)))
2385 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2386 && CONST_INT_P (XEXP (opright, 1))
2387 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2388 == GET_MODE_BITSIZE (mode)))
2389 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2390 XEXP (SUBREG_REG (opleft), 1));
2392 /* If we have (ior (and X C1) C2), simplify this by making
2393 C1 as small as possible if C1 actually changes. */
2394 if (CONST_INT_P (op1)
2395 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2396 || INTVAL (op1) > 0)
2397 && GET_CODE (op0) == AND
2398 && CONST_INT_P (XEXP (op0, 1))
2399 && CONST_INT_P (op1)
2400 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2401 return simplify_gen_binary (IOR, mode,
2403 (AND, mode, XEXP (op0, 0),
2404 GEN_INT (UINTVAL (XEXP (op0, 1))
2408 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2409 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2410 the PLUS does not affect any of the bits in OP1: then we can do
2411 the IOR as a PLUS and we can associate. This is valid if OP1
2412 can be safely shifted left C bits. */
2413 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2414 && GET_CODE (XEXP (op0, 0)) == PLUS
2415 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2416 && CONST_INT_P (XEXP (op0, 1))
2417 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2419 int count = INTVAL (XEXP (op0, 1));
2420 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2422 if (mask >> count == INTVAL (trueop1)
2423 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2424 return simplify_gen_binary (ASHIFTRT, mode,
2425 plus_constant (XEXP (op0, 0), mask),
2429 tem = simplify_associative_operation (code, mode, op0, op1);
2435 if (trueop1 == CONST0_RTX (mode))
2437 if (CONST_INT_P (trueop1)
2438 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2439 == GET_MODE_MASK (mode)))
2440 return simplify_gen_unary (NOT, mode, op0, mode);
2441 if (rtx_equal_p (trueop0, trueop1)
2442 && ! side_effects_p (op0)
2443 && GET_MODE_CLASS (mode) != MODE_CC)
2444 return CONST0_RTX (mode);
2446 /* Canonicalize XOR of the most significant bit to PLUS. */
2447 if ((CONST_INT_P (op1)
2448 || GET_CODE (op1) == CONST_DOUBLE)
2449 && mode_signbit_p (mode, op1))
2450 return simplify_gen_binary (PLUS, mode, op0, op1);
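/* Illustrative example (editorial, assuming 32-bit SImode): the sign
   bit is 0x80000000, and XORing it in is the same as adding it modulo
   2**32, so (xor:SI x (const_int -2147483648)) is canonicalized to
   (plus:SI x (const_int -2147483648)).  */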
2451 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2452 if ((CONST_INT_P (op1)
2453 || GET_CODE (op1) == CONST_DOUBLE)
2454 && GET_CODE (op0) == PLUS
2455 && (CONST_INT_P (XEXP (op0, 1))
2456 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2457 && mode_signbit_p (mode, XEXP (op0, 1)))
2458 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2459 simplify_gen_binary (XOR, mode, op1,
2462 /* If we are XORing two things that have no bits in common,
2463 convert them into an IOR. This helps to detect rotation encoded
2464 using those methods and possibly other simplifications. */
2466 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2467 && (nonzero_bits (op0, mode)
2468 & nonzero_bits (op1, mode)) == 0)
2469 return (simplify_gen_binary (IOR, mode, op0, op1));
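/* Illustrative example (editorial): (xor (and x (const_int 0xff))
   (and y (const_int 0xff00))) has disjoint nonzero bits, so it is
   rewritten as the equivalent IOR, which the IOR patterns above
   (e.g. the rotate detection) can then recognize.  */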
2471 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2472 Also convert (XOR (NOT x) y) to (NOT (XOR x y)); similarly for
2473 (XOR x (NOT y)). */
2475 int num_negated = 0;
2477 if (GET_CODE (op0) == NOT)
2478 num_negated++, op0 = XEXP (op0, 0);
2479 if (GET_CODE (op1) == NOT)
2480 num_negated++, op1 = XEXP (op1, 0);
2482 if (num_negated == 2)
2483 return simplify_gen_binary (XOR, mode, op0, op1);
2484 else if (num_negated == 1)
2485 return simplify_gen_unary (NOT, mode,
2486 simplify_gen_binary (XOR, mode, op0, op1),
2490 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2491 correspond to a machine insn or result in further simplifications
2492 if B is a constant. */
2494 if (GET_CODE (op0) == AND
2495 && rtx_equal_p (XEXP (op0, 1), op1)
2496 && ! side_effects_p (op1))
2497 return simplify_gen_binary (AND, mode,
2498 simplify_gen_unary (NOT, mode,
2499 XEXP (op0, 0), mode),
2502 else if (GET_CODE (op0) == AND
2503 && rtx_equal_p (XEXP (op0, 0), op1)
2504 && ! side_effects_p (op1))
2505 return simplify_gen_binary (AND, mode,
2506 simplify_gen_unary (NOT, mode,
2507 XEXP (op0, 1), mode),
2510 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2511 comparison if STORE_FLAG_VALUE is 1. */
2512 if (STORE_FLAG_VALUE == 1
2513 && trueop1 == const1_rtx
2514 && COMPARISON_P (op0)
2515 && (reversed = reversed_comparison (op0, mode)))
2518 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2519 is (lt foo (const_int 0)), so we can perform the above
2520 simplification if STORE_FLAG_VALUE is 1. */
2522 if (STORE_FLAG_VALUE == 1
2523 && trueop1 == const1_rtx
2524 && GET_CODE (op0) == LSHIFTRT
2525 && CONST_INT_P (XEXP (op0, 1))
2526 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2527 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
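/* Illustrative example (editorial, assuming 32-bit SImode):
   (xor (lshiftrt x (const_int 31)) (const_int 1)) tests the inverted
   sign bit of x, which is exactly (ge x (const_int 0)) when
   STORE_FLAG_VALUE is 1.  */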
2529 /* (xor (comparison foo bar) (const_int sign-bit))
2530 when STORE_FLAG_VALUE is the sign bit. */
2531 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2532 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2533 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2534 && trueop1 == const_true_rtx
2535 && COMPARISON_P (op0)
2536 && (reversed = reversed_comparison (op0, mode)))
2539 tem = simplify_associative_operation (code, mode, op0, op1);
2545 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2547 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2549 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2550 HOST_WIDE_INT nzop1;
2551 if (CONST_INT_P (trueop1))
2553 HOST_WIDE_INT val1 = INTVAL (trueop1);
2554 /* If we are turning off bits already known off in OP0, we need
2555 not do an AND. */
2556 if ((nzop0 & ~val1) == 0)
2559 nzop1 = nonzero_bits (trueop1, mode);
2560 /* If we are clearing all the nonzero bits, the result is zero. */
2561 if ((nzop1 & nzop0) == 0
2562 && !side_effects_p (op0) && !side_effects_p (op1))
2563 return CONST0_RTX (mode);
2565 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2566 && GET_MODE_CLASS (mode) != MODE_CC)
2569 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2570 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2571 && ! side_effects_p (op0)
2572 && GET_MODE_CLASS (mode) != MODE_CC)
2573 return CONST0_RTX (mode);
2575 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2576 there are no nonzero bits of C outside of X's mode. */
2577 if ((GET_CODE (op0) == SIGN_EXTEND
2578 || GET_CODE (op0) == ZERO_EXTEND)
2579 && CONST_INT_P (trueop1)
2580 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2581 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2582 & UINTVAL (trueop1)) == 0)
2584 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2585 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2586 gen_int_mode (INTVAL (trueop1),
2588 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
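/* Illustrative example (editorial): with x in QImode,
   (and:SI (sign_extend:SI x) (const_int 0x7f)) keeps no bits outside
   QImode's mask, so it becomes
   (zero_extend:SI (and:QI x (const_int 0x7f))).  */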
2591 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2592 we might be able to further simplify the AND with X and potentially
2593 remove the truncation altogether. */
2594 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2596 rtx x = XEXP (op0, 0);
2597 enum machine_mode xmode = GET_MODE (x);
2598 tem = simplify_gen_binary (AND, xmode, x,
2599 gen_int_mode (INTVAL (trueop1), xmode));
2600 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
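/* Illustrative example (editorial): with x in DImode,
   (and:SI (truncate:SI x) (const_int 0xff)) becomes
   (truncate:SI (and:DI x (const_int 0xff))), which may let the AND
   combine with x and render the truncation redundant.  */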
2603 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2604 if (GET_CODE (op0) == IOR
2605 && CONST_INT_P (trueop1)
2606 && CONST_INT_P (XEXP (op0, 1)))
2608 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2609 return simplify_gen_binary (IOR, mode,
2610 simplify_gen_binary (AND, mode,
2611 XEXP (op0, 0), op1),
2612 gen_int_mode (tmp, mode));
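/* Illustrative example (editorial):
   (and (ior x (const_int 0x0f)) (const_int 0xff)) distributes to
   (ior (and x (const_int 0xff)) (const_int 0x0f)).  */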
2615 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2616 insn (and may simplify more). */
2617 if (GET_CODE (op0) == XOR
2618 && rtx_equal_p (XEXP (op0, 0), op1)
2619 && ! side_effects_p (op1))
2620 return simplify_gen_binary (AND, mode,
2621 simplify_gen_unary (NOT, mode,
2622 XEXP (op0, 1), mode),
2625 if (GET_CODE (op0) == XOR
2626 && rtx_equal_p (XEXP (op0, 1), op1)
2627 && ! side_effects_p (op1))
2628 return simplify_gen_binary (AND, mode,
2629 simplify_gen_unary (NOT, mode,
2630 XEXP (op0, 0), mode),
2633 /* Similarly for (~(A ^ B)) & A. */
2634 if (GET_CODE (op0) == NOT
2635 && GET_CODE (XEXP (op0, 0)) == XOR
2636 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2637 && ! side_effects_p (op1))
2638 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2640 if (GET_CODE (op0) == NOT
2641 && GET_CODE (XEXP (op0, 0)) == XOR
2642 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2643 && ! side_effects_p (op1))
2644 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2646 /* Convert (A | B) & A to A. */
2647 if (GET_CODE (op0) == IOR
2648 && (rtx_equal_p (XEXP (op0, 0), op1)
2649 || rtx_equal_p (XEXP (op0, 1), op1))
2650 && ! side_effects_p (XEXP (op0, 0))
2651 && ! side_effects_p (XEXP (op0, 1)))
2654 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2655 ((A & N) + B) & M -> (A + B) & M
2656 Similarly if (N & M) == 0,
2657 ((A | N) + B) & M -> (A + B) & M
2658 and for - instead of + and/or ^ instead of |.
2659 Also, if (N & M) == 0, then
2660 (A +- N) & M -> A & M. */
2661 if (CONST_INT_P (trueop1)
2662 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2663 && ~UINTVAL (trueop1)
2664 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2665 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2670 pmop[0] = XEXP (op0, 0);
2671 pmop[1] = XEXP (op0, 1);
2673 if (CONST_INT_P (pmop[1])
2674 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2675 return simplify_gen_binary (AND, mode, pmop[0], op1);
2677 for (which = 0; which < 2; which++)
2680 switch (GET_CODE (tem))
2683 if (CONST_INT_P (XEXP (tem, 1))
2684 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2685 == UINTVAL (trueop1))
2686 pmop[which] = XEXP (tem, 0);
2690 if (CONST_INT_P (XEXP (tem, 1))
2691 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2692 pmop[which] = XEXP (tem, 0);
2699 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2701 tem = simplify_gen_binary (GET_CODE (op0), mode,
2703 return simplify_gen_binary (code, mode, tem, op1);
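/* Illustrative example (editorial): with M == 7 and N == 0xff we have
   (N & M) == M, so (and (plus (and a (const_int 0xff)) b) (const_int 7))
   simplifies to (and (plus a b) (const_int 7)); the inner AND cannot
   change the low three bits of the sum.  */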
2707 /* (and X (ior (not X) Y)) -> (and X Y) */
2708 if (GET_CODE (op1) == IOR
2709 && GET_CODE (XEXP (op1, 0)) == NOT
2710 && op0 == XEXP (XEXP (op1, 0), 0))
2711 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2713 /* (and (ior (not X) Y) X) -> (and X Y) */
2714 if (GET_CODE (op0) == IOR
2715 && GET_CODE (XEXP (op0, 0)) == NOT
2716 && op1 == XEXP (XEXP (op0, 0), 0))
2717 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2719 tem = simplify_associative_operation (code, mode, op0, op1);
2725 /* 0/x is 0 (or x&0 if x has side-effects). */
2726 if (trueop0 == CONST0_RTX (mode))
2728 if (side_effects_p (op1))
2729 return simplify_gen_binary (AND, mode, op1, trueop0);
2733 if (trueop1 == CONST1_RTX (mode))
2734 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2735 /* Convert divide by power of two into shift. */
2736 if (CONST_INT_P (trueop1)
2737 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2738 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
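/* Illustrative example (editorial): (udiv:SI x (const_int 16)) becomes
   (lshiftrt:SI x (const_int 4)).  This holds only for unsigned
   division; signed division by a power of two must round toward zero
   and is not a plain shift.  */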
2742 /* Handle floating point and integers separately. */
2743 if (SCALAR_FLOAT_MODE_P (mode))
2745 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2746 safe for modes with NaNs, since 0.0 / 0.0 will then be
2747 NaN rather than 0.0. Nor is it safe for modes with signed
2748 zeros, since dividing 0 by a negative number gives -0.0, not 0.0. */
2749 if (trueop0 == CONST0_RTX (mode)
2750 && !HONOR_NANS (mode)
2751 && !HONOR_SIGNED_ZEROS (mode)
2752 && ! side_effects_p (op1))
2755 if (trueop1 == CONST1_RTX (mode)
2756 && !HONOR_SNANS (mode))
2759 if (GET_CODE (trueop1) == CONST_DOUBLE
2760 && trueop1 != CONST0_RTX (mode))
2763 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2766 if (REAL_VALUES_EQUAL (d, dconstm1)
2767 && !HONOR_SNANS (mode))
2768 return simplify_gen_unary (NEG, mode, op0, mode);
2770 /* Change FP division by a constant into multiplication.
2771 Only do this with -freciprocal-math. */
2772 if (flag_reciprocal_math
2773 && !REAL_VALUES_EQUAL (d, dconst0))
2775 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2776 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2777 return simplify_gen_binary (MULT, mode, op0, tem);
2781 else if (SCALAR_INT_MODE_P (mode))
2783 /* 0/x is 0 (or x&0 if x has side-effects). */
2784 if (trueop0 == CONST0_RTX (mode)
2785 && !cfun->can_throw_non_call_exceptions)
2787 if (side_effects_p (op1))
2788 return simplify_gen_binary (AND, mode, op1, trueop0);
2792 if (trueop1 == CONST1_RTX (mode))
2793 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2795 if (trueop1 == constm1_rtx)
2797 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2798 return simplify_gen_unary (NEG, mode, x, mode);
2804 /* 0%x is 0 (or x&0 if x has side-effects). */
2805 if (trueop0 == CONST0_RTX (mode))
2807 if (side_effects_p (op1))
2808 return simplify_gen_binary (AND, mode, op1, trueop0);
2811 /* x%1 is 0 (or x&0 if x has side-effects). */
2812 if (trueop1 == CONST1_RTX (mode))
2814 if (side_effects_p (op0))
2815 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2816 return CONST0_RTX (mode);
2818 /* Implement modulus by power of two as AND. */
2819 if (CONST_INT_P (trueop1)
2820 && exact_log2 (UINTVAL (trueop1)) > 0)
2821 return simplify_gen_binary (AND, mode, op0,
2822 GEN_INT (INTVAL (op1) - 1));
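/* Illustrative example (editorial): (umod:SI x (const_int 8)) becomes
   (and:SI x (const_int 7)); the remainder of an unsigned division by
   2**n is just the low n bits.  */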
2826 /* 0%x is 0 (or x&0 if x has side-effects). */
2827 if (trueop0 == CONST0_RTX (mode))
2829 if (side_effects_p (op1))
2830 return simplify_gen_binary (AND, mode, op1, trueop0);
2833 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2834 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2836 if (side_effects_p (op0))
2837 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2838 return CONST0_RTX (mode);
2845 if (trueop1 == CONST0_RTX (mode))
2847 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2849 /* Rotating ~0 always results in ~0. */
2850 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2851 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
2852 && ! side_effects_p (op1))
2855 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2857 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2858 if (val != INTVAL (op1))
2859 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
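/* Illustrative example (editorial): on a SHIFT_COUNT_TRUNCATED target
   in 32-bit SImode, (ashift:SI x (const_int 33)) is canonicalized to
   (ashift:SI x (const_int 1)), since only the low five bits of the
   count are significant.  */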
2866 if (trueop1 == CONST0_RTX (mode))
2868 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2870 goto canonicalize_shift;
2873 if (trueop1 == CONST0_RTX (mode))
2875 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2877 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2878 if (GET_CODE (op0) == CLZ
2879 && CONST_INT_P (trueop1)
2880 && STORE_FLAG_VALUE == 1
2881 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2883 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2884 unsigned HOST_WIDE_INT zero_val = 0;
2886 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2887 && zero_val == GET_MODE_BITSIZE (imode)
2888 && INTVAL (trueop1) == exact_log2 (zero_val))
2889 return simplify_gen_relational (EQ, mode, imode,
2890 XEXP (op0, 0), const0_rtx);
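/* Illustrative example (editorial): if CLZ_DEFINED_VALUE_AT_ZERO
   yields 32 for SImode, then (lshiftrt (clz:SI x) (const_int 5)) is 1
   exactly when the clz result is 32, i.e. when x is zero, so it
   becomes (eq:SI x (const_int 0)).  */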
2892 goto canonicalize_shift;
2895 if (width <= HOST_BITS_PER_WIDE_INT
2896 && CONST_INT_P (trueop1)
2897 && UINTVAL (trueop1) == (unsigned HOST_WIDE_INT) 1 << (width - 1)
2898 && ! side_effects_p (op0))
2900 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2902 tem = simplify_associative_operation (code, mode, op0, op1);
2908 if (width <= HOST_BITS_PER_WIDE_INT
2909 && CONST_INT_P (trueop1)
2910 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
2911 && ! side_effects_p (op0))
2913 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2915 tem = simplify_associative_operation (code, mode, op0, op1);
2921 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2923 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2925 tem = simplify_associative_operation (code, mode, op0, op1);
2931 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2933 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2935 tem = simplify_associative_operation (code, mode, op0, op1);
2948 /* ??? There are simplifications that can be done. */
2952 if (!VECTOR_MODE_P (mode))
2954 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2955 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2956 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2957 gcc_assert (XVECLEN (trueop1, 0) == 1);
2958 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2960 if (GET_CODE (trueop0) == CONST_VECTOR)
2961 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2964 /* Extract a scalar element from a nested VEC_SELECT expression
2965 (with optional nested VEC_CONCAT expression). Some targets
2966 (i386) extract a scalar element from a vector using a chain of
2967 nested VEC_SELECT expressions. When the input operand is a memory
2968 operand, this operation can be simplified to a simple scalar
2969 load from an offset memory address. */
2970 if (GET_CODE (trueop0) == VEC_SELECT)
2972 rtx op0 = XEXP (trueop0, 0);
2973 rtx op1 = XEXP (trueop0, 1);
2975 enum machine_mode opmode = GET_MODE (op0);
2976 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2977 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2979 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2985 gcc_assert (GET_CODE (op1) == PARALLEL);
2986 gcc_assert (i < n_elts);
2988 /* Select the element pointed to by the nested selector. */
2989 elem = INTVAL (XVECEXP (op1, 0, i));
2991 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2992 if (GET_CODE (op0) == VEC_CONCAT)
2994 rtx op00 = XEXP (op0, 0);
2995 rtx op01 = XEXP (op0, 1);
2997 enum machine_mode mode00, mode01;
2998 int n_elts00, n_elts01;
3000 mode00 = GET_MODE (op00);
3001 mode01 = GET_MODE (op01);
3003 /* Find out the number of elements in each operand. */
3004 if (VECTOR_MODE_P (mode00))
3006 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3007 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3012 if (VECTOR_MODE_P (mode01))
3014 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3015 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3020 gcc_assert (n_elts == n_elts00 + n_elts01);
3022 /* Select the correct operand of the VEC_CONCAT
3023 and adjust the selector. */
3024 if (elem < n_elts01)
3035 vec = rtvec_alloc (1);
3036 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3038 tmp = gen_rtx_fmt_ee (code, mode,
3039 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3042 if (GET_CODE (trueop0) == VEC_DUPLICATE
3043 && GET_MODE (XEXP (trueop0, 0)) == mode)
3044 return XEXP (trueop0, 0);
3048 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3049 gcc_assert (GET_MODE_INNER (mode)
3050 == GET_MODE_INNER (GET_MODE (trueop0)));
3051 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3053 if (GET_CODE (trueop0) == CONST_VECTOR)
3055 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3056 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3057 rtvec v = rtvec_alloc (n_elts);
3060 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3061 for (i = 0; i < n_elts; i++)
3063 rtx x = XVECEXP (trueop1, 0, i);
3065 gcc_assert (CONST_INT_P (x));
3066 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3070 return gen_rtx_CONST_VECTOR (mode, v);
3074 if (XVECLEN (trueop1, 0) == 1
3075 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3076 && GET_CODE (trueop0) == VEC_CONCAT)
3079 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3081 /* Try to find the element in the VEC_CONCAT. */
3082 while (GET_MODE (vec) != mode
3083 && GET_CODE (vec) == VEC_CONCAT)
3085 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3086 if (offset < vec_size)
3087 vec = XEXP (vec, 0);
3091 vec = XEXP (vec, 1);
3093 vec = avoid_constant_pool_reference (vec);
3096 if (GET_MODE (vec) == mode)
3103 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3104 ? GET_MODE (trueop0)
3105 : GET_MODE_INNER (mode));
3106 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3107 ? GET_MODE (trueop1)
3108 : GET_MODE_INNER (mode));
3110 gcc_assert (VECTOR_MODE_P (mode));
3111 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3112 == GET_MODE_SIZE (mode));
3114 if (VECTOR_MODE_P (op0_mode))
3115 gcc_assert (GET_MODE_INNER (mode)
3116 == GET_MODE_INNER (op0_mode));
3118 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3120 if (VECTOR_MODE_P (op1_mode))
3121 gcc_assert (GET_MODE_INNER (mode)
3122 == GET_MODE_INNER (op1_mode));
3124 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3126 if ((GET_CODE (trueop0) == CONST_VECTOR
3127 || CONST_INT_P (trueop0)
3128 || GET_CODE (trueop0) == CONST_DOUBLE)
3129 && (GET_CODE (trueop1) == CONST_VECTOR
3130 || CONST_INT_P (trueop1)
3131 || GET_CODE (trueop1) == CONST_DOUBLE))
3133 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3134 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3135 rtvec v = rtvec_alloc (n_elts);
3137 unsigned in_n_elts = 1;
3139 if (VECTOR_MODE_P (op0_mode))
3140 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3141 for (i = 0; i < n_elts; i++)
3145 if (!VECTOR_MODE_P (op0_mode))
3146 RTVEC_ELT (v, i) = trueop0;
3148 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3152 if (!VECTOR_MODE_P (op1_mode))
3153 RTVEC_ELT (v, i) = trueop1;
3155 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3160 return gen_rtx_CONST_VECTOR (mode, v);
3173 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3176 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3178 unsigned int width = GET_MODE_BITSIZE (mode);
3180 if (VECTOR_MODE_P (mode)
3181 && code != VEC_CONCAT
3182 && GET_CODE (op0) == CONST_VECTOR
3183 && GET_CODE (op1) == CONST_VECTOR)
3185 unsigned n_elts = GET_MODE_NUNITS (mode);
3186 enum machine_mode op0mode = GET_MODE (op0);
3187 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3188 enum machine_mode op1mode = GET_MODE (op1);
3189 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3190 rtvec v = rtvec_alloc (n_elts);
3193 gcc_assert (op0_n_elts == n_elts);
3194 gcc_assert (op1_n_elts == n_elts);
3195 for (i = 0; i < n_elts; i++)
3197 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3198 CONST_VECTOR_ELT (op0, i),
3199 CONST_VECTOR_ELT (op1, i));
3202 RTVEC_ELT (v, i) = x;
3205 return gen_rtx_CONST_VECTOR (mode, v);
3208 if (VECTOR_MODE_P (mode)
3209 && code == VEC_CONCAT
3210 && (CONST_INT_P (op0)
3211 || GET_CODE (op0) == CONST_DOUBLE
3212 || GET_CODE (op0) == CONST_FIXED)
3213 && (CONST_INT_P (op1)
3214 || GET_CODE (op1) == CONST_DOUBLE
3215 || GET_CODE (op1) == CONST_FIXED))
3217 unsigned n_elts = GET_MODE_NUNITS (mode);
3218 rtvec v = rtvec_alloc (n_elts);
3220 gcc_assert (n_elts >= 2);
3223 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3224 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3226 RTVEC_ELT (v, 0) = op0;
3227 RTVEC_ELT (v, 1) = op1;
3231 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3232 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3235 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3236 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3237 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3239 for (i = 0; i < op0_n_elts; ++i)
3240 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3241 for (i = 0; i < op1_n_elts; ++i)
3242 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3245 return gen_rtx_CONST_VECTOR (mode, v);
3248 if (SCALAR_FLOAT_MODE_P (mode)
3249 && GET_CODE (op0) == CONST_DOUBLE
3250 && GET_CODE (op1) == CONST_DOUBLE
3251 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3262 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3264 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3266 for (i = 0; i < 4; i++)
3283 real_from_target (&r, tmp0, mode);
3284 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3288 REAL_VALUE_TYPE f0, f1, value, result;
3291 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3292 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3293 real_convert (&f0, mode, &f0);
3294 real_convert (&f1, mode, &f1);
3296 if (HONOR_SNANS (mode)
3297 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3301 && REAL_VALUES_EQUAL (f1, dconst0)
3302 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3305 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3306 && flag_trapping_math
3307 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3309 int s0 = REAL_VALUE_NEGATIVE (f0);
3310 int s1 = REAL_VALUE_NEGATIVE (f1);
3315 /* Inf + -Inf = NaN plus exception. */
3320 /* Inf - Inf = NaN plus exception. */
3325 /* Inf / Inf = NaN plus exception. */
3332 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3333 && flag_trapping_math
3334 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3335 || (REAL_VALUE_ISINF (f1)
3336 && REAL_VALUES_EQUAL (f0, dconst0))))
3337 /* Inf * 0 = NaN plus exception. */
3340 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3342 real_convert (&result, mode, &value);
3344 /* Don't constant fold this floating point operation if
3345 the result has overflowed and flag_trapping_math. */
3347 if (flag_trapping_math
3348 && MODE_HAS_INFINITIES (mode)
3349 && REAL_VALUE_ISINF (result)
3350 && !REAL_VALUE_ISINF (f0)
3351 && !REAL_VALUE_ISINF (f1))
3352 /* Overflow plus exception. */
3355 /* Don't constant fold this floating point operation if the
3356 result may depend upon the run-time rounding mode and
3357 flag_rounding_math is set, or if GCC's software emulation
3358 is unable to accurately represent the result. */
3360 if ((flag_rounding_math
3361 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3362 && (inexact || !real_identical (&result, &value)))
3365 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3369 /* We can fold some multi-word operations. */
3370 if (GET_MODE_CLASS (mode) == MODE_INT
3371 && width == HOST_BITS_PER_DOUBLE_INT
3372 && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3373 && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3375 double_int o0, o1, res, tmp;
3377 o0 = rtx_to_double_int (op0);
3378 o1 = rtx_to_double_int (op1);
3383 /* A - B == A + (-B). */
3384 o1 = double_int_neg (o1);
3386 /* Fall through. */
3389 res = double_int_add (o0, o1);
3393 res = double_int_mul (o0, o1);
3397 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3398 o0.low, o0.high, o1.low, o1.high,
3399 &res.low, &res.high,
3400 &tmp.low, &tmp.high))
3405 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3406 o0.low, o0.high, o1.low, o1.high,
3407 &tmp.low, &tmp.high,
3408 &res.low, &res.high))
3413 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3414 o0.low, o0.high, o1.low, o1.high,
3415 &res.low, &res.high,
3416 &tmp.low, &tmp.high))
3421 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3422 o0.low, o0.high, o1.low, o1.high,
3423 &tmp.low, &tmp.high,
3424 &res.low, &res.high))
3429 res = double_int_and (o0, o1);
3433 res = double_int_ior (o0, o1);
3437 res = double_int_xor (o0, o1);
3441 res = double_int_smin (o0, o1);
3445 res = double_int_smax (o0, o1);
3449 res = double_int_umin (o0, o1);
3453 res = double_int_umax (o0, o1);
3456 case LSHIFTRT: case ASHIFTRT:
3458 case ROTATE: case ROTATERT:
3460 unsigned HOST_WIDE_INT cnt;
3462 if (SHIFT_COUNT_TRUNCATED)
3463 o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));
3465 if (!double_int_fits_in_uhwi_p (o1)
3466 || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
3469 cnt = double_int_to_uhwi (o1);
3471 if (code == LSHIFTRT || code == ASHIFTRT)
3472 res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode),
3474 else if (code == ASHIFT)
3475 res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode),
3477 else if (code == ROTATE)
3478 res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3479 else /* code == ROTATERT */
3480 res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3488 return immed_double_int_const (res, mode);
3491 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3492 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3494 /* Get the integer argument values in two forms:
3495 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3497 arg0 = INTVAL (op0);
3498 arg1 = INTVAL (op1);
3500 if (width < HOST_BITS_PER_WIDE_INT)
3502 arg0 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3503 arg1 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3506 if (arg0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3507 arg0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3510 if (arg1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3511 arg1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3519 /* Compute the value of the arithmetic. */
3524 val = arg0s + arg1s;
3528 val = arg0s - arg1s;
3532 val = arg0s * arg1s;
3537 || ((unsigned HOST_WIDE_INT) arg0s
3538 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3541 val = arg0s / arg1s;
3546 || ((unsigned HOST_WIDE_INT) arg0s
3547 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3550 val = arg0s % arg1s;
3555 || ((unsigned HOST_WIDE_INT) arg0s
3556 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3559 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3564 || ((unsigned HOST_WIDE_INT) arg0s
3565 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3568 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3586 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3587 the value is in range. We can't return any old value for
3588 out-of-range arguments because either the middle-end (via
3589 shift_truncation_mask) or the back-end might be relying on
3590 target-specific knowledge. Nor can we rely on
3591 shift_truncation_mask, since the shift might not be part of an
3592 ashlM3, lshrM3 or ashrM3 instruction. */
3593 if (SHIFT_COUNT_TRUNCATED)
3594 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3595 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3598 val = (code == ASHIFT
3599 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3600 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3602 /* Sign-extend the result for arithmetic right shifts. */
3603 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3604 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3612 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3613 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3621 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3622 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3626 /* Do nothing here. */
3630 val = arg0s <= arg1s ? arg0s : arg1s;
3634 val = ((unsigned HOST_WIDE_INT) arg0
3635 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3639 val = arg0s > arg1s ? arg0s : arg1s;
3643 val = ((unsigned HOST_WIDE_INT) arg0
3644 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3657 /* ??? There are simplifications that can be done. */
3664 return gen_int_mode (val, mode);
3672 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3673 PLUS or MINUS.
3675 Rather than test for specific cases, we do this by a brute-force method
3676 and do all possible simplifications until no more changes occur. Then
3677 we rebuild the operation. */
3679 struct simplify_plus_minus_op_data
3686 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3690 result = (commutative_operand_precedence (y)
3691 - commutative_operand_precedence (x));
3695 /* Group together equal REGs to do more simplification. */
3696 if (REG_P (x) && REG_P (y))
3697 return REGNO (x) > REGNO (y);
3703 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3706 struct simplify_plus_minus_op_data ops[8];
3708 int n_ops = 2, input_ops = 2;
3709 int changed, n_constants = 0, canonicalized = 0;
3712 memset (ops, 0, sizeof ops);
3714 /* Set up the two operands and then expand them until nothing has been
3715 changed. If we run out of room in our array, give up; this should
3716 almost never happen. */
3721 ops[1].neg = (code == MINUS);
3727 for (i = 0; i < n_ops; i++)
3729 rtx this_op = ops[i].op;
3730 int this_neg = ops[i].neg;
3731 enum rtx_code this_code = GET_CODE (this_op);
3740 ops[n_ops].op = XEXP (this_op, 1);
3741 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3744 ops[i].op = XEXP (this_op, 0);
3747 canonicalized |= this_neg;
3751 ops[i].op = XEXP (this_op, 0);
3752 ops[i].neg = ! this_neg;
3759 && GET_CODE (XEXP (this_op, 0)) == PLUS
3760 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3761 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3763 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3764 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3765 ops[n_ops].neg = this_neg;
3773 /* ~a -> (-a - 1) */
3776 ops[n_ops].op = constm1_rtx;
3777 ops[n_ops++].neg = this_neg;
3778 ops[i].op = XEXP (this_op, 0);
3779 ops[i].neg = !this_neg;
3789 ops[i].op = neg_const_int (mode, this_op);
3803 if (n_constants > 1)
3806 gcc_assert (n_ops >= 2);
3808 /* If we only have two operands, we can avoid the loops. */
3811 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3814 /* Get the two operands. Be careful with the order, especially for
3815 the cases where code == MINUS. */
3816 if (ops[0].neg && ops[1].neg)
3818 lhs = gen_rtx_NEG (mode, ops[0].op);
3821 else if (ops[0].neg)
3832 return simplify_const_binary_operation (code, mode, lhs, rhs);
3835 /* Now simplify each pair of operands until nothing changes. */
3838 /* Insertion sort is good enough for an eight-element array. */
3839 for (i = 1; i < n_ops; i++)
3841 struct simplify_plus_minus_op_data save;
3843 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3849 ops[j + 1] = ops[j];
3850 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3855 for (i = n_ops - 1; i > 0; i--)
3856 for (j = i - 1; j >= 0; j--)
3858 rtx lhs = ops[j].op, rhs = ops[i].op;
3859 int lneg = ops[j].neg, rneg = ops[i].neg;
3861 if (lhs != 0 && rhs != 0)
3863 enum rtx_code ncode = PLUS;
3869 tem = lhs, lhs = rhs, rhs = tem;
3871 else if (swap_commutative_operands_p (lhs, rhs))
3872 tem = lhs, lhs = rhs, rhs = tem;
3874 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3875 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3877 rtx tem_lhs, tem_rhs;
3879 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3880 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3881 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3883 if (tem && !CONSTANT_P (tem))
3884 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3887 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3889 /* Reject "simplifications" that just wrap the two
3890 arguments in a CONST. Failure to do so can result
3891 in infinite recursion with simplify_binary_operation
3892 when it calls us to simplify CONST operations. */
3894 && ! (GET_CODE (tem) == CONST
3895 && GET_CODE (XEXP (tem, 0)) == ncode
3896 && XEXP (XEXP (tem, 0), 0) == lhs
3897 && XEXP (XEXP (tem, 0), 1) == rhs))
3900 if (GET_CODE (tem) == NEG)
3901 tem = XEXP (tem, 0), lneg = !lneg;
3902 if (CONST_INT_P (tem) && lneg)
3903 tem = neg_const_int (mode, tem), lneg = 0;
3907 ops[j].op = NULL_RTX;
3914 /* If nothing changed, fail. */
3918 /* Pack all the operands to the lower-numbered entries. */
3919 for (i = 0, j = 0; j < n_ops; j++)
3929 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3931 && CONST_INT_P (ops[1].op)
3932 && CONSTANT_P (ops[0].op)
3934 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3936 /* We suppressed creation of trivial CONST expressions in the
3937 combination loop to avoid recursion. Create one manually now.
3938 The combination loop should have ensured that there is exactly
3939 one CONST_INT, and the sort will have ensured that it is last
3940 in the array and that any other constant will be next-to-last. */
3943 && CONST_INT_P (ops[n_ops - 1].op)
3944 && CONSTANT_P (ops[n_ops - 2].op))
3946 rtx value = ops[n_ops - 1].op;
3947 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3948 value = neg_const_int (mode, value);
3949 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3953 /* Put a non-negated operand first, if possible. */
3955 for (i = 0; i < n_ops && ops[i].neg; i++)
3958 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3967 /* Now make the result by performing the requested operations. */
3969 for (i = 1; i < n_ops; i++)
3970 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3971 mode, result, ops[i].op);
3976 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3978 plus_minus_operand_p (const_rtx x)
3980 return GET_CODE (x) == PLUS
3981 || GET_CODE (x) == MINUS
3982 || (GET_CODE (x) == CONST
3983 && GET_CODE (XEXP (x, 0)) == PLUS
3984 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3985 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
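/* Illustrative examples (editorial) of operands accepted above:
   (plus a b), (minus a b), and
   (const (plus (symbol_ref "x") (const_int 4))).  */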
3988 /* Like simplify_binary_operation except used for relational operators.
3989 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3990 not both be VOIDmode.
3992 CMP_MODE specifies the mode in which the comparison is done, so it is
3993 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3994 the operands or, if both are VOIDmode, the operands are compared in
3995 "infinite precision". */
3997 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3998 enum machine_mode cmp_mode, rtx op0, rtx op1)
4000 rtx tem, trueop0, trueop1;
4002 if (cmp_mode == VOIDmode)
4003 cmp_mode = GET_MODE (op0);
4004 if (cmp_mode == VOIDmode)
4005 cmp_mode = GET_MODE (op1);
4007 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4010 if (SCALAR_FLOAT_MODE_P (mode))
4012 if (tem == const0_rtx)
4013 return CONST0_RTX (mode);
4014 #ifdef FLOAT_STORE_FLAG_VALUE
4016 REAL_VALUE_TYPE val;
4017 val = FLOAT_STORE_FLAG_VALUE (mode);
4018 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4024 if (VECTOR_MODE_P (mode))
4026 if (tem == const0_rtx)
4027 return CONST0_RTX (mode);
4028 #ifdef VECTOR_STORE_FLAG_VALUE
4033 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4034 if (val == NULL_RTX)
4036 if (val == const1_rtx)
4037 return CONST1_RTX (mode);
4039 units = GET_MODE_NUNITS (mode);
4040 v = rtvec_alloc (units);
4041 for (i = 0; i < units; i++)
4042 RTVEC_ELT (v, i) = val;
4043 return gen_rtx_raw_CONST_VECTOR (mode, v);
4053 /* For the following tests, ensure const0_rtx is op1. */
4054 if (swap_commutative_operands_p (op0, op1)
4055 || (op0 == const0_rtx && op1 != const0_rtx))
4056 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4058 /* If op0 is a compare, extract the comparison arguments from it. */
4059 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4060 return simplify_gen_relational (code, mode, VOIDmode,
4061 XEXP (op0, 0), XEXP (op0, 1));
4063 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4067 trueop0 = avoid_constant_pool_reference (op0);
4068 trueop1 = avoid_constant_pool_reference (op1);
4069 return simplify_relational_operation_1 (code, mode, cmp_mode,
4073 /* This part of simplify_relational_operation is only used when CMP_MODE
4074 is not in class MODE_CC (i.e. it is a real comparison).
4076 MODE is the mode of the result, while CMP_MODE specifies the mode in
4077 which the comparison is done, so it is the mode of the operands. */
4080 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4081 enum machine_mode cmp_mode, rtx op0, rtx op1)
4083 enum rtx_code op0code = GET_CODE (op0);
4085 if (op1 == const0_rtx && COMPARISON_P (op0))
4087 /* If op0 is a comparison, extract the comparison arguments
4088 from it. */
4089 if (code == NE)
4091 if (GET_MODE (op0) == mode)
4092 return simplify_rtx (op0);
4094 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4095 XEXP (op0, 0), XEXP (op0, 1));
4097 else if (code == EQ)
4099 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4100 if (new_code != UNKNOWN)
4101 return simplify_gen_relational (new_code, mode, VOIDmode,
4102 XEXP (op0, 0), XEXP (op0, 1));
4106 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4107 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4108 if ((code == LTU || code == GEU)
4109 && GET_CODE (op0) == PLUS
4110 && CONST_INT_P (XEXP (op0, 1))
4111 && (rtx_equal_p (op1, XEXP (op0, 0))
4112 || rtx_equal_p (op1, XEXP (op0, 1))))
4115 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4116 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4117 cmp_mode, XEXP (op0, 0), new_cmp);
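/* Illustrative example (editorial): the unsigned overflow test
   (ltu (plus a (const_int 4)) (const_int 4)) becomes
   (geu a (const_int -4)); both are true exactly when a + 4 wraps
   around in the comparison mode.  */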
4120 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4121 if ((code == LTU || code == GEU)
4122 && GET_CODE (op0) == PLUS
4123 && rtx_equal_p (op1, XEXP (op0, 1))
4124 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4125 && !rtx_equal_p (op1, XEXP (op0, 0)))
4126 return simplify_gen_relational (code, mode, cmp_mode, op0,
4127 copy_rtx (XEXP (op0, 0)));
4129 if (op1 == const0_rtx)
4131 /* Canonicalize (GTU x 0) as (NE x 0). */
4133 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4134 /* Canonicalize (LEU x 0) as (EQ x 0). */
4136 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4138 else if (op1 == const1_rtx)
4143 /* Canonicalize (GE x 1) as (GT x 0). */
4144 return simplify_gen_relational (GT, mode, cmp_mode,
4147 /* Canonicalize (GEU x 1) as (NE x 0). */
4148 return simplify_gen_relational (NE, mode, cmp_mode,
4151 /* Canonicalize (LT x 1) as (LE x 0). */
4152 return simplify_gen_relational (LE, mode, cmp_mode,
4155 /* Canonicalize (LTU x 1) as (EQ x 0). */
4156 return simplify_gen_relational (EQ, mode, cmp_mode,
4162 else if (op1 == constm1_rtx)
4164 /* Canonicalize (LE x -1) as (LT x 0). */
4166 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4167 /* Canonicalize (GT x -1) as (GE x 0). */
4169 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4172 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4173 if ((code == EQ || code == NE)
4174 && (op0code == PLUS || op0code == MINUS)
4176 && CONSTANT_P (XEXP (op0, 1))
4177 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4179 rtx x = XEXP (op0, 0);
4180 rtx c = XEXP (op0, 1);
4181 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4182 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4184 /* Detect an infinite recursive condition, where we oscillate at this
4185 simplification case between:
4186 A + B == C <---> C - B == A,
4187 where A, B, and C are all constants with non-simplifiable expressions,
4188 usually SYMBOL_REFs. */
4189 if (GET_CODE (tem) == invcode
4191 && rtx_equal_p (c, XEXP (tem, 1)))
4194 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
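/* Illustrative example (editorial): (eq (plus x (const_int 3))
   (const_int 7)) becomes (eq x (const_int 4)).  The guard above only
   rejects the degenerate case where op1 - c fails to fold and the
   rewrite would oscillate forever.  */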
4197 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4198 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4200 && op1 == const0_rtx
4201 && GET_MODE_CLASS (mode) == MODE_INT
4202 && cmp_mode != VOIDmode
4203 /* ??? Work-around BImode bugs in the ia64 backend. */
4205 && cmp_mode != BImode
4206 && nonzero_bits (op0, cmp_mode) == 1
4207 && STORE_FLAG_VALUE == 1)
4208 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4209 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4210 : lowpart_subreg (mode, op0, cmp_mode);
4212 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4213 if ((code == EQ || code == NE)
4214 && op1 == const0_rtx
4216 return simplify_gen_relational (code, mode, cmp_mode,
4217 XEXP (op0, 0), XEXP (op0, 1));
4219 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4220 if ((code == EQ || code == NE)
4222 && rtx_equal_p (XEXP (op0, 0), op1)
4223 && !side_effects_p (XEXP (op0, 0)))
4224 return simplify_gen_relational (code, mode, cmp_mode,
4225 XEXP (op0, 1), const0_rtx);
4227 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4228 if ((code == EQ || code == NE)
4230 && rtx_equal_p (XEXP (op0, 1), op1)
4231 && !side_effects_p (XEXP (op0, 1)))
4232 return simplify_gen_relational (code, mode, cmp_mode,
4233 XEXP (op0, 0), const0_rtx);
4235 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4236 if ((code == EQ || code == NE)
4238 && (CONST_INT_P (op1)
4239 || GET_CODE (op1) == CONST_DOUBLE)
4240 && (CONST_INT_P (XEXP (op0, 1))
4241 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4242 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4243 simplify_gen_binary (XOR, cmp_mode,
4244 XEXP (op0, 1), op1));
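/* Illustrative example (editorial): (eq (xor x (const_int 5))
   (const_int 12)) becomes (eq x (const_int 9)), since 5 ^ 12 == 9.  */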
4246 if (op0code == POPCOUNT && op1 == const0_rtx)
4252 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4253 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4254 XEXP (op0, 0), const0_rtx);
4259 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4260 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4261 XEXP (op0, 0), const0_rtx);
4280 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4281 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4282 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4283 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4284 For floating-point comparisons, assume that the operands were ordered. */
4287 comparison_result (enum rtx_code code, int known_results)
4293 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4296 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4300 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4303 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4307 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4310 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4313 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4315 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4318 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4320 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4323 return const_true_rtx;
4331 /* Check if the given comparison (done in the given MODE) is actually a
4332 tautology or a contradiction.
4333 If no simplification is possible, this function returns zero.
4334 Otherwise, it returns either const_true_rtx or const0_rtx. */
4337 simplify_const_relational_operation (enum rtx_code code,
4338 enum machine_mode mode,
4345 gcc_assert (mode != VOIDmode
4346 || (GET_MODE (op0) == VOIDmode
4347 && GET_MODE (op1) == VOIDmode));
4349 /* If op0 is a compare, extract the comparison arguments from it. */
4350 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4352 op1 = XEXP (op0, 1);
4353 op0 = XEXP (op0, 0);
4355 if (GET_MODE (op0) != VOIDmode)
4356 mode = GET_MODE (op0);
4357 else if (GET_MODE (op1) != VOIDmode)
4358 mode = GET_MODE (op1);
4363 /* We can't simplify MODE_CC values since we don't know what the
4364 actual comparison is. */
4365 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4368 /* Make sure the constant is second. */
4369 if (swap_commutative_operands_p (op0, op1))
4371 tem = op0, op0 = op1, op1 = tem;
4372 code = swap_condition (code);
4375 trueop0 = avoid_constant_pool_reference (op0);
4376 trueop1 = avoid_constant_pool_reference (op1);
4378 /* For integer comparisons of A and B maybe we can simplify A - B and can
4379 then simplify a comparison of that with zero. If A and B are both either
4380 a register or a CONST_INT, this can't help; testing for these cases will
4381 prevent infinite recursion here and speed things up.
4383 We can only do this for EQ and NE comparisons; otherwise we may
4384 lose or introduce overflow, which we cannot disregard as undefined
4385 because we do not know the signedness of the operation on either the
4386 left or the right hand side of the comparison. */
4388 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4389 && (code == EQ || code == NE)
4390 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4391 && (REG_P (op1) || CONST_INT_P (trueop1)))
4392 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4393 /* We cannot do this if tem is a nonzero address. */
4394 && ! nonzero_address_p (tem))
4395 return simplify_const_relational_operation (signed_condition (code),
4396 mode, tem, const0_rtx);
4398 if (! HONOR_NANS (mode) && code == ORDERED)
4399 return const_true_rtx;
4401 if (! HONOR_NANS (mode) && code == UNORDERED)
4404 /* For modes without NaNs, if the two operands are equal, we know the
4405 result except if they have side-effects. Even with NaNs we know
4406 the result of unordered comparisons and, if signaling NaNs are
4407 irrelevant, also the result of LT/GT/LTGT. */
4408 if ((! HONOR_NANS (GET_MODE (trueop0))
4409 || code == UNEQ || code == UNLE || code == UNGE
4410 || ((code == LT || code == GT || code == LTGT)
4411 && ! HONOR_SNANS (GET_MODE (trueop0))))
4412 && rtx_equal_p (trueop0, trueop1)
4413 && ! side_effects_p (trueop0))
4414 return comparison_result (code, CMP_EQ);
4416 /* If the operands are floating-point constants, see if we can fold
4417 the result. */
4418 if (GET_CODE (trueop0) == CONST_DOUBLE
4419 && GET_CODE (trueop1) == CONST_DOUBLE
4420 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4422 REAL_VALUE_TYPE d0, d1;
4424 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4425 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4427 /* Comparisons are unordered iff at least one of the values is NaN. */
4428 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4438 return const_true_rtx;
4451 return comparison_result (code,
4452 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4453 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4456 /* Otherwise, see if the operands are both integers. */
4457 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4458 && (GET_CODE (trueop0) == CONST_DOUBLE
4459 || CONST_INT_P (trueop0))
4460 && (GET_CODE (trueop1) == CONST_DOUBLE
4461 || CONST_INT_P (trueop1)))
4463 int width = GET_MODE_BITSIZE (mode);
4464 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4465 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4467 /* Get the two words comprising each integer constant. */
4468 if (GET_CODE (trueop0) == CONST_DOUBLE)
4470 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4471 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4475 l0u = l0s = INTVAL (trueop0);
4476 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4479 if (GET_CODE (trueop1) == CONST_DOUBLE)
4481 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4482 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4486 l1u = l1s = INTVAL (trueop1);
4487 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4490 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4491 we have to sign or zero-extend the values. */
4492 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4494 l0u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4495 l1u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4497 if (l0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4498 l0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4500 if (l1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4501 l1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4503 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4504 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4506 if (h0u == h1u && l0u == l1u)
4507 return comparison_result (code, CMP_EQ);
4511 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4512 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4513 return comparison_result (code, cr);
4517 /* Optimize comparisons with upper and lower bounds. */
4518 if (SCALAR_INT_MODE_P (mode)
4519 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4520 && CONST_INT_P (trueop1))
4523 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4524 HOST_WIDE_INT val = INTVAL (trueop1);
4525 HOST_WIDE_INT mmin, mmax;
4535 /* Get a reduced range if the sign bit is zero. */
4536 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4543 rtx mmin_rtx, mmax_rtx;
4544 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4546 mmin = INTVAL (mmin_rtx);
4547 mmax = INTVAL (mmax_rtx);
4550 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4552 mmin >>= (sign_copies - 1);
4553 mmax >>= (sign_copies - 1);
      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.
	   The signed cases (GE, LE, GT, LT) are handled like the
	   unsigned ones below, but with signed comparisons against
	   MMIN and MMAX.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;

	/* x == y is always false for y out of range.  */
	case EQ:
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;

	/* x != y is always true for y out of range.  */
	case NE:
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
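      /* For example, for x in QImode this folds
	 (ltu x (const_int 0)) to const0_rtx and
	 (leu x (const_int 255)) to const_true_rtx, since no unsigned
	 QImode value lies outside [0, 255].  */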
4632 /* Optimize integer comparisons with zero. */
4633 if (trueop1 == const0_rtx)
4635 /* Some addresses are known to be nonzero. We don't know
4636 their sign, but equality comparisons are known. */
4637 if (nonzero_address_p (trueop0))
	  if (code == EQ || code == LEU)
	    return const0_rtx;
4641 if (code == NE || code == GTU)
4642 return const_true_rtx;
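	  /* E.g. (ne (symbol_ref ("foo")) (const_int 0)) folds to
	     const_true_rtx: the address of an object is assumed to be
	     nonzero even though its actual value is unknown.  */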
4645 /* See if the first operand is an IOR with a constant. If so, we
4646 may be able to determine the result of this comparison. */
4647 if (GET_CODE (op0) == IOR)
4649 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4650 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4652 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4653 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4654 && (UINTVAL (inner_const)
			      & ((unsigned HOST_WIDE_INT) 1
				 << sign_bitnum)));
4665 return const_true_rtx;
4669 return const_true_rtx;
4683 /* Optimize comparison of ABS with zero. */
4684 if (trueop1 == CONST0_RTX (mode)
4685 && (GET_CODE (trueop0) == ABS
4686 || (GET_CODE (trueop0) == FLOAT_EXTEND
4687 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
4693 if (!HONOR_SNANS (mode)
4694 && (!INTEGRAL_MODE_P (mode)
4695 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4697 if (INTEGRAL_MODE_P (mode)
4698 && (issue_strict_overflow_warning
4699 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4700 warning (OPT_Wstrict_overflow,
4701 ("assuming signed overflow does not occur when "
4702 "assuming abs (x) < 0 is false"));
	case GE:
	  /* Optimize abs(x) >= 0.0.  */
4709 if (!HONOR_NANS (mode)
4710 && (!INTEGRAL_MODE_P (mode)
4711 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4713 if (INTEGRAL_MODE_P (mode)
4714 && (issue_strict_overflow_warning
4715 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4716 warning (OPT_Wstrict_overflow,
4717 ("assuming signed overflow does not occur when "
4718 "assuming abs (x) >= 0 is true"));
4719 return const_true_rtx;
	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
4725 return const_true_rtx;
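	  /* E.g. when NaNs need not be honored (say, under -ffast-math),
	     (ge (abs:SF x) (const_double:SF 0.0)) folds to const_true_rtx,
	     and (lt (abs:SF x) (const_double:SF 0.0)) folds to
	     const0_rtx.  */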
4735 /* Simplify CODE, an operation with result mode MODE and three operands,
4736 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4741 enum machine_mode op0_mode, rtx op0, rtx op1,
4744 unsigned int width = GET_MODE_BITSIZE (mode);
  bool any_change = false;
  rtx tem;
4748 /* VOIDmode means "infinite" precision. */
4750 width = HOST_BITS_PER_WIDE_INT;
  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
4756 /* -a * -b + c => a * b + c. */
4757 if (GET_CODE (op0) == NEG)
4759 tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4763 else if (GET_CODE (op1) == NEG)
4765 tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4770 /* Canonicalize the two multiplication operands. */
4771 /* a * -b + c => -b * a + c. */
4772 if (swap_commutative_operands_p (op0, op1))
4773 tem = op0, op0 = op1, op1 = tem, any_change = true;
      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
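      /* E.g. (fma:SF (neg:SF a) (neg:SF b) c) is rebuilt as
	 (fma:SF a b c), and a constant multiplicand is commuted into
	 the second position, so (fma:SF (const_double:SF 2.0) x y)
	 becomes (fma:SF x (const_double:SF 2.0) y).  */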
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
4782 && CONST_INT_P (op1)
4783 && CONST_INT_P (op2)
4784 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4785 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	  /* Extracting a bit-field from a constant.  */
4788 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4790 if (BITS_BIG_ENDIAN)
4791 val >>= GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1);
	  else
	    val >>= INTVAL (op2);
4795 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4797 /* First zero-extend. */
4798 val &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4799 /* If desired, propagate sign bit. */
4800 if (code == SIGN_EXTRACT
4801 && (val & ((unsigned HOST_WIDE_INT) 1 << (INTVAL (op1) - 1)))
4803 val |= ~ (((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4806 /* Clear the bits that don't belong in our mode,
4807 unless they and our sign bit are all one.
4808 So we get either a reasonable negative value or a reasonable
4809 unsigned value for this mode. */
4810 if (width < HOST_BITS_PER_WIDE_INT
4811 && ((val & ((unsigned HOST_WIDE_INT) (-1) << (width - 1)))
4812 != ((unsigned HOST_WIDE_INT) (-1) << (width - 1))))
4813 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4815 return gen_int_mode (val, mode);
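	  /* For example, assuming BITS_BIG_ENDIAN is 0,
	     (zero_extract:SI (const_int 0xab) (const_int 4) (const_int 4))
	     extracts the high nibble and folds to (const_int 10), while
	     the corresponding sign_extract folds to (const_int -6)
	     because bit 3 of the extracted field is set.  */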
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
4821 return op0 != const0_rtx ? op1 : op2;
4823 /* Convert c ? a : a into "a". */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;
4827 /* Convert a != b ? a : b into "a". */
4828 if (GET_CODE (op0) == NE
4829 && ! side_effects_p (op0)
4830 && ! HONOR_NANS (mode)
4831 && ! HONOR_SIGNED_ZEROS (mode)
4832 && ((rtx_equal_p (XEXP (op0, 0), op1)
4833 && rtx_equal_p (XEXP (op0, 1), op2))
4834 || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;
4838 /* Convert a == b ? a : b into "b". */
4839 if (GET_CODE (op0) == EQ
4840 && ! side_effects_p (op0)
4841 && ! HONOR_NANS (mode)
4842 && ! HONOR_SIGNED_ZEROS (mode)
4843 && ((rtx_equal_p (XEXP (op0, 0), op1)
4844 && rtx_equal_p (XEXP (op0, 1), op2))
4845 || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;
4849 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4851 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4852 ? GET_MODE (XEXP (op0, 1))
4853 : GET_MODE (XEXP (op0, 0)));
4856 /* Look for happy constants in op1 and op2. */
4857 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4859 HOST_WIDE_INT t = INTVAL (op1);
4860 HOST_WIDE_INT f = INTVAL (op2);
4862 if (t == STORE_FLAG_VALUE && f == 0)
4863 code = GET_CODE (op0);
4864 else if (t == 0 && f == STORE_FLAG_VALUE)
	      enum rtx_code tmp = reversed_comparison_code (op0, NULL_RTX);
	      if (tmp == UNKNOWN)
		break;
	      code = tmp;
4875 return simplify_gen_relational (code, mode, cmp_mode,
4876 XEXP (op0, 0), XEXP (op0, 1));
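	  /* With the common STORE_FLAG_VALUE of 1, this turns
	     (if_then_else (lt x y) (const_int 1) (const_int 0)) back
	     into (lt x y), and the (const_int 0)/(const_int 1) variant
	     into the reversed comparison (ge x y) whenever the
	     reversal is known to be safe.  */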
4879 if (cmp_mode == VOIDmode)
4880 cmp_mode = op0_mode;
4881 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));
4885 /* See if any simplifications were possible. */
4888 if (CONST_INT_P (temp))
4889 return temp == const0_rtx ? op2 : op1;
4891 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
4898 gcc_assert (GET_MODE (op1) == mode);
4899 gcc_assert (VECTOR_MODE_P (mode));
4900 op2 = avoid_constant_pool_reference (op2);
4901 if (CONST_INT_P (op2))
4903 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4904 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4905 int mask = (1 << n_elts) - 1;
	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;
4912 op0 = avoid_constant_pool_reference (op0);
4913 op1 = avoid_constant_pool_reference (op1);
4914 if (GET_CODE (op0) == CONST_VECTOR
4915 && GET_CODE (op1) == CONST_VECTOR)
4917 rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
4921 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4922 ? CONST_VECTOR_ELT (op0, i)
4923 : CONST_VECTOR_ELT (op1, i));
4924 return gen_rtx_CONST_VECTOR (mode, v);
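	      /* E.g. (vec_merge:V2SI
			(const_vector [(const_int 1) (const_int 2)])
			(const_vector [(const_int 3) (const_int 4)])
			(const_int 1))
		 folds to (const_vector [(const_int 1) (const_int 4)]);
		 a set bit in the mask selects the element from the
		 first operand.  */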
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.
4940 Works by unpacking OP into a collection of 8-bit values
4941 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4942 and then repacking them again for OUTERMODE. */
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
4946 enum machine_mode innermode, unsigned int byte)
4948 /* We support up to 512-bit values (for V8DFmode). */
  enum { max_bitsize = 512, value_bit = 8,
	 value_mask = (1 << value_bit) - 1 };
4954 unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
4964 enum mode_class outer_class;
4965 enum machine_mode outer_submode;
4967 /* Some ports misuse CCmode. */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;
4971 /* We have no way to represent a complex constant at the rtl level. */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;
4975 /* Unpack the value. */
4977 if (GET_CODE (op) == CONST_VECTOR)
4979 num_elem = CONST_VECTOR_NUNITS (op);
4980 elems = &CONST_VECTOR_ELT (op, 0);
4981 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
4989 /* If this asserts, it is too complicated; reducing value_bit may help. */
4990 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4991 /* I don't know how to handle endianness of sub-units. */
4992 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4994 for (elem = 0; elem < num_elem; elem++)
      unsigned char *vp;
      rtx el = elems[elem];
      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
5002 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5003 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5005 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5006 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5007 unsigned bytele = (subword_byte % UNITS_PER_WORD
5008 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5009 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5012 switch (GET_CODE (el))
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
5018 *vp++ = INTVAL (el) >> i;
5019 /* CONST_INTs are always logically sign-extended. */
5020 for (; i < elem_bitsize; i += value_bit)
5021 *vp++ = INTVAL (el) < 0 ? -1 : 0;
	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
5027 /* If this triggers, someone should have generated a
5028 CONST_INT instead. */
5029 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5031 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5032 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5033 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
5046 long tmp[max_bitsize / 32];
5047 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5049 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5050 gcc_assert (bitsize <= elem_bitsize);
5051 gcc_assert (bitsize % value_bit == 0);
	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));
5056 /* real_to_target produces its result in words affected by
5057 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5058 and use WORDS_BIG_ENDIAN instead; see the documentation
5059 of SUBREG in rtl.texi. */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5080 for (i = 0; i < elem_bitsize; i += value_bit)
5081 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5086 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		   i += value_bit)
5089 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5090 >> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
5101 /* Now, pick the right byte to start with. */
5102 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5103 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5104 will already have offset 0. */
5105 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5107 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5109 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5110 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5111 byte = (subword_byte % UNITS_PER_WORD
5112 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5115 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5116 so if it's become negative it will instead be very large.) */
5117 gcc_assert (byte < GET_MODE_SIZE (innermode));
5119 /* Convert from bytes to chunks of size value_bit. */
5120 value_start = byte * (BITS_PER_UNIT / value_bit);
5122 /* Re-pack the value. */
5124 if (VECTOR_MODE_P (outermode))
5126 num_elem = GET_MODE_NUNITS (outermode);
5127 result_v = rtvec_alloc (num_elem);
5128 elems = &RTVEC_ELT (result_v, 0);
5129 outer_submode = GET_MODE_INNER (outermode);
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }
5138 outer_class = GET_MODE_CLASS (outer_submode);
5139 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5141 gcc_assert (elem_bitsize % value_bit == 0);
5142 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5144 for (elem = 0; elem < num_elem; elem++)
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
5151 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5152 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5154 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5155 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5156 unsigned bytele = (subword_byte % UNITS_PER_WORD
5157 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5158 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5161 switch (outer_class)
	case MODE_INT:
	case MODE_PARTIAL_INT:
5166 unsigned HOST_WIDE_INT hi = 0, lo = 0;
	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
5171 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5172 for (; i < elem_bitsize; i += value_bit)
5173 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5174 << (i - HOST_BITS_PER_WIDE_INT);
	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
5178 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5179 elems[elem] = gen_int_mode (lo, outer_submode);
5180 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5181 elems[elem] = immed_double_const (lo, hi, outer_submode);
	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];
5193 /* real_from_target wants its input in words affected by
5194 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5195 and use WORDS_BIG_ENDIAN instead; see the documentation
5196 of SUBREG in rtl.texi. */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }
5209 real_from_target (&r, tmp, outer_submode);
5210 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;
	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
5227 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5228 for (; i < elem_bitsize; i += value_bit)
5229 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5230 << (i - HOST_BITS_PER_WIDE_INT));
5232 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5240 if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
5246 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5247 Return 0 if no simplifications are possible. */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
5250 enum machine_mode innermode, unsigned int byte)
5252 /* Little bit of sanity checking. */
5253 gcc_assert (innermode != VOIDmode);
5254 gcc_assert (outermode != VOIDmode);
5255 gcc_assert (innermode != BLKmode);
5256 gcc_assert (outermode != BLKmode);
5258 gcc_assert (GET_MODE (op) == innermode
5259 || GET_MODE (op) == VOIDmode);
5261 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5262 gcc_assert (byte < GET_MODE_SIZE (innermode));
  if (outermode == innermode && !byte)
    return op;
5267 if (CONST_INT_P (op)
5268 || GET_CODE (op) == CONST_DOUBLE
5269 || GET_CODE (op) == CONST_FIXED
5270 || GET_CODE (op) == CONST_VECTOR)
5271 return simplify_immed_subreg (outermode, op, innermode, byte);
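  /* For example, with SImode as INNERMODE,
     (subreg:QI (const_int 0x12345678) 0) folds to (const_int 0x78)
     on a little-endian target and to (const_int 0x12) on a
     big-endian one.  */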
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
5275 if (GET_CODE (op) == SUBREG)
5277 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;
5281 if (outermode == innermostmode
5282 && byte == 0 && SUBREG_BYTE (op) == 0)
5283 return SUBREG_REG (op);
      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  An irritating exception is the paradoxical subreg,
	 where we define SUBREG_BYTE to be 0.  On big-endian machines,
	 this value should be negative.  For a moment, undo this exception.  */
5289 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5291 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5292 if (WORDS_BIG_ENDIAN)
5293 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5294 if (BYTES_BIG_ENDIAN)
5295 final_offset += difference % UNITS_PER_WORD;
5297 if (SUBREG_BYTE (op) == 0
5298 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5300 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5301 if (WORDS_BIG_ENDIAN)
5302 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5303 if (BYTES_BIG_ENDIAN)
5304 final_offset += difference % UNITS_PER_WORD;
      /* See whether the resulting subreg will be paradoxical.  */
5308 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5310 /* In nonparadoxical subregs we can't handle negative offsets. */
	  if (final_offset < 0)
	    return NULL_RTX;
5313 /* Bail out in case resulting subreg would be incorrect. */
5314 if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
5325 if (WORDS_BIG_ENDIAN)
5326 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5327 if (BYTES_BIG_ENDIAN)
5328 offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = byte;
	  else
	    return NULL_RTX;
5335 /* Recurse for further possible simplifications. */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
5340 if (validate_subreg (outermode, innermostmode,
5341 SUBREG_REG (op), final_offset))
5343 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5344 if (SUBREG_PROMOTED_VAR_P (op)
5345 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5346 && GET_MODE_CLASS (outermode) == MODE_INT
5347 && IN_RANGE (GET_MODE_SIZE (outermode),
5348 GET_MODE_SIZE (innermode),
5349 GET_MODE_SIZE (innermostmode))
5350 && subreg_lowpart_p (newx))
5352 SUBREG_PROMOTED_VAR_P (newx) = 1;
5353 SUBREG_PROMOTED_UNSIGNED_SET
5354 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5361 /* Merge implicit and explicit truncations. */
5363 if (GET_CODE (op) == TRUNCATE
5364 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5365 && subreg_lowpart_offset (outermode, innermode) == byte)
5366 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5367 GET_MODE (XEXP (op, 0)));
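  /* E.g. on a little-endian target,
     (subreg:QI (truncate:HI (reg:SI 100)) 0) becomes
     (truncate:QI (reg:SI 100)); the narrower subreg subsumes the
     intermediate truncation.  (Register number is illustrative.)  */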
5369 /* SUBREG of a hard register => just change the register number
5370 and/or mode. If the hard register is not valid in that mode,
5371 suppress this simplification. If the hard register is the stack,
5372 frame, or argument pointer, leave this as a SUBREG. */
5374 if (REG_P (op) && HARD_REGISTER_P (op))
5376 unsigned int regno, final_regno;
      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5380 if (HARD_REGISTER_NUM_P (final_regno))
5383 int final_offset = byte;
5385 /* Adjust offset for paradoxical subregs. */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5389 int difference = (GET_MODE_SIZE (innermode)
5390 - GET_MODE_SIZE (outermode));
5391 if (WORDS_BIG_ENDIAN)
5392 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5393 if (BYTES_BIG_ENDIAN)
5394 final_offset += difference % UNITS_PER_WORD;
5397 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis,
	     which cannot grok a partial register anyway.  */
5404 if (subreg_lowpart_offset (outermode, innermode) == byte)
5405 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
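	  /* For instance, on a target where a DImode value occupies two
	     adjacent word registers, (subreg:SI (reg:DI 0) 4) can fold
	     directly to (reg:SI 1).  (Register numbers are
	     illustrative.)  */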
5410 /* If we have a SUBREG of a register that we are replacing and we are
5411 replacing it with a MEM, make a new MEM and try replacing the
5412 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5413 or if we would be widening it. */
  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
5419 && (! MEM_VOLATILE_P (op)
5420 || ! have_insn_for (SET, innermode))
5421 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5422 return adjust_address_nv (op, outermode, byte);
5424 /* Handle complex values represented as CONCAT
5425 of real and imaginary part. */
5426 if (GET_CODE (op) == CONCAT)
      unsigned int part_size, final_offset;
      rtx part, res;
5431 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5432 if (byte < part_size)
5434 part = XEXP (op, 0);
5435 final_offset = byte;
5439 part = XEXP (op, 1);
5440 final_offset = byte - part_size;
      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
5449 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5450 return gen_rtx_SUBREG (outermode, part, final_offset);
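  /* E.g. for a complex value (concat:SC r i) with 4-byte SFmode parts,
     (subreg:SF (concat:SC r i) 0) folds to R and
     (subreg:SF (concat:SC r i) 4) folds to I.  */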
5454 /* Optimize SUBREG truncations of zero and sign extended values. */
5455 if ((GET_CODE (op) == ZERO_EXTEND
5456 || GET_CODE (op) == SIGN_EXTEND)
5457 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5459 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5461 /* If we're requesting the lowpart of a zero or sign extension,
5462 there are three possibilities. If the outermode is the same
5463 as the origmode, we can omit both the extension and the subreg.
5464 If the outermode is not larger than the origmode, we can apply
5465 the truncation without the extension. Finally, if the outermode
5466 is larger than the origmode, but both are integer modes, we
5467 can just extend to the appropriate mode. */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5471 if (outermode == origmode)
5472 return XEXP (op, 0);
5473 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5474 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
5477 if (SCALAR_INT_MODE_P (outermode))
5478 return simplify_gen_unary (GET_CODE (op), outermode,
5479 XEXP (op, 0), origmode);
      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
5484 if (GET_CODE (op) == ZERO_EXTEND
5485 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5486 return CONST0_RTX (outermode);
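      /* E.g. on a little-endian target,
	 (subreg:HI (zero_extend:DI (reg:HI 100)) 0) folds to
	 (reg:HI 100), while (subreg:HI (zero_extend:DI (reg:HI 100)) 6)
	 reads only bits beyond the extended source and folds to
	 (const_int 0).  */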
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
5492 if ((GET_CODE (op) == LSHIFTRT
5493 || GET_CODE (op) == ASHIFTRT)
5494 && SCALAR_INT_MODE_P (outermode)
5495 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5496 to avoid the possibility that an outer LSHIFTRT shifts by more
5497 than the sign extension's sign_bit_copies and introduces zeros
5498 into the high bits of the result. */
5499 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5500 && CONST_INT_P (XEXP (op, 1))
5501 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5502 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5503 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5504 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5505 return simplify_gen_binary (ASHIFTRT, outermode,
5506 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
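  /* E.g. (subreg:QI (ashiftrt:SI (sign_extend:SI (reg:QI 100))
				  (const_int 3)) 0)
     becomes (ashiftrt:QI (reg:QI 100) (const_int 3)); the bits shifted
     in come from the QImode value's own sign either way.  */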
  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
5511 if ((GET_CODE (op) == LSHIFTRT
5512 || GET_CODE (op) == ASHIFTRT)
5513 && SCALAR_INT_MODE_P (outermode)
5514 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5515 && CONST_INT_P (XEXP (op, 1))
5516 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5517 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5518 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5519 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5520 return simplify_gen_binary (LSHIFTRT, outermode,
5521 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
5526 if (GET_CODE (op) == ASHIFT
5527 && SCALAR_INT_MODE_P (outermode)
5528 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5529 && CONST_INT_P (XEXP (op, 1))
5530 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5531 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5532 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5533 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5534 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5535 return simplify_gen_binary (ASHIFT, outermode,
5536 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5538 /* Recognize a word extraction from a multi-word subreg. */
5539 if ((GET_CODE (op) == LSHIFTRT
5540 || GET_CODE (op) == ASHIFTRT)
5541 && SCALAR_INT_MODE_P (outermode)
5542 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5543 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5544 && CONST_INT_P (XEXP (op, 1))
5545 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5546 && INTVAL (XEXP (op, 1)) >= 0
5547 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5548 && byte == subreg_lowpart_offset (outermode, innermode))
5550 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5551 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
5554 : byte + shifted_bytes));
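  /* E.g. on a 32-bit little-endian target,
     (subreg:SI (lshiftrt:DI (reg:DI 100) (const_int 32)) 0)
     folds to (subreg:SI (reg:DI 100) 4): the high word is read
     directly instead of being shifted into place.  */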
5557 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5558 and try replacing the SUBREG and shift with it. Don't do this if
5559 the MEM has a mode-dependent address or if we would be widening it. */
5561 if ((GET_CODE (op) == LSHIFTRT
5562 || GET_CODE (op) == ASHIFTRT)
5563 && MEM_P (XEXP (op, 0))
5564 && CONST_INT_P (XEXP (op, 1))
5565 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5566 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5567 && INTVAL (XEXP (op, 1)) > 0
5568 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5569 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5570 && ! MEM_VOLATILE_P (XEXP (op, 0))
5571 && byte == subreg_lowpart_offset (outermode, innermode)
5572 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5573 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5575 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5576 return adjust_address_nv (XEXP (op, 0), outermode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
5579 : byte + shifted_bytes));
5585 /* Make a SUBREG operation or equivalent if it folds. */
rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
5589 enum machine_mode innermode, unsigned int byte)
  rtx newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;
5597 if (GET_CODE (op) == SUBREG
5598 || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;
5602 if (validate_subreg (outermode, innermode, op, byte))
5603 return gen_rtx_SUBREG (outermode, op, byte);
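/* A minimal usage sketch (hypothetical caller): to read the low
   SImode half of a DImode value X independently of endianness:

     rtx lo = simplify_gen_subreg (SImode, x, DImode,
				   subreg_lowpart_offset (SImode,
							  DImode));

   The result is either a folded rtx (e.g. a CONST_INT when X is a
   constant), a fresh (subreg:SI ...), or NULL_RTX if no valid subreg
   can be formed.  */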
5608 /* Simplify X, an rtx expression.
   Return the simplified expression or NULL if no simplifications
   were possible.
5613 This is the preferred entry point into the simplification routines;
5614 however, we still allow passes to call the more specific routines.
5616 Right now GCC has three (yes, three) major bodies of RTL simplification
5617 code that need to be unified.
   1. fold_rtx in cse.c.  This code uses various CSE-specific
      information to aid in RTL simplification.
   2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
      it uses combine-specific information to aid in RTL
      simplification.
5626 3. The routines in this file.
5629 Long term we want to only have one body of simplification code; to
5630 get to that state I recommend the following steps:
   1. Pore over fold_rtx & simplify_rtx and move any simplifications
      which do not depend on pass-specific state into these routines.
5635 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5636 use this routine whenever possible.
5638 3. Allow for pass dependent state to be provided to these
5639 routines and add simplifications based on the pass dependent
      state.  Remove code from cse.c & combine.c that becomes
      unnecessary.
   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
5651 const enum rtx_code code = GET_CODE (x);
5652 const enum machine_mode mode = GET_MODE (x);
5654 switch (GET_RTX_CLASS (code))
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
5658 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5659 case RTX_COMM_ARITH:
5660 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5661 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5663 /* Fall through.... */
    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
5670 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					  XEXP (x, 0), XEXP (x, 1),
					  XEXP (x, 2));

    case RTX_COMPARE:
5675 case RTX_COMM_COMPARE:
5676 return simplify_relational_operation (code, mode,
					     ((GET_MODE (XEXP (x, 0))
					       != VOIDmode)
					      ? GET_MODE (XEXP (x, 0))
					      : GET_MODE (XEXP (x, 1))),
					     XEXP (x, 0), XEXP (x, 1));
    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
5694 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5695 if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);