/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
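
/* Illustrative use (a sketch, not from the original source): to widen
   the value of a CONST_INT X into a (low, high) pair,

     unsigned HOST_WIDE_INT lo = (unsigned HOST_WIDE_INT) INTVAL (x);
     HOST_WIDE_INT hi = HWI_SIGN_EXTEND (INTVAL (x));

   HI becomes all ones when the value is negative and zero otherwise,
   matching two's-complement sign extension of LO.  */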
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
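
/* For example (illustrative, assuming 32-bit SImode):
   mode_signbit_p (SImode, GEN_INT (0x80000000)) is true, since
   0x80000000 is exactly the SImode sign bit, while
   mode_signbit_p (SImode, GEN_INT (0x40000000)) is false.  */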
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
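
/* Usage sketch (illustrative, not from the original source):

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx sum = simplify_gen_binary (PLUS, SImode, const1_rtx, reg);

   Because PLUS is commutative and a constant should come second, the
   result is (plus:SI (reg:SI 100) (const_int 1)); passing
   (const_int 0) instead would fold to the register itself.  */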
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp = 0, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !host_integerp (toffset, 0)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += TREE_INT_CST_LOW (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += INTVAL (MEM_OFFSET (x));

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
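
/* Usage sketch (illustrative, not from the original source): rewriting
   X = (plus:SI (reg:SI 1) (reg:SI 2)) with

     simplify_replace_rtx (x, gen_rtx_REG (SImode, 2), const0_rtx)

   yields (reg:SI 1), since the inner replacement produces
   (plus:SI (reg:SI 1) (const_int 0)), which then simplifies.  */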
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));
      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}
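
      /* For instance (illustrative), in SImode:
	 (not (ashift:SI (const_int 1) (reg:SI r)))
	 becomes (rotate:SI (const_int -2) (reg:SI r)), since ~1 rotated
	 left by R has the same single zero bit as the complement of the
	 shifted 1.  */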
      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
	}
      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 canonicalized.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_INT_P (XEXP (op, 1))
	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}
      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));
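
      /* For instance (illustrative), in 32-bit SImode:
	 (neg:SI (lshiftrt:SI X (const_int 31))) is
	 (ashiftrt:SI X (const_int 31)); both yield 0 when X is
	 non-negative and -1 when X is negative.  */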
      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_BITSIZE (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
	   || GET_CODE (op) == ZERO_EXTEND)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
	 (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
	 (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
	  && subreg_lowpart_p (op))
	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
				   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  Note that this is also
	 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
	 modes we just have to apply a different definition for
	 truncation.  But don't do this for an (LSHIFTRT (MULT ...))
	 since this will cause problems with the umulXi3_highpart
	 patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op)))
	   ? (num_sign_bit_copies (op, GET_MODE (op))
	      > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
				- GET_MODE_BITSIZE (mode)))
	   : truncated_to_mode (mode, op))
	  && ! (GET_CODE (op) == LSHIFTRT
		&& GET_CODE (XEXP (op, 0)) == MULT))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);
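
      /* For instance (illustrative): if X in SImode is known to be the
	 result of a QImode sign extension, num_sign_bit_copies (X, SImode)
	 is at least 25, so (truncate:QI X) drops only copies of the sign
	 bit and can become a lowpart SUBREG of X.  */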
      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned) significand_size (GET_MODE (op))
		      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned) significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>)  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || ((GET_MODE_BITSIZE (GET_MODE (op))
	       <= HOST_BITS_PER_WIDE_INT)
	      && ((nonzero_bits (op, GET_MODE (op))
		   & ((unsigned HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
		  == 0)))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
	return gen_rtx_NEG (mode, op);
      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>)  */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>)  */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;
    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}
      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					 ? SIGN_EXTEND : ZERO_EXTEND,
					 mode, inner, tmode);
	    }
	}
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode)
			== GET_MODE_INNER (GET_MODE (op)));
	}

      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  arg0 &= GET_MODE_MASK (mode);
	  val = ffs_hwi (arg0);
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = ctz_hwi (arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    val = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;
		byte = (arg0 >> s) & 0xff;
		val |= byte << d;
	      }
	  }
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
			   << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
			     << GET_MODE_BITSIZE (op_mode));
	      if (val & ((unsigned HOST_WIDE_INT) 1
			 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val
		  -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1, hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 != 0)
	    lv = ffs_hwi (l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
	  else
	    lv = 0;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = ctz_hwi (l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    hv = 0;
	    lv = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;

		if (s < HOST_BITS_PER_WIDE_INT)
		  byte = (l1 >> s) & 0xff;
		else
		  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

		if (d < HOST_BITS_PER_WIDE_INT)
		  lv |= byte << d;
		else
		  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
	      }
	  }
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((unsigned HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    th = 0, tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th, xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (unsigned HOST_WIDE_INT) (-1)
		   << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    th = -1, tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th, xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2 * HOST_BITS_PER_WIDE_INT)
	    th = -1, tl = -1;
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    th = 0, tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th, xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
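
/* For example (illustrative, not from the original source): given
   (plus:SI (plus:SI (reg:SI r) (const_int 1)) (const_int 2)),
   the "(a op b) op c" -> "a op (b op c)" attempt folds the two
   constants, yielding (plus:SI (reg:SI r) (const_int 3)).  */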
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
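
/* Usage sketch (illustrative, not from the original source):

     rtx tem = simplify_binary_operation (MULT, SImode, reg, GEN_INT (4));

   can return (ashift:SI reg (const_int 2)) via the MULT handling in
   simplify_binary_operation_1, or NULL_RTX when no simplification
   applies, in which case callers typically fall back to
   gen_rtx_fmt_ee.  */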
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (op1, INTVAL (op0));
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  double_int coeff0, coeff1;
	  rtx lhs = op0, rhs = op1;

	  coeff0 = double_int_one;
	  coeff1 = double_int_one;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = double_int_minus_one;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = double_int_minus_one;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff1 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      double_int val;
	      bool speed = optimize_function_for_speed_p (cfun);

	      val = double_int_add (coeff0, coeff1);
	      coeff = immed_double_int_const (val, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
		? tem : orig;
	    }
	}
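
      /* For example (illustrative): (plus (mult (reg) (const_int 4)) (reg))
	 matches with coeff0 == 4 and coeff1 == 1, so it is rewritten as
	 (mult (reg) (const_int 5)), provided rtx_cost does not consider
	 that more expensive than the original PLUS.  */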
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1)
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == XOR
	  && (CONST_INT_P (XEXP (op0, 1))
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  double_int coeff0, negcoeff1;
	  rtx lhs = op0, rhs = op1;

	  coeff0 = double_int_one;
	  negcoeff1 = double_int_minus_one;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = double_int_minus_one;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = double_int_one;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      negcoeff1 = double_int_setbit (double_int_zero,
					     INTVAL (XEXP (rhs, 1)));
	      negcoeff1 = double_int_neg (negcoeff1);
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      double_int val;
	      bool speed = optimize_function_for_speed_p (cfun);

	      val = double_int_add (coeff0, negcoeff1);
	      coeff = immed_double_int_const (val, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
		? tem : orig;
	    }
	}
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_INT_P (op1)
	      || GET_CODE (op1) == CONST_DOUBLE))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;
      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}
2210 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2211 x is NaN, since x * 0 is then also NaN. Nor is it valid
2212 when the mode has signed zeros, since multiplying a negative
2213 number by 0 will give -0, not 0. */
2214 if (!HONOR_NANS (mode)
2215 && !HONOR_SIGNED_ZEROS (mode)
2216 && trueop1 == CONST0_RTX (mode)
2217 && ! side_effects_p (op0))
2220 /* In IEEE floating point, x*1 is not equivalent to x for
2222 if (!HONOR_SNANS (mode)
2223 && trueop1 == CONST1_RTX (mode))
2226 /* Convert multiply by constant power of two into shift unless
2227 we are still generating RTL. This test is a kludge. */
2228 if (CONST_INT_P (trueop1)
2229 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2230 /* If the mode is larger than the host word size, and the
2231 uppermost bit is set, then this isn't a power of two due
2232 to implicit sign extension. */
2233 && (width <= HOST_BITS_PER_WIDE_INT
2234 || val != HOST_BITS_PER_WIDE_INT - 1))
2235 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
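/* For example, (mult:SI X (const_int 8)) becomes
   (ashift:SI X (const_int 3)), since 8 == 1 << 3.  */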
2237 /* Likewise for multipliers wider than a word. */
2238 if (GET_CODE (trueop1) == CONST_DOUBLE
2239 && (GET_MODE (trueop1) == VOIDmode
2240 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2241 && GET_MODE (op0) == mode
2242 && CONST_DOUBLE_LOW (trueop1) == 0
2243 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2244 return simplify_gen_binary (ASHIFT, mode, op0,
2245 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2247 /* x*2 is x+x and x*(-1) is -x */
2248 if (GET_CODE (trueop1) == CONST_DOUBLE
2249 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2250 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2251 && GET_MODE (op0) == mode)
2254 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2256 if (REAL_VALUES_EQUAL (d, dconst2))
2257 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2259 if (!HONOR_SNANS (mode)
2260 && REAL_VALUES_EQUAL (d, dconstm1))
2261 return simplify_gen_unary (NEG, mode, op0, mode);
2264 /* Optimize -x * -x as x * x. */
2265 if (FLOAT_MODE_P (mode)
2266 && GET_CODE (op0) == NEG
2267 && GET_CODE (op1) == NEG
2268 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2269 && !side_effects_p (XEXP (op0, 0)))
2270 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2272 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2273 if (SCALAR_FLOAT_MODE_P (mode)
2274 && GET_CODE (op0) == ABS
2275 && GET_CODE (op1) == ABS
2276 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2277 && !side_effects_p (XEXP (op0, 0)))
2278 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2280 /* Reassociate multiplication, but for floating point MULTs
2281 only when the user specifies unsafe math optimizations. */
2282 if (! FLOAT_MODE_P (mode)
2283 || flag_unsafe_math_optimizations)
2285 tem = simplify_associative_operation (code, mode, op0, op1);
2292 if (trueop1 == CONST0_RTX (mode))
2294 if (CONST_INT_P (trueop1)
2295 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2296 == GET_MODE_MASK (mode)))
2298 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2300 /* A | (~A) -> -1 */
2301 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2302 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2303 && ! side_effects_p (op0)
2304 && SCALAR_INT_MODE_P (mode))
2307 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2308 if (CONST_INT_P (op1)
2309 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2310 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
2313 /* Canonicalize (X & C1) | C2. */
2314 if (GET_CODE (op0) == AND
2315 && CONST_INT_P (trueop1)
2316 && CONST_INT_P (XEXP (op0, 1)))
2318 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2319 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2320 HOST_WIDE_INT c2 = INTVAL (trueop1);
2322 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2324 && !side_effects_p (XEXP (op0, 0)))
2327 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2328 if (((c1|c2) & mask) == mask)
2329 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2331 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2332 if (((c1 & ~c2) & mask) != (c1 & mask))
2334 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2335 gen_int_mode (c1 & ~c2, mode));
2336 return simplify_gen_binary (IOR, mode, tem, op1);
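/* Worked example: with C1 == 0x0F and C2 == 0x05, the bits of C1
   that are also set in C2 are redundant, so
   (ior (and X (const_int 0x0F)) (const_int 0x05)) becomes
   (ior (and X (const_int 0x0A)) (const_int 0x05)),
   because C1 & ~C2 == 0x0A.  */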
2340 /* Convert (A & B) | A to A. */
2341 if (GET_CODE (op0) == AND
2342 && (rtx_equal_p (XEXP (op0, 0), op1)
2343 || rtx_equal_p (XEXP (op0, 1), op1))
2344 && ! side_effects_p (XEXP (op0, 0))
2345 && ! side_effects_p (XEXP (op0, 1)))
2348 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2349 mode size to (rotate A CX). */
2351 if (GET_CODE (op1) == ASHIFT
2352 || GET_CODE (op1) == SUBREG)
2363 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2364 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2365 && CONST_INT_P (XEXP (opleft, 1))
2366 && CONST_INT_P (XEXP (opright, 1))
2367 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2368 == GET_MODE_BITSIZE (mode)))
2369 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
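/* For example, in a 32-bit mode, (ior (ashift A (const_int 24))
   (lshiftrt A (const_int 8))) satisfies 24 + 8 == 32 and therefore
   becomes (rotate A (const_int 24)).  */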
2371 /* Same, but for ashift that has been "simplified" to a wider mode
2372 by simplify_shift_const. */
2374 if (GET_CODE (opleft) == SUBREG
2375 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2376 && GET_CODE (opright) == LSHIFTRT
2377 && GET_CODE (XEXP (opright, 0)) == SUBREG
2378 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2379 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2380 && (GET_MODE_SIZE (GET_MODE (opleft))
2381 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2382 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2383 SUBREG_REG (XEXP (opright, 0)))
2384 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2385 && CONST_INT_P (XEXP (opright, 1))
2386 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2387 == GET_MODE_BITSIZE (mode)))
2388 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2389 XEXP (SUBREG_REG (opleft), 1));
2391 /* If we have (ior (and X C1) C2), simplify this by making
2392 C1 as small as possible if C1 actually changes. */
2393 if (CONST_INT_P (op1)
2394 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2395 || INTVAL (op1) > 0)
2396 && GET_CODE (op0) == AND
2397 && CONST_INT_P (XEXP (op0, 1))
2398 && CONST_INT_P (op1)
2399 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2400 return simplify_gen_binary (IOR, mode,
2402 (AND, mode, XEXP (op0, 0),
2403 GEN_INT (UINTVAL (XEXP (op0, 1))
2407 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2408 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2409 the PLUS does not affect any of the bits in OP1: then we can do
2410 the IOR as a PLUS and we can associate. This is valid if OP1
2411 can be safely shifted left C bits. */
2412 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2413 && GET_CODE (XEXP (op0, 0)) == PLUS
2414 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2415 && CONST_INT_P (XEXP (op0, 1))
2416 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2418 int count = INTVAL (XEXP (op0, 1));
2419 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2421 if (mask >> count == INTVAL (trueop1)
2422 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2423 return simplify_gen_binary (ASHIFTRT, mode,
2424 plus_constant (XEXP (op0, 0), mask),
2428 tem = simplify_associative_operation (code, mode, op0, op1);
2434 if (trueop1 == CONST0_RTX (mode))
2436 if (CONST_INT_P (trueop1)
2437 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2438 == GET_MODE_MASK (mode)))
2439 return simplify_gen_unary (NOT, mode, op0, mode);
2440 if (rtx_equal_p (trueop0, trueop1)
2441 && ! side_effects_p (op0)
2442 && GET_MODE_CLASS (mode) != MODE_CC)
2443 return CONST0_RTX (mode);
2445 /* Canonicalize XOR of the most significant bit to PLUS. */
2446 if ((CONST_INT_P (op1)
2447 || GET_CODE (op1) == CONST_DOUBLE)
2448 && mode_signbit_p (mode, op1))
2449 return simplify_gen_binary (PLUS, mode, op0, op1);
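/* For example, in QImode (xor X (const_int -128)) becomes
   (plus X (const_int -128)): adding the sign bit flips it exactly
   like XOR does, because the carry out of the top bit is
   discarded.  */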
2450 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2451 if ((CONST_INT_P (op1)
2452 || GET_CODE (op1) == CONST_DOUBLE)
2453 && GET_CODE (op0) == PLUS
2454 && (CONST_INT_P (XEXP (op0, 1))
2455 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2456 && mode_signbit_p (mode, XEXP (op0, 1)))
2457 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2458 simplify_gen_binary (XOR, mode, op1,
2461 /* If we are XORing two things that have no bits in common,
2462 convert them into an IOR. This helps to detect rotation encoded
2463 using those methods and possibly other simplifications. */
2465 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2466 && (nonzero_bits (op0, mode)
2467 & nonzero_bits (op1, mode)) == 0)
2468 return (simplify_gen_binary (IOR, mode, op0, op1));
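/* For example, if nonzero_bits shows that A can only have bits
   within 0x0F and B only within 0xF0, then (xor A B) and (ior A B)
   compute the same value, so the IOR form is preferred.  */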
2470 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2471 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for (NOT y). */
2474 int num_negated = 0;
2476 if (GET_CODE (op0) == NOT)
2477 num_negated++, op0 = XEXP (op0, 0);
2478 if (GET_CODE (op1) == NOT)
2479 num_negated++, op1 = XEXP (op1, 0);
2481 if (num_negated == 2)
2482 return simplify_gen_binary (XOR, mode, op0, op1);
2483 else if (num_negated == 1)
2484 return simplify_gen_unary (NOT, mode,
2485 simplify_gen_binary (XOR, mode, op0, op1),
2489 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2490 correspond to a machine insn or result in further simplifications
2491 if B is a constant. */
2493 if (GET_CODE (op0) == AND
2494 && rtx_equal_p (XEXP (op0, 1), op1)
2495 && ! side_effects_p (op1))
2496 return simplify_gen_binary (AND, mode,
2497 simplify_gen_unary (NOT, mode,
2498 XEXP (op0, 0), mode),
2501 else if (GET_CODE (op0) == AND
2502 && rtx_equal_p (XEXP (op0, 0), op1)
2503 && ! side_effects_p (op1))
2504 return simplify_gen_binary (AND, mode,
2505 simplify_gen_unary (NOT, mode,
2506 XEXP (op0, 1), mode),
2509 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2510 comparison if STORE_FLAG_VALUE is 1. */
2511 if (STORE_FLAG_VALUE == 1
2512 && trueop1 == const1_rtx
2513 && COMPARISON_P (op0)
2514 && (reversed = reversed_comparison (op0, mode)))
2517 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2518 is (lt foo (const_int 0)), so we can perform the above
2519 simplification if STORE_FLAG_VALUE is 1. */
2521 if (STORE_FLAG_VALUE == 1
2522 && trueop1 == const1_rtx
2523 && GET_CODE (op0) == LSHIFTRT
2524 && CONST_INT_P (XEXP (op0, 1))
2525 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2526 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
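/* Illustrative example, assuming a 32-bit mode and
   STORE_FLAG_VALUE == 1: (lshiftrt X (const_int 31)) extracts the
   sign bit, i.e. it is (lt X (const_int 0)), so XORing it with 1
   yields (ge X (const_int 0)).  */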
2528 /* (xor (comparison foo bar) (const_int sign-bit))
2529 when STORE_FLAG_VALUE is the sign bit. */
2530 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2531 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2532 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2533 && trueop1 == const_true_rtx
2534 && COMPARISON_P (op0)
2535 && (reversed = reversed_comparison (op0, mode)))
2538 tem = simplify_associative_operation (code, mode, op0, op1);
2544 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2546 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2548 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2549 HOST_WIDE_INT nzop1;
2550 if (CONST_INT_P (trueop1))
2552 HOST_WIDE_INT val1 = INTVAL (trueop1);
2553 /* If we are turning off bits already known off in OP0, we need not do an AND. */
2555 if ((nzop0 & ~val1) == 0)
2558 nzop1 = nonzero_bits (trueop1, mode);
2559 /* If we are clearing all the nonzero bits, the result is zero. */
2560 if ((nzop1 & nzop0) == 0
2561 && !side_effects_p (op0) && !side_effects_p (op1))
2562 return CONST0_RTX (mode);
2564 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2565 && GET_MODE_CLASS (mode) != MODE_CC)
2568 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2569 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2570 && ! side_effects_p (op0)
2571 && GET_MODE_CLASS (mode) != MODE_CC)
2572 return CONST0_RTX (mode);
2574 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2575 there are no nonzero bits of C outside of X's mode. */
2576 if ((GET_CODE (op0) == SIGN_EXTEND
2577 || GET_CODE (op0) == ZERO_EXTEND)
2578 && CONST_INT_P (trueop1)
2579 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2580 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2581 & UINTVAL (trueop1)) == 0)
2583 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2584 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2585 gen_int_mode (INTVAL (trueop1),
2587 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
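/* For example, (and:SI (sign_extend:SI X:QI) (const_int 0x7C))
   becomes (zero_extend:SI (and:QI X (const_int 0x7C))): the mask
   has no bits outside QImode, so the copies of the sign bit are
   cleared anyway and a zero extension suffices.  */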
2590 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2591 we might be able to further simplify the AND with X and potentially
2592 remove the truncation altogether. */
2593 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2595 rtx x = XEXP (op0, 0);
2596 enum machine_mode xmode = GET_MODE (x);
2597 tem = simplify_gen_binary (AND, xmode, x,
2598 gen_int_mode (INTVAL (trueop1), xmode));
2599 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2602 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2603 if (GET_CODE (op0) == IOR
2604 && CONST_INT_P (trueop1)
2605 && CONST_INT_P (XEXP (op0, 1)))
2607 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2608 return simplify_gen_binary (IOR, mode,
2609 simplify_gen_binary (AND, mode,
2610 XEXP (op0, 0), op1),
2611 gen_int_mode (tmp, mode));
2614 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2615 insn (and may simplify more). */
2616 if (GET_CODE (op0) == XOR
2617 && rtx_equal_p (XEXP (op0, 0), op1)
2618 && ! side_effects_p (op1))
2619 return simplify_gen_binary (AND, mode,
2620 simplify_gen_unary (NOT, mode,
2621 XEXP (op0, 1), mode),
2624 if (GET_CODE (op0) == XOR
2625 && rtx_equal_p (XEXP (op0, 1), op1)
2626 && ! side_effects_p (op1))
2627 return simplify_gen_binary (AND, mode,
2628 simplify_gen_unary (NOT, mode,
2629 XEXP (op0, 0), mode),
2632 /* Similarly for (~(A ^ B)) & A. */
2633 if (GET_CODE (op0) == NOT
2634 && GET_CODE (XEXP (op0, 0)) == XOR
2635 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2636 && ! side_effects_p (op1))
2637 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2639 if (GET_CODE (op0) == NOT
2640 && GET_CODE (XEXP (op0, 0)) == XOR
2641 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2642 && ! side_effects_p (op1))
2643 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2645 /* Convert (A | B) & A to A. */
2646 if (GET_CODE (op0) == IOR
2647 && (rtx_equal_p (XEXP (op0, 0), op1)
2648 || rtx_equal_p (XEXP (op0, 1), op1))
2649 && ! side_effects_p (XEXP (op0, 0))
2650 && ! side_effects_p (XEXP (op0, 1)))
2653 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2654 ((A & N) + B) & M -> (A + B) & M
2655 Similarly if (N & M) == 0,
2656 ((A | N) + B) & M -> (A + B) & M
2657 and for - instead of + and/or ^ instead of |.
2658 Also, if (N & M) == 0, then
2659 (A +- N) & M -> A & M. */
2660 if (CONST_INT_P (trueop1)
2661 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2662 && ~UINTVAL (trueop1)
2663 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2664 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2669 pmop[0] = XEXP (op0, 0);
2670 pmop[1] = XEXP (op0, 1);
2672 if (CONST_INT_P (pmop[1])
2673 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2674 return simplify_gen_binary (AND, mode, pmop[0], op1);
2676 for (which = 0; which < 2; which++)
2679 switch (GET_CODE (tem))
2682 if (CONST_INT_P (XEXP (tem, 1))
2683 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2684 == UINTVAL (trueop1))
2685 pmop[which] = XEXP (tem, 0);
2689 if (CONST_INT_P (XEXP (tem, 1))
2690 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2691 pmop[which] = XEXP (tem, 0);
2698 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2700 tem = simplify_gen_binary (GET_CODE (op0), mode,
2702 return simplify_gen_binary (code, mode, tem, op1);
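/* Worked example: with M == 0xFF (a low-bit mask), the low eight
   bits of a sum depend only on the low eight bits of the addends,
   so ((A & 0x1FF) + B) & 0xFF becomes (A + B) & 0xFF, and
   (A + 0x100) & 0xFF becomes A & 0xFF since 0x100 & 0xFF == 0.  */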
2706 /* (and X (ior (not X) Y)) -> (and X Y) */
2707 if (GET_CODE (op1) == IOR
2708 && GET_CODE (XEXP (op1, 0)) == NOT
2709 && op0 == XEXP (XEXP (op1, 0), 0))
2710 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2712 /* (and (ior (not X) Y) X) -> (and X Y) */
2713 if (GET_CODE (op0) == IOR
2714 && GET_CODE (XEXP (op0, 0)) == NOT
2715 && op1 == XEXP (XEXP (op0, 0), 0))
2716 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2718 tem = simplify_associative_operation (code, mode, op0, op1);
2724 /* 0/x is 0 (or x&0 if x has side-effects). */
2725 if (trueop0 == CONST0_RTX (mode))
2727 if (side_effects_p (op1))
2728 return simplify_gen_binary (AND, mode, op1, trueop0);
2732 if (trueop1 == CONST1_RTX (mode))
2733 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2734 /* Convert divide by power of two into shift. */
2735 if (CONST_INT_P (trueop1)
2736 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2737 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
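/* For example, (udiv X (const_int 16)) becomes
   (lshiftrt X (const_int 4)); the logical shift is safe here
   because the division is unsigned.  */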
2741 /* Handle floating point and integers separately. */
2742 if (SCALAR_FLOAT_MODE_P (mode))
2744 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2745 safe for modes with NaNs, since 0.0 / 0.0 will then be
2746 NaN rather than 0.0. Nor is it safe for modes with signed
2747 zeros, since dividing 0 by a negative number gives -0.0 */
2748 if (trueop0 == CONST0_RTX (mode)
2749 && !HONOR_NANS (mode)
2750 && !HONOR_SIGNED_ZEROS (mode)
2751 && ! side_effects_p (op1))
2754 if (trueop1 == CONST1_RTX (mode)
2755 && !HONOR_SNANS (mode))
2758 if (GET_CODE (trueop1) == CONST_DOUBLE
2759 && trueop1 != CONST0_RTX (mode))
2762 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2765 if (REAL_VALUES_EQUAL (d, dconstm1)
2766 && !HONOR_SNANS (mode))
2767 return simplify_gen_unary (NEG, mode, op0, mode);
2769 /* Change FP division by a constant into multiplication.
2770 Only do this with -freciprocal-math. */
2771 if (flag_reciprocal_math
2772 && !REAL_VALUES_EQUAL (d, dconst0))
2774 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2775 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2776 return simplify_gen_binary (MULT, mode, op0, tem);
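/* For example, with -freciprocal-math, (div X 4.0) becomes
   (mult X 0.25).  The flag is required because the reciprocal is
   not exactly representable for most divisors.  */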
2780 else if (SCALAR_INT_MODE_P (mode))
2782 /* 0/x is 0 (or x&0 if x has side-effects). */
2783 if (trueop0 == CONST0_RTX (mode)
2784 && !cfun->can_throw_non_call_exceptions)
2786 if (side_effects_p (op1))
2787 return simplify_gen_binary (AND, mode, op1, trueop0);
2791 if (trueop1 == CONST1_RTX (mode))
2792 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2794 if (trueop1 == constm1_rtx)
2796 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2797 return simplify_gen_unary (NEG, mode, x, mode);
2803 /* 0%x is 0 (or x&0 if x has side-effects). */
2804 if (trueop0 == CONST0_RTX (mode))
2806 if (side_effects_p (op1))
2807 return simplify_gen_binary (AND, mode, op1, trueop0);
2810 /* x%1 is 0 (or x&0 if x has side-effects). */
2811 if (trueop1 == CONST1_RTX (mode))
2813 if (side_effects_p (op0))
2814 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2815 return CONST0_RTX (mode);
2817 /* Implement modulus by power of two as AND. */
2818 if (CONST_INT_P (trueop1)
2819 && exact_log2 (UINTVAL (trueop1)) > 0)
2820 return simplify_gen_binary (AND, mode, op0,
2821 GEN_INT (INTVAL (op1) - 1));
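/* For example, (umod X (const_int 32)) becomes
   (and X (const_int 31)), which keeps exactly the low five bits;
   this is the unsigned-modulus case, so no sign fixup is needed.  */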
2825 /* 0%x is 0 (or x&0 if x has side-effects). */
2826 if (trueop0 == CONST0_RTX (mode))
2828 if (side_effects_p (op1))
2829 return simplify_gen_binary (AND, mode, op1, trueop0);
2832 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2833 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2835 if (side_effects_p (op0))
2836 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2837 return CONST0_RTX (mode);
2844 if (trueop1 == CONST0_RTX (mode))
2846 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2848 /* Rotating ~0 always results in ~0. */
2849 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2850 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
2851 && ! side_effects_p (op1))
2854 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2856 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2857 if (val != INTVAL (op1))
2858 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2865 if (trueop1 == CONST0_RTX (mode))
2867 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2869 goto canonicalize_shift;
2872 if (trueop1 == CONST0_RTX (mode))
2874 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2876 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2877 if (GET_CODE (op0) == CLZ
2878 && CONST_INT_P (trueop1)
2879 && STORE_FLAG_VALUE == 1
2880 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2882 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2883 unsigned HOST_WIDE_INT zero_val = 0;
2885 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2886 && zero_val == GET_MODE_BITSIZE (imode)
2887 && INTVAL (trueop1) == exact_log2 (zero_val))
2888 return simplify_gen_relational (EQ, mode, imode,
2889 XEXP (op0, 0), const0_rtx);
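/* Illustrative example, assuming a 32-bit mode in which
   CLZ_DEFINED_VALUE_AT_ZERO yields 32 and STORE_FLAG_VALUE == 1:
   clz ranges over 0..32, and 32 is the only such value with bit 5
   set, so (lshiftrt (clz X) (const_int 5)) is 1 exactly when
   X == 0, i.e. (eq X (const_int 0)).  */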
2891 goto canonicalize_shift;
2894 if (width <= HOST_BITS_PER_WIDE_INT
2895 && CONST_INT_P (trueop1)
2896 && UINTVAL (trueop1) == (unsigned HOST_WIDE_INT) 1 << (width -1)
2897 && ! side_effects_p (op0))
2899 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2901 tem = simplify_associative_operation (code, mode, op0, op1);
2907 if (width <= HOST_BITS_PER_WIDE_INT
2908 && CONST_INT_P (trueop1)
2909 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
2910 && ! side_effects_p (op0))
2912 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2914 tem = simplify_associative_operation (code, mode, op0, op1);
2920 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2922 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2924 tem = simplify_associative_operation (code, mode, op0, op1);
2930 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2932 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2934 tem = simplify_associative_operation (code, mode, op0, op1);
2947 /* ??? There are simplifications that can be done. */
2951 if (!VECTOR_MODE_P (mode))
2953 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2954 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2955 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2956 gcc_assert (XVECLEN (trueop1, 0) == 1);
2957 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2959 if (GET_CODE (trueop0) == CONST_VECTOR)
2960 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2963 /* Extract a scalar element from a nested VEC_SELECT expression
2964 (with an optional nested VEC_CONCAT expression). Some targets
2965 (i386) extract a scalar element from a vector using a chain of
2966 nested VEC_SELECT expressions. When the input operand is a memory
2967 operand, this operation can be simplified to a simple scalar
2968 load from an offsetted memory address. */
2969 if (GET_CODE (trueop0) == VEC_SELECT)
2971 rtx op0 = XEXP (trueop0, 0);
2972 rtx op1 = XEXP (trueop0, 1);
2974 enum machine_mode opmode = GET_MODE (op0);
2975 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2976 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2978 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2984 gcc_assert (GET_CODE (op1) == PARALLEL);
2985 gcc_assert (i < n_elts);
2987 /* Select the element pointed to by the nested selector. */
2988 elem = INTVAL (XVECEXP (op1, 0, i));
2990 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2991 if (GET_CODE (op0) == VEC_CONCAT)
2993 rtx op00 = XEXP (op0, 0);
2994 rtx op01 = XEXP (op0, 1);
2996 enum machine_mode mode00, mode01;
2997 int n_elts00, n_elts01;
2999 mode00 = GET_MODE (op00);
3000 mode01 = GET_MODE (op01);
3002 /* Find the number of elements in each operand. */
3003 if (VECTOR_MODE_P (mode00))
3005 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3006 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3011 if (VECTOR_MODE_P (mode01))
3013 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3014 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3019 gcc_assert (n_elts == n_elts00 + n_elts01);
3021 /* Select correct operand of VEC_CONCAT
3022 and adjust selector. */
3023 if (elem < n_elts01)
3034 vec = rtvec_alloc (1);
3035 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3037 tmp = gen_rtx_fmt_ee (code, mode,
3038 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3041 if (GET_CODE (trueop0) == VEC_DUPLICATE
3042 && GET_MODE (XEXP (trueop0, 0)) == mode)
3043 return XEXP (trueop0, 0);
3047 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3048 gcc_assert (GET_MODE_INNER (mode)
3049 == GET_MODE_INNER (GET_MODE (trueop0)));
3050 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3052 if (GET_CODE (trueop0) == CONST_VECTOR)
3054 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3055 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3056 rtvec v = rtvec_alloc (n_elts);
3059 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3060 for (i = 0; i < n_elts; i++)
3062 rtx x = XVECEXP (trueop1, 0, i);
3064 gcc_assert (CONST_INT_P (x));
3065 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3069 return gen_rtx_CONST_VECTOR (mode, v);
3073 if (XVECLEN (trueop1, 0) == 1
3074 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3075 && GET_CODE (trueop0) == VEC_CONCAT)
3078 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3080 /* Try to find the element in the VEC_CONCAT. */
3081 while (GET_MODE (vec) != mode
3082 && GET_CODE (vec) == VEC_CONCAT)
3084 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3085 if (offset < vec_size)
3086 vec = XEXP (vec, 0);
3090 vec = XEXP (vec, 1);
3092 vec = avoid_constant_pool_reference (vec);
3095 if (GET_MODE (vec) == mode)
3102 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3103 ? GET_MODE (trueop0)
3104 : GET_MODE_INNER (mode));
3105 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3106 ? GET_MODE (trueop1)
3107 : GET_MODE_INNER (mode));
3109 gcc_assert (VECTOR_MODE_P (mode));
3110 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3111 == GET_MODE_SIZE (mode));
3113 if (VECTOR_MODE_P (op0_mode))
3114 gcc_assert (GET_MODE_INNER (mode)
3115 == GET_MODE_INNER (op0_mode));
3117 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3119 if (VECTOR_MODE_P (op1_mode))
3120 gcc_assert (GET_MODE_INNER (mode)
3121 == GET_MODE_INNER (op1_mode));
3123 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3125 if ((GET_CODE (trueop0) == CONST_VECTOR
3126 || CONST_INT_P (trueop0)
3127 || GET_CODE (trueop0) == CONST_DOUBLE)
3128 && (GET_CODE (trueop1) == CONST_VECTOR
3129 || CONST_INT_P (trueop1)
3130 || GET_CODE (trueop1) == CONST_DOUBLE))
3132 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3133 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3134 rtvec v = rtvec_alloc (n_elts);
3136 unsigned in_n_elts = 1;
3138 if (VECTOR_MODE_P (op0_mode))
3139 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3140 for (i = 0; i < n_elts; i++)
3144 if (!VECTOR_MODE_P (op0_mode))
3145 RTVEC_ELT (v, i) = trueop0;
3147 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3151 if (!VECTOR_MODE_P (op1_mode))
3152 RTVEC_ELT (v, i) = trueop1;
3154 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3159 return gen_rtx_CONST_VECTOR (mode, v);
3172 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3175 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3177 unsigned int width = GET_MODE_BITSIZE (mode);
3179 if (VECTOR_MODE_P (mode)
3180 && code != VEC_CONCAT
3181 && GET_CODE (op0) == CONST_VECTOR
3182 && GET_CODE (op1) == CONST_VECTOR)
3184 unsigned n_elts = GET_MODE_NUNITS (mode);
3185 enum machine_mode op0mode = GET_MODE (op0);
3186 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3187 enum machine_mode op1mode = GET_MODE (op1);
3188 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3189 rtvec v = rtvec_alloc (n_elts);
3192 gcc_assert (op0_n_elts == n_elts);
3193 gcc_assert (op1_n_elts == n_elts);
3194 for (i = 0; i < n_elts; i++)
3196 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3197 CONST_VECTOR_ELT (op0, i),
3198 CONST_VECTOR_ELT (op1, i));
3201 RTVEC_ELT (v, i) = x;
3204 return gen_rtx_CONST_VECTOR (mode, v);
3207 if (VECTOR_MODE_P (mode)
3208 && code == VEC_CONCAT
3209 && (CONST_INT_P (op0)
3210 || GET_CODE (op0) == CONST_DOUBLE
3211 || GET_CODE (op0) == CONST_FIXED)
3212 && (CONST_INT_P (op1)
3213 || GET_CODE (op1) == CONST_DOUBLE
3214 || GET_CODE (op1) == CONST_FIXED))
3216 unsigned n_elts = GET_MODE_NUNITS (mode);
3217 rtvec v = rtvec_alloc (n_elts);
3219 gcc_assert (n_elts >= 2);
3222 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3223 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3225 RTVEC_ELT (v, 0) = op0;
3226 RTVEC_ELT (v, 1) = op1;
3230 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3231 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3234 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3235 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3236 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3238 for (i = 0; i < op0_n_elts; ++i)
3239 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3240 for (i = 0; i < op1_n_elts; ++i)
3241 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3244 return gen_rtx_CONST_VECTOR (mode, v);
3247 if (SCALAR_FLOAT_MODE_P (mode)
3248 && GET_CODE (op0) == CONST_DOUBLE
3249 && GET_CODE (op1) == CONST_DOUBLE
3250 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3261 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3263 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3265 for (i = 0; i < 4; i++)
3282 real_from_target (&r, tmp0, mode);
3283 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3287 REAL_VALUE_TYPE f0, f1, value, result;
3290 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3291 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3292 real_convert (&f0, mode, &f0);
3293 real_convert (&f1, mode, &f1);
3295 if (HONOR_SNANS (mode)
3296 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3300 && REAL_VALUES_EQUAL (f1, dconst0)
3301 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3304 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3305 && flag_trapping_math
3306 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3308 int s0 = REAL_VALUE_NEGATIVE (f0);
3309 int s1 = REAL_VALUE_NEGATIVE (f1);
3314 /* Inf + -Inf = NaN plus exception. */
3319 /* Inf - Inf = NaN plus exception. */
3324 /* Inf / Inf = NaN plus exception. */
3331 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3332 && flag_trapping_math
3333 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3334 || (REAL_VALUE_ISINF (f1)
3335 && REAL_VALUES_EQUAL (f0, dconst0))))
3336 /* Inf * 0 = NaN plus exception. */
3339 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3341 real_convert (&result, mode, &value);
3343 /* Don't constant fold this floating point operation if
3344 the result has overflowed and flag_trapping_math is set. */
3346 if (flag_trapping_math
3347 && MODE_HAS_INFINITIES (mode)
3348 && REAL_VALUE_ISINF (result)
3349 && !REAL_VALUE_ISINF (f0)
3350 && !REAL_VALUE_ISINF (f1))
3351 /* Overflow plus exception. */
3354 /* Don't constant fold this floating point operation if the
3355 result may depend upon the run-time rounding mode and
3356 flag_rounding_math is set, or if GCC's software emulation
3357 is unable to accurately represent the result. */
3359 if ((flag_rounding_math
3360 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3361 && (inexact || !real_identical (&result, &value)))
3364 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3368 /* We can fold some multi-word operations. */
3369 if (GET_MODE_CLASS (mode) == MODE_INT
3370 && width == HOST_BITS_PER_DOUBLE_INT
3371 && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3372 && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3374 double_int o0, o1, res, tmp;
3376 o0 = rtx_to_double_int (op0);
3377 o1 = rtx_to_double_int (op1);
3382 /* A - B == A + (-B). */
3383 o1 = double_int_neg (o1);
3385 /* Fall through.... */
3388 res = double_int_add (o0, o1);
3392 res = double_int_mul (o0, o1);
3396 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3397 o0.low, o0.high, o1.low, o1.high,
3398 &res.low, &res.high,
3399 &tmp.low, &tmp.high))
3404 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3405 o0.low, o0.high, o1.low, o1.high,
3406 &tmp.low, &tmp.high,
3407 &res.low, &res.high))
3412 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3413 o0.low, o0.high, o1.low, o1.high,
3414 &res.low, &res.high,
3415 &tmp.low, &tmp.high))
3420 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3421 o0.low, o0.high, o1.low, o1.high,
3422 &tmp.low, &tmp.high,
3423 &res.low, &res.high))
3428 res = double_int_and (o0, o1);
3432 res = double_int_ior (o0, o1);
3436 res = double_int_xor (o0, o1);
3440 res = double_int_smin (o0, o1);
3444 res = double_int_smax (o0, o1);
3448 res = double_int_umin (o0, o1);
3452 res = double_int_umax (o0, o1);
3455 case LSHIFTRT: case ASHIFTRT:
3457 case ROTATE: case ROTATERT:
3459 unsigned HOST_WIDE_INT cnt;
3461 if (SHIFT_COUNT_TRUNCATED)
3462 o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));
3464 if (!double_int_fits_in_uhwi_p (o1)
3465 || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
3468 cnt = double_int_to_uhwi (o1);
3470 if (code == LSHIFTRT || code == ASHIFTRT)
3471 res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode),
3473 else if (code == ASHIFT)
3474 res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode),
3476 else if (code == ROTATE)
3477 res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3478 else /* code == ROTATERT */
3479 res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3487 return immed_double_int_const (res, mode);
3490 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3491 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3493 /* Get the integer argument values in two forms:
3494 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3496 arg0 = INTVAL (op0);
3497 arg1 = INTVAL (op1);
3499 if (width < HOST_BITS_PER_WIDE_INT)
3501 arg0 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3502 arg1 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3505 if (arg0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3506 arg0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3509 if (arg1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3510 arg1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3518 /* Compute the value of the arithmetic. */
3523 val = arg0s + arg1s;
3527 val = arg0s - arg1s;
3531 val = arg0s * arg1s;
3536 || ((unsigned HOST_WIDE_INT) arg0s
3537 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3540 val = arg0s / arg1s;
3545 || ((unsigned HOST_WIDE_INT) arg0s
3546 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3549 val = arg0s % arg1s;
3554 || ((unsigned HOST_WIDE_INT) arg0s
3555 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3558 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3563 || ((unsigned HOST_WIDE_INT) arg0s
3564 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3567 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3585 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3586 the value is in range. We can't return any old value for
3587 out-of-range arguments because either the middle-end (via
3588 shift_truncation_mask) or the back-end might be relying on
3589 target-specific knowledge. Nor can we rely on
3590 shift_truncation_mask, since the shift might not be part of an
3591 ashlM3, lshrM3 or ashrM3 instruction. */
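/* For example, shifting a 32-bit value by 33: with
   SHIFT_COUNT_TRUNCATED the count folds as 33 % 32 == 1, while
   without it the constant fold is declined so the target's own
   semantics for out-of-range counts are preserved.  */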
3592 if (SHIFT_COUNT_TRUNCATED)
3593 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3594 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3597 val = (code == ASHIFT
3598 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3599 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3601 /* Sign-extend the result for arithmetic right shifts. */
3602 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3603 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3611 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3612 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3620 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3621 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3625 /* Do nothing here. */
3629 val = arg0s <= arg1s ? arg0s : arg1s;
3633 val = ((unsigned HOST_WIDE_INT) arg0
3634 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3638 val = arg0s > arg1s ? arg0s : arg1s;
3642 val = ((unsigned HOST_WIDE_INT) arg0
3643 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3656 /* ??? There are simplifications that can be done. */
3663 return gen_int_mode (val, mode);
3671 /* Simplify a PLUS or MINUS, at least one of whose operands may be another PLUS or MINUS.
3674 Rather than test for specific cases, we do this by a brute-force method
3675 and do all possible simplifications until no more changes occur. Then
3676 we rebuild the operation. */
3678 struct simplify_plus_minus_op_data
3685 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3689 result = (commutative_operand_precedence (y)
3690 - commutative_operand_precedence (x));
3694 /* Group together equal REGs to do more simplification. */
3695 if (REG_P (x) && REG_P (y))
3696 return REGNO (x) > REGNO (y);
3702 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3705 struct simplify_plus_minus_op_data ops[8];
3707 int n_ops = 2, input_ops = 2;
3708 int changed, n_constants = 0, canonicalized = 0;
3711 memset (ops, 0, sizeof ops);
3713 /* Set up the two operands and then expand them until nothing has been
3714 changed. If we run out of room in our array, give up; this should
3715 almost never happen. */
3720 ops[1].neg = (code == MINUS);
3726 for (i = 0; i < n_ops; i++)
3728 rtx this_op = ops[i].op;
3729 int this_neg = ops[i].neg;
3730 enum rtx_code this_code = GET_CODE (this_op);
3739 ops[n_ops].op = XEXP (this_op, 1);
3740 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3743 ops[i].op = XEXP (this_op, 0);
3746 canonicalized |= this_neg;
3750 ops[i].op = XEXP (this_op, 0);
3751 ops[i].neg = ! this_neg;
3758 && GET_CODE (XEXP (this_op, 0)) == PLUS
3759 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3760 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3762 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3763 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3764 ops[n_ops].neg = this_neg;
3772 /* ~a -> (-a - 1) */
3775 ops[n_ops].op = constm1_rtx;
3776 ops[n_ops++].neg = this_neg;
3777 ops[i].op = XEXP (this_op, 0);
3778 ops[i].neg = !this_neg;
3788 ops[i].op = neg_const_int (mode, this_op);
3802 if (n_constants > 1)
3805 gcc_assert (n_ops >= 2);
3807 /* If we only have two operands, we can avoid the loops. */
3810 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3813 /* Get the two operands. Be careful with the order, especially for
3814 the cases where code == MINUS. */
3815 if (ops[0].neg && ops[1].neg)
3817 lhs = gen_rtx_NEG (mode, ops[0].op);
3820 else if (ops[0].neg)
3831 return simplify_const_binary_operation (code, mode, lhs, rhs);
3834 /* Now simplify each pair of operands until nothing changes. */
3837 /* Insertion sort is good enough for an eight-element array. */
3838 for (i = 1; i < n_ops; i++)
3840 struct simplify_plus_minus_op_data save;
3842 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3848 ops[j + 1] = ops[j];
3849 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3854 for (i = n_ops - 1; i > 0; i--)
3855 for (j = i - 1; j >= 0; j--)
3857 rtx lhs = ops[j].op, rhs = ops[i].op;
3858 int lneg = ops[j].neg, rneg = ops[i].neg;
3860 if (lhs != 0 && rhs != 0)
3862 enum rtx_code ncode = PLUS;
3868 tem = lhs, lhs = rhs, rhs = tem;
3870 else if (swap_commutative_operands_p (lhs, rhs))
3871 tem = lhs, lhs = rhs, rhs = tem;
3873 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3874 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3876 rtx tem_lhs, tem_rhs;
3878 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3879 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3880 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3882 if (tem && !CONSTANT_P (tem))
3883 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3886 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3888 /* Reject "simplifications" that just wrap the two
3889 arguments in a CONST. Failure to do so can result
3890 in infinite recursion with simplify_binary_operation
3891 when it calls us to simplify CONST operations. */
3893 && ! (GET_CODE (tem) == CONST
3894 && GET_CODE (XEXP (tem, 0)) == ncode
3895 && XEXP (XEXP (tem, 0), 0) == lhs
3896 && XEXP (XEXP (tem, 0), 1) == rhs))
3899 if (GET_CODE (tem) == NEG)
3900 tem = XEXP (tem, 0), lneg = !lneg;
3901 if (CONST_INT_P (tem) && lneg)
3902 tem = neg_const_int (mode, tem), lneg = 0;
3906 ops[j].op = NULL_RTX;
3913 /* If nothing changed, fail. */
3917 /* Pack all the operands to the lower-numbered entries. */
3918 for (i = 0, j = 0; j < n_ops; j++)
3928 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3930 && CONST_INT_P (ops[1].op)
3931 && CONSTANT_P (ops[0].op)
3933 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3935 /* We suppressed creation of trivial CONST expressions in the
3936 combination loop to avoid recursion. Create one manually now.
3937 The combination loop should have ensured that there is exactly
3938 one CONST_INT, and the sort will have ensured that it is last
3939 in the array and that any other constant will be next-to-last. */
3942 && CONST_INT_P (ops[n_ops - 1].op)
3943 && CONSTANT_P (ops[n_ops - 2].op))
3945 rtx value = ops[n_ops - 1].op;
3946 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3947 value = neg_const_int (mode, value);
3948 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3952 /* Put a non-negated operand first, if possible. */
3954 for (i = 0; i < n_ops && ops[i].neg; i++)
3957 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3966 /* Now make the result by performing the requested operations. */
3968 for (i = 1; i < n_ops; i++)
3969 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3970 mode, result, ops[i].op);
3975 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3977 plus_minus_operand_p (const_rtx x)
3979 return GET_CODE (x) == PLUS
3980 || GET_CODE (x) == MINUS
3981 || (GET_CODE (x) == CONST
3982 && GET_CODE (XEXP (x, 0)) == PLUS
3983 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3984 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3987 /* Like simplify_binary_operation except used for relational operators.
3988 MODE is the mode of the result. If MODE is VOIDmode, the operands
3989 must not both be VOIDmode.
3991 CMP_MODE specifies the mode in which the comparison is done, so it is
3992 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3993 the operands or, if both are VOIDmode, the operands are compared in
3994 "infinite precision". */
3996 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3997 enum machine_mode cmp_mode, rtx op0, rtx op1)
3999 rtx tem, trueop0, trueop1;
4001 if (cmp_mode == VOIDmode)
4002 cmp_mode = GET_MODE (op0);
4003 if (cmp_mode == VOIDmode)
4004 cmp_mode = GET_MODE (op1);
4006 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4009 if (SCALAR_FLOAT_MODE_P (mode))
4011 if (tem == const0_rtx)
4012 return CONST0_RTX (mode);
4013 #ifdef FLOAT_STORE_FLAG_VALUE
4015 REAL_VALUE_TYPE val;
4016 val = FLOAT_STORE_FLAG_VALUE (mode);
4017 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4023 if (VECTOR_MODE_P (mode))
4025 if (tem == const0_rtx)
4026 return CONST0_RTX (mode);
4027 #ifdef VECTOR_STORE_FLAG_VALUE
4032 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4033 if (val == NULL_RTX)
4035 if (val == const1_rtx)
4036 return CONST1_RTX (mode);
4038 units = GET_MODE_NUNITS (mode);
4039 v = rtvec_alloc (units);
4040 for (i = 0; i < units; i++)
4041 RTVEC_ELT (v, i) = val;
4042 return gen_rtx_raw_CONST_VECTOR (mode, v);
4052 /* For the following tests, ensure const0_rtx is op1. */
4053 if (swap_commutative_operands_p (op0, op1)
4054 || (op0 == const0_rtx && op1 != const0_rtx))
4055 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4057 /* If op0 is a compare, extract the comparison arguments from it. */
4058 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4059 return simplify_gen_relational (code, mode, VOIDmode,
4060 XEXP (op0, 0), XEXP (op0, 1));
4062 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4066 trueop0 = avoid_constant_pool_reference (op0);
4067 trueop1 = avoid_constant_pool_reference (op1);
4068 return simplify_relational_operation_1 (code, mode, cmp_mode,
4072 /* This part of simplify_relational_operation is only used when CMP_MODE
4073 is not in class MODE_CC (i.e. it is a real comparison).
4075 MODE is the mode of the result, while CMP_MODE specifies the mode
4076 in which the comparison is done, so it is the mode of the operands. */
4079 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4080 enum machine_mode cmp_mode, rtx op0, rtx op1)
4082 enum rtx_code op0code = GET_CODE (op0);
4084 if (op1 == const0_rtx && COMPARISON_P (op0))
4086 /* If op0 is a comparison, extract the comparison arguments from it. */
4090 if (GET_MODE (op0) == mode)
4091 return simplify_rtx (op0);
4093 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4094 XEXP (op0, 0), XEXP (op0, 1));
4096 else if (code == EQ)
4098 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4099 if (new_code != UNKNOWN)
4100 return simplify_gen_relational (new_code, mode, VOIDmode,
4101 XEXP (op0, 0), XEXP (op0, 1));
4105 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4106 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4107 if ((code == LTU || code == GEU)
4108 && GET_CODE (op0) == PLUS
4109 && CONST_INT_P (XEXP (op0, 1))
4110 && (rtx_equal_p (op1, XEXP (op0, 0))
4111 || rtx_equal_p (op1, XEXP (op0, 1))))
4114 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4115 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4116 cmp_mode, XEXP (op0, 0), new_cmp);
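/* Worked example: (ltu (plus a (const_int 4)) (const_int 4)) is
   true exactly when a + 4 wraps around, i.e. when a is
   unsigned-greater-or-equal to -4, so it becomes
   (geu a (const_int -4)).  */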
4119 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4120 if ((code == LTU || code == GEU)
4121 && GET_CODE (op0) == PLUS
4122 && rtx_equal_p (op1, XEXP (op0, 1))
4123 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4124 && !rtx_equal_p (op1, XEXP (op0, 0)))
4125 return simplify_gen_relational (code, mode, cmp_mode, op0,
4126 copy_rtx (XEXP (op0, 0)));
4128 if (op1 == const0_rtx)
4130 /* Canonicalize (GTU x 0) as (NE x 0). */
4132 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4133 /* Canonicalize (LEU x 0) as (EQ x 0). */
4135 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4137 else if (op1 == const1_rtx)
4142 /* Canonicalize (GE x 1) as (GT x 0). */
4143 return simplify_gen_relational (GT, mode, cmp_mode,
4146 /* Canonicalize (GEU x 1) as (NE x 0). */
4147 return simplify_gen_relational (NE, mode, cmp_mode,
4150 /* Canonicalize (LT x 1) as (LE x 0). */
4151 return simplify_gen_relational (LE, mode, cmp_mode,
4154 /* Canonicalize (LTU x 1) as (EQ x 0). */
4155 return simplify_gen_relational (EQ, mode, cmp_mode,
4161 else if (op1 == constm1_rtx)
4163 /* Canonicalize (LE x -1) as (LT x 0). */
4165 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4166 /* Canonicalize (GT x -1) as (GE x 0). */
4168 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4171 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4172 if ((code == EQ || code == NE)
4173 && (op0code == PLUS || op0code == MINUS)
4175 && CONSTANT_P (XEXP (op0, 1))
4176 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4178 rtx x = XEXP (op0, 0);
4179 rtx c = XEXP (op0, 1);
4180 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4181 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4183 /* Detect an infinite recursive condition, where we oscillate at this
4184 simplification case between:
4185 A + B == C <---> C - B == A,
4186 where A, B, and C are all constants with non-simplifiable expressions,
4187 usually SYMBOL_REFs. */
4188 if (GET_CODE (tem) == invcode
4190 && rtx_equal_p (c, XEXP (tem, 1)))
4193 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
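/* For example, (eq (plus x (const_int 3)) (const_int 10)) becomes
   (eq x (const_int 7)), folding the constant to the other side.  */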
4196 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4197 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4199 && op1 == const0_rtx
4200 && GET_MODE_CLASS (mode) == MODE_INT
4201 && cmp_mode != VOIDmode
4202 /* ??? Work-around BImode bugs in the ia64 backend. */
4204 && cmp_mode != BImode
4205 && nonzero_bits (op0, cmp_mode) == 1
4206 && STORE_FLAG_VALUE == 1)
4207 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4208 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4209 : lowpart_subreg (mode, op0, cmp_mode);
4211 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4212 if ((code == EQ || code == NE)
4213 && op1 == const0_rtx
4215 return simplify_gen_relational (code, mode, cmp_mode,
4216 XEXP (op0, 0), XEXP (op0, 1));
4218 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4219 if ((code == EQ || code == NE)
4221 && rtx_equal_p (XEXP (op0, 0), op1)
4222 && !side_effects_p (XEXP (op0, 0)))
4223 return simplify_gen_relational (code, mode, cmp_mode,
4224 XEXP (op0, 1), const0_rtx);
4226 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4227 if ((code == EQ || code == NE)
4229 && rtx_equal_p (XEXP (op0, 1), op1)
4230 && !side_effects_p (XEXP (op0, 1)))
4231 return simplify_gen_relational (code, mode, cmp_mode,
4232 XEXP (op0, 0), const0_rtx);
4234 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4235 if ((code == EQ || code == NE)
4237 && (CONST_INT_P (op1)
4238 || GET_CODE (op1) == CONST_DOUBLE)
4239 && (CONST_INT_P (XEXP (op0, 1))
4240 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4241 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4242 simplify_gen_binary (XOR, cmp_mode,
4243 XEXP (op0, 1), op1));
4245 if (op0code == POPCOUNT && op1 == const0_rtx)
4251 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4252 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4253 XEXP (op0, 0), const0_rtx);
4258 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4259 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4260 XEXP (op0, 0), const0_rtx);
4279 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4280 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4281 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4282 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4283 For floating-point comparisons, assume that the operands were ordered. */
4286 comparison_result (enum rtx_code code, int known_results)
4292 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4295 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4299 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4302 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4306 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4309 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4312 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4314 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4317 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4319 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4322 return const_true_rtx;
4330 /* Check if the given comparison (done in the given MODE) is actually a
4331 tautology or a contradiction.
4332 If no simplification is possible, this function returns zero.
4333 Otherwise, it returns either const_true_rtx or const0_rtx. */
4336 simplify_const_relational_operation (enum rtx_code code,
4337 enum machine_mode mode,
4344 gcc_assert (mode != VOIDmode
4345 || (GET_MODE (op0) == VOIDmode
4346 && GET_MODE (op1) == VOIDmode));
4348 /* If op0 is a compare, extract the comparison arguments from it. */
4349 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4351 op1 = XEXP (op0, 1);
4352 op0 = XEXP (op0, 0);
4354 if (GET_MODE (op0) != VOIDmode)
4355 mode = GET_MODE (op0);
4356 else if (GET_MODE (op1) != VOIDmode)
4357 mode = GET_MODE (op1);
4362 /* We can't simplify MODE_CC values since we don't know what the
4363 actual comparison is. */
4364 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4367 /* Make sure the constant is second. */
4368 if (swap_commutative_operands_p (op0, op1))
4370 tem = op0, op0 = op1, op1 = tem;
4371 code = swap_condition (code);
4374 trueop0 = avoid_constant_pool_reference (op0);
4375 trueop1 = avoid_constant_pool_reference (op1);
4377 /* For integer comparisons of A and B maybe we can simplify A - B and can
4378 then simplify a comparison of that with zero. If A and B are both either
4379 a register or a CONST_INT, this can't help; testing for these cases will
4380 prevent infinite recursion here and speed things up.
4382 We can only do this for EQ and NE comparisons as otherwise we may
4383 lose or introduce overflow which we cannot disregard as undefined as
4384 we do not know the signedness of the operation on either the left or
4385 the right hand side of the comparison. */
4387 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4388 && (code == EQ || code == NE)
4389 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4390 && (REG_P (op1) || CONST_INT_P (trueop1)))
4391 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4392 /* We cannot do this if tem is a nonzero address. */
4393 && ! nonzero_address_p (tem))
4394 return simplify_const_relational_operation (signed_condition (code),
4395 mode, tem, const0_rtx);
4397 if (! HONOR_NANS (mode) && code == ORDERED)
4398 return const_true_rtx;
4400 if (! HONOR_NANS (mode) && code == UNORDERED)
4403 /* For modes without NaNs, if the two operands are equal, we know the
4404 result except if they have side-effects. Even with NaNs we know
4405 the result of unordered comparisons and, if signaling NaNs are
4406 irrelevant, also the result of LT/GT/LTGT. */
4407 if ((! HONOR_NANS (GET_MODE (trueop0))
4408 || code == UNEQ || code == UNLE || code == UNGE
4409 || ((code == LT || code == GT || code == LTGT)
4410 && ! HONOR_SNANS (GET_MODE (trueop0))))
4411 && rtx_equal_p (trueop0, trueop1)
4412 && ! side_effects_p (trueop0))
4413 return comparison_result (code, CMP_EQ);
4415 /* If the operands are floating-point constants, see if we can fold the result. */
4417 if (GET_CODE (trueop0) == CONST_DOUBLE
4418 && GET_CODE (trueop1) == CONST_DOUBLE
4419 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4421 REAL_VALUE_TYPE d0, d1;
4423 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4424 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4426 /* Comparisons are unordered iff at least one of the values is NaN. */
4427 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4437 return const_true_rtx;
4450 return comparison_result (code,
4451 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4452 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4455 /* Otherwise, see if the operands are both integers. */
4456 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4457 && (GET_CODE (trueop0) == CONST_DOUBLE
4458 || CONST_INT_P (trueop0))
4459 && (GET_CODE (trueop1) == CONST_DOUBLE
4460 || CONST_INT_P (trueop1)))
4462 int width = GET_MODE_BITSIZE (mode);
4463 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4464 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4466 /* Get the two words comprising each integer constant. */
4467 if (GET_CODE (trueop0) == CONST_DOUBLE)
4469 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4470 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4474 l0u = l0s = INTVAL (trueop0);
4475 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4478 if (GET_CODE (trueop1) == CONST_DOUBLE)
4480 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4481 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4485 l1u = l1s = INTVAL (trueop1);
4486 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4489 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4490 we have to sign or zero-extend the values. */
4491 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4493 l0u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4494 l1u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4496 if (l0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4497 l0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4499 if (l1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4500 l1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4502 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4503 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4505 if (h0u == h1u && l0u == l1u)
4506 return comparison_result (code, CMP_EQ);
4510 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4511 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4512 return comparison_result (code, cr);
  /* Optimize comparisons with upper and lower bounds.  */
  if (SCALAR_INT_MODE_P (mode)
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU || code == LEU || code == GTU || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
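
  /* E.g. for (gtu:SI (zero_extend:SI (reg:QI X)) (const_int 300)),
     nonzero_bits reduces the range to mmax = 255, so VAL >= mmax makes
     the GTU comparison fold to const0_rtx.  */
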
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GE:
		case GT:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }
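
  /* E.g. (ne:SI (ior:SI (reg:SI X) (const_int 4)) (const_int 0)) folds
     to const_true_rtx: the IOR guarantees bit 2 is set, so the value
     can never be zero.  */
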
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
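
/* E.g. (ge:SF (abs:SF X) (const_double:SF 0.0)) is folded to
   const_true_rtx by the GE case above whenever SFmode NaNs can be
   ignored.  */
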
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  bool any_change = false;
  rtx tem;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      break;
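
      /* E.g. (fma (neg:SF a) (neg:SF b) c) is rewritten as (fma a b c):
	 the first NEG cancels against the simplified negation of the
	 second operand.  */
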
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1);
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1
			     << (INTVAL (op1) - 1))) != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((unsigned HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((unsigned HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;

	  return gen_int_mode (val, mode);
	}
      break;
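
      /* Worked example, assuming !BITS_BIG_ENDIAN:
	 (zero_extract:SI (const_int 0xA5) (const_int 4) (const_int 0))
	 shifts by 0 and masks to 4 bits, folding to (const_int 5).  */
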
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
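
      /* E.g. with STORE_FLAG_VALUE == 1,
	 (if_then_else (lt:SI a b) (const_int 1) (const_int 0)) collapses
	 to the comparison (lt:SI a b) itself.  */
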
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
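
/* E.g. (vec_merge:V4SI A B (const_int 5)) above selects elements 0 and 2
   from A and elements 1 and 3 from B; when both A and B are CONST_VECTORs
   the whole merge folds to a single CONST_VECTOR.  */
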
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
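
  /* E.g. on a big-endian 64-bit target, the lowpart SImode subreg of a
     DImode constant has BYTE == 4; the renumbering above maps it back to
     0, the least-significant byte of the little-endian VALUE array.  */
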
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (unsigned HOST_WIDE_INT) (*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= (unsigned HOST_WIDE_INT) (*vp++ & value_mask)
		    << (i - HOST_BITS_PER_WIDE_INT);

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT) (*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT) (*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
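
/* Example: simplify_immed_subreg (SImode, constm1_rtx, DImode, 0) unpacks
   the 64-bit all-ones pattern and repacks four of its bytes, yielding
   (const_int -1) regardless of endianness.  */
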
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (CONST_INT_P (op)
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  An irritating exception is the paradoxical subreg,
	 where we define SUBREG_BYTE to be 0; on big-endian machines the
	 value would otherwise be negative.  For a moment, undo this
	 exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case the resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
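
  /* E.g. (subreg:QI (subreg:HI (reg:SI R) 2) 1) collapses to
     (subreg:QI (reg:SI R) 3): the two memory-order byte offsets are
     simply added before re-validating the single remaining subreg.  */
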
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust the offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok a partial register anyway.  */
	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
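
  /* E.g. (subreg:QI (mem:SI ADDR) 3) becomes, conceptually,
     (mem:QI (plus ADDR 3)): the byte offset is folded straight into the
     address by adjust_address_nv.  */
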
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
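
  /* E.g. the lowpart HImode subreg of (zero_extend:SI (reg:HI X)) is
     just (reg:HI X), while a highpart subreg of the same extension
     folds to zero instead.  */
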
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
      && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) >= 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }
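
  /* E.g. on a 32-bit little-endian target,
     (subreg:SI (lshiftrt:DI (reg:DI X) (const_int 32)) 0) is recognized
     as the upper word and rewritten as (subreg:SI (reg:DI X) 4).  */
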
  /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
     and try replacing the SUBREG and shift with it.  Don't do this if
     the MEM has a mode-dependent address or if we would be widening it.  */

  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && byte == subreg_lowpart_offset (outermode, innermode)
      && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), outermode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
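
/* E.g. on a little-endian target, simplify_gen_subreg (QImode, X, SImode, 0)
   either folds the lowpart of X outright or hands back a fresh
   (subreg:QI X 0); a NULL result means the requested subreg would not be
   valid.  */
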
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   that do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL;
}