1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "output.h"
39 #include "ggc.h"
40 #include "target.h"
42 /* Simplification and canonicalization of RTL. */
44 /* Much code operates on (low, high) pairs; the low value is an
45 unsigned wide int, the high value a signed wide int. We
46 occasionally need to sign extend from low to high as if low were a
47 signed wide int. */
48 #define HWI_SIGN_EXTEND(low) \
49 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
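/* For example, if LOW has its sign bit set when read as a
   HOST_WIDE_INT, the high half of the pair must be all ones (-1);
   otherwise it must be zero. */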
51 static rtx neg_const_int (enum machine_mode, const_rtx);
52 static bool plus_minus_operand_p (const_rtx);
53 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
55 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
56 unsigned int);
57 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
58 rtx, rtx);
59 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
60 enum machine_mode, rtx, rtx);
61 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
62 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
63 rtx, rtx, rtx, rtx);
65 /* Negate a CONST_INT rtx, truncating (because a conversion from a
66 maximally negative number can overflow). */
67 static rtx
68 neg_const_int (enum machine_mode mode, const_rtx i)
69 {
70 return gen_int_mode (- INTVAL (i), mode);
71 }
73 /* Test whether expression, X, is an immediate constant that represents
74 the most significant bit of machine mode MODE. */
76 bool
77 mode_signbit_p (enum machine_mode mode, const_rtx x)
78 {
79 unsigned HOST_WIDE_INT val;
80 unsigned int width;
82 if (GET_MODE_CLASS (mode) != MODE_INT)
83 return false;
85 width = GET_MODE_BITSIZE (mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
93 && GET_CODE (x) == CONST_DOUBLE
94 && CONST_DOUBLE_LOW (x) == 0)
95 {
96 val = CONST_DOUBLE_HIGH (x);
97 width -= HOST_BITS_PER_WIDE_INT;
98 }
99 else
100 return false;
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
105 }
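/* For instance, with a 64-bit HOST_WIDE_INT the SImode sign bit is
   the CONST_INT whose value is 0x80000000 (stored sign-extended);
   masking VAL down to the 32-bit width and comparing against
   (unsigned HOST_WIDE_INT) 1 << 31 accepts exactly that constant. */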
107 /* Make a binary operation by properly ordering the operands and
108 seeing if the expression folds. */
110 rtx
111 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
112 rtx op1)
113 {
114 rtx tem;
116 /* If this simplifies, do it. */
117 tem = simplify_binary_operation (code, mode, op0, op1);
118 if (tem)
119 return tem;
121 /* Put complex operands first and constants second if commutative. */
122 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
123 && swap_commutative_operands_p (op0, op1))
124 tem = op0, op0 = op1, op1 = tem;
126 return gen_rtx_fmt_ee (code, mode, op0, op1);
127 }
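/* Canonical RTL order puts the more complex operand first, so e.g.
   (plus (const_int 4) (reg)) is rebuilt here as
   (plus (reg) (const_int 4)) before the expression is generated. */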
129 /* If X is a MEM referencing the constant pool, return the real value.
130 Otherwise return X. */
131 rtx
132 avoid_constant_pool_reference (rtx x)
133 {
134 rtx c, tmp, addr;
135 enum machine_mode cmode;
136 HOST_WIDE_INT offset = 0;
138 switch (GET_CODE (x))
144 /* Handle float extensions of constant pool references. */
146 c = avoid_constant_pool_reference (tmp);
147 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
151 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
152 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
160 if (GET_MODE (x) == BLKmode)
165 /* Call target hook to avoid the effects of -fpic etc.... */
166 addr = targetm.delegitimize_address (addr);
168 /* Split the address into a base and integer offset. */
169 if (GET_CODE (addr) == CONST
170 && GET_CODE (XEXP (addr, 0)) == PLUS
171 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
173 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
174 addr = XEXP (XEXP (addr, 0), 0);
177 if (GET_CODE (addr) == LO_SUM)
178 addr = XEXP (addr, 1);
180 /* If this is a constant pool reference, we can turn it into its
181 constant and hope that simplifications happen. */
182 if (GET_CODE (addr) == SYMBOL_REF
183 && CONSTANT_POOL_ADDRESS_P (addr))
185 c = get_pool_constant (addr);
186 cmode = get_pool_mode (addr);
188 /* If we're accessing the constant in a different mode than it was
189 originally stored, attempt to fix that up via subreg simplifications.
190 If that fails we have no choice but to return the original memory. */
191 if (offset != 0 || cmode != GET_MODE (x))
193 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
194 if (tem && CONSTANT_P (tem))
195 return tem;
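/* For example, a 32-bit word of a DImode pool constant read through
   an SImode MEM folds via simplify_subreg into the corresponding
   CONST_INT, and the memory reference disappears entirely. */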
204 /* Simplify a MEM based on its attributes. This is the default
205 delegitimize_address target hook, and it's recommended that every
206 overrider call it. */
209 delegitimize_mem_from_attrs (rtx x)
211 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
212 use their base addresses as equivalent. */
217 tree decl = MEM_EXPR (x);
218 enum machine_mode mode = GET_MODE (x);
219 HOST_WIDE_INT offset = 0;
221 switch (TREE_CODE (decl))
231 case ARRAY_RANGE_REF:
236 case VIEW_CONVERT_EXPR:
238 HOST_WIDE_INT bitsize, bitpos;
240 int unsignedp = 0, volatilep = 0;
242 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
243 &mode, &unsignedp, &volatilep, false);
244 if (bitsize != GET_MODE_BITSIZE (mode)
245 || (bitpos % BITS_PER_UNIT)
246 || (toffset && !host_integerp (toffset, 0)))
250 offset += bitpos / BITS_PER_UNIT;
252 offset += TREE_INT_CST_LOW (toffset);
259 && mode == GET_MODE (x)
260 && TREE_CODE (decl) == VAR_DECL
261 && (TREE_STATIC (decl)
262 || DECL_THREAD_LOCAL_P (decl))
263 && DECL_RTL_SET_P (decl)
264 && MEM_P (DECL_RTL (decl)))
268 offset += INTVAL (MEM_OFFSET (x));
270 newx = DECL_RTL (decl);
274 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
276 /* Avoid creating a new MEM needlessly if we already had
277 the same address. We do if there's no OFFSET and the
278 old address X is identical to NEWX, or if X is of the
279 form (plus NEWX OFFSET), or the NEWX is of the form
280 (plus Y (const_int Z)) and X is that with the offset
281 added: (plus Y (const_int Z+OFFSET)). */
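/* Concretely: if NEWX's address is (plus (reg R) (const_int 8)),
   OFFSET is 4, and the old address O is (plus (reg R) (const_int 12)),
   the two forms already denote the same location, so X is kept. */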
282 if (!((offset == 0
283 || (GET_CODE (o) == PLUS
284 && GET_CODE (XEXP (o, 1)) == CONST_INT
285 && (offset == INTVAL (XEXP (o, 1))
286 || (GET_CODE (n) == PLUS
287 && GET_CODE (XEXP (n, 1)) == CONST_INT
288 && (INTVAL (XEXP (n, 1)) + offset
289 == INTVAL (XEXP (o, 1)))
290 && (n = XEXP (n, 0))))
291 && (o = XEXP (o, 0))))
292 && rtx_equal_p (o, n)))
293 x = adjust_address_nv (newx, mode, offset);
294 }
295 else if (GET_MODE (x) == GET_MODE (newx)
296 && offset == 0)
297 x = newx;
298 }
300 return x;
301 }
304 /* Make a unary operation by first seeing if it folds and otherwise making
305 the specified operation. */
307 rtx
308 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
309 enum machine_mode op_mode)
310 {
311 rtx tem;
313 /* If this simplifies, use it. */
314 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
315 return tem;
317 return gen_rtx_fmt_e (code, mode, op);
320 /* Likewise for ternary operations. */
322 rtx
323 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
324 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
325 {
326 rtx tem;
328 /* If this simplifies, use it. */
329 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
330 op0, op1, op2)))
331 return tem;
333 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
336 /* Likewise, for relational operations.
337 CMP_MODE specifies the mode in which the comparison is done. */
339 rtx
340 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
341 enum machine_mode cmp_mode, rtx op0, rtx op1)
342 {
343 rtx tem;
345 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
346 op0, op1)))
347 return tem;
349 return gen_rtx_fmt_ee (code, mode, op0, op1);
352 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
353 and simplify the result. If FN is non-NULL, call this callback on each
354 X; if it returns non-NULL, replace X with its return value and simplify the
355 result. */
357 rtx
358 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
359 rtx (*fn) (rtx, const_rtx, void *), void *data)
361 enum rtx_code code = GET_CODE (x);
362 enum machine_mode mode = GET_MODE (x);
363 enum machine_mode op_mode;
365 rtx op0, op1, op2, newx, op;
366 rtvec vec, newvec;
367 int i, j;
369 if (__builtin_expect (fn != NULL, 0))
371 newx = fn (x, old_rtx, data);
375 else if (rtx_equal_p (x, old_rtx))
376 return copy_rtx ((rtx) data);
378 switch (GET_RTX_CLASS (code))
382 op_mode = GET_MODE (op0);
383 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
384 if (op0 == XEXP (x, 0))
386 return simplify_gen_unary (code, mode, op0, op_mode);
390 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
391 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
392 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
394 return simplify_gen_binary (code, mode, op0, op1);
397 case RTX_COMM_COMPARE:
400 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
401 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
402 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
403 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
405 return simplify_gen_relational (code, mode, op_mode, op0, op1);
408 case RTX_BITFIELD_OPS:
410 op_mode = GET_MODE (op0);
411 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
412 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
413 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
414 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
416 if (op_mode == VOIDmode)
417 op_mode = GET_MODE (op0);
418 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
423 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
424 if (op0 == SUBREG_REG (x))
426 op0 = simplify_gen_subreg (GET_MODE (x), op0,
427 GET_MODE (SUBREG_REG (x)),
429 return op0 ? op0 : x;
436 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
437 if (op0 == XEXP (x, 0))
439 return replace_equiv_address_nv (x, op0);
441 else if (code == LO_SUM)
443 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
444 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
446 /* (lo_sum (high x) x) -> x */
447 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
448 return op1;
450 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
452 return gen_rtx_LO_SUM (mode, op0, op1);
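/* A HIGH/LO_SUM pair is how targets that build addresses in two
   instructions (e.g. a RISC lui/addi sequence) represent a symbolic
   address; when the HIGH operand matches the LO_SUM immediate, the
   pair reconstitutes the full address, so it collapses to X itself. */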
461 fmt = GET_RTX_FORMAT (code);
462 for (i = 0; fmt[i]; i++)
467 newvec = XVEC (newx, i);
468 for (j = 0; j < GET_NUM_ELEM (vec); j++)
470 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
472 if (op != RTVEC_ELT (vec, j))
476 newvec = shallow_copy_rtvec (vec);
478 newx = shallow_copy_rtx (x);
479 XVEC (newx, i) = newvec;
481 RTVEC_ELT (newvec, j) = op;
489 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
490 if (op != XEXP (x, i))
493 newx = shallow_copy_rtx (x);
502 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
503 resulting RTX. Return a new RTX which is as simplified as possible. */
506 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
508 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
511 /* Try to simplify a unary operation CODE whose output mode is to be
512 MODE with input operand OP whose mode was originally OP_MODE.
513 Return zero if no simplification can be made. */
514 rtx
515 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
516 rtx op, enum machine_mode op_mode)
517 {
518 rtx trueop, tem;
520 trueop = avoid_constant_pool_reference (op);
522 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
523 if (tem)
524 return tem;
526 return simplify_unary_operation_1 (code, mode, op);
529 /* Perform some simplifications we can do even if the operands
530 aren't constant. */
531 static rtx
532 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
533 {
534 enum rtx_code reversed;
535 rtx temp;
537 switch (code)
538 {
539 case NOT:
540 /* (not (not X)) == X. */
541 if (GET_CODE (op) == NOT)
542 return XEXP (op, 0);
544 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
545 comparison is all ones. */
546 if (COMPARISON_P (op)
547 && (mode == BImode || STORE_FLAG_VALUE == -1)
548 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
549 return simplify_gen_relational (reversed, mode, VOIDmode,
550 XEXP (op, 0), XEXP (op, 1));
552 /* (not (plus X -1)) can become (neg X). */
553 if (GET_CODE (op) == PLUS
554 && XEXP (op, 1) == constm1_rtx)
555 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
557 /* Similarly, (not (neg X)) is (plus X -1). */
558 if (GET_CODE (op) == NEG)
559 return plus_constant (XEXP (op, 0), -1);
561 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
562 if (GET_CODE (op) == XOR
563 && CONST_INT_P (XEXP (op, 1))
564 && (temp = simplify_unary_operation (NOT, mode,
565 XEXP (op, 1), mode)) != 0)
566 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
568 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
569 if (GET_CODE (op) == PLUS
570 && CONST_INT_P (XEXP (op, 1))
571 && mode_signbit_p (mode, XEXP (op, 1))
572 && (temp = simplify_unary_operation (NOT, mode,
573 XEXP (op, 1), mode)) != 0)
574 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
577 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
578 operands other than 1, but that is not valid. We could do a
579 similar simplification for (not (lshiftrt C X)) where C is
580 just the sign bit, but this doesn't seem common enough to
581 bother with. */
582 if (GET_CODE (op) == ASHIFT
583 && XEXP (op, 0) == const1_rtx)
585 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
586 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
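/* E.g. in SImode, (not (ashift 1 X)) becomes (rotate (const_int -2) X):
   ~1 is ...11111110, and rotating that single zero bit left by X
   positions produces a mask with exactly bit X cleared. */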
589 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
590 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
591 so we can perform the above simplification. */
593 if (STORE_FLAG_VALUE == -1
594 && GET_CODE (op) == ASHIFTRT
595 && CONST_INT_P (XEXP (op, 1))
596 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
597 return simplify_gen_relational (GE, mode, VOIDmode,
598 XEXP (op, 0), const0_rtx);
601 if (GET_CODE (op) == SUBREG
602 && subreg_lowpart_p (op)
603 && (GET_MODE_SIZE (GET_MODE (op))
604 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
605 && GET_CODE (SUBREG_REG (op)) == ASHIFT
606 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
608 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
611 x = gen_rtx_ROTATE (inner_mode,
612 simplify_gen_unary (NOT, inner_mode, const1_rtx,
613 inner_mode),
614 XEXP (SUBREG_REG (op), 1));
615 return rtl_hooks.gen_lowpart_no_emit (mode, x);
618 /* Apply De Morgan's laws to reduce number of patterns for machines
619 with negating logical insns (and-not, nand, etc.). If result has
620 only one NOT, put it first, since that is how the patterns are
621 coded. */
623 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
625 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
626 enum machine_mode op_mode;
628 op_mode = GET_MODE (in1);
629 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
631 op_mode = GET_MODE (in2);
632 if (op_mode == VOIDmode)
634 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
636 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
637 {
638 rtx tem = in2;
639 in2 = in1; in1 = tem;
640 }
642 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
643 mode, in1, in2);
644 }
645 break;
647 case NEG:
648 /* (neg (neg X)) == X. */
649 if (GET_CODE (op) == NEG)
650 return XEXP (op, 0);
652 /* (neg (plus X 1)) can become (not X). */
653 if (GET_CODE (op) == PLUS
654 && XEXP (op, 1) == const1_rtx)
655 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
657 /* Similarly, (neg (not X)) is (plus X 1). */
658 if (GET_CODE (op) == NOT)
659 return plus_constant (XEXP (op, 0), 1);
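/* Both identities are two's complement arithmetic: -X == ~X + 1, so
   complementing (plus X -1) yields -X, and negating (not X) yields
   X + 1. */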
661 /* (neg (minus X Y)) can become (minus Y X). This transformation
662 isn't safe for modes with signed zeros, since if X and Y are
663 both +0, (minus Y X) is the same as (minus X Y). If the
664 rounding mode is towards +infinity (or -infinity) then the two
665 expressions will be rounded differently. */
666 if (GET_CODE (op) == MINUS
667 && !HONOR_SIGNED_ZEROS (mode)
668 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
669 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
671 if (GET_CODE (op) == PLUS
672 && !HONOR_SIGNED_ZEROS (mode)
673 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
675 /* (neg (plus A C)) is simplified to (minus -C A). */
676 if (CONST_INT_P (XEXP (op, 1))
677 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
679 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
681 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
684 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
685 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
686 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
689 /* (neg (mult A B)) becomes (mult A (neg B)).
690 This works even for floating-point values. */
691 if (GET_CODE (op) == MULT
692 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
694 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
695 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
698 /* NEG commutes with ASHIFT since it is multiplication. Only do
699 this if we can then eliminate the NEG (e.g., if the operand
700 is a constant). */
701 if (GET_CODE (op) == ASHIFT)
703 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
704 if (temp)
705 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
708 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
709 C is equal to the width of MODE minus 1. */
710 if (GET_CODE (op) == ASHIFTRT
711 && CONST_INT_P (XEXP (op, 1))
712 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (op, 0), XEXP (op, 1));
716 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
717 C is equal to the width of MODE minus 1. */
718 if (GET_CODE (op) == LSHIFTRT
719 && CONST_INT_P (XEXP (op, 1))
720 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
721 return simplify_gen_binary (ASHIFTRT, mode,
722 XEXP (op, 0), XEXP (op, 1));
724 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
725 if (GET_CODE (op) == XOR
726 && XEXP (op, 1) == const1_rtx
727 && nonzero_bits (XEXP (op, 0), mode) == 1)
728 return plus_constant (XEXP (op, 0), -1);
730 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
731 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
732 if (GET_CODE (op) == LT
733 && XEXP (op, 1) == const0_rtx
734 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
736 enum machine_mode inner = GET_MODE (XEXP (op, 0));
737 int isize = GET_MODE_BITSIZE (inner);
738 if (STORE_FLAG_VALUE == 1)
740 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
741 GEN_INT (isize - 1));
744 if (GET_MODE_BITSIZE (mode) > isize)
745 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
746 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
748 else if (STORE_FLAG_VALUE == -1)
750 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
751 GEN_INT (isize - 1));
754 if (GET_MODE_BITSIZE (mode) > isize)
755 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
756 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
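/* This works because (lt x 0) is just the sign bit of X: when
   STORE_FLAG_VALUE is 1, -(x < 0) is 0 or -1, i.e. an arithmetic
   right shift of X by ISIZE-1; when it is -1, the negation is 0 or 1,
   i.e. the sign bit shifted logically into bit 0. */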
761 case TRUNCATE:
762 /* We can't handle truncation to a partial integer mode here
763 because we don't know the real bitsize of the partial
764 integer mode. */
765 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
766 break;
768 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
769 if ((GET_CODE (op) == SIGN_EXTEND
770 || GET_CODE (op) == ZERO_EXTEND)
771 && GET_MODE (XEXP (op, 0)) == mode)
772 return XEXP (op, 0);
774 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
775 (OP:SI foo:SI) if OP is NEG or ABS. */
776 if ((GET_CODE (op) == ABS
777 || GET_CODE (op) == NEG)
778 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
779 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
780 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
781 return simplify_gen_unary (GET_CODE (op), mode,
782 XEXP (XEXP (op, 0), 0), mode);
784 /* (truncate:A (subreg:B (truncate:C X) 0)) is
785 (truncate:A X). */
786 if (GET_CODE (op) == SUBREG
787 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
788 && subreg_lowpart_p (op))
789 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
790 GET_MODE (XEXP (SUBREG_REG (op), 0)));
792 /* If we know that the value is already truncated, we can
793 replace the TRUNCATE with a SUBREG. Note that this is also
794 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
795 modes; we just have to apply a different definition for
796 truncation. But don't do this for an (LSHIFTRT (MULT ...))
797 since this will cause problems with the umulXi3_highpart
798 patterns. */
799 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
800 GET_MODE_BITSIZE (GET_MODE (op)))
801 ? (num_sign_bit_copies (op, GET_MODE (op))
802 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
803 - GET_MODE_BITSIZE (mode)))
804 : truncated_to_mode (mode, op))
805 && ! (GET_CODE (op) == LSHIFTRT
806 && GET_CODE (XEXP (op, 0)) == MULT))
807 return rtl_hooks.gen_lowpart_no_emit (mode, op);
809 /* A truncate of a comparison can be replaced with a subreg if
810 STORE_FLAG_VALUE permits. This is like the previous test,
811 but it works even if the comparison is done in a mode larger
812 than HOST_BITS_PER_WIDE_INT. */
813 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
815 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
816 return rtl_hooks.gen_lowpart_no_emit (mode, op);
820 if (DECIMAL_FLOAT_MODE_P (mode))
823 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
824 if (GET_CODE (op) == FLOAT_EXTEND
825 && GET_MODE (XEXP (op, 0)) == mode)
828 /* (float_truncate:SF (float_truncate:DF foo:XF))
829 = (float_truncate:SF foo:XF).
830 This may eliminate double rounding, so it is unsafe.
832 (float_truncate:SF (float_extend:XF foo:DF))
833 = (float_truncate:SF foo:DF).
835 (float_truncate:DF (float_extend:XF foo:SF))
836 = (float_extend:DF foo:SF). */
837 if ((GET_CODE (op) == FLOAT_TRUNCATE
838 && flag_unsafe_math_optimizations)
839 || GET_CODE (op) == FLOAT_EXTEND)
840 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
842 > GET_MODE_SIZE (mode)
843 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
847 /* (float_truncate (float x)) is (float x) */
848 if (GET_CODE (op) == FLOAT
849 && (flag_unsafe_math_optimizations
850 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
851 && ((unsigned)significand_size (GET_MODE (op))
852 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
853 - num_sign_bit_copies (XEXP (op, 0),
854 GET_MODE (XEXP (op, 0))))))))
855 return simplify_gen_unary (FLOAT, mode,
857 GET_MODE (XEXP (op, 0)));
859 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
860 (OP:SF foo:SF) if OP is NEG or ABS. */
861 if ((GET_CODE (op) == ABS
862 || GET_CODE (op) == NEG)
863 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
864 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
865 return simplify_gen_unary (GET_CODE (op), mode,
866 XEXP (XEXP (op, 0), 0), mode);
868 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
869 is (float_truncate:SF x). */
870 if (GET_CODE (op) == SUBREG
871 && subreg_lowpart_p (op)
872 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
873 return SUBREG_REG (op);
877 if (DECIMAL_FLOAT_MODE_P (mode))
880 /* (float_extend (float_extend x)) is (float_extend x)
882 (float_extend (float x)) is (float x) assuming that double
883 rounding can't happen.
885 if (GET_CODE (op) == FLOAT_EXTEND
886 || (GET_CODE (op) == FLOAT
887 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
888 && ((unsigned)significand_size (GET_MODE (op))
889 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
890 - num_sign_bit_copies (XEXP (op, 0),
891 GET_MODE (XEXP (op, 0)))))))
892 return simplify_gen_unary (GET_CODE (op), mode,
894 GET_MODE (XEXP (op, 0)));
898 case ABS:
899 /* (abs (neg <foo>)) -> (abs <foo>) */
900 if (GET_CODE (op) == NEG)
901 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
902 GET_MODE (XEXP (op, 0)));
904 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
906 if (GET_MODE (op) == VOIDmode)
909 /* If operand is something known to be positive, ignore the ABS. */
910 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
911 || ((GET_MODE_BITSIZE (GET_MODE (op))
912 <= HOST_BITS_PER_WIDE_INT)
913 && ((nonzero_bits (op, GET_MODE (op))
914 & ((unsigned HOST_WIDE_INT) 1
915 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
919 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
920 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
921 return gen_rtx_NEG (mode, op);
925 case FFS:
926 /* (ffs (*_extend <X>)) = (ffs <X>) */
927 if (GET_CODE (op) == SIGN_EXTEND
928 || GET_CODE (op) == ZERO_EXTEND)
929 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
930 GET_MODE (XEXP (op, 0)));
934 switch (GET_CODE (op))
938 /* (popcount (zero_extend <X>)) = (popcount <X>) */
939 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
940 GET_MODE (XEXP (op, 0)));
944 /* Rotations don't affect popcount. */
945 if (!side_effects_p (XEXP (op, 1)))
946 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
947 GET_MODE (XEXP (op, 0)));
956 switch (GET_CODE (op))
962 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
963 GET_MODE (XEXP (op, 0)));
967 /* Rotations don't affect parity. */
968 if (!side_effects_p (XEXP (op, 1)))
969 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
970 GET_MODE (XEXP (op, 0)));
979 /* (bswap (bswap x)) -> x. */
980 if (GET_CODE (op) == BSWAP)
981 return XEXP (op, 0);
982 break;
985 /* (float (sign_extend <X>)) = (float <X>). */
986 if (GET_CODE (op) == SIGN_EXTEND)
987 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
988 GET_MODE (XEXP (op, 0)));
991 case SIGN_EXTEND:
992 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
993 becomes just the MINUS if its mode is MODE. This allows
994 folding switch statements on machines using casesi (such as
995 the VAX). */
996 if (GET_CODE (op) == TRUNCATE
997 && GET_MODE (XEXP (op, 0)) == mode
998 && GET_CODE (XEXP (op, 0)) == MINUS
999 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1000 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1001 return XEXP (op, 0);
1003 /* Extending a widening multiplication should be canonicalized to
1004 a wider widening multiplication. */
1005 if (GET_CODE (op) == MULT)
1007 rtx lhs = XEXP (op, 0);
1008 rtx rhs = XEXP (op, 1);
1009 enum rtx_code lcode = GET_CODE (lhs);
1010 enum rtx_code rcode = GET_CODE (rhs);
1012 /* Widening multiplies usually extend both operands, but sometimes
1013 they use a shift to extract a portion of a register. */
1014 if ((lcode == SIGN_EXTEND
1015 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1016 && (rcode == SIGN_EXTEND
1017 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1019 enum machine_mode lmode = GET_MODE (lhs);
1020 enum machine_mode rmode = GET_MODE (rhs);
1023 if (lcode == ASHIFTRT)
1024 /* Number of bits not shifted off the end. */
1025 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1026 else /* lcode == SIGN_EXTEND */
1027 /* Size of inner mode. */
1028 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1030 if (rcode == ASHIFTRT)
1031 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1032 else /* rcode == SIGN_EXTEND */
1033 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1035 /* We can only widen multiplies if the result is mathematically
1036 equivalent. I.e. if overflow was impossible. */
1037 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1038 return simplify_gen_binary
1039 (MULT, mode,
1040 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1041 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
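/* BITS counts the significant bits of the two factors: an M-bit by
   N-bit signed multiply needs at most M+N result bits, so when BITS
   does not exceed the precision of OP's mode the product cannot have
   overflowed and the extension can be hoisted over the multiply. */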
1045 /* Check for a sign extension of a subreg of a promoted
1046 variable, where the promotion is sign-extended, and the
1047 target mode is the same as the variable's promotion. */
1048 if (GET_CODE (op) == SUBREG
1049 && SUBREG_PROMOTED_VAR_P (op)
1050 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1051 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1052 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1054 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1055 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1056 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1058 gcc_assert (GET_MODE_BITSIZE (mode)
1059 > GET_MODE_BITSIZE (GET_MODE (op)));
1060 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1061 GET_MODE (XEXP (op, 0)));
1064 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1065 is (sign_extend:M (subreg:O <X>)) if there is mode with
1066 GET_MODE_BITSIZE (N) - I bits.
1067 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1068 is similarly (zero_extend:M (subreg:O <X>)). */
1069 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1070 && GET_CODE (XEXP (op, 0)) == ASHIFT
1071 && CONST_INT_P (XEXP (op, 1))
1072 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1073 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1075 enum machine_mode tmode
1076 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1077 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1078 gcc_assert (GET_MODE_BITSIZE (mode)
1079 > GET_MODE_BITSIZE (GET_MODE (op)));
1080 if (tmode != BLKmode)
1083 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1084 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1085 ? SIGN_EXTEND : ZERO_EXTEND,
1086 mode, inner, tmode);
1090 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1091 /* As we do not know which address space the pointer is referring to,
1092 we can do this only if the target does not support different pointer
1093 or address modes depending on the address space. */
1094 if (target_default_pointer_address_modes_p ()
1095 && ! POINTERS_EXTEND_UNSIGNED
1096 && mode == Pmode && GET_MODE (op) == ptr_mode
1098 || (GET_CODE (op) == SUBREG
1099 && REG_P (SUBREG_REG (op))
1100 && REG_POINTER (SUBREG_REG (op))
1101 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1102 return convert_memory_address (Pmode, op);
1103 #endif
1104 break;
1106 case ZERO_EXTEND:
1107 /* Check for a zero extension of a subreg of a promoted
1108 variable, where the promotion is zero-extended, and the
1109 target mode is the same as the variable's promotion. */
1110 if (GET_CODE (op) == SUBREG
1111 && SUBREG_PROMOTED_VAR_P (op)
1112 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1113 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1114 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1116 /* Extending a widening multiplication should be canonicalized to
1117 a wider widening multiplication. */
1118 if (GET_CODE (op) == MULT)
1120 rtx lhs = XEXP (op, 0);
1121 rtx rhs = XEXP (op, 1);
1122 enum rtx_code lcode = GET_CODE (lhs);
1123 enum rtx_code rcode = GET_CODE (rhs);
1125 /* Widening multiplies usually extend both operands, but sometimes
1126 they use a shift to extract a portion of a register. */
1127 if ((lcode == ZERO_EXTEND
1128 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1129 && (rcode == ZERO_EXTEND
1130 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1132 enum machine_mode lmode = GET_MODE (lhs);
1133 enum machine_mode rmode = GET_MODE (rhs);
1136 if (lcode == LSHIFTRT)
1137 /* Number of bits not shifted off the end. */
1138 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1139 else /* lcode == ZERO_EXTEND */
1140 /* Size of inner mode. */
1141 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1143 if (rcode == LSHIFTRT)
1144 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1145 else /* rcode == ZERO_EXTEND */
1146 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1148 /* We can only widen multiplies if the result is mathematically
1149 equivalent. I.e. if overflow was impossible. */
1150 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1151 return simplify_gen_binary
1152 (MULT, mode,
1153 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1154 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1158 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1159 if (GET_CODE (op) == ZERO_EXTEND)
1160 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1161 GET_MODE (XEXP (op, 0)));
1163 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1164 is (zero_extend:M (subreg:O <X>)) if there is mode with
1165 GET_MODE_BITSIZE (N) - I bits. */
1166 if (GET_CODE (op) == LSHIFTRT
1167 && GET_CODE (XEXP (op, 0)) == ASHIFT
1168 && CONST_INT_P (XEXP (op, 1))
1169 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1170 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1172 enum machine_mode tmode
1173 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1174 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1175 if (tmode != BLKmode)
1178 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1179 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1183 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1184 /* As we do not know which address space the pointer is referring to,
1185 we can do this only if the target does not support different pointer
1186 or address modes depending on the address space. */
1187 if (target_default_pointer_address_modes_p ()
1188 && POINTERS_EXTEND_UNSIGNED > 0
1189 && mode == Pmode && GET_MODE (op) == ptr_mode
1191 || (GET_CODE (op) == SUBREG
1192 && REG_P (SUBREG_REG (op))
1193 && REG_POINTER (SUBREG_REG (op))
1194 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1195 return convert_memory_address (Pmode, op);
1206 /* Try to compute the value of a unary operation CODE whose output mode is to
1207 be MODE with input operand OP whose mode was originally OP_MODE.
1208 Return zero if the value cannot be computed. */
1210 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1211 rtx op, enum machine_mode op_mode)
1213 unsigned int width = GET_MODE_BITSIZE (mode);
1214 unsigned int op_width = GET_MODE_BITSIZE (op_mode);
1216 if (code == VEC_DUPLICATE)
1218 gcc_assert (VECTOR_MODE_P (mode));
1219 if (GET_MODE (op) != VOIDmode)
1221 if (!VECTOR_MODE_P (GET_MODE (op)))
1222 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1224 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1227 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1228 || GET_CODE (op) == CONST_VECTOR)
1230 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1231 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1232 rtvec v = rtvec_alloc (n_elts);
1235 if (GET_CODE (op) != CONST_VECTOR)
1236 for (i = 0; i < n_elts; i++)
1237 RTVEC_ELT (v, i) = op;
1240 enum machine_mode inmode = GET_MODE (op);
1241 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1242 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1244 gcc_assert (in_n_elts < n_elts);
1245 gcc_assert ((n_elts % in_n_elts) == 0);
1246 for (i = 0; i < n_elts; i++)
1247 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1249 return gen_rtx_CONST_VECTOR (mode, v);
1253 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1255 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1256 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1257 enum machine_mode opmode = GET_MODE (op);
1258 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1259 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1260 rtvec v = rtvec_alloc (n_elts);
1263 gcc_assert (op_n_elts == n_elts);
1264 for (i = 0; i < n_elts; i++)
1266 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1267 CONST_VECTOR_ELT (op, i),
1268 GET_MODE_INNER (opmode));
1271 RTVEC_ELT (v, i) = x;
1273 return gen_rtx_CONST_VECTOR (mode, v);
1276 /* The order of these tests is critical so that, for example, we don't
1277 check the wrong mode (input vs. output) for a conversion operation,
1278 such as FIX. At some point, this should be simplified. */
1280 if (code == FLOAT && GET_MODE (op) == VOIDmode
1281 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1283 HOST_WIDE_INT hv, lv;
1286 if (CONST_INT_P (op))
1287 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1289 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1291 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1292 d = real_value_truncate (mode, d);
1293 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1295 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1296 && (GET_CODE (op) == CONST_DOUBLE
1297 || CONST_INT_P (op)))
1299 HOST_WIDE_INT hv, lv;
1302 if (CONST_INT_P (op))
1303 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1305 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1307 if (op_mode == VOIDmode)
1309 /* We don't know how to interpret negative-looking numbers in
1310 this case, so don't try to fold those. */
1311 if (hv < 0)
1312 return 0;
1314 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1317 hv = 0, lv &= GET_MODE_MASK (op_mode);
1319 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1320 d = real_value_truncate (mode, d);
1321 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1324 if (CONST_INT_P (op)
1325 && width <= HOST_BITS_PER_WIDE_INT
1326 && op_width <= HOST_BITS_PER_WIDE_INT && op_width > 0)
1328 HOST_WIDE_INT arg0 = INTVAL (op);
1342 val = (arg0 >= 0 ? arg0 : - arg0);
1346 arg0 &= GET_MODE_MASK (op_mode);
1347 val = ffs_hwi (arg0);
1351 arg0 &= GET_MODE_MASK (op_mode);
1352 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (op_mode, val))
1355 val = GET_MODE_BITSIZE (op_mode) - floor_log2 (arg0) - 1;
1359 arg0 &= GET_MODE_MASK (op_mode);
1361 val = GET_MODE_BITSIZE (op_mode) - 1;
1363 val = GET_MODE_BITSIZE (op_mode) - floor_log2 (arg0) - 2;
1365 val = GET_MODE_BITSIZE (op_mode) - floor_log2 (~arg0) - 2;
1369 arg0 &= GET_MODE_MASK (op_mode);
1372 /* Even if the value at zero is undefined, we have to come
1373 up with some replacement. Seems good enough. */
1374 if (! CTZ_DEFINED_VALUE_AT_ZERO (op_mode, val))
1375 val = GET_MODE_BITSIZE (op_mode);
1378 val = ctz_hwi (arg0);
1382 arg0 &= GET_MODE_MASK (op_mode);
1385 val++, arg0 &= arg0 - 1;
1389 arg0 &= GET_MODE_MASK (op_mode);
1392 val++, arg0 &= arg0 - 1;
1401 for (s = 0; s < width; s += 8)
1403 unsigned int d = width - s - 8;
1404 unsigned HOST_WIDE_INT byte;
1405 byte = (arg0 >> s) & 0xff;
1406 val |= byte << d;
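/* E.g. for WIDTH == 32 and ARG0 == 0x12345678 this loop produces
   0x78563412: the byte at position S (from the least significant end)
   is moved to position WIDTH - S - 8. */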
1416 /* When zero-extending a CONST_INT, we need to know its
1417 original mode. */
1418 gcc_assert (op_mode != VOIDmode);
1419 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1421 /* If we were really extending the mode,
1422 we would have to distinguish between zero-extension
1423 and sign-extension. */
1424 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1427 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1428 val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
1429 << GET_MODE_BITSIZE (op_mode));
1435 if (op_mode == VOIDmode)
1437 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1439 /* If we were really extending the mode,
1440 we would have to distinguish between zero-extension
1441 and sign-extension. */
1442 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1445 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1448 = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
1449 << GET_MODE_BITSIZE (op_mode));
1450 if (val & ((unsigned HOST_WIDE_INT) 1
1451 << (GET_MODE_BITSIZE (op_mode) - 1)))
1453 -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1461 case FLOAT_TRUNCATE:
1473 return gen_int_mode (val, mode);
1476 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1477 for a DImode operation on a CONST_INT. */
1478 else if (GET_MODE (op) == VOIDmode
1479 && width <= HOST_BITS_PER_WIDE_INT * 2
1480 && (GET_CODE (op) == CONST_DOUBLE
1481 || CONST_INT_P (op)))
1483 unsigned HOST_WIDE_INT l1, lv;
1484 HOST_WIDE_INT h1, hv;
1486 if (GET_CODE (op) == CONST_DOUBLE)
1487 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1489 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1499 neg_double (l1, h1, &lv, &hv);
1504 neg_double (l1, h1, &lv, &hv);
1514 lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1522 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1523 - HOST_BITS_PER_WIDE_INT;
1525 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1526 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1527 lv = GET_MODE_BITSIZE (mode);
1535 lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1536 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1537 lv = GET_MODE_BITSIZE (mode);
1565 for (s = 0; s < width; s += 8)
1567 unsigned int d = width - s - 8;
1568 unsigned HOST_WIDE_INT byte;
1570 if (s < HOST_BITS_PER_WIDE_INT)
1571 byte = (l1 >> s) & 0xff;
1573 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1575 if (d < HOST_BITS_PER_WIDE_INT)
1578 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1584 /* This is just a change-of-mode, so do nothing. */
1589 gcc_assert (op_mode != VOIDmode);
1591 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1595 lv = l1 & GET_MODE_MASK (op_mode);
1599 if (op_mode == VOIDmode
1600 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1604 lv = l1 & GET_MODE_MASK (op_mode);
1605 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1606 && (lv & ((unsigned HOST_WIDE_INT) 1
1607 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1608 lv -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1610 hv = HWI_SIGN_EXTEND (lv);
1621 return immed_double_const (lv, hv, mode);
1624 else if (GET_CODE (op) == CONST_DOUBLE
1625 && SCALAR_FLOAT_MODE_P (mode)
1626 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1628 REAL_VALUE_TYPE d, t;
1629 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1634 if (HONOR_SNANS (mode) && real_isnan (&d))
1636 real_sqrt (&t, mode, &d);
1640 d = real_value_abs (&d);
1643 d = real_value_negate (&d);
1645 case FLOAT_TRUNCATE:
1646 d = real_value_truncate (mode, d);
1649 /* All this does is change the mode, unless changing
1651 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1652 real_convert (&d, mode, &d);
1655 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1662 real_to_target (tmp, &d, GET_MODE (op));
1663 for (i = 0; i < 4; i++)
1665 real_from_target (&d, tmp, mode);
1671 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1674 else if (GET_CODE (op) == CONST_DOUBLE
1675 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1676 && GET_MODE_CLASS (mode) == MODE_INT
1677 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1679 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1680 operators are intentionally left unspecified (to ease implementation
1681 by target backends), for consistency, this routine implements the
1682 same semantics for constant folding as used by the middle-end. */
1684 /* This was formerly used only for non-IEEE float.
1685 eggert@twinsun.com says it is safe for IEEE also. */
1686 HOST_WIDE_INT xh, xl, th, tl;
1687 REAL_VALUE_TYPE x, t;
1688 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1692 if (REAL_VALUE_ISNAN (x))
1695 /* Test against the signed upper bound. */
1696 if (width > HOST_BITS_PER_WIDE_INT)
1698 th = ((unsigned HOST_WIDE_INT) 1
1699 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1705 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1707 real_from_integer (&t, VOIDmode, tl, th, 0);
1708 if (REAL_VALUES_LESS (t, x))
1715 /* Test against the signed lower bound. */
1716 if (width > HOST_BITS_PER_WIDE_INT)
1718 th = (unsigned HOST_WIDE_INT) (-1)
1719 << (width - HOST_BITS_PER_WIDE_INT - 1);
1725 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1727 real_from_integer (&t, VOIDmode, tl, th, 0);
1728 if (REAL_VALUES_LESS (x, t))
1734 REAL_VALUE_TO_INT (&xl, &xh, x);
1738 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1741 /* Test against the unsigned upper bound. */
1742 if (width == 2*HOST_BITS_PER_WIDE_INT)
1747 else if (width >= HOST_BITS_PER_WIDE_INT)
1749 th = ((unsigned HOST_WIDE_INT) 1
1750 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1756 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1758 real_from_integer (&t, VOIDmode, tl, th, 1);
1759 if (REAL_VALUES_LESS (t, x))
1766 REAL_VALUE_TO_INT (&xl, &xh, x);
1772 return immed_double_const (xl, xh, mode);
1778 /* Subroutine of simplify_binary_operation to simplify a commutative,
1779 associative binary operation CODE with result mode MODE, operating
1780 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1781 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1782 canonicalization is possible. */
1785 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1790 /* Linearize the operator to the left. */
1791 if (GET_CODE (op1) == code)
1793 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1794 if (GET_CODE (op0) == code)
1796 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1797 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1800 /* "a op (b op c)" becomes "(b op c) op a". */
1801 if (! swap_commutative_operands_p (op1, op0))
1802 return simplify_gen_binary (code, mode, op1, op0);
1809 if (GET_CODE (op0) == code)
1811 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1812 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1814 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1815 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1818 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1819 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1821 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1823 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1824 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1826 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1833 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1834 and OP1. Return 0 if no simplification is possible.
1836 Don't use this for relational operations such as EQ or LT.
1837 Use simplify_relational_operation instead. */
1839 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1842 rtx trueop0, trueop1;
1845 /* Relational operations don't work here. We must know the mode
1846 of the operands in order to do the comparison correctly.
1847 Assuming a full word can give incorrect results.
1848 Consider comparing 128 with -128 in QImode. */
1849 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1850 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1852 /* Make sure the constant is second. */
1853 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1854 && swap_commutative_operands_p (op0, op1))
1856 tem = op0, op0 = op1, op1 = tem;
1859 trueop0 = avoid_constant_pool_reference (op0);
1860 trueop1 = avoid_constant_pool_reference (op1);
1862 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1865 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1868 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1869 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1870 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1871 actual constants. */
1874 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1875 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1877 rtx tem, reversed, opleft, opright;
1879 unsigned int width = GET_MODE_BITSIZE (mode);
1881 /* Even if we can't compute a constant result,
1882 there are some cases worth simplifying. */
1884 switch (code)
1885 {
1886 case PLUS:
1887 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1888 when x is NaN, infinite, or finite and nonzero. They aren't
1889 when x is -0 and the rounding mode is not towards -infinity,
1890 since (-0) + 0 is then 0. */
1891 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1892 return op0;
1894 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1895 transformations are safe even for IEEE. */
1896 if (GET_CODE (op0) == NEG)
1897 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1898 else if (GET_CODE (op1) == NEG)
1899 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1901 /* (~a) + 1 -> -a */
1902 if (INTEGRAL_MODE_P (mode)
1903 && GET_CODE (op0) == NOT
1904 && trueop1 == const1_rtx)
1905 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
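/* Again by the two's complement definition of negation:
   ~A + 1 == -A. */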
1907 /* Handle both-operands-constant cases. We can only add
1908 CONST_INTs to constants since the sum of relocatable symbols
1909 can't be handled by most assemblers. Don't add CONST_INT
1910 to CONST_INT since overflow won't be computed properly if wider
1911 than HOST_BITS_PER_WIDE_INT. */
1913 if ((GET_CODE (op0) == CONST
1914 || GET_CODE (op0) == SYMBOL_REF
1915 || GET_CODE (op0) == LABEL_REF)
1916 && CONST_INT_P (op1))
1917 return plus_constant (op0, INTVAL (op1));
1918 else if ((GET_CODE (op1) == CONST
1919 || GET_CODE (op1) == SYMBOL_REF
1920 || GET_CODE (op1) == LABEL_REF)
1921 && CONST_INT_P (op0))
1922 return plus_constant (op1, INTVAL (op0));
1924 /* See if this is something like X * C - X or vice versa or
1925 if the multiplication is written as a shift. If so, we can
1926 distribute and make a new multiply, shift, or maybe just
1927 have X (if C is 2 in the example above). But don't make
1928 something more expensive than we had before. */
1930 if (SCALAR_INT_MODE_P (mode))
1932 double_int coeff0, coeff1;
1933 rtx lhs = op0, rhs = op1;
1935 coeff0 = double_int_one;
1936 coeff1 = double_int_one;
1938 if (GET_CODE (lhs) == NEG)
1940 coeff0 = double_int_minus_one;
1941 lhs = XEXP (lhs, 0);
1943 else if (GET_CODE (lhs) == MULT
1944 && CONST_INT_P (XEXP (lhs, 1)))
1946 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1947 lhs = XEXP (lhs, 0);
1949 else if (GET_CODE (lhs) == ASHIFT
1950 && CONST_INT_P (XEXP (lhs, 1))
1951 && INTVAL (XEXP (lhs, 1)) >= 0
1952 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1954 coeff0 = double_int_setbit (double_int_zero,
1955 INTVAL (XEXP (lhs, 1)));
1956 lhs = XEXP (lhs, 0);
1959 if (GET_CODE (rhs) == NEG)
1961 coeff1 = double_int_minus_one;
1962 rhs = XEXP (rhs, 0);
1964 else if (GET_CODE (rhs) == MULT
1965 && CONST_INT_P (XEXP (rhs, 1)))
1967 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
1968 rhs = XEXP (rhs, 0);
1970 else if (GET_CODE (rhs) == ASHIFT
1971 && CONST_INT_P (XEXP (rhs, 1))
1972 && INTVAL (XEXP (rhs, 1)) >= 0
1973 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1975 coeff1 = double_int_setbit (double_int_zero,
1976 INTVAL (XEXP (rhs, 1)));
1977 rhs = XEXP (rhs, 0);
1980 if (rtx_equal_p (lhs, rhs))
1982 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1985 bool speed = optimize_function_for_speed_p (cfun);
1987 val = double_int_add (coeff0, coeff1);
1988 coeff = immed_double_int_const (val, mode);
1990 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1991 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1992 ? tem : orig;
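/* For example, (plus (mult X 4) X) has coefficients 4 and 1 and folds
   to (mult X 5); the rtx_cost comparison keeps the original form when
   the combined multiply would actually be more expensive. */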
1996 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1997 if ((CONST_INT_P (op1)
1998 || GET_CODE (op1) == CONST_DOUBLE)
1999 && GET_CODE (op0) == XOR
2000 && (CONST_INT_P (XEXP (op0, 1))
2001 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2002 && mode_signbit_p (mode, op1))
2003 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2004 simplify_gen_binary (XOR, mode, op1,
2007 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2008 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2009 && GET_CODE (op0) == MULT
2010 && GET_CODE (XEXP (op0, 0)) == NEG)
2014 in1 = XEXP (XEXP (op0, 0), 0);
2015 in2 = XEXP (op0, 1);
2016 return simplify_gen_binary (MINUS, mode, op1,
2017 simplify_gen_binary (MULT, mode,
2021 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2022 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2024 if (COMPARISON_P (op0)
2025 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2026 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2027 && (reversed = reversed_comparison (op0, mode)))
2028 return
2029 simplify_gen_unary (NEG, mode, reversed, mode);
2031 /* If one of the operands is a PLUS or a MINUS, see if we can
2032 simplify this by the associative law.
2033 Don't use the associative law for floating point.
2034 The inaccuracy makes it nonassociative,
2035 and subtle programs can break if operations are associated. */
2037 if (INTEGRAL_MODE_P (mode)
2038 && (plus_minus_operand_p (op0)
2039 || plus_minus_operand_p (op1))
2040 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2043 /* Reassociate floating point addition only when the user
2044 specifies associative math operations. */
2045 if (FLOAT_MODE_P (mode)
2046 && flag_associative_math)
2048 tem = simplify_associative_operation (code, mode, op0, op1);
2055 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2056 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2057 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2058 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2060 rtx xop00 = XEXP (op0, 0);
2061 rtx xop10 = XEXP (op1, 0);
2064 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2066 if (REG_P (xop00) && REG_P (xop10)
2067 && GET_MODE (xop00) == GET_MODE (xop10)
2068 && REGNO (xop00) == REGNO (xop10)
2069 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2070 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2071 return xop00;
2072 }
2073 break;
2075 case MINUS:
2077 /* We can't assume x-x is 0 even with non-IEEE floating point,
2078 but since it is zero except in very strange circumstances, we
2079 will treat it as zero with -ffinite-math-only. */
2080 if (rtx_equal_p (trueop0, trueop1)
2081 && ! side_effects_p (op0)
2082 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2083 return CONST0_RTX (mode);
2085 /* Change subtraction from zero into negation. (0 - x) is the
2086 same as -x when x is NaN, infinite, or finite and nonzero.
2087 But if the mode has signed zeros, and does not round towards
2088 -infinity, then 0 - 0 is 0, not -0. */
2089 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2090 return simplify_gen_unary (NEG, mode, op1, mode);
2092 /* (-1 - a) is ~a. */
2093 if (trueop0 == constm1_rtx)
2094 return simplify_gen_unary (NOT, mode, op1, mode);
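/* Subtracting A from all-ones never borrows, so every bit of A is
   simply inverted: -1 - A == ~A. */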
2096 /* Subtracting 0 has no effect unless the mode has signed zeros
2097 and supports rounding towards -infinity. In such a case,
2098 0 - 0 is -0. */
2099 if (!(HONOR_SIGNED_ZEROS (mode)
2100 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2101 && trueop1 == CONST0_RTX (mode))
2104 /* See if this is something like X * C - X or vice versa or
2105 if the multiplication is written as a shift. If so, we can
2106 distribute and make a new multiply, shift, or maybe just
2107 have X (if C is 2 in the example above). But don't make
2108 something more expensive than we had before. */
2110 if (SCALAR_INT_MODE_P (mode))
2112 double_int coeff0, negcoeff1;
2113 rtx lhs = op0, rhs = op1;
2115 coeff0 = double_int_one;
2116 negcoeff1 = double_int_minus_one;
2118 if (GET_CODE (lhs) == NEG)
2120 coeff0 = double_int_minus_one;
2121 lhs = XEXP (lhs, 0);
2123 else if (GET_CODE (lhs) == MULT
2124 && CONST_INT_P (XEXP (lhs, 1)))
2126 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2127 lhs = XEXP (lhs, 0);
2129 else if (GET_CODE (lhs) == ASHIFT
2130 && CONST_INT_P (XEXP (lhs, 1))
2131 && INTVAL (XEXP (lhs, 1)) >= 0
2132 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2134 coeff0 = double_int_setbit (double_int_zero,
2135 INTVAL (XEXP (lhs, 1)));
2136 lhs = XEXP (lhs, 0);
2139 if (GET_CODE (rhs) == NEG)
2141 negcoeff1 = double_int_one;
2142 rhs = XEXP (rhs, 0);
2144 else if (GET_CODE (rhs) == MULT
2145 && CONST_INT_P (XEXP (rhs, 1)))
2147 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2148 rhs = XEXP (rhs, 0);
2150 else if (GET_CODE (rhs) == ASHIFT
2151 && CONST_INT_P (XEXP (rhs, 1))
2152 && INTVAL (XEXP (rhs, 1)) >= 0
2153 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2155 negcoeff1 = double_int_setbit (double_int_zero,
2156 INTVAL (XEXP (rhs, 1)));
2157 negcoeff1 = double_int_neg (negcoeff1);
2158 rhs = XEXP (rhs, 0);
2161 if (rtx_equal_p (lhs, rhs))
2163 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2166 bool speed = optimize_function_for_speed_p (cfun);
2168 val = double_int_add (coeff0, negcoeff1);
2169 coeff = immed_double_int_const (val, mode);
2171 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2172 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
2173 ? tem : orig;
2177 /* (a - (-b)) -> (a + b). True even for IEEE. */
2178 if (GET_CODE (op1) == NEG)
2179 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2181 /* (-x - c) may be simplified as (-c - x). */
2182 if (GET_CODE (op0) == NEG
2183 && (CONST_INT_P (op1)
2184 || GET_CODE (op1) == CONST_DOUBLE))
2186 tem = simplify_unary_operation (NEG, mode, op1, mode);
2188 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2191 /* Don't let a relocatable value get a negative coeff. */
2192 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2193 return simplify_gen_binary (PLUS, mode,
2195 neg_const_int (mode, op1));
2197 /* (x - (x & y)) -> (x & ~y) */
2198 if (GET_CODE (op1) == AND)
2200 if (rtx_equal_p (op0, XEXP (op1, 0)))
2202 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2203 GET_MODE (XEXP (op1, 1)));
2204 return simplify_gen_binary (AND, mode, op0, tem);
2206 if (rtx_equal_p (op0, XEXP (op1, 1)))
2208 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2209 GET_MODE (XEXP (op1, 0)));
2210 return simplify_gen_binary (AND, mode, op0, tem);
2214 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2215 by reversing the comparison code if valid. */
2216 if (STORE_FLAG_VALUE == 1
2217 && trueop0 == const1_rtx
2218 && COMPARISON_P (op1)
2219 && (reversed = reversed_comparison (op1, mode)))
2220 return reversed;
2222 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2223 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2224 && GET_CODE (op1) == MULT
2225 && GET_CODE (XEXP (op1, 0)) == NEG)
2227 rtx in1, in2;
2229 in1 = XEXP (XEXP (op1, 0), 0);
2230 in2 = XEXP (op1, 1);
2231 return simplify_gen_binary (PLUS, mode,
2232 simplify_gen_binary (MULT, mode,
2233 in1, in2),
2234 op0);
2237 /* Canonicalize (minus (neg A) (mult B C)) to
2238 (minus (mult (neg B) C) A). */
2239 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2240 && GET_CODE (op1) == MULT
2241 && GET_CODE (op0) == NEG)
2243 rtx in1, in2;
2245 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2246 in2 = XEXP (op1, 1);
2247 return simplify_gen_binary (MINUS, mode,
2248 simplify_gen_binary (MULT, mode,
2249 in1, in2),
2250 XEXP (op0, 0));
2253 /* If one of the operands is a PLUS or a MINUS, see if we can
2254 simplify this by the associative law. This will, for example,
2255 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2256 Don't use the associative law for floating point.
2257 The inaccuracy makes it nonassociative,
2258 and subtle programs can break if operations are associated. */
2260 if (INTEGRAL_MODE_P (mode)
2261 && (plus_minus_operand_p (op0)
2262 || plus_minus_operand_p (op1))
2263 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2264 return tem;
2265 break;
2267 case MULT:
2268 if (trueop1 == constm1_rtx)
2269 return simplify_gen_unary (NEG, mode, op0, mode);
2271 if (GET_CODE (op0) == NEG)
2273 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2274 /* If op1 is a MULT as well and simplify_unary_operation
2275 just moved the NEG to the second operand, simplify_gen_binary
2276 below could through simplify_associative_operation move
2277 the NEG around again and recurse endlessly. */
2278 if (temp
2279 && GET_CODE (op1) == MULT
2280 && GET_CODE (temp) == MULT
2281 && XEXP (op1, 0) == XEXP (temp, 0)
2282 && GET_CODE (XEXP (temp, 1)) == NEG
2283 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2284 temp = NULL_RTX;
2285 if (temp)
2286 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2288 if (GET_CODE (op1) == NEG)
2290 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2291 /* If op0 is a MULT as well and simplify_unary_operation
2292 just moved the NEG to the second operand, simplify_gen_binary
2293 below could through simplify_associative_operation move
2294 the NEG around again and recurse endlessly. */
2295 if (temp
2296 && GET_CODE (op0) == MULT
2297 && GET_CODE (temp) == MULT
2298 && XEXP (op0, 0) == XEXP (temp, 0)
2299 && GET_CODE (XEXP (temp, 1)) == NEG
2300 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2301 temp = NULL_RTX;
2302 if (temp)
2303 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2306 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2307 x is NaN, since x * 0 is then also NaN. Nor is it valid
2308 when the mode has signed zeros, since multiplying a negative
2309 number by 0 will give -0, not 0. */
2310 if (!HONOR_NANS (mode)
2311 && !HONOR_SIGNED_ZEROS (mode)
2312 && trueop1 == CONST0_RTX (mode)
2313 && ! side_effects_p (op0))
2314 return op1;
2316 /* In IEEE floating point, x*1 is not equivalent to x for
2317 signalling NaNs. */
2318 if (!HONOR_SNANS (mode)
2319 && trueop1 == CONST1_RTX (mode))
2320 return op0;
2322 /* Convert multiply by constant power of two into shift unless
2323 we are still generating RTL. This test is a kludge. */
2324 if (CONST_INT_P (trueop1)
2325 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2326 /* If the mode is larger than the host word size, and the
2327 uppermost bit is set, then this isn't a power of two due
2328 to implicit sign extension. */
2329 && (width <= HOST_BITS_PER_WIDE_INT
2330 || val != HOST_BITS_PER_WIDE_INT - 1))
2331 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
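/* For illustration: with trueop1 == 8, exact_log2 returns 3 and the rule
   above emits (ashift op0 3). A standalone C analogue, computing x * 8
   modulo the word size:

     unsigned mul8 (unsigned x)
     {
       return x << 3;
     }
*/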
2333 /* Likewise for multipliers wider than a word. */
2334 if (GET_CODE (trueop1) == CONST_DOUBLE
2335 && (GET_MODE (trueop1) == VOIDmode
2336 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2337 && GET_MODE (op0) == mode
2338 && CONST_DOUBLE_LOW (trueop1) == 0
2339 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2340 return simplify_gen_binary (ASHIFT, mode, op0,
2341 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2343 /* x*2 is x+x and x*(-1) is -x */
2344 if (GET_CODE (trueop1) == CONST_DOUBLE
2345 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2346 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2347 && GET_MODE (op0) == mode)
2349 REAL_VALUE_TYPE d;
2350 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2352 if (REAL_VALUES_EQUAL (d, dconst2))
2353 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2355 if (!HONOR_SNANS (mode)
2356 && REAL_VALUES_EQUAL (d, dconstm1))
2357 return simplify_gen_unary (NEG, mode, op0, mode);
2360 /* Optimize -x * -x as x * x. */
2361 if (FLOAT_MODE_P (mode)
2362 && GET_CODE (op0) == NEG
2363 && GET_CODE (op1) == NEG
2364 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2365 && !side_effects_p (XEXP (op0, 0)))
2366 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2368 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2369 if (SCALAR_FLOAT_MODE_P (mode)
2370 && GET_CODE (op0) == ABS
2371 && GET_CODE (op1) == ABS
2372 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2373 && !side_effects_p (XEXP (op0, 0)))
2374 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2376 /* Reassociate multiplication, but for floating point MULTs
2377 only when the user specifies unsafe math optimizations. */
2378 if (! FLOAT_MODE_P (mode)
2379 || flag_unsafe_math_optimizations)
2381 tem = simplify_associative_operation (code, mode, op0, op1);
2382 if (tem)
2383 return tem;
2385 break;
2387 case IOR:
2388 if (trueop1 == CONST0_RTX (mode))
2389 return op0;
2390 if (CONST_INT_P (trueop1)
2391 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2392 == GET_MODE_MASK (mode)))
2393 return op1;
2394 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2395 return op0;
2396 /* A | (~A) -> -1 */
2397 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2398 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2399 && ! side_effects_p (op0)
2400 && SCALAR_INT_MODE_P (mode))
2401 return constm1_rtx;
2403 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2404 if (CONST_INT_P (op1)
2405 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2406 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
2407 return op1;
2409 /* Canonicalize (X & C1) | C2. */
2410 if (GET_CODE (op0) == AND
2411 && CONST_INT_P (trueop1)
2412 && CONST_INT_P (XEXP (op0, 1)))
2414 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2415 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2416 HOST_WIDE_INT c2 = INTVAL (trueop1);
2418 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2419 if ((c1 & c2) == c1
2420 && !side_effects_p (XEXP (op0, 0)))
2421 return trueop1;
2423 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2424 if (((c1|c2) & mask) == mask)
2425 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2427 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2428 if (((c1 & ~c2) & mask) != (c1 & mask))
2430 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2431 gen_int_mode (c1 & ~c2, mode));
2432 return simplify_gen_binary (IOR, mode, tem, op1);
2436 /* Convert (A & B) | A to A. */
2437 if (GET_CODE (op0) == AND
2438 && (rtx_equal_p (XEXP (op0, 0), op1)
2439 || rtx_equal_p (XEXP (op0, 1), op1))
2440 && ! side_effects_p (XEXP (op0, 0))
2441 && ! side_effects_p (XEXP (op0, 1)))
2442 return op1;
2444 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2445 mode size to (rotate A CX). */
2447 if (GET_CODE (op1) == ASHIFT
2448 || GET_CODE (op1) == SUBREG)
2449 {
2450 opleft = op1;
2451 opright = op0;
2452 }
2453 else
2454 {
2455 opright = op1;
2456 opleft = op0;
2457 }
2459 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2460 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2461 && CONST_INT_P (XEXP (opleft, 1))
2462 && CONST_INT_P (XEXP (opright, 1))
2463 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2464 == GET_MODE_BITSIZE (mode)))
2465 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
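/* For illustration: in a 32-bit mode, (ior (ashift x 3) (lshiftrt x 29))
   matches because 3 + 29 == 32 and becomes (rotate x 3). A standalone C
   analogue of the pattern being recognized:

     unsigned rotl3 (unsigned x)
     {
       return (x << 3) | (x >> 29);
     }
*/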
2467 /* Same, but for ashift that has been "simplified" to a wider mode
2468 by simplify_shift_const. */
2470 if (GET_CODE (opleft) == SUBREG
2471 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2472 && GET_CODE (opright) == LSHIFTRT
2473 && GET_CODE (XEXP (opright, 0)) == SUBREG
2474 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2475 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2476 && (GET_MODE_SIZE (GET_MODE (opleft))
2477 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2478 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2479 SUBREG_REG (XEXP (opright, 0)))
2480 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2481 && CONST_INT_P (XEXP (opright, 1))
2482 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2483 == GET_MODE_BITSIZE (mode)))
2484 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2485 XEXP (SUBREG_REG (opleft), 1));
2487 /* If we have (ior (and X C1) C2), simplify this by making
2488 C1 as small as possible if C1 actually changes. */
2489 if (CONST_INT_P (op1)
2490 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2491 || INTVAL (op1) > 0)
2492 && GET_CODE (op0) == AND
2493 && CONST_INT_P (XEXP (op0, 1))
2494 && CONST_INT_P (op1)
2495 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2496 return simplify_gen_binary (IOR, mode,
2497 simplify_gen_binary
2498 (AND, mode, XEXP (op0, 0),
2499 GEN_INT (UINTVAL (XEXP (op0, 1))
2500 & ~UINTVAL (op1))),
2501 op1);
2503 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2504 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2505 the PLUS does not affect any of the bits in OP1: then we can do
2506 the IOR as a PLUS and we can associate. This is valid if OP1
2507 can be safely shifted left C bits. */
2508 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2509 && GET_CODE (XEXP (op0, 0)) == PLUS
2510 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2511 && CONST_INT_P (XEXP (op0, 1))
2512 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2514 int count = INTVAL (XEXP (op0, 1));
2515 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2517 if (mask >> count == INTVAL (trueop1)
2518 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2519 return simplify_gen_binary (ASHIFTRT, mode,
2520 plus_constant (XEXP (op0, 0), mask),
2521 XEXP (op0, 1));
2524 tem = simplify_associative_operation (code, mode, op0, op1);
2525 if (tem)
2526 return tem;
2527 break;
2529 case XOR:
2530 if (trueop1 == CONST0_RTX (mode))
2531 return op0;
2532 if (CONST_INT_P (trueop1)
2533 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2534 == GET_MODE_MASK (mode)))
2535 return simplify_gen_unary (NOT, mode, op0, mode);
2536 if (rtx_equal_p (trueop0, trueop1)
2537 && ! side_effects_p (op0)
2538 && GET_MODE_CLASS (mode) != MODE_CC)
2539 return CONST0_RTX (mode);
2541 /* Canonicalize XOR of the most significant bit to PLUS. */
2542 if ((CONST_INT_P (op1)
2543 || GET_CODE (op1) == CONST_DOUBLE)
2544 && mode_signbit_p (mode, op1))
2545 return simplify_gen_binary (PLUS, mode, op0, op1);
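/* For illustration: adding the sign bit can only flip the sign bit (any
   carry falls out of the mode), so in a 32-bit mode x ^ 0x80000000 equals
   x + 0x80000000; the PLUS form is the canonical one. */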
2546 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2547 if ((CONST_INT_P (op1)
2548 || GET_CODE (op1) == CONST_DOUBLE)
2549 && GET_CODE (op0) == PLUS
2550 && (CONST_INT_P (XEXP (op0, 1))
2551 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2552 && mode_signbit_p (mode, XEXP (op0, 1)))
2553 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2554 simplify_gen_binary (XOR, mode, op1,
2555 XEXP (op0, 1)));
2557 /* If we are XORing two things that have no bits in common,
2558 convert them into an IOR. This helps to detect rotation encoded
2559 using those methods and possibly other simplifications. */
2561 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2562 && (nonzero_bits (op0, mode)
2563 & nonzero_bits (op1, mode)) == 0)
2564 return (simplify_gen_binary (IOR, mode, op0, op1));
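/* For illustration: if a & b == 0 then a ^ b == a | b, e.g.
   0xf0 ^ 0x0f == 0xf0 | 0x0f == 0xff. */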
2566 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2567 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2568 (NOT y). */
2570 int num_negated = 0;
2572 if (GET_CODE (op0) == NOT)
2573 num_negated++, op0 = XEXP (op0, 0);
2574 if (GET_CODE (op1) == NOT)
2575 num_negated++, op1 = XEXP (op1, 0);
2577 if (num_negated == 2)
2578 return simplify_gen_binary (XOR, mode, op0, op1);
2579 else if (num_negated == 1)
2580 return simplify_gen_unary (NOT, mode,
2581 simplify_gen_binary (XOR, mode, op0, op1),
2582 mode);
2585 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2586 correspond to a machine insn or result in further simplifications
2587 if B is a constant. */
2589 if (GET_CODE (op0) == AND
2590 && rtx_equal_p (XEXP (op0, 1), op1)
2591 && ! side_effects_p (op1))
2592 return simplify_gen_binary (AND, mode,
2593 simplify_gen_unary (NOT, mode,
2594 XEXP (op0, 0), mode),
2595 op1);
2597 else if (GET_CODE (op0) == AND
2598 && rtx_equal_p (XEXP (op0, 0), op1)
2599 && ! side_effects_p (op1))
2600 return simplify_gen_binary (AND, mode,
2601 simplify_gen_unary (NOT, mode,
2602 XEXP (op0, 1), mode),
2603 op1);
2605 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2606 we can transform like this:
2607 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2608 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2609 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2610 Attempt a few simplifications when B and C are both constants. */
2611 if (GET_CODE (op0) == AND
2612 && CONST_INT_P (op1)
2613 && CONST_INT_P (XEXP (op0, 1)))
2615 rtx a = XEXP (op0, 0);
2616 rtx b = XEXP (op0, 1);
2617 rtx c = op1;
2618 HOST_WIDE_INT bval = INTVAL (b);
2619 HOST_WIDE_INT cval = INTVAL (c);
2621 rtx na_c
2622 = simplify_binary_operation (AND, mode,
2623 simplify_gen_unary (NOT, mode, a, mode),
2624 c);
2625 if ((~cval & bval) == 0)
2627 /* Try to simplify ~A&C | ~B&C. */
2628 if (na_c != NULL_RTX)
2629 return simplify_gen_binary (IOR, mode, na_c,
2630 GEN_INT (~bval & cval));
2631 }
2632 else
2633 {
2634 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2635 if (na_c == const0_rtx)
2637 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2638 GEN_INT (~cval & bval));
2639 return simplify_gen_binary (IOR, mode, a_nc_b,
2640 GEN_INT (~bval & cval));
2645 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2646 comparison if STORE_FLAG_VALUE is 1. */
2647 if (STORE_FLAG_VALUE == 1
2648 && trueop1 == const1_rtx
2649 && COMPARISON_P (op0)
2650 && (reversed = reversed_comparison (op0, mode)))
2651 return reversed;
2653 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2654 is (lt foo (const_int 0)), so we can perform the above
2655 simplification if STORE_FLAG_VALUE is 1. */
2657 if (STORE_FLAG_VALUE == 1
2658 && trueop1 == const1_rtx
2659 && GET_CODE (op0) == LSHIFTRT
2660 && CONST_INT_P (XEXP (op0, 1))
2661 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2662 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
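/* For illustration: in a 32-bit mode the logical shift x >> 31 leaves just
   the sign bit, i.e. 1 exactly when x < 0; XORing that with 1 therefore
   computes x >= 0, which is the reversed comparison produced above. */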
2664 /* (xor (comparison foo bar) (const_int sign-bit))
2665 when STORE_FLAG_VALUE is the sign bit. */
2666 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2667 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2668 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2669 && trueop1 == const_true_rtx
2670 && COMPARISON_P (op0)
2671 && (reversed = reversed_comparison (op0, mode)))
2672 return reversed;
2674 tem = simplify_associative_operation (code, mode, op0, op1);
2675 if (tem)
2676 return tem;
2677 break;
2679 case AND:
2680 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2681 return trueop1;
2682 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2684 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2685 HOST_WIDE_INT nzop1;
2686 if (CONST_INT_P (trueop1))
2688 HOST_WIDE_INT val1 = INTVAL (trueop1);
2689 /* If we are turning off bits already known off in OP0, we need
2690 not do an AND. */
2691 if ((nzop0 & ~val1) == 0)
2692 return op0;
2694 nzop1 = nonzero_bits (trueop1, mode);
2695 /* If we are clearing all the nonzero bits, the result is zero. */
2696 if ((nzop1 & nzop0) == 0
2697 && !side_effects_p (op0) && !side_effects_p (op1))
2698 return CONST0_RTX (mode);
2700 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2701 && GET_MODE_CLASS (mode) != MODE_CC)
2702 return op0;
2703 /* A & (~A) -> 0 */
2704 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2705 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2706 && ! side_effects_p (op0)
2707 && GET_MODE_CLASS (mode) != MODE_CC)
2708 return CONST0_RTX (mode);
2710 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2711 there are no nonzero bits of C outside of X's mode. */
2712 if ((GET_CODE (op0) == SIGN_EXTEND
2713 || GET_CODE (op0) == ZERO_EXTEND)
2714 && CONST_INT_P (trueop1)
2715 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2716 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2717 & UINTVAL (trueop1)) == 0)
2719 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2720 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2721 gen_int_mode (INTVAL (trueop1),
2722 imode));
2723 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2726 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2727 we might be able to further simplify the AND with X and potentially
2728 remove the truncation altogether. */
2729 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2731 rtx x = XEXP (op0, 0);
2732 enum machine_mode xmode = GET_MODE (x);
2733 tem = simplify_gen_binary (AND, xmode, x,
2734 gen_int_mode (INTVAL (trueop1), xmode));
2735 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2738 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2739 if (GET_CODE (op0) == IOR
2740 && CONST_INT_P (trueop1)
2741 && CONST_INT_P (XEXP (op0, 1)))
2743 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2744 return simplify_gen_binary (IOR, mode,
2745 simplify_gen_binary (AND, mode,
2746 XEXP (op0, 0), op1),
2747 gen_int_mode (tmp, mode));
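/* For illustration: AND distributes over IOR, so with C1 == 0x0f and
   C2 == 0x3c, (A | 0x0f) & 0x3c == (A & 0x3c) | 0x0c. */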
2750 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2751 insn (and may simplify more). */
2752 if (GET_CODE (op0) == XOR
2753 && rtx_equal_p (XEXP (op0, 0), op1)
2754 && ! side_effects_p (op1))
2755 return simplify_gen_binary (AND, mode,
2756 simplify_gen_unary (NOT, mode,
2757 XEXP (op0, 1), mode),
2758 op1);
2760 if (GET_CODE (op0) == XOR
2761 && rtx_equal_p (XEXP (op0, 1), op1)
2762 && ! side_effects_p (op1))
2763 return simplify_gen_binary (AND, mode,
2764 simplify_gen_unary (NOT, mode,
2765 XEXP (op0, 0), mode),
2766 op1);
2768 /* Similarly for (~(A ^ B)) & A. */
2769 if (GET_CODE (op0) == NOT
2770 && GET_CODE (XEXP (op0, 0)) == XOR
2771 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2772 && ! side_effects_p (op1))
2773 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2775 if (GET_CODE (op0) == NOT
2776 && GET_CODE (XEXP (op0, 0)) == XOR
2777 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2778 && ! side_effects_p (op1))
2779 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2781 /* Convert (A | B) & A to A. */
2782 if (GET_CODE (op0) == IOR
2783 && (rtx_equal_p (XEXP (op0, 0), op1)
2784 || rtx_equal_p (XEXP (op0, 1), op1))
2785 && ! side_effects_p (XEXP (op0, 0))
2786 && ! side_effects_p (XEXP (op0, 1)))
2787 return op1;
2789 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2790 ((A & N) + B) & M -> (A + B) & M
2791 Similarly if (N & M) == 0,
2792 ((A | N) + B) & M -> (A + B) & M
2793 and for - instead of + and/or ^ instead of |.
2794 Also, if (N & M) == 0, then
2795 (A +- N) & M -> A & M. */
2796 if (CONST_INT_P (trueop1)
2797 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2798 && ~UINTVAL (trueop1)
2799 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2800 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2802 rtx pmop[2];
2803 int which;
2805 pmop[0] = XEXP (op0, 0);
2806 pmop[1] = XEXP (op0, 1);
2808 if (CONST_INT_P (pmop[1])
2809 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2810 return simplify_gen_binary (AND, mode, pmop[0], op1);
2812 for (which = 0; which < 2; which++)
2814 tem = pmop[which];
2815 switch (GET_CODE (tem))
2817 case AND:
2818 if (CONST_INT_P (XEXP (tem, 1))
2819 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2820 == UINTVAL (trueop1))
2821 pmop[which] = XEXP (tem, 0);
2822 break;
2823 case IOR:
2824 case XOR:
2825 if (CONST_INT_P (XEXP (tem, 1))
2826 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2827 pmop[which] = XEXP (tem, 0);
2828 break;
2829 default:
2830 break;
2834 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2836 tem = simplify_gen_binary (GET_CODE (op0), mode,
2837 pmop[0], pmop[1]);
2838 return simplify_gen_binary (code, mode, tem, op1);
2842 /* (and X (ior (not X) Y)) -> (and X Y) */
2843 if (GET_CODE (op1) == IOR
2844 && GET_CODE (XEXP (op1, 0)) == NOT
2845 && op0 == XEXP (XEXP (op1, 0), 0))
2846 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2848 /* (and (ior (not X) Y) X) -> (and X Y) */
2849 if (GET_CODE (op0) == IOR
2850 && GET_CODE (XEXP (op0, 0)) == NOT
2851 && op1 == XEXP (XEXP (op0, 0), 0))
2852 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2854 tem = simplify_associative_operation (code, mode, op0, op1);
2855 if (tem)
2856 return tem;
2857 break;
2859 case UDIV:
2860 /* 0/x is 0 (or x&0 if x has side-effects). */
2861 if (trueop0 == CONST0_RTX (mode))
2863 if (side_effects_p (op1))
2864 return simplify_gen_binary (AND, mode, op1, trueop0);
2865 return trueop0;
2867 /* x/1 is x. */
2868 if (trueop1 == CONST1_RTX (mode))
2869 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2870 /* Convert divide by power of two into shift. */
2871 if (CONST_INT_P (trueop1)
2872 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2873 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2874 break;
2876 case DIV:
2877 /* Handle floating point and integers separately. */
2878 if (SCALAR_FLOAT_MODE_P (mode))
2880 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2881 safe for modes with NaNs, since 0.0 / 0.0 will then be
2882 NaN rather than 0.0. Nor is it safe for modes with signed
2883 zeros, since dividing 0 by a negative number gives -0.0 */
2884 if (trueop0 == CONST0_RTX (mode)
2885 && !HONOR_NANS (mode)
2886 && !HONOR_SIGNED_ZEROS (mode)
2887 && ! side_effects_p (op1))
2888 return op0;
2889 /* x/1.0 is x. */
2890 if (trueop1 == CONST1_RTX (mode)
2891 && !HONOR_SNANS (mode))
2892 return op0;
2894 if (GET_CODE (trueop1) == CONST_DOUBLE
2895 && trueop1 != CONST0_RTX (mode))
2897 REAL_VALUE_TYPE d;
2898 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2900 /* x/-1.0 is -x. */
2901 if (REAL_VALUES_EQUAL (d, dconstm1)
2902 && !HONOR_SNANS (mode))
2903 return simplify_gen_unary (NEG, mode, op0, mode);
2905 /* Change FP division by a constant into multiplication.
2906 Only do this with -freciprocal-math. */
2907 if (flag_reciprocal_math
2908 && !REAL_VALUES_EQUAL (d, dconst0))
2910 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2911 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2912 return simplify_gen_binary (MULT, mode, op0, tem);
2915 }
2916 else
2917 {
2918 /* 0/x is 0 (or x&0 if x has side-effects). */
2919 if (trueop0 == CONST0_RTX (mode)
2920 && !cfun->can_throw_non_call_exceptions)
2922 if (side_effects_p (op1))
2923 return simplify_gen_binary (AND, mode, op1, trueop0);
2924 return trueop0;
2926 /* x/1 is x. */
2927 if (trueop1 == CONST1_RTX (mode))
2928 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2929 /* x/-1 is -x. */
2930 if (trueop1 == constm1_rtx)
2932 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2933 return simplify_gen_unary (NEG, mode, x, mode);
2936 break;
2938 case UMOD:
2939 /* 0%x is 0 (or x&0 if x has side-effects). */
2940 if (trueop0 == CONST0_RTX (mode))
2942 if (side_effects_p (op1))
2943 return simplify_gen_binary (AND, mode, op1, trueop0);
2944 return trueop0;
2946 /* x%1 is 0 (or x&0 if x has side-effects). */
2947 if (trueop1 == CONST1_RTX (mode))
2949 if (side_effects_p (op0))
2950 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2951 return CONST0_RTX (mode);
2953 /* Implement modulus by power of two as AND. */
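/* For illustration: when the divisor is 2**n, the unsigned remainder is
   just the low n bits. A standalone C analogue for n == 4:

     unsigned umod16 (unsigned x)
     {
       return x & 15;
     }
*/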
2954 if (CONST_INT_P (trueop1)
2955 && exact_log2 (UINTVAL (trueop1)) > 0)
2956 return simplify_gen_binary (AND, mode, op0,
2957 GEN_INT (INTVAL (op1) - 1));
2958 break;
2960 case MOD:
2961 /* 0%x is 0 (or x&0 if x has side-effects). */
2962 if (trueop0 == CONST0_RTX (mode))
2964 if (side_effects_p (op1))
2965 return simplify_gen_binary (AND, mode, op1, trueop0);
2966 return trueop0;
2968 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2969 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2971 if (side_effects_p (op0))
2972 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2973 return CONST0_RTX (mode);
2975 break;
2977 case ROTATERT:
2978 case ROTATE:
2979 case ASHIFTRT:
2980 if (trueop1 == CONST0_RTX (mode))
2981 return op0;
2982 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2983 return op0;
2984 /* Rotating ~0 always results in ~0. */
2985 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2986 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
2987 && ! side_effects_p (op1))
2988 return op0;
2989 canonicalize_shift:
2990 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2992 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2993 if (val != INTVAL (op1))
2994 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2996 break;
2998 case ASHIFT:
2999 case SS_ASHIFT:
3000 case US_ASHIFT:
3001 if (trueop1 == CONST0_RTX (mode))
3002 return op0;
3003 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3004 return op0;
3005 goto canonicalize_shift;
3007 case LSHIFTRT:
3008 if (trueop1 == CONST0_RTX (mode))
3009 return op0;
3010 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3011 return op0;
3012 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3013 if (GET_CODE (op0) == CLZ
3014 && CONST_INT_P (trueop1)
3015 && STORE_FLAG_VALUE == 1
3016 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3018 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3019 unsigned HOST_WIDE_INT zero_val = 0;
3021 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3022 && zero_val == GET_MODE_BITSIZE (imode)
3023 && INTVAL (trueop1) == exact_log2 (zero_val))
3024 return simplify_gen_relational (EQ, mode, imode,
3025 XEXP (op0, 0), const0_rtx);
3027 goto canonicalize_shift;
3029 case SMIN:
3030 if (width <= HOST_BITS_PER_WIDE_INT
3031 && CONST_INT_P (trueop1)
3032 && UINTVAL (trueop1) == (unsigned HOST_WIDE_INT) 1 << (width -1)
3033 && ! side_effects_p (op0))
3034 return op1;
3035 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3036 return op0;
3037 tem = simplify_associative_operation (code, mode, op0, op1);
3038 if (tem)
3039 return tem;
3040 break;
3042 case SMAX:
3043 if (width <= HOST_BITS_PER_WIDE_INT
3044 && CONST_INT_P (trueop1)
3045 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3046 && ! side_effects_p (op0))
3047 return op1;
3048 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3049 return op0;
3050 tem = simplify_associative_operation (code, mode, op0, op1);
3051 if (tem)
3052 return tem;
3053 break;
3055 case UMIN:
3056 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3057 return op1;
3058 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3059 return op0;
3060 tem = simplify_associative_operation (code, mode, op0, op1);
3061 if (tem)
3062 return tem;
3063 break;
3065 case UMAX:
3066 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3067 return op1;
3068 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3069 return op0;
3070 tem = simplify_associative_operation (code, mode, op0, op1);
3071 if (tem)
3072 return tem;
3073 break;
3075 case SS_PLUS:
3076 case US_PLUS:
3077 case SS_MINUS:
3078 case US_MINUS:
3079 case SS_MULT:
3080 case US_MULT:
3081 case SS_DIV:
3082 case US_DIV:
3083 /* ??? There are simplifications that can be done. */
3084 return 0;
3086 case VEC_SELECT:
3087 if (!VECTOR_MODE_P (mode))
3089 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3090 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3091 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3092 gcc_assert (XVECLEN (trueop1, 0) == 1);
3093 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3095 if (GET_CODE (trueop0) == CONST_VECTOR)
3096 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3097 (trueop1, 0, 0)));
3099 /* Extract a scalar element from a nested VEC_SELECT expression
3100 (with optional nested VEC_CONCAT expression). Some targets
3101 (i386) extract a scalar element from a vector using a chain of
3102 nested VEC_SELECT expressions. When the input operand is a memory
3103 operand, this operation can be simplified to a simple scalar
3104 load from an offset memory address. */
3105 if (GET_CODE (trueop0) == VEC_SELECT)
3107 rtx op0 = XEXP (trueop0, 0);
3108 rtx op1 = XEXP (trueop0, 1);
3110 enum machine_mode opmode = GET_MODE (op0);
3111 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3112 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3114 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3115 int elem;
3117 rtvec vec;
3118 rtx tmp_op, tmp;
3120 gcc_assert (GET_CODE (op1) == PARALLEL);
3121 gcc_assert (i < n_elts);
3123 /* Select the element pointed to by the nested selector. */
3124 elem = INTVAL (XVECEXP (op1, 0, i));
3126 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3127 if (GET_CODE (op0) == VEC_CONCAT)
3129 rtx op00 = XEXP (op0, 0);
3130 rtx op01 = XEXP (op0, 1);
3132 enum machine_mode mode00, mode01;
3133 int n_elts00, n_elts01;
3135 mode00 = GET_MODE (op00);
3136 mode01 = GET_MODE (op01);
3138 /* Find out number of elements of each operand. */
3139 if (VECTOR_MODE_P (mode00))
3141 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3142 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3144 else
3145 n_elts00 = 1;
3147 if (VECTOR_MODE_P (mode01))
3149 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3150 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3152 else
3153 n_elts01 = 1;
3155 gcc_assert (n_elts == n_elts00 + n_elts01);
3157 /* Select correct operand of VEC_CONCAT
3158 and adjust selector. */
3159 if (elem < n_elts01)
3160 tmp_op = op00;
3161 else
3162 {
3163 tmp_op = op01;
3164 elem -= n_elts00;
3165 }
3166 }
3167 else
3168 tmp_op = op0;
3170 vec = rtvec_alloc (1);
3171 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3173 tmp = gen_rtx_fmt_ee (code, mode,
3174 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3175 return tmp;
3177 if (GET_CODE (trueop0) == VEC_DUPLICATE
3178 && GET_MODE (XEXP (trueop0, 0)) == mode)
3179 return XEXP (trueop0, 0);
3180 }
3181 else
3182 {
3183 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3184 gcc_assert (GET_MODE_INNER (mode)
3185 == GET_MODE_INNER (GET_MODE (trueop0)));
3186 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3188 if (GET_CODE (trueop0) == CONST_VECTOR)
3190 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3191 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3192 rtvec v = rtvec_alloc (n_elts);
3193 unsigned int i;
3195 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3196 for (i = 0; i < n_elts; i++)
3198 rtx x = XVECEXP (trueop1, 0, i);
3200 gcc_assert (CONST_INT_P (x));
3201 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3202 INTVAL (x));
3205 return gen_rtx_CONST_VECTOR (mode, v);
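/* For illustration: a constant selection folds completely; selecting with
   the parallel [2 0] from the constant vector {a, b, c, d} yields the
   constant vector {c, a}. */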
3209 if (XVECLEN (trueop1, 0) == 1
3210 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3211 && GET_CODE (trueop0) == VEC_CONCAT)
3213 rtx vec = trueop0;
3214 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3216 /* Try to find the element in the VEC_CONCAT. */
3217 while (GET_MODE (vec) != mode
3218 && GET_CODE (vec) == VEC_CONCAT)
3220 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3221 if (offset < vec_size)
3222 vec = XEXP (vec, 0);
3223 else
3224 {
3225 offset -= vec_size;
3226 vec = XEXP (vec, 1);
3227 }
3228 vec = avoid_constant_pool_reference (vec);
3231 if (GET_MODE (vec) == mode)
3232 return vec;
3235 return 0;
3236 case VEC_CONCAT:
3238 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3239 ? GET_MODE (trueop0)
3240 : GET_MODE_INNER (mode));
3241 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3242 ? GET_MODE (trueop1)
3243 : GET_MODE_INNER (mode));
3245 gcc_assert (VECTOR_MODE_P (mode));
3246 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3247 == GET_MODE_SIZE (mode));
3249 if (VECTOR_MODE_P (op0_mode))
3250 gcc_assert (GET_MODE_INNER (mode)
3251 == GET_MODE_INNER (op0_mode));
3252 else
3253 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3255 if (VECTOR_MODE_P (op1_mode))
3256 gcc_assert (GET_MODE_INNER (mode)
3257 == GET_MODE_INNER (op1_mode));
3258 else
3259 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3261 if ((GET_CODE (trueop0) == CONST_VECTOR
3262 || CONST_INT_P (trueop0)
3263 || GET_CODE (trueop0) == CONST_DOUBLE)
3264 && (GET_CODE (trueop1) == CONST_VECTOR
3265 || CONST_INT_P (trueop1)
3266 || GET_CODE (trueop1) == CONST_DOUBLE))
3268 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3269 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3270 rtvec v = rtvec_alloc (n_elts);
3271 unsigned int i;
3272 unsigned in_n_elts = 1;
3274 if (VECTOR_MODE_P (op0_mode))
3275 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3276 for (i = 0; i < n_elts; i++)
3278 if (i < in_n_elts)
3280 if (!VECTOR_MODE_P (op0_mode))
3281 RTVEC_ELT (v, i) = trueop0;
3282 else
3283 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3284 }
3285 else
3286 {
3287 if (!VECTOR_MODE_P (op1_mode))
3288 RTVEC_ELT (v, i) = trueop1;
3289 else
3290 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3291 i - in_n_elts);
3295 return gen_rtx_CONST_VECTOR (mode, v);
3298 return 0;
3300 default:
3301 gcc_unreachable ();
3304 return 0;
3307 rtx
3308 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3309 rtx op0, rtx op1)
3311 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3312 HOST_WIDE_INT val;
3313 unsigned int width = GET_MODE_BITSIZE (mode);
3315 if (VECTOR_MODE_P (mode)
3316 && code != VEC_CONCAT
3317 && GET_CODE (op0) == CONST_VECTOR
3318 && GET_CODE (op1) == CONST_VECTOR)
3320 unsigned n_elts = GET_MODE_NUNITS (mode);
3321 enum machine_mode op0mode = GET_MODE (op0);
3322 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3323 enum machine_mode op1mode = GET_MODE (op1);
3324 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3325 rtvec v = rtvec_alloc (n_elts);
3326 unsigned int i;
3328 gcc_assert (op0_n_elts == n_elts);
3329 gcc_assert (op1_n_elts == n_elts);
3330 for (i = 0; i < n_elts; i++)
3332 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3333 CONST_VECTOR_ELT (op0, i),
3334 CONST_VECTOR_ELT (op1, i));
3335 if (!x)
3336 return 0;
3337 RTVEC_ELT (v, i) = x;
3340 return gen_rtx_CONST_VECTOR (mode, v);
3343 if (VECTOR_MODE_P (mode)
3344 && code == VEC_CONCAT
3345 && (CONST_INT_P (op0)
3346 || GET_CODE (op0) == CONST_DOUBLE
3347 || GET_CODE (op0) == CONST_FIXED)
3348 && (CONST_INT_P (op1)
3349 || GET_CODE (op1) == CONST_DOUBLE
3350 || GET_CODE (op1) == CONST_FIXED))
3352 unsigned n_elts = GET_MODE_NUNITS (mode);
3353 rtvec v = rtvec_alloc (n_elts);
3355 gcc_assert (n_elts >= 2);
3356 if (n_elts == 2)
3358 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3359 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3361 RTVEC_ELT (v, 0) = op0;
3362 RTVEC_ELT (v, 1) = op1;
3363 }
3364 else
3365 {
3366 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3367 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3368 unsigned i;
3370 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3371 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3372 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3374 for (i = 0; i < op0_n_elts; ++i)
3375 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3376 for (i = 0; i < op1_n_elts; ++i)
3377 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3380 return gen_rtx_CONST_VECTOR (mode, v);
3383 if (SCALAR_FLOAT_MODE_P (mode)
3384 && GET_CODE (op0) == CONST_DOUBLE
3385 && GET_CODE (op1) == CONST_DOUBLE
3386 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3388 if (code == AND
3389 || code == IOR
3390 || code == XOR)
3392 long tmp0[4];
3393 long tmp1[4];
3394 REAL_VALUE_TYPE r;
3395 int i;
3397 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3398 GET_MODE (op0));
3399 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3400 GET_MODE (op1));
3401 for (i = 0; i < 4; i++)
3403 switch (code)
3405 case AND:
3406 tmp0[i] &= tmp1[i];
3407 break;
3408 case IOR:
3409 tmp0[i] |= tmp1[i];
3410 break;
3411 case XOR:
3412 tmp0[i] ^= tmp1[i];
3413 break;
3414 default:
3415 gcc_unreachable ();
3418 real_from_target (&r, tmp0, mode);
3419 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3420 }
3421 else
3422 {
3423 REAL_VALUE_TYPE f0, f1, value, result;
3424 bool inexact;
3426 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3427 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3428 real_convert (&f0, mode, &f0);
3429 real_convert (&f1, mode, &f1);
3431 if (HONOR_SNANS (mode)
3432 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3433 return 0;
3435 if (code == DIV
3436 && REAL_VALUES_EQUAL (f1, dconst0)
3437 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3438 return 0;
3440 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3441 && flag_trapping_math
3442 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3444 int s0 = REAL_VALUE_NEGATIVE (f0);
3445 int s1 = REAL_VALUE_NEGATIVE (f1);
3447 switch (code)
3449 case PLUS:
3450 /* Inf + -Inf = NaN plus exception. */
3451 if (s0 != s1)
3452 return 0;
3453 break;
3454 case MINUS:
3455 /* Inf - Inf = NaN plus exception. */
3456 if (s0 == s1)
3457 return 0;
3458 break;
3459 case DIV:
3460 /* Inf / Inf = NaN plus exception. */
3461 return 0;
3462 default:
3463 break;
3467 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3468 && flag_trapping_math
3469 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3470 || (REAL_VALUE_ISINF (f1)
3471 && REAL_VALUES_EQUAL (f0, dconst0))))
3472 /* Inf * 0 = NaN plus exception. */
3473 return 0;
3475 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3476 &f0, &f1);
3477 real_convert (&result, mode, &value);
3479 /* Don't constant fold this floating point operation if
3480 the result has overflowed and flag_trapping_math. */
3482 if (flag_trapping_math
3483 && MODE_HAS_INFINITIES (mode)
3484 && REAL_VALUE_ISINF (result)
3485 && !REAL_VALUE_ISINF (f0)
3486 && !REAL_VALUE_ISINF (f1))
3487 /* Overflow plus exception. */
3488 return 0;
3490 /* Don't constant fold this floating point operation if the
3491 result may dependent upon the run-time rounding mode and
3492 flag_rounding_math is set, or if GCC's software emulation
3493 is unable to accurately represent the result. */
3495 if ((flag_rounding_math
3496 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3497 && (inexact || !real_identical (&result, &value)))
3498 return 0;
3500 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3504 /* We can fold some multi-word operations. */
3505 if (GET_MODE_CLASS (mode) == MODE_INT
3506 && width == HOST_BITS_PER_DOUBLE_INT
3507 && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3508 && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3510 double_int o0, o1, res, tmp;
3512 o0 = rtx_to_double_int (op0);
3513 o1 = rtx_to_double_int (op1);
3515 switch (code)
3517 case MINUS:
3518 /* A - B == A + (-B). */
3519 o1 = double_int_neg (o1);
3521 /* Fall through.... */
3523 case PLUS:
3524 res = double_int_add (o0, o1);
3525 break;
3527 case MULT:
3528 res = double_int_mul (o0, o1);
3529 break;
3531 case DIV:
3532 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3533 o0.low, o0.high, o1.low, o1.high,
3534 &res.low, &res.high,
3535 &tmp.low, &tmp.high))
3536 return 0;
3537 break;
3539 case MOD:
3540 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3541 o0.low, o0.high, o1.low, o1.high,
3542 &tmp.low, &tmp.high,
3543 &res.low, &res.high))
3544 return 0;
3545 break;
3547 case UDIV:
3548 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3549 o0.low, o0.high, o1.low, o1.high,
3550 &res.low, &res.high,
3551 &tmp.low, &tmp.high))
3552 return 0;
3553 break;
3555 case UMOD:
3556 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3557 o0.low, o0.high, o1.low, o1.high,
3558 &tmp.low, &tmp.high,
3559 &res.low, &res.high))
3560 return 0;
3561 break;
3563 case AND:
3564 res = double_int_and (o0, o1);
3565 break;
3567 case IOR:
3568 res = double_int_ior (o0, o1);
3569 break;
3571 case XOR:
3572 res = double_int_xor (o0, o1);
3573 break;
3575 case SMIN:
3576 res = double_int_smin (o0, o1);
3577 break;
3579 case SMAX:
3580 res = double_int_smax (o0, o1);
3581 break;
3583 case UMIN:
3584 res = double_int_umin (o0, o1);
3585 break;
3587 case UMAX:
3588 res = double_int_umax (o0, o1);
3589 break;
3591 case LSHIFTRT: case ASHIFTRT:
3592 case ASHIFT:
3593 case ROTATE: case ROTATERT:
3595 unsigned HOST_WIDE_INT cnt;
3597 if (SHIFT_COUNT_TRUNCATED)
3598 o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));
3600 if (!double_int_fits_in_uhwi_p (o1)
3601 || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
3602 return 0;
3604 cnt = double_int_to_uhwi (o1);
3606 if (code == LSHIFTRT || code == ASHIFTRT)
3607 res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode),
3608 code == ASHIFTRT);
3609 else if (code == ASHIFT)
3610 res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode),
3611 true);
3612 else if (code == ROTATE)
3613 res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3614 else /* code == ROTATERT */
3615 res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3617 break;
3619 default:
3620 return 0;
3623 return immed_double_int_const (res, mode);
3626 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3627 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3629 /* Get the integer argument values in two forms:
3630 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3632 arg0 = INTVAL (op0);
3633 arg1 = INTVAL (op1);
3635 if (width < HOST_BITS_PER_WIDE_INT)
3637 arg0 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3638 arg1 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3640 arg0s = arg0;
3641 if (arg0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3642 arg0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3644 arg1s = arg1;
3645 if (arg1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3646 arg1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3647 }
3648 else
3649 {
3650 arg0s = arg0;
3651 arg1s = arg1;
3652 }
3654 /* Compute the value of the arithmetic. */
3656 switch (code)
3658 case PLUS:
3659 val = arg0s + arg1s;
3660 break;
3662 case MINUS:
3663 val = arg0s - arg1s;
3664 break;
3666 case MULT:
3667 val = arg0s * arg1s;
3668 break;
3670 case DIV:
3671 if (arg1s == 0
3672 || ((unsigned HOST_WIDE_INT) arg0s
3673 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3674 && arg1s == -1))
3675 return 0;
3676 val = arg0s / arg1s;
3677 break;
3679 case MOD:
3680 if (arg1s == 0
3681 || ((unsigned HOST_WIDE_INT) arg0s
3682 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3683 && arg1s == -1))
3684 return 0;
3685 val = arg0s % arg1s;
3686 break;
3688 case UDIV:
3689 if (arg1 == 0
3690 || ((unsigned HOST_WIDE_INT) arg0s
3691 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3692 && arg1s == -1))
3693 return 0;
3694 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3695 break;
3697 case UMOD:
3698 if (arg1 == 0
3699 || ((unsigned HOST_WIDE_INT) arg0s
3700 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3701 && arg1s == -1))
3702 return 0;
3703 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3704 break;
3706 case AND:
3707 val = arg0 & arg1;
3708 break;
3710 case IOR:
3711 val = arg0 | arg1;
3712 break;
3714 case XOR:
3715 val = arg0 ^ arg1;
3716 break;
3718 case LSHIFTRT:
3719 case ASHIFT:
3720 case ASHIFTRT:
3721 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3722 the value is in range. We can't return any old value for
3723 out-of-range arguments because either the middle-end (via
3724 shift_truncation_mask) or the back-end might be relying on
3725 target-specific knowledge. Nor can we rely on
3726 shift_truncation_mask, since the shift might not be part of an
3727 ashlM3, lshrM3 or ashrM3 instruction. */
3728 if (SHIFT_COUNT_TRUNCATED)
3729 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3730 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3731 return 0;
3733 val = (code == ASHIFT
3734 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3735 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3737 /* Sign-extend the result for arithmetic right shifts. */
3738 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3739 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3740 break;
3742 case ROTATERT:
3743 if (arg1 < 0)
3744 return 0;
3746 arg1 %= width;
3747 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3748 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3749 break;
3751 case ROTATE:
3752 if (arg1 < 0)
3753 return 0;
3755 arg1 %= width;
3756 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3757 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3758 break;
3760 case COMPARE:
3761 /* Do nothing here. */
3762 return 0;
3764 case SMIN:
3765 val = arg0s <= arg1s ? arg0s : arg1s;
3766 break;
3768 case UMIN:
3769 val = ((unsigned HOST_WIDE_INT) arg0
3770 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3771 break;
3773 case SMAX:
3774 val = arg0s > arg1s ? arg0s : arg1s;
3775 break;
3777 case UMAX:
3778 val = ((unsigned HOST_WIDE_INT) arg0
3779 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3780 break;
3782 case SS_PLUS:
3783 case US_PLUS:
3784 case SS_MINUS:
3785 case US_MINUS:
3786 case SS_MULT:
3787 case US_MULT:
3788 case SS_DIV:
3789 case US_DIV:
3790 case SS_ASHIFT:
3791 case US_ASHIFT:
3792 /* ??? There are simplifications that can be done. */
3793 return 0;
3795 default:
3796 gcc_unreachable ();
3799 return gen_int_mode (val, mode);
3807 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3808 PLUS or MINUS.
3810 Rather than test for specific cases, we do this by a brute-force method
3811 and do all possible simplifications until no more changes occur. Then
3812 we rebuild the operation. */
3814 struct simplify_plus_minus_op_data
3816 rtx op;
3817 short neg;
3818 };
3820 static bool
3821 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3823 int result;
3825 result = (commutative_operand_precedence (y)
3826 - commutative_operand_precedence (x));
3827 if (result)
3828 return result > 0;
3830 /* Group together equal REGs to do more simplification. */
3831 if (REG_P (x) && REG_P (y))
3832 return REGNO (x) > REGNO (y);
3833 else
3834 return false;
3837 static rtx
3838 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3839 rtx op1)
3841 struct simplify_plus_minus_op_data ops[8];
3842 rtx result, tem;
3843 int n_ops = 2, input_ops = 2;
3844 int changed, n_constants = 0, canonicalized = 0;
3845 int i, j;
3847 memset (ops, 0, sizeof ops);
3849 /* Set up the two operands and then expand them until nothing has been
3850 changed. If we run out of room in our array, give up; this should
3851 almost never happen. */
3853 ops[0].op = op0;
3854 ops[0].neg = 0;
3855 ops[1].op = op1;
3856 ops[1].neg = (code == MINUS);
3858 do
3860 changed = 0;
3862 for (i = 0; i < n_ops; i++)
3864 rtx this_op = ops[i].op;
3865 int this_neg = ops[i].neg;
3866 enum rtx_code this_code = GET_CODE (this_op);
3868 switch (this_code)
3870 case PLUS:
3871 case MINUS:
3872 if (n_ops == 7)
3873 return NULL_RTX;
3875 ops[n_ops].op = XEXP (this_op, 1);
3876 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3877 n_ops++;
3879 ops[i].op = XEXP (this_op, 0);
3880 input_ops++;
3881 changed = 1;
3882 canonicalized |= this_neg;
3883 break;
3885 case NEG:
3886 ops[i].op = XEXP (this_op, 0);
3887 ops[i].neg = ! this_neg;
3888 changed = 1;
3889 canonicalized = 1;
3890 break;
3892 case CONST:
3893 if (n_ops < 7
3894 && GET_CODE (XEXP (this_op, 0)) == PLUS
3895 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3896 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3898 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3899 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3900 ops[n_ops].neg = this_neg;
3901 n_ops++;
3902 changed = 1;
3903 canonicalized = 1;
3905 break;
3907 case NOT:
3908 /* ~a -> (-a - 1) */
3909 if (n_ops != 7)
3911 ops[n_ops].op = constm1_rtx;
3912 ops[n_ops++].neg = this_neg;
3913 ops[i].op = XEXP (this_op, 0);
3914 ops[i].neg = !this_neg;
3915 changed = 1;
3916 canonicalized = 1;
3918 break;
3920 case CONST_INT:
3921 n_constants++;
3922 if (this_neg)
3924 ops[i].op = neg_const_int (mode, this_op);
3925 ops[i].neg = 0;
3926 changed = 1;
3927 canonicalized = 1;
3929 break;
3931 default:
3932 break;
3936 while (changed);
3938 if (n_constants > 1)
3939 canonicalized = 1;
3941 gcc_assert (n_ops >= 2);
3943 /* If we only have two operands, we can avoid the loops. */
3944 if (n_ops == 2)
3946 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3947 rtx lhs, rhs;
3949 /* Get the two operands. Be careful with the order, especially for
3950 the cases where code == MINUS. */
3951 if (ops[0].neg && ops[1].neg)
3953 lhs = gen_rtx_NEG (mode, ops[0].op);
3954 rhs = ops[1].op;
3956 else if (ops[0].neg)
3957 {
3958 lhs = ops[1].op;
3959 rhs = ops[0].op;
3960 }
3961 else
3962 {
3963 lhs = ops[0].op;
3964 rhs = ops[1].op;
3965 }
3967 return simplify_const_binary_operation (code, mode, lhs, rhs);
3970 /* Now simplify each pair of operands until nothing changes. */
3971 do
3973 /* Insertion sort is good enough for an eight-element array. */
3974 for (i = 1; i < n_ops; i++)
3976 struct simplify_plus_minus_op_data save;
3977 j = i - 1;
3978 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3979 continue;
3981 canonicalized = 1;
3982 save = ops[i];
3983 do
3984 ops[j + 1] = ops[j];
3985 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3986 ops[j + 1] = save;
3989 changed = 0;
3990 for (i = n_ops - 1; i > 0; i--)
3991 for (j = i - 1; j >= 0; j--)
3993 rtx lhs = ops[j].op, rhs = ops[i].op;
3994 int lneg = ops[j].neg, rneg = ops[i].neg;
3996 if (lhs != 0 && rhs != 0)
3998 enum rtx_code ncode = PLUS;
4000 if (lneg != rneg)
4001 {
4002 ncode = MINUS;
4003 if (lneg)
4004 tem = lhs, lhs = rhs, rhs = tem;
4005 }
4006 else if (swap_commutative_operands_p (lhs, rhs))
4007 tem = lhs, lhs = rhs, rhs = tem;
4009 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4010 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4012 rtx tem_lhs, tem_rhs;
4014 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4015 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4016 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4018 if (tem && !CONSTANT_P (tem))
4019 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4020 }
4021 else
4022 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4024 /* Reject "simplifications" that just wrap the two
4025 arguments in a CONST. Failure to do so can result
4026 in infinite recursion with simplify_binary_operation
4027 when it calls us to simplify CONST operations. */
4028 if (tem
4029 && ! (GET_CODE (tem) == CONST
4030 && GET_CODE (XEXP (tem, 0)) == ncode
4031 && XEXP (XEXP (tem, 0), 0) == lhs
4032 && XEXP (XEXP (tem, 0), 1) == rhs))
4033 {
4034 lneg &= rneg;
4035 if (GET_CODE (tem) == NEG)
4036 tem = XEXP (tem, 0), lneg = !lneg;
4037 if (CONST_INT_P (tem) && lneg)
4038 tem = neg_const_int (mode, tem), lneg = 0;
4040 ops[i].op = tem;
4041 ops[i].neg = lneg;
4042 ops[j].op = NULL_RTX;
4043 changed = 1;
4044 canonicalized = 1;
4049 /* If nothing changed, fail. */
4050 if (!canonicalized)
4051 return NULL_RTX;
4053 /* Pack all the operands to the lower-numbered entries. */
4054 for (i = 0, j = 0; j < n_ops; j++)
4055 if (ops[j].op)
4056 {
4057 ops[i] = ops[j];
4058 i++;
4059 }
4060 n_ops = i;
4062 while (changed);
4064 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4065 if (n_ops == 2
4066 && CONST_INT_P (ops[1].op)
4067 && CONSTANT_P (ops[0].op)
4068 && ops[0].neg)
4069 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4071 /* We suppressed creation of trivial CONST expressions in the
4072 combination loop to avoid recursion. Create one manually now.
4073 The combination loop should have ensured that there is exactly
4074 one CONST_INT, and the sort will have ensured that it is last
4075 in the array and that any other constant will be next-to-last. */
4077 if (n_ops > 1
4078 && CONST_INT_P (ops[n_ops - 1].op)
4079 && CONSTANT_P (ops[n_ops - 2].op))
4081 rtx value = ops[n_ops - 1].op;
4082 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4083 value = neg_const_int (mode, value);
4084 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
4085 n_ops--;
4088 /* Put a non-negated operand first, if possible. */
4090 for (i = 0; i < n_ops && ops[i].neg; i++)
4091 continue;
4092 if (i == n_ops)
4093 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4094 else if (i != 0)
4095 {
4096 tem = ops[0].op;
4097 ops[0] = ops[i];
4098 ops[i].op = tem;
4099 ops[i].neg = 1;
4100 }
4102 /* Now make the result by performing the requested operations. */
4103 result = ops[0].op;
4104 for (i = 1; i < n_ops; i++)
4105 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4106 mode, result, ops[i].op);
4108 return result;
4111 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4112 static bool
4113 plus_minus_operand_p (const_rtx x)
4115 return GET_CODE (x) == PLUS
4116 || GET_CODE (x) == MINUS
4117 || (GET_CODE (x) == CONST
4118 && GET_CODE (XEXP (x, 0)) == PLUS
4119 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4120 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
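/* For illustration: this accepts e.g. (plus x y), (minus x y), or a
   relocatable constant such as (const (plus (symbol_ref "s") (const_int 4))),
   all of which simplify_plus_minus knows how to flatten. */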
4123 /* Like simplify_binary_operation except used for relational operators.
4124 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4125 not also be VOIDmode.
4127 CMP_MODE specifies the mode in which the comparison is done, so it is
4128 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4129 the operands or, if both are VOIDmode, the operands are compared in
4130 "infinite precision". */
4132 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4133 enum machine_mode cmp_mode, rtx op0, rtx op1)
4135 rtx tem, trueop0, trueop1;
4137 if (cmp_mode == VOIDmode)
4138 cmp_mode = GET_MODE (op0);
4139 if (cmp_mode == VOIDmode)
4140 cmp_mode = GET_MODE (op1);
4142 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4143 if (tem)
4145 if (SCALAR_FLOAT_MODE_P (mode))
4147 if (tem == const0_rtx)
4148 return CONST0_RTX (mode);
4149 #ifdef FLOAT_STORE_FLAG_VALUE
4151 REAL_VALUE_TYPE val;
4152 val = FLOAT_STORE_FLAG_VALUE (mode);
4153 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4155 #else
4156 return NULL_RTX;
4157 #endif
4159 if (VECTOR_MODE_P (mode))
4161 if (tem == const0_rtx)
4162 return CONST0_RTX (mode);
4163 #ifdef VECTOR_STORE_FLAG_VALUE
4165 int units, i;
4166 rtvec v;
4168 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4169 if (val == NULL_RTX)
4170 return NULL_RTX;
4171 if (val == const1_rtx)
4172 return CONST1_RTX (mode);
4174 units = GET_MODE_NUNITS (mode);
4175 v = rtvec_alloc (units);
4176 for (i = 0; i < units; i++)
4177 RTVEC_ELT (v, i) = val;
4178 return gen_rtx_raw_CONST_VECTOR (mode, v);
4180 #else
4181 return NULL_RTX;
4182 #endif
4185 return tem;
4188 /* For the following tests, ensure const0_rtx is op1. */
4189 if (swap_commutative_operands_p (op0, op1)
4190 || (op0 == const0_rtx && op1 != const0_rtx))
4191 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4193 /* If op0 is a compare, extract the comparison arguments from it. */
4194 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4195 return simplify_gen_relational (code, mode, VOIDmode,
4196 XEXP (op0, 0), XEXP (op0, 1));
4198 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4199 || CC0_P (op0))
4200 return NULL_RTX;
4202 trueop0 = avoid_constant_pool_reference (op0);
4203 trueop1 = avoid_constant_pool_reference (op1);
4204 return simplify_relational_operation_1 (code, mode, cmp_mode,
4205 trueop0, trueop1);
4208 /* This part of simplify_relational_operation is only used when CMP_MODE
4209 is not in class MODE_CC (i.e. it is a real comparison).
4211 MODE is the mode of the result, while CMP_MODE specifies the mode in
4212 which the comparison is done, so it is the mode of the operands. */
4214 static rtx
4215 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4216 enum machine_mode cmp_mode, rtx op0, rtx op1)
4218 enum rtx_code op0code = GET_CODE (op0);
4220 if (op1 == const0_rtx && COMPARISON_P (op0))
4222 /* If op0 is a comparison, extract the comparison arguments
4223 from it. */
4224 if (code == NE)
4226 if (GET_MODE (op0) == mode)
4227 return simplify_rtx (op0);
4228 else
4229 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4230 XEXP (op0, 0), XEXP (op0, 1));
4232 else if (code == EQ)
4234 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4235 if (new_code != UNKNOWN)
4236 return simplify_gen_relational (new_code, mode, VOIDmode,
4237 XEXP (op0, 0), XEXP (op0, 1));
4241 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4242 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4243 if ((code == LTU || code == GEU)
4244 && GET_CODE (op0) == PLUS
4245 && CONST_INT_P (XEXP (op0, 1))
4246 && (rtx_equal_p (op1, XEXP (op0, 0))
4247 || rtx_equal_p (op1, XEXP (op0, 1))))
4249 rtx new_cmp
4250 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4251 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4252 cmp_mode, XEXP (op0, 0), new_cmp);
4255 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4256 if ((code == LTU || code == GEU)
4257 && GET_CODE (op0) == PLUS
4258 && rtx_equal_p (op1, XEXP (op0, 1))
4259 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4260 && !rtx_equal_p (op1, XEXP (op0, 0)))
4261 return simplify_gen_relational (code, mode, cmp_mode, op0,
4262 copy_rtx (XEXP (op0, 0)));
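/* For illustration: these (LTU (plus a b) b) forms are how unsigned
   overflow checks look at the RTL level. A standalone C analogue of the
   idiom being canonicalized:

     int add_overflows (unsigned a, unsigned b)
     {
       return a + b < a;
     }

   The sum wrapped iff it is smaller than either operand, so comparing
   against a or against b is equivalent. */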
4264 if (op1 == const0_rtx)
4266 /* Canonicalize (GTU x 0) as (NE x 0). */
4267 if (code == GTU)
4268 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4269 /* Canonicalize (LEU x 0) as (EQ x 0). */
4270 if (code == LEU)
4271 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4273 else if (op1 == const1_rtx)
4275 switch (code)
4277 case GE:
4278 /* Canonicalize (GE x 1) as (GT x 0). */
4279 return simplify_gen_relational (GT, mode, cmp_mode,
4280 op0, const0_rtx);
4281 case GEU:
4282 /* Canonicalize (GEU x 1) as (NE x 0). */
4283 return simplify_gen_relational (NE, mode, cmp_mode,
4284 op0, const0_rtx);
4285 case LT:
4286 /* Canonicalize (LT x 1) as (LE x 0). */
4287 return simplify_gen_relational (LE, mode, cmp_mode,
4288 op0, const0_rtx);
4289 case LTU:
4290 /* Canonicalize (LTU x 1) as (EQ x 0). */
4291 return simplify_gen_relational (EQ, mode, cmp_mode,
4292 op0, const0_rtx);
4293 default:
4294 break;
4297 else if (op1 == constm1_rtx)
4299 /* Canonicalize (LE x -1) as (LT x 0). */
4300 if (code == LE)
4301 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4302 /* Canonicalize (GT x -1) as (GE x 0). */
4303 if (code == GT)
4304 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4307 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4308 if ((code == EQ || code == NE)
4309 && (op0code == PLUS || op0code == MINUS)
4310 && CONSTANT_P (op1)
4311 && CONSTANT_P (XEXP (op0, 1))
4312 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4314 rtx x = XEXP (op0, 0);
4315 rtx c = XEXP (op0, 1);
4317 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4318 cmp_mode, op1, c);
4319 return simplify_gen_relational (code, mode, cmp_mode, x, c);
4322 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4323 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4324 if (code == NE
4325 && op1 == const0_rtx
4326 && GET_MODE_CLASS (mode) == MODE_INT
4327 && cmp_mode != VOIDmode
4328 /* ??? Work-around BImode bugs in the ia64 backend. */
4329 && mode != BImode
4330 && cmp_mode != BImode
4331 && nonzero_bits (op0, cmp_mode) == 1
4332 && STORE_FLAG_VALUE == 1)
4333 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4334 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4335 : lowpart_subreg (mode, op0, cmp_mode);
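/* For illustration: when nonzero_bits proves only bit 0 of op0 can be set,
   op0 is already the 0/1 value of (ne op0 0), so the comparison reduces to
   op0 itself, extended or truncated to the result mode. */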
4337 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4338 if ((code == EQ || code == NE)
4339 && op1 == const0_rtx
4340 && op0code == XOR)
4341 return simplify_gen_relational (code, mode, cmp_mode,
4342 XEXP (op0, 0), XEXP (op0, 1));
4344 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4345 if ((code == EQ || code == NE)
4346 && op0code == XOR
4347 && rtx_equal_p (XEXP (op0, 0), op1)
4348 && !side_effects_p (XEXP (op0, 0)))
4349 return simplify_gen_relational (code, mode, cmp_mode,
4350 XEXP (op0, 1), const0_rtx);
4352 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4353 if ((code == EQ || code == NE)
4354 && op0code == XOR
4355 && rtx_equal_p (XEXP (op0, 1), op1)
4356 && !side_effects_p (XEXP (op0, 1)))
4357 return simplify_gen_relational (code, mode, cmp_mode,
4358 XEXP (op0, 0), const0_rtx);
4360 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4361 if ((code == EQ || code == NE)
4362 && op0code == XOR
4363 && (CONST_INT_P (op1)
4364 || GET_CODE (op1) == CONST_DOUBLE)
4365 && (CONST_INT_P (XEXP (op0, 1))
4366 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4367 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4368 simplify_gen_binary (XOR, cmp_mode,
4369 XEXP (op0, 1), op1));
4371 if (op0code == POPCOUNT && op1 == const0_rtx)
4372 switch (code)
4374 case EQ:
4375 case LE:
4376 case LEU:
4377 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4378 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4379 XEXP (op0, 0), const0_rtx);
4381 case NE:
4382 case GT:
4383 case GTU:
4384 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4385 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4386 XEXP (op0, 0), const0_rtx);
4388 default:
4389 break;
4392 return NULL_RTX;
4395 enum
4396 {
4397 CMP_EQ = 1,
4398 CMP_LT = 2,
4399 CMP_GT = 4,
4400 CMP_LTU = 8,
4401 CMP_GTU = 16
4402 };
4405 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4406 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4407 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4408 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4409 For floating-point comparisons, assume that the operands were ordered. */
4411 static rtx
4412 comparison_result (enum rtx_code code, int known_results)
4414 switch (code)
4416 case EQ:
4417 case UNEQ:
4418 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4419 case NE:
4420 case LTGT:
4421 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4423 case LT:
4424 case UNLT:
4425 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4426 case GE:
4427 case UNGE:
4428 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4430 case GT:
4431 case UNGT:
4432 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4433 case LE:
4434 case UNLE:
4435 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4437 case LTU:
4438 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4439 case GEU:
4440 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4442 case GTU:
4443 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4444 case LEU:
4445 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4447 case ORDERED:
4448 return const_true_rtx;
4449 case UNORDERED:
4450 return const0_rtx;
4451 default:
4452 gcc_unreachable ();
4456 /* Check if the given comparison (done in the given MODE) is actually a
4457 tautology or a contradiction.
4458 If no simplification is possible, this function returns zero.
4459 Otherwise, it returns either const_true_rtx or const0_rtx. */
4461 rtx
4462 simplify_const_relational_operation (enum rtx_code code,
4463 enum machine_mode mode,
4464 rtx op0, rtx op1)
4466 rtx tem;
4467 rtx trueop0;
4468 rtx trueop1;
4470 gcc_assert (mode != VOIDmode
4471 || (GET_MODE (op0) == VOIDmode
4472 && GET_MODE (op1) == VOIDmode));
4474 /* If op0 is a compare, extract the comparison arguments from it. */
4475 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4477 op1 = XEXP (op0, 1);
4478 op0 = XEXP (op0, 0);
4480 if (GET_MODE (op0) != VOIDmode)
4481 mode = GET_MODE (op0);
4482 else if (GET_MODE (op1) != VOIDmode)
4483 mode = GET_MODE (op1);
4488 /* We can't simplify MODE_CC values since we don't know what the
4489 actual comparison is. */
4490 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4491 return 0;
4493 /* Make sure the constant is second. */
4494 if (swap_commutative_operands_p (op0, op1))
4496 tem = op0, op0 = op1, op1 = tem;
4497 code = swap_condition (code);
4500 trueop0 = avoid_constant_pool_reference (op0);
4501 trueop1 = avoid_constant_pool_reference (op1);
4503 /* For integer comparisons of A and B maybe we can simplify A - B and can
4504 then simplify a comparison of that with zero. If A and B are both either
4505 a register or a CONST_INT, this can't help; testing for these cases will
4506 prevent infinite recursion here and speed things up.
4508 We can only do this for EQ and NE comparisons as otherwise we may
4509 lose or introduce overflow which we cannot disregard as undefined as
4510 we do not know the signedness of the operation on either the left or
4511 the right hand side of the comparison. */
4513 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4514 && (code == EQ || code == NE)
4515 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4516 && (REG_P (op1) || CONST_INT_P (trueop1)))
4517 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4518 /* We cannot do this if tem is a nonzero address. */
4519 && ! nonzero_address_p (tem))
4520 return simplify_const_relational_operation (signed_condition (code),
4521 mode, tem, const0_rtx);
4523 if (! HONOR_NANS (mode) && code == ORDERED)
4524 return const_true_rtx;
4526 if (! HONOR_NANS (mode) && code == UNORDERED)
4527 return const0_rtx;
4529 /* For modes without NaNs, if the two operands are equal, we know the
4530 result except if they have side-effects. Even with NaNs we know
4531 the result of unordered comparisons and, if signaling NaNs are
4532 irrelevant, also the result of LT/GT/LTGT. */
4533 if ((! HONOR_NANS (GET_MODE (trueop0))
4534 || code == UNEQ || code == UNLE || code == UNGE
4535 || ((code == LT || code == GT || code == LTGT)
4536 && ! HONOR_SNANS (GET_MODE (trueop0))))
4537 && rtx_equal_p (trueop0, trueop1)
4538 && ! side_effects_p (trueop0))
4539 return comparison_result (code, CMP_EQ);
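/* For instance, (eq:SI (reg:SI 60) (reg:SI 60)) folds to const_true_rtx
   here.  When NaNs are honored, (unle:DF x x) still folds to
   const_true_rtx, but (unlt:DF x x) is left alone because its result
   differs when x is a NaN.  */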
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ: case UNLT: case UNGT: case UNLE: case UNGE:
	  case NE: case UNORDERED:
	    return const_true_rtx;
	  case EQ: case LT: case GT: case LE: case GE:
	  case LTGT: case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }
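/* For example, comparing (const_double:DF 1.0) with (const_double:DF 2.0)
   produces CMP_LT, so LT/LE/NE fold to const_true_rtx and GT/GE/EQ to
   const0_rtx; if either constant is a NaN, (ordered ...) folds to
   const0_rtx and (unordered ...) to const_true_rtx.  */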
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || CONST_INT_P (trueop0))
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((unsigned HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
	return comparison_result (code, CMP_EQ);

      {
	int cr;
	cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
	cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
	return comparison_result (code, cr);
      }
    }
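/* Note that the signed and unsigned verdicts can differ: comparing
   (const_int -1) with (const_int 1) yields CMP_LT | CMP_GTU, so
   (lt ...) folds to const_true_rtx while (ltu ...) folds to
   const0_rtx.  */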
  /* Optimize comparisons with upper and lower bounds.  */
  if (SCALAR_INT_MODE_P (mode)
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU || code == LEU || code == GTU || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
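/* For example, if nonzero_bits shows that only the low eight bits of
   TRUEOP0 can be set, then MMIN/MMAX are 0/255, so
   (gtu:SI x (const_int 255)) folds to const0_rtx and
   (leu:SI x (const_int 255)) folds to const_true_rtx.  */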
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ: case LEU:
		  return const0_rtx;
		case NE: case GTU:
		  return const_true_rtx;
		case LT: case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT: case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }
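/* For instance, (ne:SI (ior:SI x (const_int 4)) (const_int 0)) folds to
   const_true_rtx because the IOR forces bit 2 on; if the IOR'd constant
   has the sign bit set, LT/LE against zero likewise become known.  */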
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
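/* As an illustration of the preceding ABS folds: (ge:SF (abs:SF x)
   (const_double:SF 0.0)) becomes const_true_rtx when NaNs need not be
   honored; for integer modes the same fold is only done when signed
   overflow is assumed undefined, since ABS of INT_MIN may wrap.  */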
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  bool any_change = false;
  rtx tem;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      break;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1);
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (INTVAL (op1) - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((unsigned HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((unsigned HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;

	  return gen_int_mode (val, mode);
	}
      break;
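    /* A worked example of the constant extraction above (with
       BITS_BIG_ENDIAN == 0): extracting four bits at position 4 from
       (const_int 0xab) gives val = 0xa; ZERO_EXTRACT returns
       (const_int 10), while SIGN_EXTRACT sees bit 3 set and returns
       (const_int -6).  */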
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;
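      /* E.g. (if_then_else (ne a b) a b) simplifies to "a" and
	 (if_then_else (eq a b) a b) to "b", provided the comparison has
	 no side effects and NaNs/signed zeros are not an issue.  */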
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      /* Otherwise, return either op1 or op2 unchanged.  */
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
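/* For example, (vec_merge:V4SI a b (const_int 5)) takes elements 0 and 2
   from A and elements 1 and 3 from B; a mask of 0 folds to B outright,
   and a mask of 0xf folds to A.  */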
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;
  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;
  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;
	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zeros.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }
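  /* For example, with INNERMODE DImode and OUTERMODE SImode, BYTE 4 names
     the most significant word on a little-endian target and stays 4; on a
     big-endian target it names the least significant word and renumbers
     to 0.  */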
  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
		    << (i - HOST_BITS_PER_WIDE_INT);

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (CONST_INT_P (op)
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
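  /* For instance, (subreg:QI (const_int 0x1234) 0) on a little-endian
     target evaluates to (const_int 0x34); on big-endian targets the same
     low-order byte is reached via the highest byte offset instead.  */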
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  An irritating exception is the paradoxical subreg,
	 where we define SUBREG_BYTE to be 0.  On big endian machines,
	 this value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
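  /* E.g. on a little-endian target, (subreg:QI (truncate:HI (reg:SI 60)) 0)
     becomes (truncate:QI (reg:SI 60)), merging the two truncations.  */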
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
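  /* For example, with 4-byte SFmode parts, (subreg:SF (concat:SC x y) 0)
     selects X and (subreg:SF (concat:SC x y) 4) selects Y.  */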
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) to
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) to
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) to
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
      && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) >= 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }
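  /* On a little-endian target with 32-bit words this turns, e.g.,
     (subreg:SI (lshiftrt:DI (reg:DI 60) (const_int 32)) 0) into
     (subreg:SI (reg:DI 60) 4), reading the upper word directly.  */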
  /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
     and try replacing the SUBREG and shift with it.  Don't do this if
     the MEM has a mode-dependent address or if we would be widening it.  */

  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && byte == subreg_lowpart_offset (outermode, innermode)
      && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), outermode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
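/* A typical use: simplify_gen_subreg (QImode, x, SImode, 0) returns a
   simplified rtx (for instance a folded constant) when possible, a fresh
   (subreg:QI x 0) when the subreg is valid, and NULL_RTX otherwise.  */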
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }

  return NULL;
}