/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
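
/* Illustrative sketch (not part of the original file): how a CONST_INT
   is widened into the (low, high) pair convention used below.  The
   locals lo/hi are hypothetical.

     unsigned HOST_WIDE_INT lo = INTVAL (x);
     HOST_WIDE_INT hi = HWI_SIGN_EXTEND (lo);

   so -1 becomes the pair (~(unsigned HOST_WIDE_INT) 0, -1) and 1
   becomes the pair (1, 0).  */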
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
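
/* Illustrative example (not in the original): in QImode the maximally
   negative value survives negation by wrapping,

     neg_const_int (QImode, GEN_INT (-128))  =>  (const_int -128)

   because gen_int_mode truncates +128 back into QImode's range.  */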
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* We don't have a representation for constants wider than this.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
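
/* Illustrative example (not in the original): for 32-bit SImode the
   sign bit is 0x80000000, so

     mode_signbit_p (SImode, GEN_INT (0x80000000))  =>  true
     mode_signbit_p (SImode, GEN_INT (0x40000000))  =>  false  */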
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
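
/* Usage sketch (illustrative, x a non-constant SImode rtx):

     simplify_gen_binary (PLUS, SImode, x, const0_rtx)   =>  x
     simplify_gen_binary (PLUS, SImode, const0_rtx, x)   =>  x

   the second form folds too, because operands are put into canonical
   order before any new expression is built.  */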
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
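
/* Illustrative example (not in the original): given a pool reference

     (mem/u/c:SF (symbol_ref:SI ("*.LC0")))

   where .LC0 holds the value 1.0f, this returns the CONST_DOUBLE for
   1.0, so later folding sees the constant instead of a memory load.  */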
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */
rtx
delegitimize_mem_from_attrs (rtx x)
{
  if (MEM_P (x)
      && MEM_EXPR (x)
      && (!MEM_OFFSET (x)
	  || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp = 0, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !host_integerp (toffset, 0)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += TREE_INT_CST_LOW (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  if (MEM_OFFSET (x))
	    offset += INTVAL (MEM_OFFSET (x));

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies the mode the comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
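
/* Usage sketch (illustrative): a comparison of a register with itself
   folds to a constant when it has no side effects,

     simplify_gen_relational (EQ, SImode, SImode, reg, reg)
       =>  const_true_rtx

   instead of building an (eq:SI reg reg) rtx.  */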
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call FN on each subexpression;
   if it returns non-NULL, replace that subexpression with its return value
   and simplify the result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
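
/* Usage sketch (illustrative, r1/r2 hypothetical pseudos):

     rtx addr = gen_rtx_PLUS (Pmode, r1, GEN_INT (4));
     rtx new_addr = simplify_replace_rtx (addr, r1, r2);

   yields (plus:P r2 (const_int 4)), rebuilding and re-simplifying each
   node that actually changed.  */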
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
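
/* Illustrative example (not in the original): constants fold first,
   then the structural rules in simplify_unary_operation_1 apply:

     simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode)
       =>  (const_int -5)
     simplify_unary_operation (NOT, SImode, gen_rtx_NOT (SImode, x), SImode)
       =>  x   ((not (not X)) == X below)  */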
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;
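
      /* Illustrative example (not in the original): the De Morgan
	 rewrite above turns (not (ior X Y)) into (and (not X) (not Y)),
	 exposing and-not/nor instruction patterns on machines that
	 have them.  */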
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_INT_P (XEXP (op, 1))
	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult (neg A) B).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_BITSIZE (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
	   || GET_CODE (op) == ZERO_EXTEND)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
	 (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
	 (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
	  && subreg_lowpart_p (op))
	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
				   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  Note that this is also
	 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
	 modes; we just have to apply a different definition for
	 truncation.  But don't do this for an (LSHIFTRT (MULT ...))
	 since this will cause problems with the umulXi3_highpart
	 patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op)))
	   ? (num_sign_bit_copies (op, GET_MODE (op))
	      > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
				- GET_MODE_BITSIZE (mode)))
	   : truncated_to_mode (mode, op))
	  && ! (GET_CODE (op) == LSHIFTRT
		&& GET_CODE (XEXP (op, 0)) == MULT))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && COMPARISON_P (op)
	  && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
							    0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned)significand_size (GET_MODE (op))
		      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>)  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || ((GET_MODE_BITSIZE (GET_MODE (op))
	       <= HOST_BITS_PER_WIDE_INT)
	      && ((nonzero_bits (op, GET_MODE (op))
		   & ((HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
		  == 0)))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
	return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>)  */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>)  */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						 (GET_MODE (op)));
	}
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */
  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    val = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;
		byte = (arg0 >> s) & 0xff;
		val |= byte << d;
	      }
	  }
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	case SS_NEG:
	case US_NEG:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    hv = 0;
	    lv = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;

		if (s < HOST_BITS_PER_WIDE_INT)
		  byte = (l1 >> s) & 0xff;
		else
		  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

		if (d < HOST_BITS_PER_WIDE_INT)
		  lv |= byte << d;
		else
		  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
	      }
	  }
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
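
/* Illustrative example (not in the original): the FIX folding above
   saturates out-of-range values, so in 32-bit SImode

     (fix:SI (const_double:DF 3.0e9))  =>  (const_int 2147483647)

   matching the middle-end's constant-folding semantics.  */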
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */
static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
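
/* Illustrative example (not in the original): for integer modes the
   "(a op b) op c" -> "a op (b op c)" attempt folds the constants in
   (plus (plus x 1) 2) into (plus x 3).  */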
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
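
/* Usage sketch (illustrative): both constant and structural folds go
   through this one entry point,

     simplify_binary_operation (PLUS, SImode, GEN_INT (2), GEN_INT (3))
       =>  (const_int 5)
     simplify_binary_operation (MULT, SImode, x, constm1_rtx)
       =>  (neg:SI x)

   and 0 is returned when nothing simplifies.  */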
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */
static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a  */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  double_int coeff0, coeff1;
	  rtx lhs = op0, rhs = op1;

	  coeff0 = double_int_one;
	  coeff1 = double_int_one;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = double_int_minus_one;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = double_int_minus_one;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff1 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      double_int val;
	      bool speed = optimize_function_for_speed_p (cfun);

	      val = double_int_add (coeff0, coeff1);
	      coeff = immed_double_int_const (val, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
		? tem : 0;
	    }
	}

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1)
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == XOR
	  && (CONST_INT_P (XEXP (op0, 1))
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
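
      /* Illustrative example (not in the original): the coefficient
	 walk above merges like terms, e.g. (plus (mult x 3) x) becomes
	 (mult x 4) when rtx_cost says the result is no more expensive
	 than the original expression.  */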
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  double_int coeff0, negcoeff1;
	  rtx lhs = op0, rhs = op1;

	  coeff0 = double_int_one;
	  negcoeff1 = double_int_minus_one;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = double_int_minus_one;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = double_int_one;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      negcoeff1 = double_int_setbit (double_int_zero,
					     INTVAL (XEXP (rhs, 1)));
	      negcoeff1 = double_int_neg (negcoeff1);
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      double_int val;
	      bool speed = optimize_function_for_speed_p (cfun);

	      val = double_int_add (coeff0, negcoeff1);
	      coeff = immed_double_int_const (val, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
		? tem : 0;
	    }
	}

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_INT_P (op1)
	      || GET_CODE (op1) == CONST_DOUBLE))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y)  */
      if (GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;
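
      /* Illustrative example (not in the original): simplify_plus_minus,
	 invoked just above, cancels common terms across nested
	 PLUS/MINUS, e.g. (minus (plus x y) x) => y for integer modes.  */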
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift unless
	 we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (INTVAL (trueop1))) >= 0
	  /* If the mode is larger than the host word size, and the
	     uppermost bit is set, then this isn't a power of two due
	     to implicit sign extension.  */
	  && (width <= HOST_BITS_PER_WIDE_INT
	      || val != HOST_BITS_PER_WIDE_INT - 1))
	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && (GET_MODE (trueop1) == VOIDmode
	      || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
	  && GET_MODE (op0) == mode
	  && CONST_DOUBLE_LOW (trueop1) == 0
	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
	return simplify_gen_binary (ASHIFT, mode, op0,
				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
2194 if (trueop1 == const0_rtx)
2196 if (CONST_INT_P (trueop1)
2197 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2198 == GET_MODE_MASK (mode)))
2200 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2202 /* A | (~A) -> -1 */
2203 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2204 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2205 && ! side_effects_p (op0)
2206 && SCALAR_INT_MODE_P (mode))
2209 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2210 if (CONST_INT_P (op1)
2211 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2212 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2215 /* Canonicalize (X & C1) | C2. */
2216 if (GET_CODE (op0) == AND
2217 && CONST_INT_P (trueop1)
2218 && CONST_INT_P (XEXP (op0, 1)))
2220 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2221 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2222 HOST_WIDE_INT c2 = INTVAL (trueop1);
2224 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2226 && !side_effects_p (XEXP (op0, 0)))
2229 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2230 if (((c1|c2) & mask) == mask)
2231 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2233 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2234 if (((c1 & ~c2) & mask) != (c1 & mask))
2236 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2237 gen_int_mode (c1 & ~c2, mode));
2238 return simplify_gen_binary (IOR, mode, tem, op1);
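/* Worked example (values invented for illustration): for
   (ior (and X (const_int 0x0F)) (const_int 0x0C)), C1 & ~C2 == 0x03,
   so the result becomes (ior (and X (const_int 0x03)) (const_int 0x0C)),
   which is equivalent because the bits of C2 are set in the result
   either way.  */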
2242 /* Convert (A & B) | A to A. */
2243 if (GET_CODE (op0) == AND
2244 && (rtx_equal_p (XEXP (op0, 0), op1)
2245 || rtx_equal_p (XEXP (op0, 1), op1))
2246 && ! side_effects_p (XEXP (op0, 0))
2247 && ! side_effects_p (XEXP (op0, 1)))
2250 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2251 mode size to (rotate A CX). */
2253 if (GET_CODE (op1) == ASHIFT
2254 || GET_CODE (op1) == SUBREG)
2265 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2266 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2267 && CONST_INT_P (XEXP (opleft, 1))
2268 && CONST_INT_P (XEXP (opright, 1))
2269 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2270 == GET_MODE_BITSIZE (mode)))
2271 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
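/* E.g. (an illustration, assuming 32-bit SImode):
   (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
   becomes (rotate x (const_int 24)), since 24 + 8 == 32.  */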
2273 /* Same, but for ashift that has been "simplified" to a wider mode
2274 by simplify_shift_const. */
2276 if (GET_CODE (opleft) == SUBREG
2277 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2278 && GET_CODE (opright) == LSHIFTRT
2279 && GET_CODE (XEXP (opright, 0)) == SUBREG
2280 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2281 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2282 && (GET_MODE_SIZE (GET_MODE (opleft))
2283 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2284 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2285 SUBREG_REG (XEXP (opright, 0)))
2286 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2287 && CONST_INT_P (XEXP (opright, 1))
2288 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2289 == GET_MODE_BITSIZE (mode)))
2290 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2291 XEXP (SUBREG_REG (opleft), 1));
2293 /* If we have (ior (and X C1) C2), simplify this by making
2294 C1 as small as possible if C1 actually changes. */
2295 if (CONST_INT_P (op1)
2296 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2297 || INTVAL (op1) > 0)
2298 && GET_CODE (op0) == AND
2299 && CONST_INT_P (XEXP (op0, 1))
2300 && CONST_INT_P (op1)
2301 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2302 return simplify_gen_binary (IOR, mode,
2304 (AND, mode, XEXP (op0, 0),
2305 GEN_INT (INTVAL (XEXP (op0, 1))
2309 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2310 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2311 the PLUS does not affect any of the bits in OP1: then we can do
2312 the IOR as a PLUS and we can associate. This is valid if OP1
2313 can be safely shifted left C bits. */
2314 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2315 && GET_CODE (XEXP (op0, 0)) == PLUS
2316 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2317 && CONST_INT_P (XEXP (op0, 1))
2318 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2320 int count = INTVAL (XEXP (op0, 1));
2321 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2323 if (mask >> count == INTVAL (trueop1)
2324 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2325 return simplify_gen_binary (ASHIFTRT, mode,
2326 plus_constant (XEXP (op0, 0), mask),
2330 tem = simplify_associative_operation (code, mode, op0, op1);
2336 if (trueop1 == const0_rtx)
2338 if (CONST_INT_P (trueop1)
2339 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2340 == GET_MODE_MASK (mode)))
2341 return simplify_gen_unary (NOT, mode, op0, mode);
2342 if (rtx_equal_p (trueop0, trueop1)
2343 && ! side_effects_p (op0)
2344 && GET_MODE_CLASS (mode) != MODE_CC)
2345 return CONST0_RTX (mode);
2347 /* Canonicalize XOR of the most significant bit to PLUS. */
2348 if ((CONST_INT_P (op1)
2349 || GET_CODE (op1) == CONST_DOUBLE)
2350 && mode_signbit_p (mode, op1))
2351 return simplify_gen_binary (PLUS, mode, op0, op1);
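/* The two are equivalent because adding the sign bit toggles it and the
   carry, if any, falls outside the mode.  Illustration (values chosen
   here): in QImode, (xor:QI x (const_int -128)) becomes
   (plus:QI x (const_int -128)).  */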
2352 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2353 if ((CONST_INT_P (op1)
2354 || GET_CODE (op1) == CONST_DOUBLE)
2355 && GET_CODE (op0) == PLUS
2356 && (CONST_INT_P (XEXP (op0, 1))
2357 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2358 && mode_signbit_p (mode, XEXP (op0, 1)))
2359 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2360 simplify_gen_binary (XOR, mode, op1,
2363 /* If we are XORing two things that have no bits in common,
2364 convert them into an IOR. This helps to detect rotation encoded
2365 using those methods and possibly other simplifications. */
2367 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2368 && (nonzero_bits (op0, mode)
2369 & nonzero_bits (op1, mode)) == 0)
2370 return (simplify_gen_binary (IOR, mode, op0, op1));
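/* Illustration (masks invented): if nonzero_bits says op0 can only set
   bits 0x0F and op1 only bits 0xF0, then no bit position can ever see
   two ones, so XOR and IOR compute the same value.  */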
2372 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2373 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for (NOT y). */
2376 int num_negated = 0;
2378 if (GET_CODE (op0) == NOT)
2379 num_negated++, op0 = XEXP (op0, 0);
2380 if (GET_CODE (op1) == NOT)
2381 num_negated++, op1 = XEXP (op1, 0);
2383 if (num_negated == 2)
2384 return simplify_gen_binary (XOR, mode, op0, op1);
2385 else if (num_negated == 1)
2386 return simplify_gen_unary (NOT, mode,
2387 simplify_gen_binary (XOR, mode, op0, op1),
2391 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2392 correspond to a machine insn or result in further simplifications
2393 if B is a constant. */
2395 if (GET_CODE (op0) == AND
2396 && rtx_equal_p (XEXP (op0, 1), op1)
2397 && ! side_effects_p (op1))
2398 return simplify_gen_binary (AND, mode,
2399 simplify_gen_unary (NOT, mode,
2400 XEXP (op0, 0), mode),
2403 else if (GET_CODE (op0) == AND
2404 && rtx_equal_p (XEXP (op0, 0), op1)
2405 && ! side_effects_p (op1))
2406 return simplify_gen_binary (AND, mode,
2407 simplify_gen_unary (NOT, mode,
2408 XEXP (op0, 1), mode),
2411 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2412 comparison if STORE_FLAG_VALUE is 1. */
2413 if (STORE_FLAG_VALUE == 1
2414 && trueop1 == const1_rtx
2415 && COMPARISON_P (op0)
2416 && (reversed = reversed_comparison (op0, mode)))
2419 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2420 is (lt foo (const_int 0)), so we can perform the above
2421 simplification if STORE_FLAG_VALUE is 1. */
2423 if (STORE_FLAG_VALUE == 1
2424 && trueop1 == const1_rtx
2425 && GET_CODE (op0) == LSHIFTRT
2426 && CONST_INT_P (XEXP (op0, 1))
2427 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2428 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
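/* Sketch of the reasoning (assuming a 32-bit mode): (lshiftrt foo 31)
   is 1 exactly when foo is negative, i.e. (lt foo 0); XORing that 0/1
   value with 1 yields the reversed test (ge foo 0).  */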
2430 /* (xor (comparison foo bar) (const_int sign-bit))
2431 when STORE_FLAG_VALUE is the sign bit. */
2432 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2433 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2434 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2435 && trueop1 == const_true_rtx
2436 && COMPARISON_P (op0)
2437 && (reversed = reversed_comparison (op0, mode)))
2440 tem = simplify_associative_operation (code, mode, op0, op1);
2446 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2448 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2450 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2451 HOST_WIDE_INT nzop1;
2452 if (CONST_INT_P (trueop1))
2454 HOST_WIDE_INT val1 = INTVAL (trueop1);
2455 /* If we are turning off bits already known off in OP0, we need not do an AND. */
2457 if ((nzop0 & ~val1) == 0)
2460 nzop1 = nonzero_bits (trueop1, mode);
2461 /* If we are clearing all the nonzero bits, the result is zero. */
2462 if ((nzop1 & nzop0) == 0
2463 && !side_effects_p (op0) && !side_effects_p (op1))
2464 return CONST0_RTX (mode);
2466 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2467 && GET_MODE_CLASS (mode) != MODE_CC)
2470 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2471 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2472 && ! side_effects_p (op0)
2473 && GET_MODE_CLASS (mode) != MODE_CC)
2474 return CONST0_RTX (mode);
2476 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2477 there are no nonzero bits of C outside of X's mode. */
2478 if ((GET_CODE (op0) == SIGN_EXTEND
2479 || GET_CODE (op0) == ZERO_EXTEND)
2480 && CONST_INT_P (trueop1)
2481 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2482 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2483 & INTVAL (trueop1)) == 0)
2485 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2486 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2487 gen_int_mode (INTVAL (trueop1),
2489 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2492 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2493 we might be able to further simplify the AND with X and potentially
2494 remove the truncation altogether. */
2495 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2497 rtx x = XEXP (op0, 0);
2498 enum machine_mode xmode = GET_MODE (x);
2499 tem = simplify_gen_binary (AND, xmode, x,
2500 gen_int_mode (INTVAL (trueop1), xmode));
2501 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
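/* For instance (modes picked for illustration):
   (and:QI (truncate:QI x) (const_int 15)), with x in SImode, becomes
   (truncate:QI (and:SI x (const_int 15))); if the wider AND simplifies,
   the truncation may then disappear entirely.  */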
2504 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2505 if (GET_CODE (op0) == IOR
2506 && CONST_INT_P (trueop1)
2507 && CONST_INT_P (XEXP (op0, 1)))
2509 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2510 return simplify_gen_binary (IOR, mode,
2511 simplify_gen_binary (AND, mode,
2512 XEXP (op0, 0), op1),
2513 gen_int_mode (tmp, mode));
2516 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2517 insn (and may simplify more). */
2518 if (GET_CODE (op0) == XOR
2519 && rtx_equal_p (XEXP (op0, 0), op1)
2520 && ! side_effects_p (op1))
2521 return simplify_gen_binary (AND, mode,
2522 simplify_gen_unary (NOT, mode,
2523 XEXP (op0, 1), mode),
2526 if (GET_CODE (op0) == XOR
2527 && rtx_equal_p (XEXP (op0, 1), op1)
2528 && ! side_effects_p (op1))
2529 return simplify_gen_binary (AND, mode,
2530 simplify_gen_unary (NOT, mode,
2531 XEXP (op0, 0), mode),
2534 /* Similarly for (~(A ^ B)) & A. */
2535 if (GET_CODE (op0) == NOT
2536 && GET_CODE (XEXP (op0, 0)) == XOR
2537 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2538 && ! side_effects_p (op1))
2539 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2541 if (GET_CODE (op0) == NOT
2542 && GET_CODE (XEXP (op0, 0)) == XOR
2543 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2544 && ! side_effects_p (op1))
2545 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2547 /* Convert (A | B) & A to A. */
2548 if (GET_CODE (op0) == IOR
2549 && (rtx_equal_p (XEXP (op0, 0), op1)
2550 || rtx_equal_p (XEXP (op0, 1), op1))
2551 && ! side_effects_p (XEXP (op0, 0))
2552 && ! side_effects_p (XEXP (op0, 1)))
2555 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2556 ((A & N) + B) & M -> (A + B) & M
2557 Similarly if (N & M) == 0,
2558 ((A | N) + B) & M -> (A + B) & M
2559 and for - instead of + and/or ^ instead of |.
2560 Also, if (N & M) == 0, then
2561 (A +- N) & M -> A & M. */
2562 if (CONST_INT_P (trueop1)
2563 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2564 && ~INTVAL (trueop1)
2565 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2566 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2571 pmop[0] = XEXP (op0, 0);
2572 pmop[1] = XEXP (op0, 1);
2574 if (CONST_INT_P (pmop[1])
2575 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2576 return simplify_gen_binary (AND, mode, pmop[0], op1);
2578 for (which = 0; which < 2; which++)
2581 switch (GET_CODE (tem))
2584 if (CONST_INT_P (XEXP (tem, 1))
2585 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2586 == INTVAL (trueop1))
2587 pmop[which] = XEXP (tem, 0);
2591 if (CONST_INT_P (XEXP (tem, 1))
2592 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2593 pmop[which] = XEXP (tem, 0);
2600 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2602 tem = simplify_gen_binary (GET_CODE (op0), mode,
2604 return simplify_gen_binary (code, mode, tem, op1);
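/* Worked instance of the M/N rule above (constants invented): with
   M == 0x0F and N == 0xF0, ((A | N) + B) & M becomes (A + B) & M,
   because every bit N contributes is cleared by the final mask.  */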
2608 /* (and X (ior (not X) Y)) -> (and X Y) */
2609 if (GET_CODE (op1) == IOR
2610 && GET_CODE (XEXP (op1, 0)) == NOT
2611 && op0 == XEXP (XEXP (op1, 0), 0))
2612 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2614 /* (and (ior (not X) Y) X) -> (and X Y) */
2615 if (GET_CODE (op0) == IOR
2616 && GET_CODE (XEXP (op0, 0)) == NOT
2617 && op1 == XEXP (XEXP (op0, 0), 0))
2618 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2620 tem = simplify_associative_operation (code, mode, op0, op1);
2626 /* 0/x is 0 (or x&0 if x has side-effects). */
2627 if (trueop0 == CONST0_RTX (mode))
2629 if (side_effects_p (op1))
2630 return simplify_gen_binary (AND, mode, op1, trueop0);
2634 if (trueop1 == CONST1_RTX (mode))
2635 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2636 /* Convert divide by power of two into shift. */
2637 if (CONST_INT_P (trueop1)
2638 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2639 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
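/* E.g. (illustrative constants): (udiv:SI x (const_int 16)) becomes
   (lshiftrt:SI x (const_int 4)); this is only valid for unsigned
   division, which is why the signed DIV case is handled separately.  */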
2643 /* Handle floating point and integers separately. */
2644 if (SCALAR_FLOAT_MODE_P (mode))
2646 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2647 safe for modes with NaNs, since 0.0 / 0.0 will then be
2648 NaN rather than 0.0. Nor is it safe for modes with signed
2649 zeros, since dividing 0 by a negative number gives -0.0 */
2650 if (trueop0 == CONST0_RTX (mode)
2651 && !HONOR_NANS (mode)
2652 && !HONOR_SIGNED_ZEROS (mode)
2653 && ! side_effects_p (op1))
2656 if (trueop1 == CONST1_RTX (mode)
2657 && !HONOR_SNANS (mode))
2660 if (GET_CODE (trueop1) == CONST_DOUBLE
2661 && trueop1 != CONST0_RTX (mode))
2664 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2667 if (REAL_VALUES_EQUAL (d, dconstm1)
2668 && !HONOR_SNANS (mode))
2669 return simplify_gen_unary (NEG, mode, op0, mode);
2671 /* Change FP division by a constant into multiplication.
2672 Only do this with -freciprocal-math. */
2673 if (flag_reciprocal_math
2674 && !REAL_VALUES_EQUAL (d, dconst0))
2676 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2677 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2678 return simplify_gen_binary (MULT, mode, op0, tem);
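/* Illustration (constant chosen here): with -freciprocal-math,
   x / 4.0 is rewritten as x * 0.25.  For divisors whose reciprocal is
   not exactly representable the product can differ in the last ulp,
   which is exactly what the flag licenses.  */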
2684 /* 0/x is 0 (or x&0 if x has side-effects). */
2685 if (trueop0 == CONST0_RTX (mode))
2687 if (side_effects_p (op1))
2688 return simplify_gen_binary (AND, mode, op1, trueop0);
2692 if (trueop1 == CONST1_RTX (mode))
2693 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2695 if (trueop1 == constm1_rtx)
2697 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2698 return simplify_gen_unary (NEG, mode, x, mode);
2704 /* 0%x is 0 (or x&0 if x has side-effects). */
2705 if (trueop0 == CONST0_RTX (mode))
2707 if (side_effects_p (op1))
2708 return simplify_gen_binary (AND, mode, op1, trueop0);
2711 /* x%1 is 0 (or x&0 if x has side-effects). */
2712 if (trueop1 == CONST1_RTX (mode))
2714 if (side_effects_p (op0))
2715 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2716 return CONST0_RTX (mode);
2718 /* Implement modulus by power of two as AND. */
2719 if (CONST_INT_P (trueop1)
2720 && exact_log2 (INTVAL (trueop1)) > 0)
2721 return simplify_gen_binary (AND, mode, op0,
2722 GEN_INT (INTVAL (op1) - 1));
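/* E.g. (illustrative): (umod:SI x (const_int 8)) becomes
   (and:SI x (const_int 7)), since the remainder modulo a power of two
   is just the low-order bits.  */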
2726 /* 0%x is 0 (or x&0 if x has side-effects). */
2727 if (trueop0 == CONST0_RTX (mode))
2729 if (side_effects_p (op1))
2730 return simplify_gen_binary (AND, mode, op1, trueop0);
2733 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2734 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2736 if (side_effects_p (op0))
2737 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2738 return CONST0_RTX (mode);
2745 if (trueop1 == CONST0_RTX (mode))
2747 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2749 /* Rotating ~0 always results in ~0. */
2750 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2751 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2752 && ! side_effects_p (op1))
2755 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2757 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2758 if (val != INTVAL (op1))
2759 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2766 if (trueop1 == CONST0_RTX (mode))
2768 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2770 goto canonicalize_shift;
2773 if (trueop1 == CONST0_RTX (mode))
2775 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2777 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2778 if (GET_CODE (op0) == CLZ
2779 && CONST_INT_P (trueop1)
2780 && STORE_FLAG_VALUE == 1
2781 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2783 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2784 unsigned HOST_WIDE_INT zero_val = 0;
2786 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2787 && zero_val == GET_MODE_BITSIZE (imode)
2788 && INTVAL (trueop1) == exact_log2 (zero_val))
2789 return simplify_gen_relational (EQ, mode, imode,
2790 XEXP (op0, 0), const0_rtx);
2792 goto canonicalize_shift;
2795 if (width <= HOST_BITS_PER_WIDE_INT
2796 && CONST_INT_P (trueop1)
2797 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2798 && ! side_effects_p (op0))
2800 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2802 tem = simplify_associative_operation (code, mode, op0, op1);
2808 if (width <= HOST_BITS_PER_WIDE_INT
2809 && CONST_INT_P (trueop1)
2810 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2811 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2812 && ! side_effects_p (op0))
2814 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2816 tem = simplify_associative_operation (code, mode, op0, op1);
2822 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2824 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2826 tem = simplify_associative_operation (code, mode, op0, op1);
2832 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2834 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2836 tem = simplify_associative_operation (code, mode, op0, op1);
2849 /* ??? There are simplifications that can be done. */
2853 if (!VECTOR_MODE_P (mode))
2855 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2856 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2857 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2858 gcc_assert (XVECLEN (trueop1, 0) == 1);
2859 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2861 if (GET_CODE (trueop0) == CONST_VECTOR)
2862 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2865 /* Extract a scalar element from a nested VEC_SELECT expression
2866 (with optional nested VEC_CONCAT expression). Some targets
2867 (i386) extract a scalar element from a vector using a chain of
2868 nested VEC_SELECT expressions. When the input operand is a memory
2869 operand, this operation can be simplified to a simple scalar
2870 load from an offset memory address. */
2871 if (GET_CODE (trueop0) == VEC_SELECT)
2873 rtx op0 = XEXP (trueop0, 0);
2874 rtx op1 = XEXP (trueop0, 1);
2876 enum machine_mode opmode = GET_MODE (op0);
2877 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2878 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2880 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2886 gcc_assert (GET_CODE (op1) == PARALLEL);
2887 gcc_assert (i < n_elts);
2889 /* Select the element pointed to by the nested selector. */
2890 elem = INTVAL (XVECEXP (op1, 0, i));
2892 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2893 if (GET_CODE (op0) == VEC_CONCAT)
2895 rtx op00 = XEXP (op0, 0);
2896 rtx op01 = XEXP (op0, 1);
2898 enum machine_mode mode00, mode01;
2899 int n_elts00, n_elts01;
2901 mode00 = GET_MODE (op00);
2902 mode01 = GET_MODE (op01);
2904 /* Find out the number of elements of each operand. */
2905 if (VECTOR_MODE_P (mode00))
2907 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2908 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2913 if (VECTOR_MODE_P (mode01))
2915 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2916 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2921 gcc_assert (n_elts == n_elts00 + n_elts01);
2923 /* Select the correct operand of the VEC_CONCAT
2924 and adjust the selector. */
2925 if (elem < n_elts01)
2936 vec = rtvec_alloc (1);
2937 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2939 tmp = gen_rtx_fmt_ee (code, mode,
2940 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2943 if (GET_CODE (trueop0) == VEC_DUPLICATE
2944 && GET_MODE (XEXP (trueop0, 0)) == mode)
2945 return XEXP (trueop0, 0);
2949 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2950 gcc_assert (GET_MODE_INNER (mode)
2951 == GET_MODE_INNER (GET_MODE (trueop0)));
2952 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2954 if (GET_CODE (trueop0) == CONST_VECTOR)
2956 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2957 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2958 rtvec v = rtvec_alloc (n_elts);
2961 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2962 for (i = 0; i < n_elts; i++)
2964 rtx x = XVECEXP (trueop1, 0, i);
2966 gcc_assert (CONST_INT_P (x));
2967 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2971 return gen_rtx_CONST_VECTOR (mode, v);
2975 if (XVECLEN (trueop1, 0) == 1
2976 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
2977 && GET_CODE (trueop0) == VEC_CONCAT)
2980 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2982 /* Try to find the element in the VEC_CONCAT. */
2983 while (GET_MODE (vec) != mode
2984 && GET_CODE (vec) == VEC_CONCAT)
2986 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2987 if (offset < vec_size)
2988 vec = XEXP (vec, 0);
2992 vec = XEXP (vec, 1);
2994 vec = avoid_constant_pool_reference (vec);
2997 if (GET_MODE (vec) == mode)
3004 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3005 ? GET_MODE (trueop0)
3006 : GET_MODE_INNER (mode));
3007 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3008 ? GET_MODE (trueop1)
3009 : GET_MODE_INNER (mode));
3011 gcc_assert (VECTOR_MODE_P (mode));
3012 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3013 == GET_MODE_SIZE (mode));
3015 if (VECTOR_MODE_P (op0_mode))
3016 gcc_assert (GET_MODE_INNER (mode)
3017 == GET_MODE_INNER (op0_mode));
3019 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3021 if (VECTOR_MODE_P (op1_mode))
3022 gcc_assert (GET_MODE_INNER (mode)
3023 == GET_MODE_INNER (op1_mode));
3025 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3027 if ((GET_CODE (trueop0) == CONST_VECTOR
3028 || CONST_INT_P (trueop0)
3029 || GET_CODE (trueop0) == CONST_DOUBLE)
3030 && (GET_CODE (trueop1) == CONST_VECTOR
3031 || CONST_INT_P (trueop1)
3032 || GET_CODE (trueop1) == CONST_DOUBLE))
3034 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3035 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3036 rtvec v = rtvec_alloc (n_elts);
3038 unsigned in_n_elts = 1;
3040 if (VECTOR_MODE_P (op0_mode))
3041 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3042 for (i = 0; i < n_elts; i++)
3046 if (!VECTOR_MODE_P (op0_mode))
3047 RTVEC_ELT (v, i) = trueop0;
3049 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3053 if (!VECTOR_MODE_P (op1_mode))
3054 RTVEC_ELT (v, i) = trueop1;
3056 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3061 return gen_rtx_CONST_VECTOR (mode, v);
3074 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3077 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3079 unsigned int width = GET_MODE_BITSIZE (mode);
3081 if (VECTOR_MODE_P (mode)
3082 && code != VEC_CONCAT
3083 && GET_CODE (op0) == CONST_VECTOR
3084 && GET_CODE (op1) == CONST_VECTOR)
3086 unsigned n_elts = GET_MODE_NUNITS (mode);
3087 enum machine_mode op0mode = GET_MODE (op0);
3088 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3089 enum machine_mode op1mode = GET_MODE (op1);
3090 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3091 rtvec v = rtvec_alloc (n_elts);
3094 gcc_assert (op0_n_elts == n_elts);
3095 gcc_assert (op1_n_elts == n_elts);
3096 for (i = 0; i < n_elts; i++)
3098 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3099 CONST_VECTOR_ELT (op0, i),
3100 CONST_VECTOR_ELT (op1, i));
3103 RTVEC_ELT (v, i) = x;
3106 return gen_rtx_CONST_VECTOR (mode, v);
3109 if (VECTOR_MODE_P (mode)
3110 && code == VEC_CONCAT
3111 && (CONST_INT_P (op0)
3112 || GET_CODE (op0) == CONST_DOUBLE
3113 || GET_CODE (op0) == CONST_FIXED)
3114 && (CONST_INT_P (op1)
3115 || GET_CODE (op1) == CONST_DOUBLE
3116 || GET_CODE (op1) == CONST_FIXED))
3118 unsigned n_elts = GET_MODE_NUNITS (mode);
3119 rtvec v = rtvec_alloc (n_elts);
3121 gcc_assert (n_elts >= 2);
3124 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3125 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3127 RTVEC_ELT (v, 0) = op0;
3128 RTVEC_ELT (v, 1) = op1;
3132 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3133 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3136 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3137 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3138 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3140 for (i = 0; i < op0_n_elts; ++i)
3141 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3142 for (i = 0; i < op1_n_elts; ++i)
3143 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3146 return gen_rtx_CONST_VECTOR (mode, v);
3149 if (SCALAR_FLOAT_MODE_P (mode)
3150 && GET_CODE (op0) == CONST_DOUBLE
3151 && GET_CODE (op1) == CONST_DOUBLE
3152 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3163 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3165 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3167 for (i = 0; i < 4; i++)
3184 real_from_target (&r, tmp0, mode);
3185 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3189 REAL_VALUE_TYPE f0, f1, value, result;
3192 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3193 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3194 real_convert (&f0, mode, &f0);
3195 real_convert (&f1, mode, &f1);
3197 if (HONOR_SNANS (mode)
3198 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3202 && REAL_VALUES_EQUAL (f1, dconst0)
3203 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3206 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3207 && flag_trapping_math
3208 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3210 int s0 = REAL_VALUE_NEGATIVE (f0);
3211 int s1 = REAL_VALUE_NEGATIVE (f1);
3216 /* Inf + -Inf = NaN plus exception. */
3221 /* Inf - Inf = NaN plus exception. */
3226 /* Inf / Inf = NaN plus exception. */
3233 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3234 && flag_trapping_math
3235 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3236 || (REAL_VALUE_ISINF (f1)
3237 && REAL_VALUES_EQUAL (f0, dconst0))))
3238 /* Inf * 0 = NaN plus exception. */
3241 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3243 real_convert (&result, mode, &value);
3245 /* Don't constant fold this floating point operation if
3246 the result has overflowed and flag_trapping_math. */
3248 if (flag_trapping_math
3249 && MODE_HAS_INFINITIES (mode)
3250 && REAL_VALUE_ISINF (result)
3251 && !REAL_VALUE_ISINF (f0)
3252 && !REAL_VALUE_ISINF (f1))
3253 /* Overflow plus exception. */
3256 /* Don't constant fold this floating point operation if the
3257 result may depend upon the run-time rounding mode and
3258 flag_rounding_math is set, or if GCC's software emulation
3259 is unable to accurately represent the result. */
3261 if ((flag_rounding_math
3262 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3263 && (inexact || !real_identical (&result, &value)))
3266 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3270 /* We can fold some multi-word operations. */
3271 if (GET_MODE_CLASS (mode) == MODE_INT
3272 && width == HOST_BITS_PER_WIDE_INT * 2
3273 && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
3274 && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
3276 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3277 HOST_WIDE_INT h1, h2, hv, ht;
3279 if (GET_CODE (op0) == CONST_DOUBLE)
3280 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3282 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3284 if (GET_CODE (op1) == CONST_DOUBLE)
3285 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3287 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3292 /* A - B == A + (-B). */
3293 neg_double (l2, h2, &lv, &hv);
3296 /* Fall through.... */
3299 add_double (l1, h1, l2, h2, &lv, &hv);
3303 mul_double (l1, h1, l2, h2, &lv, &hv);
3307 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3308 &lv, &hv, <, &ht))
3313 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3314 <, &ht, &lv, &hv))
3319 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3320 &lv, &hv, <, &ht))
3325 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3326 <, &ht, &lv, &hv))
3331 lv = l1 & l2, hv = h1 & h2;
3335 lv = l1 | l2, hv = h1 | h2;
3339 lv = l1 ^ l2, hv = h1 ^ h2;
3345 && ((unsigned HOST_WIDE_INT) l1
3346 < (unsigned HOST_WIDE_INT) l2)))
3355 && ((unsigned HOST_WIDE_INT) l1
3356 > (unsigned HOST_WIDE_INT) l2)))
3363 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3365 && ((unsigned HOST_WIDE_INT) l1
3366 < (unsigned HOST_WIDE_INT) l2)))
3373 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3375 && ((unsigned HOST_WIDE_INT) l1
3376 > (unsigned HOST_WIDE_INT) l2)))
3382 case LSHIFTRT: case ASHIFTRT:
3384 case ROTATE: case ROTATERT:
3385 if (SHIFT_COUNT_TRUNCATED)
3386 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3388 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3391 if (code == LSHIFTRT || code == ASHIFTRT)
3392 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3394 else if (code == ASHIFT)
3395 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3396 else if (code == ROTATE)
3397 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3398 else /* code == ROTATERT */
3399 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3406 return immed_double_const (lv, hv, mode);
3409 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3410 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3412 /* Get the integer argument values in two forms:
3413 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3415 arg0 = INTVAL (op0);
3416 arg1 = INTVAL (op1);
3418 if (width < HOST_BITS_PER_WIDE_INT)
3420 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3421 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3424 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3425 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3428 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3429 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3437 /* Compute the value of the arithmetic. */
3442 val = arg0s + arg1s;
3446 val = arg0s - arg1s;
3450 val = arg0s * arg1s;
3455 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3458 val = arg0s / arg1s;
3463 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3466 val = arg0s % arg1s;
3471 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3474 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3479 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3482 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3500 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3501 the value is in range. We can't return any old value for
3502 out-of-range arguments because either the middle-end (via
3503 shift_truncation_mask) or the back-end might be relying on
3504 target-specific knowledge. Nor can we rely on
3505 shift_truncation_mask, since the shift might not be part of an
3506 ashlM3, lshrM3 or ashrM3 instruction. */
3507 if (SHIFT_COUNT_TRUNCATED)
3508 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3509 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3512 val = (code == ASHIFT
3513 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3514 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3516 /* Sign-extend the result for arithmetic right shifts. */
3517 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3518 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3526 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3527 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3535 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3536 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3540 /* Do nothing here. */
3544 val = arg0s <= arg1s ? arg0s : arg1s;
3548 val = ((unsigned HOST_WIDE_INT) arg0
3549 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3553 val = arg0s > arg1s ? arg0s : arg1s;
3557 val = ((unsigned HOST_WIDE_INT) arg0
3558 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3571 /* ??? There are simplifications that can be done. */
3578 return gen_int_mode (val, mode);
3586 /* Simplify a PLUS or MINUS, at least one of whose operands may be another PLUS or MINUS.
3589 Rather than test for specific cases, we do this by a brute-force method
3590 and do all possible simplifications until no more changes occur. Then
3591 we rebuild the operation. */
3593 struct simplify_plus_minus_op_data
3600 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3604 result = (commutative_operand_precedence (y)
3605 - commutative_operand_precedence (x));
3609 /* Group together equal REGs to do more simplification. */
3610 if (REG_P (x) && REG_P (y))
3611 return REGNO (x) > REGNO (y);
3617 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3620 struct simplify_plus_minus_op_data ops[8];
3622 int n_ops = 2, input_ops = 2;
3623 int changed, n_constants = 0, canonicalized = 0;
3626 memset (ops, 0, sizeof ops);
3628 /* Set up the two operands and then expand them until nothing has been
3629 changed. If we run out of room in our array, give up; this should
3630 almost never happen. */
3635 ops[1].neg = (code == MINUS);
3641 for (i = 0; i < n_ops; i++)
3643 rtx this_op = ops[i].op;
3644 int this_neg = ops[i].neg;
3645 enum rtx_code this_code = GET_CODE (this_op);
3654 ops[n_ops].op = XEXP (this_op, 1);
3655 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3658 ops[i].op = XEXP (this_op, 0);
3661 canonicalized |= this_neg;
3665 ops[i].op = XEXP (this_op, 0);
3666 ops[i].neg = ! this_neg;
3673 && GET_CODE (XEXP (this_op, 0)) == PLUS
3674 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3675 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3677 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3678 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3679 ops[n_ops].neg = this_neg;
3687 /* ~a -> (-a - 1) */
3690 ops[n_ops].op = constm1_rtx;
3691 ops[n_ops++].neg = this_neg;
3692 ops[i].op = XEXP (this_op, 0);
3693 ops[i].neg = !this_neg;
3703 ops[i].op = neg_const_int (mode, this_op);
3717 if (n_constants > 1)
3720 gcc_assert (n_ops >= 2);
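/* A hypothetical walk-through: simplifying (minus (plus a b) (minus c d))
   expands to the operand list {+a, +b, -c, +d}; the pairwise loop below
   can then fold any constants or cancelling terms before the expression
   is rebuilt.  */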
3722 /* If we only have two operands, we can avoid the loops. */
3725 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3728 /* Get the two operands. Be careful with the order, especially for
3729 the cases where code == MINUS. */
3730 if (ops[0].neg && ops[1].neg)
3732 lhs = gen_rtx_NEG (mode, ops[0].op);
3735 else if (ops[0].neg)
3746 return simplify_const_binary_operation (code, mode, lhs, rhs);
3749 /* Now simplify each pair of operands until nothing changes. */
3752 /* Insertion sort is good enough for an eight-element array. */
3753 for (i = 1; i < n_ops; i++)
3755 struct simplify_plus_minus_op_data save;
3757 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3763 ops[j + 1] = ops[j];
3764 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3769 for (i = n_ops - 1; i > 0; i--)
3770 for (j = i - 1; j >= 0; j--)
3772 rtx lhs = ops[j].op, rhs = ops[i].op;
3773 int lneg = ops[j].neg, rneg = ops[i].neg;
3775 if (lhs != 0 && rhs != 0)
3777 enum rtx_code ncode = PLUS;
3783 tem = lhs, lhs = rhs, rhs = tem;
3785 else if (swap_commutative_operands_p (lhs, rhs))
3786 tem = lhs, lhs = rhs, rhs = tem;
3788 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3789 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3791 rtx tem_lhs, tem_rhs;
3793 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3794 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3795 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3797 if (tem && !CONSTANT_P (tem))
3798 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3801 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3803 /* Reject "simplifications" that just wrap the two
3804 arguments in a CONST. Failure to do so can result
3805 in infinite recursion with simplify_binary_operation
3806 when it calls us to simplify CONST operations. */
3808 && ! (GET_CODE (tem) == CONST
3809 && GET_CODE (XEXP (tem, 0)) == ncode
3810 && XEXP (XEXP (tem, 0), 0) == lhs
3811 && XEXP (XEXP (tem, 0), 1) == rhs))
3814 if (GET_CODE (tem) == NEG)
3815 tem = XEXP (tem, 0), lneg = !lneg;
3816 if (CONST_INT_P (tem) && lneg)
3817 tem = neg_const_int (mode, tem), lneg = 0;
3821 ops[j].op = NULL_RTX;
3828 /* If nothing changed, fail. */
3832 /* Pack all the operands to the lower-numbered entries. */
3833 for (i = 0, j = 0; j < n_ops; j++)
3843 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3845 && CONST_INT_P (ops[1].op)
3846 && CONSTANT_P (ops[0].op)
3848 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3850 /* We suppressed creation of trivial CONST expressions in the
3851 combination loop to avoid recursion. Create one manually now.
3852 The combination loop should have ensured that there is exactly
3853 one CONST_INT, and the sort will have ensured that it is last
3854 in the array and that any other constant will be next-to-last. */
3857 && CONST_INT_P (ops[n_ops - 1].op)
3858 && CONSTANT_P (ops[n_ops - 2].op))
3860 rtx value = ops[n_ops - 1].op;
3861 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3862 value = neg_const_int (mode, value);
3863 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3867 /* Put a non-negated operand first, if possible. */
3869 for (i = 0; i < n_ops && ops[i].neg; i++)
3872 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3881 /* Now make the result by performing the requested operations. */
3883 for (i = 1; i < n_ops; i++)
3884 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3885 mode, result, ops[i].op);
3890 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3892 plus_minus_operand_p (const_rtx x)
3894 return GET_CODE (x) == PLUS
3895 || GET_CODE (x) == MINUS
3896 || (GET_CODE (x) == CONST
3897 && GET_CODE (XEXP (x, 0)) == PLUS
3898 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3899 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3902 /* Like simplify_binary_operation except used for relational operators.
3903 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3904 not both be VOIDmode.
3906 CMP_MODE specifies the mode in which the comparison is done, so it is
3907 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3908 the operands or, if both are VOIDmode, the operands are compared in
3909 "infinite precision". */
3911 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3912 enum machine_mode cmp_mode, rtx op0, rtx op1)
3914 rtx tem, trueop0, trueop1;
3916 if (cmp_mode == VOIDmode)
3917 cmp_mode = GET_MODE (op0);
3918 if (cmp_mode == VOIDmode)
3919 cmp_mode = GET_MODE (op1);
3921 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3924 if (SCALAR_FLOAT_MODE_P (mode))
3926 if (tem == const0_rtx)
3927 return CONST0_RTX (mode);
3928 #ifdef FLOAT_STORE_FLAG_VALUE
3930 REAL_VALUE_TYPE val;
3931 val = FLOAT_STORE_FLAG_VALUE (mode);
3932 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3938 if (VECTOR_MODE_P (mode))
3940 if (tem == const0_rtx)
3941 return CONST0_RTX (mode);
3942 #ifdef VECTOR_STORE_FLAG_VALUE
3947 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3948 if (val == NULL_RTX)
3950 if (val == const1_rtx)
3951 return CONST1_RTX (mode);
3953 units = GET_MODE_NUNITS (mode);
3954 v = rtvec_alloc (units);
3955 for (i = 0; i < units; i++)
3956 RTVEC_ELT (v, i) = val;
3957 return gen_rtx_raw_CONST_VECTOR (mode, v);
3967 /* For the following tests, ensure const0_rtx is op1. */
3968 if (swap_commutative_operands_p (op0, op1)
3969 || (op0 == const0_rtx && op1 != const0_rtx))
3970 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3972 /* If op0 is a compare, extract the comparison arguments from it. */
3973 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3974 return simplify_gen_relational (code, mode, VOIDmode,
3975 XEXP (op0, 0), XEXP (op0, 1));
3977 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3981 trueop0 = avoid_constant_pool_reference (op0);
3982 trueop1 = avoid_constant_pool_reference (op1);
3983 return simplify_relational_operation_1 (code, mode, cmp_mode,
3987 /* This part of simplify_relational_operation is only used when CMP_MODE
3988 is not in class MODE_CC (i.e. it is a real comparison).
3990 MODE is the mode of the result, while CMP_MODE specifies the mode
3991 in which the comparison is done, so it is the mode of the operands. */
3994 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3995 enum machine_mode cmp_mode, rtx op0, rtx op1)
3997 enum rtx_code op0code = GET_CODE (op0);
3999 if (op1 == const0_rtx && COMPARISON_P (op0))
4001 /* If op0 is a comparison, extract the comparison arguments
4005 if (GET_MODE (op0) == mode)
4006 return simplify_rtx (op0);
4008 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4009 XEXP (op0, 0), XEXP (op0, 1));
4011 else if (code == EQ)
4013 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4014 if (new_code != UNKNOWN)
4015 return simplify_gen_relational (new_code, mode, VOIDmode,
4016 XEXP (op0, 0), XEXP (op0, 1));
4020 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4021 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4022 if ((code == LTU || code == GEU)
4023 && GET_CODE (op0) == PLUS
4024 && CONST_INT_P (XEXP (op0, 1))
4025 && (rtx_equal_p (op1, XEXP (op0, 0))
4026 || rtx_equal_p (op1, XEXP (op0, 1))))
4029 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4030 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4031 cmp_mode, XEXP (op0, 0), new_cmp);
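/* Worked example (constants invented): (ltu:SI (plus:SI a (const_int 5))
   (const_int 5)) becomes (geu:SI a (const_int -5)); a + 5 wraps around
   (i.e. compares below 5) exactly when a is unsigned-greater-or-equal
   to -5.  */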
4034 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4035 if ((code == LTU || code == GEU)
4036 && GET_CODE (op0) == PLUS
4037 && rtx_equal_p (op1, XEXP (op0, 1))
4038 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4039 && !rtx_equal_p (op1, XEXP (op0, 0)))
4040 return simplify_gen_relational (code, mode, cmp_mode, op0,
4041 copy_rtx (XEXP (op0, 0)));
4043 if (op1 == const0_rtx)
4045 /* Canonicalize (GTU x 0) as (NE x 0). */
4047 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4048 /* Canonicalize (LEU x 0) as (EQ x 0). */
4050 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4052 else if (op1 == const1_rtx)
4057 /* Canonicalize (GE x 1) as (GT x 0). */
4058 return simplify_gen_relational (GT, mode, cmp_mode,
4061 /* Canonicalize (GEU x 1) as (NE x 0). */
4062 return simplify_gen_relational (NE, mode, cmp_mode,
4065 /* Canonicalize (LT x 1) as (LE x 0). */
4066 return simplify_gen_relational (LE, mode, cmp_mode,
4069 /* Canonicalize (LTU x 1) as (EQ x 0). */
4070 return simplify_gen_relational (EQ, mode, cmp_mode,
4076 else if (op1 == constm1_rtx)
4078 /* Canonicalize (LE x -1) as (LT x 0). */
4080 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4081 /* Canonicalize (GT x -1) as (GE x 0). */
4083 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4086 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4087 if ((code == EQ || code == NE)
4088 && (op0code == PLUS || op0code == MINUS)
4090 && CONSTANT_P (XEXP (op0, 1))
4091 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4093 rtx x = XEXP (op0, 0);
4094 rtx c = XEXP (op0, 1);
4096 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4098 return simplify_gen_relational (code, mode, cmp_mode, x, c);
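/* For instance (constants invented): (eq (plus x (const_int 3))
   (const_int 10)) becomes (eq x (const_int 7)), folding C2 - C1 at
   compile time.  */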
4101 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4102 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4104 && op1 == const0_rtx
4105 && GET_MODE_CLASS (mode) == MODE_INT
4106 && cmp_mode != VOIDmode
4107 /* ??? Work-around BImode bugs in the ia64 backend. */
4109 && cmp_mode != BImode
4110 && nonzero_bits (op0, cmp_mode) == 1
4111 && STORE_FLAG_VALUE == 1)
4112 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4113 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4114 : lowpart_subreg (mode, op0, cmp_mode);
4116 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4117 if ((code == EQ || code == NE)
4118 && op1 == const0_rtx
4120 return simplify_gen_relational (code, mode, cmp_mode,
4121 XEXP (op0, 0), XEXP (op0, 1));
4123 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4124 if ((code == EQ || code == NE)
4126 && rtx_equal_p (XEXP (op0, 0), op1)
4127 && !side_effects_p (XEXP (op0, 0)))
4128 return simplify_gen_relational (code, mode, cmp_mode,
4129 XEXP (op0, 1), const0_rtx);
4131 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4132 if ((code == EQ || code == NE)
4134 && rtx_equal_p (XEXP (op0, 1), op1)
4135 && !side_effects_p (XEXP (op0, 1)))
4136 return simplify_gen_relational (code, mode, cmp_mode,
4137 XEXP (op0, 0), const0_rtx);
4139 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4140 if ((code == EQ || code == NE)
4142 && (CONST_INT_P (op1)
4143 || GET_CODE (op1) == CONST_DOUBLE)
4144 && (CONST_INT_P (XEXP (op0, 1))
4145 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4146 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4147 simplify_gen_binary (XOR, cmp_mode,
4148 XEXP (op0, 1), op1));
4150 if (op0code == POPCOUNT && op1 == const0_rtx)
4156 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4157 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4158 XEXP (op0, 0), const0_rtx);
4163 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4164 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4165 XEXP (op0, 0), const0_rtx);
4184 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4185 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4186 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4187 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4188 For floating-point comparisons, assume that the operands were ordered. */
4191 comparison_result (enum rtx_code code, int known_results)
4197 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4200 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4204 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4207 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4211 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4214 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4217 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4219 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4222 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4224 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4227 return const_true_rtx;
4235 /* Check if the given comparison (done in the given MODE) is actually a
4236 tautology or a contradiction.
4237 If no simplification is possible, this function returns zero.
4238 Otherwise, it returns either const_true_rtx or const0_rtx. */
4241 simplify_const_relational_operation (enum rtx_code code,
4242 enum machine_mode mode,
4249 gcc_assert (mode != VOIDmode
4250 || (GET_MODE (op0) == VOIDmode
4251 && GET_MODE (op1) == VOIDmode));
4253 /* If op0 is a compare, extract the comparison arguments from it. */
4254 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4256 op1 = XEXP (op0, 1);
4257 op0 = XEXP (op0, 0);
4259 if (GET_MODE (op0) != VOIDmode)
4260 mode = GET_MODE (op0);
4261 else if (GET_MODE (op1) != VOIDmode)
4262 mode = GET_MODE (op1);
4267 /* We can't simplify MODE_CC values since we don't know what the
4268 actual comparison is. */
4269 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4272 /* Make sure the constant is second. */
4273 if (swap_commutative_operands_p (op0, op1))
4275 tem = op0, op0 = op1, op1 = tem;
4276 code = swap_condition (code);
4279 trueop0 = avoid_constant_pool_reference (op0);
4280 trueop1 = avoid_constant_pool_reference (op1);
4282 /* For integer comparisons of A and B maybe we can simplify A - B and can
4283 then simplify a comparison of that with zero. If A and B are both either
4284 a register or a CONST_INT, this can't help; testing for these cases will
4285 prevent infinite recursion here and speed things up.
4287 We can only do this for EQ and NE comparisons as otherwise we may
4288 lose or introduce overflow which we cannot disregard as undefined, since
4289 we do not know the signedness of the operation on either the left or
4290 the right hand side of the comparison. */
4292 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4293 && (code == EQ || code == NE)
4294 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4295 && (REG_P (op1) || CONST_INT_P (trueop1)))
4296 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4297 /* We cannot do this if tem is a nonzero address. */
4298 && ! nonzero_address_p (tem))
4299 return simplify_const_relational_operation (signed_condition (code),
4300 mode, tem, const0_rtx);
4302 if (! HONOR_NANS (mode) && code == ORDERED)
4303 return const_true_rtx;
4305 if (! HONOR_NANS (mode) && code == UNORDERED)
4308 /* For modes without NaNs, if the two operands are equal, we know the
4309 result except if they have side-effects. Even with NaNs we know
4310 the result of unordered comparisons and, if signaling NaNs are
4311 irrelevant, also the result of LT/GT/LTGT. */
4312 if ((! HONOR_NANS (GET_MODE (trueop0))
4313 || code == UNEQ || code == UNLE || code == UNGE
4314 || ((code == LT || code == GT || code == LTGT)
4315 && ! HONOR_SNANS (GET_MODE (trueop0))))
4316 && rtx_equal_p (trueop0, trueop1)
4317 && ! side_effects_p (trueop0))
4318 return comparison_result (code, CMP_EQ);
4320 /* If the operands are floating-point constants, see if we can fold the result. */
4322 if (GET_CODE (trueop0) == CONST_DOUBLE
4323 && GET_CODE (trueop1) == CONST_DOUBLE
4324 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4326 REAL_VALUE_TYPE d0, d1;
4328 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4329 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4331 /* Comparisons are unordered iff at least one of the values is NaN. */
4332 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4342 return const_true_rtx;
4355 return comparison_result (code,
4356 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4357 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4360 /* Otherwise, see if the operands are both integers. */
4361 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4362 && (GET_CODE (trueop0) == CONST_DOUBLE
4363 || CONST_INT_P (trueop0))
4364 && (GET_CODE (trueop1) == CONST_DOUBLE
4365 || CONST_INT_P (trueop1)))
4367 int width = GET_MODE_BITSIZE (mode);
4368 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4369 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4371 /* Get the two words comprising each integer constant. */
4372 if (GET_CODE (trueop0) == CONST_DOUBLE)
4374 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4375 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4379 l0u = l0s = INTVAL (trueop0);
4380 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4383 if (GET_CODE (trueop1) == CONST_DOUBLE)
4385 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4386 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4390 l1u = l1s = INTVAL (trueop1);
4391 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4394 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4395 we have to sign or zero-extend the values. */
4396 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4398 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4399 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4401 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4402 l0s |= ((HOST_WIDE_INT) (-1) << width);
4404 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4405 l1s |= ((HOST_WIDE_INT) (-1) << width);
4407 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4408 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4410 if (h0u == h1u && l0u == l1u)
4411 return comparison_result (code, CMP_EQ);
4415 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4416 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4417 return comparison_result (code, cr);
4421 /* Optimize comparisons with upper and lower bounds. */
4422 if (SCALAR_INT_MODE_P (mode)
4423 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4424 && CONST_INT_P (trueop1))
4427 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4428 HOST_WIDE_INT val = INTVAL (trueop1);
4429 HOST_WIDE_INT mmin, mmax;
4439 /* Get a reduced range if the sign bit is zero. */
4440 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4447 rtx mmin_rtx, mmax_rtx;
4448 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4450 mmin = INTVAL (mmin_rtx);
4451 mmax = INTVAL (mmax_rtx);
4454 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4456 mmin >>= (sign_copies - 1);
4457 mmax >>= (sign_copies - 1);
4463 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4465 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4466 return const_true_rtx;
4467 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4472 return const_true_rtx;
4477 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4479 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4480 return const_true_rtx;
4481 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4486 return const_true_rtx;
4492 /* x == y is always false for y out of range. */
4493 if (val < mmin || val > mmax)
4497 /* x > y is always false for y >= mmax, always true for y < mmin. */
4499 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4501 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4502 return const_true_rtx;
4508 return const_true_rtx;
4511 /* x < y is always false for y <= mmin, always true for y > mmax. */
4513 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4515 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4516 return const_true_rtx;
4522 return const_true_rtx;
4526 /* x != y is always true for y out of range. */
4527 if (val < mmin || val > mmax)
4528 return const_true_rtx;
4536 /* Optimize integer comparisons with zero. */
4537 if (trueop1 == const0_rtx)
4539 /* Some addresses are known to be nonzero. We don't know
4540 their sign, but equality comparisons are known. */
4541 if (nonzero_address_p (trueop0))
4543 if (code == EQ || code == LEU)
4545 if (code == NE || code == GTU)
4546 return const_true_rtx;
4549 /* See if the first operand is an IOR with a constant. If so, we
4550 may be able to determine the result of this comparison. */
4551 if (GET_CODE (op0) == IOR)
4553 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4554 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4556 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4557 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4558 && (INTVAL (inner_const)
4559 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4568 return const_true_rtx;
4572 return const_true_rtx;
4586 /* Optimize comparison of ABS with zero. */
4587 if (trueop1 == CONST0_RTX (mode)
4588 && (GET_CODE (trueop0) == ABS
4589 || (GET_CODE (trueop0) == FLOAT_EXTEND
4590 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4595 /* Optimize abs(x) < 0.0. */
4596 if (!HONOR_SNANS (mode)
4597 && (!INTEGRAL_MODE_P (mode)
4598 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4600 if (INTEGRAL_MODE_P (mode)
4601 && (issue_strict_overflow_warning
4602 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4603 warning (OPT_Wstrict_overflow,
4604 ("assuming signed overflow does not occur when "
4605 "assuming abs (x) < 0 is false"));
4611 /* Optimize abs(x) >= 0.0. */
4612 if (!HONOR_NANS (mode)
4613 && (!INTEGRAL_MODE_P (mode)
4614 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4616 if (INTEGRAL_MODE_P (mode)
4617 && (issue_strict_overflow_warning
4618 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4619 warning (OPT_Wstrict_overflow,
4620 ("assuming signed overflow does not occur when "
4621 "assuming abs (x) >= 0 is true"));
4622 return const_true_rtx;
4627 /* Optimize ! (abs(x) < 0.0). */
4628 return const_true_rtx;
4638 /* Simplify CODE, an operation with result mode MODE and three operands,
4639 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4640 a constant. Return 0 if no simplification is possible. */
4643 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4644 enum machine_mode op0_mode, rtx op0, rtx op1,
4647 unsigned int width = GET_MODE_BITSIZE (mode);
4649 /* VOIDmode means "infinite" precision. */
4651 width = HOST_BITS_PER_WIDE_INT;
4657 if (CONST_INT_P (op0)
4658 && CONST_INT_P (op1)
4659 && CONST_INT_P (op2)
4660 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4661 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4663 /* Extracting a bit-field from a constant */
4664 HOST_WIDE_INT val = INTVAL (op0);
4666 if (BITS_BIG_ENDIAN)
4667 val >>= (GET_MODE_BITSIZE (op0_mode)
4668 - INTVAL (op2) - INTVAL (op1));
4670 val >>= INTVAL (op2);
4672 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4674 /* First zero-extend. */
4675 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4676 /* If desired, propagate sign bit. */
4677 if (code == SIGN_EXTRACT
4678 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4679 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4682 /* Clear the bits that don't belong in our mode,
4683 unless they and our sign bit are all one.
4684 So we get either a reasonable negative value or a reasonable
4685 unsigned value for this mode. */
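	 /* Illustration with hypothetical values, for width 8: val = -1 is
	    left alone, since the bits above bit 7 all agree with the sign
	    bit, while val = 0x1ff is masked down to 0xff.  */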
4686 if (width < HOST_BITS_PER_WIDE_INT
4687 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4688 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4689 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4691 return gen_int_mode (val, mode);
4696 if (CONST_INT_P (op0))
4697 return op0 != const0_rtx ? op1 : op2;
4699 /* Convert c ? a : a into "a". */
4700 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4703 /* Convert a != b ? a : b into "a". */
4704 if (GET_CODE (op0) == NE
4705 && ! side_effects_p (op0)
4706 && ! HONOR_NANS (mode)
4707 && ! HONOR_SIGNED_ZEROS (mode)
4708 && ((rtx_equal_p (XEXP (op0, 0), op1)
4709 && rtx_equal_p (XEXP (op0, 1), op2))
4710 || (rtx_equal_p (XEXP (op0, 0), op2)
4711 && rtx_equal_p (XEXP (op0, 1), op1))))
4714 /* Convert a == b ? a : b into "b". */
4715 if (GET_CODE (op0) == EQ
4716 && ! side_effects_p (op0)
4717 && ! HONOR_NANS (mode)
4718 && ! HONOR_SIGNED_ZEROS (mode)
4719 && ((rtx_equal_p (XEXP (op0, 0), op1)
4720 && rtx_equal_p (XEXP (op0, 1), op2))
4721 || (rtx_equal_p (XEXP (op0, 0), op2)
4722 && rtx_equal_p (XEXP (op0, 1), op1))))
4725 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4727 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4728 ? GET_MODE (XEXP (op0, 1))
4729 : GET_MODE (XEXP (op0, 0)));
4732 /* Look for STORE_FLAG_VALUE and zero constants in op1 and op2; they let us reduce the IF_THEN_ELSE to the comparison itself or to its reverse. */
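	  /* For example, assuming STORE_FLAG_VALUE is 1,
	     (if_then_else (lt a b) (const_int 1) (const_int 0)) becomes
	     (lt a b), and with the two constants swapped it becomes the
	     reversed comparison (ge a b), provided the comparison can be
	     reversed.  */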
4733 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4735 HOST_WIDE_INT t = INTVAL (op1);
4736 HOST_WIDE_INT f = INTVAL (op2);
4738 if (t == STORE_FLAG_VALUE && f == 0)
4739 code = GET_CODE (op0);
4740 else if (t == 0 && f == STORE_FLAG_VALUE)
4743 tmp = reversed_comparison_code (op0, NULL_RTX);
4751 return simplify_gen_relational (code, mode, cmp_mode,
4752 XEXP (op0, 0), XEXP (op0, 1));
4755 if (cmp_mode == VOIDmode)
4756 cmp_mode = op0_mode;
4757 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4758 cmp_mode, XEXP (op0, 0),
4761 /* See if any simplifications were possible. */
4764 if (CONST_INT_P (temp))
4765 return temp == const0_rtx ? op2 : op1;
4767 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4773 gcc_assert (GET_MODE (op0) == mode);
4774 gcc_assert (GET_MODE (op1) == mode);
4775 gcc_assert (VECTOR_MODE_P (mode));
4776 op2 = avoid_constant_pool_reference (op2);
4777 if (CONST_INT_P (op2))
4779 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4780 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4781 int mask = (1 << n_elts) - 1;
4783 if (!(INTVAL (op2) & mask))
4785 if ((INTVAL (op2) & mask) == mask)
4788 op0 = avoid_constant_pool_reference (op0);
4789 op1 = avoid_constant_pool_reference (op1);
4790 if (GET_CODE (op0) == CONST_VECTOR
4791 && GET_CODE (op1) == CONST_VECTOR)
4793 rtvec v = rtvec_alloc (n_elts);
4796 for (i = 0; i < n_elts; i++)
4797 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4798 ? CONST_VECTOR_ELT (op0, i)
4799 : CONST_VECTOR_ELT (op1, i));
4800 return gen_rtx_CONST_VECTOR (mode, v);
4812 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR,
4814 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4816 Works by unpacking OP into a collection of 8-bit values
4817 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4818 and then repacking them again for OUTERMODE. */
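/* As an illustration (hypothetical call; little-endian target assumed):
   simplify_immed_subreg (QImode, (const_int 0x12345678), SImode, 0)
   unpacks the value into the byte array 78 56 34 12, selects byte 0 and
   repacks it as (const_int 0x78); on a big-endian target, byte 0 names
   the most significant byte, yielding (const_int 0x12) instead.  */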
4821 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4822 enum machine_mode innermode, unsigned int byte)
4824 /* We support up to 512-bit values (for V8DFmode). */
4828 value_mask = (1 << value_bit) - 1
4830 unsigned char value[max_bitsize / value_bit];
4839 rtvec result_v = NULL;
4840 enum mode_class outer_class;
4841 enum machine_mode outer_submode;
4843 /* Some ports misuse CCmode. */
4844 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
4847 /* We have no way to represent a complex constant at the rtl level. */
4848 if (COMPLEX_MODE_P (outermode))
4851 /* Unpack the value. */
4853 if (GET_CODE (op) == CONST_VECTOR)
4855 num_elem = CONST_VECTOR_NUNITS (op);
4856 elems = &CONST_VECTOR_ELT (op, 0);
4857 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4863 elem_bitsize = max_bitsize;
4865 /* If this asserts, it is too complicated; reducing value_bit may help. */
4866 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4867 /* I don't know how to handle endianness of sub-units. */
4868 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4870 for (elem = 0; elem < num_elem; elem++)
4873 rtx el = elems[elem];
4875 /* Vectors are kept in target memory order. (This is probably a mistake.) */
4878 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4879 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4881 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4882 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4883 unsigned bytele = (subword_byte % UNITS_PER_WORD
4884 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4885 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4888 switch (GET_CODE (el))
4892 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4894 *vp++ = INTVAL (el) >> i;
4895 /* CONST_INTs are always logically sign-extended. */
4896 for (; i < elem_bitsize; i += value_bit)
4897 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4901 if (GET_MODE (el) == VOIDmode)
4903 /* If this triggers, someone should have generated a
4904 CONST_INT instead. */
4905 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4907 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4908 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4909 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4912 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4915 /* It shouldn't matter what's done here, so fill it with zero. */
4917 for (; i < elem_bitsize; i += value_bit)
4922 long tmp[max_bitsize / 32];
4923 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4925 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4926 gcc_assert (bitsize <= elem_bitsize);
4927 gcc_assert (bitsize % value_bit == 0);
4929 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4932 /* real_to_target produces its result in words affected by
4933 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4934 and use WORDS_BIG_ENDIAN instead; see the documentation
4935 of SUBREG in rtl.texi. */
4936 for (i = 0; i < bitsize; i += value_bit)
4939 if (WORDS_BIG_ENDIAN)
4940 ibase = bitsize - 1 - i;
4943 *vp++ = tmp[ibase / 32] >> i % 32;
4946 /* It shouldn't matter what's done here, so fill it with zero. */
4948 for (; i < elem_bitsize; i += value_bit)
4954 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4956 for (i = 0; i < elem_bitsize; i += value_bit)
4957 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4961 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4962 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4963 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4965 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4966 >> (i - HOST_BITS_PER_WIDE_INT);
4967 for (; i < elem_bitsize; i += value_bit)
4977 /* Now, pick the right byte to start with. */
4978 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4979 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4980 will already have offset 0. */
4981 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4983 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4985 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4986 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4987 byte = (subword_byte % UNITS_PER_WORD
4988 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
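      /* For instance (hypothetical parameters): with innermode DImode,
	 outermode SImode and 4-byte words on a big-endian target, the
	 memory-order offset 0 names the most significant word, so BYTE
	 is renumbered to 4, its distance from the least significant
	 byte.  */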
4991 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4992 so if it's become negative it will instead be very large.) */
4993 gcc_assert (byte < GET_MODE_SIZE (innermode));
4995 /* Convert from bytes to chunks of size value_bit. */
4996 value_start = byte * (BITS_PER_UNIT / value_bit);
4998 /* Re-pack the value. */
5000 if (VECTOR_MODE_P (outermode))
5002 num_elem = GET_MODE_NUNITS (outermode);
5003 result_v = rtvec_alloc (num_elem);
5004 elems = &RTVEC_ELT (result_v, 0);
5005 outer_submode = GET_MODE_INNER (outermode);
5011 outer_submode = outermode;
5014 outer_class = GET_MODE_CLASS (outer_submode);
5015 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5017 gcc_assert (elem_bitsize % value_bit == 0);
5018 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5020 for (elem = 0; elem < num_elem; elem++)
5024 /* Vectors are stored in target memory order. (This is probably a mistake.) */
5027 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5028 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5030 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5031 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5032 unsigned bytele = (subword_byte % UNITS_PER_WORD
5033 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5034 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5037 switch (outer_class)
5040 case MODE_PARTIAL_INT:
5042 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5045 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5047 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5048 for (; i < elem_bitsize; i += value_bit)
5049 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5050 << (i - HOST_BITS_PER_WIDE_INT));
5052 /* immed_double_const doesn't call trunc_int_for_mode. I don't know why. */
5054 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5055 elems[elem] = gen_int_mode (lo, outer_submode);
5056 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5057 elems[elem] = immed_double_const (lo, hi, outer_submode);
5064 case MODE_DECIMAL_FLOAT:
5067 long tmp[max_bitsize / 32];
5069 /* real_from_target wants its input in words affected by
5070 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5071 and use WORDS_BIG_ENDIAN instead; see the documentation
5072 of SUBREG in rtl.texi. */
5073 for (i = 0; i < max_bitsize / 32; i++)
5075 for (i = 0; i < elem_bitsize; i += value_bit)
5078 if (WORDS_BIG_ENDIAN)
5079 ibase = elem_bitsize - 1 - i;
5082 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5085 real_from_target (&r, tmp, outer_submode);
5086 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5098 f.mode = outer_submode;
5101 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5103 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5104 for (; i < elem_bitsize; i += value_bit)
5105 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5106 << (i - HOST_BITS_PER_WIDE_INT));
5108 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5116 if (VECTOR_MODE_P (outermode))
5117 return gen_rtx_CONST_VECTOR (outermode, result_v);
5122 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5123 Return 0 if no simplifications are possible. */
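/* For example (hypothetical call), on a little-endian target,
   simplify_subreg (QImode, GEN_INT (0x1234), HImode, 0) folds to
   (const_int 0x34) via simplify_immed_subreg.  */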
5125 simplify_subreg (enum machine_mode outermode, rtx op,
5126 enum machine_mode innermode, unsigned int byte)
5128 /* Little bit of sanity checking. */
5129 gcc_assert (innermode != VOIDmode);
5130 gcc_assert (outermode != VOIDmode);
5131 gcc_assert (innermode != BLKmode);
5132 gcc_assert (outermode != BLKmode);
5134 gcc_assert (GET_MODE (op) == innermode
5135 || GET_MODE (op) == VOIDmode);
5137 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5138 gcc_assert (byte < GET_MODE_SIZE (innermode));
5140 if (outermode == innermode && !byte)
5143 if (CONST_INT_P (op)
5144 || GET_CODE (op) == CONST_DOUBLE
5145 || GET_CODE (op) == CONST_FIXED
5146 || GET_CODE (op) == CONST_VECTOR)
5147 return simplify_immed_subreg (outermode, op, innermode, byte);
5149 /* Changing mode twice with SUBREG => just change it once,
5150 or not at all if changing back to the op's starting mode. */
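  /* For instance, on a little-endian target and assuming R is a pseudo
     register, (subreg:QI (subreg:HI (reg:SI R) 2) 0) collapses to
     (subreg:QI (reg:SI R) 2).  */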
5151 if (GET_CODE (op) == SUBREG)
5153 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5154 int final_offset = byte + SUBREG_BYTE (op);
5157 if (outermode == innermostmode
5158 && byte == 0 && SUBREG_BYTE (op) == 0)
5159 return SUBREG_REG (op);
5161 /* The SUBREG_BYTE represents the offset, as if the value were stored
5162 in memory. An irritating exception is the paradoxical subreg, where
5163 we define SUBREG_BYTE to be 0; on big-endian machines, this value
5164 should be negative. For a moment, undo this exception. */
5165 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5167 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5168 if (WORDS_BIG_ENDIAN)
5169 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5170 if (BYTES_BIG_ENDIAN)
5171 final_offset += difference % UNITS_PER_WORD;
5173 if (SUBREG_BYTE (op) == 0
5174 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5176 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5177 if (WORDS_BIG_ENDIAN)
5178 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5179 if (BYTES_BIG_ENDIAN)
5180 final_offset += difference % UNITS_PER_WORD;
5183 /* See whether the resulting subreg will be paradoxical. */
5184 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5186 /* In nonparadoxical subregs we can't handle negative offsets. */
5187 if (final_offset < 0)
5189 /* Bail out in case the resulting subreg would be incorrect. */
5190 if (final_offset % GET_MODE_SIZE (outermode)
5191 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5197 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5199 /* For a paradoxical subreg, see if we are still looking at the lower
5200 part. If so, our SUBREG_BYTE will be 0. */
5201 if (WORDS_BIG_ENDIAN)
5202 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5203 if (BYTES_BIG_ENDIAN)
5204 offset += difference % UNITS_PER_WORD;
5205 if (offset == final_offset)
5211 /* Recurse for further possible simplifications. */
5212 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5216 if (validate_subreg (outermode, innermostmode,
5217 SUBREG_REG (op), final_offset))
5219 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5220 if (SUBREG_PROMOTED_VAR_P (op)
5221 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5222 && GET_MODE_CLASS (outermode) == MODE_INT
5223 && IN_RANGE (GET_MODE_SIZE (outermode),
5224 GET_MODE_SIZE (innermode),
5225 GET_MODE_SIZE (innermostmode))
5226 && subreg_lowpart_p (newx))
5228 SUBREG_PROMOTED_VAR_P (newx) = 1;
5229 SUBREG_PROMOTED_UNSIGNED_SET
5230 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5237 /* Merge implicit and explicit truncations. */
5239 if (GET_CODE (op) == TRUNCATE
5240 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5241 && subreg_lowpart_offset (outermode, innermode) == byte)
5242 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5243 GET_MODE (XEXP (op, 0)));
5245 /* SUBREG of a hard register => just change the register number
5246 and/or mode. If the hard register is not valid in that mode,
5247 suppress this simplification. If the hard register is the stack,
5248 frame, or argument pointer, leave this as a SUBREG. */
5250 if (REG_P (op) && HARD_REGISTER_P (op))
5252 unsigned int regno, final_regno;
5255 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5256 if (HARD_REGISTER_NUM_P (final_regno))
5259 int final_offset = byte;
5261 /* Adjust offset for paradoxical subregs. */
5263 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5265 int difference = (GET_MODE_SIZE (innermode)
5266 - GET_MODE_SIZE (outermode));
5267 if (WORDS_BIG_ENDIAN)
5268 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5269 if (BYTES_BIG_ENDIAN)
5270 final_offset += difference % UNITS_PER_WORD;
5273 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5275 /* Propagate the original regno. We don't have any way to specify
5276 the offset inside the original regno, so do so only for the lowpart.
5277 The information is used only by alias analysis, which cannot
5278 grok partial registers anyway. */
5280 if (subreg_lowpart_offset (outermode, innermode) == byte)
5281 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5286 /* If we have a SUBREG of a register that we are replacing and we are
5287 replacing it with a MEM, make a new MEM and try replacing the
5288 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5289 or if we would be widening it. */
5292 && ! mode_dependent_address_p (XEXP (op, 0))
5293 /* Allow splitting of volatile memory references in case we don't
5294 have an instruction to move the whole thing. */
5295 && (! MEM_VOLATILE_P (op)
5296 || ! have_insn_for (SET, innermode))
5297 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5298 return adjust_address_nv (op, outermode, byte);
5300 /* Handle complex values represented as a CONCAT
5301 of real and imaginary parts. */
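  /* For instance, with 4-byte SFmode (hypothetical operands),
     (subreg:SF (concat:SC x:SF y:SF) 0) simplifies to x and
     (subreg:SF (concat:SC x:SF y:SF) 4) simplifies to y.  */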
5302 if (GET_CODE (op) == CONCAT)
5304 unsigned int part_size, final_offset;
5307 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5308 if (byte < part_size)
5310 part = XEXP (op, 0);
5311 final_offset = byte;
5315 part = XEXP (op, 1);
5316 final_offset = byte - part_size;
5319 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5322 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5325 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5326 return gen_rtx_SUBREG (outermode, part, final_offset);
5330 /* Optimize SUBREG truncations of zero- and sign-extended values. */
5331 if ((GET_CODE (op) == ZERO_EXTEND
5332 || GET_CODE (op) == SIGN_EXTEND)
5333 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5335 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5337 /* If we're requesting the lowpart of a zero or sign extension,
5338 there are three possibilities. If the outermode is the same
5339 as the origmode, we can omit both the extension and the subreg.
5340 If the outermode is not larger than the origmode, we can apply
5341 the truncation without the extension. Finally, if the outermode
5342 is larger than the origmode, but both are integer modes, we
5343 can just extend to the appropriate mode. */
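      /* Concretely, on a little-endian target with a pseudo x:
	 (subreg:HI (zero_extend:SI (x:HI)) 0) is x,
	 (subreg:QI (zero_extend:SI (x:HI)) 0) becomes (subreg:QI (x:HI) 0),
	 and (subreg:SI (zero_extend:DI (x:HI)) 0) becomes
	 (zero_extend:SI (x:HI)).  */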
5346 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5347 if (outermode == origmode)
5348 return XEXP (op, 0);
5349 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5350 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5351 subreg_lowpart_offset (outermode,
5353 if (SCALAR_INT_MODE_P (outermode))
5354 return simplify_gen_unary (GET_CODE (op), outermode,
5355 XEXP (op, 0), origmode);
5358 /* A SUBREG resulting from a zero extension may fold to zero if
5359 it extracts higher bits than the ZERO_EXTEND's source bits. */
5360 if (GET_CODE (op) == ZERO_EXTEND
5361 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5362 return CONST0_RTX (outermode);
5365 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5366 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5367 the outer subreg is effectively a truncation to the original mode. */
5368 if ((GET_CODE (op) == LSHIFTRT
5369 || GET_CODE (op) == ASHIFTRT)
5370 && SCALAR_INT_MODE_P (outermode)
5371 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5372 to avoid the possibility that an outer LSHIFTRT shifts by more
5373 than the sign extension's sign_bit_copies and introduces zeros
5374 into the high bits of the result. */
5375 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5376 && CONST_INT_P (XEXP (op, 1))
5377 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5378 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5379 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5380 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5381 return simplify_gen_binary (ASHIFTRT, outermode,
5382 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5384 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5385 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5386 the outer subreg is effectively a truncation to the original mode. */
5387 if ((GET_CODE (op) == LSHIFTRT
5388 || GET_CODE (op) == ASHIFTRT)
5389 && SCALAR_INT_MODE_P (outermode)
5390 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5391 && CONST_INT_P (XEXP (op, 1))
5392 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5393 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5394 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5395 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5396 return simplify_gen_binary (LSHIFTRT, outermode,
5397 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5399 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5400 (ashift:QI (x:QI) C), where C is a suitable small constant and
5401 the outer subreg is effectively a truncation to the original mode. */
5402 if (GET_CODE (op) == ASHIFT
5403 && SCALAR_INT_MODE_P (outermode)
5404 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5405 && CONST_INT_P (XEXP (op, 1))
5406 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5407 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5408 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5409 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5410 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5411 return simplify_gen_binary (ASHIFT, outermode,
5412 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5414 /* Recognize a word extraction from a multi-word subreg. */
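  /* For instance, with 32-bit words on a little-endian target,
     (subreg:SI (lshiftrt:DI (reg:DI R) (const_int 32)) 0) extracts the
     high word and is rewritten as (subreg:SI (reg:DI R) 4).  */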
5415 if ((GET_CODE (op) == LSHIFTRT
5416 || GET_CODE (op) == ASHIFTRT)
5417 && SCALAR_INT_MODE_P (outermode)
5418 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5419 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5420 && CONST_INT_P (XEXP (op, 1))
5421 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5422 && INTVAL (XEXP (op, 1)) >= 0
5423 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5424 && byte == subreg_lowpart_offset (outermode, innermode))
5426 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5427 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5429 ? byte - shifted_bytes
5430 : byte + shifted_bytes));
5436 /* Make a SUBREG operation or equivalent if it folds. */
5439 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5440 enum machine_mode innermode, unsigned int byte)
5444 newx = simplify_subreg (outermode, op, innermode, byte);
5448 if (GET_CODE (op) == SUBREG
5449 || GET_CODE (op) == CONCAT
5450 || GET_MODE (op) == VOIDmode)
5453 if (validate_subreg (outermode, innermode, op, byte))
5454 return gen_rtx_SUBREG (outermode, op, byte);
5459 /* Simplify X, an rtx expression.
5461 Return the simplified expression or NULL if no simplifications were possible.
5464 This is the preferred entry point into the simplification routines;
5465 however, we still allow passes to call the more specific routines.
5467 Right now GCC has three (yes, three) major bodies of RTL simplification
5468 code that need to be unified.
5470 1. fold_rtx in cse.c. This code uses various CSE-specific
5471 information to aid in RTL simplification.
5473 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5474 it uses combine-specific information to aid in RTL simplification.
5477 3. The routines in this file.
5480 Long term we want to have only one body of simplification code; to
5481 get to that state I recommend the following steps:
5483 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5484 which do not depend on pass-specific state into these routines.
5486 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5487 use this routine whenever possible.
5489 3. Allow for pass-dependent state to be provided to these
5490 routines and add simplifications based on the pass-dependent
5491 state. Remove code from cse.c & combine.c that becomes redundant.
5494 It will take time, but ultimately the compiler will be easier to
5495 maintain and improve. It's totally silly that when we add a
5496 simplification it needs to be added in four places (three for RTL
5497 simplification and one for tree simplification). */
5500 simplify_rtx (const_rtx x)
5502 const enum rtx_code code = GET_CODE (x);
5503 const enum machine_mode mode = GET_MODE (x);
5505 switch (GET_RTX_CLASS (code))
5508 return simplify_unary_operation (code, mode,
5509 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5510 case RTX_COMM_ARITH:
5511 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5512 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5514 /* Fall through.... */
5517 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5520 case RTX_BITFIELD_OPS:
5521 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5522 XEXP (x, 0), XEXP (x, 1),
5526 case RTX_COMM_COMPARE:
5527 return simplify_relational_operation (code, mode,
5528 ((GET_MODE (XEXP (x, 0))
5530 ? GET_MODE (XEXP (x, 0))
5531 : GET_MODE (XEXP (x, 1))),
5537 return simplify_subreg (mode, SUBREG_REG (x),
5538 GET_MODE (SUBREG_REG (x)),
5545 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5546 if (GET_CODE (XEXP (x, 0)) == HIGH
5547 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))