/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
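/* For example, HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) yields -1 (all bits
   set) and HWI_SIGN_EXTEND ((HOST_WIDE_INT) 7) yields 0, which is exactly
   the high half that a signed widening of the low word would produce.  */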
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int, unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
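/* For example, with 32-bit SImode the only accepted value is 0x80000000,
   the most significant bit of the mode; every other constant makes this
   predicate return false.  */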
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
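/* A usage sketch: simplify_gen_binary (PLUS, SImode, const1_rtx, reg)
   first tries constant folding, and failing that emits the canonical
   (plus (reg) (const_int 1)), with the constant swapped into the second
   operand position by the commutative-operand ordering above.  */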
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
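/* For example, a MEM whose address is a SYMBOL_REF into the constant pool
   holding the DFmode value 3.0 is replaced here by the CONST_DOUBLE 3.0
   itself, so later folding can treat the operand as a compile-time
   constant.  */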
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  if (MEM_P (x)
      && MEM_EXPR (x)
      && (!MEM_OFFSET (x)
          || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp = 0, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          if (MEM_OFFSET (x))
            offset += INTVAL (MEM_OFFSET (x));

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with FN (X', DATA), where X'
   is an expression in X that is equal to OLD_RTX.  Canonicalize and
   simplify the result.

   If FN is null, assume FN (X', DATA) == copy_rtx (DATA).  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return FN (X, DATA), with a null FN.  Otherwise,
     if this is an expression, try to build a new expression, substituting
     recursively.  If we can't do anything, return our input.  */

  if (rtx_equal_p (x, old_rtx))
    {
      if (fn)
        return fn (x, data);
      else
        return copy_rtx ((rtx) data);
    }

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  return x;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
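/* A typical use: simplify_replace_rtx on (plus (reg 60) (const_int 1))
   with OLD_RTX = (reg 60) and NEW_RTX = (const_int 4) substitutes the
   constant for the register and lets simplify_gen_binary fold the result
   to (const_int 5).  */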
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */

static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);
      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
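      /* A concrete instance of the two rules above: (not (xor X 5))
         becomes (xor X -6), and in SImode (not (plus X 0x80000000))
         becomes (xor X 0x7fffffff), since ~(X ^ C) == X ^ ~C and adding
         the sign bit is really an xor with it.  */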
      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }
      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }
      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);
      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_INT_P (XEXP (op, 1))
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }
      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }
      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }
      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);
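      /* For instance, when A is known to be 0 or 1, (neg (xor A 1))
         really is A - 1: 0 maps to -1 and 1 maps to 0, which is exactly
         (plus A -1).  */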
      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;
      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);
      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);
      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));
      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
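      /* For example, truncating (sign_extend:DI (reg:SI x)) back to
         SImode needs no TRUNCATE at all: the DImode value has at least
         33 sign-bit copies, so the low-part SUBREG generated here is
         already the exact value.  */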
      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;
      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);
      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);
      /* (float_truncate (float x)) is (float x) */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned) significand_size (GET_MODE (op))
                      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;
      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.
         */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned) significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;
      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);
      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;
        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;
        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;
        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;
        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;
        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;
        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;
        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;
        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
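  /* Two worked examples of the folding above: (zero_extend:SI
     (const_int -1)) with QImode OP_MODE masks down to (const_int 255),
     while (sign_extend:SI (const_int 128)) from QImode wraps past the
     QImode sign bit and yields (const_int -128).  */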
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;
        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;
        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;
        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;
        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
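  /* E.g. (neg:DF (const_double 2.5)) folds here to (const_double -2.5),
     and (abs:DF (const_double -2.5)) to (const_double 2.5); FLOAT_EXTEND
     only changes the mode of the value.  */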
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;
          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;
        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2 * HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
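/* The effect of the bound tests above is saturating conversion: e.g.
   (fix:SI (const_double 1e30)) folds to 0x7fffffff, the largest SImode
   value, and (unsigned_fix:SI (const_double -1.0)) folds to zero.  */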
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }
  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
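/* For example, given "(x + 1) + 2" this routine rewrites the tree so the
   two constants meet: the attempt to fold "1 + 2" succeeds and the whole
   expression becomes (plus x 3).  */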
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
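/* Note the trueop distinction used below: the caller's operands OP0/OP1
   are kept for rebuilding the expression, while the constant-pool-resolved
   TRUEOP0/TRUEOP1 are what the constant folder actually inspects.  */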
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (op1, INTVAL (op0));
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;
              bool speed = optimize_function_for_speed_p (cfun);

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }
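      /* Examples of the distribution above: "X + X*3" becomes "X*4",
         and "X + (X << 2)" becomes "X*5", provided the new MULT is no
         more expensive than the original PLUS tree.  */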
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));
      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }
      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);
      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);
      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;
              bool speed = optimize_function_for_speed_p (cfun);

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_INT_P (op1)
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }
      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));
      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;
      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }
      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }
      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);
      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;
      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;
      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }
      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (CONST_INT_P (trueop1)
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;
      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
        return op1;
      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes X.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }
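      /* E.g. (ior (and X 6) 7) folds to (const_int 7) by the first test
         above, since every bit that C1 lets through is already set in
         C2.  */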
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && CONST_INT_P (XEXP (opleft, 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));
2252 /* If we have (ior (and X C1) C2), simplify this by making
2253 C1 as small as possible if C1 actually changes. */
2254 if (CONST_INT_P (op1)
2255 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2256 || INTVAL (op1) > 0)
2257 && GET_CODE (op0) == AND
2258 && CONST_INT_P (XEXP (op0, 1))
2259 && CONST_INT_P (op1)
2260 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2261 return simplify_gen_binary (IOR, mode,
2263 (AND, mode, XEXP (op0, 0),
2264 GEN_INT (INTVAL (XEXP (op0, 1))
2268 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2269 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2270 the PLUS does not affect any of the bits in OP1: then we can do
2271 the IOR as a PLUS and we can associate. This is valid if OP1
2272 can be safely shifted left C bits. */
2273 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2274 && GET_CODE (XEXP (op0, 0)) == PLUS
2275 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2276 && CONST_INT_P (XEXP (op0, 1))
2277 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2279 int count = INTVAL (XEXP (op0, 1));
2280 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2282 if (mask >> count == INTVAL (trueop1)
2283 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2284 return simplify_gen_binary (ASHIFTRT, mode,
2285 plus_constant (XEXP (op0, 0), mask),
2289 tem = simplify_associative_operation (code, mode, op0, op1);
2295 if (trueop1 == const0_rtx)
2297 if (CONST_INT_P (trueop1)
2298 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2299 == GET_MODE_MASK (mode)))
2300 return simplify_gen_unary (NOT, mode, op0, mode);
2301 if (rtx_equal_p (trueop0, trueop1)
2302 && ! side_effects_p (op0)
2303 && GET_MODE_CLASS (mode) != MODE_CC)
2304 return CONST0_RTX (mode);
2306 /* Canonicalize XOR of the most significant bit to PLUS. */
2307 if ((CONST_INT_P (op1)
2308 || GET_CODE (op1) == CONST_DOUBLE)
2309 && mode_signbit_p (mode, op1))
2310 return simplify_gen_binary (PLUS, mode, op0, op1);
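/* Editorial illustration (hypothetical helper) of the signbit
   canonicalization above, assuming a 32-bit mode: flipping the top bit
   and adding it agree modulo 2^32, because the carry out of the sign
   position is discarded. */
#include <assert.h>
static void
demo_xor_signbit (unsigned int x)
{
  assert ((x ^ 0x80000000u) == (x + 0x80000000u));
}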
2311 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2312 if ((CONST_INT_P (op1)
2313 || GET_CODE (op1) == CONST_DOUBLE)
2314 && GET_CODE (op0) == PLUS
2315 && (CONST_INT_P (XEXP (op0, 1))
2316 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2317 && mode_signbit_p (mode, XEXP (op0, 1)))
2318 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2319 simplify_gen_binary (XOR, mode, op1,
2322 /* If we are XORing two things that have no bits in common,
2323 convert them into an IOR. This helps to detect rotation encoded
2324 using those methods and possibly other simplifications. */
2326 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2327 && (nonzero_bits (op0, mode)
2328 & nonzero_bits (op1, mode)) == 0)
2329 return (simplify_gen_binary (IOR, mode, op0, op1));
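/* Editorial illustration (hypothetical helper): when the operands share
   no nonzero bits, XOR and IOR compute the same value, which is what
   makes the conversion above safe. */
#include <assert.h>
static void
demo_disjoint_xor (unsigned int a, unsigned int b)
{
  if ((a & b) == 0)
    assert ((a ^ b) == (a | b));
}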
2331 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2332 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for (NOT y). */
2335 int num_negated = 0;
2337 if (GET_CODE (op0) == NOT)
2338 num_negated++, op0 = XEXP (op0, 0);
2339 if (GET_CODE (op1) == NOT)
2340 num_negated++, op1 = XEXP (op1, 0);
2342 if (num_negated == 2)
2343 return simplify_gen_binary (XOR, mode, op0, op1);
2344 else if (num_negated == 1)
2345 return simplify_gen_unary (NOT, mode,
2346 simplify_gen_binary (XOR, mode, op0, op1),
2350 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2351 correspond to a machine insn or result in further simplifications
2352 if B is a constant. */
2354 if (GET_CODE (op0) == AND
2355 && rtx_equal_p (XEXP (op0, 1), op1)
2356 && ! side_effects_p (op1))
2357 return simplify_gen_binary (AND, mode,
2358 simplify_gen_unary (NOT, mode,
2359 XEXP (op0, 0), mode),
2362 else if (GET_CODE (op0) == AND
2363 && rtx_equal_p (XEXP (op0, 0), op1)
2364 && ! side_effects_p (op1))
2365 return simplify_gen_binary (AND, mode,
2366 simplify_gen_unary (NOT, mode,
2367 XEXP (op0, 1), mode),
2370 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2371 comparison if STORE_FLAG_VALUE is 1. */
2372 if (STORE_FLAG_VALUE == 1
2373 && trueop1 == const1_rtx
2374 && COMPARISON_P (op0)
2375 && (reversed = reversed_comparison (op0, mode)))
2378 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2379 is (lt foo (const_int 0)), so we can perform the above
2380 simplification if STORE_FLAG_VALUE is 1. */
2382 if (STORE_FLAG_VALUE == 1
2383 && trueop1 == const1_rtx
2384 && GET_CODE (op0) == LSHIFTRT
2385 && CONST_INT_P (XEXP (op0, 1))
2386 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2387 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
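/* Editorial illustration (hypothetical helper), assuming a 32-bit int
   and STORE_FLAG_VALUE == 1: the logical shift x >> 31 computes "x < 0",
   and XOR with 1 reverses that to "x >= 0", i.e. the GE generated above. */
#include <assert.h>
static void
demo_lshiftrt_xor (int x)
{
  assert ((((unsigned int) x >> 31) ^ 1u) == (unsigned int) (x >= 0));
}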
2389 /* (xor (comparison foo bar) (const_int sign-bit))
2390 when STORE_FLAG_VALUE is the sign bit. */
2391 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2392 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2393 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2394 && trueop1 == const_true_rtx
2395 && COMPARISON_P (op0)
2396 && (reversed = reversed_comparison (op0, mode)))
2399 tem = simplify_associative_operation (code, mode, op0, op1);
2405 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2407 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2409 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2410 HOST_WIDE_INT nzop1;
2411 if (CONST_INT_P (trueop1))
2413 HOST_WIDE_INT val1 = INTVAL (trueop1);
2414 /* If we are turning off bits already known off in OP0, we need not do an AND. */
2416 if ((nzop0 & ~val1) == 0)
2419 nzop1 = nonzero_bits (trueop1, mode);
2420 /* If we are clearing all the nonzero bits, the result is zero. */
2421 if ((nzop1 & nzop0) == 0
2422 && !side_effects_p (op0) && !side_effects_p (op1))
2423 return CONST0_RTX (mode);
2425 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2426 && GET_MODE_CLASS (mode) != MODE_CC)
2429 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2430 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2431 && ! side_effects_p (op0)
2432 && GET_MODE_CLASS (mode) != MODE_CC)
2433 return CONST0_RTX (mode);
2435 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2436 there are no nonzero bits of C outside of X's mode. */
2437 if ((GET_CODE (op0) == SIGN_EXTEND
2438 || GET_CODE (op0) == ZERO_EXTEND)
2439 && CONST_INT_P (trueop1)
2440 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2441 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2442 & INTVAL (trueop1)) == 0)
2444 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2445 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2446 gen_int_mode (INTVAL (trueop1),
2448 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2451 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2452 we might be able to further simplify the AND with X and potentially
2453 remove the truncation altogether. */
2454 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2456 rtx x = XEXP (op0, 0);
2457 enum machine_mode xmode = GET_MODE (x);
2458 tem = simplify_gen_binary (AND, xmode, x,
2459 gen_int_mode (INTVAL (trueop1), xmode));
2460 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2463 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2464 if (GET_CODE (op0) == IOR
2465 && CONST_INT_P (trueop1)
2466 && CONST_INT_P (XEXP (op0, 1)))
2468 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2469 return simplify_gen_binary (IOR, mode,
2470 simplify_gen_binary (AND, mode,
2471 XEXP (op0, 0), op1),
2472 gen_int_mode (tmp, mode));
2475 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2476 insn (and may simplify more). */
2477 if (GET_CODE (op0) == XOR
2478 && rtx_equal_p (XEXP (op0, 0), op1)
2479 && ! side_effects_p (op1))
2480 return simplify_gen_binary (AND, mode,
2481 simplify_gen_unary (NOT, mode,
2482 XEXP (op0, 1), mode),
2485 if (GET_CODE (op0) == XOR
2486 && rtx_equal_p (XEXP (op0, 1), op1)
2487 && ! side_effects_p (op1))
2488 return simplify_gen_binary (AND, mode,
2489 simplify_gen_unary (NOT, mode,
2490 XEXP (op0, 0), mode),
2493 /* Similarly for (~(A ^ B)) & A. */
2494 if (GET_CODE (op0) == NOT
2495 && GET_CODE (XEXP (op0, 0)) == XOR
2496 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2497 && ! side_effects_p (op1))
2498 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2500 if (GET_CODE (op0) == NOT
2501 && GET_CODE (XEXP (op0, 0)) == XOR
2502 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2503 && ! side_effects_p (op1))
2504 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2506 /* Convert (A | B) & A to A. */
2507 if (GET_CODE (op0) == IOR
2508 && (rtx_equal_p (XEXP (op0, 0), op1)
2509 || rtx_equal_p (XEXP (op0, 1), op1))
2510 && ! side_effects_p (XEXP (op0, 0))
2511 && ! side_effects_p (XEXP (op0, 1)))
2514 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2515 ((A & N) + B) & M -> (A + B) & M
2516 Similarly if (N & M) == 0,
2517 ((A | N) + B) & M -> (A + B) & M
2518 and for - instead of + and/or ^ instead of |.
2519 Also, if (N & M) == 0, then
2520 (A +- N) & M -> A & M. */
2521 if (CONST_INT_P (trueop1)
2522 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2523 && ~INTVAL (trueop1)
2524 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2525 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2530 pmop[0] = XEXP (op0, 0);
2531 pmop[1] = XEXP (op0, 1);
2533 if (CONST_INT_P (pmop[1])
2534 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2535 return simplify_gen_binary (AND, mode, pmop[0], op1);
2537 for (which = 0; which < 2; which++)
2540 switch (GET_CODE (tem))
2543 if (CONST_INT_P (XEXP (tem, 1))
2544 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2545 == INTVAL (trueop1))
2546 pmop[which] = XEXP (tem, 0);
2550 if (CONST_INT_P (XEXP (tem, 1))
2551 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2552 pmop[which] = XEXP (tem, 0);
2559 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2561 tem = simplify_gen_binary (GET_CODE (op0), mode,
2563 return simplify_gen_binary (code, mode, tem, op1);
2567 /* (and X (ior (not X) Y)) -> (and X Y) */
2568 if (GET_CODE (op1) == IOR
2569 && GET_CODE (XEXP (op1, 0)) == NOT
2570 && op0 == XEXP (XEXP (op1, 0), 0))
2571 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2573 /* (and (ior (not X) Y) X) -> (and X Y) */
2574 if (GET_CODE (op0) == IOR
2575 && GET_CODE (XEXP (op0, 0)) == NOT
2576 && op1 == XEXP (XEXP (op0, 0), 0))
2577 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
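/* Editorial illustration (hypothetical helper) of the two folds above:
   anything contributed through (not X) is masked back out by the outer
   AND with X, leaving just (and X Y). */
#include <assert.h>
static void
demo_and_ior_not (unsigned int x, unsigned int y)
{
  assert ((x & (~x | y)) == (x & y));
}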
2579 tem = simplify_associative_operation (code, mode, op0, op1);
2585 /* 0/x is 0 (or x&0 if x has side-effects). */
2586 if (trueop0 == CONST0_RTX (mode))
2588 if (side_effects_p (op1))
2589 return simplify_gen_binary (AND, mode, op1, trueop0);
2593 if (trueop1 == CONST1_RTX (mode))
2594 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2595 /* Convert divide by power of two into shift. */
2596 if (CONST_INT_P (trueop1)
2597 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2598 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
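/* Editorial illustration (hypothetical helper): unsigned division by a
   power of two is a logical right shift by its log2; here
   exact_log2 (8) would be 3. */
#include <assert.h>
static void
demo_udiv_pow2 (unsigned int x)
{
  assert (x / 8u == x >> 3);
}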
2602 /* Handle floating point and integers separately. */
2603 if (SCALAR_FLOAT_MODE_P (mode))
2605 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2606 safe for modes with NaNs, since 0.0 / 0.0 will then be
2607 NaN rather than 0.0. Nor is it safe for modes with signed
2608 zeros, since dividing 0 by a negative number gives -0.0. */
2609 if (trueop0 == CONST0_RTX (mode)
2610 && !HONOR_NANS (mode)
2611 && !HONOR_SIGNED_ZEROS (mode)
2612 && ! side_effects_p (op1))
2615 if (trueop1 == CONST1_RTX (mode)
2616 && !HONOR_SNANS (mode))
2619 if (GET_CODE (trueop1) == CONST_DOUBLE
2620 && trueop1 != CONST0_RTX (mode))
2623 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2626 if (REAL_VALUES_EQUAL (d, dconstm1)
2627 && !HONOR_SNANS (mode))
2628 return simplify_gen_unary (NEG, mode, op0, mode);
2630 /* Change FP division by a constant into multiplication.
2631 Only do this with -freciprocal-math. */
2632 if (flag_reciprocal_math
2633 && !REAL_VALUES_EQUAL (d, dconst0))
2635 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2636 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2637 return simplify_gen_binary (MULT, mode, op0, tem);
2643 /* 0/x is 0 (or x&0 if x has side-effects). */
2644 if (trueop0 == CONST0_RTX (mode))
2646 if (side_effects_p (op1))
2647 return simplify_gen_binary (AND, mode, op1, trueop0);
2651 if (trueop1 == CONST1_RTX (mode))
2652 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2654 if (trueop1 == constm1_rtx)
2656 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2657 return simplify_gen_unary (NEG, mode, x, mode);
2663 /* 0%x is 0 (or x&0 if x has side-effects). */
2664 if (trueop0 == CONST0_RTX (mode))
2666 if (side_effects_p (op1))
2667 return simplify_gen_binary (AND, mode, op1, trueop0);
2670 /* x%1 is 0 (or x&0 if x has side-effects). */
2671 if (trueop1 == CONST1_RTX (mode))
2673 if (side_effects_p (op0))
2674 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2675 return CONST0_RTX (mode);
2677 /* Implement modulus by power of two as AND. */
2678 if (CONST_INT_P (trueop1)
2679 && exact_log2 (INTVAL (trueop1)) > 0)
2680 return simplify_gen_binary (AND, mode, op0,
2681 GEN_INT (INTVAL (op1) - 1));
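/* Editorial illustration (hypothetical helper): unsigned modulus by a
   power of two keeps only the low bits, i.e. an AND with 2^k - 1. */
#include <assert.h>
static void
demo_umod_pow2 (unsigned int x)
{
  assert (x % 8u == (x & 7u));
}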
2685 /* 0%x is 0 (or x&0 if x has side-effects). */
2686 if (trueop0 == CONST0_RTX (mode))
2688 if (side_effects_p (op1))
2689 return simplify_gen_binary (AND, mode, op1, trueop0);
2692 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2693 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2695 if (side_effects_p (op0))
2696 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2697 return CONST0_RTX (mode);
2704 if (trueop1 == CONST0_RTX (mode))
2706 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2708 /* Rotating ~0 always results in ~0. */
2709 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2710 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2711 && ! side_effects_p (op1))
2714 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2716 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2717 if (val != INTVAL (op1))
2718 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2725 if (trueop1 == CONST0_RTX (mode))
2727 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2729 goto canonicalize_shift;
2732 if (trueop1 == CONST0_RTX (mode))
2734 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2736 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2737 if (GET_CODE (op0) == CLZ
2738 && CONST_INT_P (trueop1)
2739 && STORE_FLAG_VALUE == 1
2740 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2742 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2743 unsigned HOST_WIDE_INT zero_val = 0;
2745 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2746 && zero_val == GET_MODE_BITSIZE (imode)
2747 && INTVAL (trueop1) == exact_log2 (zero_val))
2748 return simplify_gen_relational (EQ, mode, imode,
2749 XEXP (op0, 0), const0_rtx);
2751 goto canonicalize_shift;
2754 if (width <= HOST_BITS_PER_WIDE_INT
2755 && CONST_INT_P (trueop1)
2756 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2757 && ! side_effects_p (op0))
2759 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2761 tem = simplify_associative_operation (code, mode, op0, op1);
2767 if (width <= HOST_BITS_PER_WIDE_INT
2768 && CONST_INT_P (trueop1)
2769 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2770 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2771 && ! side_effects_p (op0))
2773 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2775 tem = simplify_associative_operation (code, mode, op0, op1);
2781 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2783 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2785 tem = simplify_associative_operation (code, mode, op0, op1);
2791 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2793 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2795 tem = simplify_associative_operation (code, mode, op0, op1);
2808 /* ??? There are simplifications that can be done. */
2812 if (!VECTOR_MODE_P (mode))
2814 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2815 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2816 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2817 gcc_assert (XVECLEN (trueop1, 0) == 1);
2818 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2820 if (GET_CODE (trueop0) == CONST_VECTOR)
2821 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2824 /* Extract a scalar element from a nested VEC_SELECT expression
2825 (with optional nested VEC_CONCAT expression). Some targets
2826 (i386) extract a scalar element from a vector using a chain of
2827 nested VEC_SELECT expressions. When the input operand is a memory
2828 operand, this operation can be simplified to a simple scalar
2829 load from an offset memory address. */
2830 if (GET_CODE (trueop0) == VEC_SELECT)
2832 rtx op0 = XEXP (trueop0, 0);
2833 rtx op1 = XEXP (trueop0, 1);
2835 enum machine_mode opmode = GET_MODE (op0);
2836 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2837 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2839 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2845 gcc_assert (GET_CODE (op1) == PARALLEL);
2846 gcc_assert (i < n_elts);
2848 /* Select the element pointed to by the nested selector. */
2849 elem = INTVAL (XVECEXP (op1, 0, i));
2851 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2852 if (GET_CODE (op0) == VEC_CONCAT)
2854 rtx op00 = XEXP (op0, 0);
2855 rtx op01 = XEXP (op0, 1);
2857 enum machine_mode mode00, mode01;
2858 int n_elts00, n_elts01;
2860 mode00 = GET_MODE (op00);
2861 mode01 = GET_MODE (op01);
2863 /* Find out the number of elements of each operand. */
2864 if (VECTOR_MODE_P (mode00))
2866 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2867 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2872 if (VECTOR_MODE_P (mode01))
2874 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2875 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2880 gcc_assert (n_elts == n_elts00 + n_elts01);
2882 /* Select the correct operand of VEC_CONCAT
2883 and adjust the selector. */
2884 if (elem < n_elts01)
2895 vec = rtvec_alloc (1);
2896 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2898 tmp = gen_rtx_fmt_ee (code, mode,
2899 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2905 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2906 gcc_assert (GET_MODE_INNER (mode)
2907 == GET_MODE_INNER (GET_MODE (trueop0)));
2908 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2910 if (GET_CODE (trueop0) == CONST_VECTOR)
2912 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2913 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2914 rtvec v = rtvec_alloc (n_elts);
2917 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2918 for (i = 0; i < n_elts; i++)
2920 rtx x = XVECEXP (trueop1, 0, i);
2922 gcc_assert (CONST_INT_P (x));
2923 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2927 return gen_rtx_CONST_VECTOR (mode, v);
2931 if (XVECLEN (trueop1, 0) == 1
2932 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
2933 && GET_CODE (trueop0) == VEC_CONCAT)
2936 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2938 /* Try to find the element in the VEC_CONCAT. */
2939 while (GET_MODE (vec) != mode
2940 && GET_CODE (vec) == VEC_CONCAT)
2942 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2943 if (offset < vec_size)
2944 vec = XEXP (vec, 0);
2948 vec = XEXP (vec, 1);
2950 vec = avoid_constant_pool_reference (vec);
2953 if (GET_MODE (vec) == mode)
2960 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2961 ? GET_MODE (trueop0)
2962 : GET_MODE_INNER (mode));
2963 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2964 ? GET_MODE (trueop1)
2965 : GET_MODE_INNER (mode));
2967 gcc_assert (VECTOR_MODE_P (mode));
2968 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2969 == GET_MODE_SIZE (mode));
2971 if (VECTOR_MODE_P (op0_mode))
2972 gcc_assert (GET_MODE_INNER (mode)
2973 == GET_MODE_INNER (op0_mode));
2975 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2977 if (VECTOR_MODE_P (op1_mode))
2978 gcc_assert (GET_MODE_INNER (mode)
2979 == GET_MODE_INNER (op1_mode));
2981 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2983 if ((GET_CODE (trueop0) == CONST_VECTOR
2984 || CONST_INT_P (trueop0)
2985 || GET_CODE (trueop0) == CONST_DOUBLE)
2986 && (GET_CODE (trueop1) == CONST_VECTOR
2987 || CONST_INT_P (trueop1)
2988 || GET_CODE (trueop1) == CONST_DOUBLE))
2990 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2991 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2992 rtvec v = rtvec_alloc (n_elts);
2994 unsigned in_n_elts = 1;
2996 if (VECTOR_MODE_P (op0_mode))
2997 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2998 for (i = 0; i < n_elts; i++)
3002 if (!VECTOR_MODE_P (op0_mode))
3003 RTVEC_ELT (v, i) = trueop0;
3005 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3009 if (!VECTOR_MODE_P (op1_mode))
3010 RTVEC_ELT (v, i) = trueop1;
3012 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3017 return gen_rtx_CONST_VECTOR (mode, v);
3030 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3033 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3035 unsigned int width = GET_MODE_BITSIZE (mode);
3037 if (VECTOR_MODE_P (mode)
3038 && code != VEC_CONCAT
3039 && GET_CODE (op0) == CONST_VECTOR
3040 && GET_CODE (op1) == CONST_VECTOR)
3042 unsigned n_elts = GET_MODE_NUNITS (mode);
3043 enum machine_mode op0mode = GET_MODE (op0);
3044 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3045 enum machine_mode op1mode = GET_MODE (op1);
3046 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3047 rtvec v = rtvec_alloc (n_elts);
3050 gcc_assert (op0_n_elts == n_elts);
3051 gcc_assert (op1_n_elts == n_elts);
3052 for (i = 0; i < n_elts; i++)
3054 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3055 CONST_VECTOR_ELT (op0, i),
3056 CONST_VECTOR_ELT (op1, i));
3059 RTVEC_ELT (v, i) = x;
3062 return gen_rtx_CONST_VECTOR (mode, v);
3065 if (VECTOR_MODE_P (mode)
3066 && code == VEC_CONCAT
3067 && (CONST_INT_P (op0)
3068 || GET_CODE (op0) == CONST_DOUBLE
3069 || GET_CODE (op0) == CONST_FIXED)
3070 && (CONST_INT_P (op1)
3071 || GET_CODE (op1) == CONST_DOUBLE
3072 || GET_CODE (op1) == CONST_FIXED))
3074 unsigned n_elts = GET_MODE_NUNITS (mode);
3075 rtvec v = rtvec_alloc (n_elts);
3077 gcc_assert (n_elts >= 2);
3080 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3081 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3083 RTVEC_ELT (v, 0) = op0;
3084 RTVEC_ELT (v, 1) = op1;
3088 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3089 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3092 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3093 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3094 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3096 for (i = 0; i < op0_n_elts; ++i)
3097 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3098 for (i = 0; i < op1_n_elts; ++i)
3099 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3102 return gen_rtx_CONST_VECTOR (mode, v);
3105 if (SCALAR_FLOAT_MODE_P (mode)
3106 && GET_CODE (op0) == CONST_DOUBLE
3107 && GET_CODE (op1) == CONST_DOUBLE
3108 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3119 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3121 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3123 for (i = 0; i < 4; i++)
3140 real_from_target (&r, tmp0, mode);
3141 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3145 REAL_VALUE_TYPE f0, f1, value, result;
3148 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3149 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3150 real_convert (&f0, mode, &f0);
3151 real_convert (&f1, mode, &f1);
3153 if (HONOR_SNANS (mode)
3154 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3158 && REAL_VALUES_EQUAL (f1, dconst0)
3159 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3162 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3163 && flag_trapping_math
3164 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3166 int s0 = REAL_VALUE_NEGATIVE (f0);
3167 int s1 = REAL_VALUE_NEGATIVE (f1);
3172 /* Inf + -Inf = NaN plus exception. */
3177 /* Inf - Inf = NaN plus exception. */
3182 /* Inf / Inf = NaN plus exception. */
3189 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3190 && flag_trapping_math
3191 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3192 || (REAL_VALUE_ISINF (f1)
3193 && REAL_VALUES_EQUAL (f0, dconst0))))
3194 /* Inf * 0 = NaN plus exception. */
3197 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3199 real_convert (&result, mode, &value);
3201 /* Don't constant fold this floating point operation if
3202 the result has overflowed and flag_trapping_math is set. */
3204 if (flag_trapping_math
3205 && MODE_HAS_INFINITIES (mode)
3206 && REAL_VALUE_ISINF (result)
3207 && !REAL_VALUE_ISINF (f0)
3208 && !REAL_VALUE_ISINF (f1))
3209 /* Overflow plus exception. */
3212 /* Don't constant fold this floating point operation if the
3213 result may depend upon the run-time rounding mode and
3214 flag_rounding_math is set, or if GCC's software emulation
3215 is unable to accurately represent the result. */
3217 if ((flag_rounding_math
3218 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3219 && (inexact || !real_identical (&result, &value)))
3222 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3226 /* We can fold some multi-word operations. */
3227 if (GET_MODE_CLASS (mode) == MODE_INT
3228 && width == HOST_BITS_PER_WIDE_INT * 2
3229 && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
3230 && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
3232 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3233 HOST_WIDE_INT h1, h2, hv, ht;
3235 if (GET_CODE (op0) == CONST_DOUBLE)
3236 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3238 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3240 if (GET_CODE (op1) == CONST_DOUBLE)
3241 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3243 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3248 /* A - B == A + (-B). */
3249 neg_double (l2, h2, &lv, &hv);
3252 /* Fall through.... */
3255 add_double (l1, h1, l2, h2, &lv, &hv);
3259 mul_double (l1, h1, l2, h2, &lv, &hv);
3263 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3264 &lv, &hv, <, &ht))
3269 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3270 <, &ht, &lv, &hv))
3275 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3276 &lv, &hv, <, &ht))
3281 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3282 <, &ht, &lv, &hv))
3287 lv = l1 & l2, hv = h1 & h2;
3291 lv = l1 | l2, hv = h1 | h2;
3295 lv = l1 ^ l2, hv = h1 ^ h2;
3301 && ((unsigned HOST_WIDE_INT) l1
3302 < (unsigned HOST_WIDE_INT) l2)))
3311 && ((unsigned HOST_WIDE_INT) l1
3312 > (unsigned HOST_WIDE_INT) l2)))
3319 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3321 && ((unsigned HOST_WIDE_INT) l1
3322 < (unsigned HOST_WIDE_INT) l2)))
3329 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3331 && ((unsigned HOST_WIDE_INT) l1
3332 > (unsigned HOST_WIDE_INT) l2)))
3338 case LSHIFTRT: case ASHIFTRT:
3340 case ROTATE: case ROTATERT:
3341 if (SHIFT_COUNT_TRUNCATED)
3342 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3344 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3347 if (code == LSHIFTRT || code == ASHIFTRT)
3348 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3350 else if (code == ASHIFT)
3351 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3352 else if (code == ROTATE)
3353 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3354 else /* code == ROTATERT */
3355 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3362 return immed_double_const (lv, hv, mode);
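/* Editorial sketch of the double-word addition performed by add_double
   above, with uint64_t halves standing in for the (low, high)
   HOST_WIDE_INT pairs; demo_add_double is a hypothetical name. */
#include <stdint.h>
static void
demo_add_double (uint64_t l1, uint64_t h1, uint64_t l2, uint64_t h2,
                 uint64_t *lv, uint64_t *hv)
{
  *lv = l1 + l2;                /* low word, computed modulo 2^64 */
  *hv = h1 + h2 + (*lv < l1);   /* carry in when the low word wrapped */
}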
3365 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3366 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3368 /* Get the integer argument values in two forms:
3369 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3371 arg0 = INTVAL (op0);
3372 arg1 = INTVAL (op1);
3374 if (width < HOST_BITS_PER_WIDE_INT)
3376 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3377 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3380 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3381 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3384 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3385 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3393 /* Compute the value of the arithmetic. */
3398 val = arg0s + arg1s;
3402 val = arg0s - arg1s;
3406 val = arg0s * arg1s;
3411 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3414 val = arg0s / arg1s;
3419 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3422 val = arg0s % arg1s;
3427 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3430 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3435 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3438 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3456 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3457 the value is in range. We can't return any old value for
3458 out-of-range arguments because either the middle-end (via
3459 shift_truncation_mask) or the back-end might be relying on
3460 target-specific knowledge. Nor can we rely on
3461 shift_truncation_mask, since the shift might not be part of an
3462 ashlM3, lshrM3 or ashrM3 instruction. */
3463 if (SHIFT_COUNT_TRUNCATED)
3464 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3465 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3468 val = (code == ASHIFT
3469 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3470 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3472 /* Sign-extend the result for arithmetic right shifts. */
3473 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3474 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3482 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3483 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3491 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3492 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3496 /* Do nothing here. */
3500 val = arg0s <= arg1s ? arg0s : arg1s;
3504 val = ((unsigned HOST_WIDE_INT) arg0
3505 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3509 val = arg0s > arg1s ? arg0s : arg1s;
3513 val = ((unsigned HOST_WIDE_INT) arg0
3514 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3527 /* ??? There are simplifications that can be done. */
3534 return gen_int_mode (val, mode);
3542 /* Simplify a PLUS or MINUS, at least one of whose operands may be another PLUS or MINUS.
3545 Rather than test for specific cases, we do this by a brute-force method
3546 and do all possible simplifications until no more changes occur. Then
3547 we rebuild the operation. */
3549 struct simplify_plus_minus_op_data
3556 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3560 result = (commutative_operand_precedence (y)
3561 - commutative_operand_precedence (x));
3565 /* Group together equal REGs to do more simplification. */
3566 if (REG_P (x) && REG_P (y))
3567 return REGNO (x) > REGNO (y);
3573 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3576 struct simplify_plus_minus_op_data ops[8];
3578 int n_ops = 2, input_ops = 2;
3579 int changed, n_constants = 0, canonicalized = 0;
3582 memset (ops, 0, sizeof ops);
3584 /* Set up the two operands and then expand them until nothing has been
3585 changed. If we run out of room in our array, give up; this should
3586 almost never happen. */
3591 ops[1].neg = (code == MINUS);
3597 for (i = 0; i < n_ops; i++)
3599 rtx this_op = ops[i].op;
3600 int this_neg = ops[i].neg;
3601 enum rtx_code this_code = GET_CODE (this_op);
3610 ops[n_ops].op = XEXP (this_op, 1);
3611 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3614 ops[i].op = XEXP (this_op, 0);
3617 canonicalized |= this_neg;
3621 ops[i].op = XEXP (this_op, 0);
3622 ops[i].neg = ! this_neg;
3629 && GET_CODE (XEXP (this_op, 0)) == PLUS
3630 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3631 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3633 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3634 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3635 ops[n_ops].neg = this_neg;
3643 /* ~a -> (-a - 1) */
3646 ops[n_ops].op = constm1_rtx;
3647 ops[n_ops++].neg = this_neg;
3648 ops[i].op = XEXP (this_op, 0);
3649 ops[i].neg = !this_neg;
3659 ops[i].op = neg_const_int (mode, this_op);
3673 if (n_constants > 1)
3676 gcc_assert (n_ops >= 2);
3678 /* If we only have two operands, we can avoid the loops. */
3681 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3684 /* Get the two operands. Be careful with the order, especially for
3685 the cases where code == MINUS. */
3686 if (ops[0].neg && ops[1].neg)
3688 lhs = gen_rtx_NEG (mode, ops[0].op);
3691 else if (ops[0].neg)
3702 return simplify_const_binary_operation (code, mode, lhs, rhs);
3705 /* Now simplify each pair of operands until nothing changes. */
3708 /* Insertion sort is good enough for an eight-element array. */
3709 for (i = 1; i < n_ops; i++)
3711 struct simplify_plus_minus_op_data save;
3713 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3719 ops[j + 1] = ops[j];
3720 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3725 for (i = n_ops - 1; i > 0; i--)
3726 for (j = i - 1; j >= 0; j--)
3728 rtx lhs = ops[j].op, rhs = ops[i].op;
3729 int lneg = ops[j].neg, rneg = ops[i].neg;
3731 if (lhs != 0 && rhs != 0)
3733 enum rtx_code ncode = PLUS;
3739 tem = lhs, lhs = rhs, rhs = tem;
3741 else if (swap_commutative_operands_p (lhs, rhs))
3742 tem = lhs, lhs = rhs, rhs = tem;
3744 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3745 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3747 rtx tem_lhs, tem_rhs;
3749 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3750 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3751 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3753 if (tem && !CONSTANT_P (tem))
3754 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3757 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3759 /* Reject "simplifications" that just wrap the two
3760 arguments in a CONST. Failure to do so can result
3761 in infinite recursion with simplify_binary_operation
3762 when it calls us to simplify CONST operations. */
3764 && ! (GET_CODE (tem) == CONST
3765 && GET_CODE (XEXP (tem, 0)) == ncode
3766 && XEXP (XEXP (tem, 0), 0) == lhs
3767 && XEXP (XEXP (tem, 0), 1) == rhs))
3770 if (GET_CODE (tem) == NEG)
3771 tem = XEXP (tem, 0), lneg = !lneg;
3772 if (CONST_INT_P (tem) && lneg)
3773 tem = neg_const_int (mode, tem), lneg = 0;
3777 ops[j].op = NULL_RTX;
3784 /* If nothing changed, fail. */
3788 /* Pack all the operands to the lower-numbered entries. */
3789 for (i = 0, j = 0; j < n_ops; j++)
3799 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3801 && CONST_INT_P (ops[1].op)
3802 && CONSTANT_P (ops[0].op)
3804 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3806 /* We suppressed creation of trivial CONST expressions in the
3807 combination loop to avoid recursion. Create one manually now.
3808 The combination loop should have ensured that there is exactly
3809 one CONST_INT, and the sort will have ensured that it is last
3810 in the array and that any other constant will be next-to-last. */
3813 && CONST_INT_P (ops[n_ops - 1].op)
3814 && CONSTANT_P (ops[n_ops - 2].op))
3816 rtx value = ops[n_ops - 1].op;
3817 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3818 value = neg_const_int (mode, value);
3819 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3823 /* Put a non-negated operand first, if possible. */
3825 for (i = 0; i < n_ops && ops[i].neg; i++)
3828 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3837 /* Now make the result by performing the requested operations. */
3839 for (i = 1; i < n_ops; i++)
3840 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3841 mode, result, ops[i].op);
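/* Editorial illustration (hypothetical helper) of what the flattened
   operand list buys: matching positive and negative occurrences of a
   term cancel pairwise, shown here in wraparound unsigned arithmetic. */
#include <assert.h>
static void
demo_plus_minus_cancel (unsigned int a, unsigned int b, unsigned int c)
{
  assert ((a - b) + (b - c) == a - c);
}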
3846 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3848 plus_minus_operand_p (const_rtx x)
3850 return GET_CODE (x) == PLUS
3851 || GET_CODE (x) == MINUS
3852 || (GET_CODE (x) == CONST
3853 && GET_CODE (XEXP (x, 0)) == PLUS
3854 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3855 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3858 /* Like simplify_binary_operation except used for relational operators.
3859 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3860 not both be VOIDmode as well.
3862 CMP_MODE specifies the mode in which the comparison is done, so it is
3863 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3864 the operands or, if both are VOIDmode, the operands are compared in
3865 "infinite precision". */
3867 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3868 enum machine_mode cmp_mode, rtx op0, rtx op1)
3870 rtx tem, trueop0, trueop1;
3872 if (cmp_mode == VOIDmode)
3873 cmp_mode = GET_MODE (op0);
3874 if (cmp_mode == VOIDmode)
3875 cmp_mode = GET_MODE (op1);
3877 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3880 if (SCALAR_FLOAT_MODE_P (mode))
3882 if (tem == const0_rtx)
3883 return CONST0_RTX (mode);
3884 #ifdef FLOAT_STORE_FLAG_VALUE
3886 REAL_VALUE_TYPE val;
3887 val = FLOAT_STORE_FLAG_VALUE (mode);
3888 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3894 if (VECTOR_MODE_P (mode))
3896 if (tem == const0_rtx)
3897 return CONST0_RTX (mode);
3898 #ifdef VECTOR_STORE_FLAG_VALUE
3903 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3904 if (val == NULL_RTX)
3906 if (val == const1_rtx)
3907 return CONST1_RTX (mode);
3909 units = GET_MODE_NUNITS (mode);
3910 v = rtvec_alloc (units);
3911 for (i = 0; i < units; i++)
3912 RTVEC_ELT (v, i) = val;
3913 return gen_rtx_raw_CONST_VECTOR (mode, v);
3923 /* For the following tests, ensure const0_rtx is op1. */
3924 if (swap_commutative_operands_p (op0, op1)
3925 || (op0 == const0_rtx && op1 != const0_rtx))
3926 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3928 /* If op0 is a compare, extract the comparison arguments from it. */
3929 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3930 return simplify_gen_relational (code, mode, VOIDmode,
3931 XEXP (op0, 0), XEXP (op0, 1));
3933 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3937 trueop0 = avoid_constant_pool_reference (op0);
3938 trueop1 = avoid_constant_pool_reference (op1);
3939 return simplify_relational_operation_1 (code, mode, cmp_mode,
3943 /* This part of simplify_relational_operation is only used when CMP_MODE
3944 is not in class MODE_CC (i.e. it is a real comparison).
3946 MODE is the mode of the result, while CMP_MODE specifies the mode
3947 in which the comparison is done, so it is the mode of the operands. */
3950 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3951 enum machine_mode cmp_mode, rtx op0, rtx op1)
3953 enum rtx_code op0code = GET_CODE (op0);
3955 if (op1 == const0_rtx && COMPARISON_P (op0))
3957 /* If op0 is a comparison, extract the comparison arguments from it. */
3961 if (GET_MODE (op0) == mode)
3962 return simplify_rtx (op0);
3964 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3965 XEXP (op0, 0), XEXP (op0, 1));
3967 else if (code == EQ)
3969 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3970 if (new_code != UNKNOWN)
3971 return simplify_gen_relational (new_code, mode, VOIDmode,
3972 XEXP (op0, 0), XEXP (op0, 1));
3976 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
3977 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
3978 if ((code == LTU || code == GEU)
3979 && GET_CODE (op0) == PLUS
3980 && CONST_INT_P (XEXP (op0, 1))
3981 && (rtx_equal_p (op1, XEXP (op0, 0))
3982 || rtx_equal_p (op1, XEXP (op0, 1))))
3985 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
3986 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
3987 cmp_mode, XEXP (op0, 0), new_cmp);
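/* Editorial illustration (hypothetical helper) with a hypothetical
   nonzero constant C == 5 (a zero addend would already have been folded
   away): a + C wraps around exactly when a >= -C in unsigned arithmetic,
   so (LTU (PLUS a C) C) is (GEU a -C). */
#include <assert.h>
static void
demo_ltu_plus_const (unsigned int a)
{
  assert (((a + 5u) < 5u) == (a >= (0u - 5u)));
}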
3990 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
3991 if ((code == LTU || code == GEU)
3992 && GET_CODE (op0) == PLUS
3993 && rtx_equal_p (op1, XEXP (op0, 1))
3994 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
3995 && !rtx_equal_p (op1, XEXP (op0, 0)))
3996 return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));
3998 if (op1 == const0_rtx)
4000 /* Canonicalize (GTU x 0) as (NE x 0). */
4002 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4003 /* Canonicalize (LEU x 0) as (EQ x 0). */
4005 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4007 else if (op1 == const1_rtx)
4012 /* Canonicalize (GE x 1) as (GT x 0). */
4013 return simplify_gen_relational (GT, mode, cmp_mode,
4016 /* Canonicalize (GEU x 1) as (NE x 0). */
4017 return simplify_gen_relational (NE, mode, cmp_mode,
4020 /* Canonicalize (LT x 1) as (LE x 0). */
4021 return simplify_gen_relational (LE, mode, cmp_mode,
4024 /* Canonicalize (LTU x 1) as (EQ x 0). */
4025 return simplify_gen_relational (EQ, mode, cmp_mode,
4031 else if (op1 == constm1_rtx)
4033 /* Canonicalize (LE x -1) as (LT x 0). */
4035 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4036 /* Canonicalize (GT x -1) as (GE x 0). */
4038 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4041 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4042 if ((code == EQ || code == NE)
4043 && (op0code == PLUS || op0code == MINUS)
4045 && CONSTANT_P (XEXP (op0, 1))
4046 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4048 rtx x = XEXP (op0, 0);
4049 rtx c = XEXP (op0, 1);
4051 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4053 return simplify_gen_relational (code, mode, cmp_mode, x, c);
4056 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4057 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4059 && op1 == const0_rtx
4060 && GET_MODE_CLASS (mode) == MODE_INT
4061 && cmp_mode != VOIDmode
4062 /* ??? Work-around BImode bugs in the ia64 backend. */
4064 && cmp_mode != BImode
4065 && nonzero_bits (op0, cmp_mode) == 1
4066 && STORE_FLAG_VALUE == 1)
4067 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4068 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4069 : lowpart_subreg (mode, op0, cmp_mode);
4071 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4072 if ((code == EQ || code == NE)
4073 && op1 == const0_rtx
4075 return simplify_gen_relational (code, mode, cmp_mode,
4076 XEXP (op0, 0), XEXP (op0, 1));
4078 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4079 if ((code == EQ || code == NE)
4081 && rtx_equal_p (XEXP (op0, 0), op1)
4082 && !side_effects_p (XEXP (op0, 0)))
4083 return simplify_gen_relational (code, mode, cmp_mode,
4084 XEXP (op0, 1), const0_rtx);
4086 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4087 if ((code == EQ || code == NE)
4089 && rtx_equal_p (XEXP (op0, 1), op1)
4090 && !side_effects_p (XEXP (op0, 1)))
4091 return simplify_gen_relational (code, mode, cmp_mode,
4092 XEXP (op0, 0), const0_rtx);
4094 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4095 if ((code == EQ || code == NE)
4097 && (CONST_INT_P (op1)
4098 || GET_CODE (op1) == CONST_DOUBLE)
4099 && (CONST_INT_P (XEXP (op0, 1))
4100 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4101 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4102 simplify_gen_binary (XOR, cmp_mode,
4103 XEXP (op0, 1), op1));
4105 if (op0code == POPCOUNT && op1 == const0_rtx)
4111 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4112 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4113 XEXP (op0, 0), const0_rtx);
4118 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4119 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4120 XEXP (op0, 0), const0_rtx);
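/* Editorial illustration (hypothetical helper): a value has zero set
   bits exactly when it is zero, so comparing a POPCOUNT against zero
   collapses to comparing its operand against zero. */
#include <assert.h>
static void
demo_popcount_zero (unsigned int x)
{
  assert ((__builtin_popcount (x) == 0) == (x == 0));
}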
4139 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4140 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4141 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4142 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4143 For floating-point comparisons, assume that the operands were ordered. */
4146 comparison_result (enum rtx_code code, int known_results)
4152 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4155 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4159 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4162 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4166 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4169 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4172 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4174 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4177 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4179 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4182 return const_true_rtx;
4190 /* Check if the given comparison (done in the given MODE) is actually a
4191 tautology or a contradiction.
4192 If no simplification is possible, this function returns zero.
4193 Otherwise, it returns either const_true_rtx or const0_rtx. */
4196 simplify_const_relational_operation (enum rtx_code code,
4197 enum machine_mode mode,
4204 gcc_assert (mode != VOIDmode
4205 || (GET_MODE (op0) == VOIDmode
4206 && GET_MODE (op1) == VOIDmode));
4208 /* If op0 is a compare, extract the comparison arguments from it. */
4209 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4211 op1 = XEXP (op0, 1);
4212 op0 = XEXP (op0, 0);
4214 if (GET_MODE (op0) != VOIDmode)
4215 mode = GET_MODE (op0);
4216 else if (GET_MODE (op1) != VOIDmode)
4217 mode = GET_MODE (op1);
4222 /* We can't simplify MODE_CC values since we don't know what the
4223 actual comparison is. */
4224 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4227 /* Make sure the constant is second. */
4228 if (swap_commutative_operands_p (op0, op1))
4230 tem = op0, op0 = op1, op1 = tem;
4231 code = swap_condition (code);
4234 trueop0 = avoid_constant_pool_reference (op0);
4235 trueop1 = avoid_constant_pool_reference (op1);
4237 /* For integer comparisons of A and B maybe we can simplify A - B and can
4238 then simplify a comparison of that with zero. If A and B are both either
4239 a register or a CONST_INT, this can't help; testing for these cases will
4240 prevent infinite recursion here and speed things up.
4242 We can only do this for EQ and NE comparisons, as otherwise we may
4243 lose or introduce overflow, which we cannot disregard as undefined
4244 since we do not know the signedness of the operation on either the
4245 left or the right-hand side of the comparison. */
4247 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4248 && (code == EQ || code == NE)
4249 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4250 && (REG_P (op1) || CONST_INT_P (trueop1)))
4251 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4252 /* We cannot do this if tem is a nonzero address. */
4253 && ! nonzero_address_p (tem))
4254 return simplify_const_relational_operation (signed_condition (code),
4255 mode, tem, const0_rtx);
4257 if (! HONOR_NANS (mode) && code == ORDERED)
4258 return const_true_rtx;
4260 if (! HONOR_NANS (mode) && code == UNORDERED)
4263 /* For modes without NaNs, if the two operands are equal, we know the
4264 result except if they have side-effects. Even with NaNs we know
4265 the result of unordered comparisons and, if signaling NaNs are
4266 irrelevant, also the result of LT/GT/LTGT. */
4267 if ((! HONOR_NANS (GET_MODE (trueop0))
4268 || code == UNEQ || code == UNLE || code == UNGE
4269 || ((code == LT || code == GT || code == LTGT)
4270 && ! HONOR_SNANS (GET_MODE (trueop0))))
4271 && rtx_equal_p (trueop0, trueop1)
4272 && ! side_effects_p (trueop0))
4273 return comparison_result (code, CMP_EQ);
4275 /* If the operands are floating-point constants, see if we can fold the result. */
4277 if (GET_CODE (trueop0) == CONST_DOUBLE
4278 && GET_CODE (trueop1) == CONST_DOUBLE
4279 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4281 REAL_VALUE_TYPE d0, d1;
4283 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4284 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4286 /* Comparisons are unordered iff at least one of the values is NaN. */
4287 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4297 return const_true_rtx;
4310 return comparison_result (code,
4311 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4312 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4315 /* Otherwise, see if the operands are both integers. */
4316 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4317 && (GET_CODE (trueop0) == CONST_DOUBLE
4318 || CONST_INT_P (trueop0))
4319 && (GET_CODE (trueop1) == CONST_DOUBLE
4320 || CONST_INT_P (trueop1)))
4322 int width = GET_MODE_BITSIZE (mode);
4323 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4324 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4326 /* Get the two words comprising each integer constant. */
4327 if (GET_CODE (trueop0) == CONST_DOUBLE)
4329 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4330 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4334 l0u = l0s = INTVAL (trueop0);
4335 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4338 if (GET_CODE (trueop1) == CONST_DOUBLE)
4340 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4341 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4345 l1u = l1s = INTVAL (trueop1);
4346 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4349 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4350 we have to sign or zero-extend the values. */
4351 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4353 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4354 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4356 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4357 l0s |= ((HOST_WIDE_INT) (-1) << width);
4359 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4360 l1s |= ((HOST_WIDE_INT) (-1) << width);
4362 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4363 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4365 if (h0u == h1u && l0u == l1u)
4366 return comparison_result (code, CMP_EQ);
4370 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4371 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4372 return comparison_result (code, cr);
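/* Editorial sketch of the unsigned double-word "less than" computed for
   CMP_LTU above: a lexicographic comparison of (high, low) pairs;
   demo_ltu_double is a hypothetical name. */
#include <stdint.h>
static int
demo_ltu_double (uint64_t h0, uint64_t l0, uint64_t h1, uint64_t l1)
{
  return h0 < h1 || (h0 == h1 && l0 < l1);
}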
4376 /* Optimize comparisons with upper and lower bounds. */
4377 if (SCALAR_INT_MODE_P (mode)
4378 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4379 && CONST_INT_P (trueop1))
4382 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4383 HOST_WIDE_INT val = INTVAL (trueop1);
4384 HOST_WIDE_INT mmin, mmax;
4394 /* Get a reduced range if the sign bit is zero. */
4395 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4402 rtx mmin_rtx, mmax_rtx;
4403 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4405 mmin = INTVAL (mmin_rtx);
4406 mmax = INTVAL (mmax_rtx);
4409 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4411 mmin >>= (sign_copies - 1);
4412 mmax >>= (sign_copies - 1);
4418 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4420 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4421 return const_true_rtx;
4422 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4427 return const_true_rtx;
4432 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4434 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4435 return const_true_rtx;
4436 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4441 return const_true_rtx;
4447 /* x == y is always false for y out of range. */
4448 if (val < mmin || val > mmax)
4452 /* x > y is always false for y >= mmax, always true for y < mmin. */
4454 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4456 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4457 return const_true_rtx;
4463 return const_true_rtx;
4466 /* x < y is always false for y <= mmin, always true for y > mmax. */
4468 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4470 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4471 return const_true_rtx;
4477 return const_true_rtx;
4481 /* x != y is always true for y out of range. */
4482 if (val < mmin || val > mmax)
4483 return const_true_rtx;
4491 /* Optimize integer comparisons with zero. */
4492 if (trueop1 == const0_rtx)
4494 /* Some addresses are known to be nonzero. We don't know
4495 their sign, but equality comparisons are known. */
4496 if (nonzero_address_p (trueop0))
4498 if (code == EQ || code == LEU)
4500 if (code == NE || code == GTU)
4501 return const_true_rtx;
4504 /* See if the first operand is an IOR with a constant. If so, we
4505 may be able to determine the result of this comparison. */
4506 if (GET_CODE (op0) == IOR)
4508 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4509 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4511 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4512 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4513 && (INTVAL (inner_const)
4514 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4523 return const_true_rtx;
4527 return const_true_rtx;
4541 /* Optimize comparison of ABS with zero. */
4542 if (trueop1 == CONST0_RTX (mode)
4543 && (GET_CODE (trueop0) == ABS
4544 || (GET_CODE (trueop0) == FLOAT_EXTEND
4545 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4550 /* Optimize abs(x) < 0.0. */
4551 if (!HONOR_SNANS (mode)
4552 && (!INTEGRAL_MODE_P (mode)
4553 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4555 if (INTEGRAL_MODE_P (mode)
4556 && (issue_strict_overflow_warning
4557 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4558 warning (OPT_Wstrict_overflow,
4559 ("assuming signed overflow does not occur when "
4560 "assuming abs (x) < 0 is false"));
4566 /* Optimize abs(x) >= 0.0. */
4567 if (!HONOR_NANS (mode)
4568 && (!INTEGRAL_MODE_P (mode)
4569 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4571 if (INTEGRAL_MODE_P (mode)
4572 && (issue_strict_overflow_warning
4573 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4574 warning (OPT_Wstrict_overflow,
4575 ("assuming signed overflow does not occur when "
4576 "assuming abs (x) >= 0 is true"));
4577 return const_true_rtx;
4582 /* Optimize ! (abs(x) < 0.0). */
4583 return const_true_rtx;
4593 /* Simplify CODE, an operation with result mode MODE and three operands,
4594 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4595 a constant. Return 0 if no simplification is possible. */
4598 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4599 enum machine_mode op0_mode, rtx op0, rtx op1,
4602 unsigned int width = GET_MODE_BITSIZE (mode);
4604 /* VOIDmode means "infinite" precision. */
4606 width = HOST_BITS_PER_WIDE_INT;
4612 if (CONST_INT_P (op0)
4613 && CONST_INT_P (op1)
4614 && CONST_INT_P (op2)
4615 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4616 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4618 /* Extracting a bit-field from a constant */
4619 HOST_WIDE_INT val = INTVAL (op0);
4621 if (BITS_BIG_ENDIAN)
4622 val >>= (GET_MODE_BITSIZE (op0_mode)
4623 - INTVAL (op2) - INTVAL (op1));
4625 val >>= INTVAL (op2);
4627 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4629 /* First zero-extend. */
4630 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4631 /* If desired, propagate sign bit. */
4632 if (code == SIGN_EXTRACT
4633 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4634 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4637 /* Clear the bits that don't belong in our mode,
4638 unless they and our sign bit are all one.
4639 So we get either a reasonable negative value or a reasonable
4640 unsigned value for this mode. */
4641 if (width < HOST_BITS_PER_WIDE_INT
4642 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4643 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4644 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4646 return gen_int_mode (val, mode);
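/* Editorial illustration (hypothetical helper) of the constant
   bit-field extraction above in its !BITS_BIG_ENDIAN form: shift the
   field down to bit 0, then mask it to OP1 bits. */
#include <assert.h>
static void
demo_zero_extract_const (void)
{
  long val = 0xABCD;      /* hypothetical constant operand */
  long width = 3;         /* OP1: field width in bits */
  long pos = 4;           /* OP2: first bit of the field */
  assert (((val >> pos) & ((1L << width) - 1)) == 0x4);
}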
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
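
      /* Illustrative examples: (if_then_else (const_int 1) a b) is just
	 "a"; (if_then_else (ne x y) x y) is "x" and (if_then_else
	 (eq x y) x y) is "y", but only when the operands can carry
	 neither NaNs nor signed zeros (for floats, -0.0 == +0.0 even
	 though the two operands differ) and the condition has no side
	 effects.  */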
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		  tmp = reversed_comparison_code (op0, NULL_RTX);

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),

	  /* See if any simplifications were possible.  */
	  if (CONST_INT_P (temp))
	    return temp == const0_rtx ? op2 : op1;

	  return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	  if ((INTVAL (op2) & mask) == mask)

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	      rtvec v = rtvec_alloc (n_elts);

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
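
	  /* Worked example (illustrative): for V4SImode,
	     (vec_merge {1,2,3,4} {5,6,7,8} (const_int 5)) takes the
	     element from the first operand wherever the mask bit is
	     set (bits 0 and 2), giving {1,6,3,8}; a mask with all
	     n_elts bits clear reduces to op1, and one with all bits
	     set reduces to op0.  */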
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
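
/* A minimal sketch of the unpack/repack idea on plain host integers
   (illustrative only; the real code below must also handle vectors,
   floats, fixed-point constants and target byte order).  Unpack the
   inner value LSB-first, then repack the bytes selected by BYTE:

       unsigned char bytes[8];
       unsigned int lo = 0;
       int i;
       for (i = 0; i < 8; i++)
	 bytes[i] = (value >> (8 * i)) & 0xff;
       for (i = 0; i < 4; i++)
	 lo |= (unsigned int) bytes[byte + i] << (8 * i);
*/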
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
  /* We support up to 512-bit values (for V8DFmode).  */
    value_mask = (1 << value_bit) - 1
  unsigned char value[max_bitsize / value_bit];
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
      elem_bitsize = max_bitsize;

  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;

      switch (GET_CODE (el))
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;

	  if (GET_MODE (el) == VOIDmode)
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)

	    long tmp[max_bitsize / 32];
	    int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	    gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	    gcc_assert (bitsize <= elem_bitsize);
	    gcc_assert (bitsize % value_bit == 0);

	    real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),

	    /* real_to_target produces its result in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < bitsize; i += value_bit)
		if (WORDS_BIG_ENDIAN)
		  ibase = bitsize - 1 - i;
		*vp++ = tmp[ibase / 32] >> i % 32;

	    /* It shouldn't matter what's done here, so fill it with
	       zero.  */
	    for (; i < elem_bitsize; i += value_bit)

	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
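
      /* Worked example (illustrative, assuming IBYTE counts from the
	 other end of OP): for (subreg:SI (x:DI) 4) on a big-endian
	 target with 4-byte words, ibyte = 8 - 4 - 4 = 0, so the
	 renumbered BYTE is 0: memory bytes 4..7 hold the least
	 significant word there, which is byte 0 of the canonical
	 little-endian value array built above.  */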
  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
      outer_submode = outermode;

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      switch (outer_class)
	case MODE_PARTIAL_INT:
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);

	case MODE_DECIMAL_FLOAT:
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	    for (i = 0; i < elem_bitsize; i += value_bit)
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);

	    f.mode = outer_submode;

		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	      f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
  /* A little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)

  if (CONST_INT_P (op)
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing the mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  An irritating exception is the paradoxical subreg,
	 where we define SUBREG_BYTE to be 0.  On big-endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;

      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	  /* Bail out in case the resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))

	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,

      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));

  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
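
  /* Illustrative example: (subreg:QI (truncate:HI (x:SI)) 0), a lowpart
     subreg of an explicit truncation, becomes a single
     (truncate:QI (x:SI)): truncating twice to ever-narrower modes is
     the same as truncating once to the narrowest.  */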
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
      unsigned int regno, final_regno;

      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary parts.  */
  if (GET_CODE (op) == CONCAT)
      unsigned int part_size, final_offset;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	  part = XEXP (op, 0);
	  final_offset = byte;
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);

      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
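
      /* Illustrative examples of the three lowpart cases, with
	 (zero_extend:DI (x:HI)) as OP: a subreg to HImode gives back x
	 itself; a subreg to QImode becomes the truncation applied
	 directly to x, (subreg:QI (x:HI) ...); and a subreg to SImode
	 becomes the narrower extension (zero_extend:SI (x:HI)).  */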
      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
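
  /* Worked example (illustrative): (subreg:QI (ashiftrt:SI
     (sign_extend:SI (x:QI)) (const_int 3)) 0) becomes
     (ashiftrt:QI (x:QI) (const_int 3)); bits 0..7 of the wide
     arithmetic shift are exactly the narrow arithmetic shift of x,
     because sign_extend leaves 24 sign-bit copies above bit 7.  */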
  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
      && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) >= 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
				  ? byte - shifted_bytes
				  : byte + shifted_bytes));
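
  /* Worked example (illustrative): on a 32-bit little-endian target,
     (subreg:SI (lshiftrt:DI (x:DI) (const_int 32)) 0) is just the high
     word of x, so it folds to (subreg:SI (x:DI) 4): shifting right by
     32 bits and taking the lowpart equals offsetting by 4 bytes.  */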
/* Make a SUBREG operation or an equivalent if it folds.  */

simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
  newx = simplify_subreg (outermode, op, innermode, byte);

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);
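
/* Typical use (illustrative): a caller wanting the SImode lowpart of a
   DImode value writes

       lowpart = simplify_gen_subreg (SImode, x, DImode,
				      subreg_lowpart_offset (SImode,
							     DImode));

   and gets back either a folded rtx (e.g. a CONST_INT, or the stripped
   inner value), a fresh (subreg:SI (x:DI) ...), or NULL when no valid
   subreg can be made.  */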
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in four places (three for RTL
   simplification and one for tree simplification).  */
simplify_rtx (const_rtx x)
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),

    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),

      return simplify_subreg (mode, SUBREG_REG (x),
			      GET_MODE (SUBREG_REG (x)),

      /* Convert (lo_sum (high FOO) FOO) to FOO.  */
      if (GET_CODE (XEXP (x, 0)) == HIGH
	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))