/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
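
/* For illustration only: building the (low, high) pair for a small
   negative value.  The names LOW and HIGH here are hypothetical.  */
#if 0
{
  unsigned HOST_WIDE_INT low = (unsigned HOST_WIDE_INT) -2;
  /* The sign bit of LOW is set, so the macro yields -1: the pair
     (low, high) then represents the double-width value -2.  */
  HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);
}
#endif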
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
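
/* For example, in QImode the sign-bit constant is (const_int -128),
   whose low byte is 0x80, and in HImode it is (const_int -32768);
   callers use this predicate to recognize XOR/PLUS masks that flip
   only the sign bit.  */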
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
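
/* A hypothetical use, for illustration only: two CONST_INT operands
   fold outright, so the call below yields (const_int 5) rather than a
   PLUS rtx; an unfoldable pair would come back as a PLUS with any
   constant operand placed second.  */
#if 0
{
  rtx sum = simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3));
}
#endif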
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
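
/* For instance, a pool load such as (mem (symbol_ref ".LC0")) whose
   pool entry is (const_double:DF 1.0) folds directly to that
   CONST_DOUBLE, exposing it to further constant folding.  */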
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  if (MEM_P (x)
      && MEM_EXPR (x)
      && (!MEM_OFFSET (x)
          || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp = 0, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          if (MEM_OFFSET (x))
            offset += INTVAL (MEM_OFFSET (x));

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (rtx_equal_p (x, old_rtx))
    return copy_rtx (new_rtx);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  return x;
}
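
/* A hypothetical substitution, for illustration only: rewriting
   (plus (reg 60) (const_int 4)) with OLD_RTX = (reg 60) and
   NEW_RTX = (reg 61) yields (plus (reg 61) (const_int 4)),
   re-simplified at each level as the tree is rebuilt.  */
#if 0
{
  rtx folded = simplify_replace_rtx (pat, old_reg, new_reg);
}
#endif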
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
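
      /* For example, (not (and X Y)) becomes (ior (not X) (not Y))
         and (not (ior X Y)) becomes (and (not X) (not Y)).  */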
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_INT_P (XEXP (op, 1))
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
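
      /* For example, in SImode with STORE_FLAG_VALUE == 1,
         (neg (lt X 0)) becomes (ashiftrt X 31): the arithmetic shift
         replicates the sign bit, yielding -1 when X < 0 and 0
         otherwise.  */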
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
                                                            0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
        case SS_NEG:
        case US_NEG:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
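
/* For instance, (plus (plus X (const_int 1)) (const_int 2))
   reassociates to (plus X (const_int 3)) via the
   "a op (b op c)" attempt above: the two constants fold, and the
   result is rebuilt around X.  */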
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
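
/* A hypothetical caller's view, for illustration only: a call such as
   simplify_binary_operation (PLUS, SImode, x, const0_rtx) yields X
   itself, while an unfoldable operand pair (say, two pseudo
   registers) returns 0 and the caller must emit the operation.  */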
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;
              bool speed = optimize_function_for_speed_p (cfun);

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;
              bool speed = optimize_function_for_speed_p (cfun);

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_INT_P (op1)
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
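
      /* For example, with X = 0b1100 and Y = 0b1010: (x & y) = 0b1000,
         so x - (x & y) = 0b0100, which equals x & ~y.  The AND's set
         bits are a subset of X's, so the subtraction never borrows.  */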

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (CONST_INT_P (trueop1)
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }
2185 /* Convert (A & B) | A to A. */
2186 if (GET_CODE (op0) == AND
2187 && (rtx_equal_p (XEXP (op0, 0), op1)
2188 || rtx_equal_p (XEXP (op0, 1), op1))
2189 && ! side_effects_p (XEXP (op0, 0))
2190 && ! side_effects_p (XEXP (op0, 1)))
2193 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2194 mode size to (rotate A CX). */
2196 if (GET_CODE (op1) == ASHIFT
2197 || GET_CODE (op1) == SUBREG)
2208 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2209 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2210 && CONST_INT_P (XEXP (opleft, 1))
2211 && CONST_INT_P (XEXP (opright, 1))
2212 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2213 == GET_MODE_BITSIZE (mode)))
2214 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2216 /* Same, but for ashift that has been "simplified" to a wider mode
2217 by simplify_shift_const. */
2219 if (GET_CODE (opleft) == SUBREG
2220 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2221 && GET_CODE (opright) == LSHIFTRT
2222 && GET_CODE (XEXP (opright, 0)) == SUBREG
2223 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2224 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2225 && (GET_MODE_SIZE (GET_MODE (opleft))
2226 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2227 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2228 SUBREG_REG (XEXP (opright, 0)))
2229 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2230 && CONST_INT_P (XEXP (opright, 1))
2231 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2232 == GET_MODE_BITSIZE (mode)))
2233 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2234 XEXP (SUBREG_REG (opleft), 1));
2236 /* If we have (ior (and X C1) C2), simplify this by making
2237 C1 as small as possible if C1 actually changes. */
2238 if (CONST_INT_P (op1)
2239 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2240 || INTVAL (op1) > 0)
2241 && GET_CODE (op0) == AND
2242 && CONST_INT_P (XEXP (op0, 1))
2243 && CONST_INT_P (op1)
2244 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2245 return simplify_gen_binary (IOR, mode,
2247 (AND, mode, XEXP (op0, 0),
2248 GEN_INT (INTVAL (XEXP (op0, 1))
2252 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2253 a (sign_extend (plus ...)). If so, check whether OP1 is a CONST_INT and
2254 the PLUS does not affect any of the bits in OP1; in that case we can do
2255 the IOR as a PLUS and we can associate. This is valid if OP1
2256 can be safely shifted left C bits. */
2257 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2258 && GET_CODE (XEXP (op0, 0)) == PLUS
2259 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2260 && CONST_INT_P (XEXP (op0, 1))
2261 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2263 int count = INTVAL (XEXP (op0, 1));
2264 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2266 if (mask >> count == INTVAL (trueop1)
2267 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2268 return simplify_gen_binary (ASHIFTRT, mode,
2269 plus_constant (XEXP (op0, 0), mask),
2273 tem = simplify_associative_operation (code, mode, op0, op1);
2279 if (trueop1 == const0_rtx)
2281 if (CONST_INT_P (trueop1)
2282 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2283 == GET_MODE_MASK (mode)))
2284 return simplify_gen_unary (NOT, mode, op0, mode);
2285 if (rtx_equal_p (trueop0, trueop1)
2286 && ! side_effects_p (op0)
2287 && GET_MODE_CLASS (mode) != MODE_CC)
2288 return CONST0_RTX (mode);
2290 /* Canonicalize XOR of the most significant bit to PLUS. */
2291 if ((CONST_INT_P (op1)
2292 || GET_CODE (op1) == CONST_DOUBLE)
2293 && mode_signbit_p (mode, op1))
2294 return simplify_gen_binary (PLUS, mode, op0, op1);
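/* E.g. in QImode, (xor x (const_int -128)) becomes
   (plus x (const_int -128)): adding the sign bit cannot carry into
   any lower bit, so modulo 256 the addition just flips bit 7,
   exactly as the XOR does.  */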
2295 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2296 if ((CONST_INT_P (op1)
2297 || GET_CODE (op1) == CONST_DOUBLE)
2298 && GET_CODE (op0) == PLUS
2299 && (CONST_INT_P (XEXP (op0, 1))
2300 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2301 && mode_signbit_p (mode, XEXP (op0, 1)))
2302 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2303 simplify_gen_binary (XOR, mode, op1,
2306 /* If we are XORing two things that have no bits in common,
2307 convert them into an IOR. This helps to detect rotation encoded
2308 using those methods and possibly other simplifications. */
2310 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2311 && (nonzero_bits (op0, mode)
2312 & nonzero_bits (op1, mode)) == 0)
2313 return (simplify_gen_binary (IOR, mode, op0, op1));
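/* For instance, if op0 is (and x (const_int 0x0f)) and op1 is
   (and y (const_int 0xf0)), nonzero_bits reports 0x0f and 0xf0,
   which do not overlap; with no common bits there can be no
   cancellation, and IOR is the more useful canonical form.  */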
2315 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2316 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2317 (NOT y). */
2319 int num_negated = 0;
2321 if (GET_CODE (op0) == NOT)
2322 num_negated++, op0 = XEXP (op0, 0);
2323 if (GET_CODE (op1) == NOT)
2324 num_negated++, op1 = XEXP (op1, 0);
2326 if (num_negated == 2)
2327 return simplify_gen_binary (XOR, mode, op0, op1);
2328 else if (num_negated == 1)
2329 return simplify_gen_unary (NOT, mode,
2330 simplify_gen_binary (XOR, mode, op0, op1),
2334 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2335 correspond to a machine insn or result in further simplifications
2336 if B is a constant. */
2338 if (GET_CODE (op0) == AND
2339 && rtx_equal_p (XEXP (op0, 1), op1)
2340 && ! side_effects_p (op1))
2341 return simplify_gen_binary (AND, mode,
2342 simplify_gen_unary (NOT, mode,
2343 XEXP (op0, 0), mode),
2346 else if (GET_CODE (op0) == AND
2347 && rtx_equal_p (XEXP (op0, 0), op1)
2348 && ! side_effects_p (op1))
2349 return simplify_gen_binary (AND, mode,
2350 simplify_gen_unary (NOT, mode,
2351 XEXP (op0, 1), mode),
2354 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2355 comparison if STORE_FLAG_VALUE is 1. */
2356 if (STORE_FLAG_VALUE == 1
2357 && trueop1 == const1_rtx
2358 && COMPARISON_P (op0)
2359 && (reversed = reversed_comparison (op0, mode)))
2362 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2363 is (lt foo (const_int 0)), so we can perform the above
2364 simplification if STORE_FLAG_VALUE is 1. */
2366 if (STORE_FLAG_VALUE == 1
2367 && trueop1 == const1_rtx
2368 && GET_CODE (op0) == LSHIFTRT
2369 && CONST_INT_P (XEXP (op0, 1))
2370 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2371 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
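/* With a 32-bit mode, (lshiftrt x (const_int 31)) isolates the sign
   bit, i.e. it is 1 exactly when (lt x (const_int 0)) holds; XORing
   it with (const_int 1) therefore computes the reversed condition,
   which is why (ge x (const_int 0)) is returned here.  */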
2373 /* (xor (comparison foo bar) (const_int sign-bit))
2374 when STORE_FLAG_VALUE is the sign bit. */
2375 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2376 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2377 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2378 && trueop1 == const_true_rtx
2379 && COMPARISON_P (op0)
2380 && (reversed = reversed_comparison (op0, mode)))
2383 tem = simplify_associative_operation (code, mode, op0, op1);
2389 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2391 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2393 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2394 HOST_WIDE_INT nzop1;
2395 if (CONST_INT_P (trueop1))
2397 HOST_WIDE_INT val1 = INTVAL (trueop1);
2398 /* If we are turning off bits already known off in OP0, we need
2399 not do an AND. */
2400 if ((nzop0 & ~val1) == 0)
2403 nzop1 = nonzero_bits (trueop1, mode);
2404 /* If we are clearing all the nonzero bits, the result is zero. */
2405 if ((nzop1 & nzop0) == 0
2406 && !side_effects_p (op0) && !side_effects_p (op1))
2407 return CONST0_RTX (mode);
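/* E.g. (and (and x (const_int 0x0f)) (const_int 0xf0)):
   nonzero_bits of the inner AND is at most 0x0f, which shares no
   bits with 0xf0, so the whole expression folds to zero.  */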
2409 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2410 && GET_MODE_CLASS (mode) != MODE_CC)
2413 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2414 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2415 && ! side_effects_p (op0)
2416 && GET_MODE_CLASS (mode) != MODE_CC)
2417 return CONST0_RTX (mode);
2419 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2420 there are no nonzero bits of C outside of X's mode. */
2421 if ((GET_CODE (op0) == SIGN_EXTEND
2422 || GET_CODE (op0) == ZERO_EXTEND)
2423 && CONST_INT_P (trueop1)
2424 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2425 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2426 & INTVAL (trueop1)) == 0)
2428 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2429 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2430 gen_int_mode (INTVAL (trueop1),
2432 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
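/* For example, (and (zero_extend:SI (reg:QI x)) (const_int 0x7c))
   has no constant bits outside QImode's 0xff mask, so it becomes
   (zero_extend:SI (and:QI (reg:QI x) (const_int 0x7c))).  The same
   holds for SIGN_EXTEND, because the constant masks away every
   copied sign bit.  */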
2435 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2436 we might be able to further simplify the AND with X and potentially
2437 remove the truncation altogether. */
2438 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2440 rtx x = XEXP (op0, 0);
2441 enum machine_mode xmode = GET_MODE (x);
2442 tem = simplify_gen_binary (AND, xmode, x,
2443 gen_int_mode (INTVAL (trueop1), xmode));
2444 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2447 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2448 if (GET_CODE (op0) == IOR
2449 && CONST_INT_P (trueop1)
2450 && CONST_INT_P (XEXP (op0, 1)))
2452 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2453 return simplify_gen_binary (IOR, mode,
2454 simplify_gen_binary (AND, mode,
2455 XEXP (op0, 0), op1),
2456 gen_int_mode (tmp, mode));
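/* E.g. with C1 = 0x0c and C2 = 0x0a:
   (and (ior A (const_int 0x0c)) (const_int 0x0a)) becomes
   (ior (and A (const_int 0x0a)) (const_int 0x08)), distributing the
   AND over the IOR and pre-folding the constant part.  */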
2459 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2460 insn (and may simplify more). */
2461 if (GET_CODE (op0) == XOR
2462 && rtx_equal_p (XEXP (op0, 0), op1)
2463 && ! side_effects_p (op1))
2464 return simplify_gen_binary (AND, mode,
2465 simplify_gen_unary (NOT, mode,
2466 XEXP (op0, 1), mode),
2469 if (GET_CODE (op0) == XOR
2470 && rtx_equal_p (XEXP (op0, 1), op1)
2471 && ! side_effects_p (op1))
2472 return simplify_gen_binary (AND, mode,
2473 simplify_gen_unary (NOT, mode,
2474 XEXP (op0, 0), mode),
2477 /* Similarly for (~(A ^ B)) & A. */
2478 if (GET_CODE (op0) == NOT
2479 && GET_CODE (XEXP (op0, 0)) == XOR
2480 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2481 && ! side_effects_p (op1))
2482 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2484 if (GET_CODE (op0) == NOT
2485 && GET_CODE (XEXP (op0, 0)) == XOR
2486 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2487 && ! side_effects_p (op1))
2488 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2490 /* Convert (A | B) & A to A. */
2491 if (GET_CODE (op0) == IOR
2492 && (rtx_equal_p (XEXP (op0, 0), op1)
2493 || rtx_equal_p (XEXP (op0, 1), op1))
2494 && ! side_effects_p (XEXP (op0, 0))
2495 && ! side_effects_p (XEXP (op0, 1)))
2498 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2499 ((A & N) + B) & M -> (A + B) & M
2500 Similarly if (N & M) == 0,
2501 ((A | N) + B) & M -> (A + B) & M
2502 and for - instead of + and/or ^ instead of |.
2503 Also, if (N & M) == 0, then
2504 (A +- N) & M -> A & M. */
2505 if (CONST_INT_P (trueop1)
2506 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2507 && ~INTVAL (trueop1)
2508 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2509 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2514 pmop[0] = XEXP (op0, 0);
2515 pmop[1] = XEXP (op0, 1);
2517 if (CONST_INT_P (pmop[1])
2518 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2519 return simplify_gen_binary (AND, mode, pmop[0], op1);
2521 for (which = 0; which < 2; which++)
2524 switch (GET_CODE (tem))
2527 if (CONST_INT_P (XEXP (tem, 1))
2528 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2529 == INTVAL (trueop1))
2530 pmop[which] = XEXP (tem, 0);
2534 if (CONST_INT_P (XEXP (tem, 1))
2535 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2536 pmop[which] = XEXP (tem, 0);
2543 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2545 tem = simplify_gen_binary (GET_CODE (op0), mode,
2547 return simplify_gen_binary (code, mode, tem, op1);
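/* As an example of the rules above, take M = 0x0f, a low-bit mask:
   (and (plus (ior A (const_int 0x30)) B) (const_int 0x0f)) drops
   the IOR, giving (and (plus A B) (const_int 0x0f)).  0x30 & 0x0f
   is 0, so the IOR only disturbs bits the final mask discards, and
   since carries propagate upward only, the kept low bits of the sum
   are unaffected.  */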
2551 /* (and X (ior (not X) Y)) -> (and X Y) */
2552 if (GET_CODE (op1) == IOR
2553 && GET_CODE (XEXP (op1, 0)) == NOT
2554 && op0 == XEXP (XEXP (op1, 0), 0))
2555 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2557 /* (and (ior (not X) Y) X) -> (and X Y) */
2558 if (GET_CODE (op0) == IOR
2559 && GET_CODE (XEXP (op0, 0)) == NOT
2560 && op1 == XEXP (XEXP (op0, 0), 0))
2561 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2563 tem = simplify_associative_operation (code, mode, op0, op1);
2569 /* 0/x is 0 (or x&0 if x has side-effects). */
2570 if (trueop0 == CONST0_RTX (mode))
2572 if (side_effects_p (op1))
2573 return simplify_gen_binary (AND, mode, op1, trueop0);
2577 if (trueop1 == CONST1_RTX (mode))
2578 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2579 /* Convert divide by power of two into shift. */
2580 if (CONST_INT_P (trueop1)
2581 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2582 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
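/* E.g. (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)):
   for unsigned values, division by 2**3 is exactly a logical right
   shift by 3.  (Signed division is not handled here, since an
   arithmetic shift would round toward minus infinity rather than
   toward zero.)  */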
2586 /* Handle floating point and integers separately. */
2587 if (SCALAR_FLOAT_MODE_P (mode))
2589 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2590 safe for modes with NaNs, since 0.0 / 0.0 will then be
2591 NaN rather than 0.0. Nor is it safe for modes with signed
2592 zeros, since dividing 0 by a negative number gives -0.0 */
2593 if (trueop0 == CONST0_RTX (mode)
2594 && !HONOR_NANS (mode)
2595 && !HONOR_SIGNED_ZEROS (mode)
2596 && ! side_effects_p (op1))
2599 if (trueop1 == CONST1_RTX (mode)
2600 && !HONOR_SNANS (mode))
2603 if (GET_CODE (trueop1) == CONST_DOUBLE
2604 && trueop1 != CONST0_RTX (mode))
2607 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2610 if (REAL_VALUES_EQUAL (d, dconstm1)
2611 && !HONOR_SNANS (mode))
2612 return simplify_gen_unary (NEG, mode, op0, mode);
2614 /* Change FP division by a constant into multiplication.
2615 Only do this with -freciprocal-math. */
2616 if (flag_reciprocal_math
2617 && !REAL_VALUES_EQUAL (d, dconst0))
2619 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2620 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2621 return simplify_gen_binary (MULT, mode, op0, tem);
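/* For instance, with -freciprocal-math, x / 4.0 becomes x * 0.25.
   The transformation is applied for any nonzero constant, not just
   exactly-invertible ones, which is why it is gated on
   flag_reciprocal_math.  */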
2627 /* 0/x is 0 (or x&0 if x has side-effects). */
2628 if (trueop0 == CONST0_RTX (mode))
2630 if (side_effects_p (op1))
2631 return simplify_gen_binary (AND, mode, op1, trueop0);
2635 if (trueop1 == CONST1_RTX (mode))
2636 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2638 if (trueop1 == constm1_rtx)
2640 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2641 return simplify_gen_unary (NEG, mode, x, mode);
2647 /* 0%x is 0 (or x&0 if x has side-effects). */
2648 if (trueop0 == CONST0_RTX (mode))
2650 if (side_effects_p (op1))
2651 return simplify_gen_binary (AND, mode, op1, trueop0);
2654 /* x%1 is 0 (or x&0 if x has side-effects). */
2655 if (trueop1 == CONST1_RTX (mode))
2657 if (side_effects_p (op0))
2658 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2659 return CONST0_RTX (mode);
2661 /* Implement modulus by power of two as AND. */
2662 if (CONST_INT_P (trueop1)
2663 && exact_log2 (INTVAL (trueop1)) > 0)
2664 return simplify_gen_binary (AND, mode, op0,
2665 GEN_INT (INTVAL (op1) - 1));
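/* E.g. (umod x (const_int 16)) becomes (and x (const_int 15)):
   the remainder of an unsigned division by 2**4 is just the low
   four bits.  (Signed MOD is excluded, as it can be negative.)  */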
2669 /* 0%x is 0 (or x&0 if x has side-effects). */
2670 if (trueop0 == CONST0_RTX (mode))
2672 if (side_effects_p (op1))
2673 return simplify_gen_binary (AND, mode, op1, trueop0);
2676 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2677 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2679 if (side_effects_p (op0))
2680 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2681 return CONST0_RTX (mode);
2688 if (trueop1 == CONST0_RTX (mode))
2690 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2692 /* Rotating ~0 always results in ~0. */
2693 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2694 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2695 && ! side_effects_p (op1))
2698 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2700 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2701 if (val != INTVAL (op1))
2702 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
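/* E.g. in SImode on a SHIFT_COUNT_TRUNCATED target,
   (ashift x (const_int 33)) is canonicalized to
   (ashift x (const_int 1)), since only the low five bits of the
   count are significant there: 33 & 31 == 1.  */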
2709 if (trueop1 == CONST0_RTX (mode))
2711 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2713 goto canonicalize_shift;
2716 if (trueop1 == CONST0_RTX (mode))
2718 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2720 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2721 if (GET_CODE (op0) == CLZ
2722 && CONST_INT_P (trueop1)
2723 && STORE_FLAG_VALUE == 1
2724 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2726 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2727 unsigned HOST_WIDE_INT zero_val = 0;
2729 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2730 && zero_val == GET_MODE_BITSIZE (imode)
2731 && INTVAL (trueop1) == exact_log2 (zero_val))
2732 return simplify_gen_relational (EQ, mode, imode,
2733 XEXP (op0, 0), const0_rtx);
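/* The reasoning behind the CLZ fold: when CLZ of zero is defined to
   be the mode bitsize (say 32 for SImode), (clz x) lies in [0, 31]
   for nonzero x and is exactly 32 for x == 0; shifting right by
   log2 (32) == 5 therefore yields 1 iff x == 0, which is (eq x 0)
   when STORE_FLAG_VALUE is 1.  */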
2735 goto canonicalize_shift;
2738 if (width <= HOST_BITS_PER_WIDE_INT
2739 && CONST_INT_P (trueop1)
2740 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2741 && ! side_effects_p (op0))
2743 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2745 tem = simplify_associative_operation (code, mode, op0, op1);
2751 if (width <= HOST_BITS_PER_WIDE_INT
2752 && CONST_INT_P (trueop1)
2753 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2754 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2755 && ! side_effects_p (op0))
2757 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2759 tem = simplify_associative_operation (code, mode, op0, op1);
2765 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2767 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2769 tem = simplify_associative_operation (code, mode, op0, op1);
2775 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2777 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2779 tem = simplify_associative_operation (code, mode, op0, op1);
2792 /* ??? There are simplifications that can be done. */
2796 if (!VECTOR_MODE_P (mode))
2798 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2799 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2800 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2801 gcc_assert (XVECLEN (trueop1, 0) == 1);
2802 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2804 if (GET_CODE (trueop0) == CONST_VECTOR)
2805 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2808 /* Extract a scalar element from a nested VEC_SELECT expression
2809 (with an optional nested VEC_CONCAT expression). Some targets
2810 (i386) extract a scalar element from a vector using a chain of
2811 nested VEC_SELECT expressions. When the input operand is a
2812 memory operand, this operation can be simplified to a simple
2813 scalar load from an offset memory address. */
2814 if (GET_CODE (trueop0) == VEC_SELECT)
2816 rtx op0 = XEXP (trueop0, 0);
2817 rtx op1 = XEXP (trueop0, 1);
2819 enum machine_mode opmode = GET_MODE (op0);
2820 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2821 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2823 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2829 gcc_assert (GET_CODE (op1) == PARALLEL);
2830 gcc_assert (i < n_elts);
2832 /* Select the element pointed to by the nested selector. */
2833 elem = INTVAL (XVECEXP (op1, 0, i));
2835 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2836 if (GET_CODE (op0) == VEC_CONCAT)
2838 rtx op00 = XEXP (op0, 0);
2839 rtx op01 = XEXP (op0, 1);
2841 enum machine_mode mode00, mode01;
2842 int n_elts00, n_elts01;
2844 mode00 = GET_MODE (op00);
2845 mode01 = GET_MODE (op01);
2847 /* Find out the number of elements of each operand. */
2848 if (VECTOR_MODE_P (mode00))
2850 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2851 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2856 if (VECTOR_MODE_P (mode01))
2858 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2859 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2864 gcc_assert (n_elts == n_elts00 + n_elts01);
2866 /* Select the correct operand of the VEC_CONCAT
2867 and adjust the selector. */
2868 if (elem < n_elts01)
2879 vec = rtvec_alloc (1);
2880 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2882 tmp = gen_rtx_fmt_ee (code, mode,
2883 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2889 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2890 gcc_assert (GET_MODE_INNER (mode)
2891 == GET_MODE_INNER (GET_MODE (trueop0)));
2892 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2894 if (GET_CODE (trueop0) == CONST_VECTOR)
2896 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2897 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2898 rtvec v = rtvec_alloc (n_elts);
2901 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2902 for (i = 0; i < n_elts; i++)
2904 rtx x = XVECEXP (trueop1, 0, i);
2906 gcc_assert (CONST_INT_P (x));
2907 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2911 return gen_rtx_CONST_VECTOR (mode, v);
2915 if (XVECLEN (trueop1, 0) == 1
2916 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
2917 && GET_CODE (trueop0) == VEC_CONCAT)
2920 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2922 /* Try to find the element in the VEC_CONCAT. */
2923 while (GET_MODE (vec) != mode
2924 && GET_CODE (vec) == VEC_CONCAT)
2926 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2927 if (offset < vec_size)
2928 vec = XEXP (vec, 0);
2932 vec = XEXP (vec, 1);
2934 vec = avoid_constant_pool_reference (vec);
2937 if (GET_MODE (vec) == mode)
2944 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2945 ? GET_MODE (trueop0)
2946 : GET_MODE_INNER (mode));
2947 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2948 ? GET_MODE (trueop1)
2949 : GET_MODE_INNER (mode));
2951 gcc_assert (VECTOR_MODE_P (mode));
2952 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2953 == GET_MODE_SIZE (mode));
2955 if (VECTOR_MODE_P (op0_mode))
2956 gcc_assert (GET_MODE_INNER (mode)
2957 == GET_MODE_INNER (op0_mode));
2959 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2961 if (VECTOR_MODE_P (op1_mode))
2962 gcc_assert (GET_MODE_INNER (mode)
2963 == GET_MODE_INNER (op1_mode));
2965 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2967 if ((GET_CODE (trueop0) == CONST_VECTOR
2968 || CONST_INT_P (trueop0)
2969 || GET_CODE (trueop0) == CONST_DOUBLE)
2970 && (GET_CODE (trueop1) == CONST_VECTOR
2971 || CONST_INT_P (trueop1)
2972 || GET_CODE (trueop1) == CONST_DOUBLE))
2974 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2975 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2976 rtvec v = rtvec_alloc (n_elts);
2978 unsigned in_n_elts = 1;
2980 if (VECTOR_MODE_P (op0_mode))
2981 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2982 for (i = 0; i < n_elts; i++)
2986 if (!VECTOR_MODE_P (op0_mode))
2987 RTVEC_ELT (v, i) = trueop0;
2989 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2993 if (!VECTOR_MODE_P (op1_mode))
2994 RTVEC_ELT (v, i) = trueop1;
2996 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3001 return gen_rtx_CONST_VECTOR (mode, v);
3014 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3017 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3019 unsigned int width = GET_MODE_BITSIZE (mode);
3021 if (VECTOR_MODE_P (mode)
3022 && code != VEC_CONCAT
3023 && GET_CODE (op0) == CONST_VECTOR
3024 && GET_CODE (op1) == CONST_VECTOR)
3026 unsigned n_elts = GET_MODE_NUNITS (mode);
3027 enum machine_mode op0mode = GET_MODE (op0);
3028 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3029 enum machine_mode op1mode = GET_MODE (op1);
3030 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3031 rtvec v = rtvec_alloc (n_elts);
3034 gcc_assert (op0_n_elts == n_elts);
3035 gcc_assert (op1_n_elts == n_elts);
3036 for (i = 0; i < n_elts; i++)
3038 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3039 CONST_VECTOR_ELT (op0, i),
3040 CONST_VECTOR_ELT (op1, i));
3043 RTVEC_ELT (v, i) = x;
3046 return gen_rtx_CONST_VECTOR (mode, v);
3049 if (VECTOR_MODE_P (mode)
3050 && code == VEC_CONCAT
3051 && (CONST_INT_P (op0)
3052 || GET_CODE (op0) == CONST_DOUBLE
3053 || GET_CODE (op0) == CONST_FIXED)
3054 && (CONST_INT_P (op1)
3055 || GET_CODE (op1) == CONST_DOUBLE
3056 || GET_CODE (op1) == CONST_FIXED))
3058 unsigned n_elts = GET_MODE_NUNITS (mode);
3059 rtvec v = rtvec_alloc (n_elts);
3061 gcc_assert (n_elts >= 2);
3064 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3065 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3067 RTVEC_ELT (v, 0) = op0;
3068 RTVEC_ELT (v, 1) = op1;
3072 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3073 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3076 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3077 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3078 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3080 for (i = 0; i < op0_n_elts; ++i)
3081 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3082 for (i = 0; i < op1_n_elts; ++i)
3083 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3086 return gen_rtx_CONST_VECTOR (mode, v);
3089 if (SCALAR_FLOAT_MODE_P (mode)
3090 && GET_CODE (op0) == CONST_DOUBLE
3091 && GET_CODE (op1) == CONST_DOUBLE
3092 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3103 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3105 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3107 for (i = 0; i < 4; i++)
3124 real_from_target (&r, tmp0, mode);
3125 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3129 REAL_VALUE_TYPE f0, f1, value, result;
3132 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3133 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3134 real_convert (&f0, mode, &f0);
3135 real_convert (&f1, mode, &f1);
3137 if (HONOR_SNANS (mode)
3138 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3142 && REAL_VALUES_EQUAL (f1, dconst0)
3143 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3146 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3147 && flag_trapping_math
3148 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3150 int s0 = REAL_VALUE_NEGATIVE (f0);
3151 int s1 = REAL_VALUE_NEGATIVE (f1);
3156 /* Inf + -Inf = NaN plus exception. */
3161 /* Inf - Inf = NaN plus exception. */
3166 /* Inf / Inf = NaN plus exception. */
3173 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3174 && flag_trapping_math
3175 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3176 || (REAL_VALUE_ISINF (f1)
3177 && REAL_VALUES_EQUAL (f0, dconst0))))
3178 /* Inf * 0 = NaN plus exception. */
3181 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3183 real_convert (&result, mode, &value);
3185 /* Don't constant fold this floating point operation if
3186 the result has overflowed and flag_trapping_math is set. */
3188 if (flag_trapping_math
3189 && MODE_HAS_INFINITIES (mode)
3190 && REAL_VALUE_ISINF (result)
3191 && !REAL_VALUE_ISINF (f0)
3192 && !REAL_VALUE_ISINF (f1))
3193 /* Overflow plus exception. */
3196 /* Don't constant fold this floating point operation if the
3197 result may depend upon the run-time rounding mode and
3198 flag_rounding_math is set, or if GCC's software emulation
3199 is unable to accurately represent the result. */
3201 if ((flag_rounding_math
3202 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3203 && (inexact || !real_identical (&result, &value)))
3206 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3210 /* We can fold some multi-word operations. */
3211 if (GET_MODE_CLASS (mode) == MODE_INT
3212 && width == HOST_BITS_PER_WIDE_INT * 2
3213 && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
3214 && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
3216 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3217 HOST_WIDE_INT h1, h2, hv, ht;
3219 if (GET_CODE (op0) == CONST_DOUBLE)
3220 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3222 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3224 if (GET_CODE (op1) == CONST_DOUBLE)
3225 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3227 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3232 /* A - B == A + (-B). */
3233 neg_double (l2, h2, &lv, &hv);
3236 /* Fall through.... */
3239 add_double (l1, h1, l2, h2, &lv, &hv);
3243 mul_double (l1, h1, l2, h2, &lv, &hv);
3247 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3248 &lv, &hv, <, &ht))
3253 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3254 <, &ht, &lv, &hv))
3259 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3260 &lv, &hv, <, &ht))
3265 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3266 <, &ht, &lv, &hv))
3271 lv = l1 & l2, hv = h1 & h2;
3275 lv = l1 | l2, hv = h1 | h2;
3279 lv = l1 ^ l2, hv = h1 ^ h2;
3285 && ((unsigned HOST_WIDE_INT) l1
3286 < (unsigned HOST_WIDE_INT) l2)))
3295 && ((unsigned HOST_WIDE_INT) l1
3296 > (unsigned HOST_WIDE_INT) l2)))
3303 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3305 && ((unsigned HOST_WIDE_INT) l1
3306 < (unsigned HOST_WIDE_INT) l2)))
3313 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3315 && ((unsigned HOST_WIDE_INT) l1
3316 > (unsigned HOST_WIDE_INT) l2)))
3322 case LSHIFTRT: case ASHIFTRT:
3324 case ROTATE: case ROTATERT:
3325 if (SHIFT_COUNT_TRUNCATED)
3326 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3328 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3331 if (code == LSHIFTRT || code == ASHIFTRT)
3332 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3334 else if (code == ASHIFT)
3335 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3336 else if (code == ROTATE)
3337 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3338 else /* code == ROTATERT */
3339 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3346 return immed_double_const (lv, hv, mode);
3349 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3350 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3352 /* Get the integer argument values in two forms:
3353 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3355 arg0 = INTVAL (op0);
3356 arg1 = INTVAL (op1);
3358 if (width < HOST_BITS_PER_WIDE_INT)
3360 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3361 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3364 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3365 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3368 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3369 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3377 /* Compute the value of the arithmetic. */
3382 val = arg0s + arg1s;
3386 val = arg0s - arg1s;
3390 val = arg0s * arg1s;
3395 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3398 val = arg0s / arg1s;
3403 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3406 val = arg0s % arg1s;
3411 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3414 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3419 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3422 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3440 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3441 the value is in range. We can't return any old value for
3442 out-of-range arguments because either the middle-end (via
3443 shift_truncation_mask) or the back-end might be relying on
3444 target-specific knowledge. Nor can we rely on
3445 shift_truncation_mask, since the shift might not be part of an
3446 ashlM3, lshrM3 or ashrM3 instruction. */
3447 if (SHIFT_COUNT_TRUNCATED)
3448 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3449 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3452 val = (code == ASHIFT
3453 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3454 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3456 /* Sign-extend the result for arithmetic right shifts. */
3457 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3458 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3466 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3467 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3475 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3476 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3480 /* Do nothing here. */
3484 val = arg0s <= arg1s ? arg0s : arg1s;
3488 val = ((unsigned HOST_WIDE_INT) arg0
3489 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3493 val = arg0s > arg1s ? arg0s : arg1s;
3497 val = ((unsigned HOST_WIDE_INT) arg0
3498 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3511 /* ??? There are simplifications that can be done. */
3518 return gen_int_mode (val, mode);
3526 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3527 PLUS or MINUS.
3529 Rather than test for specific cases, we do this by a brute-force method
3530 and do all possible simplifications until no more changes occur. Then
3531 we rebuild the operation. */
3533 struct simplify_plus_minus_op_data
3540 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3544 result = (commutative_operand_precedence (y)
3545 - commutative_operand_precedence (x));
3549 /* Group together equal REGs to do more simplification. */
3550 if (REG_P (x) && REG_P (y))
3551 return REGNO (x) > REGNO (y);
3557 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3560 struct simplify_plus_minus_op_data ops[8];
3562 int n_ops = 2, input_ops = 2;
3563 int changed, n_constants = 0, canonicalized = 0;
3566 memset (ops, 0, sizeof ops);
3568 /* Set up the two operands and then expand them until nothing has been
3569 changed. If we run out of room in our array, give up; this should
3570 almost never happen. */
3575 ops[1].neg = (code == MINUS);
3581 for (i = 0; i < n_ops; i++)
3583 rtx this_op = ops[i].op;
3584 int this_neg = ops[i].neg;
3585 enum rtx_code this_code = GET_CODE (this_op);
3594 ops[n_ops].op = XEXP (this_op, 1);
3595 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3598 ops[i].op = XEXP (this_op, 0);
3601 canonicalized |= this_neg;
3605 ops[i].op = XEXP (this_op, 0);
3606 ops[i].neg = ! this_neg;
3613 && GET_CODE (XEXP (this_op, 0)) == PLUS
3614 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3615 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3617 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3618 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3619 ops[n_ops].neg = this_neg;
3627 /* ~a -> (-a - 1) */
3630 ops[n_ops].op = constm1_rtx;
3631 ops[n_ops++].neg = this_neg;
3632 ops[i].op = XEXP (this_op, 0);
3633 ops[i].neg = !this_neg;
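/* A sanity check of the rewrite above in two's complement:
   ~a == -a - 1, so a NOT is expanded into a negated operand plus a
   -1 constant and can then take part in the PLUS/MINUS
   reassociation like any other operand.  */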
3643 ops[i].op = neg_const_int (mode, this_op);
3657 if (n_constants > 1)
3660 gcc_assert (n_ops >= 2);
3662 /* If we only have two operands, we can avoid the loops. */
3665 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3668 /* Get the two operands. Be careful with the order, especially for
3669 the cases where code == MINUS. */
3670 if (ops[0].neg && ops[1].neg)
3672 lhs = gen_rtx_NEG (mode, ops[0].op);
3675 else if (ops[0].neg)
3686 return simplify_const_binary_operation (code, mode, lhs, rhs);
3689 /* Now simplify each pair of operands until nothing changes. */
3692 /* Insertion sort is good enough for an eight-element array. */
3693 for (i = 1; i < n_ops; i++)
3695 struct simplify_plus_minus_op_data save;
3697 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3703 ops[j + 1] = ops[j];
3704 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3709 for (i = n_ops - 1; i > 0; i--)
3710 for (j = i - 1; j >= 0; j--)
3712 rtx lhs = ops[j].op, rhs = ops[i].op;
3713 int lneg = ops[j].neg, rneg = ops[i].neg;
3715 if (lhs != 0 && rhs != 0)
3717 enum rtx_code ncode = PLUS;
3723 tem = lhs, lhs = rhs, rhs = tem;
3725 else if (swap_commutative_operands_p (lhs, rhs))
3726 tem = lhs, lhs = rhs, rhs = tem;
3728 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3729 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3731 rtx tem_lhs, tem_rhs;
3733 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3734 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3735 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3737 if (tem && !CONSTANT_P (tem))
3738 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3741 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3743 /* Reject "simplifications" that just wrap the two
3744 arguments in a CONST. Failure to do so can result
3745 in infinite recursion with simplify_binary_operation
3746 when it calls us to simplify CONST operations. */
3748 && ! (GET_CODE (tem) == CONST
3749 && GET_CODE (XEXP (tem, 0)) == ncode
3750 && XEXP (XEXP (tem, 0), 0) == lhs
3751 && XEXP (XEXP (tem, 0), 1) == rhs))
3754 if (GET_CODE (tem) == NEG)
3755 tem = XEXP (tem, 0), lneg = !lneg;
3756 if (CONST_INT_P (tem) && lneg)
3757 tem = neg_const_int (mode, tem), lneg = 0;
3761 ops[j].op = NULL_RTX;
3768 /* If nothing changed, fail. */
3772 /* Pack all the operands to the lower-numbered entries. */
3773 for (i = 0, j = 0; j < n_ops; j++)
3783 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3785 && CONST_INT_P (ops[1].op)
3786 && CONSTANT_P (ops[0].op)
3788 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3790 /* We suppressed creation of trivial CONST expressions in the
3791 combination loop to avoid recursion. Create one manually now.
3792 The combination loop should have ensured that there is exactly
3793 one CONST_INT, and the sort will have ensured that it is last
3794 in the array and that any other constant will be next-to-last. */
3797 && CONST_INT_P (ops[n_ops - 1].op)
3798 && CONSTANT_P (ops[n_ops - 2].op))
3800 rtx value = ops[n_ops - 1].op;
3801 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3802 value = neg_const_int (mode, value);
3803 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3807 /* Put a non-negated operand first, if possible. */
3809 for (i = 0; i < n_ops && ops[i].neg; i++)
3812 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3821 /* Now make the result by performing the requested operations. */
3823 for (i = 1; i < n_ops; i++)
3824 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3825 mode, result, ops[i].op);
3830 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3832 plus_minus_operand_p (const_rtx x)
3834 return GET_CODE (x) == PLUS
3835 || GET_CODE (x) == MINUS
3836 || (GET_CODE (x) == CONST
3837 && GET_CODE (XEXP (x, 0)) == PLUS
3838 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3839 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3842 /* Like simplify_binary_operation except used for relational operators.
3843 MODE is the mode of the result. If MODE is VOIDmode, the two operands
3844 must not both be VOIDmode.
3846 CMP_MODE specifies the mode in which the comparison is done, so it is
3847 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3848 the operands or, if both are VOIDmode, the operands are compared in
3849 "infinite precision". */
3851 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3852 enum machine_mode cmp_mode, rtx op0, rtx op1)
3854 rtx tem, trueop0, trueop1;
3856 if (cmp_mode == VOIDmode)
3857 cmp_mode = GET_MODE (op0);
3858 if (cmp_mode == VOIDmode)
3859 cmp_mode = GET_MODE (op1);
3861 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3864 if (SCALAR_FLOAT_MODE_P (mode))
3866 if (tem == const0_rtx)
3867 return CONST0_RTX (mode);
3868 #ifdef FLOAT_STORE_FLAG_VALUE
3870 REAL_VALUE_TYPE val;
3871 val = FLOAT_STORE_FLAG_VALUE (mode);
3872 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3878 if (VECTOR_MODE_P (mode))
3880 if (tem == const0_rtx)
3881 return CONST0_RTX (mode);
3882 #ifdef VECTOR_STORE_FLAG_VALUE
3887 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3888 if (val == NULL_RTX)
3890 if (val == const1_rtx)
3891 return CONST1_RTX (mode);
3893 units = GET_MODE_NUNITS (mode);
3894 v = rtvec_alloc (units);
3895 for (i = 0; i < units; i++)
3896 RTVEC_ELT (v, i) = val;
3897 return gen_rtx_raw_CONST_VECTOR (mode, v);
3907 /* For the following tests, ensure const0_rtx is op1. */
3908 if (swap_commutative_operands_p (op0, op1)
3909 || (op0 == const0_rtx && op1 != const0_rtx))
3910 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3912 /* If op0 is a compare, extract the comparison arguments from it. */
3913 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3914 return simplify_gen_relational (code, mode, VOIDmode,
3915 XEXP (op0, 0), XEXP (op0, 1));
3917 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3921 trueop0 = avoid_constant_pool_reference (op0);
3922 trueop1 = avoid_constant_pool_reference (op1);
3923 return simplify_relational_operation_1 (code, mode, cmp_mode,
3927 /* This part of simplify_relational_operation is only used when CMP_MODE
3928 is not in class MODE_CC (i.e. it is a real comparison).
3930 MODE is the mode of the result, while CMP_MODE specifies the mode
3931 in which the comparison is done, so it is the mode of the operands. */
3934 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3935 enum machine_mode cmp_mode, rtx op0, rtx op1)
3937 enum rtx_code op0code = GET_CODE (op0);
3939 if (op1 == const0_rtx && COMPARISON_P (op0))
3941 /* If op0 is a comparison, extract the comparison arguments
3942 from it. */
3945 if (GET_MODE (op0) == mode)
3946 return simplify_rtx (op0);
3948 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3949 XEXP (op0, 0), XEXP (op0, 1));
3951 else if (code == EQ)
3953 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3954 if (new_code != UNKNOWN)
3955 return simplify_gen_relational (new_code, mode, VOIDmode,
3956 XEXP (op0, 0), XEXP (op0, 1));
3960 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
3961 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
3962 if ((code == LTU || code == GEU)
3963 && GET_CODE (op0) == PLUS
3964 && CONST_INT_P (XEXP (op0, 1))
3965 && (rtx_equal_p (op1, XEXP (op0, 0))
3966 || rtx_equal_p (op1, XEXP (op0, 1))))
3969 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
3970 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
3971 cmp_mode, XEXP (op0, 0), new_cmp);
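/* This is the classic unsigned overflow idiom.  E.g. in an 8-bit
   mode with C == 1, (ltu (plus a 1) 1) holds iff a + 1 wrapped to
   zero, i.e. iff a == 0xff, which is exactly (geu a (neg 1)), since
   (neg 1) is 0xff unsigned.  */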
3974 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
3975 if ((code == LTU || code == GEU)
3976 && GET_CODE (op0) == PLUS
3977 && rtx_equal_p (op1, XEXP (op0, 1))
3978 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
3979 && !rtx_equal_p (op1, XEXP (op0, 0)))
3980 return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));
3982 if (op1 == const0_rtx)
3984 /* Canonicalize (GTU x 0) as (NE x 0). */
3986 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3987 /* Canonicalize (LEU x 0) as (EQ x 0). */
3989 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3991 else if (op1 == const1_rtx)
3996 /* Canonicalize (GE x 1) as (GT x 0). */
3997 return simplify_gen_relational (GT, mode, cmp_mode,
4000 /* Canonicalize (GEU x 1) as (NE x 0). */
4001 return simplify_gen_relational (NE, mode, cmp_mode,
4004 /* Canonicalize (LT x 1) as (LE x 0). */
4005 return simplify_gen_relational (LE, mode, cmp_mode,
4008 /* Canonicalize (LTU x 1) as (EQ x 0). */
4009 return simplify_gen_relational (EQ, mode, cmp_mode,
4015 else if (op1 == constm1_rtx)
4017 /* Canonicalize (LE x -1) as (LT x 0). */
4019 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4020 /* Canonicalize (GT x -1) as (GE x 0). */
4022 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4025 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4026 if ((code == EQ || code == NE)
4027 && (op0code == PLUS || op0code == MINUS)
4029 && CONSTANT_P (XEXP (op0, 1))
4030 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4032 rtx x = XEXP (op0, 0);
4033 rtx c = XEXP (op0, 1);
4035 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4037 return simplify_gen_relational (code, mode, cmp_mode, x, c);
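/* For example, (eq (plus x (const_int 3)) (const_int 10)) becomes
   (eq x (const_int 7)).  Subtracting the constant from both sides
   is always safe for integers; for floats it is only done under
   -funsafe-math-optimizations, as the condition above requires.  */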
4040 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4041 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4043 && op1 == const0_rtx
4044 && GET_MODE_CLASS (mode) == MODE_INT
4045 && cmp_mode != VOIDmode
4046 /* ??? Work-around BImode bugs in the ia64 backend. */
4048 && cmp_mode != BImode
4049 && nonzero_bits (op0, cmp_mode) == 1
4050 && STORE_FLAG_VALUE == 1)
4051 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4052 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4053 : lowpart_subreg (mode, op0, cmp_mode);
4055 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4056 if ((code == EQ || code == NE)
4057 && op1 == const0_rtx
4059 return simplify_gen_relational (code, mode, cmp_mode,
4060 XEXP (op0, 0), XEXP (op0, 1));
4062 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4063 if ((code == EQ || code == NE)
4065 && rtx_equal_p (XEXP (op0, 0), op1)
4066 && !side_effects_p (XEXP (op0, 0)))
4067 return simplify_gen_relational (code, mode, cmp_mode,
4068 XEXP (op0, 1), const0_rtx);
4070 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4071 if ((code == EQ || code == NE)
4073 && rtx_equal_p (XEXP (op0, 1), op1)
4074 && !side_effects_p (XEXP (op0, 1)))
4075 return simplify_gen_relational (code, mode, cmp_mode,
4076 XEXP (op0, 0), const0_rtx);
4078 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4079 if ((code == EQ || code == NE)
4081 && (CONST_INT_P (op1)
4082 || GET_CODE (op1) == CONST_DOUBLE)
4083 && (CONST_INT_P (XEXP (op0, 1))
4084 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4085 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4086 simplify_gen_binary (XOR, cmp_mode,
4087 XEXP (op0, 1), op1));
4089 if (op0code == POPCOUNT && op1 == const0_rtx)
4095 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4096 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4097 XEXP (op0, 0), const0_rtx);
4102 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4103 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4104 XEXP (op0, 0), const0_rtx);
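/* Both POPCOUNT folds rely on the same fact: (popcount x) is zero
   iff x has no bits set, i.e. iff x == 0, so comparing the popcount
   against zero with EQ or NE is the same as comparing x itself
   against zero.  */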
4123 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4124 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4125 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4126 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4127 For floating-point comparisons, assume that the operands were ordered. */
4130 comparison_result (enum rtx_code code, int known_results)
4136 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4139 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4143 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4146 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4150 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4153 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4156 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4158 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4161 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4163 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4166 return const_true_rtx;
4174 /* Check if the given comparison (done in the given MODE) is actually a
4175 tautology or a contradiction.
4176 If no simplification is possible, this function returns zero.
4177 Otherwise, it returns either const_true_rtx or const0_rtx. */
4180 simplify_const_relational_operation (enum rtx_code code,
4181 enum machine_mode mode,
4188 gcc_assert (mode != VOIDmode
4189 || (GET_MODE (op0) == VOIDmode
4190 && GET_MODE (op1) == VOIDmode));
4192 /* If op0 is a compare, extract the comparison arguments from it. */
4193 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4195 op1 = XEXP (op0, 1);
4196 op0 = XEXP (op0, 0);
4198 if (GET_MODE (op0) != VOIDmode)
4199 mode = GET_MODE (op0);
4200 else if (GET_MODE (op1) != VOIDmode)
4201 mode = GET_MODE (op1);
4206 /* We can't simplify MODE_CC values since we don't know what the
4207 actual comparison is. */
4208 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4211 /* Make sure the constant is second. */
4212 if (swap_commutative_operands_p (op0, op1))
4214 tem = op0, op0 = op1, op1 = tem;
4215 code = swap_condition (code);
4218 trueop0 = avoid_constant_pool_reference (op0);
4219 trueop1 = avoid_constant_pool_reference (op1);
4221 /* For integer comparisons of A and B maybe we can simplify A - B and can
4222 then simplify a comparison of that with zero. If A and B are both either
4223 a register or a CONST_INT, this can't help; testing for these cases will
4224 prevent infinite recursion here and speed things up.
4226 We can only do this for EQ and NE comparisons as otherwise we may
4227 lose or introduce overflow which we cannot disregard as undefined as
4228 we do not know the signedness of the operation on either the left or
4229 the right hand side of the comparison. */
4231 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4232 && (code == EQ || code == NE)
4233 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4234 && (REG_P (op1) || CONST_INT_P (trueop1)))
4235 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4236 /* We cannot do this if tem is a nonzero address. */
4237 && ! nonzero_address_p (tem))
4238 return simplify_const_relational_operation (signed_condition (code),
4239 mode, tem, const0_rtx);
4241 if (! HONOR_NANS (mode) && code == ORDERED)
4242 return const_true_rtx;
4244 if (! HONOR_NANS (mode) && code == UNORDERED)
4247 /* For modes without NaNs, if the two operands are equal, we know the
4248 result except if they have side-effects. Even with NaNs we know
4249 the result of unordered comparisons and, if signaling NaNs are
4250 irrelevant, also the result of LT/GT/LTGT. */
4251 if ((! HONOR_NANS (GET_MODE (trueop0))
4252 || code == UNEQ || code == UNLE || code == UNGE
4253 || ((code == LT || code == GT || code == LTGT)
4254 && ! HONOR_SNANS (GET_MODE (trueop0))))
4255 && rtx_equal_p (trueop0, trueop1)
4256 && ! side_effects_p (trueop0))
4257 return comparison_result (code, CMP_EQ);
4259 /* If the operands are floating-point constants, see if we can fold
4260 the result. */
4261 if (GET_CODE (trueop0) == CONST_DOUBLE
4262 && GET_CODE (trueop1) == CONST_DOUBLE
4263 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4265 REAL_VALUE_TYPE d0, d1;
4267 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4268 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4270 /* Comparisons are unordered iff at least one of the values is NaN. */
4271 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4281 return const_true_rtx;
4294 return comparison_result (code,
4295 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4296 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4299 /* Otherwise, see if the operands are both integers. */
4300 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4301 && (GET_CODE (trueop0) == CONST_DOUBLE
4302 || CONST_INT_P (trueop0))
4303 && (GET_CODE (trueop1) == CONST_DOUBLE
4304 || CONST_INT_P (trueop1)))
4306 int width = GET_MODE_BITSIZE (mode);
4307 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4308 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4310 /* Get the two words comprising each integer constant. */
4311 if (GET_CODE (trueop0) == CONST_DOUBLE)
4313 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4314 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4318 l0u = l0s = INTVAL (trueop0);
4319 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4322 if (GET_CODE (trueop1) == CONST_DOUBLE)
4324 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4325 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4329 l1u = l1s = INTVAL (trueop1);
4330 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4333 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4334 we have to sign or zero-extend the values. */
4335 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4337 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4338 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4340 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4341 l0s |= ((HOST_WIDE_INT) (-1) << width);
4343 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4344 l1s |= ((HOST_WIDE_INT) (-1) << width);
4346 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4347 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4349 if (h0u == h1u && l0u == l1u)
4350 return comparison_result (code, CMP_EQ);
4354 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4355 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4356 return comparison_result (code, cr);
4360 /* Optimize comparisons with upper and lower bounds. */
4361 if (SCALAR_INT_MODE_P (mode)
4362 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4363 && CONST_INT_P (trueop1))
4366 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4367 HOST_WIDE_INT val = INTVAL (trueop1);
4368 HOST_WIDE_INT mmin, mmax;
4378 /* Get a reduced range if the sign bit is zero. */
4379 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4386 rtx mmin_rtx, mmax_rtx;
4387 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4389 mmin = INTVAL (mmin_rtx);
4390 mmax = INTVAL (mmax_rtx);
4393 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4395 mmin >>= (sign_copies - 1);
4396 mmax >>= (sign_copies - 1);
4402 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4404 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4405 return const_true_rtx;
4406 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4411 return const_true_rtx;
4416 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4418 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4419 return const_true_rtx;
4420 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4425 return const_true_rtx;
4431 /* x == y is always false for y out of range. */
4432 if (val < mmin || val > mmax)
4436 /* x > y is always false for y >= mmax, always true for y < mmin. */
4438 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4440 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4441 return const_true_rtx;
4447 return const_true_rtx;
4450 /* x < y is always false for y <= mmin, always true for y > mmax. */
4452 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4454 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4455 return const_true_rtx;
4461 return const_true_rtx;
4465 /* x != y is always true for y out of range. */
4466 if (val < mmin || val > mmax)
4467 return const_true_rtx;
4475 /* Optimize integer comparisons with zero. */
4476 if (trueop1 == const0_rtx)
4478 /* Some addresses are known to be nonzero. We don't know
4479 their sign, but equality comparisons are known. */
4480 if (nonzero_address_p (trueop0))
4482 if (code == EQ || code == LEU)
4484 if (code == NE || code == GTU)
4485 return const_true_rtx;
4488 /* See if the first operand is an IOR with a constant. If so, we
4489 may be able to determine the result of this comparison. */
4490 if (GET_CODE (op0) == IOR)
4492 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4493 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4495 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4496 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4497 && (INTVAL (inner_const)
4498 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4507 return const_true_rtx;
4511 return const_true_rtx;
4525 /* Optimize comparison of ABS with zero. */
4526 if (trueop1 == CONST0_RTX (mode)
4527 && (GET_CODE (trueop0) == ABS
4528 || (GET_CODE (trueop0) == FLOAT_EXTEND
4529 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4534 /* Optimize abs(x) < 0.0. */
4535 if (!HONOR_SNANS (mode)
4536 && (!INTEGRAL_MODE_P (mode)
4537 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4539 if (INTEGRAL_MODE_P (mode)
4540 && (issue_strict_overflow_warning
4541 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4542 warning (OPT_Wstrict_overflow,
4543 ("assuming signed overflow does not occur when "
4544 "assuming abs (x) < 0 is false"));
4550 /* Optimize abs(x) >= 0.0. */
4551 if (!HONOR_NANS (mode)
4552 && (!INTEGRAL_MODE_P (mode)
4553 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4555 if (INTEGRAL_MODE_P (mode)
4556 && (issue_strict_overflow_warning
4557 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4558 warning (OPT_Wstrict_overflow,
4559 ("assuming signed overflow does not occur when "
4560 "assuming abs (x) >= 0 is true"));
4561 return const_true_rtx;
4566 /* Optimize ! (abs(x) < 0.0). */
4567 return const_true_rtx;
4577 /* Simplify CODE, an operation with result mode MODE and three operands,
4578 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4579 a constant. Return 0 if no simplification is possible. */
4582 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4583 enum machine_mode op0_mode, rtx op0, rtx op1,
4586 unsigned int width = GET_MODE_BITSIZE (mode);
4588 /* VOIDmode means "infinite" precision. */
4590 width = HOST_BITS_PER_WIDE_INT;
4596 if (CONST_INT_P (op0)
4597 && CONST_INT_P (op1)
4598 && CONST_INT_P (op2)
4599 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4600 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4602 /* Extracting a bit-field from a constant. */
4603 HOST_WIDE_INT val = INTVAL (op0);
4605 if (BITS_BIG_ENDIAN)
4606 val >>= (GET_MODE_BITSIZE (op0_mode)
4607 - INTVAL (op2) - INTVAL (op1));
4609 val >>= INTVAL (op2);
4611 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4613 /* First zero-extend. */
4614 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4615 /* If desired, propagate sign bit. */
4616 if (code == SIGN_EXTRACT
4617 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4618 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4621 /* Clear the bits that don't belong in our mode,
4622 unless they and our sign bit are all one.
4623 So we get either a reasonable negative value or a reasonable
4624 unsigned value for this mode. */
4625 if (width < HOST_BITS_PER_WIDE_INT
4626 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4627 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4628 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4630 return gen_int_mode (val, mode);
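/* A worked example of the extraction above, assuming
   BITS_BIG_ENDIAN is clear:
   (zero_extract (const_int 0x2c) (const_int 3) (const_int 2))
   shifts 0b101100 right by 2 giving 0b1011, then masks to 3 bits
   giving 0b011, i.e. (const_int 3).  With SIGN_EXTRACT and a start
   position of 1 the field is 0b110, whose top bit is set, so it
   sign-extends to (const_int -2).  */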
4635 if (CONST_INT_P (op0))
4636 return op0 != const0_rtx ? op1 : op2;
4638 /* Convert c ? a : a into "a". */
4639 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4642 /* Convert a != b ? a : b into "a". */
4643 if (GET_CODE (op0) == NE
4644 && ! side_effects_p (op0)
4645 && ! HONOR_NANS (mode)
4646 && ! HONOR_SIGNED_ZEROS (mode)
4647 && ((rtx_equal_p (XEXP (op0, 0), op1)
4648 && rtx_equal_p (XEXP (op0, 1), op2))
4649 || (rtx_equal_p (XEXP (op0, 0), op2)
4650 && rtx_equal_p (XEXP (op0, 1), op1))))
4653 /* Convert a == b ? a : b into "b". */
4654 if (GET_CODE (op0) == EQ
4655 && ! side_effects_p (op0)
4656 && ! HONOR_NANS (mode)
4657 && ! HONOR_SIGNED_ZEROS (mode)
4658 && ((rtx_equal_p (XEXP (op0, 0), op1)
4659 && rtx_equal_p (XEXP (op0, 1), op2))
4660 || (rtx_equal_p (XEXP (op0, 0), op2)
	      && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;
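      /* The HONOR_SIGNED_ZEROS guard above matters: if OP1 is -0.0 and
	 OP2 is +0.0, "op1 == op2 ? op1 : op2" evaluates to -0.0, while
	 the folded form OP2 would yield +0.0.  */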
4664 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4666 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4667 ? GET_MODE (XEXP (op0, 1))
4668 : GET_MODE (XEXP (op0, 0)));
	  /* Look for constants in OP1 and OP2 that let us collapse the
	     IF_THEN_ELSE into the comparison (or its reverse) itself.  */
4672 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4674 HOST_WIDE_INT t = INTVAL (op1);
4675 HOST_WIDE_INT f = INTVAL (op2);
4677 if (t == STORE_FLAG_VALUE && f == 0)
4678 code = GET_CODE (op0);
4679 else if (t == 0 && f == STORE_FLAG_VALUE)
4682 tmp = reversed_comparison_code (op0, NULL_RTX);
4690 return simplify_gen_relational (code, mode, cmp_mode,
4691 XEXP (op0, 0), XEXP (op0, 1));
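	      /* For example, on a target with STORE_FLAG_VALUE == 1,
		 (if_then_else (lt X Y) (const_int 1) (const_int 0))
		 collapses to the comparison (lt X Y) itself, and with
		 the constants swapped the reversed comparison (ge X Y)
		 is generated instead.  */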
4694 if (cmp_mode == VOIDmode)
4695 cmp_mode = op0_mode;
4696 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));
4700 /* See if any simplifications were possible. */
4703 if (CONST_INT_P (temp))
4704 return temp == const0_rtx ? op2 : op1;
4706 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
4713 gcc_assert (GET_MODE (op1) == mode);
4714 gcc_assert (VECTOR_MODE_P (mode));
4715 op2 = avoid_constant_pool_reference (op2);
4716 if (CONST_INT_P (op2))
4718 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4719 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4720 int mask = (1 << n_elts) - 1;
	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;
4727 op0 = avoid_constant_pool_reference (op0);
4728 op1 = avoid_constant_pool_reference (op1);
4729 if (GET_CODE (op0) == CONST_VECTOR
4730 && GET_CODE (op1) == CONST_VECTOR)
4732 rtvec v = rtvec_alloc (n_elts);
4735 for (i = 0; i < n_elts; i++)
4736 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4737 ? CONST_VECTOR_ELT (op0, i)
4738 : CONST_VECTOR_ELT (op1, i));
4739 return gen_rtx_CONST_VECTOR (mode, v);
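	      /* E.g. for V4SI, (vec_merge:V4SI OP0 OP1 (const_int 5))
		 selects elements 0 and 2 from OP0 (mask bits set) and
		 elements 1 and 3 from OP1; an all-zeros or all-ones
		 mask degenerates to OP1 or OP0 as handled just above.  */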
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.
4755 Works by unpacking OP into a collection of 8-bit values
4756 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4757 and then repacking them again for OUTERMODE. */
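/* For example (hypothetical, little-endian target):

     simplify_immed_subreg (HImode, GEN_INT (0x12345678), SImode, 0)

   unpacks the SImode constant into the byte array 78 56 34 12,
   selects the two bytes at offset 0 and repacks them, yielding
   (const_int 0x5678).  */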
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
4761 enum machine_mode innermode, unsigned int byte)
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
4778 rtvec result_v = NULL;
4779 enum mode_class outer_class;
4780 enum machine_mode outer_submode;
  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;
4786 /* We have no way to represent a complex constant at the rtl level. */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;
4790 /* Unpack the value. */
4792 if (GET_CODE (op) == CONST_VECTOR)
4794 num_elem = CONST_VECTOR_NUNITS (op);
4795 elems = &CONST_VECTOR_ELT (op, 0);
4796 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4802 elem_bitsize = max_bitsize;
4804 /* If this asserts, it is too complicated; reducing value_bit may help. */
4805 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4806 /* I don't know how to handle endianness of sub-units. */
4807 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4809 for (elem = 0; elem < num_elem; elem++)
4812 rtx el = elems[elem];
      /* Vectors are kept in target memory order.  */
4817 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4818 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4820 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4821 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4822 unsigned bytele = (subword_byte % UNITS_PER_WORD
4823 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4824 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
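      /* E.g. for a V4HI vector on a big-endian 32-bit target
	 (BYTES_BIG_ENDIAN and WORDS_BIG_ENDIAN, UNITS_PER_WORD == 4),
	 element 0 computes IBYTE = 6, so its two bytes land at offsets
	 6 and 7 of the little-endian VALUE array, not at the front.  */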
      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
4834 /* CONST_INTs are always logically sign-extended. */
4835 for (; i < elem_bitsize; i += value_bit)
4836 *vp++ = INTVAL (el) < 0 ? -1 : 0;
	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
4842 /* If this triggers, someone should have generated a
4843 CONST_INT instead. */
4844 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4846 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4847 *vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
4861 long tmp[max_bitsize / 32];
4862 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4864 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4865 gcc_assert (bitsize <= elem_bitsize);
4866 gcc_assert (bitsize % value_bit == 0);
4868 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4871 /* real_to_target produces its result in words affected by
4872 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4873 and use WORDS_BIG_ENDIAN instead; see the documentation
4874 of SUBREG in rtl.texi. */
	    for (i = 0; i < bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = bitsize - 1 - i;
		else
		  ibase = i;
		*vp++ = tmp[ibase / 32] >> i % 32;
	      }
	    /* It shouldn't matter what's done here, so fill it with
	       zero.  */
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = 0;
4893 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4895 for (i = 0; i < elem_bitsize; i += value_bit)
4896 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4900 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4901 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4902 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4904 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4905 >> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
4916 /* Now, pick the right byte to start with. */
4917 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4918 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4919 will already have offset 0. */
4920 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4922 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4924 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4925 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4926 byte = (subword_byte % UNITS_PER_WORD
4927 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
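  /* E.g. for (subreg:HI (x:SI) 2) on a big-endian 32-bit target,
     IBYTE = (4 - 2) - 2 = 0, so the renumbered BYTE is 0: the
     high-addressed half of a big-endian SImode value is its least
     significant half.  */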
4930 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4931 so if it's become negative it will instead be very large.) */
4932 gcc_assert (byte < GET_MODE_SIZE (innermode));
4934 /* Convert from bytes to chunks of size value_bit. */
4935 value_start = byte * (BITS_PER_UNIT / value_bit);
4937 /* Re-pack the value. */
4939 if (VECTOR_MODE_P (outermode))
4941 num_elem = GET_MODE_NUNITS (outermode);
4942 result_v = rtvec_alloc (num_elem);
4943 elems = &RTVEC_ELT (result_v, 0);
4944 outer_submode = GET_MODE_INNER (outermode);
4950 outer_submode = outermode;
4953 outer_class = GET_MODE_CLASS (outer_submode);
4954 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4956 gcc_assert (elem_bitsize % value_bit == 0);
4957 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4959 for (elem = 0; elem < num_elem; elem++)
      /* Vectors are stored in target memory order.  */
4966 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4967 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4969 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4970 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4971 unsigned bytele = (subword_byte % UNITS_PER_WORD
4972 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4973 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4976 switch (outer_class)
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4987 for (; i < elem_bitsize; i += value_bit)
4988 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4989 << (i - HOST_BITS_PER_WIDE_INT));
	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
4993 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4994 elems[elem] = gen_int_mode (lo, outer_submode);
4995 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4996 elems[elem] = immed_double_const (lo, hi, outer_submode);
5003 case MODE_DECIMAL_FLOAT:
5006 long tmp[max_bitsize / 32];
5008 /* real_from_target wants its input in words affected by
5009 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5010 and use WORDS_BIG_ENDIAN instead; see the documentation
5011 of SUBREG in rtl.texi. */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }
5024 real_from_target (&r, tmp, outer_submode);
5025 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5043 for (; i < elem_bitsize; i += value_bit)
5044 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5045 << (i - HOST_BITS_PER_WIDE_INT));
5047 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5055 if (VECTOR_MODE_P (outermode))
5056 return gen_rtx_CONST_VECTOR (outermode, result_v);
5061 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5062 Return 0 if no simplifications are possible. */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
5065 enum machine_mode innermode, unsigned int byte)
5067 /* Little bit of sanity checking. */
5068 gcc_assert (innermode != VOIDmode);
5069 gcc_assert (outermode != VOIDmode);
5070 gcc_assert (innermode != BLKmode);
5071 gcc_assert (outermode != BLKmode);
5073 gcc_assert (GET_MODE (op) == innermode
5074 || GET_MODE (op) == VOIDmode);
5076 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5077 gcc_assert (byte < GET_MODE_SIZE (innermode));
  if (outermode == innermode && !byte)
    return op;
5082 if (CONST_INT_P (op)
5083 || GET_CODE (op) == CONST_DOUBLE
5084 || GET_CODE (op) == CONST_FIXED
5085 || GET_CODE (op) == CONST_VECTOR)
5086 return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
5090 if (GET_CODE (op) == SUBREG)
5092 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5093 int final_offset = byte + SUBREG_BYTE (op);
5096 if (outermode == innermostmode
5097 && byte == 0 && SUBREG_BYTE (op) == 0)
5098 return SUBREG_REG (op);
      /* SUBREG_BYTE represents the offset, as if the value were stored in
	 memory.  The irritating exception is a paradoxical subreg, where
	 we define SUBREG_BYTE to be 0; on big-endian machines the byte
	 offset would otherwise be negative.  For a moment, undo this
	 exception.  */
5104 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5106 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5107 if (WORDS_BIG_ENDIAN)
5108 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5109 if (BYTES_BIG_ENDIAN)
5110 final_offset += difference % UNITS_PER_WORD;
5112 if (SUBREG_BYTE (op) == 0
5113 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5115 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5116 if (WORDS_BIG_ENDIAN)
5117 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5118 if (BYTES_BIG_ENDIAN)
5119 final_offset += difference % UNITS_PER_WORD;
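      /* E.g. (subreg:QI (subreg:HI (x:SI) 2) 1) on a little-endian
	 target merges to (subreg:QI (x:SI) 3); with no paradoxical
	 subreg involved, FINAL_OFFSET is simply the sum of the two
	 byte offsets.  */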
      /* See whether the resulting subreg will be paradoxical.  */
5123 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5125 /* In nonparadoxical subregs we can't handle negative offsets. */
	  if (final_offset < 0)
	    return NULL_RTX;
5128 /* Bail out in case resulting subreg would be incorrect. */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
5136 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
	  /* For a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
5140 if (WORDS_BIG_ENDIAN)
5141 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5142 if (BYTES_BIG_ENDIAN)
5143 offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
5150 /* Recurse for further possible simplifications. */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
5155 if (validate_subreg (outermode, innermostmode,
5156 SUBREG_REG (op), final_offset))
5158 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5159 if (SUBREG_PROMOTED_VAR_P (op)
5160 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5161 && GET_MODE_CLASS (outermode) == MODE_INT
5162 && IN_RANGE (GET_MODE_SIZE (outermode),
5163 GET_MODE_SIZE (innermode),
5164 GET_MODE_SIZE (innermostmode))
5165 && subreg_lowpart_p (newx))
5167 SUBREG_PROMOTED_VAR_P (newx) = 1;
5168 SUBREG_PROMOTED_UNSIGNED_SET
	       (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
5176 /* Merge implicit and explicit truncations. */
5178 if (GET_CODE (op) == TRUNCATE
5179 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5180 && subreg_lowpart_offset (outermode, innermode) == byte)
5181 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5182 GET_MODE (XEXP (op, 0)));
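  /* E.g. the lowpart (subreg:QI (truncate:HI (reg:SI x)) 0) on a
     little-endian target becomes (truncate:QI (reg:SI x)): a lowpart
     subreg of a truncation is just a narrower truncation of the same
     value.  */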
5184 /* SUBREG of a hard register => just change the register number
5185 and/or mode. If the hard register is not valid in that mode,
5186 suppress this simplification. If the hard register is the stack,
5187 frame, or argument pointer, leave this as a SUBREG. */
5189 if (REG_P (op) && HARD_REGISTER_P (op))
5191 unsigned int regno, final_regno;
      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5195 if (HARD_REGISTER_NUM_P (final_regno))
5198 int final_offset = byte;
5200 /* Adjust offset for paradoxical subregs. */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5204 int difference = (GET_MODE_SIZE (innermode)
5205 - GET_MODE_SIZE (outermode));
5206 if (WORDS_BIG_ENDIAN)
5207 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5208 if (BYTES_BIG_ENDIAN)
5209 final_offset += difference % UNITS_PER_WORD;
5212 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */
5219 if (subreg_lowpart_offset (outermode, innermode) == byte)
5220 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5225 /* If we have a SUBREG of a register that we are replacing and we are
5226 replacing it with a MEM, make a new MEM and try replacing the
5227 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5228 or if we would be widening it. */
  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
5234 && (! MEM_VOLATILE_P (op)
5235 || ! have_insn_for (SET, innermode))
5236 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5237 return adjust_address_nv (op, outermode, byte);
5239 /* Handle complex values represented as CONCAT
5240 of real and imaginary part. */
5241 if (GET_CODE (op) == CONCAT)
5243 unsigned int part_size, final_offset;
5246 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5247 if (byte < part_size)
5249 part = XEXP (op, 0);
5250 final_offset = byte;
5254 part = XEXP (op, 1);
5255 final_offset = byte - part_size;
      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
5264 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5265 return gen_rtx_SUBREG (outermode, part, final_offset);
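  /* E.g. given (concat:SC (reg:SF re) (reg:SF im)) with PART_SIZE == 4,
     (subreg:SF ... 0) recurses into the first part and returns RE
     directly, while (subreg:SF ... 4) returns IM.  */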
5269 /* Optimize SUBREG truncations of zero and sign extended values. */
5270 if ((GET_CODE (op) == ZERO_EXTEND
5271 || GET_CODE (op) == SIGN_EXTEND)
5272 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5274 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5276 /* If we're requesting the lowpart of a zero or sign extension,
5277 there are three possibilities. If the outermode is the same
5278 as the origmode, we can omit both the extension and the subreg.
5279 If the outermode is not larger than the origmode, we can apply
5280 the truncation without the extension. Finally, if the outermode
5281 is larger than the origmode, but both are integer modes, we
5282 can just extend to the appropriate mode. */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5286 if (outermode == origmode)
5287 return XEXP (op, 0);
5288 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
5292 if (SCALAR_INT_MODE_P (outermode))
5293 return simplify_gen_unary (GET_CODE (op), outermode,
5294 XEXP (op, 0), origmode);
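      /* Concretely, for the lowpart of (zero_extend:DI (reg:HI x)):
	 requesting HImode returns (reg:HI x) itself, requesting QImode
	 becomes a lowpart subreg of X, and requesting SImode re-extends
	 as (zero_extend:SI (reg:HI x)).  */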
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source has.  */
5299 if (GET_CODE (op) == ZERO_EXTEND
5300 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5301 return CONST0_RTX (outermode);
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
5307 if ((GET_CODE (op) == LSHIFTRT
5308 || GET_CODE (op) == ASHIFTRT)
5309 && SCALAR_INT_MODE_P (outermode)
5310 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5311 to avoid the possibility that an outer LSHIFTRT shifts by more
5312 than the sign extension's sign_bit_copies and introduces zeros
5313 into the high bits of the result. */
5314 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5315 && CONST_INT_P (XEXP (op, 1))
5316 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5317 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5318 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5319 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5320 return simplify_gen_binary (ASHIFTRT, outermode,
5321 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
5326 if ((GET_CODE (op) == LSHIFTRT
5327 || GET_CODE (op) == ASHIFTRT)
5328 && SCALAR_INT_MODE_P (outermode)
5329 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5330 && CONST_INT_P (XEXP (op, 1))
5331 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5332 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5333 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5334 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5335 return simplify_gen_binary (LSHIFTRT, outermode,
5336 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
5341 if (GET_CODE (op) == ASHIFT
5342 && SCALAR_INT_MODE_P (outermode)
5343 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5344 && CONST_INT_P (XEXP (op, 1))
5345 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5346 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5347 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5348 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5349 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5350 return simplify_gen_binary (ASHIFT, outermode,
5351 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5353 /* Recognize a word extraction from a multi-word subreg. */
5354 if ((GET_CODE (op) == LSHIFTRT
5355 || GET_CODE (op) == ASHIFTRT)
5356 && SCALAR_INT_MODE_P (outermode)
5357 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5358 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5359 && CONST_INT_P (XEXP (op, 1))
5360 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5361 && INTVAL (XEXP (op, 1)) >= 0
5362 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5363 && byte == subreg_lowpart_offset (outermode, innermode))
5365 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
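      /* E.g. on a 32-bit little-endian target,
	 (subreg:SI (lshiftrt:DI (reg:DI x) (const_int 32)) 0) becomes
	 the subreg of X at byte offset 0 + SHIFTED_BYTES (4), i.e. the
	 high word of X.  */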
5375 /* Make a SUBREG operation or equivalent if it folds. */
rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
5379 enum machine_mode innermode, unsigned int byte)
  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;
5387 if (GET_CODE (op) == SUBREG
5388 || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
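/* A minimal usage sketch (hypothetical caller): to take the low SImode
   word of a DImode operand on a little-endian target, write

     rtx lowpart = simplify_gen_subreg (SImode, op, DImode, 0);

   which yields either a folded rtx (for constants, hard registers,
   nested subregs, ...), a fresh (subreg:SI op 0) when nothing folds,
   or NULL_RTX when no valid subreg can be formed.  */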
5398 /* Simplify X, an rtx expression.
   Return the simplified expression or NULL if no simplifications
   were possible.
5403 This is the preferred entry point into the simplification routines;
5404 however, we still allow passes to call the more specific routines.
5406 Right now GCC has three (yes, three) major bodies of RTL simplification
5407 code that need to be unified.
   1. fold_rtx in cse.c.  This code uses various CSE-specific
      information to aid in RTL simplification.
   2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
      it uses combine-specific information to aid in RTL
      simplification.
5416 3. The routines in this file.
   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:
     1. Pore over fold_rtx & simplify_rtx and move any simplifications
	which do not depend on pass-specific state into these routines.
5425 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5426 use this routine whenever possible.
     3. Allow for pass-dependent state to be provided to these
	routines and add simplifications based on that state.  Remove
	code from cse.c & combine.c that becomes redundant/dead.
5433 It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
5441 const enum rtx_code code = GET_CODE (x);
5442 const enum machine_mode mode = GET_MODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
5448 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5449 case RTX_COMM_ARITH:
5450 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5451 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5453 /* Fall through.... */
    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
5460 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));
    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;
    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;
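      /* E.g. (lo_sum:SI (high:SI (symbol_ref "foo")) (symbol_ref "foo"))
	 collapses to (symbol_ref "foo"), since HIGH and LO_SUM together
	 reconstruct the complete address.  */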