/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
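
/* Illustrative example (not in the original source): HWI_SIGN_EXTEND
   computes the high word that makes a (low, high) pair behave as one
   signed double-width value:

     HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5)  ==>  (HOST_WIDE_INT) -1
     HWI_SIGN_EXTEND ((HOST_WIDE_INT) 42)  ==>  (HOST_WIDE_INT) 0  */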
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
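
/* Illustrative example (not in the original source): for a 32-bit
   SImode, the sign bit is 1 << 31, so

     mode_signbit_p (SImode, gen_int_mode ((HOST_WIDE_INT) 1 << 31,
					   SImode))

   returns true, while any other single-bit constant yields false.  */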
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
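
/* Usage sketch (illustrative, not part of the original source; the
   register number is hypothetical):

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx x = simplify_gen_binary (PLUS, SImode, const0_rtx, reg);

   The commutative canonicalization puts the constant second, and the
   "x + 0" identity in simplify_binary_operation_1 then folds the
   whole expression back to just REG.  */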
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
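
/* Usage sketch (illustrative, not part of the original source): the
   simplify_gen_* entry points all follow the same pattern -- try the
   matching simplify_*_operation first, and only build a raw rtx when
   no simplification applies.  For instance, with a hypothetical
   pseudo R,

     simplify_gen_relational (EQ, SImode, SImode, R, R)

   can fold to a constant when the comparison is decidable (R == R
   and R has no side effects), and otherwise returns (eq:SI R R).  */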
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (rtx_equal_p (x, old_rtx))
	    return new_rtx;
	}
      break;

    default:
      break;
    }
  return x;
}
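
/* Usage sketch (illustrative, not part of the original source; the
   register number is hypothetical):

     rtx r   = gen_rtx_REG (SImode, 102);
     rtx sum = gen_rtx_PLUS (SImode, r, const1_rtx);
     rtx res = simplify_replace_rtx (sum, r, GEN_INT (41));

   RES is (const_int 42): the substitution rebuilds the PLUS through
   simplify_gen_binary, which folds the two constants.  */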
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == -1
	  && COMPARISON_P (op)
	  && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);
      break;
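
      /* Worked examples (illustrative, not from the original source):
	 in a two's complement integer mode,

	   (not (plus X (const_int -1)))  ==>  (neg X)
	   (not (xor X (const_int 5)))    ==>  (xor X (const_int -6))

	 because ~(x - 1) == -x and ~(x ^ c) == x ^ ~c.  */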

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (GET_CODE (XEXP (op, 1)) == CONST_INT
	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult (neg A) B).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));
      break;
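
      /* Worked example (illustrative, not from the original source):
	 in a 32-bit mode,

	   (neg (ashiftrt X (const_int 31)))
	     ==>  (lshiftrt X (const_int 31))

	 both compute "1 if X is negative, 0 otherwise".  */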

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
	}
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }
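
  /* Worked examples (illustrative, not from the original source):
     for QImode (8 bits),

       (clz:QI (const_int 1))            folds to (const_int 7)
       (popcount:QI (const_int 7))       folds to (const_int 3)
       (sign_extend:HI (const_int 128))  from QImode folds to
	 (const_int -128)

     all computed on the host in HOST_WIDE_INT arithmetic.  */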

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4], i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2 * HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
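
/* Worked example (illustrative, not from the original source): for a
   32-bit SImode, (fix:SI (const_double:DF 3.75e9)) exceeds the signed
   upper bound and is clamped to 0x7fffffff by the code above, while
   (unsigned_fix:SI ...) of the same value is in range and folds to
   3750000000.  */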

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
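
/* Worked example (illustrative, not from the original source): with a
   pseudo register A,

     (plus (plus A (const_int 1)) (const_int 2))

   is reassociated as "(a op b) op c" -> "a op (b op c)": the inner
   constant fold of 1 + 2 succeeds, giving (plus A (const_int 3)).  */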

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	  && GET_CODE (op1) == CONST_INT)
	return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
	       && GET_CODE (op0) == CONST_INT)
	return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (! FLOAT_MODE_P (mode))
	{
	  HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	  rtx lhs = op0, rhs = op1;

	  if (GET_CODE (lhs) == NEG)
	    coeff0 = -1, lhs = XEXP (lhs, 0);
	  else if (GET_CODE (lhs) == MULT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
	    coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
	  else if (GET_CODE (lhs) == ASHIFT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    coeff1 = -1, rhs = XEXP (rhs, 0);
	  else if (GET_CODE (rhs) == MULT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
	    {
	      coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      tem = simplify_gen_binary (MULT, mode, lhs,
					 GEN_INT (coeff0 + coeff1));
	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
		     ? tem : 0;
	    }
	}

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == XOR
	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
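
      /* Worked example (illustrative, not from the original source):
	 the X * C +/- X distribution above turns

	   (plus (mult X (const_int 3)) X)  ==>  (mult X (const_int 4))

	 provided the rtx_cost check says the MULT form is no more
	 expensive than the original.  */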

    case COMPARE:
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	 using cc0, in which case we want to leave it as a COMPARE
	 so we can distinguish it from a register-register-copy.

	 In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	   || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (! FLOAT_MODE_P (mode))
	{
	  HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	  rtx lhs = op0, rhs = op1;

	  if (GET_CODE (lhs) == NEG)
	    coeff0 = -1, lhs = XEXP (lhs, 0);
	  else if (GET_CODE (lhs) == MULT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
	    {
	      coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    coeff1 = -1, rhs = XEXP (rhs, 0);
	  else if (GET_CODE (rhs) == MULT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
	    {
	      coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      tem = simplify_gen_binary (MULT, mode, lhs,
					 GEN_INT (coeff0 - coeff1));
	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
		     ? tem : 0;
	    }
	}

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (GET_CODE (op1) == CONST_INT
	      || GET_CODE (op1) == CONST_DOUBLE))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	return tem;

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift unless
	 we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && (val = exact_log2 (INTVAL (trueop1))) >= 0
	  /* If the mode is larger than the host word size, and the
	     uppermost bit is set, then this isn't a power of two due
	     to implicit sign extension.  */
	  && (width <= HOST_BITS_PER_WIDE_INT
	      || val != HOST_BITS_PER_WIDE_INT - 1))
	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
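
      /* Worked example (illustrative, not from the original source):
	 (mult:SI X (const_int 8)) becomes (ashift:SI X (const_int 3)),
	 since exact_log2 (8) == 3.  */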

      /* x*2 is x+x and x*(-1) is -x  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case IOR:
      if (trueop1 == const0_rtx)
	return op0;
      if (GET_CODE (trueop1) == CONST_INT
	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return constm1_rtx;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case XOR:
      if (trueop1 == const0_rtx)
	return op0;
      if (GET_CODE (trueop1) == CONST_INT
	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (trueop0 == trueop1
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == PLUS
	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case AND:
      if (trueop1 == const0_rtx && ! side_effects_p (op0))
	return const0_rtx;
      /* If we are turning off bits already known off in OP0, we need
	 not do an AND.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
	return op0;
      if (trueop0 == trueop1 && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return const0_rtx;

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && GET_CODE (trueop1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & INTVAL (trueop1)) == 0)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && ~INTVAL (trueop1)
	  && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
			 == INTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == const0_rtx)
	return side_effects_p (op1)
	       ? simplify_gen_binary (AND, mode, op1, const0_rtx)
	       : const0_rtx;
      /* x/1 is x.  */
      if (trueop1 == const1_rtx)
	{
	  /* Handle narrowing UDIV.  */
	  rtx x = gen_lowpart_common (mode, op0);
	  if (x)
	    return x;
	  if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
	    return gen_lowpart_SUBREG (mode, op0);
	  return op0;
	}
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && (val = exact_log2 (INTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      /* x/-1.0 is -x.  */
	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -funsafe-math-optimizations.  */
	      if (flag_unsafe_math_optimizations
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == const0_rtx)
	    return side_effects_p (op1)
		   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
		   : const0_rtx;
	  /* x/1 is x.  */
	  if (trueop1 == const1_rtx)
	    {
	      /* Handle narrowing DIV.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      return op0;
	    }
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = gen_lowpart_common (mode, op0);
	      if (!x)
		x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		    ? gen_lowpart_SUBREG (mode, op0) : op0;
	      return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == const0_rtx)
	return side_effects_p (op1)
	       ? simplify_gen_binary (AND, mode, op1, const0_rtx)
	       : const0_rtx;
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == const1_rtx)
	return side_effects_p (op0)
	       ? simplify_gen_binary (AND, mode, op0, const0_rtx)
	       : const0_rtx;
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && exact_log2 (INTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    GEN_INT (INTVAL (op1) - 1));
      break;
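
      /* Worked example (illustrative, not from the original source):
	 (umod:SI X (const_int 8)) becomes (and:SI X (const_int 7)),
	 keeping only the low three bits.  */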

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == const0_rtx)
	return side_effects_p (op1)
	       ? simplify_gen_binary (AND, mode, op1, const0_rtx)
	       : const0_rtx;
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
	return side_effects_p (op0)
	       ? simplify_gen_binary (AND, mode, op0, const0_rtx)
	       : const0_rtx;
      break;

    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      /* Rotating ~0 always results in ~0.  */
      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	  && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;

      /* Fall through....  */

    case ASHIFT:
    case LSHIFTRT:
      if (trueop1 == const0_rtx)
	return op0;
      if (trueop0 == const0_rtx && ! side_effects_p (op1))
	return op0;
      break;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && GET_CODE (trueop1) == CONST_INT
	  && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && GET_CODE (trueop1) == CONST_INT
	  && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
	      == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == const0_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == 1);
	  gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
						      (trueop1, 0, 0)));
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    {
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
	      for (i = 0; i < n_elts; i++)
		{
		  rtx x = XVECEXP (trueop1, 0, i);

		  gcc_assert (GET_CODE (x) == CONST_INT);
		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
						       INTVAL (x));
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      return 0;
    case VEC_CONCAT:
      {
	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				      ? GET_MODE (trueop0)
				      : GET_MODE_INNER (mode));
	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				      ? GET_MODE (trueop1)
				      : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    == GET_MODE_SIZE (mode));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || GET_CODE (trueop0) == CONST_INT
	     || GET_CODE (trueop0) == CONST_DOUBLE)
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| GET_CODE (trueop1) == CONST_INT
		|| GET_CODE (trueop1) == CONST_DOUBLE))
	  {
	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;
	    unsigned in_n_elts = 1;

	    if (VECTOR_MODE_P (op0_mode))
	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }

	return 0;
      }
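
      /* Worked example (illustrative, not from the original source):
	 (vec_concat:V2SI (const_int 1) (const_int 2)) folds to the
	 CONST_VECTOR [1, 2]; with vector operands the elements of
	 each input are copied into the wider result in order.  */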

    default:
      gcc_unreachable ();
    }

  return 0;
}

rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
				 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
		   && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return 0;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }
2190 /* We can fold some multi-word operations. */
2191 if (GET_MODE_CLASS (mode) == MODE_INT
2192 && width == HOST_BITS_PER_WIDE_INT * 2
2193 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2194 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2196 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2197 HOST_WIDE_INT h1, h2, hv, ht;
2199 if (GET_CODE (op0) == CONST_DOUBLE)
2200 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2202 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2204 if (GET_CODE (op1) == CONST_DOUBLE)
2205 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2207 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2212 /* A - B == A + (-B). */
2213 neg_double (l2, h2, &lv, &hv);
2216 /* Fall through.... */
2219 add_double (l1, h1, l2, h2, &lv, &hv);
2223 mul_double (l1, h1, l2, h2, &lv, &hv);
2227 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2228 &lv, &hv, <, &ht))
2233 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2234 <, &ht, &lv, &hv))
2239 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2240 &lv, &hv, <, &ht))
2245 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2246 <, &ht, &lv, &hv))
2251 lv = l1 & l2, hv = h1 & h2;
2255 lv = l1 | l2, hv = h1 | h2;
2259 lv = l1 ^ l2, hv = h1 ^ h2;
2265 && ((unsigned HOST_WIDE_INT) l1
2266 < (unsigned HOST_WIDE_INT) l2)))
2275 && ((unsigned HOST_WIDE_INT) l1
2276 > (unsigned HOST_WIDE_INT) l2)))
2283 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2285 && ((unsigned HOST_WIDE_INT) l1
2286 < (unsigned HOST_WIDE_INT) l2)))
2293 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2295 && ((unsigned HOST_WIDE_INT) l1
2296 > (unsigned HOST_WIDE_INT) l2)))
2302 case LSHIFTRT: case ASHIFTRT:
2304 case ROTATE: case ROTATERT:
2305 if (SHIFT_COUNT_TRUNCATED)
2306 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2308 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2311 if (code == LSHIFTRT || code == ASHIFTRT)
2312 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2314 else if (code == ASHIFT)
2315 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2316 else if (code == ROTATE)
2317 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2318 else /* code == ROTATERT */
2319 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2326 return immed_double_const (lv, hv, mode);
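/* Worked example (illustrative, not from the original sources): on a
   host where HOST_WIDE_INT is 32 bits, folding (plus:DI
   (const_int 0x7fffffff) (const_int 1)) goes through add_double,
   producing the pair (low = 0x80000000, high = 0); immed_double_const
   then returns a CONST_DOUBLE, since that value no longer fits in a
   sign-extended CONST_INT.  */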
2329 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2330 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2332 /* Get the integer argument values in two forms:
2333 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2335 arg0 = INTVAL (op0);
2336 arg1 = INTVAL (op1);
2338 if (width < HOST_BITS_PER_WIDE_INT)
2340 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2341 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2343 arg0s = arg0;
2344 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2345 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2347 arg1s = arg1;
2348 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2349 arg1s |= ((HOST_WIDE_INT) (-1) << width);
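/* Worked example (illustrative): for QImode (WIDTH == 8) and
   op0 == (const_int -1), the masking step gives ARG0 == 0xff == 255
   and the sign-extension step gives ARG0S == -1.  The signed cases
   below operate on ARG0S, the unsigned ones on ARG0.  */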
2357 /* Compute the value of the arithmetic. */
2362 val = arg0s + arg1s;
2366 val = arg0s - arg1s;
2370 val = arg0s * arg1s;
2375 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2378 val = arg0s / arg1s;
2383 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2386 val = arg0s % arg1s;
2391 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2394 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2399 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2402 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2420 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
2421 the value is in range. We can't return any old value for
2422 out-of-range arguments because either the middle-end (via
2423 shift_truncation_mask) or the back-end might be relying on
2424 target-specific knowledge. Nor can we rely on
2425 shift_truncation_mask, since the shift might not be part of an
2426 ashlM3, lshrM3 or ashrM3 instruction. */
2427 if (SHIFT_COUNT_TRUNCATED)
2428 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2429 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2432 val = (code == ASHIFT
2433 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2434 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2436 /* Sign-extend the result for arithmetic right shifts. */
2437 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2438 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
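/* Worked example (illustrative): (ashiftrt:QI (const_int -128)
   (const_int 1)) has ARG0 == 0x80 and ARG0S == -128; the unsigned
   shift yields 0x40, and the sign-extension step above ORs in all
   bits from bit 7 upward, so gen_int_mode returns (const_int -64).  */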
2446 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2447 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2455 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2456 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2460 /* Do nothing here. */
2464 val = arg0s <= arg1s ? arg0s : arg1s;
2468 val = ((unsigned HOST_WIDE_INT) arg0
2469 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2473 val = arg0s > arg1s ? arg0s : arg1s;
2477 val = ((unsigned HOST_WIDE_INT) arg0
2478 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2485 /* ??? There are simplifications that can be done. */
2492 return gen_int_mode (val, mode);
2500 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2503 Rather than test for specific cases, we do this by a brute-force method
2504 and do all possible simplifications until no more changes occur. Then
2505 we rebuild the operation.
2507 If FORCE is true, then always generate the rtx. This is used to
2508 canonicalize stuff emitted from simplify_gen_binary. Note that this
2509 can still fail if the rtx is too complex. It won't fail just because
2510 the result is not 'simpler' than the input, however. */
2512 struct simplify_plus_minus_op_data
2519 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2521 const struct simplify_plus_minus_op_data *d1 = p1;
2522 const struct simplify_plus_minus_op_data *d2 = p2;
2524 return (commutative_operand_precedence (d2->op)
2525 - commutative_operand_precedence (d1->op));
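/* Illustrative caller sketch (not part of the original file; the
   register number 1 is arbitrary).  It shows the net effect of
   simplify_plus_minus when reached through simplify_gen_binary.  */
#if 0
static void
simplify_plus_minus_example (void)
{
  rtx x = gen_rtx_REG (SImode, 1);
  rtx sum = gen_rtx_PLUS (SImode, x, const1_rtx);
  rtx folded = simplify_gen_binary (MINUS, SImode, sum, x);

  /* The expansion pass collects the operands {x, 1, -x}; the
     combination pass cancels x against -x, so the rebuilt result
     should be just (const_int 1).  */
  gcc_assert (folded == const1_rtx);
}
#endif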
2529 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2532 struct simplify_plus_minus_op_data ops[8];
2534 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2538 memset (ops, 0, sizeof ops);
2540 /* Set up the two operands and then expand them until nothing has been
2541 changed. If we run out of room in our array, give up; this should
2542 almost never happen. */
2547 ops[1].neg = (code == MINUS);
2553 for (i = 0; i < n_ops; i++)
2555 rtx this_op = ops[i].op;
2556 int this_neg = ops[i].neg;
2557 enum rtx_code this_code = GET_CODE (this_op);
2566 ops[n_ops].op = XEXP (this_op, 1);
2567 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2570 ops[i].op = XEXP (this_op, 0);
2576 ops[i].op = XEXP (this_op, 0);
2577 ops[i].neg = ! this_neg;
2583 && GET_CODE (XEXP (this_op, 0)) == PLUS
2584 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2585 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2587 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2588 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2589 ops[n_ops].neg = this_neg;
2597 /* ~a -> (-a - 1) */
2600 ops[n_ops].op = constm1_rtx;
2601 ops[n_ops++].neg = this_neg;
2602 ops[i].op = XEXP (this_op, 0);
2603 ops[i].neg = !this_neg;
2611 ops[i].op = neg_const_int (mode, this_op);
2624 /* If we only have two operands, we can't do anything. */
2625 if (n_ops <= 2 && !force)
2628 /* Count the number of CONSTs we didn't split above. */
2629 for (i = 0; i < n_ops; i++)
2630 if (GET_CODE (ops[i].op) == CONST)
2633 /* Now simplify each pair of operands until nothing changes. The first
2634 time through just simplify constants against each other. */
2641 for (i = 0; i < n_ops - 1; i++)
2642 for (j = i + 1; j < n_ops; j++)
2644 rtx lhs = ops[i].op, rhs = ops[j].op;
2645 int lneg = ops[i].neg, rneg = ops[j].neg;
2647 if (lhs != 0 && rhs != 0
2648 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2650 enum rtx_code ncode = PLUS;
2656 tem = lhs, lhs = rhs, rhs = tem;
2658 else if (swap_commutative_operands_p (lhs, rhs))
2659 tem = lhs, lhs = rhs, rhs = tem;
2661 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2663 /* Reject "simplifications" that just wrap the two
2664 arguments in a CONST. Failure to do so can result
2665 in infinite recursion with simplify_binary_operation
2666 when it calls us to simplify CONST operations. */
2668 && ! (GET_CODE (tem) == CONST
2669 && GET_CODE (XEXP (tem, 0)) == ncode
2670 && XEXP (XEXP (tem, 0), 0) == lhs
2671 && XEXP (XEXP (tem, 0), 1) == rhs)
2672 /* Don't allow -x + -1 -> ~x simplifications in the
2673 first pass. This allows us the chance to combine
2674 the -1 with other constants. */
2676 && GET_CODE (tem) == NOT
2677 && XEXP (tem, 0) == rhs))
2680 if (GET_CODE (tem) == NEG)
2681 tem = XEXP (tem, 0), lneg = !lneg;
2682 if (GET_CODE (tem) == CONST_INT && lneg)
2683 tem = neg_const_int (mode, tem), lneg = 0;
2687 ops[j].op = NULL_RTX;
2697 /* Pack all the operands to the lower-numbered entries. */
2698 for (i = 0, j = 0; j < n_ops; j++)
2703 /* Sort the operations based on swap_commutative_operands_p. */
2704 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2706 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2708 && GET_CODE (ops[1].op) == CONST_INT
2709 && CONSTANT_P (ops[0].op)
2711 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2713 /* We suppressed creation of trivial CONST expressions in the
2714 combination loop to avoid recursion. Create one manually now.
2715 The combination loop should have ensured that there is exactly
2716 one CONST_INT, and the sort will have ensured that it is last
2717 in the array and that any other constant will be next-to-last. */
2720 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2721 && CONSTANT_P (ops[n_ops - 2].op))
2723 rtx value = ops[n_ops - 1].op;
2724 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2725 value = neg_const_int (mode, value);
2726 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2730 /* Count the number of CONSTs that we generated. */
2732 for (i = 0; i < n_ops; i++)
2733 if (GET_CODE (ops[i].op) == CONST)
2736 /* Give up if we didn't reduce the number of operands we had. Make
2737 sure we count a CONST as two operands. If we have the same
2738 number of operands, but have made more CONSTs than before, this
2739 is also an improvement, so accept it. */
2741 && (n_ops + n_consts > input_ops
2742 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2745 /* Put a non-negated operand first, if possible. */
2747 for (i = 0; i < n_ops && ops[i].neg; i++)
2750 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2759 /* Now make the result by performing the requested operations. */
2761 for (i = 1; i < n_ops; i++)
2762 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2763 mode, result, ops[i].op);
2768 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2770 plus_minus_operand_p (rtx x)
2772 return GET_CODE (x) == PLUS
2773 || GET_CODE (x) == MINUS
2774 || (GET_CODE (x) == CONST
2775 && GET_CODE (XEXP (x, 0)) == PLUS
2776 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2777 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
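/* For example, (const (plus (symbol_ref "foo") (const_int 4))) is a
   plus_minus_operand, so a PLUS or MINUS with it as an operand is
   worth handing to simplify_plus_minus.  (Illustrative note; "foo"
   is an arbitrary name.)  */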
2780 /* Like simplify_binary_operation except used for relational operators.
2781 MODE is the mode of the result. If MODE is VOIDmode, the operands must
2782 not both be VOIDmode.
2784 CMP_MODE specifies the mode in which the comparison is done, so it is
2785 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2786 the operands or, if both are VOIDmode, the operands are compared in
2787 "infinite precision". */
2789 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2790 enum machine_mode cmp_mode, rtx op0, rtx op1)
2792 rtx tem, trueop0, trueop1;
2794 if (cmp_mode == VOIDmode)
2795 cmp_mode = GET_MODE (op0);
2796 if (cmp_mode == VOIDmode)
2797 cmp_mode = GET_MODE (op1);
2799 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2802 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2804 if (tem == const0_rtx)
2805 return CONST0_RTX (mode);
2806 #ifdef FLOAT_STORE_FLAG_VALUE
2808 REAL_VALUE_TYPE val;
2809 val = FLOAT_STORE_FLAG_VALUE (mode);
2810 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2816 if (VECTOR_MODE_P (mode))
2818 if (tem == const0_rtx)
2819 return CONST0_RTX (mode);
2820 #ifdef VECTOR_STORE_FLAG_VALUE
2825 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2826 if (val == NULL_RTX)
2828 if (val == const1_rtx)
2829 return CONST1_RTX (mode);
2831 units = GET_MODE_NUNITS (mode);
2832 v = rtvec_alloc (units);
2833 for (i = 0; i < units; i++)
2834 RTVEC_ELT (v, i) = val;
2835 return gen_rtx_raw_CONST_VECTOR (mode, v);
2845 /* For the following tests, ensure const0_rtx is op1. */
2846 if (swap_commutative_operands_p (op0, op1)
2847 || (op0 == const0_rtx && op1 != const0_rtx))
2848 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2850 /* If op0 is a compare, extract the comparison arguments from it. */
2851 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2852 return simplify_relational_operation (code, mode, VOIDmode,
2853 XEXP (op0, 0), XEXP (op0, 1));
2855 if (mode == VOIDmode
2856 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2860 trueop0 = avoid_constant_pool_reference (op0);
2861 trueop1 = avoid_constant_pool_reference (op1);
2862 return simplify_relational_operation_1 (code, mode, cmp_mode,
2866 /* This part of simplify_relational_operation is only used when CMP_MODE
2867 is not in class MODE_CC (i.e. it is a real comparison).
2869 MODE is the mode of the result, while CMP_MODE specifies the mode in
2870 which the comparison is done, so it is the mode of the operands. */
2873 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2874 enum machine_mode cmp_mode, rtx op0, rtx op1)
2876 enum rtx_code op0code = GET_CODE (op0);
2878 if (GET_CODE (op1) == CONST_INT)
2880 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2882 /* If op0 is a comparison, extract the comparison arguments from it. */
2885 if (GET_MODE (op0) == mode)
2886 return simplify_rtx (op0);
2888 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2889 XEXP (op0, 0), XEXP (op0, 1));
2891 else if (code == EQ)
2893 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2894 if (new_code != UNKNOWN)
2895 return simplify_gen_relational (new_code, mode, VOIDmode,
2896 XEXP (op0, 0), XEXP (op0, 1));
2901 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
2902 if ((code == EQ || code == NE)
2903 && (op0code == PLUS || op0code == MINUS)
2905 && CONSTANT_P (XEXP (op0, 1))
2906 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
2908 rtx x = XEXP (op0, 0);
2909 rtx c = XEXP (op0, 1);
2911 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
2913 return simplify_gen_relational (code, mode, cmp_mode, x, c);
2916 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
2917 the same as (zero_extract:SI FOO (const_int 1) BAR). */
2919 && op1 == const0_rtx
2920 && GET_MODE_CLASS (mode) == MODE_INT
2921 && cmp_mode != VOIDmode
2922 /* ??? Work-around BImode bugs in the ia64 backend. */
2924 && cmp_mode != BImode
2925 && nonzero_bits (op0, cmp_mode) == 1
2926 && STORE_FLAG_VALUE == 1)
2927 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
2928 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
2929 : lowpart_subreg (mode, op0, cmp_mode);
2934 /* Check if the given comparison (done in the given MODE) is actually a
2935 tautology or a contradiction.
2936 If no simplification is possible, this function returns zero.
2937 Otherwise, it returns either const_true_rtx or const0_rtx. */
2940 simplify_const_relational_operation (enum rtx_code code,
2941 enum machine_mode mode,
2944 int equal, op0lt, op0ltu, op1lt, op1ltu;
2949 gcc_assert (mode != VOIDmode
2950 || (GET_MODE (op0) == VOIDmode
2951 && GET_MODE (op1) == VOIDmode));
2953 /* If op0 is a compare, extract the comparison arguments from it. */
2954 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2955 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2957 /* We can't simplify MODE_CC values since we don't know what the
2958 actual comparison is. */
2959 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2962 /* Make sure the constant is second. */
2963 if (swap_commutative_operands_p (op0, op1))
2965 tem = op0, op0 = op1, op1 = tem;
2966 code = swap_condition (code);
2969 trueop0 = avoid_constant_pool_reference (op0);
2970 trueop1 = avoid_constant_pool_reference (op1);
2972 /* For integer comparisons of A and B maybe we can simplify A - B and can
2973 then simplify a comparison of that with zero. If A and B are both either
2974 a register or a CONST_INT, this can't help; testing for these cases will
2975 prevent infinite recursion here and speed things up.
2977 If CODE is an unsigned comparison, then we can never do this optimization,
2978 because it gives an incorrect result if the subtraction wraps around zero.
2979 ANSI C defines unsigned operations such that they never overflow, and
2980 thus such cases can not be ignored; but we cannot do it even for
2981 signed comparisons for languages such as Java, so test flag_wrapv. */
2983 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2984 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2985 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2986 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2987 /* We cannot do this for == or != if tem is a nonzero address. */
2988 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2989 && code != GTU && code != GEU && code != LTU && code != LEU)
2990 return simplify_const_relational_operation (signed_condition (code),
2991 mode, tem, const0_rtx);
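/* Illustrative counterexample for the unsigned codes: with
   A == 0xc0000000 and B == 0x40000000 in SImode, (gtu A B) is true,
   but A - B == 0x80000000 is negative as a signed value, so testing
   the difference against zero with a signed condition would wrongly
   report "less".  */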
2993 if (flag_unsafe_math_optimizations && code == ORDERED)
2994 return const_true_rtx;
2996 if (flag_unsafe_math_optimizations && code == UNORDERED)
2999 /* For modes without NaNs, if the two operands are equal, we know the
3000 result unless they have side effects. */
3001 if (! HONOR_NANS (GET_MODE (trueop0))
3002 && rtx_equal_p (trueop0, trueop1)
3003 && ! side_effects_p (trueop0))
3004 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3006 /* If the operands are floating-point constants, see if we can fold
3007 the result. */
3008 else if (GET_CODE (trueop0) == CONST_DOUBLE
3009 && GET_CODE (trueop1) == CONST_DOUBLE
3010 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
3012 REAL_VALUE_TYPE d0, d1;
3014 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3015 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3017 /* Comparisons are unordered iff at least one of the values is NaN. */
3018 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3028 return const_true_rtx;
3041 equal = REAL_VALUES_EQUAL (d0, d1);
3042 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3043 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3046 /* Otherwise, see if the operands are both integers. */
3047 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3048 && (GET_CODE (trueop0) == CONST_DOUBLE
3049 || GET_CODE (trueop0) == CONST_INT)
3050 && (GET_CODE (trueop1) == CONST_DOUBLE
3051 || GET_CODE (trueop1) == CONST_INT))
3053 int width = GET_MODE_BITSIZE (mode);
3054 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3055 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3057 /* Get the two words comprising each integer constant. */
3058 if (GET_CODE (trueop0) == CONST_DOUBLE)
3060 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3061 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3065 l0u = l0s = INTVAL (trueop0);
3066 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3069 if (GET_CODE (trueop1) == CONST_DOUBLE)
3071 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3072 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3076 l1u = l1s = INTVAL (trueop1);
3077 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3080 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3081 we have to sign or zero-extend the values. */
3082 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3084 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3085 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3087 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3088 l0s |= ((HOST_WIDE_INT) (-1) << width);
3090 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3091 l1s |= ((HOST_WIDE_INT) (-1) << width);
3093 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3094 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3096 equal = (h0u == h1u && l0u == l1u);
3097 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3098 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3099 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3100 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
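/* Worked example (illustrative): comparing (const_int -128) with
   (const_int 1) in QImode gives EQUAL == 0, OP0LT == 1 (signed,
   as -128 < 1) but OP0LTU == 0 (unsigned, as 0x80 == 128 > 1).  */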
3103 /* Otherwise, there are some code-specific tests we can make. */
3106 /* Optimize comparisons with upper and lower bounds. */
3107 if (SCALAR_INT_MODE_P (mode)
3108 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3121 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3128 /* x >= min is always true. */
3129 if (rtx_equal_p (trueop1, mmin))
3130 tem = const_true_rtx;
3136 /* x <= max is always true. */
3137 if (rtx_equal_p (trueop1, mmax))
3138 tem = const_true_rtx;
3143 /* x > max is always false. */
3144 if (rtx_equal_p (trueop1, mmax))
3150 /* x < min is always false. */
3151 if (rtx_equal_p (trueop1, mmin))
3158 if (tem == const0_rtx
3159 || tem == const_true_rtx)
3166 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3171 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3172 return const_true_rtx;
3176 /* Optimize abs(x) < 0.0. */
3177 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3179 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3181 if (GET_CODE (tem) == ABS)
3187 /* Optimize abs(x) >= 0.0. */
3188 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3190 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3192 if (GET_CODE (tem) == ABS)
3193 return const_true_rtx;
3198 /* Optimize ! (abs(x) < 0.0). */
3199 if (trueop1 == CONST0_RTX (mode))
3201 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3203 if (GET_CODE (tem) == ABS)
3204 return const_true_rtx;
3215 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3221 return equal ? const_true_rtx : const0_rtx;
3224 return ! equal ? const_true_rtx : const0_rtx;
3227 return op0lt ? const_true_rtx : const0_rtx;
3230 return op1lt ? const_true_rtx : const0_rtx;
3232 return op0ltu ? const_true_rtx : const0_rtx;
3234 return op1ltu ? const_true_rtx : const0_rtx;
3237 return equal || op0lt ? const_true_rtx : const0_rtx;
3240 return equal || op1lt ? const_true_rtx : const0_rtx;
3242 return equal || op0ltu ? const_true_rtx : const0_rtx;
3244 return equal || op1ltu ? const_true_rtx : const0_rtx;
3246 return const_true_rtx;
3254 /* Simplify CODE, an operation with result mode MODE and three operands,
3255 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3256 a constant. Return 0 if no simplification is possible. */
3259 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3260 enum machine_mode op0_mode, rtx op0, rtx op1,
3263 unsigned int width = GET_MODE_BITSIZE (mode);
3265 /* VOIDmode means "infinite" precision. */
3267 width = HOST_BITS_PER_WIDE_INT;
3273 if (GET_CODE (op0) == CONST_INT
3274 && GET_CODE (op1) == CONST_INT
3275 && GET_CODE (op2) == CONST_INT
3276 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3277 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3279 /* Extracting a bit-field from a constant */
3280 HOST_WIDE_INT val = INTVAL (op0);
3282 if (BITS_BIG_ENDIAN)
3283 val >>= (GET_MODE_BITSIZE (op0_mode)
3284 - INTVAL (op2) - INTVAL (op1));
3286 val >>= INTVAL (op2);
3288 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3290 /* First zero-extend. */
3291 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3292 /* If desired, propagate sign bit. */
3293 if (code == SIGN_EXTRACT
3294 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3295 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3298 /* Clear the bits that don't belong in our mode,
3299 unless they and our sign bit are all one.
3300 So we get either a reasonable negative value or a reasonable
3301 unsigned value for this mode. */
3302 if (width < HOST_BITS_PER_WIDE_INT
3303 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3304 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3305 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3307 return gen_int_mode (val, mode);
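/* Worked example (illustrative, !BITS_BIG_ENDIAN):
   (sign_extract:SI (const_int 0xa5) (const_int 4) (const_int 4))
   shifts out the low nibble (0xa5 >> 4 == 0xa), masks to four bits,
   and propagates the set sign bit, yielding (const_int -6); the
   corresponding ZERO_EXTRACT gives (const_int 10).  */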
3312 if (GET_CODE (op0) == CONST_INT)
3313 return op0 != const0_rtx ? op1 : op2;
3315 /* Convert c ? a : a into "a". */
3316 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3319 /* Convert a != b ? a : b into "a". */
3320 if (GET_CODE (op0) == NE
3321 && ! side_effects_p (op0)
3322 && ! HONOR_NANS (mode)
3323 && ! HONOR_SIGNED_ZEROS (mode)
3324 && ((rtx_equal_p (XEXP (op0, 0), op1)
3325 && rtx_equal_p (XEXP (op0, 1), op2))
3326 || (rtx_equal_p (XEXP (op0, 0), op2)
3327 && rtx_equal_p (XEXP (op0, 1), op1))))
3330 /* Convert a == b ? a : b into "b". */
3331 if (GET_CODE (op0) == EQ
3332 && ! side_effects_p (op0)
3333 && ! HONOR_NANS (mode)
3334 && ! HONOR_SIGNED_ZEROS (mode)
3335 && ((rtx_equal_p (XEXP (op0, 0), op1)
3336 && rtx_equal_p (XEXP (op0, 1), op2))
3337 || (rtx_equal_p (XEXP (op0, 0), op2)
3338 && rtx_equal_p (XEXP (op0, 1), op1))))
3341 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3343 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3344 ? GET_MODE (XEXP (op0, 1))
3345 : GET_MODE (XEXP (op0, 0)));
3348 /* Look for happy constants in op1 and op2. */
3349 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3351 HOST_WIDE_INT t = INTVAL (op1);
3352 HOST_WIDE_INT f = INTVAL (op2);
3354 if (t == STORE_FLAG_VALUE && f == 0)
3355 code = GET_CODE (op0);
3356 else if (t == 0 && f == STORE_FLAG_VALUE)
3359 tmp = reversed_comparison_code (op0, NULL_RTX);
3367 return simplify_gen_relational (code, mode, cmp_mode,
3368 XEXP (op0, 0), XEXP (op0, 1));
3371 if (cmp_mode == VOIDmode)
3372 cmp_mode = op0_mode;
3373 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3374 cmp_mode, XEXP (op0, 0),
3377 /* See if any simplifications were possible. */
3380 if (GET_CODE (temp) == CONST_INT)
3381 return temp == const0_rtx ? op2 : op1;
3383 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
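/* For example, with STORE_FLAG_VALUE == 1, (if_then_else (lt:SI x y)
   (const_int 1) (const_int 0)) collapses to (lt:SI x y) itself, and
   with the arms swapped the reversed condition (ge) is used instead.
   (Illustrative note.)  */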
3389 gcc_assert (GET_MODE (op0) == mode);
3390 gcc_assert (GET_MODE (op1) == mode);
3391 gcc_assert (VECTOR_MODE_P (mode));
3392 op2 = avoid_constant_pool_reference (op2);
3393 if (GET_CODE (op2) == CONST_INT)
3395 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3396 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3397 int mask = (1 << n_elts) - 1;
3399 if (!(INTVAL (op2) & mask))
3401 if ((INTVAL (op2) & mask) == mask)
3404 op0 = avoid_constant_pool_reference (op0);
3405 op1 = avoid_constant_pool_reference (op1);
3406 if (GET_CODE (op0) == CONST_VECTOR
3407 && GET_CODE (op1) == CONST_VECTOR)
3409 rtvec v = rtvec_alloc (n_elts);
3412 for (i = 0; i < n_elts; i++)
3413 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3414 ? CONST_VECTOR_ELT (op0, i)
3415 : CONST_VECTOR_ELT (op1, i));
3416 return gen_rtx_CONST_VECTOR (mode, v);
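/* For example, (vec_merge:V4SI A B (const_int 5)) with constant
   vectors A and B takes elements 0 and 2 from A (mask bits 0 and 2
   are set) and elements 1 and 3 from B.  (Illustrative note.)  */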
3428 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3429 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3431 Works by unpacking OP into a collection of 8-bit values
3432 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3433 and then repacking them again for OUTERMODE. */
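/* For example (illustrative, little-endian target, 8-bit units):
   simplify_immed_subreg (QImode, GEN_INT (0x1234), SImode, 0)
   unpacks the SImode constant into the byte array {0x34, 0x12, 0, 0}
   and repacks the byte at offset 0, giving (const_int 0x34).  */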
3436 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3437 enum machine_mode innermode, unsigned int byte)
3439 /* We support up to 512-bit values (for V8DFmode). */
3440 enum {
3441 max_bitsize = 512,
3442 value_bit = 8,
3443 value_mask = (1 << value_bit) - 1
3444 };
3445 unsigned char value[max_bitsize / value_bit];
3454 rtvec result_v = NULL;
3455 enum mode_class outer_class;
3456 enum machine_mode outer_submode;
3458 /* Some ports misuse CCmode. */
3459 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3462 /* We have no way to represent a complex constant at the rtl level. */
3463 if (COMPLEX_MODE_P (outermode))
3466 /* Unpack the value. */
3468 if (GET_CODE (op) == CONST_VECTOR)
3470 num_elem = CONST_VECTOR_NUNITS (op);
3471 elems = &CONST_VECTOR_ELT (op, 0);
3472 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3478 elem_bitsize = max_bitsize;
3480 /* If this asserts, it is too complicated; reducing value_bit may help. */
3481 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3482 /* I don't know how to handle endianness of sub-units. */
3483 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3485 for (elem = 0; elem < num_elem; elem++)
3488 rtx el = elems[elem];
3490 /* Vectors are kept in target memory order. (This is probably
3491 a mistake.) */
3493 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3494 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3496 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3497 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3498 unsigned bytele = (subword_byte % UNITS_PER_WORD
3499 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3500 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3503 switch (GET_CODE (el))
3507 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3509 *vp++ = INTVAL (el) >> i;
3510 /* CONST_INTs are always logically sign-extended. */
3511 for (; i < elem_bitsize; i += value_bit)
3512 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3516 if (GET_MODE (el) == VOIDmode)
3518 /* If this triggers, someone should have generated a
3519 CONST_INT instead. */
3520 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3522 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3523 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3524 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3527 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3530 /* It shouldn't matter what's done here, so fill it with
3531 zero. */
3532 for (; i < max_bitsize; i += value_bit)
3537 long tmp[max_bitsize / 32];
3538 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3540 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3541 gcc_assert (bitsize <= elem_bitsize);
3542 gcc_assert (bitsize % value_bit == 0);
3544 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3547 /* real_to_target produces its result in words affected by
3548 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3549 and use WORDS_BIG_ENDIAN instead; see the documentation
3550 of SUBREG in rtl.texi. */
3551 for (i = 0; i < bitsize; i += value_bit)
3554 if (WORDS_BIG_ENDIAN)
3555 ibase = bitsize - 1 - i;
3558 *vp++ = tmp[ibase / 32] >> i % 32;
3561 /* It shouldn't matter what's done here, so fill it with
3562 zero. */
3563 for (; i < elem_bitsize; i += value_bit)
3573 /* Now, pick the right byte to start with. */
3574 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3575 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3576 will already have offset 0. */
3577 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3579 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3581 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3582 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3583 byte = (subword_byte % UNITS_PER_WORD
3584 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3587 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3588 so if it's become negative it will instead be very large.) */
3589 gcc_assert (byte < GET_MODE_SIZE (innermode));
3591 /* Convert from bytes to chunks of size value_bit. */
3592 value_start = byte * (BITS_PER_UNIT / value_bit);
3594 /* Re-pack the value. */
3596 if (VECTOR_MODE_P (outermode))
3598 num_elem = GET_MODE_NUNITS (outermode);
3599 result_v = rtvec_alloc (num_elem);
3600 elems = &RTVEC_ELT (result_v, 0);
3601 outer_submode = GET_MODE_INNER (outermode);
3607 outer_submode = outermode;
3610 outer_class = GET_MODE_CLASS (outer_submode);
3611 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3613 gcc_assert (elem_bitsize % value_bit == 0);
3614 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3616 for (elem = 0; elem < num_elem; elem++)
3620 /* Vectors are stored in target memory order. (This is probably
3621 a mistake.) */
3623 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3624 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3626 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3627 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3628 unsigned bytele = (subword_byte % UNITS_PER_WORD
3629 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3630 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3633 switch (outer_class)
3636 case MODE_PARTIAL_INT:
3638 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3641 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3643 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3644 for (; i < elem_bitsize; i += value_bit)
3645 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3646 << (i - HOST_BITS_PER_WIDE_INT));
3648 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3649 know why. */
3650 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3651 elems[elem] = gen_int_mode (lo, outer_submode);
3653 elems[elem] = immed_double_const (lo, hi, outer_submode);
3660 long tmp[max_bitsize / 32];
3662 /* real_from_target wants its input in words affected by
3663 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3664 and use WORDS_BIG_ENDIAN instead; see the documentation
3665 of SUBREG in rtl.texi. */
3666 for (i = 0; i < max_bitsize / 32; i++)
3668 for (i = 0; i < elem_bitsize; i += value_bit)
3671 if (WORDS_BIG_ENDIAN)
3672 ibase = elem_bitsize - 1 - i;
3675 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3678 real_from_target (&r, tmp, outer_submode);
3679 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3687 if (VECTOR_MODE_P (outermode))
3688 return gen_rtx_CONST_VECTOR (outermode, result_v);
3693 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3694 Return 0 if no simplifications are possible. */
3696 simplify_subreg (enum machine_mode outermode, rtx op,
3697 enum machine_mode innermode, unsigned int byte)
3699 /* Little bit of sanity checking. */
3700 gcc_assert (innermode != VOIDmode);
3701 gcc_assert (outermode != VOIDmode);
3702 gcc_assert (innermode != BLKmode);
3703 gcc_assert (outermode != BLKmode);
3705 gcc_assert (GET_MODE (op) == innermode
3706 || GET_MODE (op) == VOIDmode);
3708 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3709 gcc_assert (byte < GET_MODE_SIZE (innermode));
3711 if (outermode == innermode && !byte)
3714 if (GET_CODE (op) == CONST_INT
3715 || GET_CODE (op) == CONST_DOUBLE
3716 || GET_CODE (op) == CONST_VECTOR)
3717 return simplify_immed_subreg (outermode, op, innermode, byte);
3719 /* Changing mode twice with SUBREG => just change it once,
3720 or not at all if changing back to the starting mode of op. */
3721 if (GET_CODE (op) == SUBREG)
3723 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3724 int final_offset = byte + SUBREG_BYTE (op);
3727 if (outermode == innermostmode
3728 && byte == 0 && SUBREG_BYTE (op) == 0)
3729 return SUBREG_REG (op);
3731 /* The SUBREG_BYTE represents the offset, as if the value were stored
3732 in memory. An irritating exception is the paradoxical subreg, where
3733 we define SUBREG_BYTE to be 0; on big-endian machines, this
3734 value should be negative. For a moment, undo this exception. */
3735 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3737 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3738 if (WORDS_BIG_ENDIAN)
3739 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3740 if (BYTES_BIG_ENDIAN)
3741 final_offset += difference % UNITS_PER_WORD;
3743 if (SUBREG_BYTE (op) == 0
3744 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3746 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3747 if (WORDS_BIG_ENDIAN)
3748 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3749 if (BYTES_BIG_ENDIAN)
3750 final_offset += difference % UNITS_PER_WORD;
3753 /* See whether resulting subreg will be paradoxical. */
3754 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3756 /* In nonparadoxical subregs we can't handle negative offsets. */
3757 if (final_offset < 0)
3759 /* Bail out in case resulting subreg would be incorrect. */
3760 if (final_offset % GET_MODE_SIZE (outermode)
3761 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3767 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3769 /* In a paradoxical subreg, see if we are still looking at the lower
3770 part. If so, our SUBREG_BYTE will be 0. */
3771 if (WORDS_BIG_ENDIAN)
3772 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3773 if (BYTES_BIG_ENDIAN)
3774 offset += difference % UNITS_PER_WORD;
3775 if (offset == final_offset)
3781 /* Recurse for further possible simplifications. */
3782 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3786 if (validate_subreg (outermode, innermostmode,
3787 SUBREG_REG (op), final_offset))
3788 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3792 /* SUBREG of a hard register => just change the register number
3793 and/or mode. If the hard register is not valid in that mode,
3794 suppress this simplification. If the hard register is the stack,
3795 frame, or argument pointer, leave this as a SUBREG. */
3798 && REGNO (op) < FIRST_PSEUDO_REGISTER
3799 #ifdef CANNOT_CHANGE_MODE_CLASS
3800 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3801 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3802 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3804 && ((reload_completed && !frame_pointer_needed)
3805 || (REGNO (op) != FRAME_POINTER_REGNUM
3806 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3807 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3810 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3811 && REGNO (op) != ARG_POINTER_REGNUM
3813 && REGNO (op) != STACK_POINTER_REGNUM
3814 && subreg_offset_representable_p (REGNO (op), innermode,
3817 unsigned int regno = REGNO (op);
3818 unsigned int final_regno
3819 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3821 /* ??? We do allow it if the current REG is not valid for
3822 its mode. This is a kludge to work around how float/complex
3823 arguments are passed on 32-bit SPARC and should be fixed. */
3824 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3825 || ! HARD_REGNO_MODE_OK (regno, innermode))
3827 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3829 /* Propagate the original regno. We don't have any way to specify
3830 the offset inside the original regno, so do so only for the lowpart.
3831 The information is used only by alias analysis, which cannot
3832 grok partial registers anyway. */
3834 if (subreg_lowpart_offset (outermode, innermode) == byte)
3835 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3840 /* If we have a SUBREG of a register that we are replacing and we are
3841 replacing it with a MEM, make a new MEM and try replacing the
3842 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3843 or if we would be widening it. */
3846 && ! mode_dependent_address_p (XEXP (op, 0))
3847 /* Allow splitting of volatile memory references in case we don't
3848 have an instruction to move the whole thing. */
3849 && (! MEM_VOLATILE_P (op)
3850 || ! have_insn_for (SET, innermode))
3851 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3852 return adjust_address_nv (op, outermode, byte);
3854 /* Handle complex values represented as CONCAT
3855 of real and imaginary parts. */
3856 if (GET_CODE (op) == CONCAT)
3858 unsigned int inner_size, final_offset;
3861 inner_size = GET_MODE_UNIT_SIZE (innermode);
3862 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3863 final_offset = byte % inner_size;
3864 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3867 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3870 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3871 return gen_rtx_SUBREG (outermode, part, final_offset);
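/* For example, for an SCmode CONCAT of two SFmode parts RE and IM
   (4-byte units), (subreg:SF (concat:SC RE IM) 4) selects IM: byte 4
   falls in the second part, at offset 0 within it.  (Illustrative
   note.)  */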
3875 /* Optimize SUBREG truncations of zero and sign extended values. */
3876 if ((GET_CODE (op) == ZERO_EXTEND
3877 || GET_CODE (op) == SIGN_EXTEND)
3878 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3880 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3882 /* If we're requesting the lowpart of a zero or sign extension,
3883 there are three possibilities. If the outermode is the same
3884 as the origmode, we can omit both the extension and the subreg.
3885 If the outermode is not larger than the origmode, we can apply
3886 the truncation without the extension. Finally, if the outermode
3887 is larger than the origmode, but both are integer modes, we
3888 can just extend to the appropriate mode. */
3891 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3892 if (outermode == origmode)
3893 return XEXP (op, 0);
3894 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3895 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3896 subreg_lowpart_offset (outermode,
3898 if (SCALAR_INT_MODE_P (outermode))
3899 return simplify_gen_unary (GET_CODE (op), outermode,
3900 XEXP (op, 0), origmode);
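/* Examples (illustrative), all at the lowpart (offsets shown for a
   little-endian target):
     (subreg:HI (zero_extend:SI (x:HI)) 0) -> x
     (subreg:QI (zero_extend:SI (x:HI)) 0) -> (subreg:QI x 0)
     (subreg:HI (zero_extend:SI (x:QI)) 0) -> (zero_extend:HI x)  */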
3903 /* A SUBREG resulting from a zero extension may fold to zero if
3904 it extracts higher bits than the ZERO_EXTEND's source bits. */
3905 if (GET_CODE (op) == ZERO_EXTEND
3906 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3907 return CONST0_RTX (outermode);
3910 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
3911 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
3912 the outer subreg is effectively a truncation to the original mode. */
3913 if ((GET_CODE (op) == LSHIFTRT
3914 || GET_CODE (op) == ASHIFTRT)
3915 && SCALAR_INT_MODE_P (outermode)
3916 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
3917 to avoid the possibility that an outer LSHIFTRT shifts by more
3918 than the sign extension's sign_bit_copies and introduces zeros
3919 into the high bits of the result. */
3920 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
3921 && GET_CODE (XEXP (op, 1)) == CONST_INT
3922 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
3923 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3924 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3925 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3926 return simplify_gen_binary (ASHIFTRT, outermode,
3927 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3929 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
3930 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
3931 the outer subreg is effectively a truncation to the original mode. */
3932 if ((GET_CODE (op) == LSHIFTRT
3933 || GET_CODE (op) == ASHIFTRT)
3934 && SCALAR_INT_MODE_P (outermode)
3935 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3936 && GET_CODE (XEXP (op, 1)) == CONST_INT
3937 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3938 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3939 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3940 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3941 return simplify_gen_binary (LSHIFTRT, outermode,
3942 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3944 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
3945 to (ashift:QI (x:QI) C), where C is a suitable small constant and
3946 the outer subreg is effectively a truncation to the original mode. */
3947 if (GET_CODE (op) == ASHIFT
3948 && SCALAR_INT_MODE_P (outermode)
3949 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3950 && GET_CODE (XEXP (op, 1)) == CONST_INT
3951 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3952 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
3953 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3954 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3955 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3956 return simplify_gen_binary (ASHIFT, outermode,
3957 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3962 /* Make a SUBREG operation or equivalent if it folds. */
3965 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3966 enum machine_mode innermode, unsigned int byte)
3970 newx = simplify_subreg (outermode, op, innermode, byte);
3974 if (GET_CODE (op) == SUBREG
3975 || GET_CODE (op) == CONCAT
3976 || GET_MODE (op) == VOIDmode)
3979 if (validate_subreg (outermode, innermode, op, byte))
3980 return gen_rtx_SUBREG (outermode, op, byte);
3985 /* Simplify X, an rtx expression.
3987 Return the simplified expression or NULL if no simplifications
3988 can be made.
3990 This is the preferred entry point into the simplification routines;
3991 however, we still allow passes to call the more specific routines.
3993 Right now GCC has three (yes, three) major bodies of RTL simplification
3994 code that need to be unified.
3996 1. fold_rtx in cse.c. This code uses various CSE specific
3997 information to aid in RTL simplification.
3999 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4000 it uses combine specific information to aid in RTL
4003 3. The routines in this file.
4006 Long term we want to only have one body of simplification code; to
4007 get to that state I recommend the following steps:
4009 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4010 which are not pass dependent state into these routines.
4012 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4013 use this routine whenever possible.
4015 3. Allow for pass dependent state to be provided to these
4016 routines and add simplifications based on the pass dependent
4017 state. Remove code from cse.c & combine.c that becomes
4020 It will take time, but ultimately the compiler will be easier to
4021 maintain and improve. It's totally silly that when we add a
4022 simplification it needs to be added to 4 places (3 for RTL
4023 simplification and 1 for tree simplification). */
4026 simplify_rtx (rtx x)
4028 enum rtx_code code = GET_CODE (x);
4029 enum machine_mode mode = GET_MODE (x);
4031 switch (GET_RTX_CLASS (code))
4034 return simplify_unary_operation (code, mode,
4035 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4036 case RTX_COMM_ARITH:
4037 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4038 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4040 /* Fall through.... */
4043 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4046 case RTX_BITFIELD_OPS:
4047 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4048 XEXP (x, 0), XEXP (x, 1),
4052 case RTX_COMM_COMPARE:
4053 return simplify_relational_operation (code, mode,
4054 ((GET_MODE (XEXP (x, 0))
4056 ? GET_MODE (XEXP (x, 0))
4057 : GET_MODE (XEXP (x, 1))),
4063 return simplify_gen_subreg (mode, SUBREG_REG (x),
4064 GET_MODE (SUBREG_REG (x)),
4071 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4072 if (GET_CODE (XEXP (x, 0)) == HIGH
4073 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))