1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
/* Sign-extend from the low word of a (low, high) pair: evaluates to all
   one bits when LOW is negative as a signed HOST_WIDE_INT, and to zero
   otherwise.  The argument is parenthesized in the expansion so that
   expression arguments (e.g. `a - b') expand safely.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
69 neg_const_int (enum machine_mode mode, rtx i)
71 return gen_int_mode (- INTVAL (i), mode);
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
78 mode_signbit_p (enum machine_mode mode, rtx x)
80 unsigned HOST_WIDE_INT val;
83 if (GET_MODE_CLASS (mode) != MODE_INT)
86 width = GET_MODE_BITSIZE (mode);
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
117 /* Put complex operands first and constants second if commutative. */
118 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
119 && swap_commutative_operands_p (op0, op1))
120 tem = op0, op0 = op1, op1 = tem;
122 /* If this simplifies, do it. */
123 tem = simplify_binary_operation (code, mode, op0, op1);
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
133 avoid_constant_pool_reference (rtx x)
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
145 /* Handle float extensions of constant pool references. */
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
163 /* Call target hook to avoid the effects of -fpic etc.... */
164 addr = targetm.delegitimize_address (addr);
166 /* Split the address into a base and integer offset. */
167 if (GET_CODE (addr) == CONST
168 && GET_CODE (XEXP (addr, 0)) == PLUS
169 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
171 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
172 addr = XEXP (XEXP (addr, 0), 0);
175 if (GET_CODE (addr) == LO_SUM)
176 addr = XEXP (addr, 1);
178 /* If this is a constant pool reference, we can turn it into its
179 constant and hope that simplifications happen. */
180 if (GET_CODE (addr) == SYMBOL_REF
181 && CONSTANT_POOL_ADDRESS_P (addr))
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (offset != 0 || cmode != GET_MODE (x))
191 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
192 if (tem && CONSTANT_P (tem))
202 /* Return true if X is a MEM referencing the constant pool. */
205 constant_pool_reference_p (rtx x)
207 return avoid_constant_pool_reference (x) != x;
210 /* Make a unary operation by first seeing if it folds and otherwise making
211 the specified operation. */
214 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
215 enum machine_mode op_mode)
219 /* If this simplifies, use it. */
220 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
223 return gen_rtx_fmt_e (code, mode, op);
226 /* Likewise for ternary operations. */
229 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
230 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
234 /* If this simplifies, use it. */
235 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
239 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
242 /* Likewise, for relational operations.
243 CMP_MODE specifies mode comparison is done in. */
246 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
247 enum machine_mode cmp_mode, rtx op0, rtx op1)
251 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
255 return gen_rtx_fmt_ee (code, mode, op0, op1);
258 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
259 resulting RTX. Return a new RTX which is as simplified as possible. */
262 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
264 enum rtx_code code = GET_CODE (x);
265 enum machine_mode mode = GET_MODE (x);
266 enum machine_mode op_mode;
269 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
270 to build a new expression substituting recursively. If we can't do
271 anything, return our input. */
276 switch (GET_RTX_CLASS (code))
280 op_mode = GET_MODE (op0);
281 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
282 if (op0 == XEXP (x, 0))
284 return simplify_gen_unary (code, mode, op0, op_mode);
288 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
289 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
290 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
292 return simplify_gen_binary (code, mode, op0, op1);
295 case RTX_COMM_COMPARE:
298 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
299 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
300 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
301 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
303 return simplify_gen_relational (code, mode, op_mode, op0, op1);
306 case RTX_BITFIELD_OPS:
308 op_mode = GET_MODE (op0);
309 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
310 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
311 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
314 if (op_mode == VOIDmode)
315 op_mode = GET_MODE (op0);
316 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
319 /* The only case we try to handle is a SUBREG. */
322 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
323 if (op0 == SUBREG_REG (x))
325 op0 = simplify_gen_subreg (GET_MODE (x), op0,
326 GET_MODE (SUBREG_REG (x)),
328 return op0 ? op0 : x;
335 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
336 if (op0 == XEXP (x, 0))
338 return replace_equiv_address_nv (x, op0);
340 else if (code == LO_SUM)
342 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
343 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
349 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
351 return gen_rtx_LO_SUM (mode, op0, op1);
353 else if (code == REG)
355 if (rtx_equal_p (x, old_rtx))
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
370 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
371 rtx op, enum machine_mode op_mode)
375 if (GET_CODE (op) == CONST)
378 trueop = avoid_constant_pool_reference (op);
380 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
384 return simplify_unary_operation_1 (code, mode, op);
387 /* Perform some simplifications we can do even if the operands
390 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
392 enum rtx_code reversed;
398 /* (not (not X)) == X. */
399 if (GET_CODE (op) == NOT)
402 /* (not (eq X Y)) == (ne X Y), etc. */
403 if (COMPARISON_P (op)
404 && (mode == BImode || STORE_FLAG_VALUE == -1)
405 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
406 return simplify_gen_relational (reversed, mode, VOIDmode,
407 XEXP (op, 0), XEXP (op, 1));
409 /* (not (plus X -1)) can become (neg X). */
410 if (GET_CODE (op) == PLUS
411 && XEXP (op, 1) == constm1_rtx)
412 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
414 /* Similarly, (not (neg X)) is (plus X -1). */
415 if (GET_CODE (op) == NEG)
416 return plus_constant (XEXP (op, 0), -1);
418 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
419 if (GET_CODE (op) == XOR
420 && GET_CODE (XEXP (op, 1)) == CONST_INT
421 && (temp = simplify_unary_operation (NOT, mode,
422 XEXP (op, 1), mode)) != 0)
423 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
425 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
426 if (GET_CODE (op) == PLUS
427 && GET_CODE (XEXP (op, 1)) == CONST_INT
428 && mode_signbit_p (mode, XEXP (op, 1))
429 && (temp = simplify_unary_operation (NOT, mode,
430 XEXP (op, 1), mode)) != 0)
431 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
434 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
435 operands other than 1, but that is not valid. We could do a
436 similar simplification for (not (lshiftrt C X)) where C is
437 just the sign bit, but this doesn't seem common enough to
439 if (GET_CODE (op) == ASHIFT
440 && XEXP (op, 0) == const1_rtx)
442 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
443 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
446 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
447 by reversing the comparison code if valid. */
448 if (STORE_FLAG_VALUE == -1
450 && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
451 return simplify_gen_relational (reversed, mode, VOIDmode,
452 XEXP (op, 0), XEXP (op, 1));
454 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
455 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
456 so we can perform the above simplification. */
458 if (STORE_FLAG_VALUE == -1
459 && GET_CODE (op) == ASHIFTRT
460 && GET_CODE (XEXP (op, 1)) == CONST_INT
461 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
462 return simplify_gen_relational (GE, mode, VOIDmode,
463 XEXP (op, 0), const0_rtx);
468 /* (neg (neg X)) == X. */
469 if (GET_CODE (op) == NEG)
472 /* (neg (plus X 1)) can become (not X). */
473 if (GET_CODE (op) == PLUS
474 && XEXP (op, 1) == const1_rtx)
475 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
477 /* Similarly, (neg (not X)) is (plus X 1). */
478 if (GET_CODE (op) == NOT)
479 return plus_constant (XEXP (op, 0), 1);
481 /* (neg (minus X Y)) can become (minus Y X). This transformation
482 isn't safe for modes with signed zeros, since if X and Y are
483 both +0, (minus Y X) is the same as (minus X Y). If the
484 rounding mode is towards +infinity (or -infinity) then the two
485 expressions will be rounded differently. */
486 if (GET_CODE (op) == MINUS
487 && !HONOR_SIGNED_ZEROS (mode)
488 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
489 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
491 if (GET_CODE (op) == PLUS
492 && !HONOR_SIGNED_ZEROS (mode)
493 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
495 /* (neg (plus A C)) is simplified to (minus -C A). */
496 if (GET_CODE (XEXP (op, 1)) == CONST_INT
497 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
499 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
501 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
504 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
505 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
506 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
509 /* (neg (mult A B)) becomes (mult (neg A) B).
510 This works even for floating-point values. */
511 if (GET_CODE (op) == MULT
512 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
514 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
515 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
518 /* NEG commutes with ASHIFT since it is multiplication. Only do
519 this if we can then eliminate the NEG (e.g., if the operand
521 if (GET_CODE (op) == ASHIFT)
523 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
525 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
528 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
529 C is equal to the width of MODE minus 1. */
530 if (GET_CODE (op) == ASHIFTRT
531 && GET_CODE (XEXP (op, 1)) == CONST_INT
532 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
533 return simplify_gen_binary (LSHIFTRT, mode,
534 XEXP (op, 0), XEXP (op, 1));
536 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
537 C is equal to the width of MODE minus 1. */
538 if (GET_CODE (op) == LSHIFTRT
539 && GET_CODE (XEXP (op, 1)) == CONST_INT
540 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
541 return simplify_gen_binary (ASHIFTRT, mode,
542 XEXP (op, 0), XEXP (op, 1));
547 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
548 becomes just the MINUS if its mode is MODE. This allows
549 folding switch statements on machines using casesi (such as
551 if (GET_CODE (op) == TRUNCATE
552 && GET_MODE (XEXP (op, 0)) == mode
553 && GET_CODE (XEXP (op, 0)) == MINUS
554 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
555 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
558 /* Check for a sign extension of a subreg of a promoted
559 variable, where the promotion is sign-extended, and the
560 target mode is the same as the variable's promotion. */
561 if (GET_CODE (op) == SUBREG
562 && SUBREG_PROMOTED_VAR_P (op)
563 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
564 && GET_MODE (XEXP (op, 0)) == mode)
567 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
568 if (! POINTERS_EXTEND_UNSIGNED
569 && mode == Pmode && GET_MODE (op) == ptr_mode
571 || (GET_CODE (op) == SUBREG
572 && REG_P (SUBREG_REG (op))
573 && REG_POINTER (SUBREG_REG (op))
574 && GET_MODE (SUBREG_REG (op)) == Pmode)))
575 return convert_memory_address (Pmode, op);
580 /* Check for a zero extension of a subreg of a promoted
581 variable, where the promotion is zero-extended, and the
582 target mode is the same as the variable's promotion. */
583 if (GET_CODE (op) == SUBREG
584 && SUBREG_PROMOTED_VAR_P (op)
585 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
586 && GET_MODE (XEXP (op, 0)) == mode)
589 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
590 if (POINTERS_EXTEND_UNSIGNED > 0
591 && mode == Pmode && GET_MODE (op) == ptr_mode
593 || (GET_CODE (op) == SUBREG
594 && REG_P (SUBREG_REG (op))
595 && REG_POINTER (SUBREG_REG (op))
596 && GET_MODE (SUBREG_REG (op)) == Pmode)))
597 return convert_memory_address (Pmode, op);
608 /* Try to compute the value of a unary operation CODE whose output mode is to
609 be MODE with input operand OP whose mode was originally OP_MODE.
610 Return zero if the value cannot be computed. */
/* NOTE(review): many interior lines of this function (case labels, braces,
   else-branches and returns) appear to have been elided from this dump;
   the surviving code is kept byte-for-byte below.  Reconstruct against a
   pristine copy of simplify-rtx.c before compiling.  */
612 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
613 rtx op, enum machine_mode op_mode)
615 unsigned int width = GET_MODE_BITSIZE (mode);
/* Fold VEC_DUPLICATE of a constant into a CONST_VECTOR whose elements
   repeat OP (or OP's elements, when OP is itself a CONST_VECTOR).  */
617 if (code == VEC_DUPLICATE)
619 gcc_assert (VECTOR_MODE_P (mode));
620 if (GET_MODE (op) != VOIDmode)
622 if (!VECTOR_MODE_P (GET_MODE (op)))
623 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
625 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
628 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
629 || GET_CODE (op) == CONST_VECTOR)
631 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
632 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
633 rtvec v = rtvec_alloc (n_elts);
636 if (GET_CODE (op) != CONST_VECTOR)
637 for (i = 0; i < n_elts; i++)
638 RTVEC_ELT (v, i) = op;
641 enum machine_mode inmode = GET_MODE (op);
642 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
643 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
645 gcc_assert (in_n_elts < n_elts);
646 gcc_assert ((n_elts % in_n_elts) == 0);
647 for (i = 0; i < n_elts; i++)
648 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
650 return gen_rtx_CONST_VECTOR (mode, v);
/* Element-wise folding: apply the unary operation to each element of a
   CONST_VECTOR and rebuild the vector.  */
654 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
656 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
657 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
658 enum machine_mode opmode = GET_MODE (op);
659 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
660 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
661 rtvec v = rtvec_alloc (n_elts);
664 gcc_assert (op_n_elts == n_elts);
665 for (i = 0; i < n_elts; i++)
667 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
668 CONST_VECTOR_ELT (op, i),
669 GET_MODE_INNER (opmode));
672 RTVEC_ELT (v, i) = x;
674 return gen_rtx_CONST_VECTOR (mode, v);
677 /* The order of these tests is critical so that, for example, we don't
678 check the wrong mode (input vs. output) for a conversion operation,
679 such as FIX. At some point, this should be simplified. */
/* Integer-to-float conversions of VOIDmode (mode-less) constants.  */
681 if (code == FLOAT && GET_MODE (op) == VOIDmode
682 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
684 HOST_WIDE_INT hv, lv;
687 if (GET_CODE (op) == CONST_INT)
688 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
690 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
692 REAL_VALUE_FROM_INT (d, lv, hv, mode);
693 d = real_value_truncate (mode, d);
694 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
696 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
697 && (GET_CODE (op) == CONST_DOUBLE
698 || GET_CODE (op) == CONST_INT))
700 HOST_WIDE_INT hv, lv;
703 if (GET_CODE (op) == CONST_INT)
704 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
706 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
708 if (op_mode == VOIDmode)
710 /* We don't know how to interpret negative-looking numbers in
711 this case, so don't try to fold those. */
715 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
718 hv = 0, lv &= GET_MODE_MASK (op_mode);
720 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
721 d = real_value_truncate (mode, d);
722 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Single-word CONST_INT folding: the big switch over CODE (ABS, FFS,
   CLZ, CTZ, POPCOUNT, PARITY, ZERO_EXTEND, SIGN_EXTEND, ...) whose
   case labels have been elided in this dump.  */
725 if (GET_CODE (op) == CONST_INT
726 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
728 HOST_WIDE_INT arg0 = INTVAL (op);
742 val = (arg0 >= 0 ? arg0 : - arg0);
746 /* Don't use ffs here. Instead, get low order bit and then its
747 number. If arg0 is zero, this will return 0, as desired. */
748 arg0 &= GET_MODE_MASK (mode);
749 val = exact_log2 (arg0 & (- arg0)) + 1;
753 arg0 &= GET_MODE_MASK (mode);
754 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
757 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
761 arg0 &= GET_MODE_MASK (mode);
764 /* Even if the value at zero is undefined, we have to come
765 up with some replacement. Seems good enough. */
766 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
767 val = GET_MODE_BITSIZE (mode);
770 val = exact_log2 (arg0 & -arg0);
774 arg0 &= GET_MODE_MASK (mode);
777 val++, arg0 &= arg0 - 1;
781 arg0 &= GET_MODE_MASK (mode);
784 val++, arg0 &= arg0 - 1;
793 /* When zero-extending a CONST_INT, we need to know its
795 gcc_assert (op_mode != VOIDmode);
796 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
798 /* If we were really extending the mode,
799 we would have to distinguish between zero-extension
800 and sign-extension. */
801 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
804 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
805 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
811 if (op_mode == VOIDmode)
813 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
815 /* If we were really extending the mode,
816 we would have to distinguish between zero-extension
817 and sign-extension. */
818 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
821 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
824 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
826 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
827 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
844 return gen_int_mode (val, mode);
847 /* We can do some operations on integer CONST_DOUBLEs. Also allow
848 for a DImode operation on a CONST_INT. */
849 else if (GET_MODE (op) == VOIDmode
850 && width <= HOST_BITS_PER_WIDE_INT * 2
851 && (GET_CODE (op) == CONST_DOUBLE
852 || GET_CODE (op) == CONST_INT))
854 unsigned HOST_WIDE_INT l1, lv;
855 HOST_WIDE_INT h1, hv;
857 if (GET_CODE (op) == CONST_DOUBLE)
858 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
860 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
870 neg_double (l1, h1, &lv, &hv);
875 neg_double (l1, h1, &lv, &hv);
887 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
890 lv = exact_log2 (l1 & -l1) + 1;
896 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
897 - HOST_BITS_PER_WIDE_INT;
899 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
900 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
901 lv = GET_MODE_BITSIZE (mode);
907 lv = exact_log2 (l1 & -l1);
909 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
910 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
911 lv = GET_MODE_BITSIZE (mode);
934 /* This is just a change-of-mode, so do nothing. */
939 gcc_assert (op_mode != VOIDmode);
941 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
945 lv = l1 & GET_MODE_MASK (op_mode);
949 if (op_mode == VOIDmode
950 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
954 lv = l1 & GET_MODE_MASK (op_mode);
955 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
956 && (lv & ((HOST_WIDE_INT) 1
957 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
958 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
960 hv = HWI_SIGN_EXTEND (lv);
971 return immed_double_const (lv, hv, mode);
/* Floating-point folding on a CONST_DOUBLE operand (SQRT, ABS, NEG,
   truncation, FIX_TRUNC and mode changes via real_to_target).  */
974 else if (GET_CODE (op) == CONST_DOUBLE
975 && SCALAR_FLOAT_MODE_P (mode))
977 REAL_VALUE_TYPE d, t;
978 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
983 if (HONOR_SNANS (mode) && real_isnan (&d))
985 real_sqrt (&t, mode, &d);
989 d = REAL_VALUE_ABS (d);
992 d = REAL_VALUE_NEGATE (d);
995 d = real_value_truncate (mode, d);
998 /* All this does is change the mode. */
1001 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1008 real_to_target (tmp, &d, GET_MODE (op));
1009 for (i = 0; i < 4; i++)
1011 real_from_target (&d, tmp, mode);
1017 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* FIX / UNSIGNED_FIX of a float CONST_DOUBLE to an integer mode, with
   explicit clamping against the signed/unsigned bounds of MODE.  */
1020 else if (GET_CODE (op) == CONST_DOUBLE
1021 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1022 && GET_MODE_CLASS (mode) == MODE_INT
1023 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1025 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1026 operators are intentionally left unspecified (to ease implementation
1027 by target backends), for consistency, this routine implements the
1028 same semantics for constant folding as used by the middle-end. */
1030 /* This was formerly used only for non-IEEE float.
1031 eggert@twinsun.com says it is safe for IEEE also. */
1032 HOST_WIDE_INT xh, xl, th, tl;
1033 REAL_VALUE_TYPE x, t;
1034 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1038 if (REAL_VALUE_ISNAN (x))
1041 /* Test against the signed upper bound. */
1042 if (width > HOST_BITS_PER_WIDE_INT)
1044 th = ((unsigned HOST_WIDE_INT) 1
1045 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1051 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1053 real_from_integer (&t, VOIDmode, tl, th, 0);
1054 if (REAL_VALUES_LESS (t, x))
1061 /* Test against the signed lower bound. */
1062 if (width > HOST_BITS_PER_WIDE_INT)
1064 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1070 tl = (HOST_WIDE_INT) -1 << (width - 1);
1072 real_from_integer (&t, VOIDmode, tl, th, 0);
1073 if (REAL_VALUES_LESS (x, t))
1079 REAL_VALUE_TO_INT (&xl, &xh, x);
1083 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1086 /* Test against the unsigned upper bound. */
1087 if (width == 2*HOST_BITS_PER_WIDE_INT)
1092 else if (width >= HOST_BITS_PER_WIDE_INT)
1094 th = ((unsigned HOST_WIDE_INT) 1
1095 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1101 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1103 real_from_integer (&t, VOIDmode, tl, th, 1);
1104 if (REAL_VALUES_LESS (t, x))
1111 REAL_VALUE_TO_INT (&xl, &xh, x);
1117 return immed_double_const (xl, xh, mode);
1123 /* Subroutine of simplify_binary_operation to simplify a commutative,
1124 associative binary operation CODE with result mode MODE, operating
1125 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1126 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1127 canonicalization is possible. */
1130 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1135 /* Linearize the operator to the left. */
1136 if (GET_CODE (op1) == code)
1138 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1139 if (GET_CODE (op0) == code)
1141 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1142 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1145 /* "a op (b op c)" becomes "(b op c) op a". */
1146 if (! swap_commutative_operands_p (op1, op0))
1147 return simplify_gen_binary (code, mode, op1, op0);
1154 if (GET_CODE (op0) == code)
1156 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1157 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1159 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1160 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1163 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1164 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1165 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1166 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1168 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1170 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1171 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1172 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1173 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1175 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1182 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1183 and OP1. Return 0 if no simplification is possible.
1185 Don't use this for relational operations such as EQ or LT.
1186 Use simplify_relational_operation instead. */
1188 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1191 rtx trueop0, trueop1;
1194 /* Relational operations don't work here. We must know the mode
1195 of the operands in order to do the comparison correctly.
1196 Assuming a full word can give incorrect results.
1197 Consider comparing 128 with -128 in QImode. */
1198 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1199 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1201 /* Make sure the constant is second. */
1202 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1203 && swap_commutative_operands_p (op0, op1))
1205 tem = op0, op0 = op1, op1 = tem;
1208 trueop0 = avoid_constant_pool_reference (op0);
1209 trueop1 = avoid_constant_pool_reference (op1);
1211 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1214 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1218 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1219 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1223 unsigned int width = GET_MODE_BITSIZE (mode);
1225 /* Even if we can't compute a constant result,
1226 there are some cases worth simplifying. */
1231 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1232 when x is NaN, infinite, or finite and nonzero. They aren't
1233 when x is -0 and the rounding mode is not towards -infinity,
1234 since (-0) + 0 is then 0. */
1235 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1238 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1239 transformations are safe even for IEEE. */
1240 if (GET_CODE (op0) == NEG)
1241 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1242 else if (GET_CODE (op1) == NEG)
1243 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1245 /* (~a) + 1 -> -a */
1246 if (INTEGRAL_MODE_P (mode)
1247 && GET_CODE (op0) == NOT
1248 && trueop1 == const1_rtx)
1249 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1251 /* Handle both-operands-constant cases. We can only add
1252 CONST_INTs to constants since the sum of relocatable symbols
1253 can't be handled by most assemblers. Don't add CONST_INT
1254 to CONST_INT since overflow won't be computed properly if wider
1255 than HOST_BITS_PER_WIDE_INT. */
1257 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1258 && GET_CODE (op1) == CONST_INT)
1259 return plus_constant (op0, INTVAL (op1));
1260 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1261 && GET_CODE (op0) == CONST_INT)
1262 return plus_constant (op1, INTVAL (op0));
1264 /* See if this is something like X * C - X or vice versa or
1265 if the multiplication is written as a shift. If so, we can
1266 distribute and make a new multiply, shift, or maybe just
1267 have X (if C is 2 in the example above). But don't make
1268 something more expensive than we had before. */
1270 if (SCALAR_INT_MODE_P (mode))
1272 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1273 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1274 rtx lhs = op0, rhs = op1;
1276 if (GET_CODE (lhs) == NEG)
1280 lhs = XEXP (lhs, 0);
1282 else if (GET_CODE (lhs) == MULT
1283 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1285 coeff0l = INTVAL (XEXP (lhs, 1));
1286 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1287 lhs = XEXP (lhs, 0);
1289 else if (GET_CODE (lhs) == ASHIFT
1290 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1291 && INTVAL (XEXP (lhs, 1)) >= 0
1292 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1294 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1296 lhs = XEXP (lhs, 0);
1299 if (GET_CODE (rhs) == NEG)
1303 rhs = XEXP (rhs, 0);
1305 else if (GET_CODE (rhs) == MULT
1306 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1308 coeff1l = INTVAL (XEXP (rhs, 1));
1309 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1310 rhs = XEXP (rhs, 0);
1312 else if (GET_CODE (rhs) == ASHIFT
1313 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1314 && INTVAL (XEXP (rhs, 1)) >= 0
1315 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1317 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1319 rhs = XEXP (rhs, 0);
1322 if (rtx_equal_p (lhs, rhs))
1324 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1326 unsigned HOST_WIDE_INT l;
1329 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1330 coeff = immed_double_const (l, h, mode);
1332 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1333 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1338 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1339 if ((GET_CODE (op1) == CONST_INT
1340 || GET_CODE (op1) == CONST_DOUBLE)
1341 && GET_CODE (op0) == XOR
1342 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1343 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1344 && mode_signbit_p (mode, op1))
1345 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1346 simplify_gen_binary (XOR, mode, op1,
1349 /* If one of the operands is a PLUS or a MINUS, see if we can
1350 simplify this by the associative law.
1351 Don't use the associative law for floating point.
1352 The inaccuracy makes it nonassociative,
1353 and subtle programs can break if operations are associated. */
1355 if (INTEGRAL_MODE_P (mode)
1356 && (plus_minus_operand_p (op0)
1357 || plus_minus_operand_p (op1))
1358 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1361 /* Reassociate floating point addition only when the user
1362 specifies unsafe math optimizations. */
1363 if (FLOAT_MODE_P (mode)
1364 && flag_unsafe_math_optimizations)
1366 tem = simplify_associative_operation (code, mode, op0, op1);
1374 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1375 using cc0, in which case we want to leave it as a COMPARE
1376 so we can distinguish it from a register-register-copy.
1378 In IEEE floating point, x-0 is not the same as x. */
1380 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1381 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1382 && trueop1 == CONST0_RTX (mode))
1386 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1387 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1388 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1389 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1391 rtx xop00 = XEXP (op0, 0);
1392 rtx xop10 = XEXP (op1, 0);
1395 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1397 if (REG_P (xop00) && REG_P (xop10)
1398 && GET_MODE (xop00) == GET_MODE (xop10)
1399 && REGNO (xop00) == REGNO (xop10)
1400 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1401 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1408 /* We can't assume x-x is 0 even with non-IEEE floating point,
1409 but since it is zero except in very strange circumstances, we
1410 will treat it as zero with -funsafe-math-optimizations. */
1411 if (rtx_equal_p (trueop0, trueop1)
1412 && ! side_effects_p (op0)
1413 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1414 return CONST0_RTX (mode);
1416 /* Change subtraction from zero into negation. (0 - x) is the
1417 same as -x when x is NaN, infinite, or finite and nonzero.
1418 But if the mode has signed zeros, and does not round towards
1419 -infinity, then 0 - 0 is 0, not -0. */
1420 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1421 return simplify_gen_unary (NEG, mode, op1, mode);
1423 /* (-1 - a) is ~a. */
1424 if (trueop0 == constm1_rtx)
1425 return simplify_gen_unary (NOT, mode, op1, mode);
1427 /* Subtracting 0 has no effect unless the mode has signed zeros
1428 and supports rounding towards -infinity. In such a case,
1430 if (!(HONOR_SIGNED_ZEROS (mode)
1431 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1432 && trueop1 == CONST0_RTX (mode))
1435 /* See if this is something like X * C - X or vice versa or
1436 if the multiplication is written as a shift. If so, we can
1437 distribute and make a new multiply, shift, or maybe just
1438 have X (if C is 2 in the example above). But don't make
1439 something more expensive than we had before. */
1441 if (SCALAR_INT_MODE_P (mode))
1443 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1444 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1445 rtx lhs = op0, rhs = op1;
1447 if (GET_CODE (lhs) == NEG)
1451 lhs = XEXP (lhs, 0);
1453 else if (GET_CODE (lhs) == MULT
1454 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1456 coeff0l = INTVAL (XEXP (lhs, 1));
1457 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1458 lhs = XEXP (lhs, 0);
1460 else if (GET_CODE (lhs) == ASHIFT
1461 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1462 && INTVAL (XEXP (lhs, 1)) >= 0
1463 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1465 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1467 lhs = XEXP (lhs, 0);
1470 if (GET_CODE (rhs) == NEG)
1474 rhs = XEXP (rhs, 0);
1476 else if (GET_CODE (rhs) == MULT
1477 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1479 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1480 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1481 rhs = XEXP (rhs, 0);
1483 else if (GET_CODE (rhs) == ASHIFT
1484 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1485 && INTVAL (XEXP (rhs, 1)) >= 0
1486 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1488 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1490 rhs = XEXP (rhs, 0);
1493 if (rtx_equal_p (lhs, rhs))
1495 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1497 unsigned HOST_WIDE_INT l;
1500 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1501 coeff = immed_double_const (l, h, mode);
1503 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1504 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1509 /* (a - (-b)) -> (a + b). True even for IEEE. */
1510 if (GET_CODE (op1) == NEG)
1511 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1513 /* (-x - c) may be simplified as (-c - x). */
/* Canonicalize so the constant comes first: negate the constant (valid
   for CONST_INT and CONST_DOUBLE) and swap the operands.  NOTE(review):
   lines between the condition and the body (opening brace, and presumably
   a NULL check on tem) are missing from this excerpt — original line
   numbers jump 1516 -> 1518 -> 1520.  */
1514 if (GET_CODE (op0) == NEG
1515 && (GET_CODE (op1) == CONST_INT
1516 || GET_CODE (op1) == CONST_DOUBLE))
1518 tem = simplify_unary_operation (NEG, mode, op1, mode);
1520 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1523 /* Don't let a relocatable value get a negative coeff. */
/* Rewrite (x - C) as (x + (-C)) so later passes never see a MINUS of a
   relocatable value and a constant.  The second argument to
   simplify_gen_binary (original line 1526) is missing from this excerpt;
   presumably it is op0.  */
1524 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1525 return simplify_gen_binary (PLUS, mode,
1527 neg_const_int (mode, op1));
1529 /* (x - (x & y)) -> (x & ~y) */
/* Works with y in either AND operand position; both arms build
   (AND x (NOT y)).  The NOT is generated in the mode of the AND operand
   itself, which may differ from MODE — TODO confirm against the full
   source whether that distinction matters here.  */
1530 if (GET_CODE (op1) == AND)
1532 if (rtx_equal_p (op0, XEXP (op1, 0)))
1534 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1535 GET_MODE (XEXP (op1, 1)));
1536 return simplify_gen_binary (AND, mode, op0, tem);
1538 if (rtx_equal_p (op0, XEXP (op1, 1)))
1540 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1541 GET_MODE (XEXP (op1, 0)));
1542 return simplify_gen_binary (AND, mode, op0, tem);
1546 /* If one of the operands is a PLUS or a MINUS, see if we can
1547 simplify this by the associative law. This will, for example,
1548 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1549 Don't use the associative law for floating point.
1550 The inaccuracy makes it nonassociative,
1551 and subtle programs can break if operations are associated. */
1553 if (INTEGRAL_MODE_P (mode)
1554 && (plus_minus_operand_p (op0)
1555 || plus_minus_operand_p (op1))
1556 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1561 if (trueop1 == constm1_rtx)
1562 return simplify_gen_unary (NEG, mode, op0, mode);
1564 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1565 x is NaN, since x * 0 is then also NaN. Nor is it valid
1566 when the mode has signed zeros, since multiplying a negative
1567 number by 0 will give -0, not 0. */
1568 if (!HONOR_NANS (mode)
1569 && !HONOR_SIGNED_ZEROS (mode)
1570 && trueop1 == CONST0_RTX (mode)
1571 && ! side_effects_p (op0))
1574 /* In IEEE floating point, x*1 is not equivalent to x for
1576 if (!HONOR_SNANS (mode)
1577 && trueop1 == CONST1_RTX (mode))
1580 /* Convert multiply by constant power of two into shift unless
1581 we are still generating RTL. This test is a kludge. */
1582 if (GET_CODE (trueop1) == CONST_INT
1583 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1584 /* If the mode is larger than the host word size, and the
1585 uppermost bit is set, then this isn't a power of two due
1586 to implicit sign extension. */
1587 && (width <= HOST_BITS_PER_WIDE_INT
1588 || val != HOST_BITS_PER_WIDE_INT - 1))
1589 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1591 /* Likewise for multipliers wider than a word. */
1592 else if (GET_CODE (trueop1) == CONST_DOUBLE
1593 && (GET_MODE (trueop1) == VOIDmode
1594 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1595 && GET_MODE (op0) == mode
1596 && CONST_DOUBLE_LOW (trueop1) == 0
1597 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1598 return simplify_gen_binary (ASHIFT, mode, op0,
1599 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1601 /* x*2 is x+x and x*(-1) is -x */
1602 if (GET_CODE (trueop1) == CONST_DOUBLE
1603 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1604 && GET_MODE (op0) == mode)
1607 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1609 if (REAL_VALUES_EQUAL (d, dconst2))
1610 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1612 if (REAL_VALUES_EQUAL (d, dconstm1))
1613 return simplify_gen_unary (NEG, mode, op0, mode);
1616 /* Reassociate multiplication, but for floating point MULTs
1617 only when the user specifies unsafe math optimizations. */
1618 if (! FLOAT_MODE_P (mode)
1619 || flag_unsafe_math_optimizations)
1621 tem = simplify_associative_operation (code, mode, op0, op1);
1628 if (trueop1 == const0_rtx)
1630 if (GET_CODE (trueop1) == CONST_INT
1631 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1632 == GET_MODE_MASK (mode)))
1634 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1636 /* A | (~A) -> -1 */
1637 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1638 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1639 && ! side_effects_p (op0)
1640 && SCALAR_INT_MODE_P (mode))
1642 tem = simplify_associative_operation (code, mode, op0, op1);
1648 if (trueop1 == const0_rtx)
1650 if (GET_CODE (trueop1) == CONST_INT
1651 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1652 == GET_MODE_MASK (mode)))
1653 return simplify_gen_unary (NOT, mode, op0, mode);
1654 if (rtx_equal_p (trueop0, trueop1)
1655 && ! side_effects_p (op0)
1656 && GET_MODE_CLASS (mode) != MODE_CC)
1657 return CONST0_RTX (mode);
1659 /* Canonicalize XOR of the most significant bit to PLUS. */
1660 if ((GET_CODE (op1) == CONST_INT
1661 || GET_CODE (op1) == CONST_DOUBLE)
1662 && mode_signbit_p (mode, op1))
1663 return simplify_gen_binary (PLUS, mode, op0, op1);
1664 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1665 if ((GET_CODE (op1) == CONST_INT
1666 || GET_CODE (op1) == CONST_DOUBLE)
1667 && GET_CODE (op0) == PLUS
1668 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1669 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1670 && mode_signbit_p (mode, XEXP (op0, 1)))
1671 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1672 simplify_gen_binary (XOR, mode, op1,
1675 tem = simplify_associative_operation (code, mode, op0, op1);
1681 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
1683 /* If we are turning off bits already known off in OP0, we need
1685 if (GET_CODE (trueop1) == CONST_INT
1686 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1687 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
1689 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
1690 && GET_MODE_CLASS (mode) != MODE_CC)
1693 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1694 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1695 && ! side_effects_p (op0)
1696 && GET_MODE_CLASS (mode) != MODE_CC)
1697 return CONST0_RTX (mode);
1699 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
1700 there are no nonzero bits of C outside of X's mode. */
1701 if ((GET_CODE (op0) == SIGN_EXTEND
1702 || GET_CODE (op0) == ZERO_EXTEND)
1703 && GET_CODE (trueop1) == CONST_INT
1704 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1705 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
1706 & INTVAL (trueop1)) == 0)
1708 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
1709 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
1710 gen_int_mode (INTVAL (trueop1),
1712 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
1715 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1716 ((A & N) + B) & M -> (A + B) & M
1717 Similarly if (N & M) == 0,
1718 ((A | N) + B) & M -> (A + B) & M
1719 and for - instead of + and/or ^ instead of |. */
1720 if (GET_CODE (trueop1) == CONST_INT
1721 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1722 && ~INTVAL (trueop1)
1723 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
1724 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
1729 pmop[0] = XEXP (op0, 0);
1730 pmop[1] = XEXP (op0, 1);
1732 for (which = 0; which < 2; which++)
1735 switch (GET_CODE (tem))
1738 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1739 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
1740 == INTVAL (trueop1))
1741 pmop[which] = XEXP (tem, 0);
1745 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1746 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
1747 pmop[which] = XEXP (tem, 0);
1754 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
1756 tem = simplify_gen_binary (GET_CODE (op0), mode,
1758 return simplify_gen_binary (code, mode, tem, op1);
1761 tem = simplify_associative_operation (code, mode, op0, op1);
1767 /* 0/x is 0 (or x&0 if x has side-effects). */
/* UDIV fragment.  When the dividend is zero, the quotient is zero — but
   if the divisor has side effects (e.g. an auto-increment MEM) they must
   be preserved, so emit (AND op1 0) instead of plain zero.  NOTE(review):
   the plain "return trueop0;" path for the side-effect-free case
   (original line ~1772) is missing from this excerpt.  */
1768 if (trueop0 == CONST0_RTX (mode))
1770 if (side_effects_p (op1))
1771 return simplify_gen_binary (AND, mode, op1, trueop0);
/* x/1 is x; use the lowpart hook rather than op0 directly so the result
   is expressed in MODE.  */
1775 if (trueop1 == CONST1_RTX (mode))
1776 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
1777 /* Convert divide by power of two into shift. */
/* exact_log2 > 0 excludes both non-powers-of-two (negative result) and
   division by 1 (log 0), which was handled just above.  Unsigned
   division, hence LSHIFTRT.  */
1778 if (GET_CODE (trueop1) == CONST_INT
1779 && (val = exact_log2 (INTVAL (trueop1))) > 0)
1780 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
1784 /* Handle floating point and integers separately. */
1785 if (SCALAR_FLOAT_MODE_P (mode))
1787 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1788 safe for modes with NaNs, since 0.0 / 0.0 will then be
1789 NaN rather than 0.0. Nor is it safe for modes with signed
1790 zeros, since dividing 0 by a negative number gives -0.0 */
1791 if (trueop0 == CONST0_RTX (mode)
1792 && !HONOR_NANS (mode)
1793 && !HONOR_SIGNED_ZEROS (mode)
1794 && ! side_effects_p (op1))
1797 if (trueop1 == CONST1_RTX (mode)
1798 && !HONOR_SNANS (mode))
1801 if (GET_CODE (trueop1) == CONST_DOUBLE
1802 && trueop1 != CONST0_RTX (mode))
1805 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1808 if (REAL_VALUES_EQUAL (d, dconstm1)
1809 && !HONOR_SNANS (mode))
1810 return simplify_gen_unary (NEG, mode, op0, mode);
1812 /* Change FP division by a constant into multiplication.
1813 Only do this with -funsafe-math-optimizations. */
1814 if (flag_unsafe_math_optimizations
1815 && !REAL_VALUES_EQUAL (d, dconst0))
1817 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
1818 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1819 return simplify_gen_binary (MULT, mode, op0, tem);
1825 /* 0/x is 0 (or x&0 if x has side-effects). */
1826 if (trueop0 == CONST0_RTX (mode))
1828 if (side_effects_p (op1))
1829 return simplify_gen_binary (AND, mode, op1, trueop0);
1833 if (trueop1 == CONST1_RTX (mode))
1834 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
1836 if (trueop1 == constm1_rtx)
1838 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
1839 return simplify_gen_unary (NEG, mode, x, mode);
/* UMOD fragment: unsigned modulus identities.  */
1845 /* 0%x is 0 (or x&0 if x has side-effects). */
1846 if (trueop0 == CONST0_RTX (mode))
1848 if (side_effects_p (op1))
1849 return simplify_gen_binary (AND, mode, op1, trueop0);
1852 /* x%1 is 0 (or x&0 if x has side-effects). */
1853 if (trueop1 == CONST1_RTX (mode))
1855 if (side_effects_p (op0))
1856 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
1857 return CONST0_RTX (mode);
1859 /* Implement modulus by power of two as AND. */
/* x % 2**k == x & (2**k - 1) for unsigned x.  exact_log2 > 0 again
   excludes 1 (handled above) and non-powers-of-two.  */
1860 if (GET_CODE (trueop1) == CONST_INT
1861 && exact_log2 (INTVAL (trueop1)) > 0)
1862 return simplify_gen_binary (AND, mode, op0,
1863 GEN_INT (INTVAL (op1) - 1));
/* MOD fragment: signed modulus identities.  Unlike UMOD, x % -1 is also
   folded to 0 here; no power-of-two -> AND rewrite, since it would be
   wrong for negative dividends.  */
1867 /* 0%x is 0 (or x&0 if x has side-effects). */
1868 if (trueop0 == CONST0_RTX (mode))
1870 if (side_effects_p (op1))
1871 return simplify_gen_binary (AND, mode, op1, trueop0);
1874 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
1875 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
1877 if (side_effects_p (op0))
1878 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
1879 return CONST0_RTX (mode);
1886 /* Rotating ~0 always results in ~0. */
1887 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1888 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1889 && ! side_effects_p (op1))
1892 /* Fall through.... */
1896 if (trueop1 == CONST0_RTX (mode))
1898 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
1903 if (width <= HOST_BITS_PER_WIDE_INT
1904 && GET_CODE (trueop1) == CONST_INT
1905 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1906 && ! side_effects_p (op0))
1908 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1910 tem = simplify_associative_operation (code, mode, op0, op1);
1916 if (width <= HOST_BITS_PER_WIDE_INT
1917 && GET_CODE (trueop1) == CONST_INT
1918 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1919 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1920 && ! side_effects_p (op0))
1922 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1924 tem = simplify_associative_operation (code, mode, op0, op1);
1930 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
1932 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1934 tem = simplify_associative_operation (code, mode, op0, op1);
1940 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1942 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1944 tem = simplify_associative_operation (code, mode, op0, op1);
1953 /* ??? There are simplifications that can be done. */
1957 if (!VECTOR_MODE_P (mode))
1959 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1960 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
1961 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1962 gcc_assert (XVECLEN (trueop1, 0) == 1);
1963 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
1965 if (GET_CODE (trueop0) == CONST_VECTOR)
1966 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
1971 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1972 gcc_assert (GET_MODE_INNER (mode)
1973 == GET_MODE_INNER (GET_MODE (trueop0)));
1974 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1976 if (GET_CODE (trueop0) == CONST_VECTOR)
1978 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1979 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1980 rtvec v = rtvec_alloc (n_elts);
1983 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
1984 for (i = 0; i < n_elts; i++)
1986 rtx x = XVECEXP (trueop1, 0, i);
1988 gcc_assert (GET_CODE (x) == CONST_INT);
1989 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
1993 return gen_rtx_CONST_VECTOR (mode, v);
1999 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2000 ? GET_MODE (trueop0)
2001 : GET_MODE_INNER (mode));
2002 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2003 ? GET_MODE (trueop1)
2004 : GET_MODE_INNER (mode));
2006 gcc_assert (VECTOR_MODE_P (mode));
2007 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2008 == GET_MODE_SIZE (mode));
2010 if (VECTOR_MODE_P (op0_mode))
2011 gcc_assert (GET_MODE_INNER (mode)
2012 == GET_MODE_INNER (op0_mode));
2014 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2016 if (VECTOR_MODE_P (op1_mode))
2017 gcc_assert (GET_MODE_INNER (mode)
2018 == GET_MODE_INNER (op1_mode));
2020 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2022 if ((GET_CODE (trueop0) == CONST_VECTOR
2023 || GET_CODE (trueop0) == CONST_INT
2024 || GET_CODE (trueop0) == CONST_DOUBLE)
2025 && (GET_CODE (trueop1) == CONST_VECTOR
2026 || GET_CODE (trueop1) == CONST_INT
2027 || GET_CODE (trueop1) == CONST_DOUBLE))
2029 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2030 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2031 rtvec v = rtvec_alloc (n_elts);
2033 unsigned in_n_elts = 1;
2035 if (VECTOR_MODE_P (op0_mode))
2036 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2037 for (i = 0; i < n_elts; i++)
2041 if (!VECTOR_MODE_P (op0_mode))
2042 RTVEC_ELT (v, i) = trueop0;
2044 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2048 if (!VECTOR_MODE_P (op1_mode))
2049 RTVEC_ELT (v, i) = trueop1;
2051 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2056 return gen_rtx_CONST_VECTOR (mode, v);
2069 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2072 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2074 unsigned int width = GET_MODE_BITSIZE (mode);
2076 if (VECTOR_MODE_P (mode)
2077 && code != VEC_CONCAT
2078 && GET_CODE (op0) == CONST_VECTOR
2079 && GET_CODE (op1) == CONST_VECTOR)
2081 unsigned n_elts = GET_MODE_NUNITS (mode);
2082 enum machine_mode op0mode = GET_MODE (op0);
2083 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2084 enum machine_mode op1mode = GET_MODE (op1);
2085 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2086 rtvec v = rtvec_alloc (n_elts);
2089 gcc_assert (op0_n_elts == n_elts);
2090 gcc_assert (op1_n_elts == n_elts);
2091 for (i = 0; i < n_elts; i++)
2093 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2094 CONST_VECTOR_ELT (op0, i),
2095 CONST_VECTOR_ELT (op1, i));
2098 RTVEC_ELT (v, i) = x;
2101 return gen_rtx_CONST_VECTOR (mode, v);
2104 if (VECTOR_MODE_P (mode)
2105 && code == VEC_CONCAT
2106 && CONSTANT_P (op0) && CONSTANT_P (op1))
2108 unsigned n_elts = GET_MODE_NUNITS (mode);
2109 rtvec v = rtvec_alloc (n_elts);
2111 gcc_assert (n_elts >= 2);
2114 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2115 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2117 RTVEC_ELT (v, 0) = op0;
2118 RTVEC_ELT (v, 1) = op1;
2122 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2123 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2126 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2127 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2128 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2130 for (i = 0; i < op0_n_elts; ++i)
2131 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2132 for (i = 0; i < op1_n_elts; ++i)
2133 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2136 return gen_rtx_CONST_VECTOR (mode, v);
2139 if (SCALAR_FLOAT_MODE_P (mode)
2140 && GET_CODE (op0) == CONST_DOUBLE
2141 && GET_CODE (op1) == CONST_DOUBLE
2142 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2153 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2155 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2157 for (i = 0; i < 4; i++)
2174 real_from_target (&r, tmp0, mode);
2175 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2179 REAL_VALUE_TYPE f0, f1, value, result;
2182 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2183 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2184 real_convert (&f0, mode, &f0);
2185 real_convert (&f1, mode, &f1);
2187 if (HONOR_SNANS (mode)
2188 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2192 && REAL_VALUES_EQUAL (f1, dconst0)
2193 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2196 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2197 && flag_trapping_math
2198 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2200 int s0 = REAL_VALUE_NEGATIVE (f0);
2201 int s1 = REAL_VALUE_NEGATIVE (f1);
2206 /* Inf + -Inf = NaN plus exception. */
2211 /* Inf - Inf = NaN plus exception. */
2216 /* Inf / Inf = NaN plus exception. */
2223 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2224 && flag_trapping_math
2225 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2226 || (REAL_VALUE_ISINF (f1)
2227 && REAL_VALUES_EQUAL (f0, dconst0))))
2228 /* Inf * 0 = NaN plus exception. */
2231 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2233 real_convert (&result, mode, &value);
2235 /* Don't constant fold this floating point operation if
2236 the result has overflowed and flag_trapping_math. */
2238 if (flag_trapping_math
2239 && MODE_HAS_INFINITIES (mode)
2240 && REAL_VALUE_ISINF (result)
2241 && !REAL_VALUE_ISINF (f0)
2242 && !REAL_VALUE_ISINF (f1))
2243 /* Overflow plus exception. */
2246 /* Don't constant fold this floating point operation if the
2247 result may dependent upon the run-time rounding mode and
2248 flag_rounding_math is set, or if GCC's software emulation
2249 is unable to accurately represent the result. */
2251 if ((flag_rounding_math
2252 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2253 && !flag_unsafe_math_optimizations))
2254 && (inexact || !real_identical (&result, &value)))
2257 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2261 /* We can fold some multi-word operations. */
2262 if (GET_MODE_CLASS (mode) == MODE_INT
2263 && width == HOST_BITS_PER_WIDE_INT * 2
2264 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2265 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2267 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2268 HOST_WIDE_INT h1, h2, hv, ht;
2270 if (GET_CODE (op0) == CONST_DOUBLE)
2271 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2273 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2275 if (GET_CODE (op1) == CONST_DOUBLE)
2276 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2278 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2283 /* A - B == A + (-B). */
2284 neg_double (l2, h2, &lv, &hv);
2287 /* Fall through.... */
2290 add_double (l1, h1, l2, h2, &lv, &hv);
2294 mul_double (l1, h1, l2, h2, &lv, &hv);
2298 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2299 &lv, &hv, <, &ht))
2304 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2305 <, &ht, &lv, &hv))
2310 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2311 &lv, &hv, <, &ht))
2316 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2317 <, &ht, &lv, &hv))
2322 lv = l1 & l2, hv = h1 & h2;
2326 lv = l1 | l2, hv = h1 | h2;
2330 lv = l1 ^ l2, hv = h1 ^ h2;
2336 && ((unsigned HOST_WIDE_INT) l1
2337 < (unsigned HOST_WIDE_INT) l2)))
2346 && ((unsigned HOST_WIDE_INT) l1
2347 > (unsigned HOST_WIDE_INT) l2)))
2354 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2356 && ((unsigned HOST_WIDE_INT) l1
2357 < (unsigned HOST_WIDE_INT) l2)))
2364 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2366 && ((unsigned HOST_WIDE_INT) l1
2367 > (unsigned HOST_WIDE_INT) l2)))
2373 case LSHIFTRT: case ASHIFTRT:
2375 case ROTATE: case ROTATERT:
2376 if (SHIFT_COUNT_TRUNCATED)
2377 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2379 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2382 if (code == LSHIFTRT || code == ASHIFTRT)
2383 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2385 else if (code == ASHIFT)
2386 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2387 else if (code == ROTATE)
2388 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2389 else /* code == ROTATERT */
2390 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2397 return immed_double_const (lv, hv, mode);
2400 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2401 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2403 /* Get the integer argument values in two forms:
2404 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2406 arg0 = INTVAL (op0);
2407 arg1 = INTVAL (op1);
2409 if (width < HOST_BITS_PER_WIDE_INT)
2411 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2412 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2415 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2416 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2419 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2420 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2428 /* Compute the value of the arithmetic. */
2433 val = arg0s + arg1s;
2437 val = arg0s - arg1s;
2441 val = arg0s * arg1s;
2446 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2449 val = arg0s / arg1s;
2454 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2457 val = arg0s % arg1s;
2462 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2465 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2470 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2473 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2491 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
2492 the value is in range. We can't return any old value for
2493 out-of-range arguments because either the middle-end (via
2494 shift_truncation_mask) or the back-end might be relying on
2495 target-specific knowledge. Nor can we rely on
2496 shift_truncation_mask, since the shift might not be part of an
2497 ashlM3, lshrM3 or ashrM3 instruction. */
2498 if (SHIFT_COUNT_TRUNCATED)
2499 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2500 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2503 val = (code == ASHIFT
2504 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2505 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2507 /* Sign-extend the result for arithmetic right shifts. */
2508 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2509 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2517 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2518 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2526 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2527 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2531 /* Do nothing here. */
2535 val = arg0s <= arg1s ? arg0s : arg1s;
2539 val = ((unsigned HOST_WIDE_INT) arg0
2540 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2544 val = arg0s > arg1s ? arg0s : arg1s;
2548 val = ((unsigned HOST_WIDE_INT) arg0
2549 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2556 /* ??? There are simplifications that can be done. */
2563 return gen_int_mode (val, mode);
2571 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2574 Rather than test for specific case, we do this by a brute-force method
2575 and do all possible simplifications until no more changes occur. Then
2576 we rebuild the operation. */
2578 struct simplify_plus_minus_op_data
/* qsort comparator for simplify_plus_minus_op_data entries: orders
   operands by DESCENDING commutative_operand_precedence (d2 minus d1),
   falling back to the original index ix to make the sort stable.
   NOTE(review): this excerpt is missing the function's opening brace,
   the declaration of `result`, and the `if (result) return result;`
   early return (original lines 2587, 2590-2591, 2594-2595) — do not
   read this fragment as the complete function.  */
2586 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2588 const struct simplify_plus_minus_op_data *d1 = p1;
2589 const struct simplify_plus_minus_op_data *d2 = p2;
2592 result = (commutative_operand_precedence (d2->op)
2593 - commutative_operand_precedence (d1->op));
2596 return d1->ix - d2->ix;
/* Simplify a chain of additions and subtractions.  CODE is PLUS or
   MINUS, MODE is the mode of the result, OP0 and OP1 the two input
   operands.  The tree rooted at (CODE OP0 OP1) is flattened into the
   OPS array (each entry an operand plus a negation flag), operands are
   simplified pairwise, the survivors are sorted canonically, and the
   result is reassembled as a PLUS/MINUS chain.
   NOTE(review): the listing below is elided; many statements, braces
   and early-exit paths of the original function are not visible here,
   so comments on surrounding control flow are best-effort.  */
2600 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
/* At most 8 flattened operands are supported; overflow aborts the
   simplification (see the comment below).  */
2603 struct simplify_plus_minus_op_data ops[8];
2605 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2606 int first, changed, canonicalized = 0;
2609 memset (ops, 0, sizeof ops);
2611 /* Set up the two operands and then expand them until nothing has been
2612 changed. If we run out of room in our array, give up; this should
2613 almost never happen. */
/* Only the second operand is negated when the top-level code is MINUS.  */
2618 ops[1].neg = (code == MINUS);
/* Flattening pass: each PLUS/MINUS/NEG/NOT/CONST entry is expanded in
   place, growing n_ops, until every entry is a leaf.  */
2624 for (i = 0; i < n_ops; i++)
2626 rtx this_op = ops[i].op;
2627 int this_neg = ops[i].neg;
2628 enum rtx_code this_code = GET_CODE (this_op);
/* PLUS/MINUS case (context elided): push the second operand as a new
   entry, negated when this is a MINUS under an odd number of NEGs.  */
2637 ops[n_ops].op = XEXP (this_op, 1);
2638 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2641 ops[i].op = XEXP (this_op, 0);
2644 canonicalized |= this_neg;
/* NEG case (context elided): strip the NEG and flip the flag.  */
2648 ops[i].op = XEXP (this_op, 0);
2649 ops[i].neg = ! this_neg;
/* CONST wrapping (plus C1 C2): split into two constant entries.  */
2656 && GET_CODE (XEXP (this_op, 0)) == PLUS
2657 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2658 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2660 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2661 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2662 ops[n_ops].neg = this_neg;
2671 /* ~a -> (-a - 1) */
2674 ops[n_ops].op = constm1_rtx;
2675 ops[n_ops++].neg = this_neg;
2676 ops[i].op = XEXP (this_op, 0);
2677 ops[i].neg = !this_neg;
/* Negated CONST_INT (context elided): fold the negation now.  */
2686 ops[i].op = neg_const_int (mode, this_op);
2700 gcc_assert (n_ops >= 2);
2704 /* If we only have two operands, we can avoid the loops. */
/* This local CODE shadows the parameter; presumably only reached on an
   early-exit path for exactly two operands -- confirm in full source.  */
2707 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
2710 /* Get the two operands. Be careful with the order, especially for
2711 the cases where code == MINUS. */
2712 if (ops[0].neg && ops[1].neg)
/* Both negated: -(a) - b, i.e. NEG the first and subtract the second.  */
2714 lhs = gen_rtx_NEG (mode, ops[0].op);
2717 else if (ops[0].neg)
2728 return simplify_const_binary_operation (code, mode, lhs, rhs);
2731 /* Count the number of CONSTs we didn't split above. */
2732 for (i = 0; i < n_ops; i++)
2733 if (GET_CODE (ops[i].op) == CONST)
2736 /* Now simplify each pair of operands until nothing changes. The first
2737 time through just simplify constants against each other. */
2744 for (i = 0; i < n_ops - 1; i++)
2745 for (j = i + 1; j < n_ops; j++)
2747 rtx lhs = ops[i].op, rhs = ops[j].op;
2748 int lneg = ops[i].neg, rneg = ops[j].neg;
/* Entries already combined are NULL; skip them.  On the first pass
   only constant/constant pairs are combined.  */
2750 if (lhs != 0 && rhs != 0
2751 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2753 enum rtx_code ncode = PLUS;
2759 tem = lhs, lhs = rhs, rhs = tem;
2761 else if (swap_commutative_operands_p (lhs, rhs))
2762 tem = lhs, lhs = rhs, rhs = tem;
2764 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2766 /* Reject "simplifications" that just wrap the two
2767 arguments in a CONST. Failure to do so can result
2768 in infinite recursion with simplify_binary_operation
2769 when it calls us to simplify CONST operations. */
2771 && ! (GET_CODE (tem) == CONST
2772 && GET_CODE (XEXP (tem, 0)) == ncode
2773 && XEXP (XEXP (tem, 0), 0) == lhs
2774 && XEXP (XEXP (tem, 0), 1) == rhs)
2775 /* Don't allow -x + -1 -> ~x simplifications in the
2776 first pass. This allows us the chance to combine
2777 the -1 with other constants. */
2779 && GET_CODE (tem) == NOT
2780 && XEXP (tem, 0) == rhs))
/* Normalize the combined result: hoist a NEG into the flag, and fold
   a negated CONST_INT immediately.  */
2783 if (GET_CODE (tem) == NEG)
2784 tem = XEXP (tem, 0), lneg = !lneg;
2785 if (GET_CODE (tem) == CONST_INT && lneg)
2786 tem = neg_const_int (mode, tem), lneg = 0;
/* Slot j has been merged into slot i; mark it consumed.  */
2790 ops[j].op = NULL_RTX;
2800 /* Pack all the operands to the lower-numbered entries. */
2801 for (i = 0, j = 0; j < n_ops; j++)
2805 /* Stabilize sort. */
2811 /* Sort the operations based on swap_commutative_operands_p. */
2812 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2814 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2816 && GET_CODE (ops[1].op) == CONST_INT
2817 && CONSTANT_P (ops[0].op)
2819 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2821 /* We suppressed creation of trivial CONST expressions in the
2822 combination loop to avoid recursion. Create one manually now.
2823 The combination loop should have ensured that there is exactly
2824 one CONST_INT, and the sort will have ensured that it is last
2825 in the array and that any other constant will be next-to-last. */
2828 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2829 && CONSTANT_P (ops[n_ops - 2].op))
2831 rtx value = ops[n_ops - 1].op;
/* Opposite signs: fold the negation into the integer constant before
   merging it into the neighboring constant.  */
2832 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2833 value = neg_const_int (mode, value);
2834 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2838 /* Count the number of CONSTs that we generated. */
2840 for (i = 0; i < n_ops; i++)
2841 if (GET_CODE (ops[i].op) == CONST)
2844 /* Put a non-negated operand first, if possible. */
2846 for (i = 0; i < n_ops && ops[i].neg; i++)
/* Every operand negated (context elided): NEG the first explicitly.  */
2849 ops[0].op = gen_rtx_NEG (mode, ops[0].op)
2858 /* Now make the result by performing the requested operations. */
2860 for (i = 1; i < n_ops; i++)
2861 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2862 mode, result, ops[i].op);
2867 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2869 plus_minus_operand_p (rtx x)
2871 return GET_CODE (x) == PLUS
2872 || GET_CODE (x) == MINUS
2873 || (GET_CODE (x) == CONST
2874 && GET_CODE (XEXP (x, 0)) == PLUS
2875 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2876 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2879 /* Like simplify_binary_operation except used for relational operators.
2880 MODE is the mode of the result. If MODE is VOIDmode, both operands must
2881 not also be VOIDmode.
2883 CMP_MODE specifies in which mode the comparison is done in, so it is
2884 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2885 the operands or, if both are VOIDmode, the operands are compared in
2886 "infinite precision".
   NOTE(review): the listing below is elided; several statements and
   braces of the original function are missing from this view.  */
2888 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2889 enum machine_mode cmp_mode, rtx op0, rtx op1)
2891 rtx tem, trueop0, trueop1;
/* Derive the comparison mode from whichever operand has one.  */
2893 if (cmp_mode == VOIDmode)
2894 cmp_mode = GET_MODE (op0);
2895 if (cmp_mode == VOIDmode)
2896 cmp_mode = GET_MODE (op1);
/* First try to fold the comparison to a constant outright.  */
2898 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
/* A constant result must be materialized in the result mode: scalar
   floats use FLOAT_STORE_FLAG_VALUE for "true" when defined...  */
2901 if (SCALAR_FLOAT_MODE_P (mode))
2903 if (tem == const0_rtx)
2904 return CONST0_RTX (mode);
2905 #ifdef FLOAT_STORE_FLAG_VALUE
2907 REAL_VALUE_TYPE val;
2908 val = FLOAT_STORE_FLAG_VALUE (mode);
2909 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
/* ...and vector modes replicate VECTOR_STORE_FLAG_VALUE across all
   elements of a CONST_VECTOR.  */
2915 if (VECTOR_MODE_P (mode))
2917 if (tem == const0_rtx)
2918 return CONST0_RTX (mode);
2919 #ifdef VECTOR_STORE_FLAG_VALUE
2924 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2925 if (val == NULL_RTX)
2927 if (val == const1_rtx)
2928 return CONST1_RTX (mode);
2930 units = GET_MODE_NUNITS (mode);
2931 v = rtvec_alloc (units);
2932 for (i = 0; i < units; i++)
2933 RTVEC_ELT (v, i) = val;
2934 return gen_rtx_raw_CONST_VECTOR (mode, v);
2944 /* For the following tests, ensure const0_rtx is op1. */
2945 if (swap_commutative_operands_p (op0, op1)
2946 || (op0 == const0_rtx && op1 != const0_rtx))
2947 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2949 /* If op0 is a compare, extract the comparison arguments from it. */
2950 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2951 return simplify_relational_operation (code, mode, VOIDmode,
2952 XEXP (op0, 0), XEXP (op0, 1));
/* Non-constant fallback (condition partly elided): let the helper
   attempt structural simplifications on the unwrapped operands.  */
2954 if (mode == VOIDmode
2955 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2959 trueop0 = avoid_constant_pool_reference (op0);
2960 trueop1 = avoid_constant_pool_reference (op1);
2961 return simplify_relational_operation_1 (code, mode, cmp_mode,
2965 /* This part of simplify_relational_operation is only used when CMP_MODE
2966 is not in class MODE_CC (i.e. it is a real comparison).
2968 MODE is the mode of the result, while CMP_MODE specifies in which
2969 mode the comparison is done in, so it is the mode of the operands.
   Returns the simplified rtx, or (presumably) NULL on no change --
   the return paths are partly elided in this listing.  */
2972 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2973 enum machine_mode cmp_mode, rtx op0, rtx op1)
2975 enum rtx_code op0code = GET_CODE (op0);
2977 if (GET_CODE (op1) == CONST_INT)
2979 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2981 /* If op0 is a comparison, extract the comparison arguments form it. */
/* (ne (comparison ...) 0) is the comparison itself when the modes
   agree (context elided: this branch presumably handles code == NE).  */
2984 if (GET_MODE (op0) == mode)
2985 return simplify_rtx (op0);
2987 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2988 XEXP (op0, 0), XEXP (op0, 1));
/* (eq (comparison ...) 0) is the reversed comparison, when the
   reversal is known to be safe.  */
2990 else if (code == EQ)
2992 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2993 if (new_code != UNKNOWN)
2994 return simplify_gen_relational (new_code, mode, VOIDmode,
2995 XEXP (op0, 0), XEXP (op0, 1));
3000 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3001 if ((code == EQ || code == NE)
3002 && (op0code == PLUS || op0code == MINUS)
3004 && CONSTANT_P (XEXP (op0, 1))
3005 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3007 rtx x = XEXP (op0, 0);
3008 rtx c = XEXP (op0, 1);
/* Move the constant to the other side with the inverse operation.  */
3010 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3012 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3015 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3016 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3018 && op1 == const0_rtx
3019 && GET_MODE_CLASS (mode) == MODE_INT
3020 && cmp_mode != VOIDmode
3021 /* ??? Work-around BImode bugs in the ia64 backend. */
3023 && cmp_mode != BImode
/* Valid only when op0 is known to be exactly 0 or 1 and the target's
   store-flag value is 1, so op0 already IS the comparison result.  */
3024 && nonzero_bits (op0, cmp_mode) == 1
3025 && STORE_FLAG_VALUE == 1)
3026 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3027 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3028 : lowpart_subreg (mode, op0, cmp_mode);
3033 /* Check if the given comparison (done in the given MODE) is actually a
3034 tautology or a contradiction.
3035 If no simplification is possible, this function returns zero.
3036 Otherwise, it returns either const_true_rtx or const0_rtx.
   NOTE(review): the listing below is elided; many statements, braces
   and case labels of the original function are missing here.  */
3039 simplify_const_relational_operation (enum rtx_code code,
3040 enum machine_mode mode,
/* The five flags below summarize the operand relationship once it is
   known; the final switch maps them to const_true_rtx/const0_rtx.  */
3043 int equal, op0lt, op0ltu, op1lt, op1ltu;
3048 gcc_assert (mode != VOIDmode
3049 || (GET_MODE (op0) == VOIDmode
3050 && GET_MODE (op1) == VOIDmode));
3052 /* If op0 is a compare, extract the comparison arguments from it. */
3053 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3055 op1 = XEXP (op0, 1);
3056 op0 = XEXP (op0, 0);
/* Recover the comparison mode from whichever operand carries one.  */
3058 if (GET_MODE (op0) != VOIDmode)
3059 mode = GET_MODE (op0);
3060 else if (GET_MODE (op1) != VOIDmode)
3061 mode = GET_MODE (op1);
3066 /* We can't simplify MODE_CC values since we don't know what the
3067 actual comparison is. */
3068 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3071 /* Make sure the constant is second. */
3072 if (swap_commutative_operands_p (op0, op1))
3074 tem = op0, op0 = op1, op1 = tem;
3075 code = swap_condition (code);
3078 trueop0 = avoid_constant_pool_reference (op0);
3079 trueop1 = avoid_constant_pool_reference (op1);
3081 /* For integer comparisons of A and B maybe we can simplify A - B and can
3082 then simplify a comparison of that with zero. If A and B are both either
3083 a register or a CONST_INT, this can't help; testing for these cases will
3084 prevent infinite recursion here and speed things up.
3086 If CODE is an unsigned comparison, then we can never do this optimization,
3087 because it gives an incorrect result if the subtraction wraps around zero.
3088 ANSI C defines unsigned operations such that they never overflow, and
3089 thus such cases can not be ignored; but we cannot do it even for
3090 signed comparisons for languages such as Java, so test flag_wrapv. */
3092 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3093 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3094 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3095 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3096 /* We cannot do this for == or != if tem is a nonzero address. */
3097 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3098 && code != GTU && code != GEU && code != LTU && code != LEU)
3099 return simplify_const_relational_operation (signed_condition (code),
3100 mode, tem, const0_rtx);
/* With -funsafe-math-optimizations NaNs are assumed away, so ORDERED
   is always true and UNORDERED always false.  */
3102 if (flag_unsafe_math_optimizations && code == ORDERED)
3103 return const_true_rtx;
3105 if (flag_unsafe_math_optimizations && code == UNORDERED)
3108 /* For modes without NaNs, if the two operands are equal, we know the
3109 result except if they have side-effects. */
3110 if (! HONOR_NANS (GET_MODE (trueop0))
3111 && rtx_equal_p (trueop0, trueop1)
3112 && ! side_effects_p (trueop0))
3113 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3115 /* If the operands are floating-point constants, see if we can fold
3117 else if (GET_CODE (trueop0) == CONST_DOUBLE
3118 && GET_CODE (trueop1) == CONST_DOUBLE
3119 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3121 REAL_VALUE_TYPE d0, d1;
3123 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3124 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3126 /* Comparisons are unordered iff at least one of the values is NaN. */
3127 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3137 return const_true_rtx;
/* Ordered float constants: less-than is the same signed or unsigned.  */
3150 equal = REAL_VALUES_EQUAL (d0, d1);
3151 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3152 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3155 /* Otherwise, see if the operands are both integers. */
3156 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3157 && (GET_CODE (trueop0) == CONST_DOUBLE
3158 || GET_CODE (trueop0) == CONST_INT)
3159 && (GET_CODE (trueop1) == CONST_DOUBLE
3160 || GET_CODE (trueop1) == CONST_INT))
3162 int width = GET_MODE_BITSIZE (mode);
/* Each constant is held as a (low, high) pair of host wide ints, in
   both signed (s) and unsigned (u) flavors.  */
3163 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3164 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3166 /* Get the two words comprising each integer constant. */
3167 if (GET_CODE (trueop0) == CONST_DOUBLE)
3169 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3170 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3174 l0u = l0s = INTVAL (trueop0);
3175 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3178 if (GET_CODE (trueop1) == CONST_DOUBLE)
3180 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3181 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3185 l1u = l1s = INTVAL (trueop1);
3186 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3189 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3190 we have to sign or zero-extend the values. */
3191 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3193 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3194 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3196 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3197 l0s |= ((HOST_WIDE_INT) (-1) << width);
3199 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3200 l1s |= ((HOST_WIDE_INT) (-1) << width);
/* Narrow values fit in the low word: clear the unsigned highs and
   sign-extend the signed ones.  */
3202 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3203 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
/* Two-word comparisons: compare highs first, lows break ties
   (low words always compare unsigned).  */
3205 equal = (h0u == h1u && l0u == l1u);
3206 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3207 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3208 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3209 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3212 /* Otherwise, there are some code-specific tests we can make. */
3215 /* Optimize comparisons with upper and lower bounds. */
3216 if (SCALAR_INT_MODE_P (mode)
3217 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3230 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3237 /* x >= min is always true. */
3238 if (rtx_equal_p (trueop1, mmin))
3239 tem = const_true_rtx;
3245 /* x <= max is always true. */
3246 if (rtx_equal_p (trueop1, mmax))
3247 tem = const_true_rtx;
3252 /* x > max is always false. */
3253 if (rtx_equal_p (trueop1, mmax))
3259 /* x < min is always false. */
3260 if (rtx_equal_p (trueop1, mmin))
3267 if (tem == const0_rtx
3268 || tem == const_true_rtx)
/* Comparisons of a known-nonzero address against zero (cases elided:
   presumably NE returns true here, EQ false).  */
3275 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3280 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3281 return const_true_rtx;
3285 /* Optimize abs(x) < 0.0. */
3286 if (trueop1 == CONST0_RTX (mode)
3287 && !HONOR_SNANS (mode)
3288 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
/* Look through a FLOAT_EXTEND to find the ABS underneath.  */
3290 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3292 if (GET_CODE (tem) == ABS)
3298 /* Optimize abs(x) >= 0.0. */
3299 if (trueop1 == CONST0_RTX (mode)
3300 && !HONOR_NANS (mode)
3301 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3303 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3305 if (GET_CODE (tem) == ABS)
3306 return const_true_rtx;
3311 /* Optimize ! (abs(x) < 0.0). */
3312 if (trueop1 == CONST0_RTX (mode))
3314 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3316 if (GET_CODE (tem) == ABS)
3317 return const_true_rtx;
3328 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
/* Final dispatch: map the flags to a constant per comparison code
   (switch cases elided in this listing).  */
3334 return equal ? const_true_rtx : const0_rtx;
3337 return ! equal ? const_true_rtx : const0_rtx;
3340 return op0lt ? const_true_rtx : const0_rtx;
3343 return op1lt ? const_true_rtx : const0_rtx;
3345 return op0ltu ? const_true_rtx : const0_rtx;
3347 return op1ltu ? const_true_rtx : const0_rtx;
3350 return equal || op0lt ? const_true_rtx : const0_rtx;
3353 return equal || op1lt ? const_true_rtx : const0_rtx;
3355 return equal || op0ltu ? const_true_rtx : const0_rtx;
3357 return equal || op1ltu ? const_true_rtx : const0_rtx;
3359 return const_true_rtx;
3367 /* Simplify CODE, an operation with result mode MODE and three operands,
3368 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3369 a constant. Return 0 if no simplifications is possible.
   NOTE(review): the listing below is elided; the switch skeleton and
   several statements of the original function are missing here.  */
3372 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3373 enum machine_mode op0_mode, rtx op0, rtx op1,
3376 unsigned int width = GET_MODE_BITSIZE (mode);
3378 /* VOIDmode means "infinite" precision. */
3380 width = HOST_BITS_PER_WIDE_INT;
/* SIGN_EXTRACT/ZERO_EXTRACT of all-constant operands: fold at
   compile time.  OP1 is the field width, OP2 the starting bit.  */
3386 if (GET_CODE (op0) == CONST_INT
3387 && GET_CODE (op1) == CONST_INT
3388 && GET_CODE (op2) == CONST_INT
3389 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3390 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3392 /* Extracting a bit-field from a constant */
3393 HOST_WIDE_INT val = INTVAL (op0);
/* On big-endian bit numbering the start position counts from the
   most-significant end, so convert before shifting.  */
3395 if (BITS_BIG_ENDIAN)
3396 val >>= (GET_MODE_BITSIZE (op0_mode)
3397 - INTVAL (op2) - INTVAL (op1));
3399 val >>= INTVAL (op2);
3401 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3403 /* First zero-extend. */
3404 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3405 /* If desired, propagate sign bit. */
3406 if (code == SIGN_EXTRACT
3407 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3408 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3411 /* Clear the bits that don't belong in our mode,
3412 unless they and our sign bit are all one.
3413 So we get either a reasonable negative value or a reasonable
3414 unsigned value for this mode. */
3415 if (width < HOST_BITS_PER_WIDE_INT
3416 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3417 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3418 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3420 return gen_int_mode (val, mode);
/* IF_THEN_ELSE with a constant condition selects an arm outright.  */
3425 if (GET_CODE (op0) == CONST_INT)
3426 return op0 != const0_rtx ? op1 : op2;
3428 /* Convert c ? a : a into "a". */
3429 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3432 /* Convert a != b ? a : b into "a". */
/* NaNs and signed zeros break this identity for floats, so it is
   guarded out when those must be honored.  */
3433 if (GET_CODE (op0) == NE
3434 && ! side_effects_p (op0)
3435 && ! HONOR_NANS (mode)
3436 && ! HONOR_SIGNED_ZEROS (mode)
3437 && ((rtx_equal_p (XEXP (op0, 0), op1)
3438 && rtx_equal_p (XEXP (op0, 1), op2))
3439 || (rtx_equal_p (XEXP (op0, 0), op2)
3440 && rtx_equal_p (XEXP (op0, 1), op1))))
3443 /* Convert a == b ? a : b into "b". */
3444 if (GET_CODE (op0) == EQ
3445 && ! side_effects_p (op0)
3446 && ! HONOR_NANS (mode)
3447 && ! HONOR_SIGNED_ZEROS (mode)
3448 && ((rtx_equal_p (XEXP (op0, 0), op1)
3449 && rtx_equal_p (XEXP (op0, 1), op2))
3450 || (rtx_equal_p (XEXP (op0, 0), op2)
3451 && rtx_equal_p (XEXP (op0, 1), op1))))
3454 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3456 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3457 ? GET_MODE (XEXP (op0, 1))
3458 : GET_MODE (XEXP (op0, 0)));
3461 /* Look for happy constants in op1 and op2. */
3462 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3464 HOST_WIDE_INT t = INTVAL (op1);
3465 HOST_WIDE_INT f = INTVAL (op2);
/* cond ? STORE_FLAG_VALUE : 0 is just the comparison itself;
   cond ? 0 : STORE_FLAG_VALUE is the reversed comparison.  */
3467 if (t == STORE_FLAG_VALUE && f == 0)
3468 code = GET_CODE (op0);
3469 else if (t == 0 && f == STORE_FLAG_VALUE)
3472 tmp = reversed_comparison_code (op0, NULL_RTX);
3480 return simplify_gen_relational (code, mode, cmp_mode,
3481 XEXP (op0, 0), XEXP (op0, 1));
3484 if (cmp_mode == VOIDmode)
3485 cmp_mode = op0_mode;
3486 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3487 cmp_mode, XEXP (op0, 0),
3490 /* See if any simplifications were possible. */
3493 if (GET_CODE (temp) == CONST_INT)
3494 return temp == const0_rtx ? op2 : op1;
3496 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
/* VEC_MERGE case (label elided): OP2 is a bitmask choosing, per
   element, between OP0 and OP1.  */
3502 gcc_assert (GET_MODE (op0) == mode);
3503 gcc_assert (GET_MODE (op1) == mode);
3504 gcc_assert (VECTOR_MODE_P (mode));
3505 op2 = avoid_constant_pool_reference (op2);
3506 if (GET_CODE (op2) == CONST_INT)
3508 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3509 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3510 int mask = (1 << n_elts) - 1;
/* All-zero mask selects OP1 entirely; all-ones selects OP0
   (result statements elided here).  */
3512 if (!(INTVAL (op2) & mask))
3514 if ((INTVAL (op2) & mask) == mask)
3517 op0 = avoid_constant_pool_reference (op0);
3518 op1 = avoid_constant_pool_reference (op1);
3519 if (GET_CODE (op0) == CONST_VECTOR
3520 && GET_CODE (op1) == CONST_VECTOR)
3522 rtvec v = rtvec_alloc (n_elts);
/* Both inputs constant: build the merged CONST_VECTOR element
   by element according to the mask bits.  */
3525 for (i = 0; i < n_elts; i++)
3526 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3527 ? CONST_VECTOR_ELT (op0, i)
3528 : CONST_VECTOR_ELT (op1, i));
3529 return gen_rtx_CONST_VECTOR (mode, v);
3541 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3542 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3544 Works by unpacking OP into a collection of 8-bit values
3545 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3546 and then repacking them again for OUTERMODE.
   NOTE(review): this listing is elided; several declarations, case
   labels and braces of the original function are missing here.  */
3549 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3550 enum machine_mode innermode, unsigned int byte)
3552 /* We support up to 512-bit values (for V8DFmode). */
/* VALUE holds the unpacked bits in chunks of value_bit bits each.  */
3556 value_mask = (1 << value_bit) - 1
3558 unsigned char value[max_bitsize / value_bit];
3567 rtvec result_v = NULL;
3568 enum mode_class outer_class;
3569 enum machine_mode outer_submode;
3571 /* Some ports misuse CCmode. */
3572 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3575 /* We have no way to represent a complex constant at the rtl level. */
3576 if (COMPLEX_MODE_P (outermode))
3579 /* Unpack the value. */
/* Treat a CONST_VECTOR as its elements; any other constant is a
   single element spanning the whole buffer.  */
3581 if (GET_CODE (op) == CONST_VECTOR)
3583 num_elem = CONST_VECTOR_NUNITS (op);
3584 elems = &CONST_VECTOR_ELT (op, 0);
3585 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3591 elem_bitsize = max_bitsize;
3593 /* If this asserts, it is too complicated; reducing value_bit may help. */
3594 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3595 /* I don't know how to handle endianness of sub-units. */
3596 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3598 for (elem = 0; elem < num_elem; elem++)
3601 rtx el = elems[elem];
3603 /* Vectors are kept in target memory order. (This is probably
/* Compute where this element's bytes land in VALUE, accounting for
   word and byte endianness of the target.  */
3606 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3607 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3609 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3610 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3611 unsigned bytele = (subword_byte % UNITS_PER_WORD
3612 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3613 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3616 switch (GET_CODE (el))
/* CONST_INT case (label elided): emit the low bits, then replicate
   the sign bit upward.  */
3620 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3622 *vp++ = INTVAL (el) >> i;
3623 /* CONST_INTs are always logically sign-extended. */
3624 for (; i < elem_bitsize; i += value_bit)
3625 *vp++ = INTVAL (el) < 0 ? -1 : 0;
/* CONST_DOUBLE case: VOIDmode means a two-word integer constant.  */
3629 if (GET_MODE (el) == VOIDmode)
3631 /* If this triggers, someone should have generated a
3632 CONST_INT instead. */
3633 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3635 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3636 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3637 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3640 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3643 /* It shouldn't matter what's done here, so fill it with
3645 for (; i < elem_bitsize; i += value_bit)
/* Floating-point CONST_DOUBLE: let real_to_target produce the raw
   image, then copy its bits into VALUE.  */
3650 long tmp[max_bitsize / 32];
3651 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3653 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
3654 gcc_assert (bitsize <= elem_bitsize);
3655 gcc_assert (bitsize % value_bit == 0);
3657 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3660 /* real_to_target produces its result in words affected by
3661 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3662 and use WORDS_BIG_ENDIAN instead; see the documentation
3663 of SUBREG in rtl.texi. */
3664 for (i = 0; i < bitsize; i += value_bit)
3667 if (WORDS_BIG_ENDIAN)
3668 ibase = bitsize - 1 - i;
3671 *vp++ = tmp[ibase / 32] >> i % 32;
3674 /* It shouldn't matter what's done here, so fill it with
3676 for (; i < elem_bitsize; i += value_bit)
3686 /* Now, pick the right byte to start with. */
3687 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3688 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3689 will already have offset 0. */
3690 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3692 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3694 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3695 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3696 byte = (subword_byte % UNITS_PER_WORD
3697 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3700 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3701 so if it's become negative it will instead be very large.) */
3702 gcc_assert (byte < GET_MODE_SIZE (innermode));
3704 /* Convert from bytes to chunks of size value_bit. */
3705 value_start = byte * (BITS_PER_UNIT / value_bit);
3707 /* Re-pack the value. */
/* A vector OUTERMODE is rebuilt element-by-element into RESULT_V;
   otherwise the whole buffer forms one scalar element.  */
3709 if (VECTOR_MODE_P (outermode))
3711 num_elem = GET_MODE_NUNITS (outermode);
3712 result_v = rtvec_alloc (num_elem);
3713 elems = &RTVEC_ELT (result_v, 0);
3714 outer_submode = GET_MODE_INNER (outermode);
3720 outer_submode = outermode;
3723 outer_class = GET_MODE_CLASS (outer_submode);
3724 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3726 gcc_assert (elem_bitsize % value_bit == 0);
3727 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3729 for (elem = 0; elem < num_elem; elem++)
3733 /* Vectors are stored in target memory order. (This is probably
/* Mirror of the unpacking index computation above.  */
3736 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3737 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3739 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3740 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3741 unsigned bytele = (subword_byte % UNITS_PER_WORD
3742 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3743 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3746 switch (outer_class)
3749 case MODE_PARTIAL_INT:
3751 unsigned HOST_WIDE_INT hi = 0, lo = 0;
/* Accumulate the chunks back into a (lo, hi) wide-int pair.  */
3754 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3756 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3757 for (; i < elem_bitsize; i += value_bit)
3758 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3759 << (i - HOST_BITS_PER_WIDE_INT));
3761 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3763 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3764 elems[elem] = gen_int_mode (lo, outer_submode);
3765 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
3766 elems[elem] = immed_double_const (lo, hi, outer_submode);
/* Float case (label elided): rebuild the target image and convert
   it back to a REAL_VALUE_TYPE.  */
3775 long tmp[max_bitsize / 32];
3777 /* real_from_target wants its input in words affected by
3778 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3779 and use WORDS_BIG_ENDIAN instead; see the documentation
3780 of SUBREG in rtl.texi. */
3781 for (i = 0; i < max_bitsize / 32; i++)
3783 for (i = 0; i < elem_bitsize; i += value_bit)
3786 if (WORDS_BIG_ENDIAN)
3787 ibase = elem_bitsize - 1 - i;
3790 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3793 real_from_target (&r, tmp, outer_submode);
3794 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3802 if (VECTOR_MODE_P (outermode))
3803 return gen_rtx_CONST_VECTOR (outermode, result_v);
3808 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3809 Return 0 if no simplifications are possible. */
3811 simplify_subreg (enum machine_mode outermode, rtx op,
3812 enum machine_mode innermode, unsigned int byte)
3814 /* Little bit of sanity checking. */
3815 gcc_assert (innermode != VOIDmode);
3816 gcc_assert (outermode != VOIDmode);
3817 gcc_assert (innermode != BLKmode);
3818 gcc_assert (outermode != BLKmode);
3820 gcc_assert (GET_MODE (op) == innermode
3821 || GET_MODE (op) == VOIDmode);
3823 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3824 gcc_assert (byte < GET_MODE_SIZE (innermode));
3826 if (outermode == innermode && !byte)
3829 if (GET_CODE (op) == CONST_INT
3830 || GET_CODE (op) == CONST_DOUBLE
3831 || GET_CODE (op) == CONST_VECTOR)
3832 return simplify_immed_subreg (outermode, op, innermode, byte);
3834 /* Changing mode twice with SUBREG => just change it once,
3835 or not at all if changing back op starting mode. */
3836 if (GET_CODE (op) == SUBREG)
3838 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3839 int final_offset = byte + SUBREG_BYTE (op);
3842 if (outermode == innermostmode
3843 && byte == 0 && SUBREG_BYTE (op) == 0)
3844 return SUBREG_REG (op);
3846 /* The SUBREG_BYTE represents offset, as if the value were stored
3847 in memory. Irritating exception is paradoxical subreg, where
3848 we define SUBREG_BYTE to be 0. On big endian machines, this
3849 value should be negative. For a moment, undo this exception. */
3850 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3852 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3853 if (WORDS_BIG_ENDIAN)
3854 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3855 if (BYTES_BIG_ENDIAN)
3856 final_offset += difference % UNITS_PER_WORD;
3858 if (SUBREG_BYTE (op) == 0
3859 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3861 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3862 if (WORDS_BIG_ENDIAN)
3863 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3864 if (BYTES_BIG_ENDIAN)
3865 final_offset += difference % UNITS_PER_WORD;
3868 /* See whether resulting subreg will be paradoxical. */
3869 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3871 /* In nonparadoxical subregs we can't handle negative offsets. */
3872 if (final_offset < 0)
3874 /* Bail out in case resulting subreg would be incorrect. */
3875 if (final_offset % GET_MODE_SIZE (outermode)
3876 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3882 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3884 /* In paradoxical subreg, see if we are still looking on lower part.
3885 If so, our SUBREG_BYTE will be 0. */
3886 if (WORDS_BIG_ENDIAN)
3887 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3888 if (BYTES_BIG_ENDIAN)
3889 offset += difference % UNITS_PER_WORD;
3890 if (offset == final_offset)
3896 /* Recurse for further possible simplifications. */
3897 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3901 if (validate_subreg (outermode, innermostmode,
3902 SUBREG_REG (op), final_offset))
3903 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3907 /* SUBREG of a hard register => just change the register number
3908 and/or mode. If the hard register is not valid in that mode,
3909 suppress this simplification. If the hard register is the stack,
3910 frame, or argument pointer, leave this as a SUBREG. */
3913 && REGNO (op) < FIRST_PSEUDO_REGISTER
3914 #ifdef CANNOT_CHANGE_MODE_CLASS
3915 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3916 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3917 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3919 && ((reload_completed && !frame_pointer_needed)
3920 || (REGNO (op) != FRAME_POINTER_REGNUM
3921 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3922 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3925 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3926 && REGNO (op) != ARG_POINTER_REGNUM
3928 && REGNO (op) != STACK_POINTER_REGNUM
3929 && subreg_offset_representable_p (REGNO (op), innermode,
3932 unsigned int regno = REGNO (op);
3933 unsigned int final_regno
3934 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3936 /* ??? We do allow it if the current REG is not valid for
3937 its mode. This is a kludge to work around how float/complex
3938 arguments are passed on 32-bit SPARC and should be fixed. */
3939 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3940 || ! HARD_REGNO_MODE_OK (regno, innermode))
3942 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3944 /* Propagate original regno. We don't have any way to specify
3945 the offset inside original regno, so do so only for lowpart.
3946 The information is used only by alias analysis that can not
3947 grog partial register anyway. */
3949 if (subreg_lowpart_offset (outermode, innermode) == byte)
3950 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3955 /* If we have a SUBREG of a register that we are replacing and we are
3956 replacing it with a MEM, make a new MEM and try replacing the
3957 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3958 or if we would be widening it. */
3961 && ! mode_dependent_address_p (XEXP (op, 0))
3962 /* Allow splitting of volatile memory references in case we don't
3963 have instruction to move the whole thing. */
3964 && (! MEM_VOLATILE_P (op)
3965 || ! have_insn_for (SET, innermode))
3966 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3967 return adjust_address_nv (op, outermode, byte);
3969 /* Handle complex values represented as CONCAT
3970 of real and imaginary part. */
3971 if (GET_CODE (op) == CONCAT)
3973 unsigned int inner_size, final_offset;
3976 inner_size = GET_MODE_UNIT_SIZE (innermode);
3977 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3978 final_offset = byte % inner_size;
3979 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3982 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3985 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3986 return gen_rtx_SUBREG (outermode, part, final_offset);
3990 /* Optimize SUBREG truncations of zero and sign extended values. */
3991 if ((GET_CODE (op) == ZERO_EXTEND
3992 || GET_CODE (op) == SIGN_EXTEND)
3993 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3995 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3997 /* If we're requesting the lowpart of a zero or sign extension,
3998 there are three possibilities. If the outermode is the same
3999 as the origmode, we can omit both the extension and the subreg.
4000 If the outermode is not larger than the origmode, we can apply
4001 the truncation without the extension. Finally, if the outermode
4002 is larger than the origmode, but both are integer modes, we
4003 can just extend to the appropriate mode. */
4006 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4007 if (outermode == origmode)
4008 return XEXP (op, 0);
4009 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4010 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4011 subreg_lowpart_offset (outermode,
4013 if (SCALAR_INT_MODE_P (outermode))
4014 return simplify_gen_unary (GET_CODE (op), outermode,
4015 XEXP (op, 0), origmode);
4018 /* A SUBREG resulting from a zero extension may fold to zero if
4019 it extracts higher bits that the ZERO_EXTEND's source bits. */
4020 if (GET_CODE (op) == ZERO_EXTEND
4021 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4022 return CONST0_RTX (outermode);
4025 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4026 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4027 the outer subreg is effectively a truncation to the original mode. */
4028 if ((GET_CODE (op) == LSHIFTRT
4029 || GET_CODE (op) == ASHIFTRT)
4030 && SCALAR_INT_MODE_P (outermode)
4031 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4032 to avoid the possibility that an outer LSHIFTRT shifts by more
4033 than the sign extension's sign_bit_copies and introduces zeros
4034 into the high bits of the result. */
4035 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4036 && GET_CODE (XEXP (op, 1)) == CONST_INT
4037 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4038 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4039 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4040 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4041 return simplify_gen_binary (ASHIFTRT, outermode,
4042 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4044 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4045 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4046 the outer subreg is effectively a truncation to the original mode. */
4047 if ((GET_CODE (op) == LSHIFTRT
4048 || GET_CODE (op) == ASHIFTRT)
4049 && SCALAR_INT_MODE_P (outermode)
4050 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4051 && GET_CODE (XEXP (op, 1)) == CONST_INT
4052 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4053 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4054 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4055 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4056 return simplify_gen_binary (LSHIFTRT, outermode,
4057 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4059 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4060 to (ashift:QI (x:QI) C), where C is a suitable small constant and
4061 the outer subreg is effectively a truncation to the original mode. */
4062 if (GET_CODE (op) == ASHIFT
4063 && SCALAR_INT_MODE_P (outermode)
4064 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4065 && GET_CODE (XEXP (op, 1)) == CONST_INT
4066 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4067 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4068 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4069 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4070 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4071 return simplify_gen_binary (ASHIFT, outermode,
4072 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4077 /* Make a SUBREG operation or equivalent if it folds. */
4080 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4081 enum machine_mode innermode, unsigned int byte)
4085 newx = simplify_subreg (outermode, op, innermode, byte);
4089 if (GET_CODE (op) == SUBREG
4090 || GET_CODE (op) == CONCAT
4091 || GET_MODE (op) == VOIDmode)
4094 if (validate_subreg (outermode, innermode, op, byte))
4095 return gen_rtx_SUBREG (outermode, op, byte);
4100 /* Simplify X, an rtx expression.
4102 Return the simplified expression or NULL if no simplifications
4105 This is the preferred entry point into the simplification routines;
4106 however, we still allow passes to call the more specific routines.
4108 Right now GCC has three (yes, three) major bodies of RTL simplification
4109 code that need to be unified.
4111 1. fold_rtx in cse.c. This code uses various CSE specific
4112 information to aid in RTL simplification.
4114 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4115 it uses combine specific information to aid in RTL
4118 3. The routines in this file.
4121 Long term we want to only have one body of simplification code; to
4122 get to that state I recommend the following steps:
 4124    1. Pore over fold_rtx & simplify_rtx and move any simplifications
4125 which are not pass dependent state into these routines.
4127 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4128 use this routine whenever possible.
4130 3. Allow for pass dependent state to be provided to these
4131 routines and add simplifications based on the pass dependent
4132 state. Remove code from cse.c & combine.c that becomes
4135 It will take time, but ultimately the compiler will be easier to
4136 maintain and improve. It's totally silly that when we add a
4137 simplification that it needs to be added to 4 places (3 for RTL
 4138    simplification and 1 for tree simplification).  */
4141 simplify_rtx (rtx x)
4143 enum rtx_code code = GET_CODE (x);
4144 enum machine_mode mode = GET_MODE (x);
4146 switch (GET_RTX_CLASS (code))
4149 return simplify_unary_operation (code, mode,
4150 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4151 case RTX_COMM_ARITH:
4152 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4153 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4155 /* Fall through.... */
4158 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4161 case RTX_BITFIELD_OPS:
4162 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4163 XEXP (x, 0), XEXP (x, 1),
4167 case RTX_COMM_COMPARE:
4168 return simplify_relational_operation (code, mode,
4169 ((GET_MODE (XEXP (x, 0))
4171 ? GET_MODE (XEXP (x, 0))
4172 : GET_MODE (XEXP (x, 1))),
4178 return simplify_gen_subreg (mode, SUBREG_REG (x),
4179 GET_MODE (SUBREG_REG (x)),
4186 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4187 if (GET_CODE (XEXP (x, 0)) == HIGH
4188 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))