1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
30 #include "hard-reg-set.h"
33 #include "insn-config.h"
41 /* Simplification and canonicalization of RTL. */
43 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
44 virtual regs here because the simplify_*_operation routines are called
45 by integrate.c, which is called before virtual register instantiation.
47 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
48 a header file so that their definitions can be shared with the
49 simplification routines in simplify-rtx.c. Until then, do not
50 change these macros without also changing the copy in simplify-rtx.c. */
/* NOTE(review): inside the (PLUS base const_int) arm every test must look
   at XEXP (X, 0), the base of the sum.  The previous code compared X
   itself against arg_pointer_rtx there, which can never match because X
   is the whole PLUS; fixed to test XEXP (X, 0).  Keep this in sync with
   the other copy of this macro (see the comment above).  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
66 /* Similar, but also allows reference to the stack pointer.
68 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
69 arg_pointer_rtx by itself is nonzero, because on at least one machine,
70 the i960, the arg pointer is zero when it is unused. */
/* NOTE(review): as in FIXED_BASE_PLUS_P, the (PLUS base const_int) arm
   must test XEXP (X, 0) against arg_pointer_rtx; the previous code
   tested X itself, which can never be arg_pointer_rtx when X is a PLUS.
   Fixed to test XEXP (X, 0).  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
92 /* Much code operates on (low, high) pairs; the low value is an
93 unsigned wide int, the high value a signed wide int. We
94 occasionally need to sign extend from low to high as if low were a signed wide int.  */
/* Sign-extend LOW into a high word: yields (HOST_WIDE_INT) -1 when LOW
   is negative when viewed as a HOST_WIDE_INT, and 0 otherwise.  The
   parameter is fully parenthesized so that expression arguments (e.g.
   HWI_SIGN_EXTEND (a + b)) expand correctly.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
99 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
100 enum machine_mode, rtx, rtx));
101 static void check_fold_consts PARAMS ((PTR));
103 /* Make a binary operation by properly ordering the operands and
104 seeing if the expression folds. */
/* NOTE(review): the leading numbers are the original file's line numbers;
   they are non-contiguous, so parts of this function (return type, some
   declarations, bodies of some branches) were lost in extraction.  Do not
   assume the visible text is complete. */
107 simplify_gen_binary (code, mode, op0, op1)
109 enum machine_mode mode;
/* Canonicalize commutative operations: put the "complex" operand first
   and a constant second, so later pattern checks only look one way. */
114 /* Put complex operands first and constants second if commutative. */
115 if (GET_RTX_CLASS (code) == 'c'
116 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
117 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
118 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
119 || (GET_CODE (op0) == SUBREG
120 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
121 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
122 tem = op0, op0 = op1, op1 = tem;
124 /* If this simplifies, do it. */
125 tem = simplify_binary_operation (code, mode, op0, op1);
130 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
131 just form the operation. */
133 if (code == PLUS && GET_CODE (op1) == CONST_INT
134 && GET_MODE (op0) != VOIDmode
135 return plus_constant (op0, INTVAL (op1));
136 else if (code == MINUS && GET_CODE (op1) == CONST_INT
137 && GET_MODE (op0) != VOIDmode)
138 return plus_constant (op0, - INTVAL (op1));
/* No simplification applied: build the plain rtx for CODE.  */
140 return gen_rtx_fmt_ee (code, mode, op0, op1);
143 /* Try to simplify a unary operation CODE whose output mode is to be
144 MODE with input operand OP whose mode was originally OP_MODE.
145 Return zero if no simplification can be made. */
/* NOTE(review): the leading numbers are the original file's line numbers;
   they are non-contiguous, so many interior lines of this function (case
   labels, braces, some statements) were lost in extraction.  Comments
   below annotate only the code that is visible. */
148 simplify_unary_operation (code, mode, op, op_mode)
150 enum machine_mode mode;
152 enum machine_mode op_mode;
154 unsigned int width = GET_MODE_BITSIZE (mode);
156 /* The order of these tests is critical so that, for example, we don't
157 check the wrong mode (input vs. output) for a conversion operation,
158 such as FIX. At some point, this should be simplified. */
160 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
/* Fold FLOAT of an integer constant (CONST_INT or integer CONST_DOUBLE). */
162 if (code == FLOAT && GET_MODE (op) == VOIDmode
163 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
165 HOST_WIDE_INT hv, lv;
168 if (GET_CODE (op) == CONST_INT)
169 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
171 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
173 #ifdef REAL_ARITHMETIC
174 REAL_VALUE_FROM_INT (d, lv, hv, mode);
/* Non-REAL_ARITHMETIC fallback: build the double-width value by hand. */
179 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
180 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
181 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
187 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
188 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
189 d += (double) (unsigned HOST_WIDE_INT) lv;
191 #endif /* REAL_ARITHMETIC */
192 d = real_value_truncate (mode, d);
193 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Fold UNSIGNED_FLOAT of an integer constant, taking care with values
   that look negative when OP_MODE is unknown. */
195 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
196 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
198 HOST_WIDE_INT hv, lv;
201 if (GET_CODE (op) == CONST_INT)
202 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
204 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
206 if (op_mode == VOIDmode)
208 /* We don't know how to interpret negative-looking numbers in
209 this case, so don't try to fold those. */
213 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
216 hv = 0, lv &= GET_MODE_MASK (op_mode);
218 #ifdef REAL_ARITHMETIC
219 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
222 d = (double) (unsigned HOST_WIDE_INT) hv;
223 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
224 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
225 d += (double) (unsigned HOST_WIDE_INT) lv;
226 #endif /* REAL_ARITHMETIC */
227 d = real_value_truncate (mode, d);
228 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Fold unary operations on a CONST_INT that fits in a host wide int. */
232 if (GET_CODE (op) == CONST_INT
233 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
235 register HOST_WIDE_INT arg0 = INTVAL (op);
236 register HOST_WIDE_INT val;
249 val = (arg0 >= 0 ? arg0 : - arg0);
253 /* Don't use ffs here. Instead, get low order bit and then its
254 number. If arg0 is zero, this will return 0, as desired. */
255 arg0 &= GET_MODE_MASK (mode);
256 val = exact_log2 (arg0 & (- arg0)) + 1;
/* Zero-extension: mask to OP_MODE's width when it is narrower than a
   host wide int.  Equal widths are a no-op conversion. */
264 if (op_mode == VOIDmode)
266 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
268 /* If we were really extending the mode,
269 we would have to distinguish between zero-extension
270 and sign-extension. */
271 if (width != GET_MODE_BITSIZE (op_mode))
275 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
276 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
/* Sign-extension: mask, then subtract 2^width if the sign bit is set. */
282 if (op_mode == VOIDmode)
284 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
286 /* If we were really extending the mode,
287 we would have to distinguish between zero-extension
288 and sign-extension. */
289 if (width != GET_MODE_BITSIZE (op_mode))
293 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
296 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
298 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
299 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
314 val = trunc_int_for_mode (val, mode);
316 return GEN_INT (val);
319 /* We can do some operations on integer CONST_DOUBLEs. Also allow
320 for a DImode operation on a CONST_INT. */
321 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
322 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
324 unsigned HOST_WIDE_INT l1, lv;
325 HOST_WIDE_INT h1, hv;
327 if (GET_CODE (op) == CONST_DOUBLE)
328 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
330 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
340 neg_double (l1, h1, &lv, &hv);
345 neg_double (l1, h1, &lv, &hv);
/* FFS on a double-wide value: look in the high word only when the low
   word is zero. */
353 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
355 lv = exact_log2 (l1 & (-l1)) + 1;
359 /* This is just a change-of-mode, so do nothing. */
364 if (op_mode == VOIDmode
365 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
369 lv = l1 & GET_MODE_MASK (op_mode);
373 if (op_mode == VOIDmode
374 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
378 lv = l1 & GET_MODE_MASK (op_mode);
379 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
380 && (lv & ((HOST_WIDE_INT) 1
381 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
382 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
384 hv = HWI_SIGN_EXTEND (lv);
395 return immed_double_const (lv, hv, mode);
398 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* Fold unary operations on floating-point constants, guarded by the
   float trap handler (setjmp) so FP exceptions abort the fold. */
399 else if (GET_CODE (op) == CONST_DOUBLE
400 && GET_MODE_CLASS (mode) == MODE_FLOAT)
406 if (setjmp (handler))
407 /* There used to be a warning here, but that is inadvisable.
408 People may want to cause traps, and the natural way
409 to do it should not get a warning. */
412 set_float_handler (handler);
414 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
419 d = REAL_VALUE_NEGATE (d);
423 if (REAL_VALUE_NEGATIVE (d))
424 d = REAL_VALUE_NEGATE (d);
428 d = real_value_truncate (mode, d);
432 /* All this does is change the mode. */
436 d = REAL_VALUE_RNDZINT (d);
440 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
450 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
451 set_float_handler (NULL_PTR);
/* Fold float-to-integer conversions when the result fits a wide int. */
455 else if (GET_CODE (op) == CONST_DOUBLE
456 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
457 && GET_MODE_CLASS (mode) == MODE_INT
458 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
464 if (setjmp (handler))
467 set_float_handler (handler);
469 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
474 val = REAL_VALUE_FIX (d);
478 val = REAL_VALUE_UNSIGNED_FIX (d);
485 set_float_handler (NULL_PTR);
487 val = trunc_int_for_mode (val, mode);
489 return GEN_INT (val);
492 /* This was formerly used only for non-IEEE float.
493 eggert@twinsun.com says it is safe for IEEE also. */
496 enum rtx_code reversed;
497 /* There are some simplifications we can do even if the operands
aren't constant.  */
502 /* (not (not X)) == X. */
503 if (GET_CODE (op) == NOT)
506 /* (not (eq X Y)) == (ne X Y), etc. */
507 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
508 && ((reversed = reversed_comparison_code (op, NULL_RTX))
510 return gen_rtx_fmt_ee (reversed,
511 op_mode, XEXP (op, 0), XEXP (op, 1));
515 /* (neg (neg X)) == X. */
516 if (GET_CODE (op) == NEG)
521 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
522 becomes just the MINUS if its mode is MODE. This allows
523 folding switch statements on machines using casesi (such as
the VAX).  */
525 if (GET_CODE (op) == TRUNCATE
526 && GET_MODE (XEXP (op, 0)) == mode
527 && GET_CODE (XEXP (op, 0)) == MINUS
528 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
529 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
/* Pointer extension: when pointers extend signed, sign-extending a
   ptr_mode pointer value to Pmode is a plain address conversion. */
532 #ifdef POINTERS_EXTEND_UNSIGNED
533 if (! POINTERS_EXTEND_UNSIGNED
534 && mode == Pmode && GET_MODE (op) == ptr_mode
536 || (GET_CODE (op) == SUBREG
537 && GET_CODE (SUBREG_REG (op)) == REG
538 && REG_POINTER (SUBREG_REG (op))
539 && GET_MODE (SUBREG_REG (op)) == Pmode)))
540 return convert_memory_address (Pmode, op);
/* Same as above for the zero-extension case. */
544 #ifdef POINTERS_EXTEND_UNSIGNED
546 if (POINTERS_EXTEND_UNSIGNED
547 && mode == Pmode && GET_MODE (op) == ptr_mode
549 || (GET_CODE (op) == SUBREG
550 && GET_CODE (SUBREG_REG (op)) == REG
551 && REG_POINTER (SUBREG_REG (op))
552 && GET_MODE (SUBREG_REG (op)) == Pmode)))
553 return convert_memory_address (Pmode, op);
565 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
566 and OP1. Return 0 if no simplification is possible.
568 Don't use this for relational operations such as EQ or LT.
569 Use simplify_relational_operation instead. */
/* NOTE(review): the leading numbers are the original file's line numbers;
   they are non-contiguous, so many interior lines (case labels, braces,
   return statements) were lost in extraction.  Comments below annotate
   only the code that is visible. */
572 simplify_binary_operation (code, mode, op0, op1)
574 enum machine_mode mode;
577 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
579 unsigned int width = GET_MODE_BITSIZE (mode);
582 /* Relational operations don't work here. We must know the mode
583 of the operands in order to do the comparison correctly.
584 Assuming a full word can give incorrect results.
585 Consider comparing 128 with -128 in QImode. */
587 if (GET_RTX_CLASS (code) == '<')
/* Constant-fold float OP float, guarded by the float trap handler. */
590 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
591 if (GET_MODE_CLASS (mode) == MODE_FLOAT
592 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
593 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
595 REAL_VALUE_TYPE f0, f1, value;
598 if (setjmp (handler))
601 set_float_handler (handler);
603 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
604 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
605 f0 = real_value_truncate (mode, f0);
606 f1 = real_value_truncate (mode, f1);
608 #ifdef REAL_ARITHMETIC
609 #ifndef REAL_INFINITY
610 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
613 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
627 #ifndef REAL_INFINITY
634 value = MIN (f0, f1);
637 value = MAX (f0, f1);
644 value = real_value_truncate (mode, value);
645 set_float_handler (NULL_PTR);
646 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
648 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
650 /* We can fold some multi-word operations. */
651 if (GET_MODE_CLASS (mode) == MODE_INT
652 && width == HOST_BITS_PER_WIDE_INT * 2
653 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
654 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
656 unsigned HOST_WIDE_INT l1, l2, lv;
657 HOST_WIDE_INT h1, h2, hv;
659 if (GET_CODE (op0) == CONST_DOUBLE)
660 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
662 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
664 if (GET_CODE (op1) == CONST_DOUBLE)
665 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
667 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
672 /* A - B == A + (-B). */
673 neg_double (l2, h2, &lv, &hv);
676 /* .. fall through ... */
679 add_double (l1, h1, l2, h2, &lv, &hv);
683 mul_double (l1, h1, l2, h2, &lv, &hv);
686 case DIV: case MOD: case UDIV: case UMOD:
687 /* We'd need to include tree.h to do this and it doesn't seem worth
anything.  */
692 lv = l1 & l2, hv = h1 & h2;
696 lv = l1 | l2, hv = h1 | h2;
700 lv = l1 ^ l2, hv = h1 ^ h2;
/* Double-wide signed/unsigned MIN/MAX: compare high words first, then
   the low words unsigned when the high words are equal. */
706 && ((unsigned HOST_WIDE_INT) l1
707 < (unsigned HOST_WIDE_INT) l2)))
716 && ((unsigned HOST_WIDE_INT) l1
717 > (unsigned HOST_WIDE_INT) l2)))
724 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
726 && ((unsigned HOST_WIDE_INT) l1
727 < (unsigned HOST_WIDE_INT) l2)))
734 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
736 && ((unsigned HOST_WIDE_INT) l1
737 > (unsigned HOST_WIDE_INT) l2)))
743 case LSHIFTRT: case ASHIFTRT:
745 case ROTATE: case ROTATERT:
746 #ifdef SHIFT_COUNT_TRUNCATED
747 if (SHIFT_COUNT_TRUNCATED)
748 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
/* Out-of-range shift counts are left for the target to interpret. */
751 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
754 if (code == LSHIFTRT || code == ASHIFTRT)
755 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
757 else if (code == ASHIFT)
758 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
759 else if (code == ROTATE)
760 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
761 else /* code == ROTATERT */
762 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
769 return immed_double_const (lv, hv, mode);
/* Not a pair of single-word CONST_INTs: try structural (non-constant)
   simplifications instead of arithmetic folding. */
772 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
773 || width > HOST_BITS_PER_WIDE_INT || width == 0)
775 /* Even if we can't compute a constant result,
776 there are some cases worth simplifying. */
781 /* In IEEE floating point, x+0 is not the same as x. Similarly
782 for the other optimizations below. */
783 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
784 && FLOAT_MODE_P (mode) && ! flag_fast_math)
787 if (op1 == CONST0_RTX (mode))
790 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
791 if (GET_CODE (op0) == NEG)
792 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
793 else if (GET_CODE (op1) == NEG)
794 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
796 /* Handle both-operands-constant cases. We can only add
797 CONST_INTs to constants since the sum of relocatable symbols
798 can't be handled by most assemblers. Don't add CONST_INT
799 to CONST_INT since overflow won't be computed properly if wider
800 than HOST_BITS_PER_WIDE_INT. */
802 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
803 && GET_CODE (op1) == CONST_INT)
804 return plus_constant (op0, INTVAL (op1));
805 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
806 && GET_CODE (op0) == CONST_INT)
807 return plus_constant (op1, INTVAL (op0));
809 /* See if this is something like X * C - X or vice versa or
810 if the multiplication is written as a shift. If so, we can
811 distribute and make a new multiply, shift, or maybe just
812 have X (if C is 2 in the example above). But don't make
813 real multiply if we didn't have one before. */
815 if (! FLOAT_MODE_P (mode))
817 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
818 rtx lhs = op0, rhs = op1;
/* Strip NEG/MULT/ASHIFT wrappers, collecting each side's coefficient. */
821 if (GET_CODE (lhs) == NEG)
822 coeff0 = -1, lhs = XEXP (lhs, 0);
823 else if (GET_CODE (lhs) == MULT
824 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
826 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
829 else if (GET_CODE (lhs) == ASHIFT
830 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
831 && INTVAL (XEXP (lhs, 1)) >= 0
832 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
834 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
838 if (GET_CODE (rhs) == NEG)
839 coeff1 = -1, rhs = XEXP (rhs, 0);
840 else if (GET_CODE (rhs) == MULT
841 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
843 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
846 else if (GET_CODE (rhs) == ASHIFT
847 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
848 && INTVAL (XEXP (rhs, 1)) >= 0
849 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
851 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
/* Same base on both sides: combine into one MULT with summed coeffs. */
855 if (rtx_equal_p (lhs, rhs))
857 tem = simplify_gen_binary (MULT, mode, lhs,
858 GEN_INT (coeff0 + coeff1));
859 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
863 /* If one of the operands is a PLUS or a MINUS, see if we can
864 simplify this by the associative law.
865 Don't use the associative law for floating point.
866 The inaccuracy makes it nonassociative,
867 and subtle programs can break if operations are associated. */
869 if (INTEGRAL_MODE_P (mode)
870 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
871 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
872 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
878 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
879 using cc0, in which case we want to leave it as a COMPARE
880 so we can distinguish it from a register-register-copy.
882 In IEEE floating point, x-0 is not the same as x. */
884 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
885 || ! FLOAT_MODE_P (mode) || flag_fast_math)
886 && op1 == CONST0_RTX (mode))
890 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
891 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
892 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
893 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
895 rtx xop00 = XEXP (op0, 0);
896 rtx xop10 = XEXP (op1, 0);
899 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
901 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
902 && GET_MODE (xop00) == GET_MODE (xop10)
903 && REGNO (xop00) == REGNO (xop10)
904 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
905 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
912 /* None of these optimizations can be done for IEEE
floating point.  */
914 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
915 && FLOAT_MODE_P (mode) && ! flag_fast_math)
918 /* We can't assume x-x is 0 even with non-IEEE floating point,
919 but since it is zero except in very strange circumstances, we
920 will treat it as zero with -ffast-math. */
921 if (rtx_equal_p (op0, op1)
922 && ! side_effects_p (op0)
923 && (! FLOAT_MODE_P (mode) || flag_fast_math))
924 return CONST0_RTX (mode);
926 /* Change subtraction from zero into negation. */
927 if (op0 == CONST0_RTX (mode))
928 return gen_rtx_NEG (mode, op1);
930 /* (-1 - a) is ~a. */
931 if (op0 == constm1_rtx)
932 return gen_rtx_NOT (mode, op1);
934 /* Subtracting 0 has no effect. */
935 if (op1 == CONST0_RTX (mode))
938 /* See if this is something like X * C - X or vice versa or
939 if the multiplication is written as a shift. If so, we can
940 distribute and make a new multiply, shift, or maybe just
941 have X (if C is 2 in the example above). But don't make
942 real multiply if we didn't have one before. */
944 if (! FLOAT_MODE_P (mode))
946 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
947 rtx lhs = op0, rhs = op1;
/* MINUS counterpart of the PLUS coefficient-collection above. */
950 if (GET_CODE (lhs) == NEG)
951 coeff0 = -1, lhs = XEXP (lhs, 0);
952 else if (GET_CODE (lhs) == MULT
953 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
955 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
958 else if (GET_CODE (lhs) == ASHIFT
959 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
960 && INTVAL (XEXP (lhs, 1)) >= 0
961 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
963 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
967 if (GET_CODE (rhs) == NEG)
968 coeff1 = - 1, rhs = XEXP (rhs, 0);
969 else if (GET_CODE (rhs) == MULT
970 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
972 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
975 else if (GET_CODE (rhs) == ASHIFT
976 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
977 && INTVAL (XEXP (rhs, 1)) >= 0
978 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
980 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
984 if (rtx_equal_p (lhs, rhs))
986 tem = simplify_gen_binary (MULT, mode, lhs,
987 GEN_INT (coeff0 - coeff1));
988 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
992 /* (a - (-b)) -> (a + b). */
993 if (GET_CODE (op1) == NEG)
994 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
996 /* If one of the operands is a PLUS or a MINUS, see if we can
997 simplify this by the associative law.
998 Don't use the associative law for floating point.
999 The inaccuracy makes it nonassociative,
1000 and subtle programs can break if operations are associated. */
1002 if (INTEGRAL_MODE_P (mode)
1003 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1004 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1005 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1008 /* Don't let a relocatable value get a negative coeff. */
1009 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1010 return plus_constant (op0, - INTVAL (op1));
1012 /* (x - (x & y)) -> (x & ~y) */
1013 if (GET_CODE (op1) == AND)
1015 if (rtx_equal_p (op0, XEXP (op1, 0)))
1016 return simplify_gen_binary (AND, mode, op0,
1017 gen_rtx_NOT (mode, XEXP (op1, 1)));
1018 if (rtx_equal_p (op0, XEXP (op1, 1)))
1019 return simplify_gen_binary (AND, mode, op0,
1020 gen_rtx_NOT (mode, XEXP (op1, 0)));
/* MULT: x * -1 is -x (fold the negation when possible). */
1025 if (op1 == constm1_rtx)
1027 tem = simplify_unary_operation (NEG, mode, op0, mode);
1029 return tem ? tem : gen_rtx_NEG (mode, op0);
1032 /* In IEEE floating point, x*0 is not always 0. */
1033 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1034 || ! FLOAT_MODE_P (mode) || flag_fast_math)
1035 && op1 == CONST0_RTX (mode)
1036 && ! side_effects_p (op0))
1039 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1040 However, ANSI says we can drop signals,
1041 so we can do this anyway. */
1042 if (op1 == CONST1_RTX (mode))
1045 /* Convert multiply by constant power of two into shift unless
1046 we are still generating RTL. This test is a kludge. */
1047 if (GET_CODE (op1) == CONST_INT
1048 && (val = exact_log2 (INTVAL (op1))) >= 0
1049 /* If the mode is larger than the host word size, and the
1050 uppermost bit is set, then this isn't a power of two due
1051 to implicit sign extension. */
1052 && (width <= HOST_BITS_PER_WIDE_INT
1053 || val != HOST_BITS_PER_WIDE_INT - 1)
1054 && ! rtx_equal_function_value_matters)
1055 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1057 if (GET_CODE (op1) == CONST_DOUBLE
1058 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
1062 int op1is2, op1ism1;
1064 if (setjmp (handler))
1067 set_float_handler (handler);
1068 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1069 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
1070 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
1071 set_float_handler (NULL_PTR);
1073 /* x*2 is x+x and x*(-1) is -x */
1074 if (op1is2 && GET_MODE (op0) == mode)
1075 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1077 else if (op1ism1 && GET_MODE (op0) == mode)
1078 return gen_rtx_NEG (mode, op0);
/* IOR: identities with 0, all-ones, self, and complement. */
1083 if (op1 == const0_rtx)
1085 if (GET_CODE (op1) == CONST_INT
1086 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1088 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1090 /* A | (~A) -> -1 */
1091 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1092 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1093 && ! side_effects_p (op0)
1094 && GET_MODE_CLASS (mode) != MODE_CC)
/* XOR: identities with 0, all-ones (becomes NOT), and self. */
1099 if (op1 == const0_rtx)
1101 if (GET_CODE (op1) == CONST_INT
1102 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1103 return gen_rtx_NOT (mode, op0);
1104 if (op0 == op1 && ! side_effects_p (op0)
1105 && GET_MODE_CLASS (mode) != MODE_CC)
/* AND: identities with 0, all-ones, self, and complement. */
1110 if (op1 == const0_rtx && ! side_effects_p (op0))
1112 if (GET_CODE (op1) == CONST_INT
1113 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1115 if (op0 == op1 && ! side_effects_p (op0)
1116 && GET_MODE_CLASS (mode) != MODE_CC)
1119 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1120 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1121 && ! side_effects_p (op0)
1122 && GET_MODE_CLASS (mode) != MODE_CC)
1127 /* Convert divide by power of two into shift (divide by 1 handled
below).  */
1129 if (GET_CODE (op1) == CONST_INT
1130 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
1131 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1133 /* ... fall through ... */
1136 if (op1 == CONST1_RTX (mode))
1139 /* In IEEE floating point, 0/x is not always 0. */
1140 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1141 || ! FLOAT_MODE_P (mode) || flag_fast_math)
1142 && op0 == CONST0_RTX (mode)
1143 && ! side_effects_p (op1))
1146 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1147 /* Change division by a constant into multiplication. Only do
1148 this with -ffast-math until an expert says it is safe in
general.  */
1150 else if (GET_CODE (op1) == CONST_DOUBLE
1151 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
1152 && op1 != CONST0_RTX (mode)
1156 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1158 if (! REAL_VALUES_EQUAL (d, dconst0))
1160 #if defined (REAL_ARITHMETIC)
1161 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1162 return gen_rtx_MULT (mode, op0,
1163 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1166 gen_rtx_MULT (mode, op0,
1167 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1175 /* Handle modulus by power of two (mod with 1 handled below). */
1176 if (GET_CODE (op1) == CONST_INT
1177 && exact_log2 (INTVAL (op1)) > 0)
1178 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1180 /* ... fall through ... */
1183 if ((op0 == const0_rtx || op1 == const1_rtx)
1184 && ! side_effects_p (op0) && ! side_effects_p (op1))
1190 /* Rotating ~0 always results in ~0. */
1191 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1192 && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
1193 && ! side_effects_p (op1))
1196 /* ... fall through ... */
/* Shifts: by 0, or of 0, need no work. */
1201 if (op1 == const0_rtx)
1203 if (op0 == const0_rtx && ! side_effects_p (op1))
/* SMIN/SMAX/UMIN/UMAX against the mode's extreme value, or of equal
   operands, fold to one operand. */
1208 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1209 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width -1)
1210 && ! side_effects_p (op0))
1212 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1217 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1218 && ((unsigned HOST_WIDE_INT) INTVAL (op1)
1219 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1220 && ! side_effects_p (op0))
1222 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1227 if (op1 == const0_rtx && ! side_effects_p (op0))
1229 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1234 if (op1 == constm1_rtx && ! side_effects_p (op0))
1236 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1247 /* Get the integer argument values in two forms:
1248 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1250 arg0 = INTVAL (op0);
1251 arg1 = INTVAL (op1);
1253 if (width < HOST_BITS_PER_WIDE_INT)
1255 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1256 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1259 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1260 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1263 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1264 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1272 /* Compute the value of the arithmetic. */
1277 val = arg0s + arg1s;
1281 val = arg0s - arg1s;
1285 val = arg0s * arg1s;
1291 val = arg0s / arg1s;
1297 val = arg0s % arg1s;
1303 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1309 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1325 /* If shift count is undefined, don't fold it; let the machine do
1326 what it wants. But truncate it if the machine will do that. */
1330 #ifdef SHIFT_COUNT_TRUNCATED
1331 if (SHIFT_COUNT_TRUNCATED)
1335 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1342 #ifdef SHIFT_COUNT_TRUNCATED
1343 if (SHIFT_COUNT_TRUNCATED)
1347 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1354 #ifdef SHIFT_COUNT_TRUNCATED
1355 if (SHIFT_COUNT_TRUNCATED)
1359 val = arg0s >> arg1;
1361 /* Bootstrap compiler may not have sign extended the right shift.
1362 Manually extend the sign to insure bootstrap cc matches gcc. */
1363 if (arg0s < 0 && arg1 > 0)
1364 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1373 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1374 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1382 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1383 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1387 /* Do nothing here. */
1391 val = arg0s <= arg1s ? arg0s : arg1s;
1395 val = ((unsigned HOST_WIDE_INT) arg0
1396 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1400 val = arg0s > arg1s ? arg0s : arg1s;
1404 val = ((unsigned HOST_WIDE_INT) arg0
1405 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1412 val = trunc_int_for_mode (val, mode);
1414 return GEN_INT (val);
1417 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1420    Rather than test for specific case, we do this by a brute-force method
1421    and do all possible simplifications until no more changes occur.  Then
1422    we rebuild the operation.  */
/* NOTE(review): this region of the file appears to have lines elided (the
   embedded line numbers jump); the surviving code is kept byte-identical
   below and only review comments have been added.  The function flattens a
   PLUS/MINUS tree into an array OPS[] of operands, each with a sign flag in
   NEGS[] (nonzero = logically negated), simplifies pairs, then rebuilds.  */
1425 simplify_plus_minus (code, mode, op0, op1)
1427      enum machine_mode mode;
/* N_OPS counts operands collected so far; INPUT_OPS/INPUT_CONSTS record the
   size of the original expression so we can tell whether we improved it.  */
1433   int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
1434   int first = 1, negate = 0, changed;
1437   memset ((char *) ops, 0, sizeof ops);
1439   /* Set up the two operands and then expand them until nothing has been
1440      changed.  If we run out of room in our array, give up; this should
1441      almost never happen.  */
/* For a MINUS, the second operand enters the list logically negated.  */
1443   ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
1450   for (i = 0; i < n_ops; i++)
1451     switch (GET_CODE (ops[i]))
/* A nested PLUS/MINUS is split into two list entries; the second operand
   of a MINUS gets its sign flag inverted.  */
1458 	  ops[n_ops] = XEXP (ops[i], 1);
1459 	  negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
1460 	  ops[i] = XEXP (ops[i], 0);
/* Strip the wrapper and flip this entry's sign flag (presumably the NEG
   case — the case label is elided here; confirm against the full file).  */
1466 	  ops[i] = XEXP (ops[i], 0);
1467 	  negs[i] = ! negs[i];
/* Strip a wrapper keeping the same sign (case label elided).  */
1472 	  ops[i] = XEXP (ops[i], 0);
1478 	  /* ~a -> (-a - 1) */
/* Append a -1 entry with this entry's sign, then keep A negated.  */
1481 	      ops[n_ops] = constm1_rtx;
1482 	      negs[n_ops++] = negs[i];
1483 	      ops[i] = XEXP (ops[i], 0);
1484 	      negs[i] = ! negs[i];
/* A negated CONST_INT is folded immediately into its negative value.  */
1491 	    ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
1499   /* If we only have two operands, we can't do anything.  */
1503   /* Now simplify each pair of operands until nothing changes.  The first
1504      time through just simplify constants against each other.  */
1511       for (i = 0; i < n_ops - 1; i++)
1512 	for (j = i + 1; j < n_ops; j++)
1513 	  if (ops[i] != 0 && ops[j] != 0
1514 	      && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
1516 	      rtx lhs = ops[i], rhs = ops[j];
1517 	      enum rtx_code ncode = PLUS;
/* Order the pair so that at most the right-hand operand is negated,
   turning the combination into a plain PLUS or MINUS.  */
1519 	      if (negs[i] && ! negs[j])
1520 		lhs = ops[j], rhs = ops[i], ncode = MINUS;
1521 	      else if (! negs[i] && negs[j])
1524 	      tem = simplify_binary_operation (ncode, mode, lhs, rhs);
/* On success the combined value replaces entry I and entry J is
   vacated (set to 0); vacated slots are compacted away below.  */
1527 		  ops[i] = tem, ops[j] = 0;
1528 		  negs[i] = negs[i] && negs[j];
1529 		  if (GET_CODE (tem) == NEG)
1530 		    ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
/* Keep CONST_INT entries in un-negated canonical form.  */
1532 		  if (GET_CODE (ops[i]) == CONST_INT && negs[i])
1533 		    ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
1541   /* Pack all the operands to the lower-numbered entries and give up if
1542      we didn't reduce the number of operands we had.  Make sure we
1543      count a CONST as two operands.  If we have the same number of
1544      operands, but have made more CONSTs than we had, this is also
1545      an improvement, so accept it.  */
1547   for (i = 0, j = 0; j < n_ops; j++)
1550 	ops[i] = ops[j], negs[i++] = negs[j];
1551 	if (GET_CODE (ops[j]) == CONST)
/* Not an improvement: bail out (the return statement is elided here).  */
1555   if (i + n_consts > input_ops
1556       || (i + n_consts == input_ops && n_consts <= input_consts))
1561   /* If we have a CONST_INT, put it last.  */
1562   for (i = 0; i < n_ops - 1; i++)
1563     if (GET_CODE (ops[i]) == CONST_INT)
1565 	tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
1566 	j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
1569   /* Put a non-negated operand first.  If there aren't any, make all
1570      operands positive and negate the whole thing later.  */
1571   for (i = 0; i < n_ops && negs[i]; i++)
1576       for (i = 0; i < n_ops; i++)
/* Swap the first non-negated operand into slot 0.  */
1582       tem = ops[0], ops[0] = ops[i], ops[i] = tem;
1583       j = negs[0], negs[0] = negs[i], negs[i] = j;
1586   /* Now make the result by performing the requested operations.  */
1588   for (i = 1; i < n_ops; i++)
1589     result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
/* If everything was negated above, wrap the rebuilt sum in a NEG.  */
1591   return negate ? gen_rtx_NEG (mode, result) : result;
/* NOTE(review): the struct declaration line for `struct cfc_args' is elided
   above these field lines; fields visible here carry comparison inputs and
   outputs for check_fold_consts below.  */
1596   rtx op0, op1; /* Input */
1597   int equal, op0lt, op1lt; /* Output */
/* Compare the two CONST_DOUBLE operands in *DATA (a struct cfc_args) and
   fill in the output fields.  Takes a void-style DATA pointer so it can be
   run under do_float_handler (see the caller in
   simplify_relational_operation).  */
1602 check_fold_consts (data)
1605   struct cfc_args *args = (struct cfc_args *) data;
1606   REAL_VALUE_TYPE d0, d1;
1608   /* We may possibly raise an exception while reading the value.  */
/* Pessimistically mark the result unordered first; it is cleared only if
   we get all the way through the comparison without trapping.  */
1609   args->unordered = 1;
1610   REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1611   REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1613   /* Comparisons of Inf versus Inf are ordered.  */
/* If either value is a NaN the comparison stays unordered (the body of
   this `if' is elided here — presumably an early return; confirm).  */
1614   if (REAL_VALUE_ISNAN (d0)
1615       || REAL_VALUE_ISNAN (d1))
/* Both values are ordered: record equality and both orderings.  */
1617   args->equal = REAL_VALUES_EQUAL (d0, d1);
1618   args->op0lt = REAL_VALUES_LESS (d0, d1);
1619   args->op1lt = REAL_VALUES_LESS (d1, d0);
1620   args->unordered = 0;
1623 /* Like simplify_binary_operation except used for relational operators.
1624    MODE is the mode of the operands, not that of the result.  If MODE
1625    is VOIDmode, both operands must also be VOIDmode and we compare the
1626    operands in "infinite precision".
1628    If no simplification is possible, this function returns zero.  Otherwise,
1629    it returns either const_true_rtx or const0_rtx.  */
/* NOTE(review): lines are elided in this region (embedded numbering jumps);
   code below is byte-identical to what survives, comments only added.  */
1632 simplify_relational_operation (code, mode, op0, op1)
1634      enum machine_mode mode;
1637   int equal, op0lt, op0ltu, op1lt, op1ltu;
/* A VOIDmode comparison is only valid when both operands are VOIDmode
   (the failure branch here is elided).  */
1640   if (mode == VOIDmode
1641       && (GET_MODE (op0) != VOIDmode
1642 	  || GET_MODE (op1) != VOIDmode))
1645   /* If op0 is a compare, extract the comparison arguments from it.  */
1646   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1647     op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1649   /* We can't simplify MODE_CC values since we don't know what the
1650      actual comparison is.  */
1651   if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1658   /* Make sure the constant is second.  */
/* Swapping operands requires swapping the condition code too.  */
1659   if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
1660       || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
1662       tem = op0, op0 = op1, op1 = tem;
1663       code = swap_condition (code);
1666   /* For integer comparisons of A and B maybe we can simplify A - B and can
1667      then simplify a comparison of that with zero.  If A and B are both either
1668      a register or a CONST_INT, this can't help; testing for these cases will
1669      prevent infinite recursion here and speed things up.
1671      If CODE is an unsigned comparison, then we can never do this optimization,
1672      because it gives an incorrect result if the subtraction wraps around zero.
1673      ANSI C defines unsigned operations such that they never overflow, and
1674      thus such cases can not be ignored.  */
1676   if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
1677       && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
1678 	    && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
1679       && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1680       && code != GTU && code != GEU && code != LTU && code != LEU)
1681     return simplify_relational_operation (signed_condition (code),
1682 					  mode, tem, const0_rtx);
/* Under -ffast-math NaNs are assumed away, so ORDERED is always true
   (and UNORDERED always false; its return is elided below).  */
1684   if (flag_fast_math && code == ORDERED)
1685     return const_true_rtx;
1687   if (flag_fast_math && code == UNORDERED)
1690   /* For non-IEEE floating-point, if the two operands are equal, we know the
1692   if (rtx_equal_p (op0, op1)
1693       && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1694 	  || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
1695     equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1697   /* If the operands are floating-point constants, see if we can fold
1699 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1700   else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
1701 	   && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
1703       struct cfc_args args;
1705       /* Setup input for check_fold_consts() */
/* Run the comparison under the float trap handler; a zero return means
   the comparison trapped (the handling between these lines is elided).  */
1710       if (!do_float_handler (check_fold_consts, (PTR) &args))
1723 	    return const_true_rtx;
1736       /* Receive output from check_fold_consts() */
/* Floating comparison: the signed and unsigned orderings coincide.  */
1738       op0lt = op0ltu = args.op0lt;
1739       op1lt = op1ltu = args.op1lt;
1741 #endif  /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1743   /* Otherwise, see if the operands are both integers.  */
1744   else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1745 	   && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
1746 	   && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
1748       int width = GET_MODE_BITSIZE (mode);
/* Each constant is handled as a two-word value: low word l*, high word h*,
   in both signed (s) and unsigned (u) flavors.  */
1749       HOST_WIDE_INT l0s, h0s, l1s, h1s;
1750       unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1752       /* Get the two words comprising each integer constant.  */
1753       if (GET_CODE (op0) == CONST_DOUBLE)
1755 	  l0u = l0s = CONST_DOUBLE_LOW (op0);
1756 	  h0u = h0s = CONST_DOUBLE_HIGH (op0);
/* CONST_INT: the high word is the sign extension of the low word.  */
1760 	  l0u = l0s = INTVAL (op0);
1761 	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
1764       if (GET_CODE (op1) == CONST_DOUBLE)
1766 	  l1u = l1s = CONST_DOUBLE_LOW (op1);
1767 	  h1u = h1s = CONST_DOUBLE_HIGH (op1);
1771 	  l1u = l1s = INTVAL (op1);
1772 	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
1775       /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1776 	 we have to sign or zero-extend the values.  */
1777       if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
/* Zero-extend the unsigned copies by masking to WIDTH bits...  */
1779 	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
1780 	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
/* ...and sign-extend the signed copies from bit WIDTH-1.  */
1782 	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1783 	    l0s |= ((HOST_WIDE_INT) (-1) << width);
1785 	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1786 	    l1s |= ((HOST_WIDE_INT) (-1) << width);
/* When the value fits in one host word, derive the high words.  */
1788       if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
1789 	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
/* Double-word comparison: compare high words first, then low words
   (low words always compare unsigned).  */
1791       equal = (h0u == h1u && l0u == l1u);
1792       op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
1793       op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
1794       op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
1795       op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
1798   /* Otherwise, there are some code-specific tests we can make.  */
/* The switch on CODE and several case labels are elided in this region;
   the fragments below belong to individual cases of that switch.  */
1804 	/* References to the frame plus a constant or labels cannot
1805 	   be zero, but a SYMBOL_REF can due to #pragma weak.  */
1806 	if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1807 	     || GET_CODE (op0) == LABEL_REF)
1808 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1809 	    /* On some machines, the ap reg can be 0 sometimes.  */
1810 	    && op0 != arg_pointer_rtx
/* Same known-nonzero test, but for the opposite condition code, so here
   the comparison is known true.  */
1817 	if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1818 	     || GET_CODE (op0) == LABEL_REF)
1819 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1820 	    && op0 != arg_pointer_rtx
1823 	  return const_true_rtx;
1827 	/* Unsigned values are never negative.  */
1828 	if (op1 == const0_rtx)
1829 	  return const_true_rtx;
1833 	if (op1 == const0_rtx)
1838 	/* Unsigned values are never greater than the largest
1840 	if (GET_CODE (op1) == CONST_INT
1841 	    && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
1842 	    && INTEGRAL_MODE_P (mode))
1843 	  return const_true_rtx;
/* Mirror test against the all-ones constant for the opposite code.  */
1847 	if (GET_CODE (op1) == CONST_INT
1848 	    && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
1849 	    && INTEGRAL_MODE_P (mode))
1860   /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
/* Final dispatch: map the condition code onto the precomputed ordering
   flags (the case labels between these returns are elided).  */
1866       return equal ? const_true_rtx : const0_rtx;
1869       return ! equal ? const_true_rtx : const0_rtx;
1872       return op0lt ? const_true_rtx : const0_rtx;
1875       return op1lt ? const_true_rtx : const0_rtx;
1877       return op0ltu ? const_true_rtx : const0_rtx;
1879       return op1ltu ? const_true_rtx : const0_rtx;
1882       return equal || op0lt ? const_true_rtx : const0_rtx;
1885       return equal || op1lt ? const_true_rtx : const0_rtx;
1887       return equal || op0ltu ? const_true_rtx : const0_rtx;
1889       return equal || op1ltu ? const_true_rtx : const0_rtx;
1891       return const_true_rtx;
1899 /* Simplify CODE, an operation with result mode MODE and three operands,
1900    OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
1901    a constant.  Return 0 if no simplifications is possible.  */
/* NOTE(review): lines are elided in this region (embedded numbering jumps);
   surviving code is byte-identical, comments only added.  */
1904 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
1906      enum machine_mode mode, op0_mode;
1909   unsigned int width = GET_MODE_BITSIZE (mode);
1911   /* VOIDmode means "infinite" precision.  */
1913     width = HOST_BITS_PER_WIDE_INT;
/* SIGN_EXTRACT / ZERO_EXTRACT of a constant: OP1 is the field width in
   bits and OP2 the bit position (the case labels are elided here).  */
1919       if (GET_CODE (op0) == CONST_INT
1920 	  && GET_CODE (op1) == CONST_INT
1921 	  && GET_CODE (op2) == CONST_INT
1922 	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
1923 	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
1925 	  /* Extracting a bit-field from a constant */
1926 	  HOST_WIDE_INT val = INTVAL (op0);
/* Shift the field down to bit 0; the bit position counts from the
   opposite end when BITS_BIG_ENDIAN.  */
1928 	  if (BITS_BIG_ENDIAN)
1929 	    val >>= (GET_MODE_BITSIZE (op0_mode)
1930 		     - INTVAL (op2) - INTVAL (op1));
1932 	    val >>= INTVAL (op2);
1934 	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
1936 	      /* First zero-extend.  */
1937 	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
1938 	      /* If desired, propagate sign bit.  */
1939 	      if (code == SIGN_EXTRACT
1940 		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
1941 		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
1944 	  /* Clear the bits that don't belong in our mode,
1945 	     unless they and our sign bit are all one.
1946 	     So we get either a reasonable negative value or a reasonable
1947 	     unsigned value for this mode.  */
1948 	  if (width < HOST_BITS_PER_WIDE_INT
1949 	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
1950 		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
1951 	    val &= ((HOST_WIDE_INT) 1 << width) - 1;
1953 	  return GEN_INT (val);
/* Conditional select (presumably the IF_THEN_ELSE case — label elided):
   a constant condition picks OP1 or OP2 outright.  */
1958       if (GET_CODE (op0) == CONST_INT)
1959 	return op0 != const0_rtx ? op1 : op2;
1961       /* Convert a == b ? b : a to "a".  */
/* Guarded by ! side_effects_p so no evaluation is lost, and restricted
   for float modes unless -ffast-math (NaN/-0.0 concerns).  */
1962       if (GET_CODE (op0) == NE && ! side_effects_p (op0)
1963 	  && (! FLOAT_MODE_P (mode) || flag_fast_math)
1964 	  && rtx_equal_p (XEXP (op0, 0), op1)
1965 	  && rtx_equal_p (XEXP (op0, 1), op2))
1967       else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
1968 	       && (! FLOAT_MODE_P (mode) || flag_fast_math)
1969 	       && rtx_equal_p (XEXP (op0, 1), op1)
1970 	       && rtx_equal_p (XEXP (op0, 0), op2))
/* Any other comparison condition: try to fold the comparison itself.  */
1972       else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
/* Infer the comparison mode from whichever operand has one.  */
1974 	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
1975 					? GET_MODE (XEXP (op0, 1))
1976 					: GET_MODE (XEXP (op0, 0)));
1978 	  if (cmp_mode == VOIDmode)
1979 	    cmp_mode = op0_mode;
1980 	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
1981 						XEXP (op0, 0), XEXP (op0, 1));
1983 	  /* See if any simplifications were possible.  */
/* A folded-to-constant condition selects one arm (returns elided).  */
1984 	  if (temp == const0_rtx)
1986 	  else if (temp == const1_rtx)
1991 	  /* Look for happy constants in op1 and op2.  */
1992 	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
1994 	      HOST_WIDE_INT t = INTVAL (op1);
1995 	      HOST_WIDE_INT f = INTVAL (op2);
/* cond ? STORE_FLAG_VALUE : 0 is just the comparison itself;
   cond ? 0 : STORE_FLAG_VALUE is the reversed comparison.  */
1997 	      if (t == STORE_FLAG_VALUE && f == 0)
1998 		code = GET_CODE (op0);
1999 	      else if (t == 0 && f == STORE_FLAG_VALUE)
2002 		  tmp = reversed_comparison_code (op0, NULL_RTX);
/* Rebuild the bare comparison in the result mode.  */
2010 	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2022 /* Simplify X, an rtx expression.
2024 Return the simplified expression or NULL if no simplifications
2027 This is the preferred entry point into the simplification routines;
2028 however, we still allow passes to call the more specific routines.
2030 Right now GCC has three (yes, three) major bodies of RTL simplification
2031 code that need to be unified.
2033 1. fold_rtx in cse.c. This code uses various CSE specific
2034 information to aid in RTL simplification.
2036 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2037 it uses combine specific information to aid in RTL
2040 3. The routines in this file.
2043 Long term we want to only have one body of simplification code; to
2044 get to that state I recommend the following steps:
2046 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2047 which are not pass dependent state into these routines.
2049 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2050 use this routine whenever possible.
2052 3. Allow for pass dependent state to be provided to these
2053 routines and add simplifications based on the pass dependent
2054 state. Remove code from cse.c & combine.c that becomes
2057 It will take time, but ultimately the compiler will be easier to
2058 maintain and improve. It's totally silly that when we add a
2059 simplification that it needs to be added to 4 places (3 for RTL
2060 simplification and 1 for tree simplification). */
2067 enum machine_mode mode;
2069 mode = GET_MODE (x);
2070 code = GET_CODE (x);
2072 switch (GET_RTX_CLASS (code))
2075 return simplify_unary_operation (code, mode,
2076 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2079 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2083 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2084 XEXP (x, 0), XEXP (x, 1), XEXP (x, 2));
2087 return simplify_relational_operation (code,
2088 (GET_MODE (XEXP (x, 0)) != VOIDmode
2089 ? GET_MODE (XEXP (x, 0))
2090 : GET_MODE (XEXP (x, 1))),
2091 XEXP (x, 0), XEXP (x, 1));