1 /* Common subexpression elimination for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
24 /* stdio.h must precede rtl.h for FFS. */
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
41 /* Simplification and canonicalization of RTL. */
43 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
44 virtual regs here because the simplify_*_operation routines are called
45 by integrate.c, which is called before virtual register instantiation.
47 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
48 a header file so that their definitions can be shared with the
49 simplification routines in simplify-rtx.c. Until then, do not
50 change these macros without also changing the copy in simplify-rtx.c. */
/* Fix: in the PLUS arm below, X is known to be a PLUS, so comparing X
   itself against arg_pointer_rtx could never succeed; the intent (as in
   every sibling test of that arm) is to examine the base, XEXP (X, 0).  */
52 #define FIXED_BASE_PLUS_P(X) \
53 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
54 || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
55 || (X) == virtual_stack_vars_rtx \
56 || (X) == virtual_incoming_args_rtx \
57 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
58 && (XEXP (X, 0) == frame_pointer_rtx \
59 || XEXP (X, 0) == hard_frame_pointer_rtx \
60 || (XEXP (X, 0) == arg_pointer_rtx \
61 && fixed_regs[ARG_POINTER_REGNUM]) \
62 || XEXP (X, 0) == virtual_stack_vars_rtx \
63 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
64 || GET_CODE (X) == ADDRESSOF)
66 /* Similar, but also allows reference to the stack pointer.
68 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
69 arg_pointer_rtx by itself is nonzero, because on at least one machine,
70 the i960, the arg pointer is zero when it is unused. */
/* Fix: same dead condition as in FIXED_BASE_PLUS_P — inside the first
   PLUS arm, X is a PLUS, so the arg-pointer test must look at the base
   XEXP (X, 0) rather than at X itself.  */
72 #define NONZERO_BASE_PLUS_P(X) \
73 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
74 || (X) == virtual_stack_vars_rtx \
75 || (X) == virtual_incoming_args_rtx \
76 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
77 && (XEXP (X, 0) == frame_pointer_rtx \
78 || XEXP (X, 0) == hard_frame_pointer_rtx \
79 || (XEXP (X, 0) == arg_pointer_rtx \
80 && fixed_regs[ARG_POINTER_REGNUM]) \
81 || XEXP (X, 0) == virtual_stack_vars_rtx \
82 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
83 || (X) == stack_pointer_rtx \
84 || (X) == virtual_stack_dynamic_rtx \
85 || (X) == virtual_outgoing_args_rtx \
86 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
87 && (XEXP (X, 0) == stack_pointer_rtx \
88 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
89 || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
90 || GET_CODE (X) == ADDRESSOF)
/* Forward declarations for the static helpers defined later in this file.
   NOTE(review): the listing omits intermediate lines here (numbering gap),
   so the remaining parameters of simplify_plus_minus are not visible.  */
93 static rtx simplify_plus_minus PARAMS ((enum rtx_code, enum machine_mode,
95 static void check_fold_consts PARAMS ((PTR));
97 /* Make a binary operation by properly ordering the operands and
98 seeing if the expression folds. */
/* Build (and possibly fold) the binary operation CODE in MODE on OP0/OP1:
   canonicalize operand order for commutative codes, try constant folding,
   special-case PLUS/MINUS of a CONST_INT, otherwise emit the raw rtx.
   NOTE(review): this listing omits intermediate source lines (numbering
   gaps), so parts of the declarations and control flow are not visible;
   code below is kept byte-identical.  */
101 simplify_gen_binary (code, mode, op0, op1)
103 enum machine_mode mode;
108 /* Put complex operands first and constants second if commutative. */
109 if (GET_RTX_CLASS (code) == 'c'
110 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
111 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
112 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
113 || (GET_CODE (op0) == SUBREG
114 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
115 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
116 tem = op0, op0 = op1, op1 = tem;
118 /* If this simplifies, do it. */
119 tem = simplify_binary_operation (code, mode, op0, op1);
124 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
125 just form the operation. */
/* plus_constant already knows how to merge a CONST_INT into an address,
   so prefer it for PLUS/MINUS with an integer second operand.  */
127 if (code == PLUS && GET_CODE (op1) == CONST_INT
128 && GET_MODE (op0) != VOIDmode)
129 return plus_constant (op0, INTVAL (op1));
130 else if (code == MINUS && GET_CODE (op1) == CONST_INT
131 && GET_MODE (op0) != VOIDmode)
132 return plus_constant (op0, - INTVAL (op1));
134 return gen_rtx_fmt_ee (code, mode, op0, op1);
137 /* Try to simplify a unary operation CODE whose output mode is to be
138 MODE with input operand OP whose mode was originally OP_MODE.
139 Return zero if no simplification can be made. */
/* Constant-fold the unary operation CODE (output mode MODE) applied to OP,
   whose original mode was OP_MODE; returns 0 when no folding is possible.
   NOTE(review): this listing omits many intermediate source lines (the
   embedded numbering has gaps), so switch scaffolding and some branches
   are not visible; the visible code is kept byte-identical.  */
142 simplify_unary_operation (code, mode, op, op_mode)
144 enum machine_mode mode;
146 enum machine_mode op_mode;
148 register int width = GET_MODE_BITSIZE (mode);
150 /* The order of these tests is critical so that, for example, we don't
151 check the wrong mode (input vs. output) for a conversion operation,
152 such as FIX. At some point, this should be simplified. */
154 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
/* FLOAT of a VOIDmode integer constant: split it into low/high words and
   convert via REAL_VALUE_FROM_INT (or plain doubles without
   REAL_ARITHMETIC).  */
156 if (code == FLOAT && GET_MODE (op) == VOIDmode
157 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
159 HOST_WIDE_INT hv, lv;
162 if (GET_CODE (op) == CONST_INT)
163 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
165 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
167 #ifdef REAL_ARITHMETIC
168 REAL_VALUE_FROM_INT (d, lv, hv, mode);
/* Fallback path: scale by 2^HOST_BITS_PER_WIDE_INT in two half-word
   steps to avoid shifting by the full word width.  */
173 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
174 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
175 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
181 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
182 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
183 d += (double) (unsigned HOST_WIDE_INT) lv;
185 #endif /* REAL_ARITHMETIC */
186 d = real_value_truncate (mode, d);
187 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Same idea for UNSIGNED_FLOAT, but the operand is interpreted as
   unsigned, so negative-looking values need OP_MODE to disambiguate.  */
189 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
190 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
192 HOST_WIDE_INT hv, lv;
195 if (GET_CODE (op) == CONST_INT)
196 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
198 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
200 if (op_mode == VOIDmode)
202 /* We don't know how to interpret negative-looking numbers in
203 this case, so don't try to fold those. */
207 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
210 hv = 0, lv &= GET_MODE_MASK (op_mode);
212 #ifdef REAL_ARITHMETIC
213 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
216 d = (double) (unsigned HOST_WIDE_INT) hv;
217 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
218 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
219 d += (double) (unsigned HOST_WIDE_INT) lv;
220 #endif /* REAL_ARITHMETIC */
221 d = real_value_truncate (mode, d);
222 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Single-word CONST_INT operand: fold per-opcode into VAL (the case
   labels themselves fall in the omitted lines).  */
226 if (GET_CODE (op) == CONST_INT
227 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
229 register HOST_WIDE_INT arg0 = INTVAL (op);
230 register HOST_WIDE_INT val;
243 val = (arg0 >= 0 ? arg0 : - arg0);
247 /* Don't use ffs here. Instead, get low order bit and then its
248 number. If arg0 is zero, this will return 0, as desired. */
249 arg0 &= GET_MODE_MASK (mode);
250 val = exact_log2 (arg0 & (- arg0)) + 1;
258 if (op_mode == VOIDmode)
260 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
262 /* If we were really extending the mode,
263 we would have to distinguish between zero-extension
264 and sign-extension. */
265 if (width != GET_MODE_BITSIZE (op_mode))
269 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
270 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
276 if (op_mode == VOIDmode)
278 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
280 /* If we were really extending the mode,
281 we would have to distinguish between zero-extension
282 and sign-extension. */
283 if (width != GET_MODE_BITSIZE (op_mode))
287 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
290 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode))
292 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
293 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
306 val = trunc_int_for_mode (val, mode);
308 return GEN_INT (val);
311 /* We can do some operations on integer CONST_DOUBLEs. Also allow
312 for a DImode operation on a CONST_INT. */
313 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
314 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
316 HOST_WIDE_INT l1, h1, lv, hv;
318 if (GET_CODE (op) == CONST_DOUBLE)
319 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
321 l1 = INTVAL (op), h1 = l1 < 0 ? -1 : 0;
331 neg_double (l1, h1, &lv, &hv);
336 neg_double (l1, h1, &lv, &hv);
/* FFS over a double word: search the high word only when the low word
   is zero.  */
344 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
346 lv = exact_log2 (l1 & (-l1)) + 1;
350 /* This is just a change-of-mode, so do nothing. */
355 if (op_mode == VOIDmode
356 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
360 lv = l1 & GET_MODE_MASK (op_mode);
364 if (op_mode == VOIDmode
365 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
369 lv = l1 & GET_MODE_MASK (op_mode);
370 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
371 && (lv & ((HOST_WIDE_INT) 1
372 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
373 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
375 hv = (lv < 0) ? ~ (HOST_WIDE_INT) 0 : 0;
386 return immed_double_const (lv, hv, mode);
389 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* Floating-point CONST_DOUBLE operand producing a float result; the
   setjmp/set_float_handler pair guards against host FP traps.  */
390 else if (GET_CODE (op) == CONST_DOUBLE
391 && GET_MODE_CLASS (mode) == MODE_FLOAT)
397 if (setjmp (handler))
398 /* There used to be a warning here, but that is inadvisable.
399 People may want to cause traps, and the natural way
400 to do it should not get a warning. */
403 set_float_handler (handler);
405 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
410 d = REAL_VALUE_NEGATE (d);
414 if (REAL_VALUE_NEGATIVE (d))
415 d = REAL_VALUE_NEGATE (d);
419 d = real_value_truncate (mode, d);
423 /* All this does is change the mode. */
427 d = REAL_VALUE_RNDZINT (d);
431 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
441 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
442 set_float_handler (NULL_PTR);
/* Floating operand, single-word integer result (FIX-type conversions).  */
446 else if (GET_CODE (op) == CONST_DOUBLE
447 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
448 && GET_MODE_CLASS (mode) == MODE_INT
449 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
455 if (setjmp (handler))
458 set_float_handler (handler);
460 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
465 val = REAL_VALUE_FIX (d);
469 val = REAL_VALUE_UNSIGNED_FIX (d);
476 set_float_handler (NULL_PTR);
478 val = trunc_int_for_mode (val, mode);
480 return GEN_INT (val);
483 /* This was formerly used only for non-IEEE float.
484 eggert@twinsun.com says it is safe for IEEE also. */
487 /* There are some simplifications we can do even if the operands
493 /* (not (not X)) == X, similarly for NEG. */
494 if (GET_CODE (op) == code)
499 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
500 becomes just the MINUS if its mode is MODE. This allows
501 folding switch statements on machines using casesi (such as
503 if (GET_CODE (op) == TRUNCATE
504 && GET_MODE (XEXP (op, 0)) == mode
505 && GET_CODE (XEXP (op, 0)) == MINUS
506 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
507 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
510 #ifdef POINTERS_EXTEND_UNSIGNED
511 if (! POINTERS_EXTEND_UNSIGNED
512 && mode == Pmode && GET_MODE (op) == ptr_mode
514 return convert_memory_address (Pmode, op);
518 #ifdef POINTERS_EXTEND_UNSIGNED
520 if (POINTERS_EXTEND_UNSIGNED
521 && mode == Pmode && GET_MODE (op) == ptr_mode
523 return convert_memory_address (Pmode, op);
535 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
536 and OP1. Return 0 if no simplification is possible.
538 Don't use this for relational operations such as EQ or LT.
539 Use simplify_relational_operation instead. */
/* Constant-fold or algebraically simplify the binary operation CODE in
   MODE on OP0/OP1; returns 0 when nothing can be done.  Relational codes
   are rejected up front (see comment below).
   NOTE(review): this listing omits many intermediate source lines (the
   embedded numbering has gaps), so case labels, braces and some branches
   are not visible; the visible code is kept byte-identical.  */
542 simplify_binary_operation (code, mode, op0, op1)
544 enum machine_mode mode;
547 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
549 int width = GET_MODE_BITSIZE (mode);
552 /* Relational operations don't work here. We must know the mode
553 of the operands in order to do the comparison correctly.
554 Assuming a full word can give incorrect results.
555 Consider comparing 128 with -128 in QImode. */
557 if (GET_RTX_CLASS (code) == '<')
560 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* Fold operations on two floating CONST_DOUBLEs of this mode; the
   setjmp/set_float_handler pair guards against host FP traps.  */
561 if (GET_MODE_CLASS (mode) == MODE_FLOAT
562 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
563 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
565 REAL_VALUE_TYPE f0, f1, value;
568 if (setjmp (handler))
571 set_float_handler (handler);
573 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
574 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
575 f0 = real_value_truncate (mode, f0);
576 f1 = real_value_truncate (mode, f1);
578 #ifdef REAL_ARITHMETIC
579 #ifndef REAL_INFINITY
580 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
583 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
597 #ifndef REAL_INFINITY
604 value = MIN (f0, f1);
607 value = MAX (f0, f1);
614 value = real_value_truncate (mode, value);
615 set_float_handler (NULL_PTR);
616 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
618 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
620 /* We can fold some multi-word operations. */
621 if (GET_MODE_CLASS (mode) == MODE_INT
622 && width == HOST_BITS_PER_WIDE_INT * 2
623 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
624 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
626 HOST_WIDE_INT l1, l2, h1, h2, lv, hv;
/* Split each operand into a low/high word pair, sign-extending a
   CONST_INT into the high word.  */
628 if (GET_CODE (op0) == CONST_DOUBLE)
629 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
631 l1 = INTVAL (op0), h1 = l1 < 0 ? -1 : 0;
633 if (GET_CODE (op1) == CONST_DOUBLE)
634 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
636 l2 = INTVAL (op1), h2 = l2 < 0 ? -1 : 0;
641 /* A - B == A + (-B). */
642 neg_double (l2, h2, &lv, &hv);
645 /* .. fall through ... */
648 add_double (l1, h1, l2, h2, &lv, &hv);
652 mul_double (l1, h1, l2, h2, &lv, &hv);
655 case DIV: case MOD: case UDIV: case UMOD:
656 /* We'd need to include tree.h to do this and it doesn't seem worth
661 lv = l1 & l2, hv = h1 & h2;
665 lv = l1 | l2, hv = h1 | h2;
669 lv = l1 ^ l2, hv = h1 ^ h2;
/* MIN/MAX on the word pairs: compare high words first, low words
   (unsigned) on a tie.  */
675 && ((unsigned HOST_WIDE_INT) l1
676 < (unsigned HOST_WIDE_INT) l2)))
685 && ((unsigned HOST_WIDE_INT) l1
686 > (unsigned HOST_WIDE_INT) l2)))
693 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
695 && ((unsigned HOST_WIDE_INT) l1
696 < (unsigned HOST_WIDE_INT) l2)))
703 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
705 && ((unsigned HOST_WIDE_INT) l1
706 > (unsigned HOST_WIDE_INT) l2)))
712 case LSHIFTRT: case ASHIFTRT:
714 case ROTATE: case ROTATERT:
715 #ifdef SHIFT_COUNT_TRUNCATED
716 if (SHIFT_COUNT_TRUNCATED)
717 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
720 if (h2 != 0 || l2 < 0 || l2 >= GET_MODE_BITSIZE (mode))
723 if (code == LSHIFTRT || code == ASHIFTRT)
724 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
726 else if (code == ASHIFT)
727 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
728 else if (code == ROTATE)
729 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
730 else /* code == ROTATERT */
731 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
738 return immed_double_const (lv, hv, mode);
/* Non-constant (or too-wide) operands: try algebraic identities
   per opcode instead of arithmetic folding.  */
741 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
742 || width > HOST_BITS_PER_WIDE_INT || width == 0)
744 /* Even if we can't compute a constant result,
745 there are some cases worth simplifying. */
750 /* In IEEE floating point, x+0 is not the same as x. Similarly
751 for the other optimizations below. */
752 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
753 && FLOAT_MODE_P (mode) && ! flag_fast_math)
756 if (op1 == CONST0_RTX (mode))
759 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
760 if (GET_CODE (op0) == NEG)
761 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
762 else if (GET_CODE (op1) == NEG)
763 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
765 /* Handle both-operands-constant cases. We can only add
766 CONST_INTs to constants since the sum of relocatable symbols
767 can't be handled by most assemblers. Don't add CONST_INT
768 to CONST_INT since overflow won't be computed properly if wider
769 than HOST_BITS_PER_WIDE_INT. */
771 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
772 && GET_CODE (op1) == CONST_INT)
773 return plus_constant (op0, INTVAL (op1));
774 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
775 && GET_CODE (op0) == CONST_INT)
776 return plus_constant (op1, INTVAL (op0));
778 /* See if this is something like X * C - X or vice versa or
779 if the multiplication is written as a shift. If so, we can
780 distribute and make a new multiply, shift, or maybe just
781 have X (if C is 2 in the example above). But don't make
782 real multiply if we didn't have one before. */
784 if (! FLOAT_MODE_P (mode))
786 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
787 rtx lhs = op0, rhs = op1;
/* Strip NEG / (MULT x C) / (ASHIFT x C) wrappers from each side,
   recording the multiplicative coefficient.  */
790 if (GET_CODE (lhs) == NEG)
791 coeff0 = -1, lhs = XEXP (lhs, 0);
792 else if (GET_CODE (lhs) == MULT
793 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
795 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
798 else if (GET_CODE (lhs) == ASHIFT
799 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
800 && INTVAL (XEXP (lhs, 1)) >= 0
801 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
803 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
807 if (GET_CODE (rhs) == NEG)
808 coeff1 = -1, rhs = XEXP (rhs, 0);
809 else if (GET_CODE (rhs) == MULT
810 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
812 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
815 else if (GET_CODE (rhs) == ASHIFT
816 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
817 && INTVAL (XEXP (rhs, 1)) >= 0
818 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
820 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
824 if (rtx_equal_p (lhs, rhs))
826 tem = simplify_gen_binary (MULT, mode, lhs,
827 GEN_INT (coeff0 + coeff1));
828 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
832 /* If one of the operands is a PLUS or a MINUS, see if we can
833 simplify this by the associative law.
834 Don't use the associative law for floating point.
835 The inaccuracy makes it nonassociative,
836 and subtle programs can break if operations are associated. */
838 if (INTEGRAL_MODE_P (mode)
839 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
840 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
841 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
847 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
848 using cc0, in which case we want to leave it as a COMPARE
849 so we can distinguish it from a register-register-copy.
851 In IEEE floating point, x-0 is not the same as x. */
853 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
854 || ! FLOAT_MODE_P (mode) || flag_fast_math)
855 && op1 == CONST0_RTX (mode))
858 /* Do nothing here. */
863 /* None of these optimizations can be done for IEEE
865 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
866 && FLOAT_MODE_P (mode) && ! flag_fast_math)
869 /* We can't assume x-x is 0 even with non-IEEE floating point,
870 but since it is zero except in very strange circumstances, we
871 will treat it as zero with -ffast-math. */
872 if (rtx_equal_p (op0, op1)
873 && ! side_effects_p (op0)
874 && (! FLOAT_MODE_P (mode) || flag_fast_math))
875 return CONST0_RTX (mode);
877 /* Change subtraction from zero into negation. */
878 if (op0 == CONST0_RTX (mode))
879 return gen_rtx_NEG (mode, op1);
881 /* (-1 - a) is ~a. */
882 if (op0 == constm1_rtx)
883 return gen_rtx_NOT (mode, op1);
885 /* Subtracting 0 has no effect. */
886 if (op1 == CONST0_RTX (mode))
889 /* See if this is something like X * C - X or vice versa or
890 if the multiplication is written as a shift. If so, we can
891 distribute and make a new multiply, shift, or maybe just
892 have X (if C is 2 in the example above). But don't make
893 real multiply if we didn't have one before. */
895 if (! FLOAT_MODE_P (mode))
897 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
898 rtx lhs = op0, rhs = op1;
/* Same coefficient extraction as in the PLUS case above, but the
   result uses coeff0 - coeff1.  */
901 if (GET_CODE (lhs) == NEG)
902 coeff0 = -1, lhs = XEXP (lhs, 0);
903 else if (GET_CODE (lhs) == MULT
904 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
906 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
909 else if (GET_CODE (lhs) == ASHIFT
910 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
911 && INTVAL (XEXP (lhs, 1)) >= 0
912 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
914 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
918 if (GET_CODE (rhs) == NEG)
919 coeff1 = - 1, rhs = XEXP (rhs, 0);
920 else if (GET_CODE (rhs) == MULT
921 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
923 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
926 else if (GET_CODE (rhs) == ASHIFT
927 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
928 && INTVAL (XEXP (rhs, 1)) >= 0
929 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
931 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
935 if (rtx_equal_p (lhs, rhs))
937 tem = simplify_gen_binary (MULT, mode, lhs,
938 GEN_INT (coeff0 - coeff1));
939 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
943 /* (a - (-b)) -> (a + b). */
944 if (GET_CODE (op1) == NEG)
945 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
947 /* If one of the operands is a PLUS or a MINUS, see if we can
948 simplify this by the associative law.
949 Don't use the associative law for floating point.
950 The inaccuracy makes it nonassociative,
951 and subtle programs can break if operations are associated. */
953 if (INTEGRAL_MODE_P (mode)
954 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
955 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
956 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
959 /* Don't let a relocatable value get a negative coeff. */
960 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
961 return plus_constant (op0, - INTVAL (op1));
963 /* (x - (x & y)) -> (x & ~y) */
964 if (GET_CODE (op1) == AND)
966 if (rtx_equal_p (op0, XEXP (op1, 0)))
967 return simplify_gen_binary (AND, mode, op0,
968 gen_rtx_NOT (mode, XEXP (op1, 1)));
969 if (rtx_equal_p (op0, XEXP (op1, 1)))
970 return simplify_gen_binary (AND, mode, op0,
971 gen_rtx_NOT (mode, XEXP (op1, 0)));
/* MULT identities: x * -1 -> -x, x * 0 -> 0 (non-IEEE), x * 1 -> x,
   x * 2^k -> shift.  */
976 if (op1 == constm1_rtx)
978 tem = simplify_unary_operation (NEG, mode, op0, mode);
980 return tem ? tem : gen_rtx_NEG (mode, op0);
983 /* In IEEE floating point, x*0 is not always 0. */
984 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
985 || ! FLOAT_MODE_P (mode) || flag_fast_math)
986 && op1 == CONST0_RTX (mode)
987 && ! side_effects_p (op0))
990 /* In IEEE floating point, x*1 is not equivalent to x for nans.
991 However, ANSI says we can drop signals,
992 so we can do this anyway. */
993 if (op1 == CONST1_RTX (mode))
996 /* Convert multiply by constant power of two into shift unless
997 we are still generating RTL. This test is a kludge. */
998 if (GET_CODE (op1) == CONST_INT
999 && (val = exact_log2 (INTVAL (op1))) >= 0
1000 /* If the mode is larger than the host word size, and the
1001 uppermost bit is set, then this isn't a power of two due
1002 to implicit sign extension. */
1003 && (width <= HOST_BITS_PER_WIDE_INT
1004 || val != HOST_BITS_PER_WIDE_INT - 1)
1005 && ! rtx_equal_function_value_matters)
1006 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1008 if (GET_CODE (op1) == CONST_DOUBLE
1009 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
1013 int op1is2, op1ism1;
1015 if (setjmp (handler))
1018 set_float_handler (handler);
1019 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1020 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
1021 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
1022 set_float_handler (NULL_PTR);
1024 /* x*2 is x+x and x*(-1) is -x */
1025 if (op1is2 && GET_MODE (op0) == mode)
1026 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1028 else if (op1ism1 && GET_MODE (op0) == mode)
1029 return gen_rtx_NEG (mode, op0);
/* IOR identities: x|0, x|~0, x|x, x|~x.  */
1034 if (op1 == const0_rtx)
1036 if (GET_CODE (op1) == CONST_INT
1037 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1039 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1041 /* A | (~A) -> -1 */
1042 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1043 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1044 && ! side_effects_p (op0)
1045 && GET_MODE_CLASS (mode) != MODE_CC)
/* XOR identities: x^0, x^~0 -> ~x, x^x.  */
1050 if (op1 == const0_rtx)
1052 if (GET_CODE (op1) == CONST_INT
1053 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1054 return gen_rtx_NOT (mode, op0);
1055 if (op0 == op1 && ! side_effects_p (op0)
1056 && GET_MODE_CLASS (mode) != MODE_CC)
/* AND identities: x&0, x&~0, x&x, x&~x.  */
1061 if (op1 == const0_rtx && ! side_effects_p (op0))
1063 if (GET_CODE (op1) == CONST_INT
1064 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1066 if (op0 == op1 && ! side_effects_p (op0)
1067 && GET_MODE_CLASS (mode) != MODE_CC)
1070 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1071 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1072 && ! side_effects_p (op0)
1073 && GET_MODE_CLASS (mode) != MODE_CC)
1078 /* Convert divide by power of two into shift (divide by 1 handled
1080 if (GET_CODE (op1) == CONST_INT
1081 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
1082 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1084 /* ... fall through ... */
1087 if (op1 == CONST1_RTX (mode))
1090 /* In IEEE floating point, 0/x is not always 0. */
1091 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1092 || ! FLOAT_MODE_P (mode) || flag_fast_math)
1093 && op0 == CONST0_RTX (mode)
1094 && ! side_effects_p (op1))
1097 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1098 /* Change division by a constant into multiplication. Only do
1099 this with -ffast-math until an expert says it is safe in
1101 else if (GET_CODE (op1) == CONST_DOUBLE
1102 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
1103 && op1 != CONST0_RTX (mode)
1107 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1109 if (! REAL_VALUES_EQUAL (d, dconst0))
1111 #if defined (REAL_ARITHMETIC)
1112 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1113 return gen_rtx_MULT (mode, op0,
1114 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1117 gen_rtx_MULT (mode, op0,
1118 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1126 /* Handle modulus by power of two (mod with 1 handled below). */
1127 if (GET_CODE (op1) == CONST_INT
1128 && exact_log2 (INTVAL (op1)) > 0)
1129 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1131 /* ... fall through ... */
1134 if ((op0 == const0_rtx || op1 == const1_rtx)
1135 && ! side_effects_p (op0) && ! side_effects_p (op1))
1141 /* Rotating ~0 always results in ~0. */
1142 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1143 && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
1144 && ! side_effects_p (op1))
1147 /* ... fall through ... */
1152 if (op1 == const0_rtx)
1154 if (op0 == const0_rtx && ! side_effects_p (op1))
/* MIN/MAX against the extreme value of the mode collapses to the
   other operand.  */
1159 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1160 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width -1)
1161 && ! side_effects_p (op0))
1163 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1168 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1169 && ((unsigned HOST_WIDE_INT) INTVAL (op1)
1170 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1171 && ! side_effects_p (op0))
1173 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1178 if (op1 == const0_rtx && ! side_effects_p (op0))
1180 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1185 if (op1 == constm1_rtx && ! side_effects_p (op0))
1187 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1198 /* Get the integer argument values in two forms:
1199 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1201 arg0 = INTVAL (op0);
1202 arg1 = INTVAL (op1);
1204 if (width < HOST_BITS_PER_WIDE_INT)
1206 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1207 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1210 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1211 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1214 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1215 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1223 /* Compute the value of the arithmetic. */
1228 val = arg0s + arg1s;
1232 val = arg0s - arg1s;
1236 val = arg0s * arg1s;
1242 val = arg0s / arg1s;
1248 val = arg0s % arg1s;
1254 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1260 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1276 /* If shift count is undefined, don't fold it; let the machine do
1277 what it wants. But truncate it if the machine will do that. */
1281 #ifdef SHIFT_COUNT_TRUNCATED
1282 if (SHIFT_COUNT_TRUNCATED)
1286 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1293 #ifdef SHIFT_COUNT_TRUNCATED
1294 if (SHIFT_COUNT_TRUNCATED)
1298 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1305 #ifdef SHIFT_COUNT_TRUNCATED
1306 if (SHIFT_COUNT_TRUNCATED)
1310 val = arg0s >> arg1;
1312 /* Bootstrap compiler may not have sign extended the right shift.
1313 Manually extend the sign to insure bootstrap cc matches gcc. */
1314 if (arg0s < 0 && arg1 > 0)
1315 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1324 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1325 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1333 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1334 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1338 /* Do nothing here. */
1342 val = arg0s <= arg1s ? arg0s : arg1s;
1346 val = ((unsigned HOST_WIDE_INT) arg0
1347 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1351 val = arg0s > arg1s ? arg0s : arg1s;
1355 val = ((unsigned HOST_WIDE_INT) arg0
1356 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1363 val = trunc_int_for_mode (val, mode);
1365 return GEN_INT (val);
1368 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1371 Rather than test for specific case, we do this by a brute-force method
1372 and do all possible simplifications until no more changes occur. Then
1373 we rebuild the operation. */
/* Flatten a PLUS/MINUS tree into the OPS[] array (with per-entry negation
   flags in NEGS[]), repeatedly simplify pairs of entries, then rebuild a
   canonical sum.  Returns 0 when the result is no improvement.
   NOTE(review): the listing omits intermediate source lines (numbering
   gaps) — e.g. the ops[]/negs[] declarations and several case labels are
   not visible; the visible code is kept byte-identical.  */
1376 simplify_plus_minus (code, mode, op0, op1)
1378 enum machine_mode mode;
1384 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
1385 int first = 1, negate = 0, changed;
1388 bzero ((char *) ops, sizeof ops);
1390 /* Set up the two operands and then expand them until nothing has been
1391 changed. If we run out of room in our array, give up; this should
1392 almost never happen. */
1394 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
/* Expansion pass: PLUS/MINUS entries are split into their operands,
   NEG flips the negation flag, NOT becomes (-a - 1), and a negated
   CONST_INT is folded into a plain negative constant.  */
1401 for (i = 0; i < n_ops; i++)
1402 switch (GET_CODE (ops[i]))
1409 ops[n_ops] = XEXP (ops[i], 1);
1410 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
1411 ops[i] = XEXP (ops[i], 0);
1417 ops[i] = XEXP (ops[i], 0);
1418 negs[i] = ! negs[i];
1423 ops[i] = XEXP (ops[i], 0);
1429 /* ~a -> (-a - 1) */
1432 ops[n_ops] = constm1_rtx;
1433 negs[n_ops++] = negs[i];
1434 ops[i] = XEXP (ops[i], 0);
1435 negs[i] = ! negs[i];
1442 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
1450 /* If we only have two operands, we can't do anything. */
1454 /* Now simplify each pair of operands until nothing changes. The first
1455 time through just simplify constants against each other. */
1462 for (i = 0; i < n_ops - 1; i++)
1463 for (j = i + 1; j < n_ops; j++)
1464 if (ops[i] != 0 && ops[j] != 0
1465 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
1467 rtx lhs = ops[i], rhs = ops[j];
1468 enum rtx_code ncode = PLUS;
/* Unequal negation flags turn the pair into a subtraction, with
   the non-negated entry on the left.  */
1470 if (negs[i] && ! negs[j])
1471 lhs = ops[j], rhs = ops[i], ncode = MINUS;
1472 else if (! negs[i] && negs[j])
1475 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1478 ops[i] = tem, ops[j] = 0;
1479 negs[i] = negs[i] && negs[j];
1480 if (GET_CODE (tem) == NEG)
1481 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
1483 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
1484 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
1492 /* Pack all the operands to the lower-numbered entries and give up if
1493 we didn't reduce the number of operands we had. Make sure we
1494 count a CONST as two operands. If we have the same number of
1495 operands, but have made more CONSTs than we had, this is also
1496 an improvement, so accept it. */
1498 for (i = 0, j = 0; j < n_ops; j++)
1501 ops[i] = ops[j], negs[i++] = negs[j];
1502 if (GET_CODE (ops[j]) == CONST)
1506 if (i + n_consts > input_ops
1507 || (i + n_consts == input_ops && n_consts <= input_consts))
1512 /* If we have a CONST_INT, put it last. */
1513 for (i = 0; i < n_ops - 1; i++)
1514 if (GET_CODE (ops[i]) == CONST_INT)
1516 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
1517 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
1520 /* Put a non-negated operand first. If there aren't any, make all
1521 operands positive and negate the whole thing later. */
1522 for (i = 0; i < n_ops && negs[i]; i++)
1527 for (i = 0; i < n_ops; i++)
1533 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
1534 j = negs[0], negs[0] = negs[i], negs[i] = j;
1537 /* Now make the result by performing the requested operations. */
1539 for (i = 1; i < n_ops; i++)
1540 result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
1542 return negate ? gen_rtx_NEG (mode, result) : result;
/* Fields of struct cfc_args (the struct's opening declaration is not
   visible in this listing): OP0/OP1 carry the two CONST_DOUBLE operands
   in; EQUAL/OP0LT/OP1LT carry the comparison results out.  */
1547 rtx op0, op1; /* Input */
1548 int equal, op0lt, op1lt; /* Output */

/* Compare the two CONST_DOUBLE operands in *DATA (a struct cfc_args)
   and store the equal/less-than outcomes back into *DATA.  Invoked via
   do_float_handler by simplify_relational_operation so that a host
   floating-point trap during the comparison can be caught.  */
1552 check_fold_consts (data)
1555 struct cfc_args *args = (struct cfc_args *) data;
1556 REAL_VALUE_TYPE d0, d1;

/* Convert both rtx constants to the host REAL_VALUE_TYPE form.  */
1558 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1559 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1560 args->equal = REAL_VALUES_EQUAL (d0, d1);
1561 args->op0lt = REAL_VALUES_LESS (d0, d1);
1562 args->op1lt = REAL_VALUES_LESS (d1, d0);
1565 /* Like simplify_binary_operation except used for relational operators.
1566 MODE is the mode of the operands, not that of the result. If MODE
1567 is VOIDmode, both operands must also be VOIDmode and we compare the
1568 operands in "infinite precision".
1570 If no simplification is possible, this function returns zero. Otherwise,
1571 it returns either const_true_rtx or const0_rtx. */

/* NOTE(review): this listing has gaps (the return type line, some local
   declarations, and the case labels of the final switches are missing),
   so the comments added below cover only the visible statements.  */
1574 simplify_relational_operation (code, mode, op0, op1)
1576 enum machine_mode mode;

/* EQUAL/OP0LT/OP1LT record the signed comparison outcome and
   OP0LTU/OP1LTU the unsigned one; the code at the end maps CODE onto
   these flags.  */
1579 int equal, op0lt, op0ltu, op1lt, op1ltu;

1582 /* If op0 is a compare, extract the comparison arguments from it. */
1583 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1584 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1586 /* We can't simplify MODE_CC values since we don't know what the
1587 actual comparison is. */
1588 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1595 /* Make sure the constant is second. */
1596 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
1597 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
1599 tem = op0, op0 = op1, op1 = tem;

/* Swapping the operands requires the reversed condition code.  */
1600 code = swap_condition (code);

1603 /* For integer comparisons of A and B maybe we can simplify A - B and can
1604 then simplify a comparison of that with zero. If A and B are both either
1605 a register or a CONST_INT, this can't help; testing for these cases will
1606 prevent infinite recursion here and speed things up.
1608 If CODE is an unsigned comparison, then we can never do this optimization,
1609 because it gives an incorrect result if the subtraction wraps around zero.
1610 ANSI C defines unsigned operations such that they never overflow, and
1611 thus such cases can not be ignored. */
1613 if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
1614 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
1615 && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
1616 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1617 && code != GTU && code != GEU && code != LTU && code != LEU)
1618 return simplify_relational_operation (signed_condition (code),
1619 mode, tem, const0_rtx);

1621 /* For non-IEEE floating-point, if the two operands are equal, we know the
1623 if (rtx_equal_p (op0, op1)
1624 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1625 || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
1626 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1628 /* If the operands are floating-point constants, see if we can fold
1630 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1631 else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
1632 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
1634 struct cfc_args args;
1636 /* Setup input for check_fold_consts() */
1640 if (do_float_handler(check_fold_consts, (PTR) &args) == 0)
1641 /* We got an exception from check_fold_consts() */
1644 /* Receive output from check_fold_consts() */
1646 op0lt = op0ltu = args.op0lt;
1647 op1lt = op1ltu = args.op1lt;
1649 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1651 /* Otherwise, see if the operands are both integers. */
1652 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1653 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
1654 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
1656 int width = GET_MODE_BITSIZE (mode);
1657 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1658 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

/* Each constant is handled as a (high, low) pair of HOST_WIDE_INTs,
   kept in parallel signed (s) and unsigned (u) variables.  */
1660 /* Get the two words comprising each integer constant. */
1661 if (GET_CODE (op0) == CONST_DOUBLE)
1663 l0u = l0s = CONST_DOUBLE_LOW (op0);
1664 h0u = h0s = CONST_DOUBLE_HIGH (op0);
1668 l0u = l0s = INTVAL (op0);
1669 h0u = h0s = l0s < 0 ? -1 : 0;
1672 if (GET_CODE (op1) == CONST_DOUBLE)
1674 l1u = l1s = CONST_DOUBLE_LOW (op1);
1675 h1u = h1s = CONST_DOUBLE_HIGH (op1);
1679 l1u = l1s = INTVAL (op1);
1680 h1u = h1s = l1s < 0 ? -1 : 0;
1683 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1684 we have to sign or zero-extend the values. */
1685 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
1686 h0u = h1u = 0, h0s = l0s < 0 ? -1 : 0, h1s = l1s < 0 ? -1 : 0;
1688 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
1690 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
1691 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
1693 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1694 l0s |= ((HOST_WIDE_INT) (-1) << width);
1696 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1697 l1s |= ((HOST_WIDE_INT) (-1) << width);

/* Compute all five outcomes by double-word comparison, in both the
   signed and the unsigned sense.  */
1700 equal = (h0u == h1u && l0u == l1u);
1701 op0lt = (h0s < h1s || (h0s == h1s && l0s < l1s));
1702 op1lt = (h1s < h0s || (h1s == h0s && l1s < l0s));
1703 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
1704 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));

1707 /* Otherwise, there are some code-specific tests we can make. */
1713 /* References to the frame plus a constant or labels cannot
1714 be zero, but a SYMBOL_REF can due to #pragma weak. */
1715 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1716 || GET_CODE (op0) == LABEL_REF)
1717 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1718 /* On some machines, the ap reg can be 0 sometimes. */
1719 && op0 != arg_pointer_rtx
1726 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1727 || GET_CODE (op0) == LABEL_REF)
1728 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1729 && op0 != arg_pointer_rtx
1732 return const_true_rtx;
1736 /* Unsigned values are never negative. */
1737 if (op1 == const0_rtx)
1738 return const_true_rtx;
1742 if (op1 == const0_rtx)
1747 /* Unsigned values are never greater than the largest
1749 if (GET_CODE (op1) == CONST_INT
1750 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
1751 && INTEGRAL_MODE_P (mode))
1752 return const_true_rtx;
1756 if (GET_CODE (op1) == CONST_INT
1757 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
1758 && INTEGRAL_MODE_P (mode))
1769 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
1774 return equal ? const_true_rtx : const0_rtx;
1776 return ! equal ? const_true_rtx : const0_rtx;
1778 return op0lt ? const_true_rtx : const0_rtx;
1780 return op1lt ? const_true_rtx : const0_rtx;
1782 return op0ltu ? const_true_rtx : const0_rtx;
1784 return op1ltu ? const_true_rtx : const0_rtx;
1786 return equal || op0lt ? const_true_rtx : const0_rtx;
1788 return equal || op1lt ? const_true_rtx : const0_rtx;
1790 return equal || op0ltu ? const_true_rtx : const0_rtx;
1792 return equal || op1ltu ? const_true_rtx : const0_rtx;
1798 /* Simplify CODE, an operation with result mode MODE and three operands,
1799 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
1800 a constant. Return 0 if no simplification is possible. */

/* NOTE(review): the switch statement and its case labels (presumably
   the bit-field extract and IF_THEN_ELSE codes, judging by the visible
   bodies) fall in gaps of this listing; comments below describe only
   the visible statements.  */
1803 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
1805 enum machine_mode mode, op0_mode;
1808 int width = GET_MODE_BITSIZE (mode);
1810 /* VOIDmode means "infinite" precision. */
1812 width = HOST_BITS_PER_WIDE_INT;

/* Bit-field extraction from constant operands: OP1 is the field width
   and OP2 the starting position, per the visible shift arithmetic.  */
1818 if (GET_CODE (op0) == CONST_INT
1819 && GET_CODE (op1) == CONST_INT
1820 && GET_CODE (op2) == CONST_INT
1821 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_BITSIZE (op0_mode)
1822 && width <= HOST_BITS_PER_WIDE_INT
1824 /* Extracting a bit-field from a constant */
1825 HOST_WIDE_INT val = INTVAL (op0);

/* Shift the wanted field down to bit 0; on big-endian bit numbering
   the position counts from the other end of the operand.  */
1827 if (BITS_BIG_ENDIAN)
1828 val >>= (GET_MODE_BITSIZE (op0_mode)
1829 - INTVAL (op2) - INTVAL (op1));
1831 val >>= INTVAL (op2);

1833 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
1835 /* First zero-extend. */
1836 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
1837 /* If desired, propagate sign bit. */
1838 if (code == SIGN_EXTRACT
1839 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
1840 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
1843 /* Clear the bits that don't belong in our mode,
1844 unless they and our sign bit are all one.
1845 So we get either a reasonable negative value or a reasonable
1846 unsigned value for this mode. */
1847 if (width < HOST_BITS_PER_WIDE_INT
1848 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
1849 != ((HOST_WIDE_INT) (-1) << (width - 1))))
1850 val &= ((HOST_WIDE_INT) 1 << width) - 1;
1852 return GEN_INT (val);

/* A constant condition selects one arm outright.  */
1857 if (GET_CODE (op0) == CONST_INT
1858 return op0 != const0_rtx ? op1 : op2;

1860 /* Convert a == b ? b : a to "a". */
1861 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
1862 && rtx_equal_p (XEXP (op0, 0), op1)
1863 && rtx_equal_p (XEXP (op0, 1), op2))
1865 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
1866 && rtx_equal_p (XEXP (op0, 1), op1)
1867 && rtx_equal_p (XEXP (op0, 0), op2))

/* Otherwise try to fold the comparison itself and pick an arm from
   the result.  */
1869 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
1872 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
1873 XEXP (op0, 0), XEXP (op0, 1));
1874 /* See if any simplifications were possible. */
1875 if (temp == const0_rtx)
1877 else if (temp == const1_rtx)
1889 /* Simplify X, an rtx expression.
1891 Return the simplified expression or NULL if no simplifications
1894 This is the preferred entry point into the simplification routines;
1895 however, we still allow passes to call the more specific routines.
1897 Right now GCC has three (yes, three) major bodies of RTL simplification
1898 code that need to be unified.
1900 1. fold_rtx in cse.c. This code uses various CSE specific
1901 information to aid in RTL simplification.
1903 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
1904 it uses combine specific information to aid in RTL
1907 3. The routines in this file.
1910 Long term we want to only have one body of simplification code; to
1911 get to that state I recommend the following steps:
1913 1. Pore over fold_rtx & simplify_rtx and move any simplifications
1914 which are not pass dependent state into these routines.
1916 2. As code is moved by #1, change fold_rtx & simplify_rtx to
1917 use this routine whenever possible.
1919 3. Allow for pass dependent state to be provided to these
1920 routines and add simplifications based on the pass dependent
1921 state. Remove code from cse.c & combine.c that becomes
1924 It will take time, but ultimately the compiler will be easier to
1925 maintain and improve. It's totally silly that when we add a
1926 simplification that it needs to be added to 4 places (3 for RTL
1927 simplification and 1 for tree simplification). */
1934 enum machine_mode mode;
1936 mode = GET_MODE (x);
1937 code = GET_CODE (x);
1939 switch (GET_RTX_CLASS (code))
1942 return simplify_unary_operation (code, mode,
1943 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
1946 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
1950 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
1951 XEXP (x, 0), XEXP (x, 1), XEXP (x, 2));
1954 return simplify_relational_operation (code, GET_MODE (XEXP (x, 0)),
1955 XEXP (x, 0), XEXP (x, 1));