1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
29 #include "hard-reg-set.h"
32 #include "insn-config.h"
40 /* Simplification and canonicalization of RTL. */
42 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
43 virtual regs here because the simplify_*_operation routines are called
44 by integrate.c, which is called before virtual register instantiation.
46 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
47 a header file so that their definitions can be shared with the
48 simplification routines in simplify-rtx.c. Until then, do not
49 change these macros without also changing the copy in simplify-rtx.c. */
/* Nonzero if X is a fixed base register (frame pointer, hard frame
   pointer, fixed argument pointer, or one of the virtual registers that
   instantiate to those), or a PLUS of such a register and a CONST_INT.

   FIX (review): in the PLUS arm below, the arg-pointer test used to read
   "(X) == arg_pointer_rtx".  X is known to be a PLUS at that point, so
   that comparison could never be true; it must inspect XEXP (X, 0) like
   the neighboring tests.  Per the comment above, the sibling copy of this
   macro in the other file must be kept in sync with this change.  */

#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
65 /* Similar, but also allows reference to the stack pointer.
67 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
68 arg_pointer_rtx by itself is nonzero, because on at least one machine,
69 the i960, the arg pointer is zero when it is unused. */
/* FIX (review): same defect as in FIXED_BASE_PLUS_P -- inside the first
   PLUS arm the arg-pointer test compared the whole PLUS expression
   ("(X) == arg_pointer_rtx"), which can never match there; it must test
   XEXP (X, 0).  Keep the sibling copy of this macro in sync.  */

#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.

   FIX (review): parenthesize the macro argument -- a cast binds more
   tightly than most operators, so without parentheses an argument such
   as "a + b" would have only "a" converted to HOST_WIDE_INT.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
/* Forward declarations for the static helpers defined later in this file.
   NOTE (review): this region was damaged in extraction -- interior lines
   are missing (e.g. the tail of the simplify_plus_minus_op_data_cmp
   prototype) and each surviving line still carries its original source
   line number.  Code left byte-for-byte as found; only comments added.  */
98 static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
99 static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
101 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
102 enum machine_mode, rtx, rtx));
103 static void check_fold_consts PARAMS ((PTR));
/* The REAL_* helpers exist only when host floats can represent target
   floats (or software REAL_ARITHMETIC is in use).  */
104 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
105 static void simplify_unary_real PARAMS ((PTR));
106 static void simplify_binary_real PARAMS ((PTR));
108 static void simplify_binary_is2orm1 PARAMS ((PTR));
111 /* Negate a CONST_INT rtx, truncating (because a conversion from a
112 maximally negative number can overflow). */
114 neg_const_int (mode, i)
115 enum machine_mode mode;
118 return GEN_INT (trunc_int_for_mode (- INTVAL (i), mode));
122 /* Make a binary operation by properly ordering the operands and
123 seeing if the expression folds. */
126 simplify_gen_binary (code, mode, op0, op1)
128 enum machine_mode mode;
133 /* Put complex operands first and constants second if commutative. */
134 if (GET_RTX_CLASS (code) == 'c'
135 && swap_commutative_operands_p (op0, op1))
136 tem = op0, op0 = op1, op1 = tem;
138 /* If this simplifies, do it. */
139 tem = simplify_binary_operation (code, mode, op0, op1);
144 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
145 just form the operation. */
147 if (GET_CODE (op1) == CONST_INT
148 && GET_MODE (op0) != VOIDmode
149 && (code == PLUS || code == MINUS))
152 op1 = neg_const_int (mode, op1);
153 return plus_constant (op0, INTVAL (op1));
156 return gen_rtx_fmt_ee (code, mode, op0, op1);
159 /* If X is a MEM referencing the constant pool, return the real value.
160 Otherwise return X. */
162 avoid_constant_pool_reference (x)
166 enum machine_mode cmode;
168 if (GET_CODE (x) != MEM)
172 if (GET_CODE (addr) != SYMBOL_REF
173 || ! CONSTANT_POOL_ADDRESS_P (addr))
176 c = get_pool_constant (addr);
177 cmode = get_pool_mode (addr);
179 /* If we're accessing the constant in a different mode than it was
180 originally stored, attempt to fix that up via subreg simplifications.
181 If that fails we have no choice but to return the original memory. */
182 if (cmode != GET_MODE (x))
184 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
191 /* Make a unary operation by first seeing if it folds and otherwise making
192 the specified operation. */
195 simplify_gen_unary (code, mode, op, op_mode)
197 enum machine_mode mode;
199 enum machine_mode op_mode;
203 /* If this simplifies, use it. */
204 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
207 return gen_rtx_fmt_e (code, mode, op);
210 /* Likewise for ternary operations. */
213 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
215 enum machine_mode mode, op0_mode;
220 /* If this simplifies, use it. */
221 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
225 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
228 /* Likewise, for relational operations.
229 CMP_MODE specifies mode comparison is done in.
233 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
235 enum machine_mode mode;
236 enum machine_mode cmp_mode;
241 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
244 /* Put complex operands first and constants second. */
245 if (swap_commutative_operands_p (op0, op1))
246 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
248 return gen_rtx_fmt_ee (code, mode, op0, op1);
/* NOTE (review): this function was damaged in extraction -- many interior
   lines (case labels, braces, declarations, the function tail) are
   missing, and each surviving line still carries its original source line
   number.  Code is left byte-for-byte as found; only comments added.
   Restore from a pristine copy of this file before compiling.  */
251 /* Replace all occurrences of OLD in X with NEW and try to simplify the
252 resulting RTX. Return a new RTX which is as simplified as possible. */
255 simplify_replace_rtx (x, old, new)
260 enum rtx_code code = GET_CODE (x);
261 enum machine_mode mode = GET_MODE (x);
263 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
264 to build a new expression substituting recursively. If we can't do
265 anything, return our input. */
/* Dispatch on the rtx class of X: unary, binary/commutative, relational,
   ternary, then special-cased SUBREG and MEM -- TODO confirm the exact
   case labels against the original source.  */
270 switch (GET_RTX_CLASS (code))
/* Unary class: substitute in the single operand, then resimplify.  */
274 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
275 rtx op = (XEXP (x, 0) == old
276 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
278 return simplify_gen_unary (code, mode, op, op_mode);
/* Binary/commutative class: recurse into both operands.  */
284 simplify_gen_binary (code, mode,
285 simplify_replace_rtx (XEXP (x, 0), old, new),
286 simplify_replace_rtx (XEXP (x, 1), old, new));
/* Relational class: the comparison mode comes from whichever operand
   has a non-void mode.  */
289 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
290 ? GET_MODE (XEXP (x, 0))
291 : GET_MODE (XEXP (x, 1)));
292 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
293 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
296 simplify_gen_relational (code, mode,
299 : GET_MODE (op0) != VOIDmode
/* Ternary class: recurse into all three operands.  */
308 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
309 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
312 simplify_gen_ternary (code, mode,
317 simplify_replace_rtx (XEXP (x, 1), old, new),
318 simplify_replace_rtx (XEXP (x, 2), old, new));
322 /* The only case we try to handle is a SUBREG. */
326 exp = simplify_gen_subreg (GET_MODE (x),
327 simplify_replace_rtx (SUBREG_REG (x),
329 GET_MODE (SUBREG_REG (x)),
/* MEM: substitute inside the address without forcing revalidation.  */
337 if (GET_CODE (x) == MEM)
339 replace_equiv_address_nv (x,
340 simplify_replace_rtx (XEXP (x, 0),
/* NOTE (review): region damaged in extraction -- the argument struct's
   field list and the switch scaffolding are partially missing, and each
   surviving line still carries its original source line number.  Code
   left byte-for-byte as found; only comments added.  */
348 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
349 /* Subroutine of simplify_unary_operation, called via do_float_handler.
350 Handles simplification of unary ops on floating point values. */
351 struct simplify_unary_real_args
/* Fields visible below/used later: operand, mode, want_integer, result;
   presumably also the rtx_code -- TODO confirm full field list.  */
355 enum machine_mode mode;
/* Absolute value of a REAL_VALUE_TYPE; note D_ is evaluated more than
   once, so callers must pass a side-effect-free argument.  */
359 #define REAL_VALUE_ABS(d_) \
360 (REAL_VALUE_NEGATIVE (d_) ? REAL_VALUE_NEGATE (d_) : (d_))
/* Runs under do_float_handler so that a trapping FP operation aborts the
   folding instead of crashing the compiler.  */
363 simplify_unary_real (p)
368 struct simplify_unary_real_args *args =
369 (struct simplify_unary_real_args *) p;
370 REAL_VALUE_TYPE d;
371 REAL_VALUE_FROM_CONST_DOUBLE (d, args->operand);
/* Integer-result ops (FIX/UNSIGNED_FIX) produce a CONST_INT...  */
373 if (args->want_integer)
379 case FIX: i = REAL_VALUE_FIX (d); break;
380 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
384 args->result = GEN_INT (trunc_int_for_mode (i, args->mode));
/* ...otherwise the result is a CONST_DOUBLE in args->mode.  */
391 /* We don't attempt to optimize this. */
395 case ABS: d = REAL_VALUE_ABS (d); break;
396 case NEG: d = REAL_VALUE_NEGATE (d); break;
397 case FLOAT_TRUNCATE: d = real_value_truncate (args->mode, d); break;
398 case FLOAT_EXTEND: /* All this does is change the mode. */ break;
399 case FIX: d = REAL_VALUE_RNDZINT (d); break;
400 case UNSIGNED_FIX: d = REAL_VALUE_UNSIGNED_RNDZINT (d); break;
404 args->result = CONST_DOUBLE_FROM_REAL_VALUE (d, args->mode);
/* NOTE (review): this function was damaged in extraction -- switch
   scaffolding, case labels, braces, and whole branches are missing, and
   each surviving line still carries its original source line number.
   Code left byte-for-byte as found; only comments added.  Restore from a
   pristine copy before compiling.  */
409 /* Try to simplify a unary operation CODE whose output mode is to be
410 MODE with input operand OP whose mode was originally OP_MODE.
411 Return zero if no simplification can be made. */
413 simplify_unary_operation (code, mode, op, op_mode)
415 enum machine_mode mode;
417 enum machine_mode op_mode;
419 unsigned int width = GET_MODE_BITSIZE (mode);
/* Look through constant-pool MEMs so pooled constants fold too.  */
420 rtx trueop = avoid_constant_pool_reference (op);
422 /* The order of these tests is critical so that, for example, we don't
423 check the wrong mode (input vs. output) for a conversion operation,
424 such as FIX. At some point, this should be simplified. */
426 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
/* Case 1: FLOAT of an integer constant (CONST_INT or integer
   CONST_DOUBLE) -- convert the (low, high) pair to a real value.  */
428 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
429 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
431 HOST_WIDE_INT hv, lv;
434 if (GET_CODE (trueop) == CONST_INT)
435 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
437 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
439 #ifdef REAL_ARITHMETIC
440 REAL_VALUE_FROM_INT (d, lv, hv, mode);
/* Non-REAL_ARITHMETIC fallback: build the value with host doubles,
   two half-word shifts at a time to avoid overflowing the shift.  */
445 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
446 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
447 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
453 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
454 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
455 d += (double) (unsigned HOST_WIDE_INT) lv;
457 #endif /* REAL_ARITHMETIC */
458 d = real_value_truncate (mode, d);
459 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Case 2: UNSIGNED_FLOAT of an integer constant.  */
461 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
462 && (GET_CODE (trueop) == CONST_DOUBLE
463 || GET_CODE (trueop) == CONST_INT))
465 HOST_WIDE_INT hv, lv;
468 if (GET_CODE (trueop) == CONST_INT)
469 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
471 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
473 if (op_mode == VOIDmode)
475 /* We don't know how to interpret negative-looking numbers in
476 this case, so don't try to fold those. */
480 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
483 hv = 0, lv &= GET_MODE_MASK (op_mode);
485 #ifdef REAL_ARITHMETIC
486 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
489 d = (double) (unsigned HOST_WIDE_INT) hv;
490 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
491 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
492 d += (double) (unsigned HOST_WIDE_INT) lv;
493 #endif /* REAL_ARITHMETIC */
494 d = real_value_truncate (mode, d);
495 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Case 3: unary op on a CONST_INT that fits in a host wide int.  The
   missing lines here were the per-opcode switch cases (NOT, NEG, ABS,
   FFS, TRUNCATE, ZERO_EXTEND, SIGN_EXTEND, ...).  */
499 if (GET_CODE (trueop) == CONST_INT
500 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
502 HOST_WIDE_INT arg0 = INTVAL (trueop);
516 val = (arg0 >= 0 ? arg0 : - arg0);
520 /* Don't use ffs here. Instead, get low order bit and then its
521 number. If arg0 is zero, this will return 0, as desired. */
522 arg0 &= GET_MODE_MASK (mode);
523 val = exact_log2 (arg0 & (- arg0)) + 1;
/* ZERO_EXTEND arm: mask to the source mode's width.  */
531 if (op_mode == VOIDmode)
533 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
535 /* If we were really extending the mode,
536 we would have to distinguish between zero-extension
537 and sign-extension. */
538 if (width != GET_MODE_BITSIZE (op_mode))
542 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
543 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
/* SIGN_EXTEND arm: mask, then propagate the source sign bit.  */
549 if (op_mode == VOIDmode)
551 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
553 /* If we were really extending the mode,
554 we would have to distinguish between zero-extension
555 and sign-extension. */
556 if (width != GET_MODE_BITSIZE (op_mode))
560 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
563 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
565 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
566 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
581 val = trunc_int_for_mode (val, mode);
583 return GEN_INT (val);
586 /* We can do some operations on integer CONST_DOUBLEs. Also allow
587 for a DImode operation on a CONST_INT. */
588 else if (GET_MODE (trueop) == VOIDmode && width <= HOST_BITS_PER_INT * 2
589 && (GET_CODE (trueop) == CONST_DOUBLE
590 || GET_CODE (trueop) == CONST_INT))
592 unsigned HOST_WIDE_INT l1, lv;
593 HOST_WIDE_INT h1, hv;
595 if (GET_CODE (trueop) == CONST_DOUBLE)
596 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
598 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
/* Per-opcode double-word cases (labels missing): NOT/NEG via
   neg_double, FFS by scanning low then high word, extensions...  */
608 neg_double (l1, h1, &lv, &hv);
613 neg_double (l1, h1, &lv, &hv);
621 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
623 lv = exact_log2 (l1 & (-l1)) + 1;
627 /* This is just a change-of-mode, so do nothing. */
632 if (op_mode == VOIDmode
633 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
637 lv = l1 & GET_MODE_MASK (op_mode);
641 if (op_mode == VOIDmode
642 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
646 lv = l1 & GET_MODE_MASK (op_mode);
647 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
648 && (lv & ((HOST_WIDE_INT) 1
649 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
650 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
652 hv = HWI_SIGN_EXTEND (lv);
663 return immed_double_const (lv, hv, mode);
666 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* Case 4: float-valued unary op on a float CONST_DOUBLE, folded via
   do_float_handler so FP traps abort the fold cleanly.  */
667 else if (GET_CODE (trueop) == CONST_DOUBLE
668 && GET_MODE_CLASS (mode) == MODE_FLOAT)
670 struct simplify_unary_real_args args;
671 args.operand = trueop;
674 args.want_integer = false;
676 if (do_float_handler (simplify_unary_real, (PTR) &args))
/* Case 5: integer-valued op (FIX/UNSIGNED_FIX) on a float constant.  */
682 else if (GET_CODE (trueop) == CONST_DOUBLE
683 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
684 && GET_MODE_CLASS (mode) == MODE_INT
685 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
687 struct simplify_unary_real_args args;
688 args.operand = trueop;
691 args.want_integer = true;
693 if (do_float_handler (simplify_unary_real, (PTR) &args))
699 /* This was formerly used only for non-IEEE float.
700 eggert@twinsun.com says it is safe for IEEE also. */
/* Non-constant simplifications follow (a per-opcode switch; its labels
   are among the missing lines).  */
703 enum rtx_code reversed;
704 /* There are some simplifications we can do even if the operands
709 /* (not (not X)) == X. */
710 if (GET_CODE (op) == NOT)
713 /* (not (eq X Y)) == (ne X Y), etc. */
714 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
715 && ((reversed = reversed_comparison_code (op, NULL_RTX))
717 return gen_rtx_fmt_ee (reversed,
718 op_mode, XEXP (op, 0), XEXP (op, 1));
722 /* (neg (neg X)) == X. */
723 if (GET_CODE (op) == NEG)
728 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
729 becomes just the MINUS if its mode is MODE. This allows
730 folding switch statements on machines using casesi (such as
732 if (GET_CODE (op) == TRUNCATE
733 && GET_MODE (XEXP (op, 0)) == mode
734 && GET_CODE (XEXP (op, 0)) == MINUS
735 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
736 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
/* Pointer-extension special cases: SIGN_EXTEND when pointers extend
   signed, ZERO_EXTEND when they extend unsigned.  */
739 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
740 if (! POINTERS_EXTEND_UNSIGNED
741 && mode == Pmode && GET_MODE (op) == ptr_mode
743 || (GET_CODE (op) == SUBREG
744 && GET_CODE (SUBREG_REG (op)) == REG
745 && REG_POINTER (SUBREG_REG (op))
746 && GET_MODE (SUBREG_REG (op)) == Pmode)))
747 return convert_memory_address (Pmode, op);
751 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
753 if (POINTERS_EXTEND_UNSIGNED > 0
754 && mode == Pmode && GET_MODE (op) == ptr_mode
756 || (GET_CODE (op) == SUBREG
757 && GET_CODE (SUBREG_REG (op)) == REG
758 && REG_POINTER (SUBREG_REG (op))
759 && GET_MODE (SUBREG_REG (op)) == Pmode)))
760 return convert_memory_address (Pmode, op);
/* NOTE (review): region damaged in extraction -- struct fields, the
   opcode switch, and braces are partially missing, and each surviving
   line still carries its original source line number.  Code left
   byte-for-byte as found; only comments added.  */
772 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
773 /* Subroutine of simplify_binary_operation, called via do_float_handler.
774 Handles simplification of binary ops on floating point values. */
775 struct simplify_binary_real_args
/* Fields visible below: trueop0/trueop1 (the constant operands), mode,
   and (used below) code and result -- TODO confirm full field list.  */
777 rtx trueop0, trueop1;
780 enum machine_mode mode;
784 simplify_binary_real (p)
787 REAL_VALUE_TYPE f0, f1, value;
788 struct simplify_binary_real_args *args =
789 (struct simplify_binary_real_args *) p;
/* Convert both CONST_DOUBLE operands and round them to MODE first, so
   the host-side arithmetic sees target-precision inputs.  */
791 REAL_VALUE_FROM_CONST_DOUBLE (f0, args->trueop0);
792 REAL_VALUE_FROM_CONST_DOUBLE (f1, args->trueop1);
793 f0 = real_value_truncate (args->mode, f0);
794 f1 = real_value_truncate (args->mode, f1);
796 #ifdef REAL_ARITHMETIC
797 #ifndef REAL_INFINITY
/* Without an infinity representation, division by zero cannot fold.  */
798 if (args->code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
804 REAL_ARITHMETIC (value, rtx_to_tree_code (args->code), f0, f1);
/* Non-REAL_ARITHMETIC fallback uses host double operators; SMIN/SMAX
   via MIN/MAX below.  */
818 #ifndef REAL_INFINITY
825 value = MIN (f0, f1);
828 value = MAX (f0, f1);
/* Round the host-side result back to target precision.  */
835 value = real_value_truncate (args->mode, value);
836 args->result = CONST_DOUBLE_FROM_REAL_VALUE (value, args->mode);
/* NOTE (review): struct field lines are missing here (extraction damage);
   code left byte-for-byte as found, comments only added.  */
840 /* Another subroutine called via do_float_handler. This one tests
841 the floating point value given against 2. and -1. */
842 struct simplify_binary_is2orm1_args
/* Fields used below: value (the CONST_DOUBLE input) and the two output
   flags is_2 / is_m1 -- TODO confirm against pristine source.  */
850 simplify_binary_is2orm1 (p)
854 struct simplify_binary_is2orm1_args *args =
855 (struct simplify_binary_is2orm1_args *) p;
857 REAL_VALUE_FROM_CONST_DOUBLE (d, args->value);
/* Set both flags; the caller uses them to turn x*2 into x+x and
   x*(-1) into -x.  */
858 args->is_2 = REAL_VALUES_EQUAL (d, dconst2);
859 args->is_m1 = REAL_VALUES_EQUAL (d, dconstm1);
862 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
863 and OP1. Return 0 if no simplification is possible.
865 Don't use this for relational operations such as EQ or LT.
866 Use simplify_relational_operation instead. */
868 simplify_binary_operation (code, mode, op0, op1)
870 enum machine_mode mode;
873 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
875 unsigned int width = GET_MODE_BITSIZE (mode);
877 rtx trueop0 = avoid_constant_pool_reference (op0);
878 rtx trueop1 = avoid_constant_pool_reference (op1);
880 /* Relational operations don't work here. We must know the mode
881 of the operands in order to do the comparison correctly.
882 Assuming a full word can give incorrect results.
883 Consider comparing 128 with -128 in QImode. */
885 if (GET_RTX_CLASS (code) == '<')
888 /* Make sure the constant is second. */
889 if (GET_RTX_CLASS (code) == 'c'
890 && swap_commutative_operands_p (trueop0, trueop1))
892 tem = op0, op0 = op1, op1 = tem;
893 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
896 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
897 if (GET_MODE_CLASS (mode) == MODE_FLOAT
898 && GET_CODE (trueop0) == CONST_DOUBLE
899 && GET_CODE (trueop1) == CONST_DOUBLE
900 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
902 struct simplify_binary_real_args args;
903 args.trueop0 = trueop0;
904 args.trueop1 = trueop1;
908 if (do_float_handler (simplify_binary_real, (PTR) &args))
912 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
914 /* We can fold some multi-word operations. */
915 if (GET_MODE_CLASS (mode) == MODE_INT
916 && width == HOST_BITS_PER_WIDE_INT * 2
917 && (GET_CODE (trueop0) == CONST_DOUBLE
918 || GET_CODE (trueop0) == CONST_INT)
919 && (GET_CODE (trueop1) == CONST_DOUBLE
920 || GET_CODE (trueop1) == CONST_INT))
922 unsigned HOST_WIDE_INT l1, l2, lv;
923 HOST_WIDE_INT h1, h2, hv;
925 if (GET_CODE (trueop0) == CONST_DOUBLE)
926 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
928 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
930 if (GET_CODE (trueop1) == CONST_DOUBLE)
931 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
933 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
938 /* A - B == A + (-B). */
939 neg_double (l2, h2, &lv, &hv);
942 /* .. fall through ... */
945 add_double (l1, h1, l2, h2, &lv, &hv);
949 mul_double (l1, h1, l2, h2, &lv, &hv);
952 case DIV: case MOD: case UDIV: case UMOD:
953 /* We'd need to include tree.h to do this and it doesn't seem worth
958 lv = l1 & l2, hv = h1 & h2;
962 lv = l1 | l2, hv = h1 | h2;
966 lv = l1 ^ l2, hv = h1 ^ h2;
972 && ((unsigned HOST_WIDE_INT) l1
973 < (unsigned HOST_WIDE_INT) l2)))
982 && ((unsigned HOST_WIDE_INT) l1
983 > (unsigned HOST_WIDE_INT) l2)))
990 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
992 && ((unsigned HOST_WIDE_INT) l1
993 < (unsigned HOST_WIDE_INT) l2)))
1000 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1002 && ((unsigned HOST_WIDE_INT) l1
1003 > (unsigned HOST_WIDE_INT) l2)))
1009 case LSHIFTRT: case ASHIFTRT:
1011 case ROTATE: case ROTATERT:
1012 #ifdef SHIFT_COUNT_TRUNCATED
1013 if (SHIFT_COUNT_TRUNCATED)
1014 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1017 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1020 if (code == LSHIFTRT || code == ASHIFTRT)
1021 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1023 else if (code == ASHIFT)
1024 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1025 else if (code == ROTATE)
1026 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1027 else /* code == ROTATERT */
1028 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1035 return immed_double_const (lv, hv, mode);
1038 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1039 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1041 /* Even if we can't compute a constant result,
1042 there are some cases worth simplifying. */
1047 /* In IEEE floating point, x+0 is not the same as x. Similarly
1048 for the other optimizations below. */
1049 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1050 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
1053 if (trueop1 == CONST0_RTX (mode))
1056 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
1057 if (GET_CODE (op0) == NEG)
1058 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1059 else if (GET_CODE (op1) == NEG)
1060 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1062 /* (~a) + 1 -> -a */
1063 if (INTEGRAL_MODE_P (mode)
1064 && GET_CODE (op0) == NOT
1065 && trueop1 == const1_rtx)
1066 return gen_rtx_NEG (mode, XEXP (op0, 0));
1068 /* Handle both-operands-constant cases. We can only add
1069 CONST_INTs to constants since the sum of relocatable symbols
1070 can't be handled by most assemblers. Don't add CONST_INT
1071 to CONST_INT since overflow won't be computed properly if wider
1072 than HOST_BITS_PER_WIDE_INT. */
1074 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1075 && GET_CODE (op1) == CONST_INT)
1076 return plus_constant (op0, INTVAL (op1));
1077 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1078 && GET_CODE (op0) == CONST_INT)
1079 return plus_constant (op1, INTVAL (op0));
1081 /* See if this is something like X * C - X or vice versa or
1082 if the multiplication is written as a shift. If so, we can
1083 distribute and make a new multiply, shift, or maybe just
1084 have X (if C is 2 in the example above). But don't make
1085 real multiply if we didn't have one before. */
1087 if (! FLOAT_MODE_P (mode))
1089 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1090 rtx lhs = op0, rhs = op1;
1093 if (GET_CODE (lhs) == NEG)
1094 coeff0 = -1, lhs = XEXP (lhs, 0);
1095 else if (GET_CODE (lhs) == MULT
1096 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1098 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1101 else if (GET_CODE (lhs) == ASHIFT
1102 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1103 && INTVAL (XEXP (lhs, 1)) >= 0
1104 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1106 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1107 lhs = XEXP (lhs, 0);
1110 if (GET_CODE (rhs) == NEG)
1111 coeff1 = -1, rhs = XEXP (rhs, 0);
1112 else if (GET_CODE (rhs) == MULT
1113 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1115 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1118 else if (GET_CODE (rhs) == ASHIFT
1119 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1120 && INTVAL (XEXP (rhs, 1)) >= 0
1121 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1123 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1124 rhs = XEXP (rhs, 0);
1127 if (rtx_equal_p (lhs, rhs))
1129 tem = simplify_gen_binary (MULT, mode, lhs,
1130 GEN_INT (coeff0 + coeff1));
1131 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1135 /* If one of the operands is a PLUS or a MINUS, see if we can
1136 simplify this by the associative law.
1137 Don't use the associative law for floating point.
1138 The inaccuracy makes it nonassociative,
1139 and subtle programs can break if operations are associated. */
1141 if (INTEGRAL_MODE_P (mode)
1142 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1143 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1144 || (GET_CODE (op0) == CONST
1145 && GET_CODE (XEXP (op0, 0)) == PLUS)
1146 || (GET_CODE (op1) == CONST
1147 && GET_CODE (XEXP (op1, 0)) == PLUS))
1148 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1154 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1155 using cc0, in which case we want to leave it as a COMPARE
1156 so we can distinguish it from a register-register-copy.
1158 In IEEE floating point, x-0 is not the same as x. */
1160 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1161 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1162 && trueop1 == CONST0_RTX (mode))
1166 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1167 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1168 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1169 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1171 rtx xop00 = XEXP (op0, 0);
1172 rtx xop10 = XEXP (op1, 0);
1175 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1177 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1178 && GET_MODE (xop00) == GET_MODE (xop10)
1179 && REGNO (xop00) == REGNO (xop10)
1180 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1181 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1188 /* None of these optimizations can be done for IEEE
1190 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1191 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
1194 /* We can't assume x-x is 0 even with non-IEEE floating point,
1195 but since it is zero except in very strange circumstances, we
1196 will treat it as zero with -funsafe-math-optimizations. */
1197 if (rtx_equal_p (trueop0, trueop1)
1198 && ! side_effects_p (op0)
1199 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1200 return CONST0_RTX (mode);
1202 /* Change subtraction from zero into negation. */
1203 if (trueop0 == CONST0_RTX (mode))
1204 return gen_rtx_NEG (mode, op1);
1206 /* (-1 - a) is ~a. */
1207 if (trueop0 == constm1_rtx)
1208 return gen_rtx_NOT (mode, op1);
1210 /* Subtracting 0 has no effect. */
1211 if (trueop1 == CONST0_RTX (mode))
1214 /* See if this is something like X * C - X or vice versa or
1215 if the multiplication is written as a shift. If so, we can
1216 distribute and make a new multiply, shift, or maybe just
1217 have X (if C is 2 in the example above). But don't make
1218 real multiply if we didn't have one before. */
1220 if (! FLOAT_MODE_P (mode))
1222 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1223 rtx lhs = op0, rhs = op1;
1226 if (GET_CODE (lhs) == NEG)
1227 coeff0 = -1, lhs = XEXP (lhs, 0);
1228 else if (GET_CODE (lhs) == MULT
1229 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1231 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1234 else if (GET_CODE (lhs) == ASHIFT
1235 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1236 && INTVAL (XEXP (lhs, 1)) >= 0
1237 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1239 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1240 lhs = XEXP (lhs, 0);
1243 if (GET_CODE (rhs) == NEG)
1244 coeff1 = - 1, rhs = XEXP (rhs, 0);
1245 else if (GET_CODE (rhs) == MULT
1246 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1248 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1251 else if (GET_CODE (rhs) == ASHIFT
1252 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1253 && INTVAL (XEXP (rhs, 1)) >= 0
1254 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1256 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1257 rhs = XEXP (rhs, 0);
1260 if (rtx_equal_p (lhs, rhs))
1262 tem = simplify_gen_binary (MULT, mode, lhs,
1263 GEN_INT (coeff0 - coeff1));
1264 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1268 /* (a - (-b)) -> (a + b). */
1269 if (GET_CODE (op1) == NEG)
1270 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1272 /* If one of the operands is a PLUS or a MINUS, see if we can
1273 simplify this by the associative law.
1274 Don't use the associative law for floating point.
1275 The inaccuracy makes it nonassociative,
1276 and subtle programs can break if operations are associated. */
1278 if (INTEGRAL_MODE_P (mode)
1279 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1280 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1281 || (GET_CODE (op0) == CONST
1282 && GET_CODE (XEXP (op0, 0)) == PLUS)
1283 || (GET_CODE (op1) == CONST
1284 && GET_CODE (XEXP (op1, 0)) == PLUS))
1285 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1288 /* Don't let a relocatable value get a negative coeff. */
1289 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1290 return simplify_gen_binary (PLUS, mode,
1292 neg_const_int (mode, op1));
1294 /* (x - (x & y)) -> (x & ~y) */
1295 if (GET_CODE (op1) == AND)
1297 if (rtx_equal_p (op0, XEXP (op1, 0)))
1298 return simplify_gen_binary (AND, mode, op0,
1299 gen_rtx_NOT (mode, XEXP (op1, 1)));
1300 if (rtx_equal_p (op0, XEXP (op1, 1)))
1301 return simplify_gen_binary (AND, mode, op0,
1302 gen_rtx_NOT (mode, XEXP (op1, 0)));
1307 if (trueop1 == constm1_rtx)
1309 tem = simplify_unary_operation (NEG, mode, op0, mode);
1311 return tem ? tem : gen_rtx_NEG (mode, op0);
1314 /* In IEEE floating point, x*0 is not always 0. */
1315 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1316 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1317 && trueop1 == CONST0_RTX (mode)
1318 && ! side_effects_p (op0))
1321 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1322 However, ANSI says we can drop signals,
1323 so we can do this anyway. */
1324 if (trueop1 == CONST1_RTX (mode))
1327 /* Convert multiply by constant power of two into shift unless
1328 we are still generating RTL. This test is a kludge. */
1329 if (GET_CODE (trueop1) == CONST_INT
1330 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1331 /* If the mode is larger than the host word size, and the
1332 uppermost bit is set, then this isn't a power of two due
1333 to implicit sign extension. */
1334 && (width <= HOST_BITS_PER_WIDE_INT
1335 || val != HOST_BITS_PER_WIDE_INT - 1)
1336 && ! rtx_equal_function_value_matters)
1337 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1339 if (GET_CODE (trueop1) == CONST_DOUBLE
1340 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT)
1342 struct simplify_binary_is2orm1_args args;
1344 args.value = trueop1;
1345 if (! do_float_handler (simplify_binary_is2orm1, (PTR) &args))
1348 /* x*2 is x+x and x*(-1) is -x */
1349 if (args.is_2 && GET_MODE (op0) == mode)
1350 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1352 else if (args.is_m1 && GET_MODE (op0) == mode)
1353 return gen_rtx_NEG (mode, op0);
1358 if (trueop1 == const0_rtx)
1360 if (GET_CODE (trueop1) == CONST_INT
1361 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1362 == GET_MODE_MASK (mode)))
1364 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1366 /* A | (~A) -> -1 */
1367 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1368 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1369 && ! side_effects_p (op0)
1370 && GET_MODE_CLASS (mode) != MODE_CC)
1375 if (trueop1 == const0_rtx)
1377 if (GET_CODE (trueop1) == CONST_INT
1378 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1379 == GET_MODE_MASK (mode)))
1380 return gen_rtx_NOT (mode, op0);
1381 if (trueop0 == trueop1 && ! side_effects_p (op0)
1382 && GET_MODE_CLASS (mode) != MODE_CC)
1387 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1389 if (GET_CODE (trueop1) == CONST_INT
1390 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1391 == GET_MODE_MASK (mode)))
1393 if (trueop0 == trueop1 && ! side_effects_p (op0)
1394 && GET_MODE_CLASS (mode) != MODE_CC)
1397 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1398 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1399 && ! side_effects_p (op0)
1400 && GET_MODE_CLASS (mode) != MODE_CC)
1405 /* Convert divide by power of two into shift (divide by 1 handled
1407 if (GET_CODE (trueop1) == CONST_INT
1408 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1409 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1411 /* ... fall through ... */
1414 if (trueop1 == CONST1_RTX (mode))
1416 /* On some platforms DIV uses narrower mode than its
1418 rtx x = gen_lowpart_common (mode, op0);
1421 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1422 return gen_lowpart_SUBREG (mode, op0);
1427 /* In IEEE floating point, 0/x is not always 0. */
1428 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1429 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1430 && trueop0 == CONST0_RTX (mode)
1431 && ! side_effects_p (op1))
1434 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1435 /* Change division by a constant into multiplication. Only do
1436 this with -funsafe-math-optimizations. */
1437 else if (GET_CODE (trueop1) == CONST_DOUBLE
1438 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1439 && trueop1 != CONST0_RTX (mode)
1440 && flag_unsafe_math_optimizations)
1443 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1445 if (! REAL_VALUES_EQUAL (d, dconst0))
1447 #if defined (REAL_ARITHMETIC)
1448 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1449 return gen_rtx_MULT (mode, op0,
1450 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1453 gen_rtx_MULT (mode, op0,
1454 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1462 /* Handle modulus by power of two (mod with 1 handled below). */
1463 if (GET_CODE (trueop1) == CONST_INT
1464 && exact_log2 (INTVAL (trueop1)) > 0)
1465 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1467 /* ... fall through ... */
1470 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1471 && ! side_effects_p (op0) && ! side_effects_p (op1))
1477 /* Rotating ~0 always results in ~0. */
1478 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1479 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1480 && ! side_effects_p (op1))
1483 /* ... fall through ... */
1488 if (trueop1 == const0_rtx)
1490 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1495 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1496 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1497 && ! side_effects_p (op0))
1499 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1504 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1505 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1506 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1507 && ! side_effects_p (op0))
1509 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1514 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1516 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1521 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1523 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1534 /* Get the integer argument values in two forms:
1535 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1537 arg0 = INTVAL (trueop0);
1538 arg1 = INTVAL (trueop1);
1540 if (width < HOST_BITS_PER_WIDE_INT)
1542 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1543 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1546 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1547 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1550 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1551 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1559 /* Compute the value of the arithmetic. */
1564 val = arg0s + arg1s;
1568 val = arg0s - arg1s;
1572 val = arg0s * arg1s;
1577 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1580 val = arg0s / arg1s;
1585 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1588 val = arg0s % arg1s;
1593 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1596 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1601 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1604 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1620 /* If shift count is undefined, don't fold it; let the machine do
1621 what it wants. But truncate it if the machine will do that. */
1625 #ifdef SHIFT_COUNT_TRUNCATED
1626 if (SHIFT_COUNT_TRUNCATED)
1630 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1637 #ifdef SHIFT_COUNT_TRUNCATED
1638 if (SHIFT_COUNT_TRUNCATED)
1642 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1649 #ifdef SHIFT_COUNT_TRUNCATED
1650 if (SHIFT_COUNT_TRUNCATED)
1654 val = arg0s >> arg1;
1656 /* Bootstrap compiler may not have sign extended the right shift.
1657 Manually extend the sign to insure bootstrap cc matches gcc. */
1658 if (arg0s < 0 && arg1 > 0)
1659 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1668 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1669 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1677 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1678 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1682 /* Do nothing here. */
1686 val = arg0s <= arg1s ? arg0s : arg1s;
1690 val = ((unsigned HOST_WIDE_INT) arg0
1691 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1695 val = arg0s > arg1s ? arg0s : arg1s;
1699 val = ((unsigned HOST_WIDE_INT) arg0
1700 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1707 val = trunc_int_for_mode (val, mode);
1709 return GEN_INT (val);
1712 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1716 Rather than test for specific cases, we do this by a brute-force method
1716 and do all possible simplifications until no more changes occur. Then
1717 we rebuild the operation. */
/* One flattened term of a PLUS/MINUS tree handled by simplify_plus_minus:
   the operand rtx plus a flag recording whether it enters the sum
   negated.  (The field list itself is elided from this view.)  */
1719 struct simplify_plus_minus_op_data
/* qsort callback: order two simplify_plus_minus_op_data entries so that
   the operand with the higher commutative_operand_precedence sorts
   first (a negative result places D1 before D2).  */
1726 simplify_plus_minus_op_data_cmp (p1, p2)
1730 const struct simplify_plus_minus_op_data *d1 = p1;
1731 const struct simplify_plus_minus_op_data *d2 = p2;
1733 return (commutative_operand_precedence (d2->op)
1734 - commutative_operand_precedence (d1->op));
/* CODE is PLUS or MINUS; MODE is the mode of the result; OP0 and OP1
   are the operands.  Flatten the nested PLUS/MINUS/NEG/NOT/CONST tree
   into a flat array of up to 8 (operand, negated) entries, combine the
   entries pairwise until a fixed point is reached, then rebuild a
   canonical sum.  Returns 0 when no net simplification was achieved
   (see the operand-count test near the end).  */
1738 simplify_plus_minus (code, mode, op0, op1)
1740 enum machine_mode mode;
1743 struct simplify_plus_minus_op_data ops[8];
1745 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1746 int first, negate, changed;
1749 memset ((char *) ops, 0, sizeof ops);
1751 /* Set up the two operands and then expand them until nothing has been
1752 changed. If we run out of room in our array, give up; this should
1753 almost never happen. */
/* For a MINUS the second operand enters the sum negated.  */
1758 ops[1].neg = (code == MINUS);
1764 for (i = 0; i < n_ops; i++)
1766 rtx this_op = ops[i].op;
1767 int this_neg = ops[i].neg;
1768 enum rtx_code this_code = GET_CODE (this_op);
/* PLUS/MINUS: append the second arm as a new entry (its sign is the
   entry's own sign XORed with MINUS) and rescan the first arm in
   place.  */
1777 ops[n_ops].op = XEXP (this_op, 1);
1778 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1781 ops[i].op = XEXP (this_op, 0);
/* NEG: strip the NEG and flip the entry's sign.  */
1787 ops[i].op = XEXP (this_op, 0);
1788 ops[i].neg = ! this_neg;
/* CONST: look through the wrapper.  */
1793 ops[i].op = XEXP (this_op, 0);
1799 /* ~a -> (-a - 1) */
1802 ops[n_ops].op = constm1_rtx;
1803 ops[n_ops++].neg = this_neg;
1804 ops[i].op = XEXP (this_op, 0);
1805 ops[i].neg = !this_neg;
/* A negated CONST_INT: fold the sign into the constant itself.  */
1813 ops[i].op = neg_const_int (mode, this_op);
1826 /* If we only have two operands, we can't do anything. */
1830 /* Now simplify each pair of operands until nothing changes. The first
1831 time through just simplify constants against each other. */
1838 for (i = 0; i < n_ops - 1; i++)
1839 for (j = i + 1; j < n_ops; j++)
1841 rtx lhs = ops[i].op, rhs = ops[j].op;
1842 int lneg = ops[i].neg, rneg = ops[j].neg;
1844 if (lhs != 0 && rhs != 0
1845 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
/* Combine the pair; NCODE becomes MINUS when the two signs differ
   (that branch is elided from this view), and the swaps below put
   the operands in canonical order first.  */
1847 enum rtx_code ncode = PLUS;
1853 tem = lhs, lhs = rhs, rhs = tem;
1855 else if (swap_commutative_operands_p (lhs, rhs))
1856 tem = lhs, lhs = rhs, rhs = tem;
1858 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1860 /* Reject "simplifications" that just wrap the two
1861 arguments in a CONST. Failure to do so can result
1862 in infinite recursion with simplify_binary_operation
1863 when it calls us to simplify CONST operations. */
1865 && ! (GET_CODE (tem) == CONST
1866 && GET_CODE (XEXP (tem, 0)) == ncode
1867 && XEXP (XEXP (tem, 0), 0) == lhs
1868 && XEXP (XEXP (tem, 0), 1) == rhs)
1869 /* Don't allow -x + -1 -> ~x simplifications in the
1870 first pass. This allows us the chance to combine
1871 the -1 with other constants. */
1873 && GET_CODE (tem) == NOT
1874 && XEXP (tem, 0) == rhs))
/* Normalize the combined term: hoist a NEG into the entry's sign
   flag, and fold a pending negation into a CONST_INT at once.  */
1877 if (GET_CODE (tem) == NEG)
1878 tem = XEXP (tem, 0), lneg = !lneg;
1879 if (GET_CODE (tem) == CONST_INT && lneg)
1880 tem = neg_const_int (mode, tem), lneg = 0;
/* Slot J was merged into slot I; mark it empty for repacking.  */
1884 ops[j].op = NULL_RTX;
1894 /* Pack all the operands to the lower-numbered entries. */
1895 for (i = 0, j = 0; j < n_ops; j++)
1900 /* Sort the operations based on swap_commutative_operands_p. */
1901 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp)
1903 /* We suppressed creation of trivial CONST expressions in the
1904 combination loop to avoid recursion. Create one manually now.
1905 The combination loop should have ensured that there is exactly
1906 one CONST_INT, and the sort will have ensured that it is last
1907 in the array and that any other constant will be next-to-last. */
1910 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
1911 && CONSTANT_P (ops[n_ops - 2].op))
1913 rtx value = ops[n_ops - 1].op;
/* When the trailing constants enter the sum with opposite signs,
   negate the CONST_INT before folding it into the other constant.  */
1914 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
1915 value = neg_const_int (mode, value);
1916 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
1920 /* Count the number of CONSTs that we generated. */
1922 for (i = 0; i < n_ops; i++)
1923 if (GET_CODE (ops[i].op) == CONST)
1926 /* Give up if we didn't reduce the number of operands we had. Make
1927 sure we count a CONST as two operands. If we have the same
1928 number of operands, but have made more CONSTs than before, this
1929 is also an improvement, so accept it. */
1930 if (n_ops + n_consts > input_ops
1931 || (n_ops + n_consts == input_ops && n_consts <= input_consts))
1934 /* Put a non-negated operand first. If there aren't any, make all
1935 operands positive and negate the whole thing later. */
1938 for (i = 0; i < n_ops && ops[i].neg; i++)
1942 for (i = 0; i < n_ops; i++)
1954 /* Now make the result by performing the requested operations. */
/* Left-fold the remaining entries into a chain of PLUS/MINUS rtxes;
   NEGATE wraps the whole chain when every operand was negated.  */
1956 for (i = 1; i < n_ops; i++)
1957 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
1958 mode, result, ops[i].op);
1960 return negate ? gen_rtx_NEG (mode, result) : result;
1965 rtx op0, op1; /* Input */
1966 int equal, op0lt, op1lt; /* Output */
/* Helper run under do_float_handler: compare the two CONST_DOUBLE
   inputs of *DATA (a struct cfc_args) using real-number arithmetic and
   fill in the equal/op0lt/op1lt output fields.  UNORDERED is set to 1
   up front so it stays 1 both if reading a value traps and if either
   operand is a NaN (the NaN branch bails out before the outputs are
   written -- its return statement is elided from this view).  */
1971 check_fold_consts (data)
1974 struct cfc_args *args = (struct cfc_args *) data;
1975 REAL_VALUE_TYPE d0, d1;
1977 /* We may possibly raise an exception while reading the value. */
1978 args->unordered = 1;
1979 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1980 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1982 /* Comparisons of Inf versus Inf are ordered. */
/* NaN on either side: leave args->unordered == 1.  */
1983 if (REAL_VALUE_ISNAN (d0)
1984 || REAL_VALUE_ISNAN (d1))
1986 args->equal = REAL_VALUES_EQUAL (d0, d1);
1987 args->op0lt = REAL_VALUES_LESS (d0, d1);
1988 args->op1lt = REAL_VALUES_LESS (d1, d0);
1989 args->unordered = 0;
1992 /* Like simplify_binary_operation except used for relational operators.
1993 MODE is the mode of the operands, not that of the result. If MODE
1994 is VOIDmode, both operands must also be VOIDmode and we compare the
1995 operands in "infinite precision".
1997 If no simplification is possible, this function returns zero. Otherwise,
1998 it returns either const_true_rtx or const0_rtx. */
/* CODE is the comparison code; MODE the mode of OP0/OP1 (VOIDmode for
   infinite-precision integer compares); see the comment block above
   for the full contract.  */
2001 simplify_relational_operation (code, mode, op0, op1)
2003 enum machine_mode mode;
2006 int equal, op0lt, op0ltu, op1lt, op1ltu;
/* VOIDmode requires both operands to be VOIDmode as well.  */
2011 if (mode == VOIDmode
2012 && (GET_MODE (op0) != VOIDmode
2013 || GET_MODE (op1) != VOIDmode))
2016 /* If op0 is a compare, extract the comparison arguments from it. */
2017 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2018 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
/* TRUEOP0/TRUEOP1 see through constant-pool references while
   OP0/OP1 keep the original rtxes for rebuilding.  */
2020 trueop0 = avoid_constant_pool_reference (op0);
2021 trueop1 = avoid_constant_pool_reference (op1);
2023 /* We can't simplify MODE_CC values since we don't know what the
2024 actual comparison is. */
2025 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
2032 /* Make sure the constant is second. */
/* Swapping the operands requires swapping the condition to match.  */
2033 if (swap_commutative_operands_p (trueop0, trueop1))
2035 tem = op0, op0 = op1, op1 = tem;
2036 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2037 code = swap_condition (code);
2040 /* For integer comparisons of A and B maybe we can simplify A - B and can
2041 then simplify a comparison of that with zero. If A and B are both either
2042 a register or a CONST_INT, this can't help; testing for these cases will
2043 prevent infinite recursion here and speed things up.
2045 If CODE is an unsigned comparison, then we can never do this optimization,
2046 because it gives an incorrect result if the subtraction wraps around zero.
2047 ANSI C defines unsigned operations such that they never overflow, and
2048 thus such cases can not be ignored. */
2050 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2051 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2052 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2053 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2054 && code != GTU && code != GEU && code != LTU && code != LEU)
2055 return simplify_relational_operation (signed_condition (code),
2056 mode, tem, const0_rtx);
/* -funsafe-math-optimizations assumes no NaNs, so ORDERED is always
   true and UNORDERED always false.  */
2058 if (flag_unsafe_math_optimizations && code == ORDERED)
2059 return const_true_rtx;
2061 if (flag_unsafe_math_optimizations && code == UNORDERED)
2064 /* For non-IEEE floating-point, if the two operands are equal, we know the
2066 if (rtx_equal_p (trueop0, trueop1)
2067 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
2068 || ! FLOAT_MODE_P (GET_MODE (trueop0))
2069 || flag_unsafe_math_optimizations))
2070 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2072 /* If the operands are floating-point constants, see if we can fold
2074 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
2075 else if (GET_CODE (trueop0) == CONST_DOUBLE
2076 && GET_CODE (trueop1) == CONST_DOUBLE
2077 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2079 struct cfc_args args;
2081 /* Setup input for check_fold_consts() */
/* do_float_handler returning zero means the comparison trapped or
   is unordered; handling of that case is elided from this view.  */
2086 if (!do_float_handler (check_fold_consts, (PTR) &args))
2099 return const_true_rtx;
2112 /* Receive output from check_fold_consts() */
2114 op0lt = op0ltu = args.op0lt;
2115 op1lt = op1ltu = args.op1lt;
2117 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
2119 /* Otherwise, see if the operands are both integers. */
2120 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2121 && (GET_CODE (trueop0) == CONST_DOUBLE
2122 || GET_CODE (trueop0) == CONST_INT)
2123 && (GET_CODE (trueop1) == CONST_DOUBLE
2124 || GET_CODE (trueop1) == CONST_INT))
2126 int width = GET_MODE_BITSIZE (mode);
/* Signed (l0s/h0s...) and unsigned (l0u/h0u...) views of each
   double-word constant, compared word by word below.  */
2127 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2128 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2130 /* Get the two words comprising each integer constant. */
2131 if (GET_CODE (trueop0) == CONST_DOUBLE
2133 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2134 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2138 l0u = l0s = INTVAL (trueop0);
2139 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2142 if (GET_CODE (trueop1) == CONST_DOUBLE
2144 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2145 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2149 l1u = l1s = INTVAL (trueop1);
2150 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2153 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2154 we have to sign or zero-extend the values. */
2155 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2157 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2158 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2160 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2161 l0s |= ((HOST_WIDE_INT) (-1) << width);
2163 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2164 l1s |= ((HOST_WIDE_INT) (-1) << width);
2166 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2167 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
/* Double-word compare: high words decide, low words (always
   compared unsigned) break ties.  */
2169 equal = (h0u == h1u && l0u == l1u);
2170 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2171 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2172 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2173 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2176 /* Otherwise, there are some code-specific tests we can make. */
2182 /* References to the frame plus a constant or labels cannot
2183 be zero, but a SYMBOL_REF can due to #pragma weak. */
2184 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2185 || GET_CODE (trueop0) == LABEL_REF)
2186 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2187 /* On some machines, the ap reg can be 0 sometimes. */
2188 && op0 != arg_pointer_rtx
2195 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2196 || GET_CODE (trueop0) == LABEL_REF)
2197 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2198 && op0 != arg_pointer_rtx
2201 return const_true_rtx;
2205 /* Unsigned values are never negative. */
2206 if (trueop1 == const0_rtx)
2207 return const_true_rtx;
2211 if (trueop1 == const0_rtx)
2216 /* Unsigned values are never greater than the largest
2218 if (GET_CODE (trueop1) == CONST_INT
2219 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2220 && INTEGRAL_MODE_P (mode))
2221 return const_true_rtx;
2225 if (GET_CODE (trueop1) == CONST_INT
2226 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2227 && INTEGRAL_MODE_P (mode))
2238 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2244 return equal ? const_true_rtx : const0_rtx;
2247 return ! equal ? const_true_rtx : const0_rtx;
2250 return op0lt ? const_true_rtx : const0_rtx;
2253 return op1lt ? const_true_rtx : const0_rtx;
2255 return op0ltu ? const_true_rtx : const0_rtx;
2257 return op1ltu ? const_true_rtx : const0_rtx;
2260 return equal || op0lt ? const_true_rtx : const0_rtx;
2263 return equal || op1lt ? const_true_rtx : const0_rtx;
2265 return equal || op0ltu ? const_true_rtx : const0_rtx;
2267 return equal || op1ltu ? const_true_rtx : const0_rtx;
2269 return const_true_rtx;
2277 /* Simplify CODE, an operation with result mode MODE and three operands,
2278 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2279 a constant. Return 0 if no simplifications is possible. */
/* See the comment block above for the contract; the visible cases
   handle SIGN_EXTRACT/ZERO_EXTRACT of constants and IF_THEN_ELSE.  */
2282 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2284 enum machine_mode mode, op0_mode;
2287 unsigned int width = GET_MODE_BITSIZE (mode);
2289 /* VOIDmode means "infinite" precision. */
2291 width = HOST_BITS_PER_WIDE_INT;
/* SIGN_EXTRACT/ZERO_EXTRACT of a constant: OP1 is the field width in
   bits, OP2 the starting bit position.  */
2297 if (GET_CODE (op0) == CONST_INT
2298 && GET_CODE (op1) == CONST_INT
2299 && GET_CODE (op2) == CONST_INT
2300 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2301 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2303 /* Extracting a bit-field from a constant */
2304 HOST_WIDE_INT val = INTVAL (op0);
/* With big-endian bit numbering OP2 counts from the most
   significant end, so convert before shifting.  */
2306 if (BITS_BIG_ENDIAN)
2307 val >>= (GET_MODE_BITSIZE (op0_mode)
2308 - INTVAL (op2) - INTVAL (op1));
2310 val >>= INTVAL (op2);
2312 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2314 /* First zero-extend. */
2315 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2316 /* If desired, propagate sign bit. */
2317 if (code == SIGN_EXTRACT
2318 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2319 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2322 /* Clear the bits that don't belong in our mode,
2323 unless they and our sign bit are all one.
2324 So we get either a reasonable negative value or a reasonable
2325 unsigned value for this mode. */
2326 if (width < HOST_BITS_PER_WIDE_INT
2327 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2328 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2329 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2331 return GEN_INT (val);
/* IF_THEN_ELSE with a constant condition: pick the arm directly.  */
2336 if (GET_CODE (op0) == CONST_INT)
2337 return op0 != const0_rtx ? op1 : op2;
2339 /* Convert a == b ? b : a to "a". */
2340 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2341 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2342 && rtx_equal_p (XEXP (op0, 0), op1)
2343 && rtx_equal_p (XEXP (op0, 1), op2))
2345 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2346 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2347 && rtx_equal_p (XEXP (op0, 1), op1)
2348 && rtx_equal_p (XEXP (op0, 0), op2))
/* The condition is a comparison: try folding the comparison itself,
   then see whether the arms let us drop the IF_THEN_ELSE.  */
2350 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2352 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2353 ? GET_MODE (XEXP (op0, 1))
2354 : GET_MODE (XEXP (op0, 0)));
2356 if (cmp_mode == VOIDmode)
2357 cmp_mode = op0_mode;
2358 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2359 XEXP (op0, 0), XEXP (op0, 1));
2361 /* See if any simplifications were possible. */
2362 if (temp == const0_rtx)
2364 else if (temp == const1_rtx)
2369 /* Look for happy constants in op1 and op2. */
2370 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2372 HOST_WIDE_INT t = INTVAL (op1);
2373 HOST_WIDE_INT f = INTVAL (op2);
/* cond ? STORE_FLAG_VALUE : 0 is just the comparison itself;
   the reversed arms need the reversed comparison code.  */
2375 if (t == STORE_FLAG_VALUE && f == 0)
2376 code = GET_CODE (op0);
2377 else if (t == 0 && f == STORE_FLAG_VALUE)
2380 tmp = reversed_comparison_code (op0, NULL_RTX);
2388 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2400 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2401 Return 0 if no simplifications is possible. */
/* OUTERMODE is the mode of the requested subreg, OP the inner value,
   INNERMODE its mode, and BYTE the SUBREG_BYTE offset.  Returns the
   simplified rtx or 0 when no simplification applies.  */
2403 simplify_subreg (outermode, op, innermode, byte)
2406 enum machine_mode outermode, innermode;
2408 /* Little bit of sanity checking. */
2409 if (innermode == VOIDmode || outermode == VOIDmode
2410 || innermode == BLKmode || outermode == BLKmode)
2413 if (GET_MODE (op) != innermode
2414 && GET_MODE (op) != VOIDmode)
2417 if (byte % GET_MODE_SIZE (outermode)
2418 || byte >= GET_MODE_SIZE (innermode))
/* Identity subreg: nothing to do.  */
2421 if (outermode == innermode && !byte)
2424 /* Attempt to simplify constant to non-SUBREG expression. */
2425 if (CONSTANT_P (op))
2428 unsigned HOST_WIDE_INT val = 0;
2430 /* ??? This code is partly redundant with code below, but can handle
2431 the subregs of floats and similar corner cases.
2432 Later we should move all simplification code here and rewrite
2433 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2434 using SIMPLIFY_SUBREG. */
2435 if (subreg_lowpart_offset (outermode, innermode) == byte)
2437 rtx new = gen_lowpart_if_possible (outermode, op);
2442 /* Similar comment as above applies here. */
2443 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2444 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2445 && GET_MODE_CLASS (outermode) == MODE_INT)
2447 rtx new = constant_subword (op,
2448 (byte / UNITS_PER_WORD),
/* From here on OFFSET is measured in bits, not bytes.  */
2454 offset = byte * BITS_PER_UNIT;
2455 switch (GET_CODE (op))
2458 if (GET_MODE (op) != VOIDmode)
2461 /* We can't handle this case yet. */
2462 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
/* CONST_DOUBLE: pick the high or low host word depending on the
   target's word/byte endianness, then reduce to the integer case.  */
2465 part = offset >= HOST_BITS_PER_WIDE_INT;
2466 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2467 && BYTES_BIG_ENDIAN)
2468 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2469 && WORDS_BIG_ENDIAN))
2471 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2472 offset %= HOST_BITS_PER_WIDE_INT;
2474 /* We've already picked the word we want from a double, so
2475 pretend this is actually an integer. */
2476 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2480 if (GET_CODE (op) == CONST_INT)
2483 /* We don't handle synthesizing of non-integral constants yet. */
2484 if (GET_MODE_CLASS (outermode) != MODE_INT)
/* Convert a memory-layout byte offset into a shift count on
   big-endian targets.  */
2487 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2489 if (WORDS_BIG_ENDIAN)
2490 offset = (GET_MODE_BITSIZE (innermode)
2491 - GET_MODE_BITSIZE (outermode) - offset);
2492 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2493 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD
2494 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2495 - 2 * (offset % BITS_PER_WORD));
/* Past the host word: the result is just sign-fill.  */
2498 if (offset >= HOST_BITS_PER_WIDE_INT)
2499 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2503 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2504 val = trunc_int_for_mode (val, outermode);
2505 return GEN_INT (val);
2512 /* Changing mode twice with SUBREG => just change it once,
2513 or not at all if changing back to op's starting mode. */
2514 if (GET_CODE (op) == SUBREG)
2516 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2517 int final_offset = byte + SUBREG_BYTE (op);
2520 if (outermode == innermostmode
2521 && byte == 0 && SUBREG_BYTE (op) == 0)
2522 return SUBREG_REG (op);
2524 /* The SUBREG_BYTE represents offset, as if the value were stored
2525 in memory. Irritating exception is paradoxical subreg, where
2526 we define SUBREG_BYTE to be 0. On big endian machines, this
2527 value should be negative. For a moment, undo this exception. */
2528 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2530 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2531 if (WORDS_BIG_ENDIAN)
2532 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2533 if (BYTES_BIG_ENDIAN)
2534 final_offset += difference % UNITS_PER_WORD;
/* Same correction for the inner subreg being paradoxical.  */
2536 if (SUBREG_BYTE (op) == 0
2537 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2539 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2540 if (WORDS_BIG_ENDIAN)
2541 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2542 if (BYTES_BIG_ENDIAN)
2543 final_offset += difference % UNITS_PER_WORD;
2546 /* See whether resulting subreg will be paradoxical. */
2547 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2549 /* In nonparadoxical subregs we can't handle negative offsets. */
2550 if (final_offset < 0)
2552 /* Bail out in case resulting subreg would be incorrect. */
2553 if (final_offset % GET_MODE_SIZE (outermode)
2554 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2560 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2562 /* In paradoxical subreg, see if we are still looking on lower part.
2563 If so, our SUBREG_BYTE will be 0. */
2564 if (WORDS_BIG_ENDIAN)
2565 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2566 if (BYTES_BIG_ENDIAN)
2567 offset += difference % UNITS_PER_WORD;
2568 if (offset == final_offset)
2574 /* Recurse for further possible simplifications. */
2575 new = simplify_subreg (outermode, SUBREG_REG (op),
2576 GET_MODE (SUBREG_REG (op)),
2580 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2583 /* SUBREG of a hard register => just change the register number
2584 and/or mode. If the hard register is not valid in that mode,
2585 suppress this simplification. If the hard register is the stack,
2586 frame, or argument pointer, leave this as a SUBREG. */
2589 && (! REG_FUNCTION_VALUE_P (op)
2590 || ! rtx_equal_function_value_matters)
2591 #ifdef CLASS_CANNOT_CHANGE_MODE
2592 && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
2593 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2594 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
2595 && (TEST_HARD_REG_BIT
2596 (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
2599 && REGNO (op) < FIRST_PSEUDO_REGISTER
2600 && ((reload_completed && !frame_pointer_needed)
2601 || (REGNO (op) != FRAME_POINTER_REGNUM
2602 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2603 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2606 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2607 && REGNO (op) != ARG_POINTER_REGNUM
2609 && REGNO (op) != STACK_POINTER_REGNUM)
2611 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2614 /* ??? We do allow it if the current REG is not valid for
2615 its mode. This is a kludge to work around how float/complex
2616 arguments are passed on 32-bit Sparc and should be fixed. */
2617 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2618 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2620 rtx x = gen_rtx_REG (outermode, final_regno);
2622 /* Propagate original regno. We don't have any way to specify
2623 the offset inside original regno, so do so only for lowpart.
2624 The information is used only by alias analysis that cannot
2625 grok partial register anyway. */
2627 if (subreg_lowpart_offset (outermode, innermode) == byte)
2628 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2633 /* If we have a SUBREG of a register that we are replacing and we are
2634 replacing it with a MEM, make a new MEM and try replacing the
2635 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2636 or if we would be widening it. */
2638 if (GET_CODE (op) == MEM
2639 && ! mode_dependent_address_p (XEXP (op, 0))
2640 /* Allow splitting of volatile memory references in case we don't
2641 have instruction to move the whole thing. */
2642 && (! MEM_VOLATILE_P (op)
2643 || ! have_insn_for (SET, innermode))
2644 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2645 return adjust_address_nv (op, outermode, byte);
2647 /* Handle complex values represented as CONCAT
2648 of real and imaginary part. */
2649 if (GET_CODE (op) == CONCAT)
/* BYTE below the unit size selects the real part; above, the
   imaginary part with the offset rebased into that half.  */
2651 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2652 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2653 unsigned int final_offset;
2656 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2657 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2660 /* We can at least simplify it by referring directly to the relevant part. */
2661 return gen_rtx_SUBREG (outermode, part, final_offset);
2666 /* Make a SUBREG operation or equivalent if it folds. */
/* Wrapper around simplify_subreg: validate the request, try the full
   simplification, and fall back to emitting a plain SUBREG rtx when
   nothing simpler is available.  Same parameters as simplify_subreg.  */
2669 simplify_gen_subreg (outermode, op, innermode, byte)
2672 enum machine_mode outermode, innermode;
2675 /* Little bit of sanity checking. */
2676 if (innermode == VOIDmode || outermode == VOIDmode
2677 || innermode == BLKmode || outermode == BLKmode)
2680 if (GET_MODE (op) != innermode
2681 && GET_MODE (op) != VOIDmode)
2684 if (byte % GET_MODE_SIZE (outermode)
2685 || byte >= GET_MODE_SIZE (innermode))
/* QUEUED rtxes (pending incremented operands) can't be wrapped.  */
2688 if (GET_CODE (op) == QUEUED)
2691 new = simplify_subreg (outermode, op, innermode, byte);
/* A SUBREG of a SUBREG or of a mode-less value can't be represented;
   the failure handling here is elided from this view.  */
2695 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2698 return gen_rtx_SUBREG (outermode, op, byte);
2700 /* Simplify X, an rtx expression.
2702 Return the simplified expression or NULL if no simplifications
2705 This is the preferred entry point into the simplification routines;
2706 however, we still allow passes to call the more specific routines.
2708    Right now GCC has three (yes, three) major bodies of RTL simplification
2709 code that need to be unified.
2711 1. fold_rtx in cse.c. This code uses various CSE specific
2712 information to aid in RTL simplification.
2714 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2715 it uses combine specific information to aid in RTL
2718 3. The routines in this file.
2721 Long term we want to only have one body of simplification code; to
2722 get to that state I recommend the following steps:
2724     1. Pore over fold_rtx & simplify_rtx and move any simplifications
2725 which are not pass dependent state into these routines.
2727 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2728 use this routine whenever possible.
2730 3. Allow for pass dependent state to be provided to these
2731 routines and add simplifications based on the pass dependent
2732 state. Remove code from cse.c & combine.c that becomes
2735 It will take time, but ultimately the compiler will be easier to
2736 maintain and improve. It's totally silly that when we add a
2737 simplification that it needs to be added to 4 places (3 for RTL
2738    simplification and 1 for tree simplification).  */
2744 enum rtx_code code = GET_CODE (x);
2745 enum machine_mode mode = GET_MODE (x);
2747 switch (GET_RTX_CLASS (code))
2750 return simplify_unary_operation (code, mode,
2751 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2753 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2758 XEXP (x, 0) = XEXP (x, 1);
2760 return simplify_binary_operation (code, mode,
2761 XEXP (x, 0), XEXP (x, 1));
2765 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2769 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2770 XEXP (x, 0), XEXP (x, 1),
2774 return simplify_relational_operation (code,
2775 ((GET_MODE (XEXP (x, 0))
2777 ? GET_MODE (XEXP (x, 0))
2778 : GET_MODE (XEXP (x, 1))),
2779 XEXP (x, 0), XEXP (x, 1));
2781 /* The only case we try to handle is a SUBREG. */
2783 return simplify_gen_subreg (mode, SUBREG_REG (x),
2784 GET_MODE (SUBREG_REG (x)),