X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fsimplify-rtx.c;h=b38ab2e59912dce35e7d45e2f9754c8607ac34ab;hb=40e175e78b8bfcaf6afa0f75e705d62f752cc3e9;hp=65b1d193d42bfc4534132ee53d0b8e24511056da;hpb=445e5fbbc093303ce94e4f8031853af2a2d2fd68;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c index 65b1d193d42..b38ab2e5991 100644 --- a/gcc/simplify-rtx.c +++ b/gcc/simplify-rtx.c @@ -1,12 +1,13 @@ /* RTL simplification functions for GNU compiler. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, - 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. + 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 + Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free -Software Foundation; either version 2, or (at your option) any later +Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY @@ -15,9 +16,8 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License -along with GCC; see the file COPYING. If not, write to the Free -Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA -02110-1301, USA. */ +along with GCC; see the file COPYING3. If not see +. */ #include "config.h" @@ -30,7 +30,6 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA #include "regs.h" #include "hard-reg-set.h" #include "flags.h" -#include "real.h" #include "insn-config.h" #include "recog.h" #include "function.h" @@ -49,9 +48,9 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA #define HWI_SIGN_EXTEND(low) \ ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0)) -static rtx neg_const_int (enum machine_mode, rtx); -static bool plus_minus_operand_p (rtx); -static int simplify_plus_minus_op_data_cmp (const void *, const void *); +static rtx neg_const_int (enum machine_mode, const_rtx); +static bool plus_minus_operand_p (const_rtx); +static bool simplify_plus_minus_op_data_cmp (rtx, rtx); static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx); static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode, unsigned int); @@ -66,7 +65,7 @@ static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode, /* Negate a CONST_INT rtx, truncating (because a conversion from a maximally negative number can overflow). */ static rtx -neg_const_int (enum machine_mode mode, rtx i) +neg_const_int (enum machine_mode mode, const_rtx i) { return gen_int_mode (- INTVAL (i), mode); } @@ -75,7 +74,7 @@ neg_const_int (enum machine_mode mode, rtx i) the most significant bit of machine mode MODE. 
*/ bool -mode_signbit_p (enum machine_mode mode, rtx x) +mode_signbit_p (enum machine_mode mode, const_rtx x) { unsigned HOST_WIDE_INT val; unsigned int width; @@ -86,9 +85,9 @@ mode_signbit_p (enum machine_mode mode, rtx x) width = GET_MODE_BITSIZE (mode); if (width == 0) return false; - + if (width <= HOST_BITS_PER_WIDE_INT - && GET_CODE (x) == CONST_INT) + && CONST_INT_P (x)) val = INTVAL (x); else if (width <= 2 * HOST_BITS_PER_WIDE_INT && GET_CODE (x) == CONST_DOUBLE @@ -158,6 +157,9 @@ avoid_constant_pool_reference (rtx x) return x; } + if (GET_MODE (x) == BLKmode) + return x; + addr = XEXP (x, 0); /* Call target hook to avoid the effects of -fpic etc.... */ @@ -166,7 +168,7 @@ avoid_constant_pool_reference (rtx x) /* Split the address into a base and integer offset. */ if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS - && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT) + && CONST_INT_P (XEXP (XEXP (addr, 0), 1))) { offset = INTVAL (XEXP (XEXP (addr, 0), 1)); addr = XEXP (XEXP (addr, 0), 0); @@ -198,13 +200,105 @@ avoid_constant_pool_reference (rtx x) return x; } + +/* Simplify a MEM based on its attributes. This is the default + delegitimize_address target hook, and it's recommended that every + overrider call it. */ -/* Return true if X is a MEM referencing the constant pool. */ - -bool -constant_pool_reference_p (rtx x) +rtx +delegitimize_mem_from_attrs (rtx x) { - return avoid_constant_pool_reference (x) != x; + if (MEM_P (x) + && MEM_EXPR (x) + && (!MEM_OFFSET (x) + || GET_CODE (MEM_OFFSET (x)) == CONST_INT)) + { + tree decl = MEM_EXPR (x); + enum machine_mode mode = GET_MODE (x); + HOST_WIDE_INT offset = 0; + + switch (TREE_CODE (decl)) + { + default: + decl = NULL; + break; + + case VAR_DECL: + break; + + case ARRAY_REF: + case ARRAY_RANGE_REF: + case COMPONENT_REF: + case BIT_FIELD_REF: + case REALPART_EXPR: + case IMAGPART_EXPR: + case VIEW_CONVERT_EXPR: + { + HOST_WIDE_INT bitsize, bitpos; + tree toffset; + int unsignedp = 0, volatilep = 0; + + decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset, + &mode, &unsignedp, &volatilep, false); + if (bitsize != GET_MODE_BITSIZE (mode) + || (bitpos % BITS_PER_UNIT) + || (toffset && !host_integerp (toffset, 0))) + decl = NULL; + else + { + offset += bitpos / BITS_PER_UNIT; + if (toffset) + offset += TREE_INT_CST_LOW (toffset); + } + break; + } + } + + if (decl + && mode == GET_MODE (x) + && TREE_CODE (decl) == VAR_DECL + && (TREE_STATIC (decl) + || DECL_THREAD_LOCAL_P (decl)) + && DECL_RTL_SET_P (decl) + && MEM_P (DECL_RTL (decl))) + { + rtx newx; + + if (MEM_OFFSET (x)) + offset += INTVAL (MEM_OFFSET (x)); + + newx = DECL_RTL (decl); + + if (MEM_P (newx)) + { + rtx n = XEXP (newx, 0), o = XEXP (x, 0); + + /* Avoid creating a new MEM needlessly if we already had + the same address. We do if there's no OFFSET and the + old address X is identical to NEWX, or if X is of the + form (plus NEWX OFFSET), or the NEWX is of the form + (plus Y (const_int Z)) and X is that with the offset + added: (plus Y (const_int Z+OFFSET)). 
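Editor's illustration: the new delegitimize_mem_from_attrs hook above rebuilds a MEM from its MEM_EXPR only when that pays off, and the address comparison near its end accepts three shapes (equal addresses with zero offset, OLD equal to (plus NEW OFFSET), and both addresses displaced from the same base with the displacements differing by OFFSET) that all reduce to one equation, old == new + offset. A minimal standalone sketch of that equation with a hypothetical two-field address in place of real RTL; struct mini_addr and same_after_offset are invented for illustration and are not GCC API:

#include <stdbool.h>
#include <string.h>

/* Hypothetical stand-in for an address of the form
   (plus BASE (const_int OFF)); not a GCC type.  */
struct mini_addr
{
  const char *base;   /* symbolic base, e.g. a DECL's name */
  long off;           /* constant displacement */
};

/* The three cases the hunk accepts collapse into a single test:
   the old address equals the new one displaced by OFFSET.  */
static bool
same_after_offset (struct mini_addr old, struct mini_addr new_addr,
                   long offset)
{
  return strcmp (old.base, new_addr.base) == 0
         && old.off == new_addr.off + offset;
}

int main (void)
{
  struct mini_addr o = { "sym", 12 }, n = { "sym", 4 };
  return same_after_offset (o, n, 8) ? 0 : 1;
}

When the test fails, the patch instead materializes the offset with adjust_address_nv.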
*/ + if (!((offset == 0 + || (GET_CODE (o) == PLUS + && GET_CODE (XEXP (o, 1)) == CONST_INT + && (offset == INTVAL (XEXP (o, 1)) + || (GET_CODE (n) == PLUS + && GET_CODE (XEXP (n, 1)) == CONST_INT + && (INTVAL (XEXP (n, 1)) + offset + == INTVAL (XEXP (o, 1))) + && (n = XEXP (n, 0)))) + && (o = XEXP (o, 0)))) + && rtx_equal_p (o, n))) + x = adjust_address_nv (newx, mode, offset); + } + else if (GET_MODE (x) == GET_MODE (newx) + && offset == 0) + x = newx; + } + } + + return x; } /* Make a unary operation by first seeing if it folds and otherwise making @@ -255,38 +349,46 @@ simplify_gen_relational (enum rtx_code code, enum machine_mode mode, return gen_rtx_fmt_ee (code, mode, op0, op1); } -/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the - resulting RTX. Return a new RTX which is as simplified as possible. */ +/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA) + and simplify the result. If FN is non-NULL, call this callback on each + X, if it returns non-NULL, replace X with its return value and simplify the + result. */ rtx -simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx) +simplify_replace_fn_rtx (rtx x, const_rtx old_rtx, + rtx (*fn) (rtx, const_rtx, void *), void *data) { enum rtx_code code = GET_CODE (x); enum machine_mode mode = GET_MODE (x); enum machine_mode op_mode; - rtx op0, op1, op2; - - /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try - to build a new expression substituting recursively. If we can't do - anything, return our input. */ + const char *fmt; + rtx op0, op1, op2, newx, op; + rtvec vec, newvec; + int i, j; - if (x == old_rtx) - return new_rtx; + if (__builtin_expect (fn != NULL, 0)) + { + newx = fn (x, old_rtx, data); + if (newx) + return newx; + } + else if (rtx_equal_p (x, old_rtx)) + return copy_rtx ((rtx) data); switch (GET_RTX_CLASS (code)) { case RTX_UNARY: op0 = XEXP (x, 0); op_mode = GET_MODE (op0); - op0 = simplify_replace_rtx (op0, old_rtx, new_rtx); + op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data); if (op0 == XEXP (x, 0)) return x; return simplify_gen_unary (code, mode, op0, op_mode); case RTX_BIN_ARITH: case RTX_COMM_ARITH: - op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx); - op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx); + op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data); + op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data); if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1)) return x; return simplify_gen_binary (code, mode, op0, op1); @@ -296,8 +398,8 @@ simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx) op0 = XEXP (x, 0); op1 = XEXP (x, 1); op_mode = GET_MODE (op0) != VOIDmode ? 
GET_MODE (op0) : GET_MODE (op1); - op0 = simplify_replace_rtx (op0, old_rtx, new_rtx); - op1 = simplify_replace_rtx (op1, old_rtx, new_rtx); + op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data); + op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data); if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1)) return x; return simplify_gen_relational (code, mode, op_mode, op0, op1); @@ -306,9 +408,9 @@ simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx) case RTX_BITFIELD_OPS: op0 = XEXP (x, 0); op_mode = GET_MODE (op0); - op0 = simplify_replace_rtx (op0, old_rtx, new_rtx); - op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx); - op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx); + op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data); + op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data); + op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data); if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2)) return x; if (op_mode == VOIDmode) @@ -316,10 +418,9 @@ simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx) return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2); case RTX_EXTRA: - /* The only case we try to handle is a SUBREG. */ if (code == SUBREG) { - op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx); + op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data); if (op0 == SUBREG_REG (x)) return x; op0 = simplify_gen_subreg (GET_MODE (x), op0, @@ -332,15 +433,15 @@ simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx) case RTX_OBJ: if (code == MEM) { - op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx); + op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data); if (op0 == XEXP (x, 0)) return x; return replace_equiv_address_nv (x, op0); } else if (code == LO_SUM) { - op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx); - op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx); + op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data); + op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data); /* (lo_sum (high x) x) -> x */ if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1)) @@ -350,17 +451,61 @@ simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx) return x; return gen_rtx_LO_SUM (mode, op0, op1); } - else if (code == REG) - { - if (rtx_equal_p (x, old_rtx)) - return new_rtx; - } break; default: break; } - return x; + + newx = x; + fmt = GET_RTX_FORMAT (code); + for (i = 0; fmt[i]; i++) + switch (fmt[i]) + { + case 'E': + vec = XVEC (x, i); + newvec = XVEC (newx, i); + for (j = 0; j < GET_NUM_ELEM (vec); j++) + { + op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j), + old_rtx, fn, data); + if (op != RTVEC_ELT (vec, j)) + { + if (newvec == vec) + { + newvec = shallow_copy_rtvec (vec); + if (x == newx) + newx = shallow_copy_rtx (x); + XVEC (newx, i) = newvec; + } + RTVEC_ELT (newvec, j) = op; + } + } + break; + + case 'e': + if (XEXP (x, i)) + { + op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data); + if (op != XEXP (x, i)) + { + if (x == newx) + newx = shallow_copy_rtx (x); + XEXP (newx, i) = op; + } + } + break; + } + return newx; +} + +/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the + resulting RTX. Return a new RTX which is as simplified as possible. 
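Editor's illustration: the new generic fallback loop above walks arbitrary rtx formats and copies an rtvec or rtx only at the moment a child actually changes, so untouched subtrees stay shared with the input. A sketch of the same copy-on-write pattern on a plain binary tree; node, replace_leaf and the allocation strategy are invented for illustration (error handling omitted):

#include <stdlib.h>

struct node { int value; struct node *l, *r; };

/* Replace every leaf VALUE equal to OLD with NEW_VALUE.  A node is
   copied only if one of its children changed, mirroring how the loop
   above calls shallow_copy_rtx/shallow_copy_rtvec on first change.  */
static struct node *
replace_leaf (struct node *n, int old, int new_value)
{
  struct node *l, *r, *copy;

  if (n->l == NULL && n->r == NULL)
    {
      if (n->value != old)
        return n;               /* unchanged: share the input */
      copy = malloc (sizeof *copy);
      *copy = *n;
      copy->value = new_value;
      return copy;
    }

  l = n->l ? replace_leaf (n->l, old, new_value) : NULL;
  r = n->r ? replace_leaf (n->r, old, new_value) : NULL;
  if (l == n->l && r == n->r)
    return n;                   /* no child changed: share the input */

  copy = malloc (sizeof *copy);
  *copy = *n;
  copy->l = l;
  copy->r = r;
  return copy;
}

int main (void)
{
  struct node a = { 1, 0, 0 }, b = { 2, 0, 0 };
  struct node root = { 0, &a, &b };
  struct node *res = replace_leaf (&root, 2, 7);
  /* The left leaf is shared; the spine and right leaf are fresh.  */
  return (res != &root && res->l == &a && res->r->value == 7) ? 0 : 1;
}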
*/
+
+rtx
+simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
+{
+ return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}

/* Try to simplify a unary operation CODE whose output mode is to be
@@ -372,9 +517,6 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
{
rtx trueop, tem;

- if (GET_CODE (op) == CONST)
- op = XEXP (op, 0);
-
trueop = avoid_constant_pool_reference (op);

tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
@@ -418,14 +560,14 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)

/* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
if (GET_CODE (op) == XOR
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (op, 1))
&& (temp = simplify_unary_operation (NOT, mode,
XEXP (op, 1), mode)) != 0)
return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

/* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
if (GET_CODE (op) == PLUS
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (op, 1))
&& mode_signbit_p (mode, XEXP (op, 1))
&& (temp = simplify_unary_operation (NOT, mode,
XEXP (op, 1), mode)) != 0)
@@ -447,10 +589,10 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
/* (not (ashiftrt foo C)) where C is the number of bits in FOO
minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
so we can perform the above simplification. */
-
+
if (STORE_FLAG_VALUE == -1
&& GET_CODE (op) == ASHIFTRT
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (op, 1))
&& INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
return simplify_gen_relational (GE, mode, VOIDmode,
XEXP (op, 0), const0_rtx);
@@ -511,11 +653,11 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
if (GET_CODE (op) == PLUS
&& XEXP (op, 1) == const1_rtx)
return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
-
+
/* Similarly, (neg (not X)) is (plus X 1). */
if (GET_CODE (op) == NOT)
return plus_constant (XEXP (op, 0), 1);
-
+
/* (neg (minus X Y)) can become (minus Y X). This transformation
isn't safe for modes with signed zeros, since if X and Y are
both +0, (minus Y X) is the same as (minus X Y). If the
@@ -525,13 +667,13 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
&& !HONOR_SIGNED_ZEROS (mode)
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
-
+
if (GET_CODE (op) == PLUS
&& !HONOR_SIGNED_ZEROS (mode)
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
{
/* (neg (plus A C)) is simplified to (minus -C A). */
- if (GET_CODE (XEXP (op, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (op, 1))
|| GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
{
temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
@@ -566,7 +708,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
/* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
C is equal to the width of MODE minus 1. */
if (GET_CODE (op) == ASHIFTRT
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (op, 1))
&& INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
return simplify_gen_binary (LSHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
@@ -574,11 +716,11 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
/* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
C is equal to the width of MODE minus 1. 
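Editor's illustration: several of the NOT/NEG rules in this function come straight from two's complement, where -X equals ~X + 1; the shift rules use the fact that for C equal to the mode width minus 1, an arithmetic right shift yields 0 or -1 while a logical one yields 0 or 1. A standalone spot check; it assumes GCC's arithmetic right shift of negative values, and INT32_MAX is left out of the table because x + 1 would overflow:

#include <assert.h>
#include <stdint.h>

int main (void)
{
  int32_t tests[] = { 0, 1, -1, 42, -42, INT32_MIN };
  for (unsigned i = 0; i < sizeof tests / sizeof *tests; i++)
    {
      int32_t x = tests[i];
      /* (neg (plus X 1)) is (not X) and (neg (not X)) is (plus X 1),
         both from -X == ~X + 1.  */
      assert (-(x + 1) == ~x);
      assert (-(~x) == x + 1);
      /* For C == width - 1: (neg (ashiftrt X C)) == (lshiftrt X C).  */
      assert (-(x >> 31) == (int32_t) ((uint32_t) x >> 31));
    }
  return 0;
}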
*/ if (GET_CODE (op) == LSHIFTRT - && GET_CODE (XEXP (op, 1)) == CONST_INT + && CONST_INT_P (XEXP (op, 1)) && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1) return simplify_gen_binary (ASHIFTRT, mode, XEXP (op, 0), XEXP (op, 1)); - + /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */ if (GET_CODE (op) == XOR && XEXP (op, 1) == const1_rtx @@ -588,7 +730,8 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */ /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */ if (GET_CODE (op) == LT - && XEXP (op, 1) == const0_rtx) + && XEXP (op, 1) == const0_rtx + && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0)))) { enum machine_mode inner = GET_MODE (XEXP (op, 0)); int isize = GET_MODE_BITSIZE (inner); @@ -650,7 +793,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) replace the TRUNCATE with a SUBREG. Note that this is also valid if TRULY_NOOP_TRUNCATION is false for the corresponding modes we just have to apply a different definition for - truncation. But don't do this for an (LSHIFTRT (MULT ...)) + truncation. But don't do this for an (LSHIFTRT (MULT ...)) since this will cause problems with the umulXi3_highpart patterns. */ if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), @@ -704,10 +847,11 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) /* (float_truncate (float x)) is (float x) */ if (GET_CODE (op) == FLOAT && (flag_unsafe_math_optimizations - || ((unsigned)significand_size (GET_MODE (op)) - >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))) - - num_sign_bit_copies (XEXP (op, 0), - GET_MODE (XEXP (op, 0))))))) + || (SCALAR_FLOAT_MODE_P (GET_MODE (op)) + && ((unsigned)significand_size (GET_MODE (op)) + >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))) + - num_sign_bit_copies (XEXP (op, 0), + GET_MODE (XEXP (op, 0)))))))) return simplify_gen_unary (FLOAT, mode, XEXP (op, 0), GET_MODE (XEXP (op, 0))); @@ -740,6 +884,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) */ if (GET_CODE (op) == FLOAT_EXTEND || (GET_CODE (op) == FLOAT + && SCALAR_FLOAT_MODE_P (GET_MODE (op)) && ((unsigned)significand_size (GET_MODE (op)) >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))) - num_sign_bit_copies (XEXP (op, 0), @@ -786,11 +931,54 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) break; case POPCOUNT: + switch (GET_CODE (op)) + { + case BSWAP: + case ZERO_EXTEND: + /* (popcount (zero_extend )) = (popcount ) */ + return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0), + GET_MODE (XEXP (op, 0))); + + case ROTATE: + case ROTATERT: + /* Rotations don't affect popcount. */ + if (!side_effects_p (XEXP (op, 1))) + return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0), + GET_MODE (XEXP (op, 0))); + break; + + default: + break; + } + break; + case PARITY: - /* (pop* (zero_extend )) = (pop* ) */ - if (GET_CODE (op) == ZERO_EXTEND) - return simplify_gen_unary (code, mode, XEXP (op, 0), - GET_MODE (XEXP (op, 0))); + switch (GET_CODE (op)) + { + case NOT: + case BSWAP: + case ZERO_EXTEND: + case SIGN_EXTEND: + return simplify_gen_unary (PARITY, mode, XEXP (op, 0), + GET_MODE (XEXP (op, 0))); + + case ROTATE: + case ROTATERT: + /* Rotations don't affect parity. 
*/ + if (!side_effects_p (XEXP (op, 1))) + return simplify_gen_unary (PARITY, mode, XEXP (op, 0), + GET_MODE (XEXP (op, 0))); + break; + + default: + break; + } + break; + + case BSWAP: + /* (bswap (bswap x)) -> x. */ + if (GET_CODE (op) == BSWAP) + return XEXP (op, 0); break; case FLOAT: @@ -818,11 +1006,15 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) if (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op) && ! SUBREG_PROMOTED_UNSIGNED_P (op) - && GET_MODE (XEXP (op, 0)) == mode) - return XEXP (op, 0); + && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))) + return rtl_hooks.gen_lowpart_no_emit (mode, op); #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend) - if (! POINTERS_EXTEND_UNSIGNED + /* As we do not know which address space the pointer is refering to, + we can do this only if the target does not support different pointer + or address modes depending on the address space. */ + if (target_default_pointer_address_modes_p () + && ! POINTERS_EXTEND_UNSIGNED && mode == Pmode && GET_MODE (op) == ptr_mode && (CONSTANT_P (op) || (GET_CODE (op) == SUBREG @@ -840,11 +1032,15 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) if (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op) && SUBREG_PROMOTED_UNSIGNED_P (op) > 0 - && GET_MODE (XEXP (op, 0)) == mode) - return XEXP (op, 0); + && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))) + return rtl_hooks.gen_lowpart_no_emit (mode, op); #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend) - if (POINTERS_EXTEND_UNSIGNED > 0 + /* As we do not know which address space the pointer is refering to, + we can do this only if the target does not support different pointer + or address modes depending on the address space. */ + if (target_default_pointer_address_modes_p () + && POINTERS_EXTEND_UNSIGNED > 0 && mode == Pmode && GET_MODE (op) == ptr_mode && (CONSTANT_P (op) || (GET_CODE (op) == SUBREG @@ -858,7 +1054,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) default: break; } - + return 0; } @@ -882,7 +1078,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER (GET_MODE (op))); } - if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE + if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_VECTOR) { int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); @@ -936,12 +1132,12 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, such as FIX. At some point, this should be simplified. 
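Editor's illustration: the POPCOUNT and PARITY cases added above hold because ROTATE, ROTATERT and BSWAP only permute bits and ZERO_EXTEND only adds zero bits, so the number of set bits cannot change; PARITY additionally survives NOT and SIGN_EXTEND because the number of flipped or appended sign bits is even for every byte-sized mode. A brute-force check over all 16-bit values; this standalone sketch uses GCC's __builtin_popcount, and rotl16 is written out by hand:

#include <assert.h>
#include <stdint.h>

static uint16_t
rotl16 (uint16_t x, unsigned c)
{
  c &= 15;
  return (uint16_t) ((x << c) | (x >> ((16 - c) & 15)));
}

int main (void)
{
  for (uint32_t v = 0; v <= 0xffff; v++)
    {
      uint16_t x = (uint16_t) v;
      for (unsigned c = 0; c < 16; c++)
        /* popcount (and hence parity) is invariant under rotation.  */
        assert (__builtin_popcount (rotl16 (x, c))
                == __builtin_popcount (x));
      /* parity survives NOT: all 16 bits flip, an even count.  */
      assert ((__builtin_popcount ((uint16_t) ~x) & 1)
              == (__builtin_popcount (x) & 1));
    }
  return 0;
}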
*/ if (code == FLOAT && GET_MODE (op) == VOIDmode - && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT)) + && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op))) { HOST_WIDE_INT hv, lv; REAL_VALUE_TYPE d; - if (GET_CODE (op) == CONST_INT) + if (CONST_INT_P (op)) lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv); else lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op); @@ -952,12 +1148,12 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, } else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode && (GET_CODE (op) == CONST_DOUBLE - || GET_CODE (op) == CONST_INT)) + || CONST_INT_P (op))) { HOST_WIDE_INT hv, lv; REAL_VALUE_TYPE d; - if (GET_CODE (op) == CONST_INT) + if (CONST_INT_P (op)) lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv); else lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op); @@ -979,7 +1175,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, return CONST_DOUBLE_FROM_REAL_VALUE (d, mode); } - if (GET_CODE (op) == CONST_INT + if (CONST_INT_P (op) && width <= HOST_BITS_PER_WIDE_INT && width > 0) { HOST_WIDE_INT arg0 = INTVAL (op); @@ -1042,6 +1238,21 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, val &= 1; break; + case BSWAP: + { + unsigned int s; + + val = 0; + for (s = 0; s < width; s += 8) + { + unsigned int d = width - s - 8; + unsigned HOST_WIDE_INT byte; + byte = (arg0 >> s) & 0xff; + val |= byte << d; + } + } + break; + case TRUNCATE: val = arg0; break; @@ -1093,6 +1304,8 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, case SS_TRUNCATE: case US_TRUNCATE: case SS_NEG: + case US_NEG: + case SS_ABS: return 0; default: @@ -1107,7 +1320,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_WIDE_INT * 2 && (GET_CODE (op) == CONST_DOUBLE - || GET_CODE (op) == CONST_INT)) + || CONST_INT_P (op))) { unsigned HOST_WIDE_INT l1, lv; HOST_WIDE_INT h1, hv; @@ -1188,6 +1401,30 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, lv &= 1; break; + case BSWAP: + { + unsigned int s; + + hv = 0; + lv = 0; + for (s = 0; s < width; s += 8) + { + unsigned int d = width - s - 8; + unsigned HOST_WIDE_INT byte; + + if (s < HOST_BITS_PER_WIDE_INT) + byte = (l1 >> s) & 0xff; + else + byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff; + + if (d < HOST_BITS_PER_WIDE_INT) + lv |= byte << d; + else + hv |= byte << (d - HOST_BITS_PER_WIDE_INT); + } + } + break; + case TRUNCATE: /* This is just a change-of-mode, so do nothing. */ lv = l1, hv = h1; @@ -1244,10 +1481,10 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, d = t; break; case ABS: - d = REAL_VALUE_ABS (d); + d = real_value_abs (&d); break; case NEG: - d = REAL_VALUE_NEGATE (d); + d = real_value_negate (&d); break; case FLOAT_TRUNCATE: d = real_value_truncate (mode, d); @@ -1419,16 +1656,12 @@ simplify_associative_operation (enum rtx_code code, enum machine_mode mode, } /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */ - tem = swap_commutative_operands_p (XEXP (op0, 1), op1) - ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1)) - : simplify_binary_operation (code, mode, XEXP (op0, 1), op1); + tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1); if (tem != 0) return simplify_gen_binary (code, mode, XEXP (op0, 0), tem); /* Attempt to simplify "(a op b) op c" as "(a op c) op b". 
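Editor's illustration: the new BSWAP constant folding above reverses the bytes of the low WIDTH bits with an extract-and-deposit loop, once for single-word values and once more for the CONST_DOUBLE high/low pair. The same loop in standalone form; fold_bswap is a hypothetical mirror of the "case BSWAP" code, not a GCC function:

#include <stdint.h>
#include <stdio.h>

/* Reverse the bytes of the low WIDTH bits of ARG.  WIDTH must be a
   multiple of 8 and at most 64, as in the single-word case above.  */
static uint64_t
fold_bswap (uint64_t arg, unsigned width)
{
  uint64_t val = 0;
  for (unsigned s = 0; s < width; s += 8)
    {
      unsigned d = width - s - 8;           /* destination position */
      uint64_t byte = (arg >> s) & 0xff;    /* source byte */
      val |= byte << d;
    }
  return val;
}

int main (void)
{
  /* 0x11223344 -> 0x44332211 in a 32-bit mode.  */
  printf ("%#llx\n", (unsigned long long) fold_bswap (0x11223344, 32));
  return 0;
}

Applying it twice returns the input, which is what the new (bswap (bswap x)) -> x rule exploits.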
*/ - tem = swap_commutative_operands_p (XEXP (op0, 0), op1) - ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0)) - : simplify_binary_operation (code, mode, XEXP (op0, 0), op1); + tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1); if (tem != 0) return simplify_gen_binary (code, mode, tem, XEXP (op0, 1)); } @@ -1517,11 +1750,15 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, to CONST_INT since overflow won't be computed properly if wider than HOST_BITS_PER_WIDE_INT. */ - if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode - && GET_CODE (op1) == CONST_INT) + if ((GET_CODE (op0) == CONST + || GET_CODE (op0) == SYMBOL_REF + || GET_CODE (op0) == LABEL_REF) + && CONST_INT_P (op1)) return plus_constant (op0, INTVAL (op1)); - else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode - && GET_CODE (op0) == CONST_INT) + else if ((GET_CODE (op1) == CONST + || GET_CODE (op1) == SYMBOL_REF + || GET_CODE (op1) == LABEL_REF) + && CONST_INT_P (op0)) return plus_constant (op1, INTVAL (op0)); /* See if this is something like X * C - X or vice versa or @@ -1532,53 +1769,51 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, if (SCALAR_INT_MODE_P (mode)) { - HOST_WIDE_INT coeff0h = 0, coeff1h = 0; - unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1; + double_int coeff0, coeff1; rtx lhs = op0, rhs = op1; + coeff0 = double_int_one; + coeff1 = double_int_one; + if (GET_CODE (lhs) == NEG) { - coeff0l = -1; - coeff0h = -1; + coeff0 = double_int_minus_one; lhs = XEXP (lhs, 0); } else if (GET_CODE (lhs) == MULT - && GET_CODE (XEXP (lhs, 1)) == CONST_INT) + && CONST_INT_P (XEXP (lhs, 1))) { - coeff0l = INTVAL (XEXP (lhs, 1)); - coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0; + coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1))); lhs = XEXP (lhs, 0); } else if (GET_CODE (lhs) == ASHIFT - && GET_CODE (XEXP (lhs, 1)) == CONST_INT - && INTVAL (XEXP (lhs, 1)) >= 0 + && CONST_INT_P (XEXP (lhs, 1)) + && INTVAL (XEXP (lhs, 1)) >= 0 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) { - coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1)); - coeff0h = 0; + coeff0 = double_int_setbit (double_int_zero, + INTVAL (XEXP (lhs, 1))); lhs = XEXP (lhs, 0); } if (GET_CODE (rhs) == NEG) { - coeff1l = -1; - coeff1h = -1; + coeff1 = double_int_minus_one; rhs = XEXP (rhs, 0); } else if (GET_CODE (rhs) == MULT - && GET_CODE (XEXP (rhs, 1)) == CONST_INT) + && CONST_INT_P (XEXP (rhs, 1))) { - coeff1l = INTVAL (XEXP (rhs, 1)); - coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? 
-1 : 0; + coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1))); rhs = XEXP (rhs, 0); } else if (GET_CODE (rhs) == ASHIFT - && GET_CODE (XEXP (rhs, 1)) == CONST_INT + && CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) >= 0 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) { - coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)); - coeff1h = 0; + coeff1 = double_int_setbit (double_int_zero, + INTVAL (XEXP (rhs, 1))); rhs = XEXP (rhs, 0); } @@ -1586,23 +1821,23 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, { rtx orig = gen_rtx_PLUS (mode, op0, op1); rtx coeff; - unsigned HOST_WIDE_INT l; - HOST_WIDE_INT h; + double_int val; + bool speed = optimize_function_for_speed_p (cfun); - add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h); - coeff = immed_double_const (l, h, mode); + val = double_int_add (coeff0, coeff1); + coeff = immed_double_int_const (val, mode); tem = simplify_gen_binary (MULT, mode, lhs, coeff); - return rtx_cost (tem, SET) <= rtx_cost (orig, SET) + return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed) ? tem : 0; } } /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */ - if ((GET_CODE (op1) == CONST_INT + if ((CONST_INT_P (op1) || GET_CODE (op1) == CONST_DOUBLE) && GET_CODE (op0) == XOR - && (GET_CODE (XEXP (op0, 1)) == CONST_INT + && (CONST_INT_P (XEXP (op0, 1)) || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE) && mode_signbit_p (mode, op1)) return simplify_gen_binary (XOR, mode, XEXP (op0, 0), @@ -1610,7 +1845,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, XEXP (op0, 1))); /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */ - if (GET_CODE (op0) == MULT + if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode) + && GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG) { rtx in1, in2; @@ -1645,9 +1881,9 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, return tem; /* Reassociate floating point addition only when the user - specifies unsafe math optimizations. */ + specifies associative math operations. */ if (FLOAT_MODE_P (mode) - && flag_unsafe_math_optimizations) + && flag_associative_math) { tem = simplify_associative_operation (code, mode, op0, op1); if (tem) @@ -1656,19 +1892,6 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, break; case COMPARE: -#ifdef HAVE_cc0 - /* Convert (compare FOO (const_int 0)) to FOO unless we aren't - using cc0, in which case we want to leave it as a COMPARE - so we can distinguish it from a register-register-copy. - - In IEEE floating point, x-0 is not the same as x. */ - - if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT - || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations) - && trueop1 == CONST0_RTX (mode)) - return op0; -#endif - /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */ if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT) || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU)) @@ -1693,10 +1916,10 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, case MINUS: /* We can't assume x-x is 0 even with non-IEEE floating point, but since it is zero except in very strange circumstances, we - will treat it as zero with -funsafe-math-optimizations. */ + will treat it as zero with -ffinite-math-only. */ if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0) - && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)) + && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode))) return CONST0_RTX (mode); /* Change subtraction from zero into negation. 
(0 - x) is the @@ -1726,53 +1949,52 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, if (SCALAR_INT_MODE_P (mode)) { - HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1; - unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1; + double_int coeff0, negcoeff1; rtx lhs = op0, rhs = op1; + coeff0 = double_int_one; + negcoeff1 = double_int_minus_one; + if (GET_CODE (lhs) == NEG) { - coeff0l = -1; - coeff0h = -1; + coeff0 = double_int_minus_one; lhs = XEXP (lhs, 0); } else if (GET_CODE (lhs) == MULT - && GET_CODE (XEXP (lhs, 1)) == CONST_INT) + && CONST_INT_P (XEXP (lhs, 1))) { - coeff0l = INTVAL (XEXP (lhs, 1)); - coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0; + coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1))); lhs = XEXP (lhs, 0); } else if (GET_CODE (lhs) == ASHIFT - && GET_CODE (XEXP (lhs, 1)) == CONST_INT + && CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) >= 0 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) { - coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1)); - coeff0h = 0; + coeff0 = double_int_setbit (double_int_zero, + INTVAL (XEXP (lhs, 1))); lhs = XEXP (lhs, 0); } if (GET_CODE (rhs) == NEG) { - negcoeff1l = 1; - negcoeff1h = 0; + negcoeff1 = double_int_one; rhs = XEXP (rhs, 0); } else if (GET_CODE (rhs) == MULT - && GET_CODE (XEXP (rhs, 1)) == CONST_INT) + && CONST_INT_P (XEXP (rhs, 1))) { - negcoeff1l = -INTVAL (XEXP (rhs, 1)); - negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1; + negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1))); rhs = XEXP (rhs, 0); } else if (GET_CODE (rhs) == ASHIFT - && GET_CODE (XEXP (rhs, 1)) == CONST_INT + && CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) >= 0 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) { - negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1))); - negcoeff1h = -1; + negcoeff1 = double_int_setbit (double_int_zero, + INTVAL (XEXP (rhs, 1))); + negcoeff1 = double_int_neg (negcoeff1); rhs = XEXP (rhs, 0); } @@ -1780,14 +2002,14 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, { rtx orig = gen_rtx_MINUS (mode, op0, op1); rtx coeff; - unsigned HOST_WIDE_INT l; - HOST_WIDE_INT h; + double_int val; + bool speed = optimize_function_for_speed_p (cfun); - add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h); - coeff = immed_double_const (l, h, mode); + val = double_int_add (coeff0, negcoeff1); + coeff = immed_double_int_const (val, mode); tem = simplify_gen_binary (MULT, mode, lhs, coeff); - return rtx_cost (tem, SET) <= rtx_cost (orig, SET) + return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed) ? tem : 0; } } @@ -1798,7 +2020,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, /* (-x - c) may be simplified as (-c - x). */ if (GET_CODE (op0) == NEG - && (GET_CODE (op1) == CONST_INT + && (CONST_INT_P (op1) || GET_CODE (op1) == CONST_DOUBLE)) { tem = simplify_unary_operation (NEG, mode, op1, mode); @@ -1807,7 +2029,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, } /* Don't let a relocatable value get a negative coeff. */ - if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode) + if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode) return simplify_gen_binary (PLUS, mode, op0, neg_const_int (mode, op1)); @@ -1838,7 +2060,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, return reversed; /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). 
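Editor's illustration: the PLUS and MINUS blocks above follow one plan: map each operand of the forms X, (neg X), (mult X C) and (ashift X C) to a coefficient, add the coefficients in double_int arithmetic (negating the right-hand one for MINUS), and keep the single multiply only if rtx_cost says it is no more expensive than the original. A sketch of the coefficient extraction with int64_t standing in for double_int; the term type and all names are invented:

#include <stdint.h>

enum term_kind { TERM_VAR, TERM_NEG, TERM_MUL, TERM_SHIFT };

/* One operand shape recognized above: x, (neg x), (mult x c)
   or (ashift x c).  */
struct term
{
  enum term_kind kind;
  int64_t c;            /* constant for TERM_MUL / TERM_SHIFT */
};

static int64_t
term_coeff (struct term t)
{
  switch (t.kind)
    {
    case TERM_NEG:   return -1;
    case TERM_MUL:   return t.c;
    case TERM_SHIFT: return (int64_t) 1 << t.c;
    default:         return 1;
    }
}

/* x*c1 + x*c2 folds to x*(c1 + c2); for MINUS, negate the second
   coefficient first, as negcoeff1 is above.  int64_t can overflow
   where double_int would not, so this is a narrow-range sketch.  */
static int64_t
combined_coeff (struct term a, struct term b, int is_minus)
{
  int64_t cb = term_coeff (b);
  return term_coeff (a) + (is_minus ? -cb : cb);
}

int main (void)
{
  /* x + (x << 3) -> x * 9.  */
  struct term a = { TERM_VAR, 0 }, b = { TERM_SHIFT, 3 };
  return combined_coeff (a, b, 0) == 9 ? 0 : 1;
}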
*/ - if (GET_CODE (op1) == MULT + if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode) + && GET_CODE (op1) == MULT && GET_CODE (XEXP (op1, 0)) == NEG) { rtx in1, in2; @@ -1853,7 +2076,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, /* Canonicalize (minus (neg A) (mult B C)) to (minus (mult (neg B) C) A). */ - if (GET_CODE (op1) == MULT + if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode) + && GET_CODE (op1) == MULT && GET_CODE (op0) == NEG) { rtx in1, in2; @@ -1902,7 +2126,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, /* Convert multiply by constant power of two into shift unless we are still generating RTL. This test is a kludge. */ - if (GET_CODE (trueop1) == CONST_INT + if (CONST_INT_P (trueop1) && (val = exact_log2 (INTVAL (trueop1))) >= 0 /* If the mode is larger than the host word size, and the uppermost bit is set, then this isn't a power of two due @@ -1924,6 +2148,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, /* x*2 is x+x and x*(-1) is -x */ if (GET_CODE (trueop1) == CONST_DOUBLE && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1)) + && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1)) && GET_MODE (op0) == mode) { REAL_VALUE_TYPE d; @@ -1967,7 +2192,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, case IOR: if (trueop1 == const0_rtx) return op0; - if (GET_CODE (trueop1) == CONST_INT + if (CONST_INT_P (trueop1) && ((INTVAL (trueop1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))) return op1; @@ -1981,11 +2206,38 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, return constm1_rtx; /* (ior A C) is C if all bits of A that might be nonzero are on in C. */ - if (GET_CODE (op1) == CONST_INT + if (CONST_INT_P (op1) && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0) return op1; - + + /* Canonicalize (X & C1) | C2. */ + if (GET_CODE (op0) == AND + && CONST_INT_P (trueop1) + && CONST_INT_P (XEXP (op0, 1))) + { + HOST_WIDE_INT mask = GET_MODE_MASK (mode); + HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1)); + HOST_WIDE_INT c2 = INTVAL (trueop1); + + /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */ + if ((c1 & c2) == c1 + && !side_effects_p (XEXP (op0, 0))) + return trueop1; + + /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */ + if (((c1|c2) & mask) == mask) + return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1); + + /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */ + if (((c1 & ~c2) & mask) != (c1 & mask)) + { + tem = simplify_gen_binary (AND, mode, XEXP (op0, 0), + gen_int_mode (c1 & ~c2, mode)); + return simplify_gen_binary (IOR, mode, tem, op1); + } + } + /* Convert (A & B) | A to A. 
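Editor's illustration: the three (X & C1) | C2 rules a few hunks up are pure bit identities. When C2 covers C1 the whole expression is just the constant C2 (that is what returning trueop1 implements), when C1|C2 is all ones the AND is redundant and X survives, and in general any bits of C1 that also lie in C2 can be cleared. All three can be verified exhaustively in a narrow mode; a standalone 8-bit sketch:

#include <assert.h>
#include <stdint.h>

int main (void)
{
  for (unsigned x = 0; x < 256; x++)
    for (unsigned c1 = 0; c1 < 256; c1++)
      for (unsigned c2 = 0; c2 < 256; c2++)
        {
          uint8_t lhs = (uint8_t) ((x & c1) | c2);

          /* If (C1&C2) == C1, the result is the constant C2.  */
          if ((c1 & c2) == c1)
            assert (lhs == (uint8_t) c2);

          /* If (C1|C2) == ~0, the AND is dead: (X&C1)|C2 == X|C2.  */
          if ((c1 | c2) == 0xff)
            assert (lhs == (uint8_t) (x | c2));

          /* C1 can always be shrunk to C1 & ~C2.  */
          assert (lhs == (uint8_t) ((x & (c1 & ~c2)) | c2));
        }
  return 0;
}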
*/ if (GET_CODE (op0) == AND && (rtx_equal_p (XEXP (op0, 0), op1) @@ -2011,8 +2263,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0)) - && GET_CODE (XEXP (opleft, 1)) == CONST_INT - && GET_CODE (XEXP (opright, 1)) == CONST_INT + && CONST_INT_P (XEXP (opleft, 1)) + && CONST_INT_P (XEXP (opright, 1)) && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1)) == GET_MODE_BITSIZE (mode))) return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1)); @@ -2030,8 +2282,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft)))) && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0), SUBREG_REG (XEXP (opright, 0))) - && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT - && GET_CODE (XEXP (opright, 1)) == CONST_INT + && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1)) + && CONST_INT_P (XEXP (opright, 1)) && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1)) == GET_MODE_BITSIZE (mode))) return gen_rtx_ROTATE (mode, XEXP (opright, 0), @@ -2039,12 +2291,12 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, /* If we have (ior (and (X C1) C2)), simplify this by making C1 as small as possible if C1 actually changes. */ - if (GET_CODE (op1) == CONST_INT + if (CONST_INT_P (op1) && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT || INTVAL (op1) > 0) && GET_CODE (op0) == AND - && GET_CODE (XEXP (op0, 1)) == CONST_INT - && GET_CODE (op1) == CONST_INT + && CONST_INT_P (XEXP (op0, 1)) + && CONST_INT_P (op1) && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0) return simplify_gen_binary (IOR, mode, simplify_gen_binary @@ -2058,10 +2310,10 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, the PLUS does not affect any of the bits in OP1: then we can do the IOR as a PLUS and we can associate. This is valid if OP1 can be safely shifted left C bits. */ - if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT + if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT && GET_CODE (XEXP (op0, 0)) == PLUS - && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT - && GET_CODE (XEXP (op0, 1)) == CONST_INT + && CONST_INT_P (XEXP (XEXP (op0, 0), 1)) + && CONST_INT_P (XEXP (op0, 1)) && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT) { int count = INTVAL (XEXP (op0, 1)); @@ -2082,7 +2334,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, case XOR: if (trueop1 == const0_rtx) return op0; - if (GET_CODE (trueop1) == CONST_INT + if (CONST_INT_P (trueop1) && ((INTVAL (trueop1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))) return simplify_gen_unary (NOT, mode, op0, mode); @@ -2092,15 +2344,15 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, return CONST0_RTX (mode); /* Canonicalize XOR of the most significant bit to PLUS. */ - if ((GET_CODE (op1) == CONST_INT + if ((CONST_INT_P (op1) || GET_CODE (op1) == CONST_DOUBLE) && mode_signbit_p (mode, op1)) return simplify_gen_binary (PLUS, mode, op0, op1); /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. 
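Editor's illustration: the IOR hunks above recognize (ashift X C) | (lshiftrt X (W - C)) as (rotate X C), including the variant hidden under a SUBREG. The identity is the usual construction of a rotate from two shifts; a standalone demonstration, where rotl32 is the conventional portable formulation rather than GCC code:

#include <assert.h>
#include <stdint.h>

/* Branch-free rotate; masking with (32 - c) & 31 avoids the
   undefined shift by 32 when c == 0.  */
static uint32_t
rotl32 (uint32_t x, unsigned c)
{
  c &= 31;
  return (x << c) | (x >> ((32 - c) & 31));
}

int main (void)
{
  uint32_t x = 0xdeadbeef;
  for (unsigned c = 1; c < 32; c++)
    /* Exactly the shape the simplifier matches:
       (ashift X C) | (lshiftrt X (32 - C)) == (rotate X C).  */
    assert (((x << c) | (x >> (32 - c))) == rotl32 (x, c));
  return 0;
}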
*/ - if ((GET_CODE (op1) == CONST_INT + if ((CONST_INT_P (op1) || GET_CODE (op1) == CONST_DOUBLE) && GET_CODE (op0) == PLUS - && (GET_CODE (XEXP (op0, 1)) == CONST_INT + && (CONST_INT_P (XEXP (op0, 1)) || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE) && mode_signbit_p (mode, XEXP (op0, 1))) return simplify_gen_binary (XOR, mode, XEXP (op0, 0), @@ -2170,7 +2422,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, if (STORE_FLAG_VALUE == 1 && trueop1 == const1_rtx && GET_CODE (op0) == LSHIFTRT - && GET_CODE (XEXP (op0, 1)) == CONST_INT + && CONST_INT_P (XEXP (op0, 1)) && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1) return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx); @@ -2184,8 +2436,6 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, && (reversed = reversed_comparison (op0, mode))) return reversed; - break; - tem = simplify_associative_operation (code, mode, op0, op1); if (tem) return tem; @@ -2194,12 +2444,24 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, case AND: if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0)) return trueop1; - /* If we are turning off bits already known off in OP0, we need - not do an AND. */ - if (GET_CODE (trueop1) == CONST_INT - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT - && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0) - return op0; + if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) + { + HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode); + HOST_WIDE_INT nzop1; + if (CONST_INT_P (trueop1)) + { + HOST_WIDE_INT val1 = INTVAL (trueop1); + /* If we are turning off bits already known off in OP0, we need + not do an AND. */ + if ((nzop0 & ~val1) == 0) + return op0; + } + nzop1 = nonzero_bits (trueop1, mode); + /* If we are clearing all the nonzero bits, the result is zero. */ + if ((nzop1 & nzop0) == 0 + && !side_effects_p (op0) && !side_effects_p (op1)) + return CONST0_RTX (mode); + } if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0) && GET_MODE_CLASS (mode) != MODE_CC) return op0; @@ -2214,7 +2476,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, there are no nonzero bits of C outside of X's mode. */ if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND) - && GET_CODE (trueop1) == CONST_INT + && CONST_INT_P (trueop1) && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0))) & INTVAL (trueop1)) == 0) @@ -2226,6 +2488,30 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode); } + /* Transform (and (truncate X) C) into (truncate (and X C)). This way + we might be able to further simplify the AND with X and potentially + remove the truncation altogether. */ + if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1)) + { + rtx x = XEXP (op0, 0); + enum machine_mode xmode = GET_MODE (x); + tem = simplify_gen_binary (AND, xmode, x, + gen_int_mode (INTVAL (trueop1), xmode)); + return simplify_gen_unary (TRUNCATE, mode, tem, xmode); + } + + /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */ + if (GET_CODE (op0) == IOR + && CONST_INT_P (trueop1) + && CONST_INT_P (XEXP (op0, 1))) + { + HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1)); + return simplify_gen_binary (IOR, mode, + simplify_gen_binary (AND, mode, + XEXP (op0, 0), op1), + gen_int_mode (tmp, mode)); + } + /* Convert (A ^ B) & A to A & (~B) since the latter is often a single insn (and may simplify more). 
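Editor's illustration: the new (A | C1) & C2 canonicalization above is AND distributing over OR, with C1 & C2 folded to a constant; pushing AND below TRUNCATE in the preceding hunk is safe for the same reason, since truncation keeps exactly the low bits and AND operates bitwise. A quick exhaustive check of the distribution law as a standalone sketch:

#include <assert.h>

int main (void)
{
  for (unsigned a = 0; a < 256; a++)
    for (unsigned c1 = 0; c1 < 256; c1++)
      for (unsigned c2 = 0; c2 < 256; c2++)
        /* (A | C1) & C2 == (A & C2) | (C1 & C2)  */
        assert (((a | c1) & c2) == ((a & c2) | (c1 & c2)));
  return 0;
}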
*/ if (GET_CODE (op0) == XOR @@ -2269,8 +2555,10 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, ((A & N) + B) & M -> (A + B) & M Similarly if (N & M) == 0, ((A | N) + B) & M -> (A + B) & M - and for - instead of + and/or ^ instead of |. */ - if (GET_CODE (trueop1) == CONST_INT + and for - instead of + and/or ^ instead of |. + Also, if (N & M) == 0, then + (A +- N) & M -> A & M. */ + if (CONST_INT_P (trueop1) && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && ~INTVAL (trueop1) && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0 @@ -2282,20 +2570,24 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, pmop[0] = XEXP (op0, 0); pmop[1] = XEXP (op0, 1); + if (CONST_INT_P (pmop[1]) + && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0) + return simplify_gen_binary (AND, mode, pmop[0], op1); + for (which = 0; which < 2; which++) { tem = pmop[which]; switch (GET_CODE (tem)) { case AND: - if (GET_CODE (XEXP (tem, 1)) == CONST_INT + if (CONST_INT_P (XEXP (tem, 1)) && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == INTVAL (trueop1)) pmop[which] = XEXP (tem, 0); break; case IOR: case XOR: - if (GET_CODE (XEXP (tem, 1)) == CONST_INT + if (CONST_INT_P (XEXP (tem, 1)) && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0) pmop[which] = XEXP (tem, 0); break; @@ -2311,6 +2603,19 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, return simplify_gen_binary (code, mode, tem, op1); } } + + /* (and X (ior (not X) Y) -> (and X Y) */ + if (GET_CODE (op1) == IOR + && GET_CODE (XEXP (op1, 0)) == NOT + && op0 == XEXP (XEXP (op1, 0), 0)) + return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1)); + + /* (and (ior (not X) Y) X) -> (and X Y) */ + if (GET_CODE (op0) == IOR + && GET_CODE (XEXP (op0, 0)) == NOT + && op1 == XEXP (XEXP (op0, 0), 0)) + return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1)); + tem = simplify_associative_operation (code, mode, op0, op1); if (tem) return tem; @@ -2328,7 +2633,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, if (trueop1 == CONST1_RTX (mode)) return rtl_hooks.gen_lowpart_no_emit (mode, op0); /* Convert divide by power of two into shift. */ - if (GET_CODE (trueop1) == CONST_INT + if (CONST_INT_P (trueop1) && (val = exact_log2 (INTVAL (trueop1))) > 0) return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val)); break; @@ -2363,8 +2668,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, return simplify_gen_unary (NEG, mode, op0, mode); /* Change FP division by a constant into multiplication. - Only do this with -funsafe-math-optimizations. */ - if (flag_unsafe_math_optimizations + Only do this with -freciprocal-math. */ + if (flag_reciprocal_math && !REAL_VALUES_EQUAL (d, dconst0)) { REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d); @@ -2410,7 +2715,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, return CONST0_RTX (mode); } /* Implement modulus by power of two as AND. */ - if (GET_CODE (trueop1) == CONST_INT + if (CONST_INT_P (trueop1) && exact_log2 (INTVAL (trueop1)) > 0) return simplify_gen_binary (AND, mode, op0, GEN_INT (INTVAL (op1) - 1)); @@ -2441,19 +2746,27 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1)) return op0; /* Rotating ~0 always results in ~0. 
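Editor's illustration: two more hunks above are plain bit identities: X & (~X | Y) is X & Y, and for unsigned X, division and modulus by a power of two are a shift and a mask, which is why UDIV by 2^K becomes LSHIFTRT and UMOD becomes AND. An exhaustive 8-bit check as a standalone sketch:

#include <assert.h>
#include <stdint.h>

int main (void)
{
  for (unsigned x = 0; x < 256; x++)
    {
      for (unsigned y = 0; y < 256; y++)
        /* (and X (ior (not X) Y)) -> (and X Y)  */
        assert ((x & ((uint8_t) ~x | y)) == (x & y));

      for (unsigned k = 0; k < 8; k++)
        {
          /* Power-of-two UMOD as AND, UDIV as shift.  */
          unsigned p = 1u << k;
          assert (x % p == (x & (p - 1)));
          assert (x / p == x >> k);
        }
    }
  return 0;
}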
*/ - if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT + if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode) && ! side_effects_p (op1)) return op0; + canonicalize_shift: + if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1)) + { + val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1); + if (val != INTVAL (op1)) + return simplify_gen_binary (code, mode, op0, GEN_INT (val)); + } break; case ASHIFT: case SS_ASHIFT: + case US_ASHIFT: if (trueop1 == CONST0_RTX (mode)) return op0; if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1)) return op0; - break; + goto canonicalize_shift; case LSHIFTRT: if (trueop1 == CONST0_RTX (mode)) @@ -2462,9 +2775,9 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, return op0; /* Optimize (lshiftrt (clz X) C) as (eq X 0). */ if (GET_CODE (op0) == CLZ - && GET_CODE (trueop1) == CONST_INT + && CONST_INT_P (trueop1) && STORE_FLAG_VALUE == 1 - && INTVAL (trueop1) < width) + && INTVAL (trueop1) < (HOST_WIDE_INT)width) { enum machine_mode imode = GET_MODE (XEXP (op0, 0)); unsigned HOST_WIDE_INT zero_val = 0; @@ -2475,11 +2788,11 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, return simplify_gen_relational (EQ, mode, imode, XEXP (op0, 0), const0_rtx); } - break; + goto canonicalize_shift; case SMIN: if (width <= HOST_BITS_PER_WIDE_INT - && GET_CODE (trueop1) == CONST_INT + && CONST_INT_P (trueop1) && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1) && ! side_effects_p (op0)) return op1; @@ -2492,7 +2805,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, case SMAX: if (width <= HOST_BITS_PER_WIDE_INT - && GET_CODE (trueop1) == CONST_INT + && CONST_INT_P (trueop1) && ((unsigned HOST_WIDE_INT) INTVAL (trueop1) == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1) && ! side_effects_p (op0)) @@ -2528,6 +2841,10 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, case US_PLUS: case SS_MINUS: case US_MINUS: + case SS_MULT: + case US_MULT: + case SS_DIV: + case US_DIV: /* ??? There are simplifications that can be done. */ return 0; @@ -2538,11 +2855,93 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0))); gcc_assert (GET_CODE (trueop1) == PARALLEL); gcc_assert (XVECLEN (trueop1, 0) == 1); - gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT); + gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0))); if (GET_CODE (trueop0) == CONST_VECTOR) return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0))); + + /* Extract a scalar element from a nested VEC_SELECT expression + (with optional nested VEC_CONCAT expression). Some targets + (i386) extract scalar element from a vector using chain of + nested VEC_SELECT expressions. When input operand is a memory + operand, this operation can be simplified to a simple scalar + load from an offseted memory address. */ + if (GET_CODE (trueop0) == VEC_SELECT) + { + rtx op0 = XEXP (trueop0, 0); + rtx op1 = XEXP (trueop0, 1); + + enum machine_mode opmode = GET_MODE (op0); + int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode)); + int n_elts = GET_MODE_SIZE (opmode) / elt_size; + + int i = INTVAL (XVECEXP (trueop1, 0, 0)); + int elem; + + rtvec vec; + rtx tmp_op, tmp; + + gcc_assert (GET_CODE (op1) == PARALLEL); + gcc_assert (i < n_elts); + + /* Select element, pointed by nested selector. 
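Editor's illustration: the new canonicalize_shift label above masks a constant shift count with the mode width minus one, but only when the target defines SHIFT_COUNT_TRUNCATED, since only then is a count of 33 in a 32-bit mode guaranteed to behave as a count of 1. A minimal sketch of the reduction; the names are illustrative:

#include <assert.h>

/* Reduce COUNT the way canonicalize_shift does for a WIDTH-bit mode.
   Valid only when the target truncates shift counts; otherwise large
   counts may have target-defined behavior instead.  */
static int
canonical_count (int count, int width)
{
  return count & (width - 1);   /* width is a power of two */
}

int main (void)
{
  assert (canonical_count (33, 32) == 1);
  assert (canonical_count (31, 32) == 31);
  return 0;
}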
*/ + elem = INTVAL (XVECEXP (op1, 0, i)); + + /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */ + if (GET_CODE (op0) == VEC_CONCAT) + { + rtx op00 = XEXP (op0, 0); + rtx op01 = XEXP (op0, 1); + + enum machine_mode mode00, mode01; + int n_elts00, n_elts01; + + mode00 = GET_MODE (op00); + mode01 = GET_MODE (op01); + + /* Find out number of elements of each operand. */ + if (VECTOR_MODE_P (mode00)) + { + elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00)); + n_elts00 = GET_MODE_SIZE (mode00) / elt_size; + } + else + n_elts00 = 1; + + if (VECTOR_MODE_P (mode01)) + { + elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01)); + n_elts01 = GET_MODE_SIZE (mode01) / elt_size; + } + else + n_elts01 = 1; + + gcc_assert (n_elts == n_elts00 + n_elts01); + + /* Select correct operand of VEC_CONCAT + and adjust selector. */ + if (elem < n_elts01) + tmp_op = op00; + else + { + tmp_op = op01; + elem -= n_elts00; + } + } + else + tmp_op = op0; + + vec = rtvec_alloc (1); + RTVEC_ELT (vec, 0) = GEN_INT (elem); + + tmp = gen_rtx_fmt_ee (code, mode, + tmp_op, gen_rtx_PARALLEL (VOIDmode, vec)); + return tmp; + } + if (GET_CODE (trueop0) == VEC_DUPLICATE + && GET_MODE (XEXP (trueop0, 0)) == mode) + return XEXP (trueop0, 0); } else { @@ -2563,7 +2962,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, { rtx x = XVECEXP (trueop1, 0, i); - gcc_assert (GET_CODE (x) == CONST_INT); + gcc_assert (CONST_INT_P (x)); RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x)); } @@ -2573,7 +2972,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, } if (XVECLEN (trueop1, 0) == 1 - && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT + && CONST_INT_P (XVECEXP (trueop1, 0, 0)) && GET_CODE (trueop0) == VEC_CONCAT) { rtx vec = trueop0; @@ -2625,10 +3024,10 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, gcc_assert (GET_MODE_INNER (mode) == op1_mode); if ((GET_CODE (trueop0) == CONST_VECTOR - || GET_CODE (trueop0) == CONST_INT + || CONST_INT_P (trueop0) || GET_CODE (trueop0) == CONST_DOUBLE) && (GET_CODE (trueop1) == CONST_VECTOR - || GET_CODE (trueop1) == CONST_INT + || CONST_INT_P (trueop1) || GET_CODE (trueop1) == CONST_DOUBLE)) { int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); @@ -2708,7 +3107,12 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, if (VECTOR_MODE_P (mode) && code == VEC_CONCAT - && CONSTANT_P (op0) && CONSTANT_P (op1)) + && (CONST_INT_P (op0) + || GET_CODE (op0) == CONST_DOUBLE + || GET_CODE (op0) == CONST_FIXED) + && (CONST_INT_P (op1) + || GET_CODE (op1) == CONST_DOUBLE + || GET_CODE (op1) == CONST_FIXED)) { unsigned n_elts = GET_MODE_NUNITS (mode); rtvec v = rtvec_alloc (n_elts); @@ -2854,8 +3258,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, is unable to accurately represent the result. */ if ((flag_rounding_math - || (REAL_MODE_FORMAT_COMPOSITE_P (mode) - && !flag_unsafe_math_optimizations)) + || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations)) && (inexact || !real_identical (&result, &value))) return NULL_RTX; @@ -2866,8 +3269,8 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, /* We can fold some multi-word operations. 
*/ if (GET_MODE_CLASS (mode) == MODE_INT && width == HOST_BITS_PER_WIDE_INT * 2 - && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT) - && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT)) + && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0)) + && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1))) { unsigned HOST_WIDE_INT l1, l2, lv, lt; HOST_WIDE_INT h1, h2, hv, ht; @@ -3002,7 +3405,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, return immed_double_const (lv, hv, mode); } - if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT + if (CONST_INT_P (op0) && CONST_INT_P (op1) && width <= HOST_BITS_PER_WIDE_INT && width != 0) { /* Get the integer argument values in two forms: @@ -3029,23 +3432,23 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, arg0s = arg0; arg1s = arg1; } - + /* Compute the value of the arithmetic. */ - + switch (code) { case PLUS: val = arg0s + arg1s; break; - + case MINUS: val = arg0s - arg1s; break; - + case MULT: val = arg0s * arg1s; break; - + case DIV: if (arg1s == 0 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) @@ -3053,7 +3456,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, return 0; val = arg0s / arg1s; break; - + case MOD: if (arg1s == 0 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) @@ -3061,7 +3464,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, return 0; val = arg0s % arg1s; break; - + case UDIV: if (arg1 == 0 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) @@ -3069,7 +3472,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, return 0; val = (unsigned HOST_WIDE_INT) arg0 / arg1; break; - + case UMOD: if (arg1 == 0 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) @@ -3077,19 +3480,19 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, return 0; val = (unsigned HOST_WIDE_INT) arg0 % arg1; break; - + case AND: val = arg0 & arg1; break; - + case IOR: val = arg0 | arg1; break; - + case XOR: val = arg0 ^ arg1; break; - + case LSHIFTRT: case ASHIFT: case ASHIFTRT: @@ -3104,64 +3507,69 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, arg1 = (unsigned HOST_WIDE_INT) arg1 % width; else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode)) return 0; - + val = (code == ASHIFT ? ((unsigned HOST_WIDE_INT) arg0) << arg1 : ((unsigned HOST_WIDE_INT) arg0) >> arg1); - + /* Sign-extend the result for arithmetic right shifts. */ if (code == ASHIFTRT && arg0s < 0 && arg1 > 0) val |= ((HOST_WIDE_INT) -1) << (width - arg1); break; - + case ROTATERT: if (arg1 < 0) return 0; - + arg1 %= width; val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1)) | (((unsigned HOST_WIDE_INT) arg0) >> arg1)); break; - + case ROTATE: if (arg1 < 0) return 0; - + arg1 %= width; val = ((((unsigned HOST_WIDE_INT) arg0) << arg1) | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1))); break; - + case COMPARE: /* Do nothing here. */ return 0; - + case SMIN: val = arg0s <= arg1s ? arg0s : arg1s; break; - + case UMIN: val = ((unsigned HOST_WIDE_INT) arg0 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); break; - + case SMAX: val = arg0s > arg1s ? arg0s : arg1s; break; - + case UMAX: val = ((unsigned HOST_WIDE_INT) arg0 > (unsigned HOST_WIDE_INT) arg1 ? 
arg0 : arg1); break; - + case SS_PLUS: case US_PLUS: case SS_MINUS: case US_MINUS: + case SS_MULT: + case US_MULT: + case SS_DIV: + case US_DIV: case SS_ASHIFT: + case US_ASHIFT: /* ??? There are simplifications that can be done. */ return 0; - + default: gcc_unreachable (); } @@ -3185,21 +3593,23 @@ struct simplify_plus_minus_op_data { rtx op; short neg; - short ix; }; -static int -simplify_plus_minus_op_data_cmp (const void *p1, const void *p2) +static bool +simplify_plus_minus_op_data_cmp (rtx x, rtx y) { - const struct simplify_plus_minus_op_data *d1 = p1; - const struct simplify_plus_minus_op_data *d2 = p2; int result; - result = (commutative_operand_precedence (d2->op) - - commutative_operand_precedence (d1->op)); + result = (commutative_operand_precedence (y) + - commutative_operand_precedence (x)); if (result) - return result; - return d1->ix - d2->ix; + return result > 0; + + /* Group together equal REGs to do more simplification. */ + if (REG_P (x) && REG_P (y)) + return REGNO (x) > REGNO (y); + else + return false; } static rtx @@ -3209,7 +3619,7 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, struct simplify_plus_minus_op_data ops[8]; rtx result, tem; int n_ops = 2, input_ops = 2; - int first, changed, canonicalized = 0; + int changed, n_constants = 0, canonicalized = 0; int i, j; memset (ops, 0, sizeof ops); @@ -3286,6 +3696,7 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, break; case CONST_INT: + n_constants++; if (this_neg) { ops[i].op = neg_const_int (mode, this_op); @@ -3302,18 +3713,10 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, } while (changed); - gcc_assert (n_ops >= 2); - if (!canonicalized) - { - int n_constants = 0; - - for (i = 0; i < n_ops; i++) - if (GET_CODE (ops[i].op) == CONST_INT) - n_constants++; + if (n_constants > 1) + canonicalized = 1; - if (n_constants <= 1) - return NULL_RTX; - } + gcc_assert (n_ops >= 2); /* If we only have two operands, we can avoid the loops. */ if (n_ops == 2) @@ -3342,22 +3745,33 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, return simplify_const_binary_operation (code, mode, lhs, rhs); } - /* Now simplify each pair of operands until nothing changes. The first - time through just simplify constants against each other. */ - - first = 1; + /* Now simplify each pair of operands until nothing changes. */ do { - changed = first; + /* Insertion sort is good enough for an eight-element array. */ + for (i = 1; i < n_ops; i++) + { + struct simplify_plus_minus_op_data save; + j = i - 1; + if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op)) + continue; + + canonicalized = 1; + save = ops[i]; + do + ops[j + 1] = ops[j]; + while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op)); + ops[j + 1] = save; + } - for (i = 0; i < n_ops - 1; i++) - for (j = i + 1; j < n_ops; j++) + changed = 0; + for (i = n_ops - 1; i > 0; i--) + for (j = i - 1; j >= 0; j--) { - rtx lhs = ops[i].op, rhs = ops[j].op; - int lneg = ops[i].neg, rneg = ops[j].neg; + rtx lhs = ops[j].op, rhs = ops[i].op; + int lneg = ops[j].neg, rneg = ops[i].neg; - if (lhs != 0 && rhs != 0 - && (! 
first || (CONSTANT_P (lhs) && CONSTANT_P (rhs)))) + if (lhs != 0 && rhs != 0) { enum rtx_code ncode = PLUS; @@ -3370,8 +3784,8 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, else if (swap_commutative_operands_p (lhs, rhs)) tem = lhs, lhs = rhs, rhs = tem; - if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT) - && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT)) + if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs)) + && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs))) { rtx tem_lhs, tem_rhs; @@ -3384,7 +3798,7 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, } else tem = simplify_binary_operation (ncode, mode, lhs, rhs); - + /* Reject "simplifications" that just wrap the two arguments in a CONST. Failure to do so can result in infinite recursion with simplify_binary_operation @@ -3393,53 +3807,45 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, && ! (GET_CODE (tem) == CONST && GET_CODE (XEXP (tem, 0)) == ncode && XEXP (XEXP (tem, 0), 0) == lhs - && XEXP (XEXP (tem, 0), 1) == rhs) - /* Don't allow -x + -1 -> ~x simplifications in the - first pass. This allows us the chance to combine - the -1 with other constants. */ - && ! (first - && GET_CODE (tem) == NOT - && XEXP (tem, 0) == rhs)) + && XEXP (XEXP (tem, 0), 1) == rhs)) { lneg &= rneg; if (GET_CODE (tem) == NEG) tem = XEXP (tem, 0), lneg = !lneg; - if (GET_CODE (tem) == CONST_INT && lneg) + if (CONST_INT_P (tem) && lneg) tem = neg_const_int (mode, tem), lneg = 0; ops[i].op = tem; ops[i].neg = lneg; ops[j].op = NULL_RTX; changed = 1; + canonicalized = 1; } } } - first = 0; + /* If nothing changed, fail. */ + if (!canonicalized) + return NULL_RTX; + + /* Pack all the operands to the lower-numbered entries. */ + for (i = 0, j = 0; j < n_ops; j++) + if (ops[j].op) + { + ops[i] = ops[j]; + i++; + } + n_ops = i; } while (changed); - /* Pack all the operands to the lower-numbered entries. */ - for (i = 0, j = 0; j < n_ops; j++) - if (ops[j].op) - { - ops[i] = ops[j]; - /* Stabilize sort. */ - ops[i].ix = i; - i++; - } - n_ops = i; - - /* Sort the operations based on swap_commutative_operands_p. */ - qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp); - /* Create (minus -C X) instead of (neg (const (plus X C))). */ if (n_ops == 2 - && GET_CODE (ops[1].op) == CONST_INT + && CONST_INT_P (ops[1].op) && CONSTANT_P (ops[0].op) && ops[0].neg) return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op); - + /* We suppressed creation of trivial CONST expressions in the combination loop to avoid recursion. Create one manually now. The combination loop should have ensured that there is exactly @@ -3447,7 +3853,7 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, in the array and that any other constant will be next-to-last. */ if (n_ops > 1 - && GET_CODE (ops[n_ops - 1].op) == CONST_INT + && CONST_INT_P (ops[n_ops - 1].op) && CONSTANT_P (ops[n_ops - 2].op)) { rtx value = ops[n_ops - 1].op; @@ -3482,7 +3888,7 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, /* Check whether an operand is suitable for calling simplify_plus_minus. 
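
   (A worked example, added for illustration only: simplify_plus_minus
   first flattens its input into the ops[] array, so

     (minus (plus x (const_int 4)) (minus x y))

   becomes { x, (const_int 4), x, y } with neg flags { 0, 0, 1, 0 };
   the combination loop above then cancels x against its negated copy
   and the expression is rebuilt as (plus y (const_int 4)).)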
*/ static bool -plus_minus_operand_p (rtx x) +plus_minus_operand_p (const_rtx x) { return GET_CODE (x) == PLUS || GET_CODE (x) == MINUS @@ -3526,7 +3932,7 @@ simplify_relational_operation (enum rtx_code code, enum machine_mode mode, } #else return NULL_RTX; -#endif +#endif } if (VECTOR_MODE_P (mode)) { @@ -3564,8 +3970,8 @@ simplify_relational_operation (enum rtx_code code, enum machine_mode mode, /* If op0 is a compare, extract the comparison arguments from it. */ if (GET_CODE (op0) == COMPARE && op1 == const0_rtx) - return simplify_relational_operation (code, mode, VOIDmode, - XEXP (op0, 0), XEXP (op0, 1)); + return simplify_gen_relational (code, mode, VOIDmode, + XEXP (op0, 0), XEXP (op0, 1)); if (GET_MODE_CLASS (cmp_mode) == MODE_CC || CC0_P (op0)) @@ -3589,29 +3995,92 @@ simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode, { enum rtx_code op0code = GET_CODE (op0); - if (GET_CODE (op1) == CONST_INT) + if (op1 == const0_rtx && COMPARISON_P (op0)) { - if (INTVAL (op1) == 0 && COMPARISON_P (op0)) + /* If op0 is a comparison, extract the comparison arguments + from it. */ + if (code == NE) { - /* If op0 is a comparison, extract the comparison arguments - from it. */ - if (code == NE) - { - if (GET_MODE (op0) == mode) - return simplify_rtx (op0); - else - return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode, - XEXP (op0, 0), XEXP (op0, 1)); - } - else if (code == EQ) - { - enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX); - if (new_code != UNKNOWN) - return simplify_gen_relational (new_code, mode, VOIDmode, - XEXP (op0, 0), XEXP (op0, 1)); - } + if (GET_MODE (op0) == mode) + return simplify_rtx (op0); + else + return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode, + XEXP (op0, 0), XEXP (op0, 1)); + } + else if (code == EQ) + { + enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX); + if (new_code != UNKNOWN) + return simplify_gen_relational (new_code, mode, VOIDmode, + XEXP (op0, 0), XEXP (op0, 1)); + } + } + + /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to + (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */ + if ((code == LTU || code == GEU) + && GET_CODE (op0) == PLUS + && CONST_INT_P (XEXP (op0, 1)) + && (rtx_equal_p (op1, XEXP (op0, 0)) + || rtx_equal_p (op1, XEXP (op0, 1)))) + { + rtx new_cmp + = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode); + return simplify_gen_relational ((code == LTU ? GEU : LTU), mode, + cmp_mode, XEXP (op0, 0), new_cmp); + } + + /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */ + if ((code == LTU || code == GEU) + && GET_CODE (op0) == PLUS + && rtx_equal_p (op1, XEXP (op0, 1)) + /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */ + && !rtx_equal_p (op1, XEXP (op0, 0))) + return simplify_gen_relational (code, mode, cmp_mode, op0, + copy_rtx (XEXP (op0, 0))); + + if (op1 == const0_rtx) + { + /* Canonicalize (GTU x 0) as (NE x 0). */ + if (code == GTU) + return simplify_gen_relational (NE, mode, cmp_mode, op0, op1); + /* Canonicalize (LEU x 0) as (EQ x 0). */ + if (code == LEU) + return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1); + } + else if (op1 == const1_rtx) + { + switch (code) + { + case GE: + /* Canonicalize (GE x 1) as (GT x 0). */ + return simplify_gen_relational (GT, mode, cmp_mode, + op0, const0_rtx); + case GEU: + /* Canonicalize (GEU x 1) as (NE x 0). */ + return simplify_gen_relational (NE, mode, cmp_mode, + op0, const0_rtx); + case LT: + /* Canonicalize (LT x 1) as (LE x 0). 
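
	     (As an aside: the full table implemented by this switch and
	     by the constm1_rtx block just below is

	       (GE  x 1) -> (GT x 0)      (LE x -1) -> (LT x 0)
	       (GEU x 1) -> (NE x 0)      (GT x -1) -> (GE x 0)
	       (LT  x 1) -> (LE x 0)
	       (LTU x 1) -> (EQ x 0)

	     each rewrite is exact on integers because no value lies
	     strictly between the two constants involved.)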
*/
+	  return simplify_gen_relational (LE, mode, cmp_mode,
+					  op0, const0_rtx);
+	case LTU:
+	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
+	  return simplify_gen_relational (EQ, mode, cmp_mode,
+					  op0, const0_rtx);
+	default:
+	  break;
+	}
+    }
+  else if (op1 == constm1_rtx)
+    {
+      /* Canonicalize (LE x -1) as (LT x 0).  */
+      if (code == LE)
+	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
+      /* Canonicalize (GT x -1) as (GE x 0).  */
+      if (code == GT)
+	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
+    }
 
   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
   if ((code == EQ || code == NE)
@@ -3669,17 +4138,99 @@ simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
   /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
   if ((code == EQ || code == NE)
       && op0code == XOR
-      && (GET_CODE (op1) == CONST_INT
+      && (CONST_INT_P (op1)
 	  || GET_CODE (op1) == CONST_DOUBLE)
-      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+      && (CONST_INT_P (XEXP (op0, 1))
 	  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
 				    simplify_gen_binary (XOR, cmp_mode,
 							 XEXP (op0, 1), op1));
 
+  if (op0code == POPCOUNT && op1 == const0_rtx)
+    switch (code)
+      {
+      case EQ:
+      case LE:
+      case LEU:
+	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
+	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
+					XEXP (op0, 0), const0_rtx);
+
+      case NE:
+      case GT:
+      case GTU:
+	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
+	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
+					XEXP (op0, 0), const0_rtx);
+
+      default:
+	break;
+      }
+
   return NULL_RTX;
 }
 
+enum
+{
+  CMP_EQ = 1,
+  CMP_LT = 2,
+  CMP_GT = 4,
+  CMP_LTU = 8,
+  CMP_GTU = 16
+};
+
+
+/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
+   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
+   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
+   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
+   For floating-point comparisons, assume that the operands were ordered.  */
+
+static rtx
+comparison_result (enum rtx_code code, int known_results)
+{
+  switch (code)
+    {
+    case EQ:
+    case UNEQ:
+      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
+    case NE:
+    case LTGT:
+      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
+
+    case LT:
+    case UNLT:
+      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
+    case GE:
+    case UNGE:
+      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
+
+    case GT:
+    case UNGT:
+      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
+    case LE:
+    case UNLE:
+      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
+
+    case LTU:
+      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
+    case GEU:
+      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
+
+    case GTU:
+      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
+    case LEU:
+      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
+
+    case ORDERED:
+      return const_true_rtx;
+    case UNORDERED:
+      return const0_rtx;
+    default:
+      gcc_unreachable ();
+    }
+}
+
 /* Check if the given comparison (done in the given MODE) is actually
    a tautology or a contradiction.  If no simplification is possible,
   this function returns zero.
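
   (Illustrative aside, not part of the patch: with the CMP_* encoding
   above, a caller that already knows op0 < op1 under both the signed
   and the unsigned interpretation can fold any comparison code in a
   single call:

     rtx res = comparison_result (code, CMP_LT | CMP_LTU);

   which yields const_true_rtx for LT, LE, NE, LTU and LEU, and
   const0_rtx for the remaining codes -- exactly the mask that the
   integer arm below builds in its "cr" variable.)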
@@ -3690,7 +4241,6 @@ simplify_const_relational_operation (enum rtx_code code,
 				     enum machine_mode mode,
 				     rtx op0, rtx op1)
 {
-  int equal, op0lt, op0ltu, op1lt, op1ltu;
   rtx tem;
   rtx trueop0;
   rtx trueop1;
@@ -3733,40 +4283,44 @@ simplify_const_relational_operation (
      a register or a CONST_INT, this can't help; testing for these cases
      will prevent infinite recursion here and speed things up.
 
-     If CODE is an unsigned comparison, then we can never do this optimization,
-     because it gives an incorrect result if the subtraction wraps around zero.
-     ANSI C defines unsigned operations such that they never overflow, and
-     thus such cases can not be ignored; but we cannot do it even for
-     signed comparisons for languages such as Java, so test flag_wrapv.  */
+     We can only do this for EQ and NE comparisons; otherwise we might
+     lose or introduce overflow that we cannot disregard as undefined,
+     since we do not know the signedness of the operation on either the
+     left- or the right-hand side of the comparison.  */
 
-  if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
-      && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
-	    && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
+  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
+      && (code == EQ || code == NE)
+      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
+	    && (REG_P (op1) || CONST_INT_P (trueop1)))
       && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
-      /* We cannot do this for == or != if tem is a nonzero address.  */
-      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
-      && code != GTU && code != GEU && code != LTU && code != LEU)
+      /* We cannot do this if tem is a nonzero address.  */
+      && ! nonzero_address_p (tem))
     return simplify_const_relational_operation (signed_condition (code),
 						mode, tem, const0_rtx);
 
-  if (flag_unsafe_math_optimizations && code == ORDERED)
+  if (! HONOR_NANS (mode) && code == ORDERED)
     return const_true_rtx;
 
-  if (flag_unsafe_math_optimizations && code == UNORDERED)
+  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;
 
   /* For modes without NaNs, if the two operands are equal, we know the
-     result except if they have side-effects.  */
-  if (! HONOR_NANS (GET_MODE (trueop0))
+     result except if they have side-effects.  Even with NaNs we know
+     the result of unordered comparisons and, if signaling NaNs are
+     irrelevant, also the result of LT/GT/LTGT.  */
+  if ((! HONOR_NANS (GET_MODE (trueop0))
+       || code == UNEQ || code == UNLE || code == UNGE
+       || ((code == LT || code == GT || code == LTGT)
+	   && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
-    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
+    return comparison_result (code, CMP_EQ);
 
   /* If the operands are floating-point constants, see if we can fold
      the result.  */
-  else if (GET_CODE (trueop0) == CONST_DOUBLE
-	   && GET_CODE (trueop1) == CONST_DOUBLE
-	   && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
+  if (GET_CODE (trueop0) == CONST_DOUBLE
+      && GET_CODE (trueop1) == CONST_DOUBLE
+      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
     {
       REAL_VALUE_TYPE d0, d1;
 
@@ -3797,17 +4351,17 @@ simplify_const_relational_operation (
 	  return 0;
 	}
 
-      equal = REAL_VALUES_EQUAL (d0, d1);
-      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
-      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
+      return comparison_result (code,
+				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
+				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
     }
 
   /* Otherwise, see if the operands are both integers.
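
     (Aside: on (high, low) double-word pairs the signed "less than" used
     below is lexicographic -- a signed compare on the high words with an
     unsigned tie-break on the low words, roughly

       lt = h0s < h1s || (h0s == h1s && l0u < l1u);

     the unsigned variant substitutes h0u/h1u for h0s/h1s.)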
*/ - else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode) - && (GET_CODE (trueop0) == CONST_DOUBLE - || GET_CODE (trueop0) == CONST_INT) - && (GET_CODE (trueop1) == CONST_DOUBLE - || GET_CODE (trueop1) == CONST_INT)) + if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode) + && (GET_CODE (trueop0) == CONST_DOUBLE + || CONST_INT_P (trueop0)) + && (GET_CODE (trueop1) == CONST_DOUBLE + || CONST_INT_P (trueop1))) { int width = GET_MODE_BITSIZE (mode); HOST_WIDE_INT l0s, h0s, l1s, h1s; @@ -3852,166 +4406,232 @@ simplify_const_relational_operation (enum rtx_code code, if (width != 0 && width <= HOST_BITS_PER_WIDE_INT) h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s); - equal = (h0u == h1u && l0u == l1u); - op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u)); - op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u)); - op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u)); - op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u)); + if (h0u == h1u && l0u == l1u) + return comparison_result (code, CMP_EQ); + else + { + int cr; + cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT; + cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU; + return comparison_result (code, cr); + } } - /* Otherwise, there are some code-specific tests we can make. */ - else + /* Optimize comparisons with upper and lower bounds. */ + if (SCALAR_INT_MODE_P (mode) + && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + && CONST_INT_P (trueop1)) { - /* Optimize comparisons with upper and lower bounds. */ - if (SCALAR_INT_MODE_P (mode) - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) - { - rtx mmin, mmax; - int sign; - - if (code == GEU - || code == LEU - || code == GTU - || code == LTU) - sign = 0; - else - sign = 1; + int sign; + unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode); + HOST_WIDE_INT val = INTVAL (trueop1); + HOST_WIDE_INT mmin, mmax; + + if (code == GEU + || code == LEU + || code == GTU + || code == LTU) + sign = 0; + else + sign = 1; - get_mode_bounds (mode, sign, mode, &mmin, &mmax); + /* Get a reduced range if the sign bit is zero. */ + if (nonzero <= (GET_MODE_MASK (mode) >> 1)) + { + mmin = 0; + mmax = nonzero; + } + else + { + rtx mmin_rtx, mmax_rtx; + get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx); - tem = NULL_RTX; - switch (code) + mmin = INTVAL (mmin_rtx); + mmax = INTVAL (mmax_rtx); + if (sign) { - case GEU: - case GE: - /* x >= min is always true. */ - if (rtx_equal_p (trueop1, mmin)) - tem = const_true_rtx; - else - break; - - case LEU: - case LE: - /* x <= max is always true. */ - if (rtx_equal_p (trueop1, mmax)) - tem = const_true_rtx; - break; + unsigned int sign_copies = num_sign_bit_copies (trueop0, mode); - case GTU: - case GT: - /* x > max is always false. */ - if (rtx_equal_p (trueop1, mmax)) - tem = const0_rtx; - break; - - case LTU: - case LT: - /* x < min is always false. */ - if (rtx_equal_p (trueop1, mmin)) - tem = const0_rtx; - break; - - default: - break; + mmin >>= (sign_copies - 1); + mmax >>= (sign_copies - 1); } - if (tem == const0_rtx - || tem == const_true_rtx) - return tem; } switch (code) { + /* x >= y is always true for y <= mmin, always false for y > mmax. 
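
	 As a concrete instance (illustrative only): when comparing a
	 QImode value x unsigned, mmin = 0 and mmax = 255, so GEU against
	 (const_int 0) folds to const_true_rtx and LTU against
	 (const_int 0) folds to const0_rtx without inspecting x at all.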
*/ + case GEU: + if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin) + return const_true_rtx; + if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax) + return const0_rtx; + break; + case GE: + if (val <= mmin) + return const_true_rtx; + if (val > mmax) + return const0_rtx; + break; + + /* x <= y is always true for y >= mmax, always false for y < mmin. */ + case LEU: + if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax) + return const_true_rtx; + if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin) + return const0_rtx; + break; + case LE: + if (val >= mmax) + return const_true_rtx; + if (val < mmin) + return const0_rtx; + break; + case EQ: - if (trueop1 == const0_rtx && nonzero_address_p (op0)) + /* x == y is always false for y out of range. */ + if (val < mmin || val > mmax) + return const0_rtx; + break; + + /* x > y is always false for y >= mmax, always true for y < mmin. */ + case GTU: + if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax) return const0_rtx; + if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin) + return const_true_rtx; + break; + case GT: + if (val >= mmax) + return const0_rtx; + if (val < mmin) + return const_true_rtx; + break; + + /* x < y is always false for y <= mmin, always true for y > mmax. */ + case LTU: + if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin) + return const0_rtx; + if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax) + return const_true_rtx; + break; + case LT: + if (val <= mmin) + return const0_rtx; + if (val > mmax) + return const_true_rtx; break; case NE: - if (trueop1 == const0_rtx && nonzero_address_p (op0)) + /* x != y is always true for y out of range. */ + if (val < mmin || val > mmax) return const_true_rtx; break; + default: + break; + } + } + + /* Optimize integer comparisons with zero. */ + if (trueop1 == const0_rtx) + { + /* Some addresses are known to be nonzero. We don't know + their sign, but equality comparisons are known. */ + if (nonzero_address_p (trueop0)) + { + if (code == EQ || code == LEU) + return const0_rtx; + if (code == NE || code == GTU) + return const_true_rtx; + } + + /* See if the first operand is an IOR with a constant. If so, we + may be able to determine the result of this comparison. */ + if (GET_CODE (op0) == IOR) + { + rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1)); + if (CONST_INT_P (inner_const) && inner_const != const0_rtx) + { + int sign_bitnum = GET_MODE_BITSIZE (mode) - 1; + int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum + && (INTVAL (inner_const) + & ((HOST_WIDE_INT) 1 << sign_bitnum))); + + switch (code) + { + case EQ: + case LEU: + return const0_rtx; + case NE: + case GTU: + return const_true_rtx; + case LT: + case LE: + if (has_sign) + return const_true_rtx; + break; + case GT: + case GE: + if (has_sign) + return const0_rtx; + break; + default: + break; + } + } + } + } + + /* Optimize comparison of ABS with zero. */ + if (trueop1 == CONST0_RTX (mode) + && (GET_CODE (trueop0) == ABS + || (GET_CODE (trueop0) == FLOAT_EXTEND + && GET_CODE (XEXP (trueop0, 0)) == ABS))) + { + switch (code) + { case LT: /* Optimize abs(x) < 0.0. */ - if (trueop1 == CONST0_RTX (mode) - && !HONOR_SNANS (mode) - && !(flag_wrapv && INTEGRAL_MODE_P (mode))) + if (!HONOR_SNANS (mode) + && (!INTEGRAL_MODE_P (mode) + || (!flag_wrapv && !flag_trapv && flag_strict_overflow))) { - tem = GET_CODE (trueop0) == FLOAT_EXTEND ? 
XEXP (trueop0, 0) - : trueop0; - if (GET_CODE (tem) == ABS) - return const0_rtx; + if (INTEGRAL_MODE_P (mode) + && (issue_strict_overflow_warning + (WARN_STRICT_OVERFLOW_CONDITIONAL))) + warning (OPT_Wstrict_overflow, + ("assuming signed overflow does not occur when " + "assuming abs (x) < 0 is false")); + return const0_rtx; } break; case GE: /* Optimize abs(x) >= 0.0. */ - if (trueop1 == CONST0_RTX (mode) - && !HONOR_NANS (mode) - && !(flag_wrapv && INTEGRAL_MODE_P (mode))) + if (!HONOR_NANS (mode) + && (!INTEGRAL_MODE_P (mode) + || (!flag_wrapv && !flag_trapv && flag_strict_overflow))) { - tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0) - : trueop0; - if (GET_CODE (tem) == ABS) - return const_true_rtx; + if (INTEGRAL_MODE_P (mode) + && (issue_strict_overflow_warning + (WARN_STRICT_OVERFLOW_CONDITIONAL))) + warning (OPT_Wstrict_overflow, + ("assuming signed overflow does not occur when " + "assuming abs (x) >= 0 is true")); + return const_true_rtx; } break; case UNGE: /* Optimize ! (abs(x) < 0.0). */ - if (trueop1 == CONST0_RTX (mode)) - { - tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0) - : trueop0; - if (GET_CODE (tem) == ABS) - return const_true_rtx; - } - break; + return const_true_rtx; default: break; } - - return 0; } - /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set - as appropriate. */ - switch (code) - { - case EQ: - case UNEQ: - return equal ? const_true_rtx : const0_rtx; - case NE: - case LTGT: - return ! equal ? const_true_rtx : const0_rtx; - case LT: - case UNLT: - return op0lt ? const_true_rtx : const0_rtx; - case GT: - case UNGT: - return op1lt ? const_true_rtx : const0_rtx; - case LTU: - return op0ltu ? const_true_rtx : const0_rtx; - case GTU: - return op1ltu ? const_true_rtx : const0_rtx; - case LE: - case UNLE: - return equal || op0lt ? const_true_rtx : const0_rtx; - case GE: - case UNGE: - return equal || op1lt ? const_true_rtx : const0_rtx; - case LEU: - return equal || op0ltu ? const_true_rtx : const0_rtx; - case GEU: - return equal || op1ltu ? const_true_rtx : const0_rtx; - case ORDERED: - return const_true_rtx; - case UNORDERED: - return const0_rtx; - default: - gcc_unreachable (); - } + return 0; } /* Simplify CODE, an operation with result mode MODE and three operands, @@ -4033,9 +4653,9 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode, { case SIGN_EXTRACT: case ZERO_EXTRACT: - if (GET_CODE (op0) == CONST_INT - && GET_CODE (op1) == CONST_INT - && GET_CODE (op2) == CONST_INT + if (CONST_INT_P (op0) + && CONST_INT_P (op1) + && CONST_INT_P (op2) && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width) && width <= (unsigned) HOST_BITS_PER_WIDE_INT) { @@ -4072,7 +4692,7 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode, break; case IF_THEN_ELSE: - if (GET_CODE (op0) == CONST_INT) + if (CONST_INT_P (op0)) return op0 != const0_rtx ? op1 : op2; /* Convert c ? a : a into "a". */ @@ -4109,7 +4729,7 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode, rtx temp; /* Look for happy constants in op1 and op2. */ - if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT) + if (CONST_INT_P (op1) && CONST_INT_P (op2)) { HOST_WIDE_INT t = INTVAL (op1); HOST_WIDE_INT f = INTVAL (op2); @@ -4140,7 +4760,7 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode, /* See if any simplifications were possible. */ if (temp) { - if (GET_CODE (temp) == CONST_INT) + if (CONST_INT_P (temp)) return temp == const0_rtx ? 
op2 : op1; else if (temp) return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2); @@ -4153,7 +4773,7 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode, gcc_assert (GET_MODE (op1) == mode); gcc_assert (VECTOR_MODE_P (mode)); op2 = avoid_constant_pool_reference (op2); - if (GET_CODE (op2) == CONST_INT) + if (CONST_INT_P (op2)) { int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); @@ -4188,15 +4808,16 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode, return 0; } -/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR, - returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR. +/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED + or CONST_VECTOR, + returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR. Works by unpacking OP into a collection of 8-bit values represented as a little-endian array of 'unsigned char', selecting by BYTE, and then repacking them again for OUTERMODE. */ static rtx -simplify_immed_subreg (enum machine_mode outermode, rtx op, +simplify_immed_subreg (enum machine_mode outermode, rtx op, enum machine_mode innermode, unsigned int byte) { /* We support up to 512-bit values (for V8DFmode). */ @@ -4219,7 +4840,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, enum machine_mode outer_submode; /* Some ports misuse CCmode. */ - if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT) + if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op)) return op; /* We have no way to represent a complex constant at the rtl level. */ @@ -4244,17 +4865,17 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, gcc_assert (BITS_PER_UNIT % value_bit == 0); /* I don't know how to handle endianness of sub-units. */ gcc_assert (elem_bitsize % BITS_PER_UNIT == 0); - + for (elem = 0; elem < num_elem; elem++) { unsigned char * vp; rtx el = elems[elem]; - + /* Vectors are kept in target memory order. (This is probably a mistake.) */ { unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT; - unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize) + unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize) / BITS_PER_UNIT); unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte; unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte; @@ -4262,19 +4883,19 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD); vp = value + (bytele * BITS_PER_UNIT) / value_bit; } - + switch (GET_CODE (el)) { case CONST_INT: for (i = 0; - i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize; + i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize; i += value_bit) *vp++ = INTVAL (el) >> i; /* CONST_INTs are always logically sign-extended. */ for (; i < elem_bitsize; i += value_bit) *vp++ = INTVAL (el) < 0 ? -1 : 0; break; - + case CONST_DOUBLE: if (GET_MODE (el) == VOIDmode) { @@ -4320,14 +4941,33 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, ibase = i; *vp++ = tmp[ibase / 32] >> i % 32; } - + /* It shouldn't matter what's done here, so fill it with zero. 
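
	 (Aside on the overall scheme, for illustration:
	 simplify_immed_subreg always goes

	   CONST_*  --unpack-->  unsigned char value[]  --repack-->  CONST_*

	 so taking the SImode lowpart of the DImode constant
	 0x1122334455667788 unpacks eight bytes, keeps the low four on a
	 little-endian target, and repacks them as (const_int 0x55667788).)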
*/ for (; i < elem_bitsize; i += value_bit) *vp++ = 0; } break; - + + case CONST_FIXED: + if (elem_bitsize <= HOST_BITS_PER_WIDE_INT) + { + for (i = 0; i < elem_bitsize; i += value_bit) + *vp++ = CONST_FIXED_VALUE_LOW (el) >> i; + } + else + { + for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit) + *vp++ = CONST_FIXED_VALUE_LOW (el) >> i; + for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize; + i += value_bit) + *vp++ = CONST_FIXED_VALUE_HIGH (el) + >> (i - HOST_BITS_PER_WIDE_INT); + for (; i < elem_bitsize; i += value_bit) + *vp++ = 0; + } + break; + default: gcc_unreachable (); } @@ -4339,7 +4979,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, will already have offset 0. */ if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode)) { - unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode) + unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode) - byte); unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte; unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte; @@ -4355,7 +4995,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, value_start = byte * (BITS_PER_UNIT / value_bit); /* Re-pack the value. */ - + if (VECTOR_MODE_P (outermode)) { num_elem = GET_MODE_NUNITS (outermode); @@ -4379,12 +5019,12 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, for (elem = 0; elem < num_elem; elem++) { unsigned char *vp; - + /* Vectors are stored in target memory order. (This is probably a mistake.) */ { unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT; - unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize) + unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize) / BITS_PER_UNIT); unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte; unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte; @@ -4407,7 +5047,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, for (; i < elem_bitsize; i += value_bit) hi |= ((HOST_WIDE_INT)(*vp++ & value_mask) << (i - HOST_BITS_PER_WIDE_INT)); - + /* immed_double_const doesn't call trunc_int_for_mode. I don't know why. */ if (elem_bitsize <= HOST_BITS_PER_WIDE_INT) @@ -4418,13 +5058,13 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, return NULL_RTX; } break; - + case MODE_FLOAT: case MODE_DECIMAL_FLOAT: { REAL_VALUE_TYPE r; long tmp[max_bitsize / 32]; - + /* real_from_target wants its input in words affected by FLOAT_WORDS_BIG_ENDIAN. 
However, we ignore this, and use WORDS_BIG_ENDIAN instead; see the documentation @@ -4445,7 +5085,29 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode); } break; - + + case MODE_FRACT: + case MODE_UFRACT: + case MODE_ACCUM: + case MODE_UACCUM: + { + FIXED_VALUE_TYPE f; + f.data.low = 0; + f.data.high = 0; + f.mode = outer_submode; + + for (i = 0; + i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize; + i += value_bit) + f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i; + for (; i < elem_bitsize; i += value_bit) + f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask) + << (i - HOST_BITS_PER_WIDE_INT)); + + elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode); + } + break; + default: gcc_unreachable (); } @@ -4477,8 +5139,9 @@ simplify_subreg (enum machine_mode outermode, rtx op, if (outermode == innermode && !byte) return op; - if (GET_CODE (op) == CONST_INT + if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE + || GET_CODE (op) == CONST_FIXED || GET_CODE (op) == CONST_VECTOR) return simplify_immed_subreg (outermode, op, innermode, byte); @@ -4551,7 +5214,22 @@ simplify_subreg (enum machine_mode outermode, rtx op, return newx; if (validate_subreg (outermode, innermostmode, SUBREG_REG (op), final_offset)) - return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset); + { + newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset); + if (SUBREG_PROMOTED_VAR_P (op) + && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0 + && GET_MODE_CLASS (outermode) == MODE_INT + && IN_RANGE (GET_MODE_SIZE (outermode), + GET_MODE_SIZE (innermode), + GET_MODE_SIZE (innermostmode)) + && subreg_lowpart_p (newx)) + { + SUBREG_PROMOTED_VAR_P (newx) = 1; + SUBREG_PROMOTED_UNSIGNED_SET + (newx, SUBREG_PROMOTED_UNSIGNED_P (op)); + } + return newx; + } return NULL_RTX; } @@ -4568,35 +5246,13 @@ simplify_subreg (enum machine_mode outermode, rtx op, suppress this simplification. If the hard register is the stack, frame, or argument pointer, leave this as a SUBREG. */ - if (REG_P (op) - && REGNO (op) < FIRST_PSEUDO_REGISTER -#ifdef CANNOT_CHANGE_MODE_CLASS - && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode) - && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT - && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT) -#endif - && ((reload_completed && !frame_pointer_needed) - || (REGNO (op) != FRAME_POINTER_REGNUM -#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM - && REGNO (op) != HARD_FRAME_POINTER_REGNUM -#endif - )) -#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM - && REGNO (op) != ARG_POINTER_REGNUM -#endif - && REGNO (op) != STACK_POINTER_REGNUM - && subreg_offset_representable_p (REGNO (op), innermode, - byte, outermode)) + if (REG_P (op) && HARD_REGISTER_P (op)) { - unsigned int regno = REGNO (op); - unsigned int final_regno - = regno + subreg_regno_offset (regno, innermode, byte, outermode); - - /* ??? We do allow it if the current REG is not valid for - its mode. This is a kludge to work around how float/complex - arguments are passed on 32-bit SPARC and should be fixed. */ - if (HARD_REGNO_MODE_OK (final_regno, outermode) - || ! HARD_REGNO_MODE_OK (regno, innermode)) + unsigned int regno, final_regno; + + regno = REGNO (op); + final_regno = simplify_subreg_regno (regno, innermode, byte, outermode); + if (HARD_REGISTER_NUM_P (final_regno)) { rtx x; int final_offset = byte; @@ -4644,13 +5300,22 @@ simplify_subreg (enum machine_mode outermode, rtx op, of real and imaginary part. 
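
     (Illustrative aside: a complex DCmode value is a CONCAT of two
     DFmode parts, so with an 8-byte DFmode

       (subreg:DF (concat:DC (reg:DF r) (reg:DF i)) 8)

     lands entirely inside the second part and reduces to (reg:DF i),
     which is what the part_size/final_offset computation below
     selects.)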
*/ if (GET_CODE (op) == CONCAT) { - unsigned int inner_size, final_offset; + unsigned int part_size, final_offset; rtx part, res; - inner_size = GET_MODE_UNIT_SIZE (innermode); - part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1); - final_offset = byte % inner_size; - if (final_offset + GET_MODE_SIZE (outermode) > inner_size) + part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0))); + if (byte < part_size) + { + part = XEXP (op, 0); + final_offset = byte; + } + else + { + part = XEXP (op, 1); + final_offset = byte - part_size; + } + + if (final_offset + GET_MODE_SIZE (outermode) > part_size) return NULL_RTX; res = simplify_subreg (outermode, part, GET_MODE (part), final_offset); @@ -4707,7 +5372,7 @@ simplify_subreg (enum machine_mode outermode, rtx op, than the sign extension's sign_bit_copies and introduces zeros into the high bits of the result. */ && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode) - && GET_CODE (XEXP (op, 1)) == CONST_INT + && CONST_INT_P (XEXP (op, 1)) && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode) @@ -4722,7 +5387,7 @@ simplify_subreg (enum machine_mode outermode, rtx op, || GET_CODE (op) == ASHIFTRT) && SCALAR_INT_MODE_P (outermode) && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode) - && GET_CODE (XEXP (op, 1)) == CONST_INT + && CONST_INT_P (XEXP (op, 1)) && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode) @@ -4736,7 +5401,7 @@ simplify_subreg (enum machine_mode outermode, rtx op, if (GET_CODE (op) == ASHIFT && SCALAR_INT_MODE_P (outermode) && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode) - && GET_CODE (XEXP (op, 1)) == CONST_INT + && CONST_INT_P (XEXP (op, 1)) && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND) && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode @@ -4745,6 +5410,25 @@ simplify_subreg (enum machine_mode outermode, rtx op, return simplify_gen_binary (ASHIFT, outermode, XEXP (XEXP (op, 0), 0), XEXP (op, 1)); + /* Recognize a word extraction from a multi-word subreg. */ + if ((GET_CODE (op) == LSHIFTRT + || GET_CODE (op) == ASHIFTRT) + && SCALAR_INT_MODE_P (outermode) + && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD + && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode)) + && CONST_INT_P (XEXP (op, 1)) + && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0 + && INTVAL (XEXP (op, 1)) >= 0 + && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode) + && byte == subreg_lowpart_offset (outermode, innermode)) + { + int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT; + return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, + (WORDS_BIG_ENDIAN + ? byte - shifted_bytes + : byte + shifted_bytes)); + } + return NULL_RTX; } @@ -4812,10 +5496,10 @@ simplify_gen_subreg (enum machine_mode outermode, rtx op, simplification and 1 for tree simplification. 
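
   (Usage sketch, illustrative only:

     rtx x = gen_rtx_PLUS (SImode, const1_rtx, const1_rtx);
     rtx y = simplify_rtx (x);	/* yields (const_int 2) */

   a NULL return means "no simplification was found", so callers must
   keep using the original rtx in that case.)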
*/ rtx -simplify_rtx (rtx x) +simplify_rtx (const_rtx x) { - enum rtx_code code = GET_CODE (x); - enum machine_mode mode = GET_MODE (x); + const enum rtx_code code = GET_CODE (x); + const enum machine_mode mode = GET_MODE (x); switch (GET_RTX_CLASS (code)) { @@ -4849,9 +5533,9 @@ simplify_rtx (rtx x) case RTX_EXTRA: if (code == SUBREG) - return simplify_gen_subreg (mode, SUBREG_REG (x), - GET_MODE (SUBREG_REG (x)), - SUBREG_BYTE (x)); + return simplify_subreg (mode, SUBREG_REG (x), + GET_MODE (SUBREG_REG (x)), + SUBREG_BYTE (x)); break; case RTX_OBJ: @@ -4869,4 +5553,3 @@ simplify_rtx (rtx x) } return NULL; } -
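/* (Illustrative aside, not part of the patch: the RTX_EXTRA arm above
   now calls simplify_subreg rather than simplify_gen_subreg, so a
   SUBREG such as (subreg:SI (reg:DI r) 0) either folds to a genuinely
   simpler rtx or makes simplify_rtx return NULL; it no longer
   manufactures a fresh SUBREG when nothing was simplified.)  */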