X-Git-Url: http://git.sourceforge.jp/view?p=pf3gnuchains%2Fgcc-fork.git;a=blobdiff_plain;f=gcc%2Fsimplify-rtx.c;h=1203bad2c9e9b6b69f4f905f7b28c31ebd053dd9;hp=754464d06f12c9bd3b7b0073596428bdcdcb8f77;hb=4c0b79b4b59ab8bf642094b120fb96233dde2cbe;hpb=4a9a1e2b3160e09b09cf4200a4ae7936b5130439

diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index 754464d06f1..1203bad2c9e 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -1,12 +1,13 @@
 /* RTL simplification functions for GNU compiler.
    Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
-   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+   Free Software Foundation, Inc.
 
 This file is part of GCC.
 
 GCC is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
 version.
 
 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
@@ -15,9 +16,8 @@ FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 for more details.
 
 You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING.  If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA.  */
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
 
 
 #include "config.h"
@@ -49,9 +49,9 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
 #define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
 
-static rtx neg_const_int (enum machine_mode, rtx);
-static bool plus_minus_operand_p (rtx);
-static int simplify_plus_minus_op_data_cmp (const void *, const void *);
+static rtx neg_const_int (enum machine_mode, const_rtx);
+static bool plus_minus_operand_p (const_rtx);
+static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                   unsigned int);
@@ -66,7 +66,7 @@ static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
 /* Negate a CONST_INT rtx, truncating (because a conversion from a
    maximally negative number can overflow).  */
 static rtx
-neg_const_int (enum machine_mode mode, rtx i)
+neg_const_int (enum machine_mode mode, const_rtx i)
 {
   return gen_int_mode (- INTVAL (i), mode);
 }
@@ -75,7 +75,7 @@ neg_const_int (enum machine_mode mode, rtx i)
    the most significant bit of machine mode MODE.  */
 
 bool
-mode_signbit_p (enum machine_mode mode, rtx x)
+mode_signbit_p (enum machine_mode mode, const_rtx x)
 {
   unsigned HOST_WIDE_INT val;
   unsigned int width;
@@ -158,6 +158,9 @@ avoid_constant_pool_reference (rtx x)
       return x;
     }
 
+  if (GET_MODE (x) == BLKmode)
+    return x;
+
   addr = XEXP (x, 0);
 
   /* Call target hook to avoid the effects of -fpic etc....  */
@@ -198,14 +201,6 @@ avoid_constant_pool_reference (rtx x)
 
   return x;
 }
-
-/* Return true if X is a MEM referencing the constant pool.  */
-
-bool
-constant_pool_reference_p (rtx x)
-{
-  return avoid_constant_pool_reference (x) != x;
-}
 
 /* Make a unary operation by first seeing if it folds and otherwise making
    the specified operation.  */
@@ -259,7 +254,7 @@ simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
    resulting RTX.  Return a new RTX which is as simplified as possible.  */
 
 rtx
-simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
+simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
 {
   enum rtx_code code = GET_CODE (x);
   enum machine_mode mode = GET_MODE (x);
@@ -656,7 +651,8 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
       if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                   GET_MODE_BITSIZE (GET_MODE (op)))
            ? (num_sign_bit_copies (op, GET_MODE (op))
-              >= (unsigned int) (GET_MODE_BITSIZE (mode) + 1))
+              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
+                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
@@ -703,10 +699,11 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
 
       /* (float_truncate (float x)) is (float x)  */
       if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
-             || ((unsigned)significand_size (GET_MODE (op))
-                 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
-                     - num_sign_bit_copies (XEXP (op, 0),
-                                            GET_MODE (XEXP (op, 0)))))))
+             || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
+                 && ((unsigned)significand_size (GET_MODE (op))
+                     >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
+                         - num_sign_bit_copies (XEXP (op, 0),
+                                                GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
@@ -739,6 +736,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
         */
       if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
+             && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
@@ -785,11 +783,54 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
       break;
 
     case POPCOUNT:
+      switch (GET_CODE (op))
+       {
+       case BSWAP:
+       case ZERO_EXTEND:
+         /* (popcount (zero_extend <X>)) = (popcount <X>) */
+         return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
+                                    GET_MODE (XEXP (op, 0)));
+
+       case ROTATE:
+       case ROTATERT:
+         /* Rotations don't affect popcount.  */
+         if (!side_effects_p (XEXP (op, 1)))
+           return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
+                                      GET_MODE (XEXP (op, 0)));
+         break;
+
+       default:
+         break;
+       }
+      break;
+
     case PARITY:
-      /* (pop* (zero_extend <X>)) = (pop* <X>) */
-      if (GET_CODE (op) == ZERO_EXTEND)
-       return simplify_gen_unary (code, mode, XEXP (op, 0),
-                                  GET_MODE (XEXP (op, 0)));
+      switch (GET_CODE (op))
+       {
+       case NOT:
+       case BSWAP:
+       case ZERO_EXTEND:
+       case SIGN_EXTEND:
+         return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
+                                    GET_MODE (XEXP (op, 0)));
+
+       case ROTATE:
+       case ROTATERT:
+         /* Rotations don't affect parity.  */
+         if (!side_effects_p (XEXP (op, 1)))
+           return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
+                                      GET_MODE (XEXP (op, 0)));
+         break;
+
+       default:
+         break;
+       }
+      break;
+
+    case BSWAP:
+      /* (bswap (bswap x)) -> x.
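+        For instance, (bswap:SI (bswap:SI (reg:SI 1))) reduces directly
+        to (reg:SI 1): the outer byte swap undoes the inner one.  (The
+        register number here is purely illustrative.)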
*/ + if (GET_CODE (op) == BSWAP) + return XEXP (op, 0); break; case FLOAT: @@ -1041,6 +1082,21 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, val &= 1; break; + case BSWAP: + { + unsigned int s; + + val = 0; + for (s = 0; s < width; s += 8) + { + unsigned int d = width - s - 8; + unsigned HOST_WIDE_INT byte; + byte = (arg0 >> s) & 0xff; + val |= byte << d; + } + } + break; + case TRUNCATE: val = arg0; break; @@ -1091,6 +1147,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, case FLOAT_TRUNCATE: case SS_TRUNCATE: case US_TRUNCATE: + case SS_NEG: return 0; default: @@ -1186,6 +1243,30 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, lv &= 1; break; + case BSWAP: + { + unsigned int s; + + hv = 0; + lv = 0; + for (s = 0; s < width; s += 8) + { + unsigned int d = width - s - 8; + unsigned HOST_WIDE_INT byte; + + if (s < HOST_BITS_PER_WIDE_INT) + byte = (l1 >> s) & 0xff; + else + byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff; + + if (d < HOST_BITS_PER_WIDE_INT) + lv |= byte << d; + else + hv |= byte << (d - HOST_BITS_PER_WIDE_INT); + } + } + break; + case TRUNCATE: /* This is just a change-of-mode, so do nothing. */ lv = l1, hv = h1; @@ -1417,16 +1498,12 @@ simplify_associative_operation (enum rtx_code code, enum machine_mode mode, } /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */ - tem = swap_commutative_operands_p (XEXP (op0, 1), op1) - ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1)) - : simplify_binary_operation (code, mode, XEXP (op0, 1), op1); + tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1); if (tem != 0) return simplify_gen_binary (code, mode, XEXP (op0, 0), tem); /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */ - tem = swap_commutative_operands_p (XEXP (op0, 0), op1) - ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0)) - : simplify_binary_operation (code, mode, XEXP (op0, 0), op1); + tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1); if (tem != 0) return simplify_gen_binary (code, mode, tem, XEXP (op0, 1)); } @@ -1470,6 +1547,11 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode, return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1); } +/* Subroutine of simplify_binary_operation. Simplify a binary operation + CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or + OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the + actual constants. */ + static rtx simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op0, rtx op1, rtx trueop0, rtx trueop1) @@ -1603,7 +1685,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, XEXP (op0, 1))); /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */ - if (GET_CODE (op0) == MULT + if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode) + && GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG) { rtx in1, in2; @@ -1686,10 +1769,14 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, case MINUS: /* We can't assume x-x is 0 even with non-IEEE floating point, but since it is zero except in very strange circumstances, we - will treat it as zero with -funsafe-math-optimizations. */ + will treat it as zero with -funsafe-math-optimizations and + -ffinite-math-only. */ if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0) - && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)) + && (! 
FLOAT_MODE_P (mode)
+             || (flag_unsafe_math_optimizations
+                 && !HONOR_NANS (mode)
+                 && !HONOR_INFINITIES (mode))))
        return CONST0_RTX (mode);
 
       /* Change subtraction from zero into negation.  (0 - x) is the
@@ -1831,7 +1918,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
        return reversed;
 
       /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
-      if (GET_CODE (op1) == MULT
+      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
+         && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;
@@ -1846,7 +1934,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
 
       /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
-      if (GET_CODE (op1) == MULT
+      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
+         && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;
@@ -1905,12 +1994,12 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
 
       /* Likewise for multipliers wider than a word.  */
-      else if (GET_CODE (trueop1) == CONST_DOUBLE
-              && (GET_MODE (trueop1) == VOIDmode
-                  || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
-              && GET_MODE (op0) == mode
-              && CONST_DOUBLE_LOW (trueop1) == 0
-              && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
+      if (GET_CODE (trueop1) == CONST_DOUBLE
+         && (GET_MODE (trueop1) == VOIDmode
+             || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
+         && GET_MODE (op0) == mode
+         && CONST_DOUBLE_LOW (trueop1) == 0
+         && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
 
@@ -1925,10 +2014,27 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
 
-         if (REAL_VALUES_EQUAL (d, dconstm1))
+         if (!HONOR_SNANS (mode)
+             && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }
 
+      /* Optimize -x * -x as x * x.  */
+      if (FLOAT_MODE_P (mode)
+         && GET_CODE (op0) == NEG
+         && GET_CODE (op1) == NEG
+         && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
+         && !side_effects_p (XEXP (op0, 0)))
+       return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
+
+      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
+      if (SCALAR_FLOAT_MODE_P (mode)
+         && GET_CODE (op0) == ABS
+         && GET_CODE (op1) == ABS
+         && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
+         && !side_effects_p (XEXP (op0, 0)))
+       return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
+
       /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
       if (! FLOAT_MODE_P (mode)
@@ -1962,6 +2068,33 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
          && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
        return op1;
 
+      /* Canonicalize (X & C1) | C2.  */
+      if (GET_CODE (op0) == AND
+         && GET_CODE (trueop1) == CONST_INT
+         && GET_CODE (XEXP (op0, 1)) == CONST_INT)
+       {
+         HOST_WIDE_INT mask = GET_MODE_MASK (mode);
+         HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
+         HOST_WIDE_INT c2 = INTVAL (trueop1);
+
+         /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
+         if ((c1 & c2) == c1
+             && !side_effects_p (XEXP (op0, 0)))
+           return trueop1;
+
+         /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
+         if (((c1|c2) & mask) == mask)
+           return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
+
+         /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.
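+            As a hypothetical worked case: in (X & 0x0F) | 0x06 every bit
+            of C2 = 0x06 is forced to one by the IOR, so those bits of C1
+            are redundant; C1 becomes 0x0F & ~0x06 = 0x09, and the result
+            is the equivalent (X & 0x09) | 0x06.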
*/ + if (((c1 & ~c2) & mask) != (c1 & mask)) + { + tem = simplify_gen_binary (AND, mode, XEXP (op0, 0), + gen_int_mode (c1 & ~c2, mode)); + return simplify_gen_binary (IOR, mode, tem, op1); + } + } + /* Convert (A & B) | A to A. */ if (GET_CODE (op0) == AND && (rtx_equal_p (XEXP (op0, 0), op1) @@ -2202,6 +2335,18 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode); } + /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */ + if (GET_CODE (op0) == IOR + && GET_CODE (trueop1) == CONST_INT + && GET_CODE (XEXP (op0, 1)) == CONST_INT) + { + HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1)); + return simplify_gen_binary (IOR, mode, + simplify_gen_binary (AND, mode, + XEXP (op0, 0), op1), + gen_int_mode (tmp, mode)); + } + /* Convert (A ^ B) & A to A & (~B) since the latter is often a single insn (and may simplify more). */ if (GET_CODE (op0) == XOR @@ -2412,20 +2557,45 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, case ROTATERT: case ROTATE: case ASHIFTRT: + if (trueop1 == CONST0_RTX (mode)) + return op0; + if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1)) + return op0; /* Rotating ~0 always results in ~0. */ if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode) && ! side_effects_p (op1)) return op0; - - /* Fall through.... */ + break; case ASHIFT: + case SS_ASHIFT: + if (trueop1 == CONST0_RTX (mode)) + return op0; + if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1)) + return op0; + break; + case LSHIFTRT: if (trueop1 == CONST0_RTX (mode)) return op0; if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1)) return op0; + /* Optimize (lshiftrt (clz X) C) as (eq X 0). */ + if (GET_CODE (op0) == CLZ + && GET_CODE (trueop1) == CONST_INT + && STORE_FLAG_VALUE == 1 + && INTVAL (trueop1) < (HOST_WIDE_INT)width) + { + enum machine_mode imode = GET_MODE (XEXP (op0, 0)); + unsigned HOST_WIDE_INT zero_val = 0; + + if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val) + && zero_val == GET_MODE_BITSIZE (imode) + && INTVAL (trueop1) == exact_log2 (zero_val)) + return simplify_gen_relational (EQ, mode, imode, + XEXP (op0, 0), const0_rtx); + } break; case SMIN: @@ -3109,6 +3279,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, case US_PLUS: case SS_MINUS: case US_MINUS: + case SS_ASHIFT: /* ??? There are simplifications that can be done. */ return 0; @@ -3135,21 +3306,23 @@ struct simplify_plus_minus_op_data { rtx op; short neg; - short ix; }; -static int -simplify_plus_minus_op_data_cmp (const void *p1, const void *p2) +static bool +simplify_plus_minus_op_data_cmp (rtx x, rtx y) { - const struct simplify_plus_minus_op_data *d1 = p1; - const struct simplify_plus_minus_op_data *d2 = p2; int result; - result = (commutative_operand_precedence (d2->op) - - commutative_operand_precedence (d1->op)); + result = (commutative_operand_precedence (y) + - commutative_operand_precedence (x)); if (result) - return result; - return d1->ix - d2->ix; + return result > 0; + + /* Group together equal REGs to do more simplification. 
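+     For example, keeping two occurrences of the same register adjacent,
+     say (reg 1) and a negated (reg 1), lets the pairwise loop in
+     simplify_plus_minus below combine and cancel them sooner.  (The
+     register number is purely illustrative.)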
*/ + if (REG_P (x) && REG_P (y)) + return REGNO (x) > REGNO (y); + else + return false; } static rtx @@ -3159,7 +3332,7 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, struct simplify_plus_minus_op_data ops[8]; rtx result, tem; int n_ops = 2, input_ops = 2; - int first, changed, canonicalized = 0; + int changed, n_constants = 0, canonicalized = 0; int i, j; memset (ops, 0, sizeof ops); @@ -3236,6 +3409,7 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, break; case CONST_INT: + n_constants++; if (this_neg) { ops[i].op = neg_const_int (mode, this_op); @@ -3252,18 +3426,10 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, } while (changed); - gcc_assert (n_ops >= 2); - if (!canonicalized) - { - int n_constants = 0; + if (n_constants > 1) + canonicalized = 1; - for (i = 0; i < n_ops; i++) - if (GET_CODE (ops[i].op) == CONST_INT) - n_constants++; - - if (n_constants <= 1) - return NULL_RTX; - } + gcc_assert (n_ops >= 2); /* If we only have two operands, we can avoid the loops. */ if (n_ops == 2) @@ -3292,22 +3458,37 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, return simplify_const_binary_operation (code, mode, lhs, rhs); } - /* Now simplify each pair of operands until nothing changes. The first - time through just simplify constants against each other. */ - - first = 1; + /* Now simplify each pair of operands until nothing changes. */ do { - changed = first; + /* Insertion sort is good enough for an eight-element array. */ + for (i = 1; i < n_ops; i++) + { + struct simplify_plus_minus_op_data save; + j = i - 1; + if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op)) + continue; + + canonicalized = 1; + save = ops[i]; + do + ops[j + 1] = ops[j]; + while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op)); + ops[j + 1] = save; + } - for (i = 0; i < n_ops - 1; i++) - for (j = i + 1; j < n_ops; j++) + /* This is only useful the first time through. */ + if (!canonicalized) + return NULL_RTX; + + changed = 0; + for (i = n_ops - 1; i > 0; i--) + for (j = i - 1; j >= 0; j--) { - rtx lhs = ops[i].op, rhs = ops[j].op; - int lneg = ops[i].neg, rneg = ops[j].neg; + rtx lhs = ops[j].op, rhs = ops[i].op; + int lneg = ops[j].neg, rneg = ops[i].neg; - if (lhs != 0 && rhs != 0 - && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs)))) + if (lhs != 0 && rhs != 0) { enum rtx_code ncode = PLUS; @@ -3320,8 +3501,21 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, else if (swap_commutative_operands_p (lhs, rhs)) tem = lhs, lhs = rhs, rhs = tem; - tem = simplify_binary_operation (ncode, mode, lhs, rhs); + if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT) + && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT)) + { + rtx tem_lhs, tem_rhs; + + tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs; + tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs; + tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs); + if (tem && !CONSTANT_P (tem)) + tem = gen_rtx_CONST (GET_MODE (tem), tem); + } + else + tem = simplify_binary_operation (ncode, mode, lhs, rhs); + /* Reject "simplifications" that just wrap the two arguments in a CONST. Failure to do so can result in infinite recursion with simplify_binary_operation @@ -3330,13 +3524,7 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, && ! 
(GET_CODE (tem) == CONST && GET_CODE (XEXP (tem, 0)) == ncode && XEXP (XEXP (tem, 0), 0) == lhs - && XEXP (XEXP (tem, 0), 1) == rhs) - /* Don't allow -x + -1 -> ~x simplifications in the - first pass. This allows us the chance to combine - the -1 with other constants. */ - && ! (first - && GET_CODE (tem) == NOT - && XEXP (tem, 0) == rhs)) + && XEXP (XEXP (tem, 0), 1) == rhs)) { lneg &= rneg; if (GET_CODE (tem) == NEG) @@ -3352,24 +3540,17 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, } } - first = 0; + /* Pack all the operands to the lower-numbered entries. */ + for (i = 0, j = 0; j < n_ops; j++) + if (ops[j].op) + { + ops[i] = ops[j]; + i++; + } + n_ops = i; } while (changed); - /* Pack all the operands to the lower-numbered entries. */ - for (i = 0, j = 0; j < n_ops; j++) - if (ops[j].op) - { - ops[i] = ops[j]; - /* Stabilize sort. */ - ops[i].ix = i; - i++; - } - n_ops = i; - - /* Sort the operations based on swap_commutative_operands_p. */ - qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp); - /* Create (minus -C X) instead of (neg (const (plus X C))). */ if (n_ops == 2 && GET_CODE (ops[1].op) == CONST_INT @@ -3419,7 +3600,7 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, /* Check whether an operand is suitable for calling simplify_plus_minus. */ static bool -plus_minus_operand_p (rtx x) +plus_minus_operand_p (const_rtx x) { return GET_CODE (x) == PLUS || GET_CODE (x) == MINUS @@ -3504,8 +3685,7 @@ simplify_relational_operation (enum rtx_code code, enum machine_mode mode, return simplify_relational_operation (code, mode, VOIDmode, XEXP (op0, 0), XEXP (op0, 1)); - if (mode == VOIDmode - || GET_MODE_CLASS (cmp_mode) == MODE_CC + if (GET_MODE_CLASS (cmp_mode) == MODE_CC || CC0_P (op0)) return NULL_RTX; @@ -3527,29 +3707,69 @@ simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode, { enum rtx_code op0code = GET_CODE (op0); - if (GET_CODE (op1) == CONST_INT) + if (op1 == const0_rtx && COMPARISON_P (op0)) { - if (INTVAL (op1) == 0 && COMPARISON_P (op0)) + /* If op0 is a comparison, extract the comparison arguments + from it. */ + if (code == NE) { - /* If op0 is a comparison, extract the comparison arguments - from it. */ - if (code == NE) - { - if (GET_MODE (op0) == mode) - return simplify_rtx (op0); - else - return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode, - XEXP (op0, 0), XEXP (op0, 1)); - } - else if (code == EQ) - { - enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX); - if (new_code != UNKNOWN) - return simplify_gen_relational (new_code, mode, VOIDmode, - XEXP (op0, 0), XEXP (op0, 1)); - } + if (GET_MODE (op0) == mode) + return simplify_rtx (op0); + else + return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode, + XEXP (op0, 0), XEXP (op0, 1)); + } + else if (code == EQ) + { + enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX); + if (new_code != UNKNOWN) + return simplify_gen_relational (new_code, mode, VOIDmode, + XEXP (op0, 0), XEXP (op0, 1)); + } + } + + if (op1 == const0_rtx) + { + /* Canonicalize (GTU x 0) as (NE x 0). */ + if (code == GTU) + return simplify_gen_relational (NE, mode, cmp_mode, op0, op1); + /* Canonicalize (LEU x 0) as (EQ x 0). */ + if (code == LEU) + return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1); + } + else if (op1 == const1_rtx) + { + switch (code) + { + case GE: + /* Canonicalize (GE x 1) as (GT x 0). 
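+            For any integer x, x >= 1 and x > 0 hold for exactly the same
+            values, so the zero-comparison form is preferred: zero is the
+            cheaper constant on most targets and exposes further
+            simplifications.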
*/ + return simplify_gen_relational (GT, mode, cmp_mode, + op0, const0_rtx); + case GEU: + /* Canonicalize (GEU x 1) as (NE x 0). */ + return simplify_gen_relational (NE, mode, cmp_mode, + op0, const0_rtx); + case LT: + /* Canonicalize (LT x 1) as (LE x 0). */ + return simplify_gen_relational (LE, mode, cmp_mode, + op0, const0_rtx); + case LTU: + /* Canonicalize (LTU x 1) as (EQ x 0). */ + return simplify_gen_relational (EQ, mode, cmp_mode, + op0, const0_rtx); + default: + break; } } + else if (op1 == constm1_rtx) + { + /* Canonicalize (LE x -1) as (LT x 0). */ + if (code == LE) + return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx); + /* Canonicalize (GT x -1) as (GE x 0). */ + if (code == GT) + return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx); + } /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */ if ((code == EQ || code == NE) @@ -3588,18 +3808,21 @@ simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode, return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0), XEXP (op0, 1)); - /* (eq/ne (xor x y) x) simplifies to (eq/ne x 0). */ + /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */ if ((code == EQ || code == NE) && op0code == XOR && rtx_equal_p (XEXP (op0, 0), op1) - && !side_effects_p (XEXP (op0, 1))) - return simplify_gen_relational (code, mode, cmp_mode, op1, const0_rtx); - /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne y 0). */ + && !side_effects_p (XEXP (op0, 0))) + return simplify_gen_relational (code, mode, cmp_mode, + XEXP (op0, 1), const0_rtx); + + /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */ if ((code == EQ || code == NE) && op0code == XOR && rtx_equal_p (XEXP (op0, 1), op1) - && !side_effects_p (XEXP (op0, 0))) - return simplify_gen_relational (code, mode, cmp_mode, op1, const0_rtx); + && !side_effects_p (XEXP (op0, 1))) + return simplify_gen_relational (code, mode, cmp_mode, + XEXP (op0, 0), const0_rtx); /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */ if ((code == EQ || code == NE) @@ -3612,6 +3835,27 @@ simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode, simplify_gen_binary (XOR, cmp_mode, XEXP (op0, 1), op1)); + if (op0code == POPCOUNT && op1 == const0_rtx) + switch (code) + { + case EQ: + case LE: + case LEU: + /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */ + return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)), + XEXP (op0, 0), const0_rtx); + + case NE: + case GT: + case GTU: + /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */ + return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)), + XEXP (op0, 0), const0_rtx); + + default: + break; + } + return NULL_RTX; } @@ -3668,26 +3912,25 @@ simplify_const_relational_operation (enum rtx_code code, a register or a CONST_INT, this can't help; testing for these cases will prevent infinite recursion here and speed things up. - If CODE is an unsigned comparison, then we can never do this optimization, - because it gives an incorrect result if the subtraction wraps around zero. - ANSI C defines unsigned operations such that they never overflow, and - thus such cases can not be ignored; but we cannot do it even for - signed comparisons for languages such as Java, so test flag_wrapv. */ + We can only do this for EQ and NE comparisons as otherwise we may + lose or introduce overflow which we cannot disregard as undefined as + we do not know the signedness of the operation on either the left or + the right hand side of the comparison. 
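+   For instance, with 32-bit signed operands x = INT_MIN and y = 1,
+   x < y is true, yet x - y wraps to INT_MAX, so (x - y) < 0 is false;
+   equality and inequality, by contrast, survive the subtraction.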
*/
 
-  if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
+  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
+      && (code == EQ || code == NE)
       && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
            && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
       && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
-      /* We cannot do this for == or != if tem is a nonzero address.  */
-      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
-      && code != GTU && code != GEU && code != LTU && code != LEU)
+      /* We cannot do this if tem is a nonzero address.  */
+      && ! nonzero_address_p (tem))
     return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);
 
-  if (flag_unsafe_math_optimizations && code == ORDERED)
+  if (! HONOR_NANS (mode) && code == ORDERED)
     return const_true_rtx;
 
-  if (flag_unsafe_math_optimizations && code == UNORDERED)
+  if (! HONOR_NANS (mode) && code == UNORDERED)
     return const0_rtx;
 
   /* For modes without NaNs, if the two operands are equal, we know the
@@ -3870,26 +4113,52 @@ simplify_const_relational_operation (enum rtx_code code,
       /* Optimize abs(x) < 0.0.  */
       if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode)
-         && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
+         && (!INTEGRAL_MODE_P (mode)
+             || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
        {
          tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                   : trueop0;
          if (GET_CODE (tem) == ABS)
-           return const0_rtx;
+           {
+             if (INTEGRAL_MODE_P (mode)
+                 && (issue_strict_overflow_warning
+                     (WARN_STRICT_OVERFLOW_CONDITIONAL)))
+               warning (OPT_Wstrict_overflow,
+                        ("assuming signed overflow does not occur when "
+                         "assuming abs (x) < 0 is false"));
+             return const0_rtx;
+           }
        }
+
+      /* Optimize popcount (x) < 0.  */
+      if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
+       return const0_rtx;
       break;
 
     case GE:
       /* Optimize abs(x) >= 0.0.  */
       if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode)
-         && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
+         && (!INTEGRAL_MODE_P (mode)
+             || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
        {
          tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                   : trueop0;
          if (GET_CODE (tem) == ABS)
-           return const_true_rtx;
+           {
+             if (INTEGRAL_MODE_P (mode)
+                 && (issue_strict_overflow_warning
+                     (WARN_STRICT_OVERFLOW_CONDITIONAL)))
+               warning (OPT_Wstrict_overflow,
+                        ("assuming signed overflow does not occur when "
+                         "assuming abs (x) >= 0 is true"));
+             return const_true_rtx;
+           }
        }
+
+      /* Optimize popcount (x) >= 0.  */
+      if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
+       return const_true_rtx;
       break;
 
     case UNGE:
@@ -4533,7 +4802,22 @@ simplify_subreg (enum machine_mode outermode, rtx op,
       if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (regno, innermode))
        {
-         rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
+         rtx x;
+         int final_offset = byte;
+
+         /* Adjust offset for paradoxical subregs.  */
+         if (byte == 0
+             && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
+           {
+             int difference = (GET_MODE_SIZE (innermode)
+                               - GET_MODE_SIZE (outermode));
+             if (WORDS_BIG_ENDIAN)
+               final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
+             if (BYTES_BIG_ENDIAN)
+               final_offset += difference % UNITS_PER_WORD;
+           }
+
+         x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
 
          /* Propagate original regno.  We don't have any way to specify
            the offset inside original regno, so do so only for lowpart.
@@ -4564,13 +4848,22 @@ simplify_subreg (enum machine_mode outermode, rtx op,
      of real and imaginary part.
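     For example, extracting the second (imaginary) half of a
     (concat:DC x y), i.e. a subreg whose byte offset equals the size of
     the first part, selects y at a final offset of zero within it.
     (The names x and y are purely illustrative.)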
*/ if (GET_CODE (op) == CONCAT) { - unsigned int inner_size, final_offset; + unsigned int part_size, final_offset; rtx part, res; - inner_size = GET_MODE_UNIT_SIZE (innermode); - part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1); - final_offset = byte % inner_size; - if (final_offset + GET_MODE_SIZE (outermode) > inner_size) + part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0))); + if (byte < part_size) + { + part = XEXP (op, 0); + final_offset = byte; + } + else + { + part = XEXP (op, 1); + final_offset = byte - part_size; + } + + if (final_offset + GET_MODE_SIZE (outermode) > part_size) return NULL_RTX; res = simplify_subreg (outermode, part, GET_MODE (part), final_offset); @@ -4769,9 +5062,9 @@ simplify_rtx (rtx x) case RTX_EXTRA: if (code == SUBREG) - return simplify_gen_subreg (mode, SUBREG_REG (x), - GET_MODE (SUBREG_REG (x)), - SUBREG_BYTE (x)); + return simplify_subreg (mode, SUBREG_REG (x), + GET_MODE (SUBREG_REG (x)), + SUBREG_BYTE (x)); break; case RTX_OBJ: @@ -4789,4 +5082,3 @@ simplify_rtx (rtx x) } return NULL; } -
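
As a quick cross-check of the byte-reversal logic introduced by the new
BSWAP constant-folding case above, here is a minimal standalone sketch.
It is a hypothetical test harness, not part of the patch: it mirrors only
the single-word folding loop, specialized to a 32-bit value, and verifies
that bswap is an involution, matching the (bswap (bswap x)) -> x
simplification.

#include <assert.h>
#include <stdint.h>

/* Reference mirror of the patch's single-word BSWAP folding loop:
   extract each source byte and deposit it at the mirrored position.  */
static uint32_t
bswap32_ref (uint32_t arg0)
{
  const unsigned int width = 32;
  uint32_t val = 0;
  unsigned int s;

  for (s = 0; s < width; s += 8)
    {
      unsigned int d = width - s - 8;      /* mirrored destination bit */
      uint32_t byte = (arg0 >> s) & 0xff;  /* source byte */
      val |= byte << d;
    }
  return val;
}

int
main (void)
{
  assert (bswap32_ref (0x12345678) == 0x78563412);
  /* (bswap (bswap x)) -> x, as in the new unary simplification.  */
  assert (bswap32_ref (bswap32_ref (0xdeadbeef)) == 0xdeadbeef);
  return 0;
}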