/* RTL simplification functions for GNU compiler.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of GCC.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
#include "config.h"
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
-static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
- rtx, int);
+static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
{
rtx tem;
- /* Put complex operands first and constants second if commutative. */
- if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
- && swap_commutative_operands_p (op0, op1))
- tem = op0, op0 = op1, op1 = tem;
-
/* If this simplifies, do it. */
tem = simplify_binary_operation (code, mode, op0, op1);
if (tem)
return tem;
- /* Handle addition and subtraction specially. Otherwise, just form
- the operation. */
-
- if (code == PLUS || code == MINUS)
- {
- tem = simplify_plus_minus (code, mode, op0, op1, 1);
- if (tem)
- return tem;
- }
+ /* Put complex operands first and constants second if commutative. */
+ if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
+ && swap_commutative_operands_p (op0, op1))
+ tem = op0, op0 = op1, op1 = tem;
return gen_rtx_fmt_ee (code, mode, op0, op1);
}
{
rtx c, tmp, addr;
enum machine_mode cmode;
+ HOST_WIDE_INT offset = 0;
switch (GET_CODE (x))
{
/* Call target hook to avoid the effects of -fpic etc.... */
addr = targetm.delegitimize_address (addr);
+ /* Split the address into a base and integer offset. */
+ if (GET_CODE (addr) == CONST
+ && GET_CODE (XEXP (addr, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
+ {
+ offset = INTVAL (XEXP (XEXP (addr, 0), 1));
+ addr = XEXP (XEXP (addr, 0), 0);
+ }
+
if (GET_CODE (addr) == LO_SUM)
addr = XEXP (addr, 1);
- if (GET_CODE (addr) != SYMBOL_REF
- || ! CONSTANT_POOL_ADDRESS_P (addr))
- return x;
-
- c = get_pool_constant (addr);
- cmode = get_pool_mode (addr);
-
- /* If we're accessing the constant in a different mode than it was
- originally stored, attempt to fix that up via subreg simplifications.
- If that fails we have no choice but to return the original memory. */
- if (cmode != GET_MODE (x))
+ /* If this is a constant pool reference, we can turn it into its
+ constant and hope that simplifications happen. */
+ if (GET_CODE (addr) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (addr))
{
- c = simplify_subreg (GET_MODE (x), c, cmode, 0);
- return c ? c : x;
+ c = get_pool_constant (addr);
+ cmode = get_pool_mode (addr);
+
+ /* If we're accessing the constant in a different mode than it was
+ originally stored, attempt to fix that up via subreg simplifications.
+ If that fails we have no choice but to return the original memory. */
+ if (offset != 0 || cmode != GET_MODE (x))
+ {
+ rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
+ if (tem && CONSTANT_P (tem))
+ return tem;
+ }
+ else
+ return c;
}
- return c;
+ return x;
+}
+
+/* Return true if X is a MEM referencing the constant pool. */
+
+bool
+constant_pool_reference_p (rtx x)
+{
+ return avoid_constant_pool_reference (x) != x;
}
\f
/* Make a unary operation by first seeing if it folds and otherwise making
if (GET_CODE (op) == NOT)
return XEXP (op, 0);
- /* (not (eq X Y)) == (ne X Y), etc. */
+ /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
+ comparison is all ones. */
if (COMPARISON_P (op)
&& (mode == BImode || STORE_FLAG_VALUE == -1)
&& ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
}
- /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
- by reversing the comparison code if valid. */
- if (STORE_FLAG_VALUE == -1
- && COMPARISON_P (op)
- && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
- return simplify_gen_relational (reversed, mode, VOIDmode,
- XEXP (op, 0), XEXP (op, 1));
-
/* (not (ashiftrt foo C)) where C is the number of bits in FOO
minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
so we can perform the above simplification. */
-
+
if (STORE_FLAG_VALUE == -1
&& GET_CODE (op) == ASHIFTRT
&& GET_CODE (XEXP (op, 1)) == CONST_INT
return simplify_gen_relational (GE, mode, VOIDmode,
XEXP (op, 0), const0_rtx);
+
+ if (GET_CODE (op) == SUBREG
+ && subreg_lowpart_p (op)
+ && (GET_MODE_SIZE (GET_MODE (op))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
+ && GET_CODE (SUBREG_REG (op)) == ASHIFT
+ && XEXP (SUBREG_REG (op), 0) == const1_rtx)
+ {
+ enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
+ rtx x;
+
+ x = gen_rtx_ROTATE (inner_mode,
+ simplify_gen_unary (NOT, inner_mode, const1_rtx,
+ inner_mode),
+ XEXP (SUBREG_REG (op), 1));
+ return rtl_hooks.gen_lowpart_no_emit (mode, x);
+ }
+
+ /* Apply De Morgan's laws to reduce number of patterns for machines
+ with negating logical insns (and-not, nand, etc.). If result has
+ only one NOT, put it first, since that is how the patterns are
+ coded. */
+
+ if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
+ {
+ rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
+ enum machine_mode op_mode;
+
+ op_mode = GET_MODE (in1);
+ in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
+
+ op_mode = GET_MODE (in2);
+ if (op_mode == VOIDmode)
+ op_mode = mode;
+ in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
+
+ if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
+ {
+ rtx tem = in2;
+ in2 = in1; in1 = tem;
+ }
+
+ return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
+ mode, in1, in2);
+ }
break;
case NEG:
return simplify_gen_binary (ASHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
+ /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
+ if (GET_CODE (op) == XOR
+ && XEXP (op, 1) == const1_rtx
+ && nonzero_bits (XEXP (op, 0), mode) == 1)
+ return plus_constant (XEXP (op, 0), -1);
+
+ /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
+ /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
+ if (GET_CODE (op) == LT
+ && XEXP (op, 1) == const0_rtx)
+ {
+ enum machine_mode inner = GET_MODE (XEXP (op, 0));
+ int isize = GET_MODE_BITSIZE (inner);
+ if (STORE_FLAG_VALUE == 1)
+ {
+ temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
+ GEN_INT (isize - 1));
+ if (mode == inner)
+ return temp;
+ if (GET_MODE_BITSIZE (mode) > isize)
+ return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
+ return simplify_gen_unary (TRUNCATE, mode, temp, inner);
+ }
+ else if (STORE_FLAG_VALUE == -1)
+ {
+ temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
+ GEN_INT (isize - 1));
+ if (mode == inner)
+ return temp;
+ if (GET_MODE_BITSIZE (mode) > isize)
+ return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
+ return simplify_gen_unary (TRUNCATE, mode, temp, inner);
+ }
+ }
+ break;
+
+ case TRUNCATE:
+ /* We can't handle truncation to a partial integer mode here
+ because we don't know the real bitsize of the partial
+ integer mode. */
+ if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
+ break;
+
+ /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
+ if ((GET_CODE (op) == SIGN_EXTEND
+ || GET_CODE (op) == ZERO_EXTEND)
+ && GET_MODE (XEXP (op, 0)) == mode)
+ return XEXP (op, 0);
+
+ /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
+ (OP:SI foo:SI) if OP is NEG or ABS. */
+ if ((GET_CODE (op) == ABS
+ || GET_CODE (op) == NEG)
+ && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
+ || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
+ && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
+ return simplify_gen_unary (GET_CODE (op), mode,
+ XEXP (XEXP (op, 0), 0), mode);
+
+ /* (truncate:A (subreg:B (truncate:C X) 0)) is
+ (truncate:A X). */
+ if (GET_CODE (op) == SUBREG
+ && GET_CODE (SUBREG_REG (op)) == TRUNCATE
+ && subreg_lowpart_p (op))
+ return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
+ GET_MODE (XEXP (SUBREG_REG (op), 0)));
+
+ /* If we know that the value is already truncated, we can
+ replace the TRUNCATE with a SUBREG. Note that this is also
+ valid if TRULY_NOOP_TRUNCATION is false for the corresponding
+ modes we just have to apply a different definition for
+ truncation. But don't do this for an (LSHIFTRT (MULT ...))
+ since this will cause problems with the umulXi3_highpart
+ patterns. */
+ if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
+ GET_MODE_BITSIZE (GET_MODE (op)))
+ ? (num_sign_bit_copies (op, GET_MODE (op))
+ >= (unsigned int) (GET_MODE_BITSIZE (mode) + 1))
+ : truncated_to_mode (mode, op))
+ && ! (GET_CODE (op) == LSHIFTRT
+ && GET_CODE (XEXP (op, 0)) == MULT))
+ return rtl_hooks.gen_lowpart_no_emit (mode, op);
+
+ /* A truncate of a comparison can be replaced with a subreg if
+ STORE_FLAG_VALUE permits. This is like the previous test,
+ but it works even if the comparison is done in a mode larger
+ than HOST_BITS_PER_WIDE_INT. */
+ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && COMPARISON_P (op)
+ && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
+ return rtl_hooks.gen_lowpart_no_emit (mode, op);
+ break;
+
+ case FLOAT_TRUNCATE:
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ break;
+
+ /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
+ if (GET_CODE (op) == FLOAT_EXTEND
+ && GET_MODE (XEXP (op, 0)) == mode)
+ return XEXP (op, 0);
+
+ /* (float_truncate:SF (float_truncate:DF foo:XF))
+ = (float_truncate:SF foo:XF).
+ This may eliminate double rounding, so it is unsafe.
+
+ (float_truncate:SF (float_extend:XF foo:DF))
+ = (float_truncate:SF foo:DF).
+
+ (float_truncate:DF (float_extend:XF foo:SF))
+ = (float_extend:DF foo:SF). */
+ if ((GET_CODE (op) == FLOAT_TRUNCATE
+ && flag_unsafe_math_optimizations)
+ || GET_CODE (op) == FLOAT_EXTEND)
+ return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
+ 0)))
+ > GET_MODE_SIZE (mode)
+ ? FLOAT_TRUNCATE : FLOAT_EXTEND,
+ mode,
+ XEXP (op, 0), mode);
+
+ /* (float_truncate (float x)) is (float x) */
+ if (GET_CODE (op) == FLOAT
+ && (flag_unsafe_math_optimizations
+ || ((unsigned)significand_size (GET_MODE (op))
+ >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
+ - num_sign_bit_copies (XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)))))))
+ return simplify_gen_unary (FLOAT, mode,
+ XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)));
+
+ /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
+ (OP:SF foo:SF) if OP is NEG or ABS. */
+ if ((GET_CODE (op) == ABS
+ || GET_CODE (op) == NEG)
+ && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
+ && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
+ return simplify_gen_unary (GET_CODE (op), mode,
+ XEXP (XEXP (op, 0), 0), mode);
+
+ /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
+ is (float_truncate:SF x). */
+ if (GET_CODE (op) == SUBREG
+ && subreg_lowpart_p (op)
+ && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
+ return SUBREG_REG (op);
+ break;
+
+ case FLOAT_EXTEND:
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ break;
+
+ /* (float_extend (float_extend x)) is (float_extend x)
+
+ (float_extend (float x)) is (float x) assuming that double
+ rounding can't happen.
+ */
+ if (GET_CODE (op) == FLOAT_EXTEND
+ || (GET_CODE (op) == FLOAT
+ && ((unsigned)significand_size (GET_MODE (op))
+ >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
+ - num_sign_bit_copies (XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)))))))
+ return simplify_gen_unary (GET_CODE (op), mode,
+ XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)));
+
+ break;
+
+ case ABS:
+ /* (abs (neg <foo>)) -> (abs <foo>) */
+ if (GET_CODE (op) == NEG)
+ return simplify_gen_unary (ABS, mode, XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)));
+
+ /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
+ do nothing. */
+ if (GET_MODE (op) == VOIDmode)
+ break;
+
+ /* If operand is something known to be positive, ignore the ABS. */
+ if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
+ || ((GET_MODE_BITSIZE (GET_MODE (op))
+ <= HOST_BITS_PER_WIDE_INT)
+ && ((nonzero_bits (op, GET_MODE (op))
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
+ == 0)))
+ return op;
+
+ /* If operand is known to be only -1 or 0, convert ABS to NEG. */
+ if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
+ return gen_rtx_NEG (mode, op);
+
+ break;
+
+ case FFS:
+ /* (ffs (*_extend <X>)) = (ffs <X>) */
+ if (GET_CODE (op) == SIGN_EXTEND
+ || GET_CODE (op) == ZERO_EXTEND)
+ return simplify_gen_unary (FFS, mode, XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)));
+ break;
+
+ case POPCOUNT:
+ case PARITY:
+ /* (pop* (zero_extend <X>)) = (pop* <X>) */
+ if (GET_CODE (op) == ZERO_EXTEND)
+ return simplify_gen_unary (code, mode, XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)));
+ break;
+
+ case FLOAT:
+ /* (float (sign_extend <X>)) = (float <X>). */
+ if (GET_CODE (op) == SIGN_EXTEND)
+ return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)));
break;
case SIGN_EXTEND:
target mode is the same as the variable's promotion. */
if (GET_CODE (op) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op)
- && SUBREG_PROMOTED_UNSIGNED_P (op)
+ && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
&& GET_MODE (XEXP (op, 0)) == mode)
return XEXP (op, 0);
}
else if (GET_CODE (op) == CONST_DOUBLE
- && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ && SCALAR_FLOAT_MODE_P (mode))
{
REAL_VALUE_TYPE d, t;
REAL_VALUE_FROM_CONST_DOUBLE (d, op);
}
else if (GET_CODE (op) == CONST_DOUBLE
- && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
+ && SCALAR_FLOAT_MODE_P (GET_MODE (op))
&& GET_MODE_CLASS (mode) == MODE_INT
&& width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
{
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
- rtx tem;
+ rtx tem, reversed, opleft, opright;
HOST_WIDE_INT val;
unsigned int width = GET_MODE_BITSIZE (mode);
have X (if C is 2 in the example above). But don't make
something more expensive than we had before. */
- if (! FLOAT_MODE_P (mode))
+ if (SCALAR_INT_MODE_P (mode))
{
- HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
+ HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
+ unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
rtx lhs = op0, rhs = op1;
if (GET_CODE (lhs) == NEG)
- coeff0 = -1, lhs = XEXP (lhs, 0);
+ {
+ coeff0l = -1;
+ coeff0h = -1;
+ lhs = XEXP (lhs, 0);
+ }
else if (GET_CODE (lhs) == MULT
&& GET_CODE (XEXP (lhs, 1)) == CONST_INT)
- coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
+ {
+ coeff0l = INTVAL (XEXP (lhs, 1));
+ coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
+ lhs = XEXP (lhs, 0);
+ }
else if (GET_CODE (lhs) == ASHIFT
&& GET_CODE (XEXP (lhs, 1)) == CONST_INT
&& INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
+ coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
+ coeff0h = 0;
lhs = XEXP (lhs, 0);
}
if (GET_CODE (rhs) == NEG)
- coeff1 = -1, rhs = XEXP (rhs, 0);
+ {
+ coeff1l = -1;
+ coeff1h = -1;
+ rhs = XEXP (rhs, 0);
+ }
else if (GET_CODE (rhs) == MULT
&& GET_CODE (XEXP (rhs, 1)) == CONST_INT)
{
- coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
+ coeff1l = INTVAL (XEXP (rhs, 1));
+ coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
+ rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == ASHIFT
&& GET_CODE (XEXP (rhs, 1)) == CONST_INT
&& INTVAL (XEXP (rhs, 1)) >= 0
&& INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
+ coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
+ coeff1h = 0;
rhs = XEXP (rhs, 0);
}
if (rtx_equal_p (lhs, rhs))
{
rtx orig = gen_rtx_PLUS (mode, op0, op1);
- tem = simplify_gen_binary (MULT, mode, lhs,
- GEN_INT (coeff0 + coeff1));
+ rtx coeff;
+ unsigned HOST_WIDE_INT l;
+ HOST_WIDE_INT h;
+
+ add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
+ coeff = immed_double_const (l, h, mode);
+
+ tem = simplify_gen_binary (MULT, mode, lhs, coeff);
return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
? tem : 0;
}
simplify_gen_binary (XOR, mode, op1,
XEXP (op0, 1)));
+ /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
+ if (GET_CODE (op0) == MULT
+ && GET_CODE (XEXP (op0, 0)) == NEG)
+ {
+ rtx in1, in2;
+
+ in1 = XEXP (XEXP (op0, 0), 0);
+ in2 = XEXP (op0, 1);
+ return simplify_gen_binary (MINUS, mode, op1,
+ simplify_gen_binary (MULT, mode,
+ in1, in2));
+ }
+
+ /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
+ C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
+ is 1. */
+ if (COMPARISON_P (op0)
+ && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
+ || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
+ && (reversed = reversed_comparison (op0, mode)))
+ return
+ simplify_gen_unary (NEG, mode, reversed, mode);
+
/* If one of the operands is a PLUS or a MINUS, see if we can
simplify this by the associative law.
Don't use the associative law for floating point.
if (INTEGRAL_MODE_P (mode)
&& (plus_minus_operand_p (op0)
|| plus_minus_operand_p (op1))
- && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
+ && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
return tem;
/* Reassociate floating point addition only when the user
have X (if C is 2 in the example above). But don't make
something more expensive than we had before. */
- if (! FLOAT_MODE_P (mode))
+ if (SCALAR_INT_MODE_P (mode))
{
- HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
+ HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
+ unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
rtx lhs = op0, rhs = op1;
if (GET_CODE (lhs) == NEG)
- coeff0 = -1, lhs = XEXP (lhs, 0);
+ {
+ coeff0l = -1;
+ coeff0h = -1;
+ lhs = XEXP (lhs, 0);
+ }
else if (GET_CODE (lhs) == MULT
&& GET_CODE (XEXP (lhs, 1)) == CONST_INT)
{
- coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
+ coeff0l = INTVAL (XEXP (lhs, 1));
+ coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
+ lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == ASHIFT
&& GET_CODE (XEXP (lhs, 1)) == CONST_INT
&& INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
+ coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
+ coeff0h = 0;
lhs = XEXP (lhs, 0);
}
if (GET_CODE (rhs) == NEG)
- coeff1 = - 1, rhs = XEXP (rhs, 0);
+ {
+ negcoeff1l = 1;
+ negcoeff1h = 0;
+ rhs = XEXP (rhs, 0);
+ }
else if (GET_CODE (rhs) == MULT
&& GET_CODE (XEXP (rhs, 1)) == CONST_INT)
{
- coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
+ negcoeff1l = -INTVAL (XEXP (rhs, 1));
+ negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
+ rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == ASHIFT
&& GET_CODE (XEXP (rhs, 1)) == CONST_INT
&& INTVAL (XEXP (rhs, 1)) >= 0
&& INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
+ negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
+ negcoeff1h = -1;
rhs = XEXP (rhs, 0);
}
if (rtx_equal_p (lhs, rhs))
{
rtx orig = gen_rtx_MINUS (mode, op0, op1);
- tem = simplify_gen_binary (MULT, mode, lhs,
- GEN_INT (coeff0 - coeff1));
+ rtx coeff;
+ unsigned HOST_WIDE_INT l;
+ HOST_WIDE_INT h;
+
+ add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
+ coeff = immed_double_const (l, h, mode);
+
+ tem = simplify_gen_binary (MULT, mode, lhs, coeff);
return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
? tem : 0;
}
return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
}
- /* If one of the operands is a PLUS or a MINUS, see if we can
- simplify this by the associative law.
- Don't use the associative law for floating point.
- The inaccuracy makes it nonassociative,
- and subtle programs can break if operations are associated. */
-
- if (INTEGRAL_MODE_P (mode)
- && (plus_minus_operand_p (op0)
- || plus_minus_operand_p (op1))
- && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
- return tem;
-
/* Don't let a relocatable value get a negative coeff. */
if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
return simplify_gen_binary (PLUS, mode,
return simplify_gen_binary (AND, mode, op0, tem);
}
}
+
+ /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
+ by reversing the comparison code if valid. */
+ if (STORE_FLAG_VALUE == 1
+ && trueop0 == const1_rtx
+ && COMPARISON_P (op1)
+ && (reversed = reversed_comparison (op1, mode)))
+ return reversed;
+
+ /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
+ if (GET_CODE (op1) == MULT
+ && GET_CODE (XEXP (op1, 0)) == NEG)
+ {
+ rtx in1, in2;
+
+ in1 = XEXP (XEXP (op1, 0), 0);
+ in2 = XEXP (op1, 1);
+ return simplify_gen_binary (PLUS, mode,
+ simplify_gen_binary (MULT, mode,
+ in1, in2),
+ op0);
+ }
+
+ /* Canonicalize (minus (neg A) (mult B C)) to
+ (minus (mult (neg B) C) A). */
+ if (GET_CODE (op1) == MULT
+ && GET_CODE (op0) == NEG)
+ {
+ rtx in1, in2;
+
+ in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
+ in2 = XEXP (op1, 1);
+ return simplify_gen_binary (MINUS, mode,
+ simplify_gen_binary (MULT, mode,
+ in1, in2),
+ XEXP (op0, 0));
+ }
+
+ /* If one of the operands is a PLUS or a MINUS, see if we can
+ simplify this by the associative law. This will, for example,
+ canonicalize (minus A (plus B C)) to (minus (minus A B) C).
+ Don't use the associative law for floating point.
+ The inaccuracy makes it nonassociative,
+ and subtle programs can break if operations are associated. */
+
+ if (INTEGRAL_MODE_P (mode)
+ && (plus_minus_operand_p (op0)
+ || plus_minus_operand_p (op1))
+ && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
+ return tem;
break;
case MULT:
|| val != HOST_BITS_PER_WIDE_INT - 1))
return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
+ /* Likewise for multipliers wider than a word. */
+ else if (GET_CODE (trueop1) == CONST_DOUBLE
+ && (GET_MODE (trueop1) == VOIDmode
+ || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
+ && GET_MODE (op0) == mode
+ && CONST_DOUBLE_LOW (trueop1) == 0
+ && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
+ return simplify_gen_binary (ASHIFT, mode, op0,
+ GEN_INT (val + HOST_BITS_PER_WIDE_INT));
+
/* x*2 is x+x and x*(-1) is -x */
if (GET_CODE (trueop1) == CONST_DOUBLE
- && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
+ && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
&& GET_MODE (op0) == mode)
{
REAL_VALUE_TYPE d;
if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
|| (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
&& ! side_effects_p (op0)
- && GET_MODE_CLASS (mode) != MODE_CC)
+ && SCALAR_INT_MODE_P (mode))
return constm1_rtx;
+
+ /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
+ if (GET_CODE (op1) == CONST_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
+ return op1;
+
+ /* Convert (A & B) | A to A. */
+ if (GET_CODE (op0) == AND
+ && (rtx_equal_p (XEXP (op0, 0), op1)
+ || rtx_equal_p (XEXP (op0, 1), op1))
+ && ! side_effects_p (XEXP (op0, 0))
+ && ! side_effects_p (XEXP (op0, 1)))
+ return op1;
+
+ /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
+ mode size to (rotate A CX). */
+
+ if (GET_CODE (op1) == ASHIFT
+ || GET_CODE (op1) == SUBREG)
+ {
+ opleft = op1;
+ opright = op0;
+ }
+ else
+ {
+ opright = op1;
+ opleft = op0;
+ }
+
+ if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
+ && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
+ && GET_CODE (XEXP (opleft, 1)) == CONST_INT
+ && GET_CODE (XEXP (opright, 1)) == CONST_INT
+ && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
+ == GET_MODE_BITSIZE (mode)))
+ return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
+
+ /* Same, but for ashift that has been "simplified" to a wider mode
+ by simplify_shift_const. */
+
+ if (GET_CODE (opleft) == SUBREG
+ && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
+ && GET_CODE (opright) == LSHIFTRT
+ && GET_CODE (XEXP (opright, 0)) == SUBREG
+ && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
+ && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
+ && (GET_MODE_SIZE (GET_MODE (opleft))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
+ && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
+ SUBREG_REG (XEXP (opright, 0)))
+ && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
+ && GET_CODE (XEXP (opright, 1)) == CONST_INT
+ && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
+ == GET_MODE_BITSIZE (mode)))
+ return gen_rtx_ROTATE (mode, XEXP (opright, 0),
+ XEXP (SUBREG_REG (opleft), 1));
+
+ /* If we have (ior (and X C1) C2), simplify this by making
+ C1 as small as possible if C1 actually changes. */
+ if (GET_CODE (op1) == CONST_INT
+ && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ || INTVAL (op1) > 0)
+ && GET_CODE (op0) == AND
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && GET_CODE (op1) == CONST_INT
+ && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
+ return simplify_gen_binary (IOR, mode,
+ simplify_gen_binary
+ (AND, mode, XEXP (op0, 0),
+ GEN_INT (INTVAL (XEXP (op0, 1))
+ & ~INTVAL (op1))),
+ op1);
+
+ /* If OP0 is (ashiftrt (plus ...) C), it might actually be
+ a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
+ the PLUS does not affect any of the bits in OP1: then we can do
+ the IOR as a PLUS and we can associate. This is valid if OP1
+ can be safely shifted left C bits. */
+ if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
+ && GET_CODE (XEXP (op0, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ int count = INTVAL (XEXP (op0, 1));
+ HOST_WIDE_INT mask = INTVAL (trueop1) << count;
+
+ if (mask >> count == INTVAL (trueop1)
+ && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
+ return simplify_gen_binary (ASHIFTRT, mode,
+ plus_constant (XEXP (op0, 0), mask),
+ XEXP (op0, 1));
+ }
+
tem = simplify_associative_operation (code, mode, op0, op1);
if (tem)
return tem;
&& ((INTVAL (trueop1) & GET_MODE_MASK (mode))
== GET_MODE_MASK (mode)))
return simplify_gen_unary (NOT, mode, op0, mode);
- if (trueop0 == trueop1
+ if (rtx_equal_p (trueop0, trueop1)
&& ! side_effects_p (op0)
&& GET_MODE_CLASS (mode) != MODE_CC)
- return const0_rtx;
+ return CONST0_RTX (mode);
/* Canonicalize XOR of the most significant bit to PLUS. */
if ((GET_CODE (op1) == CONST_INT
return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
simplify_gen_binary (XOR, mode, op1,
XEXP (op0, 1)));
-
+
+ /* If we are XORing two things that have no bits in common,
+ convert them into an IOR. This helps to detect rotation encoded
+ using those methods and possibly other simplifications. */
+
+ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (op0, mode)
+ & nonzero_bits (op1, mode)) == 0)
+ return (simplify_gen_binary (IOR, mode, op0, op1));
+
+ /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
+ Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
+ (NOT y). */
+ {
+ int num_negated = 0;
+
+ if (GET_CODE (op0) == NOT)
+ num_negated++, op0 = XEXP (op0, 0);
+ if (GET_CODE (op1) == NOT)
+ num_negated++, op1 = XEXP (op1, 0);
+
+ if (num_negated == 2)
+ return simplify_gen_binary (XOR, mode, op0, op1);
+ else if (num_negated == 1)
+ return simplify_gen_unary (NOT, mode,
+ simplify_gen_binary (XOR, mode, op0, op1),
+ mode);
+ }
+
+ /* Convert (xor (and A B) B) to (and (not A) B). The latter may
+ correspond to a machine insn or result in further simplifications
+ if B is a constant. */
+
+ if (GET_CODE (op0) == AND
+ && rtx_equal_p (XEXP (op0, 1), op1)
+ && ! side_effects_p (op1))
+ return simplify_gen_binary (AND, mode,
+ simplify_gen_unary (NOT, mode,
+ XEXP (op0, 0), mode),
+ op1);
+
+ else if (GET_CODE (op0) == AND
+ && rtx_equal_p (XEXP (op0, 0), op1)
+ && ! side_effects_p (op1))
+ return simplify_gen_binary (AND, mode,
+ simplify_gen_unary (NOT, mode,
+ XEXP (op0, 1), mode),
+ op1);
+
+ /* (xor (comparison foo bar) (const_int 1)) can become the reversed
+ comparison if STORE_FLAG_VALUE is 1. */
+ if (STORE_FLAG_VALUE == 1
+ && trueop1 == const1_rtx
+ && COMPARISON_P (op0)
+ && (reversed = reversed_comparison (op0, mode)))
+ return reversed;
+
+ /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
+ is (lt foo (const_int 0)), so we can perform the above
+ simplification if STORE_FLAG_VALUE is 1. */
+
+ if (STORE_FLAG_VALUE == 1
+ && trueop1 == const1_rtx
+ && GET_CODE (op0) == LSHIFTRT
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
+
+ /* (xor (comparison foo bar) (const_int sign-bit))
+ when STORE_FLAG_VALUE is the sign bit. */
+ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
+ == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
+ && trueop1 == const_true_rtx
+ && COMPARISON_P (op0)
+ && (reversed = reversed_comparison (op0, mode)))
+ return reversed;
+
+ break;
+
tem = simplify_associative_operation (code, mode, op0, op1);
if (tem)
return tem;
break;
case AND:
- if (trueop1 == const0_rtx && ! side_effects_p (op0))
- return const0_rtx;
+ if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
+ return trueop1;
/* If we are turning off bits already known off in OP0, we need
not do an AND. */
if (GET_CODE (trueop1) == CONST_INT
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
&& (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
return op0;
- if (trueop0 == trueop1 && ! side_effects_p (op0)
+ if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
&& GET_MODE_CLASS (mode) != MODE_CC)
return op0;
/* A & (~A) -> 0 */
|| (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
&& ! side_effects_p (op0)
&& GET_MODE_CLASS (mode) != MODE_CC)
- return const0_rtx;
+ return CONST0_RTX (mode);
/* Transform (and (extend X) C) into (zero_extend (and X C)) if
there are no nonzero bits of C outside of X's mode. */
return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
}
+ /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
+ insn (and may simplify more). */
+ if (GET_CODE (op0) == XOR
+ && rtx_equal_p (XEXP (op0, 0), op1)
+ && ! side_effects_p (op1))
+ return simplify_gen_binary (AND, mode,
+ simplify_gen_unary (NOT, mode,
+ XEXP (op0, 1), mode),
+ op1);
+
+ if (GET_CODE (op0) == XOR
+ && rtx_equal_p (XEXP (op0, 1), op1)
+ && ! side_effects_p (op1))
+ return simplify_gen_binary (AND, mode,
+ simplify_gen_unary (NOT, mode,
+ XEXP (op0, 0), mode),
+ op1);
+
+ /* Similarly for (~(A ^ B)) & A. */
+ if (GET_CODE (op0) == NOT
+ && GET_CODE (XEXP (op0, 0)) == XOR
+ && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
+ && ! side_effects_p (op1))
+ return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
+
+ if (GET_CODE (op0) == NOT
+ && GET_CODE (XEXP (op0, 0)) == XOR
+ && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
+ && ! side_effects_p (op1))
+ return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
+
+ /* Convert (A | B) & A to A. */
+ if (GET_CODE (op0) == IOR
+ && (rtx_equal_p (XEXP (op0, 0), op1)
+ || rtx_equal_p (XEXP (op0, 1), op1))
+ && ! side_effects_p (XEXP (op0, 0))
+ && ! side_effects_p (XEXP (op0, 1)))
+ return op1;
+
/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
((A & N) + B) & M -> (A + B) & M
Similarly if (N & M) == 0,
case UDIV:
/* 0/x is 0 (or x&0 if x has side-effects). */
- if (trueop0 == const0_rtx)
- return side_effects_p (op1)
- ? simplify_gen_binary (AND, mode, op1, const0_rtx)
- : const0_rtx;
- /* x/1 is x. */
- if (trueop1 == const1_rtx)
- return rtl_hooks.gen_lowpart_no_emit (mode, op0);
- /* Convert divide by power of two into shift. */
- if (GET_CODE (trueop1) == CONST_INT
- && (val = exact_log2 (INTVAL (trueop1))) > 0)
- return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
- break;
+ if (trueop0 == CONST0_RTX (mode))
+ {
+ if (side_effects_p (op1))
+ return simplify_gen_binary (AND, mode, op1, trueop0);
+ return trueop0;
+ }
+ /* x/1 is x. */
+ if (trueop1 == CONST1_RTX (mode))
+ return rtl_hooks.gen_lowpart_no_emit (mode, op0);
+ /* Convert divide by power of two into shift. */
+ if (GET_CODE (trueop1) == CONST_INT
+ && (val = exact_log2 (INTVAL (trueop1))) > 0)
+ return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
+ break;
case DIV:
/* Handle floating point and integers separately. */
- if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ if (SCALAR_FLOAT_MODE_P (mode))
{
/* Maybe change 0.0 / x to 0.0. This transformation isn't
safe for modes with NaNs, since 0.0 / 0.0 will then be
else
{
/* 0/x is 0 (or x&0 if x has side-effects). */
- if (trueop0 == const0_rtx)
- return side_effects_p (op1)
- ? simplify_gen_binary (AND, mode, op1, const0_rtx)
- : const0_rtx;
+ if (trueop0 == CONST0_RTX (mode))
+ {
+ if (side_effects_p (op1))
+ return simplify_gen_binary (AND, mode, op1, trueop0);
+ return trueop0;
+ }
/* x/1 is x. */
- if (trueop1 == const1_rtx)
+ if (trueop1 == CONST1_RTX (mode))
return rtl_hooks.gen_lowpart_no_emit (mode, op0);
/* x/-1 is -x. */
if (trueop1 == constm1_rtx)
case UMOD:
/* 0%x is 0 (or x&0 if x has side-effects). */
- if (trueop0 == const0_rtx)
- return side_effects_p (op1)
- ? simplify_gen_binary (AND, mode, op1, const0_rtx)
- : const0_rtx;
- /* x%1 is 0 (of x&0 if x has side-effects). */
- if (trueop1 == const1_rtx)
- return side_effects_p (op0)
- ? simplify_gen_binary (AND, mode, op0, const0_rtx)
- : const0_rtx;
- /* Implement modulus by power of two as AND. */
- if (GET_CODE (trueop1) == CONST_INT
- && exact_log2 (INTVAL (trueop1)) > 0)
- return simplify_gen_binary (AND, mode, op0,
- GEN_INT (INTVAL (op1) - 1));
- break;
+ if (trueop0 == CONST0_RTX (mode))
+ {
+ if (side_effects_p (op1))
+ return simplify_gen_binary (AND, mode, op1, trueop0);
+ return trueop0;
+ }
+ /* x%1 is 0 (or x&0 if x has side-effects). */
+ if (trueop1 == CONST1_RTX (mode))
+ {
+ if (side_effects_p (op0))
+ return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
+ return CONST0_RTX (mode);
+ }
+ /* Implement modulus by power of two as AND. */
+ if (GET_CODE (trueop1) == CONST_INT
+ && exact_log2 (INTVAL (trueop1)) > 0)
+ return simplify_gen_binary (AND, mode, op0,
+ GEN_INT (INTVAL (op1) - 1));
+ break;
case MOD:
/* 0%x is 0 (or x&0 if x has side-effects). */
- if (trueop0 == const0_rtx)
- return side_effects_p (op1)
- ? simplify_gen_binary (AND, mode, op1, const0_rtx)
- : const0_rtx;
- /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
- if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
- return side_effects_p (op0)
- ? simplify_gen_binary (AND, mode, op0, const0_rtx)
- : const0_rtx;
- break;
+ if (trueop0 == CONST0_RTX (mode))
+ {
+ if (side_effects_p (op1))
+ return simplify_gen_binary (AND, mode, op1, trueop0);
+ return trueop0;
+ }
+ /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
+ if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
+ {
+ if (side_effects_p (op0))
+ return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
+ return CONST0_RTX (mode);
+ }
+ break;
case ROTATERT:
case ROTATE:
case ASHIFT:
case LSHIFTRT:
- if (trueop1 == const0_rtx)
+ if (trueop1 == CONST0_RTX (mode))
return op0;
- if (trueop0 == const0_rtx && ! side_effects_p (op1))
+ if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
return op0;
break;
break;
case UMIN:
- if (trueop1 == const0_rtx && ! side_effects_p (op0))
+ if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
return op0;
return gen_rtx_CONST_VECTOR (mode, v);
}
}
+
+ if (XVECLEN (trueop1, 0) == 1
+ && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
+ && GET_CODE (trueop0) == VEC_CONCAT)
+ {
+ rtx vec = trueop0;
+ int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
+
+ /* Try to find the element in the VEC_CONCAT. */
+ while (GET_MODE (vec) != mode
+ && GET_CODE (vec) == VEC_CONCAT)
+ {
+ HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
+ if (offset < vec_size)
+ vec = XEXP (vec, 0);
+ else
+ {
+ offset -= vec_size;
+ vec = XEXP (vec, 1);
+ }
+ vec = avoid_constant_pool_reference (vec);
+ }
+
+ if (GET_MODE (vec) == mode)
+ return vec;
+ }
+
return 0;
case VEC_CONCAT:
{
return gen_rtx_CONST_VECTOR (mode, v);
}
- if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ if (SCALAR_FLOAT_MODE_P (mode)
&& GET_CODE (op0) == CONST_DOUBLE
&& GET_CODE (op1) == CONST_DOUBLE
&& mode == GET_MODE (op0) && mode == GET_MODE (op1))
&f0, &f1);
real_convert (&result, mode, &value);
+ /* Don't constant fold this floating point operation if
+ the result has overflowed and flag_trapping_math is set. */
+
+ if (flag_trapping_math
+ && MODE_HAS_INFINITIES (mode)
+ && REAL_VALUE_ISINF (result)
+ && !REAL_VALUE_ISINF (f0)
+ && !REAL_VALUE_ISINF (f1))
+ /* Overflow plus exception. */
+ return 0;
+
/* Don't constant fold this floating point operation if the
result may dependent upon the run-time rounding mode and
flag_rounding_math is set, or if GCC's software emulation
Rather than test for specific case, we do this by a brute-force method
and do all possible simplifications until no more changes occur. Then
- we rebuild the operation.
-
- If FORCE is true, then always generate the rtx. This is used to
- canonicalize stuff emitted from simplify_gen_binary. Note that this
- can still fail if the rtx is too complex. It won't fail just because
- the result is not 'simpler' than the input, however. */
+ we rebuild the operation. */
struct simplify_plus_minus_op_data
{
rtx op;
- int neg;
+ short neg;
+ short ix;
};
static int
{
const struct simplify_plus_minus_op_data *d1 = p1;
const struct simplify_plus_minus_op_data *d2 = p2;
+ int result;
- return (commutative_operand_precedence (d2->op)
- - commutative_operand_precedence (d1->op));
+ result = (commutative_operand_precedence (d2->op)
+ - commutative_operand_precedence (d1->op));
+ if (result)
+ return result;
+ return d1->ix - d2->ix;
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
- rtx op1, int force)
+ rtx op1)
{
struct simplify_plus_minus_op_data ops[8];
rtx result, tem;
- int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
- int first, changed;
+ int n_ops = 2, input_ops = 2;
+ int first, changed, canonicalized = 0;
int i, j;
memset (ops, 0, sizeof ops);
ops[i].op = XEXP (this_op, 0);
input_ops++;
changed = 1;
+ canonicalized |= this_neg;
break;
case NEG:
ops[i].op = XEXP (this_op, 0);
ops[i].neg = ! this_neg;
changed = 1;
+ canonicalized = 1;
break;
case CONST:
ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
ops[n_ops].neg = this_neg;
n_ops++;
- input_consts++;
changed = 1;
+ canonicalized = 1;
}
break;
ops[i].op = XEXP (this_op, 0);
ops[i].neg = !this_neg;
changed = 1;
+ canonicalized = 1;
}
break;
ops[i].op = neg_const_int (mode, this_op);
ops[i].neg = 0;
changed = 1;
+ canonicalized = 1;
}
break;
}
while (changed);
- /* If we only have two operands, we can't do anything. */
- if (n_ops <= 2 && !force)
- return NULL_RTX;
+ gcc_assert (n_ops >= 2);
+ if (!canonicalized)
+ {
+ int n_constants = 0;
+
+ for (i = 0; i < n_ops; i++)
+ if (GET_CODE (ops[i].op) == CONST_INT)
+ n_constants++;
+
+ if (n_constants <= 1)
+ return NULL_RTX;
+ }
+
+ /* If we only have two operands, we can avoid the loops. */
+ if (n_ops == 2)
+ {
+ enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
+ rtx lhs, rhs;
- /* Count the number of CONSTs we didn't split above. */
- for (i = 0; i < n_ops; i++)
- if (GET_CODE (ops[i].op) == CONST)
- input_consts++;
+ /* Get the two operands. Be careful with the order, especially for
+ the cases where code == MINUS. */
+ if (ops[0].neg && ops[1].neg)
+ {
+ lhs = gen_rtx_NEG (mode, ops[0].op);
+ rhs = ops[1].op;
+ }
+ else if (ops[0].neg)
+ {
+ lhs = ops[1].op;
+ rhs = ops[0].op;
+ }
+ else
+ {
+ lhs = ops[0].op;
+ rhs = ops[1].op;
+ }
+
+ return simplify_const_binary_operation (code, mode, lhs, rhs);
+ }
/* Now simplify each pair of operands until nothing changes. The first
time through just simplify constants against each other. */
/* Pack all the operands to the lower-numbered entries. */
for (i = 0, j = 0; j < n_ops; j++)
if (ops[j].op)
- ops[i++] = ops[j];
+ {
+ ops[i] = ops[j];
+ /* Stabilize sort. */
+ ops[i].ix = i;
+ i++;
+ }
n_ops = i;
/* Sort the operations based on swap_commutative_operands_p. */
n_ops--;
}
- /* Count the number of CONSTs that we generated. */
- n_consts = 0;
- for (i = 0; i < n_ops; i++)
- if (GET_CODE (ops[i].op) == CONST)
- n_consts++;
-
- /* Give up if we didn't reduce the number of operands we had. Make
- sure we count a CONST as two operands. If we have the same
- number of operands, but have made more CONSTs than before, this
- is also an improvement, so accept it. */
- if (!force
- && (n_ops + n_consts > input_ops
- || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
- return NULL_RTX;
-
/* Put a non-negated operand first, if possible. */
for (i = 0; i < n_ops && ops[i].neg; i++)
tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
if (tem)
{
- if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ if (SCALAR_FLOAT_MODE_P (mode))
{
if (tem == const0_rtx)
return CONST0_RTX (mode);
{
if (INTVAL (op1) == 0 && COMPARISON_P (op0))
{
- /* If op0 is a comparison, extract the comparison arguments form it. */
+ /* If op0 is a comparison, extract the comparison arguments
+ from it. */
if (code == NE)
{
if (GET_MODE (op0) == mode)
? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
: lowpart_subreg (mode, op0, cmp_mode);
+ /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
+ if ((code == EQ || code == NE)
+ && op1 == const0_rtx
+ && op0code == XOR)
+ return simplify_gen_relational (code, mode, cmp_mode,
+ XEXP (op0, 0), XEXP (op0, 1));
+
+ /* (eq/ne (xor x y) x) simplifies to (eq/ne x 0). */
+ if ((code == EQ || code == NE)
+ && op0code == XOR
+ && rtx_equal_p (XEXP (op0, 0), op1)
+ && !side_effects_p (XEXP (op0, 1)))
+ return simplify_gen_relational (code, mode, cmp_mode, op1, const0_rtx);
+ /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne y 0). */
+ if ((code == EQ || code == NE)
+ && op0code == XOR
+ && rtx_equal_p (XEXP (op0, 1), op1)
+ && !side_effects_p (XEXP (op0, 0)))
+ return simplify_gen_relational (code, mode, cmp_mode, op1, const0_rtx);
+
+ /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
+ if ((code == EQ || code == NE)
+ && op0code == XOR
+ && (GET_CODE (op1) == CONST_INT
+ || GET_CODE (op1) == CONST_DOUBLE)
+ && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
+ return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
+ simplify_gen_binary (XOR, cmp_mode,
+ XEXP (op0, 1), op1));
+
return NULL_RTX;
}
/* If op0 is a compare, extract the comparison arguments from it. */
if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
- op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
+ {
+ op1 = XEXP (op0, 1);
+ op0 = XEXP (op0, 0);
+
+ if (GET_MODE (op0) != VOIDmode)
+ mode = GET_MODE (op0);
+ else if (GET_MODE (op1) != VOIDmode)
+ mode = GET_MODE (op1);
+ else
+ return 0;
+ }
/* We can't simplify MODE_CC values since we don't know what the
actual comparison is. */
the result. */
else if (GET_CODE (trueop0) == CONST_DOUBLE
&& GET_CODE (trueop1) == CONST_DOUBLE
- && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
+ && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
{
REAL_VALUE_TYPE d0, d1;
case LT:
/* Optimize abs(x) < 0.0. */
- if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
+ if (trueop1 == CONST0_RTX (mode)
+ && !HONOR_SNANS (mode)
+ && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
{
tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
: trueop0;
case GE:
/* Optimize abs(x) >= 0.0. */
- if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
+ if (trueop1 == CONST0_RTX (mode)
+ && !HONOR_NANS (mode)
+ && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
{
tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
: trueop0;
}
/* It shouldn't matter what's done here, so fill it with
zero. */
- for (; i < max_bitsize; i += value_bit)
+ for (; i < elem_bitsize; i += value_bit)
*vp++ = 0;
}
else
long tmp[max_bitsize / 32];
int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
- gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
+ gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
gcc_assert (bitsize <= elem_bitsize);
gcc_assert (bitsize % value_bit == 0);
know why. */
if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
elems[elem] = gen_int_mode (lo, outer_submode);
- else
+ else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
elems[elem] = immed_double_const (lo, hi, outer_submode);
+ else
+ return NULL_RTX;
}
break;
case MODE_FLOAT:
+ case MODE_DECIMAL_FLOAT:
{
REAL_VALUE_TYPE r;
long tmp[max_bitsize / 32];
return NULL_RTX;
}
+ /* Merge implicit and explicit truncations. */
+
+ if (GET_CODE (op) == TRUNCATE
+ && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
+ && subreg_lowpart_offset (outermode, innermode) == byte)
+ return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)));
+
/* SUBREG of a hard register => just change the register number
and/or mode. If the hard register is not valid in that mode,
suppress this simplification. If the hard register is the stack,
}
return NULL;
}
+