/* RTL simplification functions for GNU compiler.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
This file is part of GCC.
rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
unsigned int);
-static bool associative_constant_p (rtx);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
rtx, rtx);
+static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
+ enum machine_mode, rtx, rtx);
\f
/* Negate a CONST_INT rtx, truncating (because a conversion from a
maximally negative number can overflow). */
return gen_int_mode (- INTVAL (i), mode);
}
+/* Test whether expression X is an immediate constant that represents
+ the most significant bit of machine mode MODE. */
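+/* For example, in SImode the signbit is the constant 0x80000000
+ (stored sign-extended in a CONST_INT); modes wider than
+ HOST_BITS_PER_WIDE_INT are matched against the high word of a
+ CONST_DOUBLE instead. */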
+
+bool
+mode_signbit_p (enum machine_mode mode, rtx x)
+{
+ unsigned HOST_WIDE_INT val;
+ unsigned int width;
+
+ if (GET_MODE_CLASS (mode) != MODE_INT)
+ return false;
+
+ width = GET_MODE_BITSIZE (mode);
+ if (width == 0)
+ return false;
+
+ if (width <= HOST_BITS_PER_WIDE_INT
+ && GET_CODE (x) == CONST_INT)
+ val = INTVAL (x);
+ else if (width <= 2 * HOST_BITS_PER_WIDE_INT
+ && GET_CODE (x) == CONST_DOUBLE
+ && CONST_DOUBLE_LOW (x) == 0)
+ {
+ val = CONST_DOUBLE_HIGH (x);
+ width -= HOST_BITS_PER_WIDE_INT;
+ }
+ else
+ return false;
+
+ if (width < HOST_BITS_PER_WIDE_INT)
+ val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
+ return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
+}
\f
/* Make a binary operation by properly ordering the operands and
seeing if the expression folds. */
rtx tem;
/* Put complex operands first and constants second if commutative. */
- if (GET_RTX_CLASS (code) == 'c'
+ if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
&& swap_commutative_operands_p (op0, op1))
tem = op0, op0 = op1, op1 = tem;
addr = XEXP (x, 0);
/* Call target hook to avoid the effects of -fpic etc.... */
- addr = (*targetm.delegitimize_address) (addr);
+ addr = targetm.delegitimize_address (addr);
if (GET_CODE (addr) == LO_SUM)
addr = XEXP (addr, 1);
return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
-\f
+
/* Likewise, for relational operations.
- CMP_MODE specifies mode comparison is done in.
- */
+ CMP_MODE specifies the mode in which the comparison is done. */
rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
{
rtx tem;
- if (cmp_mode == VOIDmode)
- cmp_mode = GET_MODE (op0);
- if (cmp_mode == VOIDmode)
- cmp_mode = GET_MODE (op1);
-
- if (cmp_mode != VOIDmode)
- {
- tem = simplify_relational_operation (code, cmp_mode, op0, op1);
-
- if (tem)
- {
-#ifdef FLOAT_STORE_FLAG_VALUE
- if (GET_MODE_CLASS (mode) == MODE_FLOAT)
- {
- REAL_VALUE_TYPE val;
- if (tem == const0_rtx)
- return CONST0_RTX (mode);
- if (tem != const_true_rtx)
- abort ();
- val = FLOAT_STORE_FLAG_VALUE (mode);
- return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
- }
-#endif
- return tem;
- }
- }
-
- /* For the following tests, ensure const0_rtx is op1. */
- if (swap_commutative_operands_p (op0, op1)
- || (op0 == const0_rtx && op1 != const0_rtx))
- tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
-
- /* If op0 is a compare, extract the comparison arguments from it. */
- if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
- return simplify_gen_relational (code, mode, VOIDmode,
- XEXP (op0, 0), XEXP (op0, 1));
-
- /* If op0 is a comparison, extract the comparison arguments form it. */
- if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
- {
- if (code == NE)
- {
- if (GET_MODE (op0) == mode)
- return op0;
- return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
- XEXP (op0, 0), XEXP (op0, 1));
- }
- else if (code == EQ)
- {
- enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
- if (new != UNKNOWN)
- return simplify_gen_relational (new, mode, VOIDmode,
- XEXP (op0, 0), XEXP (op0, 1));
- }
- }
+ if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
+ op0, op1)))
+ return tem;
return gen_rtx_fmt_ee (code, mode, op0, op1);
}
\f
-/* Replace all occurrences of OLD in X with NEW and try to simplify the
+/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
resulting RTX. Return a new RTX which is as simplified as possible. */
rtx
-simplify_replace_rtx (rtx x, rtx old, rtx new)
+simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
enum rtx_code code = GET_CODE (x);
enum machine_mode mode = GET_MODE (x);
enum machine_mode op_mode;
rtx op0, op1, op2;
- /* If X is OLD, return NEW. Otherwise, if this is an expression, try
+ /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
to build a new expression substituting recursively. If we can't do
anything, return our input. */
- if (x == old)
- return new;
+ if (x == old_rtx)
+ return new_rtx;
switch (GET_RTX_CLASS (code))
{
- case '1':
+ case RTX_UNARY:
op0 = XEXP (x, 0);
op_mode = GET_MODE (op0);
- op0 = simplify_replace_rtx (op0, old, new);
+ op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
if (op0 == XEXP (x, 0))
return x;
return simplify_gen_unary (code, mode, op0, op_mode);
- case '2':
- case 'c':
- op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
- op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
+ case RTX_BIN_ARITH:
+ case RTX_COMM_ARITH:
+ op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
+ op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
return x;
return simplify_gen_binary (code, mode, op0, op1);
- case '<':
+ case RTX_COMPARE:
+ case RTX_COMM_COMPARE:
op0 = XEXP (x, 0);
op1 = XEXP (x, 1);
op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
- op0 = simplify_replace_rtx (op0, old, new);
- op1 = simplify_replace_rtx (op1, old, new);
+ op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
+ op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
return x;
return simplify_gen_relational (code, mode, op_mode, op0, op1);
- case '3':
- case 'b':
+ case RTX_TERNARY:
+ case RTX_BITFIELD_OPS:
op0 = XEXP (x, 0);
op_mode = GET_MODE (op0);
- op0 = simplify_replace_rtx (op0, old, new);
- op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
- op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
+ op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
+ op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
+ op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
return x;
if (op_mode == VOIDmode)
op_mode = GET_MODE (op0);
return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
- case 'x':
+ case RTX_EXTRA:
/* The only case we try to handle is a SUBREG. */
if (code == SUBREG)
{
- op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
+ op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
if (op0 == SUBREG_REG (x))
return x;
op0 = simplify_gen_subreg (GET_MODE (x), op0,
}
break;
- case 'o':
+ case RTX_OBJ:
if (code == MEM)
{
- op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
+ op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
if (op0 == XEXP (x, 0))
return x;
return replace_equiv_address_nv (x, op0);
}
else if (code == LO_SUM)
{
- op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
- op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
+ op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
+ op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
/* (lo_sum (high x) x) -> x */
if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
}
else if (code == REG)
{
- if (REG_P (old) && REGNO (x) == REGNO (old))
- return new;
+ if (REG_P (old_rtx) && REGNO (x) == REGNO (old_rtx))
+ return new_rtx;
}
break;
if (code == VEC_DUPLICATE)
{
- if (!VECTOR_MODE_P (mode))
- abort ();
- if (GET_MODE (trueop) != VOIDmode
- && !VECTOR_MODE_P (GET_MODE (trueop))
- && GET_MODE_INNER (mode) != GET_MODE (trueop))
- abort ();
- if (GET_MODE (trueop) != VOIDmode
- && VECTOR_MODE_P (GET_MODE (trueop))
- && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
- abort ();
+ gcc_assert (VECTOR_MODE_P (mode));
+ if (GET_MODE (trueop) != VOIDmode)
+ {
+ if (!VECTOR_MODE_P (GET_MODE (trueop)))
+ gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
+ else
+ gcc_assert (GET_MODE_INNER (mode)
+ == GET_MODE_INNER (GET_MODE (trueop)));
+ }
if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
|| GET_CODE (trueop) == CONST_VECTOR)
{
int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
- if (in_n_elts >= n_elts || n_elts % in_n_elts)
- abort ();
+ gcc_assert (in_n_elts < n_elts);
+ gcc_assert ((n_elts % in_n_elts) == 0);
for (i = 0; i < n_elts; i++)
RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
}
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
- if (op_n_elts != n_elts)
- abort ();
-
+ gcc_assert (op_n_elts == n_elts);
for (i = 0; i < n_elts; i++)
{
rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
case ZERO_EXTEND:
/* When zero-extending a CONST_INT, we need to know its
original mode. */
- if (op_mode == VOIDmode)
- abort ();
+ gcc_assert (op_mode != VOIDmode);
if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
{
/* If we were really extending the mode,
we would have to distinguish between zero-extension
and sign-extension. */
- if (width != GET_MODE_BITSIZE (op_mode))
- abort ();
+ gcc_assert (width == GET_MODE_BITSIZE (op_mode));
val = arg0;
}
else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
/* If we were really extending the mode,
we would have to distinguish between zero-extension
and sign-extension. */
- if (width != GET_MODE_BITSIZE (op_mode))
- abort ();
+ gcc_assert (width == GET_MODE_BITSIZE (op_mode));
val = arg0;
}
else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
return 0;
default:
- abort ();
+ gcc_unreachable ();
}
val = trunc_int_for_mode (val, mode);
break;
case ZERO_EXTEND:
- if (op_mode == VOIDmode)
- abort ();
+ gcc_assert (op_mode != VOIDmode);
if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
return 0;
case FIX:
real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
break;
+ case NOT:
+ {
+ long tmp[4];
+ int i;
+ real_to_target (tmp, &d, GET_MODE (trueop));
+ for (i = 0; i < 4; i++)
+ tmp[i] = ~tmp[i];
+ real_from_target (&d, tmp, mode);
+ }
+ break;
default:
- abort ();
+ gcc_unreachable ();
}
return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
return immed_double_const (xl, xh, mode);
}
return XEXP (op, 0);
/* (not (eq X Y)) == (ne X Y), etc. */
- if (GET_RTX_CLASS (GET_CODE (op)) == '<'
+ if (COMPARISON_P (op)
&& (mode == BImode || STORE_FLAG_VALUE == -1)
&& ((reversed = reversed_comparison_code (op, NULL_RTX))
!= UNKNOWN))
mode)) != 0)
return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
+ /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
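+ /* This holds because adding the signbit only toggles the top bit:
+ any carry out of that bit is discarded, so (plus X C) equals
+ (xor X C), and (not (xor X C)) is (xor X (not C)). */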
+ if (GET_CODE (op) == PLUS
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && mode_signbit_p (mode, XEXP (op, 1))
+ && (temp = simplify_unary_operation (NOT, mode,
+ XEXP (op, 1),
+ mode)) != 0)
+ return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
+
/* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
operands other than 1, but that is not valid. We could do a
/* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
by reversing the comparison code if valid. */
if (STORE_FLAG_VALUE == -1
- && GET_RTX_CLASS (GET_CODE (op)) == '<'
+ && COMPARISON_P (op)
&& (reversed = reversed_comparison_code (op, NULL_RTX))
!= UNKNOWN)
return simplify_gen_relational (reversed, mode, VOIDmode,
XEXP (op, 1));
}
+ /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
+ C is equal to the width of MODE minus 1. */
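+ /* In that case the arithmetic shift yields 0 or -1, whose
+ negations 0 and 1 are exactly what the logical shift by the
+ same amount produces. */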
+ if (GET_CODE (op) == ASHIFTRT
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ return simplify_gen_binary (LSHIFTRT, mode,
+ XEXP (op, 0), XEXP (op, 1));
+
+ /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
+ C is equal to the width of MODE minus 1. */
+ if (GET_CODE (op) == LSHIFTRT
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ return simplify_gen_binary (ASHIFTRT, mode,
+ XEXP (op, 0), XEXP (op, 1));
+
break;
case SIGN_EXTEND:
&& mode == Pmode && GET_MODE (op) == ptr_mode
&& (CONSTANT_P (op)
|| (GET_CODE (op) == SUBREG
- && GET_CODE (SUBREG_REG (op)) == REG
+ && REG_P (SUBREG_REG (op))
&& REG_POINTER (SUBREG_REG (op))
&& GET_MODE (SUBREG_REG (op)) == Pmode)))
return convert_memory_address (Pmode, op);
&& mode == Pmode && GET_MODE (op) == ptr_mode
&& (CONSTANT_P (op)
|| (GET_CODE (op) == SUBREG
- && GET_CODE (SUBREG_REG (op)) == REG
+ && REG_P (SUBREG_REG (op))
&& REG_POINTER (SUBREG_REG (op))
&& GET_MODE (SUBREG_REG (op)) == Pmode)))
return convert_memory_address (Pmode, op);
}
}
\f
-/* Subroutine of simplify_associative_operation. Return true if rtx OP
- is a suitable integer or floating point immediate constant. */
-static bool
-associative_constant_p (rtx op)
-{
- if (GET_CODE (op) == CONST_INT
- || GET_CODE (op) == CONST_DOUBLE)
- return true;
- op = avoid_constant_pool_reference (op);
- return GET_CODE (op) == CONST_INT
- || GET_CODE (op) == CONST_DOUBLE;
-}
+/* Subroutine of simplify_binary_operation to simplify a commutative,
+ associative binary operation CODE with result mode MODE, operating
+ on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
+ SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
+ canonicalization is possible. */
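+
+/* For example, (plus (plus X 1) 2) is reassociated here so that the
+ two constants meet and fold, yielding (plus X 3). */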
-/* Subroutine of simplify_binary_operation to simplify an associative
- binary operation CODE with result mode MODE, operating on OP0 and OP1.
- Return 0 if no simplification is possible. */
static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
rtx op0, rtx op1)
{
rtx tem;
- /* Simplify (x op c1) op c2 as x op (c1 op c2). */
- if (GET_CODE (op0) == code
- && associative_constant_p (op1)
- && associative_constant_p (XEXP (op0, 1)))
+ /* Linearize the operator to the left. */
+ if (GET_CODE (op1) == code)
{
- tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
- if (! tem)
- return tem;
- return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
- }
+ /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
+ if (GET_CODE (op0) == code)
+ {
+ tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
+ return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
+ }
- /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2). */
- if (GET_CODE (op0) == code
- && GET_CODE (op1) == code
- && associative_constant_p (XEXP (op0, 1))
- && associative_constant_p (XEXP (op1, 1)))
- {
- rtx c = simplify_binary_operation (code, mode,
- XEXP (op0, 1), XEXP (op1, 1));
- if (! c)
- return 0;
- tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
- return simplify_gen_binary (code, mode, tem, c);
- }
+ /* "a op (b op c)" becomes "(b op c) op a". */
+ if (! swap_commutative_operands_p (op1, op0))
+ return simplify_gen_binary (code, mode, op1, op0);
- /* Canonicalize (x op c) op y as (x op y) op c. */
- if (GET_CODE (op0) == code
- && associative_constant_p (XEXP (op0, 1)))
- {
- tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
- return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
+ tem = op0;
+ op0 = op1;
+ op1 = tem;
}
- /* Canonicalize x op (y op c) as (x op y) op c. */
- if (GET_CODE (op1) == code
- && associative_constant_p (XEXP (op1, 1)))
+ if (GET_CODE (op0) == code)
{
- tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
- return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
+ /* Canonicalize "(x op c) op y" as "(x op y) op c". */
+ if (swap_commutative_operands_p (XEXP (op0, 1), op1))
+ {
+ tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
+ return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
+ }
+
+ /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
+ tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
+ ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
+ : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
+ if (tem != 0)
+ return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
+
+ /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
+ tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
+ ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
+ : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
+ if (tem != 0)
+ return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
}
return 0;
HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
HOST_WIDE_INT val;
unsigned int width = GET_MODE_BITSIZE (mode);
+ rtx trueop0, trueop1;
rtx tem;
- rtx trueop0 = avoid_constant_pool_reference (op0);
- rtx trueop1 = avoid_constant_pool_reference (op1);
/* Relational operations don't work here. We must know the mode
of the operands in order to do the comparison correctly.
Assuming a full word can give incorrect results.
Consider comparing 128 with -128 in QImode. */
-
- if (GET_RTX_CLASS (code) == '<')
- abort ();
+ gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
+ gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
/* Make sure the constant is second. */
- if (GET_RTX_CLASS (code) == 'c'
- && swap_commutative_operands_p (trueop0, trueop1))
+ if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
+ && swap_commutative_operands_p (op0, op1))
{
tem = op0, op0 = op1, op1 = tem;
- tem = trueop0, trueop0 = trueop1, trueop1 = tem;
}
+ trueop0 = avoid_constant_pool_reference (op0);
+ trueop1 = avoid_constant_pool_reference (op1);
+
if (VECTOR_MODE_P (mode)
&& GET_CODE (trueop0) == CONST_VECTOR
&& GET_CODE (trueop1) == CONST_VECTOR)
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
- if (op0_n_elts != n_elts || op1_n_elts != n_elts)
- abort ();
-
+ gcc_assert (op0_n_elts == n_elts);
+ gcc_assert (op1_n_elts == n_elts);
for (i = 0; i < n_elts; i++)
{
rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
&& GET_CODE (trueop1) == CONST_DOUBLE
&& mode == GET_MODE (op0) && mode == GET_MODE (op1))
{
- REAL_VALUE_TYPE f0, f1, value;
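+ /* Bit-level operations on floating point constants are performed
+ on the target-format bit image: convert out, combine the words,
+ and convert back. Such rtl arises e.g. from copysign/fabs
+ sign-bit masks. */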
+ if (code == AND
+ || code == IOR
+ || code == XOR)
+ {
+ long tmp0[4];
+ long tmp1[4];
+ REAL_VALUE_TYPE r;
+ int i;
+
+ real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
+ GET_MODE (op0));
+ real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
+ GET_MODE (op1));
+ for (i = 0; i < 4; i++)
+ {
+ switch (code)
+ {
+ case AND:
+ tmp0[i] &= tmp1[i];
+ break;
+ case IOR:
+ tmp0[i] |= tmp1[i];
+ break;
+ case XOR:
+ tmp0[i] ^= tmp1[i];
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ real_from_target (&r, tmp0, mode);
+ return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
+ }
+ else
+ {
+ REAL_VALUE_TYPE f0, f1, value;
- REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
- REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
- f0 = real_value_truncate (mode, f0);
- f1 = real_value_truncate (mode, f1);
+ REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
+ REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
+ f0 = real_value_truncate (mode, f0);
+ f1 = real_value_truncate (mode, f1);
- if (HONOR_SNANS (mode)
- && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
- return 0;
+ if (HONOR_SNANS (mode)
+ && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
+ return 0;
- if (code == DIV
- && REAL_VALUES_EQUAL (f1, dconst0)
- && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
- return 0;
+ if (code == DIV
+ && REAL_VALUES_EQUAL (f1, dconst0)
+ && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
+ return 0;
+
+ if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
+ && flag_trapping_math
+ && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
+ {
+ int s0 = REAL_VALUE_NEGATIVE (f0);
+ int s1 = REAL_VALUE_NEGATIVE (f1);
+
+ switch (code)
+ {
+ case PLUS:
+ /* Inf + -Inf = NaN plus exception. */
+ if (s0 != s1)
+ return 0;
+ break;
+ case MINUS:
+ /* Inf - Inf = NaN plus exception. */
+ if (s0 == s1)
+ return 0;
+ break;
+ case DIV:
+ /* Inf / Inf = NaN plus exception. */
+ return 0;
+ default:
+ break;
+ }
+ }
- REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
+ if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
+ && flag_trapping_math
+ && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
+ || (REAL_VALUE_ISINF (f1)
+ && REAL_VALUES_EQUAL (f0, dconst0))))
+ /* Inf * 0 = NaN plus exception. */
+ return 0;
+
+ REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
- value = real_value_truncate (mode, value);
- return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
+ value = real_value_truncate (mode, value);
+ return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
+ }
}
/* We can fold some multi-word operations. */
&& (GET_CODE (trueop1) == CONST_DOUBLE
|| GET_CODE (trueop1) == CONST_INT))
{
- unsigned HOST_WIDE_INT l1, l2, lv;
- HOST_WIDE_INT h1, h2, hv;
+ unsigned HOST_WIDE_INT l1, l2, lv, lt;
+ HOST_WIDE_INT h1, h2, hv, ht;
if (GET_CODE (trueop0) == CONST_DOUBLE)
l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
mul_double (l1, h1, l2, h2, &lv, &hv);
break;
- case DIV: case MOD: case UDIV: case UMOD:
- /* We'd need to include tree.h to do this and it doesn't seem worth
- it. */
- return 0;
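+ /* Double-word division and modulus go through div_and_round_double,
+ which returns nonzero on overflow. Its quotient out-arguments
+ come before the remainder ones, so the MOD/UMOD cases simply
+ direct the remainder into the result words instead. */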
+ case DIV:
+ if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
+ &lv, &hv, <, &ht))
+ return 0;
+ break;
+
+ case MOD:
+ if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
+ <, &ht, &lv, &hv))
+ return 0;
+ break;
+
+ case UDIV:
+ if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
+ &lv, &hv, <, &ht))
+ return 0;
+ break;
+
+ case UMOD:
+ if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
+ <, &ht, &lv, &hv))
+ return 0;
+ break;
case AND:
lv = l1 & l2, hv = h1 & h2;
case LSHIFTRT: case ASHIFTRT:
case ASHIFT:
case ROTATE: case ROTATERT:
-#ifdef SHIFT_COUNT_TRUNCATED
if (SHIFT_COUNT_TRUNCATED)
l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
-#endif
if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
return 0;
if the multiplication is written as a shift. If so, we can
distribute and make a new multiply, shift, or maybe just
have X (if C is 2 in the example above). But don't make
- real multiply if we didn't have one before. */
+ something more expensive than we had before. */
if (! FLOAT_MODE_P (mode))
{
HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
rtx lhs = op0, rhs = op1;
- int had_mult = 0;
if (GET_CODE (lhs) == NEG)
coeff0 = -1, lhs = XEXP (lhs, 0);
&& GET_CODE (XEXP (lhs, 1)) == CONST_INT)
{
coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
- had_mult = 1;
}
else if (GET_CODE (lhs) == ASHIFT
&& GET_CODE (XEXP (lhs, 1)) == CONST_INT
&& GET_CODE (XEXP (rhs, 1)) == CONST_INT)
{
coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
- had_mult = 1;
}
else if (GET_CODE (rhs) == ASHIFT
&& GET_CODE (XEXP (rhs, 1)) == CONST_INT
if (rtx_equal_p (lhs, rhs))
{
+ rtx orig = gen_rtx_PLUS (mode, op0, op1);
tem = simplify_gen_binary (MULT, mode, lhs,
- GEN_INT (coeff0 + coeff1));
- return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
+ GEN_INT (coeff0 + coeff1));
+ return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
+ ? tem : 0;
}
}
+ /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
+ if ((GET_CODE (op1) == CONST_INT
+ || GET_CODE (op1) == CONST_DOUBLE)
+ && GET_CODE (op0) == XOR
+ && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
+ && mode_signbit_p (mode, op1))
+ return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
+ simplify_gen_binary (XOR, mode, op1,
+ XEXP (op0, 1)));
+
/* If one of the operands is a PLUS or a MINUS, see if we can
simplify this by the associative law.
Don't use the associative law for floating point.
#ifdef HAVE_cc0
if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
- if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
+ if (REG_P (xop00) && REG_P (xop10)
&& GET_MODE (xop00) == GET_MODE (xop10)
&& REGNO (xop00) == REGNO (xop10)
&& GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
if the multiplication is written as a shift. If so, we can
distribute and make a new multiply, shift, or maybe just
have X (if C is 2 in the example above). But don't make
- real multiply if we didn't have one before. */
+ something more expensive than we had before. */
if (! FLOAT_MODE_P (mode))
{
HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
rtx lhs = op0, rhs = op1;
- int had_mult = 0;
if (GET_CODE (lhs) == NEG)
coeff0 = -1, lhs = XEXP (lhs, 0);
&& GET_CODE (XEXP (lhs, 1)) == CONST_INT)
{
coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
- had_mult = 1;
}
else if (GET_CODE (lhs) == ASHIFT
&& GET_CODE (XEXP (lhs, 1)) == CONST_INT
&& GET_CODE (XEXP (rhs, 1)) == CONST_INT)
{
coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
- had_mult = 1;
}
else if (GET_CODE (rhs) == ASHIFT
&& GET_CODE (XEXP (rhs, 1)) == CONST_INT
if (rtx_equal_p (lhs, rhs))
{
+ rtx orig = gen_rtx_MINUS (mode, op0, op1);
tem = simplify_gen_binary (MULT, mode, lhs,
GEN_INT (coeff0 - coeff1));
- return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
+ return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
+ ? tem : 0;
}
}
uppermost bit is set, then this isn't a power of two due
to implicit sign extension. */
&& (width <= HOST_BITS_PER_WIDE_INT
- || val != HOST_BITS_PER_WIDE_INT - 1)
- && ! rtx_equal_function_value_matters)
+ || val != HOST_BITS_PER_WIDE_INT - 1))
return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
/* x*2 is x+x and x*(-1) is -x */
&& ((INTVAL (trueop1) & GET_MODE_MASK (mode))
== GET_MODE_MASK (mode)))
return simplify_gen_unary (NOT, mode, op0, mode);
- if (trueop0 == trueop1 && ! side_effects_p (op0)
+ if (trueop0 == trueop1
+ && ! side_effects_p (op0)
&& GET_MODE_CLASS (mode) != MODE_CC)
return const0_rtx;
+
+ /* Canonicalize XOR of the most significant bit to PLUS. */
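+ /* The two are equivalent for the signbit, since a carry out of
+ the top bit is simply lost; PLUS is used as the canonical form
+ so later folders need handle only one shape. */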
+ if ((GET_CODE (op1) == CONST_INT
+ || GET_CODE (op1) == CONST_DOUBLE)
+ && mode_signbit_p (mode, op1))
+ return simplify_gen_binary (PLUS, mode, op0, op1);
+ /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
+ if ((GET_CODE (op1) == CONST_INT
+ || GET_CODE (op1) == CONST_DOUBLE)
+ && GET_CODE (op0) == PLUS
+ && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
+ && mode_signbit_p (mode, XEXP (op0, 1)))
+ return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
+ simplify_gen_binary (XOR, mode, op1,
+ XEXP (op0, 1)));
+
tem = simplify_associative_operation (code, mode, op0, op1);
if (tem)
return tem;
case AND:
if (trueop1 == const0_rtx && ! side_effects_p (op0))
return const0_rtx;
+ /* If we are turning off bits already known off in OP0, we need
+ not do an AND. */
if (GET_CODE (trueop1) == CONST_INT
- && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
- == GET_MODE_MASK (mode)))
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
return op0;
if (trueop0 == trueop1 && ! side_effects_p (op0)
&& GET_MODE_CLASS (mode) != MODE_CC)
&& ! side_effects_p (op0)
&& GET_MODE_CLASS (mode) != MODE_CC)
return const0_rtx;
+ /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
+ ((A & N) + B) & M -> (A + B) & M
+ Similarly if (N & M) == 0,
+ ((A | N) + B) & M -> (A + B) & M
+ and for - instead of + and/or ^ instead of |. */
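+ /* For example, ((A & 0x0f0f) + B) & 0x0f becomes (A + B) & 0x0f:
+ bits of A above the low nibble can only carry upward and so
+ never influence the retained bits. */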
+ if (GET_CODE (trueop1) == CONST_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && ~INTVAL (trueop1)
+ && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
+ && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
+ {
+ rtx pmop[2];
+ int which;
+
+ pmop[0] = XEXP (op0, 0);
+ pmop[1] = XEXP (op0, 1);
+
+ for (which = 0; which < 2; which++)
+ {
+ tem = pmop[which];
+ switch (GET_CODE (tem))
+ {
+ case AND:
+ if (GET_CODE (XEXP (tem, 1)) == CONST_INT
+ && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
+ == INTVAL (trueop1))
+ pmop[which] = XEXP (tem, 0);
+ break;
+ case IOR:
+ case XOR:
+ if (GET_CODE (XEXP (tem, 1)) == CONST_INT
+ && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
+ pmop[which] = XEXP (tem, 0);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
+ {
+ tem = simplify_gen_binary (GET_CODE (op0), mode,
+ pmop[0], pmop[1]);
+ return simplify_gen_binary (code, mode, tem, op1);
+ }
+ }
tem = simplify_associative_operation (code, mode, op0, op1);
if (tem)
return tem;
break;
case UDIV:
- /* Convert divide by power of two into shift (divide by 1 handled
- below). */
- if (GET_CODE (trueop1) == CONST_INT
- && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
- return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
-
- /* Fall through.... */
-
- case DIV:
- if (trueop1 == CONST1_RTX (mode))
+ /* 0/x is 0 (or x&0 if x has side-effects). */
+ if (trueop0 == const0_rtx)
+ return side_effects_p (op1)
+ ? simplify_gen_binary (AND, mode, op1, const0_rtx)
+ : const0_rtx;
+ /* x/1 is x. */
+ if (trueop1 == const1_rtx)
{
- /* On some platforms DIV uses narrower mode than its
- operands. */
+ /* Handle narrowing UDIV. */
rtx x = gen_lowpart_common (mode, op0);
if (x)
return x;
- else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
+ if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
return gen_lowpart_SUBREG (mode, op0);
- else
- return op0;
+ return op0;
}
+ /* Convert divide by power of two into shift. */
+ if (GET_CODE (trueop1) == CONST_INT
+ && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
+ return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
+ break;
- /* Maybe change 0 / x to 0. This transformation isn't safe for
- modes with NaNs, since 0 / 0 will then be NaN rather than 0.
- Nor is it safe for modes with signed zeros, since dividing
- 0 by a negative number gives -0, not 0. */
- if (!HONOR_NANS (mode)
- && !HONOR_SIGNED_ZEROS (mode)
- && trueop0 == CONST0_RTX (mode)
- && ! side_effects_p (op1))
- return op0;
-
- /* Change division by a constant into multiplication. Only do
- this with -funsafe-math-optimizations. */
- else if (GET_CODE (trueop1) == CONST_DOUBLE
- && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
- && trueop1 != CONST0_RTX (mode)
- && flag_unsafe_math_optimizations)
+ case DIV:
+ /* Handle floating point and integers separately. */
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
{
- REAL_VALUE_TYPE d;
- REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
+ /* Maybe change 0.0 / x to 0.0. This transformation isn't
+ safe for modes with NaNs, since 0.0 / 0.0 will then be
+ NaN rather than 0.0. Nor is it safe for modes with signed
+ zeros, since dividing 0 by a negative number gives -0.0 */
+ if (trueop0 == CONST0_RTX (mode)
+ && !HONOR_NANS (mode)
+ && !HONOR_SIGNED_ZEROS (mode)
+ && ! side_effects_p (op1))
+ return op0;
+ /* x/1.0 is x. */
+ if (trueop1 == CONST1_RTX (mode)
+ && !HONOR_SNANS (mode))
+ return op0;
- if (! REAL_VALUES_EQUAL (d, dconst0))
+ if (GET_CODE (trueop1) == CONST_DOUBLE
+ && trueop1 != CONST0_RTX (mode))
{
- REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
- tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
- return simplify_gen_binary (MULT, mode, op0, tem);
+ REAL_VALUE_TYPE d;
+ REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
+
+ /* x/-1.0 is -x. */
+ if (REAL_VALUES_EQUAL (d, dconstm1)
+ && !HONOR_SNANS (mode))
+ return simplify_gen_unary (NEG, mode, op0, mode);
+
+ /* Change FP division by a constant into multiplication.
+ Only do this with -funsafe-math-optimizations. */
+ if (flag_unsafe_math_optimizations
+ && !REAL_VALUES_EQUAL (d, dconst0))
+ {
+ REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
+ tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
+ return simplify_gen_binary (MULT, mode, op0, tem);
+ }
+ }
+ }
+ else
+ {
+ /* 0/x is 0 (or x&0 if x has side-effects). */
+ if (trueop0 == const0_rtx)
+ return side_effects_p (op1)
+ ? simplify_gen_binary (AND, mode, op1, const0_rtx)
+ : const0_rtx;
+ /* x/1 is x. */
+ if (trueop1 == const1_rtx)
+ {
+ /* Handle narrowing DIV. */
+ rtx x = gen_lowpart_common (mode, op0);
+ if (x)
+ return x;
+ if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
+ return gen_lowpart_SUBREG (mode, op0);
+ return op0;
+ }
+ /* x/-1 is -x. */
+ if (trueop1 == constm1_rtx)
+ {
+ rtx x = gen_lowpart_common (mode, op0);
+ if (!x)
+ x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
+ ? gen_lowpart_SUBREG (mode, op0) : op0;
+ return simplify_gen_unary (NEG, mode, x, mode);
}
}
break;
case UMOD:
- /* Handle modulus by power of two (mod with 1 handled below). */
+ /* 0%x is 0 (or x&0 if x has side-effects). */
+ if (trueop0 == const0_rtx)
+ return side_effects_p (op1)
+ ? simplify_gen_binary (AND, mode, op1, const0_rtx)
+ : const0_rtx;
+ /* x%1 is 0 (or x&0 if x has side-effects). */
+ if (trueop1 == const1_rtx)
+ return side_effects_p (op0)
+ ? simplify_gen_binary (AND, mode, op0, const0_rtx)
+ : const0_rtx;
+ /* Implement modulus by power of two as AND. */
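+ /* For example, unsigned "x % 8" becomes "x & 7". */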
if (GET_CODE (trueop1) == CONST_INT
&& exact_log2 (INTVAL (trueop1)) > 0)
return simplify_gen_binary (AND, mode, op0,
GEN_INT (INTVAL (op1) - 1));
-
- /* Fall through.... */
+ break;
case MOD:
- if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
- && ! side_effects_p (op0) && ! side_effects_p (op1))
- return const0_rtx;
+ /* 0%x is 0 (or x&0 if x has side-effects). */
+ if (trueop0 == const0_rtx)
+ return side_effects_p (op1)
+ ? simplify_gen_binary (AND, mode, op1, const0_rtx)
+ : const0_rtx;
+ /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
+ if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
+ return side_effects_p (op0)
+ ? simplify_gen_binary (AND, mode, op0, const0_rtx)
+ : const0_rtx;
break;
case ROTATERT:
case VEC_SELECT:
if (!VECTOR_MODE_P (mode))
{
- if (!VECTOR_MODE_P (GET_MODE (trueop0))
- || (mode
- != GET_MODE_INNER (GET_MODE (trueop0)))
- || GET_CODE (trueop1) != PARALLEL
- || XVECLEN (trueop1, 0) != 1
- || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
- abort ();
+ gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
+ gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
+ gcc_assert (GET_CODE (trueop1) == PARALLEL);
+ gcc_assert (XVECLEN (trueop1, 0) == 1);
+ gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
if (GET_CODE (trueop0) == CONST_VECTOR)
- return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
+ return CONST_VECTOR_ELT (trueop0,
+ INTVAL (XVECEXP (trueop1, 0, 0)));
}
else
{
- if (!VECTOR_MODE_P (GET_MODE (trueop0))
- || (GET_MODE_INNER (mode)
- != GET_MODE_INNER (GET_MODE (trueop0)))
- || GET_CODE (trueop1) != PARALLEL)
- abort ();
+ gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
+ gcc_assert (GET_MODE_INNER (mode)
+ == GET_MODE_INNER (GET_MODE (trueop0)));
+ gcc_assert (GET_CODE (trueop1) == PARALLEL);
if (GET_CODE (trueop0) == CONST_VECTOR)
{
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
- if (XVECLEN (trueop1, 0) != (int) n_elts)
- abort ();
+ gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
for (i = 0; i < n_elts; i++)
{
rtx x = XVECEXP (trueop1, 0, i);
- if (GET_CODE (x) != CONST_INT)
- abort ();
- RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
+ gcc_assert (GET_CODE (x) == CONST_INT);
+ RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
+ INTVAL (x));
}
return gen_rtx_CONST_VECTOR (mode, v);
? GET_MODE (trueop1)
: GET_MODE_INNER (mode));
- if (!VECTOR_MODE_P (mode)
- || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
- != GET_MODE_SIZE (mode)))
- abort ();
-
- if ((VECTOR_MODE_P (op0_mode)
- && (GET_MODE_INNER (mode)
- != GET_MODE_INNER (op0_mode)))
- || (!VECTOR_MODE_P (op0_mode)
- && GET_MODE_INNER (mode) != op0_mode))
- abort ();
-
- if ((VECTOR_MODE_P (op1_mode)
- && (GET_MODE_INNER (mode)
- != GET_MODE_INNER (op1_mode)))
- || (!VECTOR_MODE_P (op1_mode)
- && GET_MODE_INNER (mode) != op1_mode))
- abort ();
+ gcc_assert (VECTOR_MODE_P (mode));
+ gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
+ == GET_MODE_SIZE (mode));
+
+ if (VECTOR_MODE_P (op0_mode))
+ gcc_assert (GET_MODE_INNER (mode)
+ == GET_MODE_INNER (op0_mode));
+ else
+ gcc_assert (GET_MODE_INNER (mode) == op0_mode);
+
+ if (VECTOR_MODE_P (op1_mode))
+ gcc_assert (GET_MODE_INNER (mode)
+ == GET_MODE_INNER (op1_mode));
+ else
+ gcc_assert (GET_MODE_INNER (mode) == op1_mode);
if ((GET_CODE (trueop0) == CONST_VECTOR
|| GET_CODE (trueop0) == CONST_INT
return 0;
default:
- abort ();
+ gcc_unreachable ();
}
return 0;
break;
case LSHIFTRT:
- /* If shift count is undefined, don't fold it; let the machine do
- what it wants. But truncate it if the machine will do that. */
- if (arg1 < 0)
- return 0;
-
-#ifdef SHIFT_COUNT_TRUNCATED
- if (SHIFT_COUNT_TRUNCATED)
- arg1 %= width;
-#endif
-
- val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
- break;
-
case ASHIFT:
- if (arg1 < 0)
- return 0;
-
-#ifdef SHIFT_COUNT_TRUNCATED
- if (SHIFT_COUNT_TRUNCATED)
- arg1 %= width;
-#endif
-
- val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
- break;
-
case ASHIFTRT:
- if (arg1 < 0)
- return 0;
-
-#ifdef SHIFT_COUNT_TRUNCATED
+ /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
+ value is in range. We can't return any old value for out-of-range
+ arguments because either the middle-end (via shift_truncation_mask)
+ or the back-end might be relying on target-specific knowledge.
+ Nor can we rely on shift_truncation_mask, since the shift might
+ not be part of an ashlM3, lshrM3 or ashrM3 instruction. */
if (SHIFT_COUNT_TRUNCATED)
- arg1 %= width;
-#endif
-
- val = arg0s >> arg1;
+ arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
+ else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
+ return 0;
- /* Bootstrap compiler may not have sign extended the right shift.
- Manually extend the sign to insure bootstrap cc matches gcc. */
- if (arg0s < 0 && arg1 > 0)
- val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
+ val = (code == ASHIFT
+ ? ((unsigned HOST_WIDE_INT) arg0) << arg1
+ : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
+ /* Sign-extend the result for arithmetic right shifts. */
+ if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
+ val |= ((HOST_WIDE_INT) -1) << (width - arg1);
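+ /* E.g. in QImode, (ashiftrt -16 2) computes 0x3c from the logical
+ shift and the OR above restores the sign bits, giving -4. */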
break;
case ROTATERT:
return 0;
default:
- abort ();
+ gcc_unreachable ();
}
val = trunc_int_for_mode (val, mode);
struct simplify_plus_minus_op_data ops[8];
rtx result, tem;
int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
- int first, negate, changed;
+ int first, changed;
int i, j;
memset (ops, 0, sizeof ops);
|| (n_ops + n_consts == input_ops && n_consts <= input_consts)))
return NULL_RTX;
- /* Put a non-negated operand first. If there aren't any, make all
- operands positive and negate the whole thing later. */
+ /* Put a non-negated operand first, if possible. */
- negate = 0;
for (i = 0; i < n_ops && ops[i].neg; i++)
continue;
if (i == n_ops)
- {
- for (i = 0; i < n_ops; i++)
- ops[i].neg = 0;
- negate = 1;
- }
+ ops[0].op = gen_rtx_NEG (mode, ops[0].op);
else if (i != 0)
{
tem = ops[0].op;
result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
mode, result, ops[i].op);
- return negate ? gen_rtx_NEG (mode, result) : result;
+ return result;
}
/* Like simplify_binary_operation except used for relational operators.
- MODE is the mode of the operands, not that of the result. If MODE
- is VOIDmode, both operands must also be VOIDmode and we compare the
- operands in "infinite precision".
-
- If no simplification is possible, this function returns zero. Otherwise,
- it returns either const_true_rtx or const0_rtx. */
+ MODE is the mode of the result. If MODE is VOIDmode, the operands
+ must not both be VOIDmode.
+ CMP_MODE specifies the mode in which the comparison is done, so it
+ is the mode of the operands. If CMP_MODE is VOIDmode, it is taken
+ from the operands or, if both are VOIDmode, the operands are
+ compared in "infinite precision". */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
- rtx op0, rtx op1)
+ enum machine_mode cmp_mode, rtx op0, rtx op1)
+{
+ rtx tem, trueop0, trueop1;
+
+ if (cmp_mode == VOIDmode)
+ cmp_mode = GET_MODE (op0);
+ if (cmp_mode == VOIDmode)
+ cmp_mode = GET_MODE (op1);
+
+ tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
+ if (tem)
+ {
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ if (tem == const0_rtx)
+ return CONST0_RTX (mode);
+#ifdef FLOAT_STORE_FLAG_VALUE
+ {
+ REAL_VALUE_TYPE val;
+ val = FLOAT_STORE_FLAG_VALUE (mode);
+ return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
+ }
+#else
+ return NULL_RTX;
+#endif
+ }
+ if (VECTOR_MODE_P (mode))
+ {
+ if (tem == const0_rtx)
+ return CONST0_RTX (mode);
+#ifdef VECTOR_STORE_FLAG_VALUE
+ {
+ int i, units;
+ rtvec v;
+
+ rtx val = VECTOR_STORE_FLAG_VALUE (mode);
+ if (val == NULL_RTX)
+ return NULL_RTX;
+ if (val == const1_rtx)
+ return CONST1_RTX (mode);
+
+ units = GET_MODE_NUNITS (mode);
+ v = rtvec_alloc (units);
+ for (i = 0; i < units; i++)
+ RTVEC_ELT (v, i) = val;
+ return gen_rtx_raw_CONST_VECTOR (mode, v);
+ }
+#else
+ return NULL_RTX;
+#endif
+ }
+
+ return tem;
+ }
+
+ /* For the following tests, ensure const0_rtx is op1. */
+ if (swap_commutative_operands_p (op0, op1)
+ || (op0 == const0_rtx && op1 != const0_rtx))
+ tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
+
+ /* If op0 is a compare, extract the comparison arguments from it. */
+ if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
+ return simplify_relational_operation (code, mode, VOIDmode,
+ XEXP (op0, 0), XEXP (op0, 1));
+
+ if (mode == VOIDmode
+ || GET_MODE_CLASS (cmp_mode) == MODE_CC
+ || CC0_P (op0))
+ return NULL_RTX;
+
+ trueop0 = avoid_constant_pool_reference (op0);
+ trueop1 = avoid_constant_pool_reference (op1);
+ return simplify_relational_operation_1 (code, mode, cmp_mode,
+ trueop0, trueop1);
+}
+
+/* This part of simplify_relational_operation is only used when CMP_MODE
+ is not in class MODE_CC (i.e. it is a real comparison).
+
+ MODE is the mode of the result, while CMP_MODE specifies the mode
+ in which the comparison is done, so it is the mode of the operands. */
+static rtx
+simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
+ enum machine_mode cmp_mode, rtx op0, rtx op1)
+{
+ if (GET_CODE (op1) == CONST_INT)
+ {
+ if (INTVAL (op1) == 0 && COMPARISON_P (op0))
+ {
+ /* If op0 is a comparison, extract the comparison arguments from it. */
+ if (code == NE)
+ {
+ if (GET_MODE (op0) == cmp_mode)
+ return simplify_rtx (op0);
+ else
+ return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
+ XEXP (op0, 0), XEXP (op0, 1));
+ }
+ else if (code == EQ)
+ {
+ enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
+ if (new_code != UNKNOWN)
+ return simplify_gen_relational (new_code, mode, VOIDmode,
+ XEXP (op0, 0), XEXP (op0, 1));
+ }
+ }
+ }
+
+ return NULL_RTX;
+}
+
+/* Check if the given comparison (done in the given MODE) is actually a
+ tautology or a contradiction.
+ If no simplification is possible, this function returns zero.
+ Otherwise, it returns either const_true_rtx or const0_rtx. */
+
+rtx
+simplify_const_relational_operation (enum rtx_code code,
+ enum machine_mode mode,
+ rtx op0, rtx op1)
{
int equal, op0lt, op0ltu, op1lt, op1ltu;
rtx tem;
rtx trueop0;
rtx trueop1;
- if (mode == VOIDmode
- && (GET_MODE (op0) != VOIDmode
- || GET_MODE (op1) != VOIDmode))
- abort ();
+ gcc_assert (mode != VOIDmode
+ || (GET_MODE (op0) == VOIDmode
+ && GET_MODE (op1) == VOIDmode));
/* If op0 is a compare, extract the comparison arguments from it. */
if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
- trueop0 = avoid_constant_pool_reference (op0);
- trueop1 = avoid_constant_pool_reference (op1);
-
/* We can't simplify MODE_CC values since we don't know what the
actual comparison is. */
if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
return 0;
/* Make sure the constant is second. */
- if (swap_commutative_operands_p (trueop0, trueop1))
+ if (swap_commutative_operands_p (op0, op1))
{
tem = op0, op0 = op1, op1 = tem;
- tem = trueop0, trueop0 = trueop1, trueop1 = tem;
code = swap_condition (code);
}
+ trueop0 = avoid_constant_pool_reference (op0);
+ trueop1 = avoid_constant_pool_reference (op1);
+
/* For integer comparisons of A and B maybe we can simplify A - B and can
then simplify a comparison of that with zero. If A and B are both either
a register or a CONST_INT, this can't help; testing for these cases will
If CODE is an unsigned comparison, then we can never do this optimization,
because it gives an incorrect result if the subtraction wraps around zero.
ANSI C defines unsigned operations such that they never overflow, and
- thus such cases can not be ignored. */
+ thus such cases can not be ignored; but we cannot do it even for
+ signed comparisons for languages such as Java, so test flag_wrapv. */
- if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
- && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
- && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
+ if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
+ && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
+ && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
&& 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
+ /* We cannot do this for == or != if tem is a nonzero address. */
+ && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
&& code != GTU && code != GEU && code != LTU && code != LEU)
- return simplify_relational_operation (signed_condition (code),
- mode, tem, const0_rtx);
+ return simplify_const_relational_operation (signed_condition (code),
+ mode, tem, const0_rtx);
if (flag_unsafe_math_optimizations && code == ORDERED)
return const_true_rtx;
/* Otherwise, there are some code-specific tests we can make. */
else
{
+ /* Optimize comparisons with upper and lower bounds. */
+ if (SCALAR_INT_MODE_P (mode)
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ {
+ rtx mmin, mmax;
+ int sign;
+
+ if (code == GEU
+ || code == LEU
+ || code == GTU
+ || code == LTU)
+ sign = 0;
+ else
+ sign = 1;
+
+ get_mode_bounds (mode, sign, mode, &mmin, &mmax);
+
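+ /* For example, (ltu X 0) is always false and (geu X 0) always
+ true, since mmin is zero when the comparison is unsigned. */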
+ tem = NULL_RTX;
+ switch (code)
+ {
+ case GEU:
+ case GE:
+ /* x >= min is always true. */
+ if (rtx_equal_p (trueop1, mmin))
+ tem = const_true_rtx;
+ break;
+
+ case LEU:
+ case LE:
+ /* x <= max is always true. */
+ if (rtx_equal_p (trueop1, mmax))
+ tem = const_true_rtx;
+ break;
+
+ case GTU:
+ case GT:
+ /* x > max is always false. */
+ if (rtx_equal_p (trueop1, mmax))
+ tem = const0_rtx;
+ break;
+
+ case LTU:
+ case LT:
+ /* x < min is always false. */
+ if (rtx_equal_p (trueop1, mmin))
+ tem = const0_rtx;
+ break;
+
+ default:
+ break;
+ }
+ if (tem == const0_rtx
+ || tem == const_true_rtx)
+ return tem;
+ }
+
switch (code)
{
case EQ:
return const_true_rtx;
break;
- case GEU:
- /* Unsigned values are never negative. */
- if (trueop1 == const0_rtx)
- return const_true_rtx;
- break;
-
- case LTU:
- if (trueop1 == const0_rtx)
- return const0_rtx;
- break;
-
- case LEU:
- /* Unsigned values are never greater than the largest
- unsigned value. */
- if (GET_CODE (trueop1) == CONST_INT
- && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
- && INTEGRAL_MODE_P (mode))
- return const_true_rtx;
- break;
-
- case GTU:
- if (GET_CODE (trueop1) == CONST_INT
- && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
- && INTEGRAL_MODE_P (mode))
- return const0_rtx;
- break;
-
case LT:
/* Optimize abs(x) < 0.0. */
if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
case UNORDERED:
return const0_rtx;
default:
- abort ();
+ gcc_unreachable ();
}
}
\f
&& rtx_equal_p (XEXP (op0, 1), op1))))
return op2;
- if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
+ if (COMPARISON_P (op0) && ! side_effects_p (op0))
{
enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
? GET_MODE (XEXP (op0, 1))
: GET_MODE (XEXP (op0, 0)));
rtx temp;
- if (cmp_mode == VOIDmode)
- cmp_mode = op0_mode;
- temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
- XEXP (op0, 0), XEXP (op0, 1));
-
- /* See if any simplifications were possible. */
- if (temp == const0_rtx)
- return op2;
- else if (temp == const_true_rtx)
- return op1;
- else if (temp)
- abort ();
/* Look for happy constants in op1 and op2. */
if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
else
break;
- return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
+ return simplify_gen_relational (code, mode, cmp_mode,
+ XEXP (op0, 0), XEXP (op0, 1));
+ }
+
+ if (cmp_mode == VOIDmode)
+ cmp_mode = op0_mode;
+ temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
+ cmp_mode, XEXP (op0, 0),
+ XEXP (op0, 1));
+
+ /* See if any simplifications were possible. */
+ if (temp)
+ {
+ if (GET_CODE (temp) == CONST_INT)
+ return temp == const0_rtx ? op2 : op1;
+ else
+ return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
}
}
break;
case VEC_MERGE:
- if (GET_MODE (op0) != mode
- || GET_MODE (op1) != mode
- || !VECTOR_MODE_P (mode))
- abort ();
+ gcc_assert (GET_MODE (op0) == mode);
+ gcc_assert (GET_MODE (op1) == mode);
+ gcc_assert (VECTOR_MODE_P (mode));
op2 = avoid_constant_pool_reference (op2);
if (GET_CODE (op2) == CONST_INT)
{
break;
default:
- abort ();
+ gcc_unreachable ();
}
return 0;
elems = &op;
elem_bitsize = max_bitsize;
}
-
- if (BITS_PER_UNIT % value_bit != 0)
- abort (); /* Too complicated; reducing value_bit may help. */
- if (elem_bitsize % BITS_PER_UNIT != 0)
- abort (); /* I don't know how to handle endianness of sub-units. */
+ /* If this asserts, it is too complicated; reducing value_bit may help. */
+ gcc_assert (BITS_PER_UNIT % value_bit == 0);
+ /* I don't know how to handle endianness of sub-units. */
+ gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
for (elem = 0; elem < num_elem; elem++)
{
{
/* If this triggers, someone should have generated a
CONST_INT instead. */
- if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
- abort ();
+ gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
*vp++ = CONST_DOUBLE_LOW (el) >> i;
while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
{
- *vp++ = CONST_DOUBLE_HIGH (el) >> i;
+ *vp++
+ = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
i += value_bit;
}
/* It shouldn't matter what's done here, so fill it with
for (; i < max_bitsize; i += value_bit)
*vp++ = 0;
}
- else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
+ else
{
long tmp[max_bitsize / 32];
int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
-
- if (bitsize > elem_bitsize)
- abort ();
- if (bitsize % value_bit != 0)
- abort ();
+
+ gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
+ gcc_assert (bitsize <= elem_bitsize);
+ gcc_assert (bitsize % value_bit == 0);
real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
GET_MODE (el));
for (; i < elem_bitsize; i += value_bit)
*vp++ = 0;
}
- else
- abort ();
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
/* BYTE should still be inside OP. (Note that BYTE is unsigned,
so if it's become negative it will instead be very large.) */
- if (byte >= GET_MODE_SIZE (innermode))
- abort ();
+ gcc_assert (byte < GET_MODE_SIZE (innermode));
/* Convert from bytes to chunks of size value_bit. */
value_start = byte * (BITS_PER_UNIT / value_bit);
outer_class = GET_MODE_CLASS (outer_submode);
elem_bitsize = GET_MODE_BITSIZE (outer_submode);
- if (elem_bitsize % value_bit != 0)
- abort ();
- if (elem_bitsize + value_start * value_bit > max_bitsize)
- abort ();
+ gcc_assert (elem_bitsize % value_bit == 0);
+ gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
for (elem = 0; elem < num_elem; elem++)
{
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
if (VECTOR_MODE_P (outermode))
enum machine_mode innermode, unsigned int byte)
{
/* Little bit of sanity checking. */
- if (innermode == VOIDmode || outermode == VOIDmode
- || innermode == BLKmode || outermode == BLKmode)
- abort ();
+ gcc_assert (innermode != VOIDmode);
+ gcc_assert (outermode != VOIDmode);
+ gcc_assert (innermode != BLKmode);
+ gcc_assert (outermode != BLKmode);
- if (GET_MODE (op) != innermode
- && GET_MODE (op) != VOIDmode)
- abort ();
+ gcc_assert (GET_MODE (op) == innermode
+ || GET_MODE (op) == VOIDmode);
- if (byte % GET_MODE_SIZE (outermode)
- || byte >= GET_MODE_SIZE (innermode))
- abort ();
+ gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
+ gcc_assert (byte < GET_MODE_SIZE (innermode));
if (outermode == innermode && !byte)
return op;
{
enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
int final_offset = byte + SUBREG_BYTE (op);
- rtx new;
+ rtx newx;
if (outermode == innermostmode
&& byte == 0 && SUBREG_BYTE (op) == 0)
}
/* Recurse for further possible simplifications. */
- new = simplify_subreg (outermode, SUBREG_REG (op),
+ newx = simplify_subreg (outermode, SUBREG_REG (op),
GET_MODE (SUBREG_REG (op)),
final_offset);
- if (new)
- return new;
+ if (newx)
+ return newx;
return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
}
frame, or argument pointer, leave this as a SUBREG. */
if (REG_P (op)
- && (! REG_FUNCTION_VALUE_P (op)
- || ! rtx_equal_function_value_matters)
&& REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
&& ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
SUBREG with it. Don't do this if the MEM has a mode-dependent address
or if we would be widening it. */
- if (GET_CODE (op) == MEM
+ if (MEM_P (op)
&& ! mode_dependent_address_p (XEXP (op, 0))
/* Allow splitting of volatile memory references in case we don't
have instruction to move the whole thing. */
res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
if (res)
return res;
- /* We can at least simplify it by referring directly to the relevant part. */
+ /* We can at least simplify it by referring directly to the
+ relevant part. */
return gen_rtx_SUBREG (outermode, part, final_offset);
}
+ /* Optimize SUBREG truncations of zero and sign extended values. */
+ if ((GET_CODE (op) == ZERO_EXTEND
+ || GET_CODE (op) == SIGN_EXTEND)
+ && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
+ {
+ unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
+
+ /* If we're requesting the lowpart of a zero or sign extension,
+ there are three possibilities. If the outermode is the same
+ as the origmode, we can omit both the extension and the subreg.
+ If the outermode is not larger than the origmode, we can apply
+ the truncation without the extension. Finally, if the outermode
+ is larger than the origmode, but both are integer modes, we
+ can just extend to the appropriate mode. */
+ if (bitpos == 0)
+ {
+ enum machine_mode origmode = GET_MODE (XEXP (op, 0));
+ if (outermode == origmode)
+ return XEXP (op, 0);
+ if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
+ return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
+ subreg_lowpart_offset (outermode,
+ origmode));
+ if (SCALAR_INT_MODE_P (outermode))
+ return simplify_gen_unary (GET_CODE (op), outermode,
+ XEXP (op, 0), origmode);
+ }
+
+ /* A SUBREG resulting from a zero extension may fold to zero if
+ it extracts higher bits than the ZERO_EXTEND's source provides. */
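+ /* For example, the upper SImode half of
+ (zero_extend:DI (reg:SI X)) is known to be zero. */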
+ if (GET_CODE (op) == ZERO_EXTEND
+ && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
+ return CONST0_RTX (outermode);
+ }
+
return NULL_RTX;
}
simplify_gen_subreg (enum machine_mode outermode, rtx op,
enum machine_mode innermode, unsigned int byte)
{
- rtx new;
+ rtx newx;
/* Little bit of sanity checking. */
- if (innermode == VOIDmode || outermode == VOIDmode
- || innermode == BLKmode || outermode == BLKmode)
- abort ();
-
- if (GET_MODE (op) != innermode
- && GET_MODE (op) != VOIDmode)
- abort ();
+ gcc_assert (innermode != VOIDmode);
+ gcc_assert (outermode != VOIDmode);
+ gcc_assert (innermode != BLKmode);
+ gcc_assert (outermode != BLKmode);
- if (byte % GET_MODE_SIZE (outermode)
- || byte >= GET_MODE_SIZE (innermode))
- abort ();
+ gcc_assert (GET_MODE (op) == innermode
+ || GET_MODE (op) == VOIDmode);
- if (GET_CODE (op) == QUEUED)
- return NULL_RTX;
+ gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
+ gcc_assert (byte < GET_MODE_SIZE (innermode));
- new = simplify_subreg (outermode, op, innermode, byte);
- if (new)
- return new;
+ newx = simplify_subreg (outermode, op, innermode, byte);
+ if (newx)
+ return newx;
- if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
+ if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode
+ || (REG_P (op) && REGNO (op) < FIRST_PSEUDO_REGISTER))
return NULL_RTX;
return gen_rtx_SUBREG (outermode, op, byte);
{
enum rtx_code code = GET_CODE (x);
enum machine_mode mode = GET_MODE (x);
- rtx temp;
switch (GET_RTX_CLASS (code))
{
- case '1':
+ case RTX_UNARY:
return simplify_unary_operation (code, mode,
XEXP (x, 0), GET_MODE (XEXP (x, 0)));
- case 'c':
+ case RTX_COMM_ARITH:
if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
/* Fall through.... */
- case '2':
+ case RTX_BIN_ARITH:
return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
- case '3':
- case 'b':
+ case RTX_TERNARY:
+ case RTX_BITFIELD_OPS:
return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
XEXP (x, 0), XEXP (x, 1),
XEXP (x, 2));
- case '<':
- temp = simplify_relational_operation (code,
- ((GET_MODE (XEXP (x, 0))
- != VOIDmode)
- ? GET_MODE (XEXP (x, 0))
- : GET_MODE (XEXP (x, 1))),
- XEXP (x, 0), XEXP (x, 1));
-#ifdef FLOAT_STORE_FLAG_VALUE
- if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
- {
- if (temp == const0_rtx)
- temp = CONST0_RTX (mode);
- else
- temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
- mode);
- }
-#endif
- return temp;
-
- case 'x':
+ case RTX_COMPARE:
+ case RTX_COMM_COMPARE:
+ return simplify_relational_operation (code, mode,
+ ((GET_MODE (XEXP (x, 0))
+ != VOIDmode)
+ ? GET_MODE (XEXP (x, 0))
+ : GET_MODE (XEXP (x, 1))),
+ XEXP (x, 0),
+ XEXP (x, 1));
+
+ case RTX_EXTRA:
if (code == SUBREG)
return simplify_gen_subreg (mode, SUBREG_REG (x),
GET_MODE (SUBREG_REG (x)),
SUBREG_BYTE (x));
- if (code == CONSTANT_P_RTX)
- {
- if (CONSTANT_P (XEXP (x, 0)))
- return const1_rtx;
- }
break;
- case 'o':
+ case RTX_OBJ:
if (code == LO_SUM)
{
/* Convert (lo_sum (high FOO) FOO) to FOO. */