/* RTL simplification functions for GNU compiler.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
This file is part of GCC.
((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
static rtx neg_const_int (enum machine_mode, rtx);
-static bool mode_signbit_p (enum machine_mode, rtx);
+static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
rtx, int);
/* Test whether expression, X, is an immediate constant that represents
the most significant bit of machine mode MODE. */
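+/* For instance (illustrative only): in SImode this accepts the CONST_INT
+ whose only set bit within the mode is bit 31, and rejects any other
+ immediate constant. */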
-static bool
+bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
unsigned HOST_WIDE_INT val;
return gen_rtx_fmt_ee (code, mode, op0, op1);
}
\f
-/* Replace all occurrences of OLD in X with NEW and try to simplify the
+/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
resulting RTX. Return a new RTX which is as simplified as possible. */
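+/* For instance (illustrative only): replacing (reg A) with (const_int 4)
+ in (plus (reg A) (const_int 2)) yields (const_int 6), because the
+ rebuilt expression is passed back through simplify_gen_binary. */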
rtx
-simplify_replace_rtx (rtx x, rtx old, rtx new)
+simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
enum rtx_code code = GET_CODE (x);
enum machine_mode mode = GET_MODE (x);
enum machine_mode op_mode;
rtx op0, op1, op2;
- /* If X is OLD, return NEW. Otherwise, if this is an expression, try
+ /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
to build a new expression substituting recursively. If we can't do
anything, return our input. */
- if (x == old)
- return new;
+ if (x == old_rtx)
+ return new_rtx;
switch (GET_RTX_CLASS (code))
{
case RTX_UNARY:
op0 = XEXP (x, 0);
op_mode = GET_MODE (op0);
- op0 = simplify_replace_rtx (op0, old, new);
+ op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
if (op0 == XEXP (x, 0))
return x;
return simplify_gen_unary (code, mode, op0, op_mode);
case RTX_BIN_ARITH:
case RTX_COMM_ARITH:
- op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
- op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
+ op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
+ op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
return x;
return simplify_gen_binary (code, mode, op0, op1);
op0 = XEXP (x, 0);
op1 = XEXP (x, 1);
op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
- op0 = simplify_replace_rtx (op0, old, new);
- op1 = simplify_replace_rtx (op1, old, new);
+ op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
+ op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
return x;
return simplify_gen_relational (code, mode, op_mode, op0, op1);
case RTX_BITFIELD_OPS:
op0 = XEXP (x, 0);
op_mode = GET_MODE (op0);
- op0 = simplify_replace_rtx (op0, old, new);
- op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
- op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
+ op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
+ op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
+ op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
return x;
if (op_mode == VOIDmode)
/* The only case we try to handle is a SUBREG. */
if (code == SUBREG)
{
- op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
+ op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
if (op0 == SUBREG_REG (x))
return x;
op0 = simplify_gen_subreg (GET_MODE (x), op0,
case RTX_OBJ:
if (code == MEM)
{
- op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
+ op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
if (op0 == XEXP (x, 0))
return x;
return replace_equiv_address_nv (x, op0);
}
else if (code == LO_SUM)
{
- op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
- op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
+ op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
+ op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
/* (lo_sum (high x) x) -> x */
if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
}
else if (code == REG)
{
- if (REG_P (old) && REGNO (x) == REGNO (old))
- return new;
+ if (rtx_equal_p (x, old_rtx))
+ return new_rtx;
}
break;
if (code == VEC_DUPLICATE)
{
- if (!VECTOR_MODE_P (mode))
- abort ();
- if (GET_MODE (trueop) != VOIDmode
- && !VECTOR_MODE_P (GET_MODE (trueop))
- && GET_MODE_INNER (mode) != GET_MODE (trueop))
- abort ();
- if (GET_MODE (trueop) != VOIDmode
- && VECTOR_MODE_P (GET_MODE (trueop))
- && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
- abort ();
+ gcc_assert (VECTOR_MODE_P (mode));
+ if (GET_MODE (trueop) != VOIDmode)
+ {
+ if (!VECTOR_MODE_P (GET_MODE (trueop)))
+ gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
+ else
+ gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
+ (GET_MODE (trueop)));
+ }
if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
|| GET_CODE (trueop) == CONST_VECTOR)
{
int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
- if (in_n_elts >= n_elts || n_elts % in_n_elts)
- abort ();
+ gcc_assert (in_n_elts < n_elts);
+ gcc_assert ((n_elts % in_n_elts) == 0);
for (i = 0; i < n_elts; i++)
RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
}
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
- if (op_n_elts != n_elts)
- abort ();
-
+ gcc_assert (op_n_elts == n_elts);
for (i = 0; i < n_elts; i++)
{
rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
case ZERO_EXTEND:
/* When zero-extending a CONST_INT, we need to know its
original mode. */
- if (op_mode == VOIDmode)
- abort ();
+ gcc_assert (op_mode != VOIDmode);
if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
{
/* If we were really extending the mode,
we would have to distinguish between zero-extension
and sign-extension. */
- if (width != GET_MODE_BITSIZE (op_mode))
- abort ();
+ gcc_assert (width == GET_MODE_BITSIZE (op_mode));
val = arg0;
}
else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
/* If we were really extending the mode,
we would have to distinguish between zero-extension
and sign-extension. */
- if (width != GET_MODE_BITSIZE (op_mode))
- abort ();
+ gcc_assert (width == GET_MODE_BITSIZE (op_mode));
val = arg0;
}
else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
return 0;
default:
- abort ();
+ gcc_unreachable ();
}
val = trunc_int_for_mode (val, mode);
break;
case ZERO_EXTEND:
- if (op_mode == VOIDmode)
- abort ();
+ gcc_assert (op_mode != VOIDmode);
if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
return 0;
real_from_target (&d, tmp, mode);
}
default:
- abort ();
+ gcc_unreachable ();
}
return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
return immed_double_const (xl, xh, mode);
}
&& mode == Pmode && GET_MODE (op) == ptr_mode
&& (CONSTANT_P (op)
|| (GET_CODE (op) == SUBREG
- && GET_CODE (SUBREG_REG (op)) == REG
+ && REG_P (SUBREG_REG (op))
&& REG_POINTER (SUBREG_REG (op))
&& GET_MODE (SUBREG_REG (op)) == Pmode)))
return convert_memory_address (Pmode, op);
&& mode == Pmode && GET_MODE (op) == ptr_mode
&& (CONSTANT_P (op)
|| (GET_CODE (op) == SUBREG
- && GET_CODE (SUBREG_REG (op)) == REG
+ && REG_P (SUBREG_REG (op))
&& REG_POINTER (SUBREG_REG (op))
&& GET_MODE (SUBREG_REG (op)) == Pmode)))
return convert_memory_address (Pmode, op);
rtx trueop0, trueop1;
rtx tem;
-#ifdef ENABLE_CHECKING
/* Relational operations don't work here. We must know the mode
of the operands in order to do the comparison correctly.
Assuming a full word can give incorrect results.
Consider comparing 128 with -128 in QImode. */
-
- if (GET_RTX_CLASS (code) == RTX_COMPARE
- || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
- abort ();
-#endif
+ gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
+ gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
/* Make sure the constant is second. */
if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
trueop1 = avoid_constant_pool_reference (op1);
if (VECTOR_MODE_P (mode)
+ && code != VEC_CONCAT
&& GET_CODE (trueop0) == CONST_VECTOR
&& GET_CODE (trueop1) == CONST_VECTOR)
{
- int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
- unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
+ unsigned n_elts = GET_MODE_NUNITS (mode);
enum machine_mode op0mode = GET_MODE (trueop0);
- int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
- unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
+ unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
enum machine_mode op1mode = GET_MODE (trueop1);
- int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
- unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
+ unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
- if (op0_n_elts != n_elts || op1_n_elts != n_elts)
- abort ();
-
+ gcc_assert (op0_n_elts == n_elts);
+ gcc_assert (op1_n_elts == n_elts);
for (i = 0; i < n_elts; i++)
{
rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
return gen_rtx_CONST_VECTOR (mode, v);
}
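+ /* Fold a VEC_CONCAT of constant operands into a CONST_VECTOR. For
+ instance (illustrative only), (vec_concat:V2SI (const_int 1)
+ (const_int 2)) becomes a two-element CONST_VECTOR holding 1 and 2. */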
+ if (VECTOR_MODE_P (mode)
+ && code == VEC_CONCAT
+ && CONSTANT_P (trueop0) && CONSTANT_P (trueop1))
+ {
+ unsigned n_elts = GET_MODE_NUNITS (mode);
+ rtvec v = rtvec_alloc (n_elts);
+
+ gcc_assert (n_elts >= 2);
+ if (n_elts == 2)
+ {
+ gcc_assert (GET_CODE (trueop0) != CONST_VECTOR);
+ gcc_assert (GET_CODE (trueop1) != CONST_VECTOR);
+
+ RTVEC_ELT (v, 0) = trueop0;
+ RTVEC_ELT (v, 1) = trueop1;
+ }
+ else
+ {
+ unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (trueop0));
+ unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (trueop1));
+ unsigned i;
+
+ gcc_assert (GET_CODE (trueop0) == CONST_VECTOR);
+ gcc_assert (GET_CODE (trueop1) == CONST_VECTOR);
+ gcc_assert (op0_n_elts + op1_n_elts == n_elts);
+
+ for (i = 0; i < op0_n_elts; ++i)
+ RTVEC_ELT (v, i) = XVECEXP (trueop0, 0, i);
+ for (i = 0; i < op1_n_elts; ++i)
+ RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (trueop1, 0, i);
+ }
+
+ return gen_rtx_CONST_VECTOR (mode, v);
+ }
+
if (GET_MODE_CLASS (mode) == MODE_FLOAT
&& GET_CODE (trueop0) == CONST_DOUBLE
&& GET_CODE (trueop1) == CONST_DOUBLE
GET_MODE (op1));
for (i = 0; i < 4; i++)
{
- if (code == AND)
+ switch (code)
+ {
+ case AND:
tmp0[i] &= tmp1[i];
- else if (code == IOR)
+ break;
+ case IOR:
tmp0[i] |= tmp1[i];
- else if (code == XOR)
+ break;
+ case XOR:
tmp0[i] ^= tmp1[i];
- else
- abort ();
+ break;
+ default:
+ gcc_unreachable ();
+ }
}
real_from_target (&r, tmp0, mode);
return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
}
else
{
- REAL_VALUE_TYPE f0, f1, value;
+ REAL_VALUE_TYPE f0, f1, value, result;
+ bool inexact;
REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
- f0 = real_value_truncate (mode, f0);
- f1 = real_value_truncate (mode, f1);
+ real_convert (&f0, mode, &f0);
+ real_convert (&f1, mode, &f1);
if (HONOR_SNANS (mode)
&& (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
/* Inf * 0 = NaN plus exception. */
return 0;
- REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
+ inexact = real_arithmetic (&value, rtx_to_tree_code (code),
+ &f0, &f1);
+ real_convert (&result, mode, &value);
+
+ /* Don't constant fold this floating point operation if the
+ result may depend upon the run-time rounding mode and
+ flag_rounding_math is set. */
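+ /* For instance (illustrative only), an SFmode 1.0/3.0 is inexact,
+ so with -frounding-math we return NULL_RTX rather than bake the
+ compile-time rounding mode into the result. */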
+ if (flag_rounding_math
+ && (inexact || !real_identical (&result, &value)))
+ return NULL_RTX;
- value = real_value_truncate (mode, value);
- return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
+ return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
}
}
if the multiplication is written as a shift. If so, we can
distribute and make a new multiply, shift, or maybe just
have X (if C is 2 in the example above). But don't make
- real multiply if we didn't have one before. */
+ something more expensive than we had before. */
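+ /* For instance (illustrative only), (plus (mult x 4) x) can become
+ (mult x 5), but only when rtx_cost says the new form is no more
+ expensive than the original. */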
if (! FLOAT_MODE_P (mode))
{
HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
rtx lhs = op0, rhs = op1;
- int had_mult = 0;
if (GET_CODE (lhs) == NEG)
coeff0 = -1, lhs = XEXP (lhs, 0);
&& GET_CODE (XEXP (lhs, 1)) == CONST_INT)
{
coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
- had_mult = 1;
}
else if (GET_CODE (lhs) == ASHIFT
&& GET_CODE (XEXP (lhs, 1)) == CONST_INT
&& GET_CODE (XEXP (rhs, 1)) == CONST_INT)
{
coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
- had_mult = 1;
}
else if (GET_CODE (rhs) == ASHIFT
&& GET_CODE (XEXP (rhs, 1)) == CONST_INT
if (rtx_equal_p (lhs, rhs))
{
+ rtx orig = gen_rtx_PLUS (mode, op0, op1);
tem = simplify_gen_binary (MULT, mode, lhs,
- GEN_INT (coeff0 + coeff1));
- return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
+ GEN_INT (coeff0 + coeff1));
+ return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
+ ? tem : 0;
}
}
and subtle programs can break if operations are associated. */
if (INTEGRAL_MODE_P (mode)
- && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
- || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
- || (GET_CODE (op0) == CONST
- && GET_CODE (XEXP (op0, 0)) == PLUS)
- || (GET_CODE (op1) == CONST
- && GET_CODE (XEXP (op1, 0)) == PLUS))
+ && (plus_minus_operand_p (op0)
+ || plus_minus_operand_p (op1))
&& (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
return tem;
#ifdef HAVE_cc0
if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
- if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
+ if (REG_P (xop00) && REG_P (xop10)
&& GET_MODE (xop00) == GET_MODE (xop10)
&& REGNO (xop00) == REGNO (xop10)
&& GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
if the multiplication is written as a shift. If so, we can
distribute and make a new multiply, shift, or maybe just
have X (if C is 2 in the example above). But don't make
- real multiply if we didn't have one before. */
+ something more expensive than we had before. */
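+ /* Likewise for MINUS (illustrative only): (minus (mult x 4) x) can
+ become (mult x 3) when rtx_cost says that is no more expensive. */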
if (! FLOAT_MODE_P (mode))
{
HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
rtx lhs = op0, rhs = op1;
- int had_mult = 0;
if (GET_CODE (lhs) == NEG)
coeff0 = -1, lhs = XEXP (lhs, 0);
&& GET_CODE (XEXP (lhs, 1)) == CONST_INT)
{
coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
- had_mult = 1;
}
else if (GET_CODE (lhs) == ASHIFT
&& GET_CODE (XEXP (lhs, 1)) == CONST_INT
&& GET_CODE (XEXP (rhs, 1)) == CONST_INT)
{
coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
- had_mult = 1;
}
else if (GET_CODE (rhs) == ASHIFT
&& GET_CODE (XEXP (rhs, 1)) == CONST_INT
if (rtx_equal_p (lhs, rhs))
{
+ rtx orig = gen_rtx_MINUS (mode, op0, op1);
tem = simplify_gen_binary (MULT, mode, lhs,
GEN_INT (coeff0 - coeff1));
- return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
+ return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
+ ? tem : 0;
}
}
and subtle programs can break if operations are associated. */
if (INTEGRAL_MODE_P (mode)
- && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
- || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
- || (GET_CODE (op0) == CONST
- && GET_CODE (XEXP (op0, 0)) == PLUS)
- || (GET_CODE (op1) == CONST
- && GET_CODE (XEXP (op1, 0)) == PLUS))
+ && (plus_minus_operand_p (op0)
+ || plus_minus_operand_p (op1))
&& (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
return tem;
uppermost bit is set, then this isn't a power of two due
to implicit sign extension. */
&& (width <= HOST_BITS_PER_WIDE_INT
- || val != HOST_BITS_PER_WIDE_INT - 1)
- && ! rtx_equal_function_value_matters)
+ || val != HOST_BITS_PER_WIDE_INT - 1))
return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
/* x*2 is x+x and x*(-1) is -x */
&& ! side_effects_p (op0)
&& GET_MODE_CLASS (mode) != MODE_CC)
return const0_rtx;
+
+ /* Transform (and (extend X) C) into (zero_extend (and X C)) if
+ there are no non-zero bits of C outside of X's mode. */
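+ /* For instance (illustrative only), (and:SI (sign_extend:SI (reg:QI))
+ (const_int 0x7f)) becomes (zero_extend:SI (and:QI (reg:QI)
+ (const_int 0x7f))). */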
+ if ((GET_CODE (op0) == SIGN_EXTEND
+ || GET_CODE (op0) == ZERO_EXTEND)
+ && GET_CODE (trueop1) == CONST_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
+ & INTVAL (trueop1)) == 0)
+ {
+ enum machine_mode imode = GET_MODE (XEXP (op0, 0));
+ tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
+ gen_int_mode (INTVAL (trueop1),
+ imode));
+ return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
+ }
+
+ /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
+ ((A & N) + B) & M -> (A + B) & M
+ Similarly if (N & M) == 0,
+ ((A | N) + B) & M -> (A + B) & M
+ and for - instead of + and/or ^ instead of |. */
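+ /* Worked example (illustrative only): with M == 0xff and N == 0xff,
+ ((A & 0xff) + B) & 0xff becomes (A + B) & 0xff, since bits of A
+ above bit 7 can only influence bits above bit 7 of the sum. */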
+ if (GET_CODE (trueop1) == CONST_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && ~INTVAL (trueop1)
+ && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
+ && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
+ {
+ rtx pmop[2];
+ int which;
+
+ pmop[0] = XEXP (op0, 0);
+ pmop[1] = XEXP (op0, 1);
+
+ for (which = 0; which < 2; which++)
+ {
+ tem = pmop[which];
+ switch (GET_CODE (tem))
+ {
+ case AND:
+ if (GET_CODE (XEXP (tem, 1)) == CONST_INT
+ && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
+ == INTVAL (trueop1))
+ pmop[which] = XEXP (tem, 0);
+ break;
+ case IOR:
+ case XOR:
+ if (GET_CODE (XEXP (tem, 1)) == CONST_INT
+ && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
+ pmop[which] = XEXP (tem, 0);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
+ {
+ tem = simplify_gen_binary (GET_CODE (op0), mode,
+ pmop[0], pmop[1]);
+ return simplify_gen_binary (code, mode, tem, op1);
+ }
+ }
tem = simplify_associative_operation (code, mode, op0, op1);
if (tem)
return tem;
case VEC_SELECT:
if (!VECTOR_MODE_P (mode))
{
- if (!VECTOR_MODE_P (GET_MODE (trueop0))
- || (mode
- != GET_MODE_INNER (GET_MODE (trueop0)))
- || GET_CODE (trueop1) != PARALLEL
- || XVECLEN (trueop1, 0) != 1
- || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
- abort ();
+ gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
+ gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
+ gcc_assert (GET_CODE (trueop1) == PARALLEL);
+ gcc_assert (XVECLEN (trueop1, 0) == 1);
+ gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
if (GET_CODE (trueop0) == CONST_VECTOR)
- return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
+ return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
+ (trueop1, 0, 0)));
}
else
{
- if (!VECTOR_MODE_P (GET_MODE (trueop0))
- || (GET_MODE_INNER (mode)
- != GET_MODE_INNER (GET_MODE (trueop0)))
- || GET_CODE (trueop1) != PARALLEL)
- abort ();
+ gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
+ gcc_assert (GET_MODE_INNER (mode)
+ == GET_MODE_INNER (GET_MODE (trueop0)));
+ gcc_assert (GET_CODE (trueop1) == PARALLEL);
if (GET_CODE (trueop0) == CONST_VECTOR)
{
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
- if (XVECLEN (trueop1, 0) != (int) n_elts)
- abort ();
+ gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
for (i = 0; i < n_elts; i++)
{
rtx x = XVECEXP (trueop1, 0, i);
- if (GET_CODE (x) != CONST_INT)
- abort ();
- RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
+ gcc_assert (GET_CODE (x) == CONST_INT);
+ RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
+ INTVAL (x));
}
return gen_rtx_CONST_VECTOR (mode, v);
? GET_MODE (trueop1)
: GET_MODE_INNER (mode));
- if (!VECTOR_MODE_P (mode)
- || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
- != GET_MODE_SIZE (mode)))
- abort ();
-
- if ((VECTOR_MODE_P (op0_mode)
- && (GET_MODE_INNER (mode)
- != GET_MODE_INNER (op0_mode)))
- || (!VECTOR_MODE_P (op0_mode)
- && GET_MODE_INNER (mode) != op0_mode))
- abort ();
-
- if ((VECTOR_MODE_P (op1_mode)
- && (GET_MODE_INNER (mode)
- != GET_MODE_INNER (op1_mode)))
- || (!VECTOR_MODE_P (op1_mode)
- && GET_MODE_INNER (mode) != op1_mode))
- abort ();
+ gcc_assert (VECTOR_MODE_P (mode));
+ gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
+ == GET_MODE_SIZE (mode));
+
+ if (VECTOR_MODE_P (op0_mode))
+ gcc_assert (GET_MODE_INNER (mode)
+ == GET_MODE_INNER (op0_mode));
+ else
+ gcc_assert (GET_MODE_INNER (mode) == op0_mode);
+
+ if (VECTOR_MODE_P (op1_mode))
+ gcc_assert (GET_MODE_INNER (mode)
+ == GET_MODE_INNER (op1_mode));
+ else
+ gcc_assert (GET_MODE_INNER (mode) == op1_mode);
if ((GET_CODE (trueop0) == CONST_VECTOR
|| GET_CODE (trueop0) == CONST_INT
return 0;
default:
- abort ();
+ gcc_unreachable ();
}
return 0;
break;
case LSHIFTRT:
- /* If shift count is undefined, don't fold it; let the machine do
- what it wants. But truncate it if the machine will do that. */
- if (arg1 < 0)
- return 0;
-
- if (SHIFT_COUNT_TRUNCATED)
- arg1 %= width;
-
- val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
- break;
-
case ASHIFT:
- if (arg1 < 0)
- return 0;
-
- if (SHIFT_COUNT_TRUNCATED)
- arg1 %= width;
-
- val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
- break;
-
case ASHIFTRT:
- if (arg1 < 0)
- return 0;
-
+ /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
+ value is in range. We can't return any old value for out-of-range
+ arguments because either the middle-end (via shift_truncation_mask)
+ or the back-end might be relying on target-specific knowledge.
+ Nor can we rely on shift_truncation_mask, since the shift might
+ not be part of an ashlM3, lshrM3 or ashrM3 instruction. */
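+ /* For instance (illustrative only), shifting an SImode value by 33:
+ with SHIFT_COUNT_TRUNCATED the count is reduced to 33 % 32 == 1,
+ otherwise the out-of-range count makes us refuse to fold. */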
if (SHIFT_COUNT_TRUNCATED)
- arg1 %= width;
-
- val = arg0s >> arg1;
+ arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
+ else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
+ return 0;
- /* Bootstrap compiler may not have sign extended the right shift.
- Manually extend the sign to insure bootstrap cc matches gcc. */
- if (arg0s < 0 && arg1 > 0)
- val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
+ val = (code == ASHIFT
+ ? ((unsigned HOST_WIDE_INT) arg0) << arg1
+ : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
+ /* Sign-extend the result for arithmetic right shifts. */
+ if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
+ val |= ((HOST_WIDE_INT) -1) << (width - arg1);
break;
case ROTATERT:
return 0;
default:
- abort ();
+ gcc_unreachable ();
}
val = trunc_int_for_mode (val, mode);
return result;
}
+/* Check whether an operand is suitable for calling simplify_plus_minus. */
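+/* For instance (illustrative only), (plus (reg) (const_int 4)) and
+ (const (plus (symbol_ref "x") (const_int 8))) qualify, while a bare
+ (reg) does not. */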
+static bool
+plus_minus_operand_p (rtx x)
+{
+ return GET_CODE (x) == PLUS
+ || GET_CODE (x) == MINUS
+ || (GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && CONSTANT_P (XEXP (XEXP (x, 0), 0))
+ && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
+}
+
/* Like simplify_binary_operation except used for relational operators.
MODE is the mode of the result. If MODE is VOIDmode, both operands must
- also be VOIDmode.
+ not also be VOIDmode.
CMP_MODE specifies the mode in which the comparison is done, so it is
the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
if (tem)
{
-#ifdef FLOAT_STORE_FLAG_VALUE
if (GET_MODE_CLASS (mode) == MODE_FLOAT)
{
if (tem == const0_rtx)
return CONST0_RTX (mode);
- else if (GET_MODE_CLASS (mode) == MODE_FLOAT)
- {
- REAL_VALUE_TYPE val;
- val = FLOAT_STORE_FLAG_VALUE (mode);
- return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
- }
+#ifdef FLOAT_STORE_FLAG_VALUE
+ {
+ REAL_VALUE_TYPE val;
+ val = FLOAT_STORE_FLAG_VALUE (mode);
+ return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
+ }
+#else
+ return NULL_RTX;
+#endif
}
+ if (VECTOR_MODE_P (mode))
+ {
+ if (tem == const0_rtx)
+ return CONST0_RTX (mode);
+#ifdef VECTOR_STORE_FLAG_VALUE
+ {
+ int i, units;
+ rtvec v;
+
+ rtx val = VECTOR_STORE_FLAG_VALUE (mode);
+ if (val == NULL_RTX)
+ return NULL_RTX;
+ if (val == const1_rtx)
+ return CONST1_RTX (mode);
+
+ units = GET_MODE_NUNITS (mode);
+ v = rtvec_alloc (units);
+ for (i = 0; i < units; i++)
+ RTVEC_ELT (v, i) = val;
+ return gen_rtx_raw_CONST_VECTOR (mode, v);
+ }
+#else
+ return NULL_RTX;
#endif
+ }
return tem;
}
MODE is the mode of the result, while CMP_MODE specifies the mode
in which the comparison is done, so it is the mode of the operands. */
-rtx
+
+static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
enum machine_mode cmp_mode, rtx op0, rtx op1)
{
+ enum rtx_code op0code = GET_CODE (op0);
+
if (GET_CODE (op1) == CONST_INT)
{
if (INTVAL (op1) == 0 && COMPARISON_P (op0))
}
else if (code == EQ)
{
- enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
- if (new != UNKNOWN)
- return simplify_gen_relational (new, mode, VOIDmode,
+ enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
+ if (new_code != UNKNOWN)
+ return simplify_gen_relational (new_code, mode, VOIDmode,
XEXP (op0, 0), XEXP (op0, 1));
}
}
}
+ /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
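+ /* For example (illustrative only), (eq (plus x (const_int 3))
+ (const_int 7)) becomes (eq x (const_int 4)). */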
+ if ((code == EQ || code == NE)
+ && (op0code == PLUS || op0code == MINUS)
+ && CONSTANT_P (op1)
+ && CONSTANT_P (XEXP (op0, 1))
+ && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
+ {
+ rtx x = XEXP (op0, 0);
+ rtx c = XEXP (op0, 1);
+
+ c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
+ cmp_mode, op1, c);
+ return simplify_gen_relational (code, mode, cmp_mode, x, c);
+ }
+
return NULL_RTX;
}
rtx trueop0;
rtx trueop1;
- if (mode == VOIDmode
- && (GET_MODE (op0) != VOIDmode
- || GET_MODE (op1) != VOIDmode))
- abort ();
+ gcc_assert (mode != VOIDmode
+ || (GET_MODE (op0) == VOIDmode
+ && GET_MODE (op1) == VOIDmode));
/* If op0 is a compare, extract the comparison arguments from it. */
if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
signed comparisons for languages such as Java, so test flag_wrapv. */
if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
- && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
- && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
+ && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
+ && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
&& 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
/* We cannot do this for == or != if tem is a nonzero address. */
&& ((code != EQ && code != NE) || ! nonzero_address_p (tem))
else
{
/* Optimize comparisons with upper and lower bounds. */
- if (INTEGRAL_MODE_P (mode)
+ if (SCALAR_INT_MODE_P (mode)
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
{
rtx mmin, mmax;
case UNORDERED:
return const0_rtx;
default:
- abort ();
+ gcc_unreachable ();
}
}
\f
!= ((HOST_WIDE_INT) (-1) << (width - 1))))
val &= ((HOST_WIDE_INT) 1 << width) - 1;
- return GEN_INT (val);
+ return gen_int_mode (val, mode);
}
break;
break;
case VEC_MERGE:
- if (GET_MODE (op0) != mode
- || GET_MODE (op1) != mode
- || !VECTOR_MODE_P (mode))
- abort ();
+ gcc_assert (GET_MODE (op0) == mode);
+ gcc_assert (GET_MODE (op1) == mode);
+ gcc_assert (VECTOR_MODE_P (mode));
op2 = avoid_constant_pool_reference (op2);
if (GET_CODE (op2) == CONST_INT)
{
break;
default:
- abort ();
+ gcc_unreachable ();
}
return 0;
if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
return op;
+ /* We have no way to represent a complex constant at the rtl level. */
+ if (COMPLEX_MODE_P (outermode))
+ return NULL_RTX;
+
/* Unpack the value. */
if (GET_CODE (op) == CONST_VECTOR)
elems = &op;
elem_bitsize = max_bitsize;
}
-
- if (BITS_PER_UNIT % value_bit != 0)
- abort (); /* Too complicated; reducing value_bit may help. */
- if (elem_bitsize % BITS_PER_UNIT != 0)
- abort (); /* I don't know how to handle endianness of sub-units. */
+ /* If this assertion fails, things are too complicated; reducing value_bit may help. */
+ gcc_assert (BITS_PER_UNIT % value_bit == 0);
+ /* I don't know how to handle endianness of sub-units. */
+ gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
for (elem = 0; elem < num_elem; elem++)
{
{
/* If this triggers, someone should have generated a
CONST_INT instead. */
- if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
- abort ();
+ gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
*vp++ = CONST_DOUBLE_LOW (el) >> i;
for (; i < max_bitsize; i += value_bit)
*vp++ = 0;
}
- else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
+ else
{
long tmp[max_bitsize / 32];
int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
-
- if (bitsize > elem_bitsize)
- abort ();
- if (bitsize % value_bit != 0)
- abort ();
+
+ gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
+ gcc_assert (bitsize <= elem_bitsize);
+ gcc_assert (bitsize % value_bit == 0);
real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
GET_MODE (el));
for (; i < elem_bitsize; i += value_bit)
*vp++ = 0;
}
- else
- abort ();
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
/* BYTE should still be inside OP. (Note that BYTE is unsigned,
so if it's become negative it will instead be very large.) */
- if (byte >= GET_MODE_SIZE (innermode))
- abort ();
+ gcc_assert (byte < GET_MODE_SIZE (innermode));
/* Convert from bytes to chunks of size value_bit. */
value_start = byte * (BITS_PER_UNIT / value_bit);
outer_class = GET_MODE_CLASS (outer_submode);
elem_bitsize = GET_MODE_BITSIZE (outer_submode);
- if (elem_bitsize % value_bit != 0)
- abort ();
- if (elem_bitsize + value_start * value_bit > max_bitsize)
- abort ();
+ gcc_assert (elem_bitsize % value_bit == 0);
+ gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
for (elem = 0; elem < num_elem; elem++)
{
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
if (VECTOR_MODE_P (outermode))
enum machine_mode innermode, unsigned int byte)
{
/* Little bit of sanity checking. */
- if (innermode == VOIDmode || outermode == VOIDmode
- || innermode == BLKmode || outermode == BLKmode)
- abort ();
+ gcc_assert (innermode != VOIDmode);
+ gcc_assert (outermode != VOIDmode);
+ gcc_assert (innermode != BLKmode);
+ gcc_assert (outermode != BLKmode);
- if (GET_MODE (op) != innermode
- && GET_MODE (op) != VOIDmode)
- abort ();
+ gcc_assert (GET_MODE (op) == innermode
+ || GET_MODE (op) == VOIDmode);
- if (byte % GET_MODE_SIZE (outermode)
- || byte >= GET_MODE_SIZE (innermode))
- abort ();
+ gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
+ gcc_assert (byte < GET_MODE_SIZE (innermode));
if (outermode == innermode && !byte)
return op;
{
enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
int final_offset = byte + SUBREG_BYTE (op);
- rtx new;
+ rtx newx;
if (outermode == innermostmode
&& byte == 0 && SUBREG_BYTE (op) == 0)
}
/* Recurse for further possible simplifications. */
- new = simplify_subreg (outermode, SUBREG_REG (op),
- GET_MODE (SUBREG_REG (op)),
- final_offset);
- if (new)
- return new;
- return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
+ newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
+ final_offset);
+ if (newx)
+ return newx;
+ if (validate_subreg (outermode, innermostmode,
+ SUBREG_REG (op), final_offset))
+ return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
+ return NULL_RTX;
}
/* SUBREG of a hard register => just change the register number
frame, or argument pointer, leave this as a SUBREG. */
if (REG_P (op)
- && (! REG_FUNCTION_VALUE_P (op)
- || ! rtx_equal_function_value_matters)
&& REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
&& ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
&& subreg_offset_representable_p (REGNO (op), innermode,
byte, outermode))
{
- rtx tem = gen_rtx_SUBREG (outermode, op, byte);
- int final_regno = subreg_hard_regno (tem, 0);
+ unsigned int regno = REGNO (op);
+ unsigned int final_regno
+ = regno + subreg_regno_offset (regno, innermode, byte, outermode);
/* ??? We do allow it if the current REG is not valid for
its mode. This is a kludge to work around how float/complex
arguments are passed on 32-bit SPARC and should be fixed. */
if (HARD_REGNO_MODE_OK (final_regno, outermode)
- || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
+ || ! HARD_REGNO_MODE_OK (regno, innermode))
{
rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
SUBREG with it. Don't do this if the MEM has a mode-dependent address
or if we would be widening it. */
- if (GET_CODE (op) == MEM
+ if (MEM_P (op)
&& ! mode_dependent_address_p (XEXP (op, 0))
/* Allow splitting of volatile memory references in case we don't
have instruction to move the whole thing. */
of real and imaginary part. */
if (GET_CODE (op) == CONCAT)
{
- int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
- rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
- unsigned int final_offset;
- rtx res;
+ unsigned int inner_size, final_offset;
+ rtx part, res;
+
+ inner_size = GET_MODE_UNIT_SIZE (innermode);
+ part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
+ final_offset = byte % inner_size;
+ if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
+ return NULL_RTX;
- final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
if (res)
return res;
- /* We can at least simplify it by referring directly to the
- relevant part. */
- return gen_rtx_SUBREG (outermode, part, final_offset);
+ if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
+ return gen_rtx_SUBREG (outermode, part, final_offset);
+ return NULL_RTX;
}
/* Optimize SUBREG truncations of zero and sign extended values. */
return CONST0_RTX (outermode);
}
+ /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
+ into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
+ the outer subreg is effectively a truncation to the original mode. */
+ if ((GET_CODE (op) == LSHIFTRT
+ || GET_CODE (op) == ASHIFTRT)
+ && SCALAR_INT_MODE_P (outermode)
+ /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
+ to avoid the possibility that an outer LSHIFTRT shifts by more
+ than the sign extension's sign_bit_copies and introduces zeros
+ into the high bits of the result. */
+ && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
+ && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
+ && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
+ && subreg_lsb_1 (outermode, innermode, byte) == 0)
+ return simplify_gen_binary (ASHIFTRT, outermode,
+ XEXP (XEXP (op, 0), 0), XEXP (op, 1));
+
+ /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
+ into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
+ the outer subreg is effectively a truncation to the original mode. */
+ if ((GET_CODE (op) == LSHIFTRT
+ || GET_CODE (op) == ASHIFTRT)
+ && SCALAR_INT_MODE_P (outermode)
+ && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
+ && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
+ && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
+ && subreg_lsb_1 (outermode, innermode, byte) == 0)
+ return simplify_gen_binary (LSHIFTRT, outermode,
+ XEXP (XEXP (op, 0), 0), XEXP (op, 1));
+
+ /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
+ into (ashift:QI (x:QI) C), where C is a suitable small constant and
+ the outer subreg is effectively a truncation to the original mode. */
+ if (GET_CODE (op) == ASHIFT
+ && SCALAR_INT_MODE_P (outermode)
+ && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
+ && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
+ && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
+ && subreg_lsb_1 (outermode, innermode, byte) == 0)
+ return simplify_gen_binary (ASHIFT, outermode,
+ XEXP (XEXP (op, 0), 0), XEXP (op, 1));
+
return NULL_RTX;
}
simplify_gen_subreg (enum machine_mode outermode, rtx op,
enum machine_mode innermode, unsigned int byte)
{
- rtx new;
- /* Little bit of sanity checking. */
- if (innermode == VOIDmode || outermode == VOIDmode
- || innermode == BLKmode || outermode == BLKmode)
- abort ();
+ rtx newx;
- if (GET_MODE (op) != innermode
- && GET_MODE (op) != VOIDmode)
- abort ();
+ newx = simplify_subreg (outermode, op, innermode, byte);
+ if (newx)
+ return newx;
- if (byte % GET_MODE_SIZE (outermode)
- || byte >= GET_MODE_SIZE (innermode))
- abort ();
-
- if (GET_CODE (op) == QUEUED)
+ if (GET_CODE (op) == SUBREG
+ || GET_CODE (op) == CONCAT
+ || GET_MODE (op) == VOIDmode)
return NULL_RTX;
- new = simplify_subreg (outermode, op, innermode, byte);
- if (new)
- return new;
+ if (validate_subreg (outermode, innermode, op, byte))
+ return gen_rtx_SUBREG (outermode, op, byte);
- if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
- return NULL_RTX;
-
- return gen_rtx_SUBREG (outermode, op, byte);
+ return NULL_RTX;
}
+
/* Simplify X, an rtx expression.
Return the simplified expression or NULL if no simplifications