/* RTL simplification functions for GNU compiler.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
- Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+ 2011 Free Software Foundation, Inc.
This file is part of GCC.
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
-#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
if (GET_MODE_CLASS (mode) != MODE_INT)
return false;
- width = GET_MODE_BITSIZE (mode);
+ width = GET_MODE_PRECISION (mode);
if (width == 0)
return false;
-
+
if (width <= HOST_BITS_PER_WIDE_INT
- && GET_CODE (x) == CONST_INT)
+ && CONST_INT_P (x))
val = INTVAL (x);
else if (width <= 2 * HOST_BITS_PER_WIDE_INT
&& GET_CODE (x) == CONST_DOUBLE
val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
+
+/* Test whether VAL is equal to the most significant bit of mode MODE
+ (after masking with the mode mask of MODE). Returns false if the
+ precision of MODE is too large to handle. */
+
+bool
+val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
+{
+ unsigned int width;
+
+ if (GET_MODE_CLASS (mode) != MODE_INT)
+ return false;
+
+ width = GET_MODE_PRECISION (mode);
+ if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
+ return false;
+
+ val &= GET_MODE_MASK (mode);
+ return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
+}
+
+/* Test whether the most significant bit of mode MODE is set in VAL.
+ Returns false if the precision of MODE is too large to handle. */
+bool
+val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
+{
+ unsigned int width;
+
+ if (GET_MODE_CLASS (mode) != MODE_INT)
+ return false;
+
+ width = GET_MODE_PRECISION (mode);
+ if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
+ return false;
+
+ val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
+ return val != 0;
+}
+
+/* Test whether the most significant bit of mode MODE is clear in VAL.
+ Returns false if the precision of MODE is too large to handle. */
+bool
+val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
+{
+ unsigned int width;
+
+ if (GET_MODE_CLASS (mode) != MODE_INT)
+ return false;
+
+ width = GET_MODE_PRECISION (mode);
+ if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
+ return false;
+
+ val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
+ return val == 0;
+}
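+
+/* A minimal worked illustration (not part of the patch; assumes a
+   32-bit SImode and HOST_BITS_PER_WIDE_INT >= 32): the sign bit is
+   1 << 31, so
+     val_signbit_p (SImode, 0x80000000)             == true
+     val_signbit_p (SImode, 0xffffffff)             == false
+     val_signbit_known_set_p (SImode, 0xffffffff)   == true
+     val_signbit_known_clear_p (SImode, 0x7fffffff) == true.  */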
\f
/* Make a binary operation by properly ordering the operands and
seeing if the expression folds. */
/* Split the address into a base and integer offset. */
if (GET_CODE (addr) == CONST
&& GET_CODE (XEXP (addr, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
{
offset = INTVAL (XEXP (XEXP (addr, 0), 1));
addr = XEXP (XEXP (addr, 0), 0);
return x;
}
\f
+/* Simplify a MEM based on its attributes. This is the default
+ delegitimize_address target hook, and it's recommended that every
+ overrider call it. */
+
+rtx
+delegitimize_mem_from_attrs (rtx x)
+{
+ /* MEMs without MEM_OFFSETs may have been offset, so we can't just
+ use their base addresses as equivalent. */
+ if (MEM_P (x)
+ && MEM_EXPR (x)
+ && MEM_OFFSET_KNOWN_P (x))
+ {
+ tree decl = MEM_EXPR (x);
+ enum machine_mode mode = GET_MODE (x);
+ HOST_WIDE_INT offset = 0;
+
+ switch (TREE_CODE (decl))
+ {
+ default:
+ decl = NULL;
+ break;
+
+ case VAR_DECL:
+ break;
+
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ case COMPONENT_REF:
+ case BIT_FIELD_REF:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ case VIEW_CONVERT_EXPR:
+ {
+ HOST_WIDE_INT bitsize, bitpos;
+ tree toffset;
+ int unsignedp = 0, volatilep = 0;
+
+ decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
+ &mode, &unsignedp, &volatilep, false);
+ if (bitsize != GET_MODE_BITSIZE (mode)
+ || (bitpos % BITS_PER_UNIT)
+ || (toffset && !host_integerp (toffset, 0)))
+ decl = NULL;
+ else
+ {
+ offset += bitpos / BITS_PER_UNIT;
+ if (toffset)
+ offset += TREE_INT_CST_LOW (toffset);
+ }
+ break;
+ }
+ }
+
+ if (decl
+ && mode == GET_MODE (x)
+ && TREE_CODE (decl) == VAR_DECL
+ && (TREE_STATIC (decl)
+ || DECL_THREAD_LOCAL_P (decl))
+ && DECL_RTL_SET_P (decl)
+ && MEM_P (DECL_RTL (decl)))
+ {
+ rtx newx;
+
+ offset += MEM_OFFSET (x);
+
+ newx = DECL_RTL (decl);
+
+ if (MEM_P (newx))
+ {
+ rtx n = XEXP (newx, 0), o = XEXP (x, 0);
+
+ /* Avoid creating a new MEM needlessly if we already had
+ the same address. We do if there's no OFFSET and the
+ old address X is identical to NEWX, or if X is of the
+ form (plus NEWX OFFSET), or the NEWX is of the form
+ (plus Y (const_int Z)) and X is that with the offset
+ added: (plus Y (const_int Z+OFFSET)). */
+ if (!((offset == 0
+ || (GET_CODE (o) == PLUS
+ && GET_CODE (XEXP (o, 1)) == CONST_INT
+ && (offset == INTVAL (XEXP (o, 1))
+ || (GET_CODE (n) == PLUS
+ && GET_CODE (XEXP (n, 1)) == CONST_INT
+ && (INTVAL (XEXP (n, 1)) + offset
+ == INTVAL (XEXP (o, 1)))
+ && (n = XEXP (n, 0))))
+ && (o = XEXP (o, 0))))
+ && rtx_equal_p (o, n)))
+ x = adjust_address_nv (newx, mode, offset);
+ }
+ else if (GET_MODE (x) == GET_MODE (newx)
+ && offset == 0)
+ x = newx;
+ }
+ }
+
+ return x;
+}
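+
+/* For instance (illustrative): a MEM whose MEM_EXPR is a static
+   VAR_DECL "v" and whose MEM_OFFSET is known can be rewritten in
+   terms of DECL_RTL (v) -- typically (mem (symbol_ref "v")) -- so
+   that a section-anchor-based address is turned back into a direct
+   reference to the variable.  */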
+\f
/* Make a unary operation by first seeing if it folds and otherwise making
the specified operation. */
return gen_rtx_fmt_ee (code, mode, op0, op1);
}
\f
-/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
- resulting RTX. Return a new RTX which is as simplified as possible. */
+/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
+ and simplify the result. If FN is non-NULL, call this callback on each
+ X, if it returns non-NULL, replace X with its return value and simplify the
+ result. */
rtx
-simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
+simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
+ rtx (*fn) (rtx, const_rtx, void *), void *data)
{
enum rtx_code code = GET_CODE (x);
enum machine_mode mode = GET_MODE (x);
enum machine_mode op_mode;
- rtx op0, op1, op2;
-
- /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
- to build a new expression substituting recursively. If we can't do
- anything, return our input. */
+ const char *fmt;
+ rtx op0, op1, op2, newx, op;
+ rtvec vec, newvec;
+ int i, j;
- if (x == old_rtx)
- return new_rtx;
+ if (__builtin_expect (fn != NULL, 0))
+ {
+ newx = fn (x, old_rtx, data);
+ if (newx)
+ return newx;
+ }
+ else if (rtx_equal_p (x, old_rtx))
+ return copy_rtx ((rtx) data);
switch (GET_RTX_CLASS (code))
{
case RTX_UNARY:
op0 = XEXP (x, 0);
op_mode = GET_MODE (op0);
- op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
if (op0 == XEXP (x, 0))
return x;
return simplify_gen_unary (code, mode, op0, op_mode);
case RTX_BIN_ARITH:
case RTX_COMM_ARITH:
- op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
- op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
+ op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
return x;
return simplify_gen_binary (code, mode, op0, op1);
op0 = XEXP (x, 0);
op1 = XEXP (x, 1);
op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
- op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
- op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
+ op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
return x;
return simplify_gen_relational (code, mode, op_mode, op0, op1);
case RTX_BITFIELD_OPS:
op0 = XEXP (x, 0);
op_mode = GET_MODE (op0);
- op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
- op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
- op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
+ op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
+ op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
case RTX_EXTRA:
- /* The only case we try to handle is a SUBREG. */
if (code == SUBREG)
{
- op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
if (op0 == SUBREG_REG (x))
return x;
op0 = simplify_gen_subreg (GET_MODE (x), op0,
case RTX_OBJ:
if (code == MEM)
{
- op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
if (op0 == XEXP (x, 0))
return x;
return replace_equiv_address_nv (x, op0);
}
else if (code == LO_SUM)
{
- op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
- op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
+ op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
/* (lo_sum (high x) x) -> x */
if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
return x;
return gen_rtx_LO_SUM (mode, op0, op1);
}
- else if (code == REG)
- {
- if (rtx_equal_p (x, old_rtx))
- return new_rtx;
- }
break;
default:
break;
}
- return x;
+
+ newx = x;
+ fmt = GET_RTX_FORMAT (code);
+ for (i = 0; fmt[i]; i++)
+ switch (fmt[i])
+ {
+ case 'E':
+ vec = XVEC (x, i);
+ newvec = XVEC (newx, i);
+ for (j = 0; j < GET_NUM_ELEM (vec); j++)
+ {
+ op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
+ old_rtx, fn, data);
+ if (op != RTVEC_ELT (vec, j))
+ {
+ if (newvec == vec)
+ {
+ newvec = shallow_copy_rtvec (vec);
+ if (x == newx)
+ newx = shallow_copy_rtx (x);
+ XVEC (newx, i) = newvec;
+ }
+ RTVEC_ELT (newvec, j) = op;
+ }
+ }
+ break;
+
+ case 'e':
+ if (XEXP (x, i))
+ {
+ op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
+ if (op != XEXP (x, i))
+ {
+ if (x == newx)
+ newx = shallow_copy_rtx (x);
+ XEXP (newx, i) = op;
+ }
+ }
+ break;
+ }
+ return newx;
+}
+
+/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
+ resulting RTX. Return a new RTX which is as simplified as possible. */
+
+rtx
+simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
+{
+ return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
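+
+/* Usage sketch for the callback form (illustrative only; the callback
+   name and register number are invented for the example):
+
+     static rtx
+     replace_reg_42 (rtx x, const_rtx old_rtx ATTRIBUTE_UNUSED,
+		     void *data)
+     {
+       if (REG_P (x) && REGNO (x) == 42)
+	 return copy_rtx ((rtx) data);
+       return NULL_RTX;	/* NULL means: recurse into X's operands.  */
+     }
+
+     tem = simplify_replace_fn_rtx (src, NULL_RTX, replace_reg_42, val);
+
+   A non-NULL return from the callback replaces X wholesale and stops
+   the walk at that subexpression.  */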
\f
/* Try to simplify a unary operation CODE whose output mode is to be
{
rtx trueop, tem;
- if (GET_CODE (op) == CONST)
- op = XEXP (op, 0);
-
trueop = avoid_constant_pool_reference (op);
tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
/* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
if (GET_CODE (op) == XOR
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (op, 1))
&& (temp = simplify_unary_operation (NOT, mode,
XEXP (op, 1), mode)) != 0)
return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
/* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
if (GET_CODE (op) == PLUS
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (op, 1))
&& mode_signbit_p (mode, XEXP (op, 1))
&& (temp = simplify_unary_operation (NOT, mode,
XEXP (op, 1), mode)) != 0)
/* (not (ashiftrt foo C)) where C is the number of bits in FOO
minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
so we can perform the above simplification. */
-
+
if (STORE_FLAG_VALUE == -1
&& GET_CODE (op) == ASHIFTRT
- && GET_CODE (XEXP (op, 1)) == CONST_INT
- && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+      && CONST_INT_P (XEXP (op, 1))
+ && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
return simplify_gen_relational (GE, mode, VOIDmode,
XEXP (op, 0), const0_rtx);
if (GET_CODE (op) == PLUS
&& XEXP (op, 1) == const1_rtx)
return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
-
+
/* Similarly, (neg (not X)) is (plus X 1). */
if (GET_CODE (op) == NOT)
return plus_constant (XEXP (op, 0), 1);
-
+
/* (neg (minus X Y)) can become (minus Y X). This transformation
isn't safe for modes with signed zeros, since if X and Y are
both +0, (minus Y X) is the same as (minus X Y). If the
&& !HONOR_SIGNED_ZEROS (mode)
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
-
+
if (GET_CODE (op) == PLUS
&& !HONOR_SIGNED_ZEROS (mode)
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
{
/* (neg (plus A C)) is simplified to (minus -C A). */
- if (GET_CODE (XEXP (op, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (op, 1))
|| GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	  if (temp)
	    return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
}
- /* (neg (mult A B)) becomes (mult (neg A) B).
+ /* (neg (mult A B)) becomes (mult A (neg B)).
This works even for floating-point values. */
if (GET_CODE (op) == MULT
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
{
- temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
- return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
+ temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
+ return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
}
/* NEG commutes with ASHIFT since it is multiplication. Only do
/* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
C is equal to the width of MODE minus 1. */
if (GET_CODE (op) == ASHIFTRT
- && GET_CODE (XEXP (op, 1)) == CONST_INT
- && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && CONST_INT_P (XEXP (op, 1))
+ && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
return simplify_gen_binary (LSHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
/* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
C is equal to the width of MODE minus 1. */
if (GET_CODE (op) == LSHIFTRT
- && GET_CODE (XEXP (op, 1)) == CONST_INT
- && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && CONST_INT_P (XEXP (op, 1))
+ && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
return simplify_gen_binary (ASHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
-
+
/* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
if (GET_CODE (op) == XOR
&& XEXP (op, 1) == const1_rtx
&& SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
{
enum machine_mode inner = GET_MODE (XEXP (op, 0));
- int isize = GET_MODE_BITSIZE (inner);
+ int isize = GET_MODE_PRECISION (inner);
if (STORE_FLAG_VALUE == 1)
{
temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
GEN_INT (isize - 1));
if (mode == inner)
return temp;
- if (GET_MODE_BITSIZE (mode) > isize)
+ if (GET_MODE_PRECISION (mode) > isize)
return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
return simplify_gen_unary (TRUNCATE, mode, temp, inner);
}
GEN_INT (isize - 1));
if (mode == inner)
return temp;
- if (GET_MODE_BITSIZE (mode) > isize)
+ if (GET_MODE_PRECISION (mode) > isize)
return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
return simplify_gen_unary (TRUNCATE, mode, temp, inner);
}
replace the TRUNCATE with a SUBREG. Note that this is also
valid if TRULY_NOOP_TRUNCATION is false for the corresponding
modes we just have to apply a different definition for
- truncation. But don't do this for an (LSHIFTRT (MULT ...))
+ truncation. But don't do this for an (LSHIFTRT (MULT ...))
since this will cause problems with the umulXi3_highpart
patterns. */
- if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
- GET_MODE_BITSIZE (GET_MODE (op)))
+ if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
? (num_sign_bit_copies (op, GET_MODE (op))
- > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
- - GET_MODE_BITSIZE (mode)))
+ > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
+ - GET_MODE_PRECISION (mode)))
: truncated_to_mode (mode, op))
&& ! (GET_CODE (op) == LSHIFTRT
&& GET_CODE (XEXP (op, 0)) == MULT))
STORE_FLAG_VALUE permits. This is like the previous test,
but it works even if the comparison is done in a mode larger
than HOST_BITS_PER_WIDE_INT. */
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ if (HWI_COMPUTABLE_MODE_P (mode)
&& COMPARISON_P (op)
- && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
+ && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
return rtl_hooks.gen_lowpart_no_emit (mode, op);
break;
&& (flag_unsafe_math_optimizations
|| (SCALAR_FLOAT_MODE_P (GET_MODE (op))
&& ((unsigned)significand_size (GET_MODE (op))
- >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
+ >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
- num_sign_bit_copies (XEXP (op, 0),
GET_MODE (XEXP (op, 0))))))))
return simplify_gen_unary (FLOAT, mode,
|| (GET_CODE (op) == FLOAT
&& SCALAR_FLOAT_MODE_P (GET_MODE (op))
&& ((unsigned)significand_size (GET_MODE (op))
- >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
+ >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
- num_sign_bit_copies (XEXP (op, 0),
GET_MODE (XEXP (op, 0)))))))
return simplify_gen_unary (GET_CODE (op), mode,
/* If operand is something known to be positive, ignore the ABS. */
if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
- || ((GET_MODE_BITSIZE (GET_MODE (op))
- <= HOST_BITS_PER_WIDE_INT)
- && ((nonzero_bits (op, GET_MODE (op))
- & ((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
- == 0)))
+ || val_signbit_known_clear_p (GET_MODE (op),
+ nonzero_bits (op, GET_MODE (op))))
return op;
/* If operand is known to be only -1 or 0, convert ABS to NEG. */
- if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
+ if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
return gen_rtx_NEG (mode, op);
break;
&& GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
return XEXP (op, 0);
+ /* Extending a widening multiplication should be canonicalized to
+ a wider widening multiplication. */
+ if (GET_CODE (op) == MULT)
+ {
+ rtx lhs = XEXP (op, 0);
+ rtx rhs = XEXP (op, 1);
+ enum rtx_code lcode = GET_CODE (lhs);
+ enum rtx_code rcode = GET_CODE (rhs);
+
+ /* Widening multiplies usually extend both operands, but sometimes
+ they use a shift to extract a portion of a register. */
+ if ((lcode == SIGN_EXTEND
+ || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
+ && (rcode == SIGN_EXTEND
+ || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
+ {
+ enum machine_mode lmode = GET_MODE (lhs);
+ enum machine_mode rmode = GET_MODE (rhs);
+ int bits;
+
+ if (lcode == ASHIFTRT)
+ /* Number of bits not shifted off the end. */
+ bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
+ else /* lcode == SIGN_EXTEND */
+ /* Size of inner mode. */
+ bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
+
+ if (rcode == ASHIFTRT)
+ bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
+ else /* rcode == SIGN_EXTEND */
+ bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
+
+	    /* We can only widen multiplies if the result is mathematically
+	       equivalent, i.e. if overflow was impossible.  */
+ if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
+ return simplify_gen_binary
+ (MULT, mode,
+ simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
+ simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
+ }
+ }
+
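+      /* For example (illustrative), with 16-bit HImode inputs:
+	   (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI a))
+				    (sign_extend:SI (reg:HI b))))
+	 has bits == 16 + 16 <= 32, so it is rewritten as a DImode
+	 multiply of DImode sign_extends of the two HImode values.  */
+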
/* Check for a sign extension of a subreg of a promoted
variable, where the promotion is sign-extended, and the
target mode is the same as the variable's promotion. */
&& GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
return rtl_hooks.gen_lowpart_no_emit (mode, op);
+ /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
+ (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
+ if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
+ {
+ gcc_assert (GET_MODE_BITSIZE (mode)
+ > GET_MODE_BITSIZE (GET_MODE (op)));
+ return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)));
+ }
+
+ /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
+ is (sign_extend:M (subreg:O <X>)) if there is mode with
+ GET_MODE_BITSIZE (N) - I bits.
+ (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
+ is similarly (zero_extend:M (subreg:O <X>)). */
+ if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
+ && GET_CODE (XEXP (op, 0)) == ASHIFT
+ && CONST_INT_P (XEXP (op, 1))
+ && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
+ && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
+ {
+ enum machine_mode tmode
+ = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
+ - INTVAL (XEXP (op, 1)), MODE_INT, 1);
+ gcc_assert (GET_MODE_BITSIZE (mode)
+ > GET_MODE_BITSIZE (GET_MODE (op)));
+ if (tmode != BLKmode)
+ {
+ rtx inner =
+ rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
+ return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
+ ? SIGN_EXTEND : ZERO_EXTEND,
+ mode, inner, tmode);
+ }
+ }
+
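+      /* E.g. (illustrative): in SImode with I == 24,
+	   (sign_extend:DI (ashiftrt:SI (ashift:SI X (const_int 24))
+					(const_int 24)))
+	 keeps 32 - 24 == 8 significant bits, so tmode is QImode and the
+	 result is a DImode sign_extend of the QImode lowpart of X.  */
+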
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
- if (! POINTERS_EXTEND_UNSIGNED
+  /* As we do not know which address space the pointer is referring to,
+ we can do this only if the target does not support different pointer
+ or address modes depending on the address space. */
+ if (target_default_pointer_address_modes_p ()
+ && ! POINTERS_EXTEND_UNSIGNED
&& mode == Pmode && GET_MODE (op) == ptr_mode
&& (CONSTANT_P (op)
|| (GET_CODE (op) == SUBREG
&& GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
return rtl_hooks.gen_lowpart_no_emit (mode, op);
+ /* Extending a widening multiplication should be canonicalized to
+ a wider widening multiplication. */
+ if (GET_CODE (op) == MULT)
+ {
+ rtx lhs = XEXP (op, 0);
+ rtx rhs = XEXP (op, 1);
+ enum rtx_code lcode = GET_CODE (lhs);
+ enum rtx_code rcode = GET_CODE (rhs);
+
+ /* Widening multiplies usually extend both operands, but sometimes
+ they use a shift to extract a portion of a register. */
+ if ((lcode == ZERO_EXTEND
+ || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
+ && (rcode == ZERO_EXTEND
+ || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
+ {
+ enum machine_mode lmode = GET_MODE (lhs);
+ enum machine_mode rmode = GET_MODE (rhs);
+ int bits;
+
+ if (lcode == LSHIFTRT)
+ /* Number of bits not shifted off the end. */
+ bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
+ else /* lcode == ZERO_EXTEND */
+ /* Size of inner mode. */
+ bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
+
+ if (rcode == LSHIFTRT)
+ bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
+ else /* rcode == ZERO_EXTEND */
+ bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
+
+	    /* We can only widen multiplies if the result is mathematically
+	       equivalent, i.e. if overflow was impossible.  */
+ if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
+ return simplify_gen_binary
+ (MULT, mode,
+ simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
+ simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
+ }
+ }
+
+ /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
+ if (GET_CODE (op) == ZERO_EXTEND)
+ return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)));
+
+ /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
+ is (zero_extend:M (subreg:O <X>)) if there is mode with
+ GET_MODE_BITSIZE (N) - I bits. */
+ if (GET_CODE (op) == LSHIFTRT
+ && GET_CODE (XEXP (op, 0)) == ASHIFT
+ && CONST_INT_P (XEXP (op, 1))
+ && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
+ && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
+ {
+ enum machine_mode tmode
+ = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
+ - INTVAL (XEXP (op, 1)), MODE_INT, 1);
+ if (tmode != BLKmode)
+ {
+ rtx inner =
+ rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
+ return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
+ }
+ }
+
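+      /* E.g. (illustrative): (zero_extend:SI
+	   (lshiftrt:HI (ashift:HI X (const_int 8)) (const_int 8)))
+	 keeps 16 - 8 == 8 bits, so it becomes a zero_extend:SI of the
+	 QImode lowpart of X.  */
+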
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
- if (POINTERS_EXTEND_UNSIGNED > 0
+  /* As we do not know which address space the pointer is referring to,
+ we can do this only if the target does not support different pointer
+ or address modes depending on the address space. */
+ if (target_default_pointer_address_modes_p ()
+ && POINTERS_EXTEND_UNSIGNED > 0
&& mode == Pmode && GET_MODE (op) == ptr_mode
&& (CONSTANT_P (op)
|| (GET_CODE (op) == SUBREG
default:
break;
}
-
+
return 0;
}
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
rtx op, enum machine_mode op_mode)
{
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
+ unsigned int op_width = GET_MODE_PRECISION (op_mode);
if (code == VEC_DUPLICATE)
{
gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
(GET_MODE (op)));
}
- if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
+ if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
|| GET_CODE (op) == CONST_VECTOR)
{
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
such as FIX. At some point, this should be simplified. */
if (code == FLOAT && GET_MODE (op) == VOIDmode
- && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
+ && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
{
HOST_WIDE_INT hv, lv;
REAL_VALUE_TYPE d;
- if (GET_CODE (op) == CONST_INT)
+ if (CONST_INT_P (op))
lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
else
lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
}
else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
&& (GET_CODE (op) == CONST_DOUBLE
- || GET_CODE (op) == CONST_INT))
+ || CONST_INT_P (op)))
{
HOST_WIDE_INT hv, lv;
REAL_VALUE_TYPE d;
- if (GET_CODE (op) == CONST_INT)
+ if (CONST_INT_P (op))
lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
else
lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
if (hv < 0)
return 0;
}
- else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
+ else if (GET_MODE_PRECISION (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
;
else
hv = 0, lv &= GET_MODE_MASK (op_mode);
return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
}
- if (GET_CODE (op) == CONST_INT
+ if (CONST_INT_P (op)
&& width <= HOST_BITS_PER_WIDE_INT && width > 0)
{
HOST_WIDE_INT arg0 = INTVAL (op);
break;
case FFS:
- /* Don't use ffs here. Instead, get low order bit and then its
- number. If arg0 is zero, this will return 0, as desired. */
arg0 &= GET_MODE_MASK (mode);
- val = exact_log2 (arg0 & (- arg0)) + 1;
+ val = ffs_hwi (arg0);
break;
case CLZ:
if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
;
else
- val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
+ val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
+ break;
+
+ case CLRSB:
+ arg0 &= GET_MODE_MASK (mode);
+ if (arg0 == 0)
+ val = GET_MODE_PRECISION (mode) - 1;
+ else if (arg0 >= 0)
+ val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
+ else if (arg0 < 0)
+ val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
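+	  /* Worked values (illustrative, 32-bit mode): CLRSB (0) is 31,
+	     and CLRSB (1) is 32 - floor_log2 (1) - 2 == 30, i.e. the
+	     number of redundant copies of the sign bit.  */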
break;
case CTZ:
/* Even if the value at zero is undefined, we have to come
up with some replacement. Seems good enough. */
if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
- val = GET_MODE_BITSIZE (mode);
+ val = GET_MODE_PRECISION (mode);
}
else
- val = exact_log2 (arg0 & -arg0);
+ val = ctz_hwi (arg0);
break;
case POPCOUNT:
/* When zero-extending a CONST_INT, we need to know its
original mode. */
gcc_assert (op_mode != VOIDmode);
- if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
+ if (op_width == HOST_BITS_PER_WIDE_INT)
{
/* If we were really extending the mode,
we would have to distinguish between zero-extension
and sign-extension. */
- gcc_assert (width == GET_MODE_BITSIZE (op_mode));
+ gcc_assert (width == op_width);
val = arg0;
}
else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
- val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
+ val = arg0 & GET_MODE_MASK (op_mode);
else
return 0;
break;
case SIGN_EXTEND:
if (op_mode == VOIDmode)
op_mode = mode;
- if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
+ op_width = GET_MODE_PRECISION (op_mode);
+ if (op_width == HOST_BITS_PER_WIDE_INT)
{
/* If we were really extending the mode,
we would have to distinguish between zero-extension
and sign-extension. */
- gcc_assert (width == GET_MODE_BITSIZE (op_mode));
+ gcc_assert (width == op_width);
val = arg0;
}
- else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
+ else if (op_width < HOST_BITS_PER_WIDE_INT)
{
- val
- = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
- if (val
- & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
- val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
+ val = arg0 & GET_MODE_MASK (op_mode);
+ if (val_signbit_known_set_p (op_mode, val))
+ val |= ~GET_MODE_MASK (op_mode);
}
else
return 0;
case US_TRUNCATE:
case SS_NEG:
case US_NEG:
+ case SS_ABS:
return 0;
default:
else if (GET_MODE (op) == VOIDmode
&& width <= HOST_BITS_PER_WIDE_INT * 2
&& (GET_CODE (op) == CONST_DOUBLE
- || GET_CODE (op) == CONST_INT))
+ || CONST_INT_P (op)))
{
unsigned HOST_WIDE_INT l1, lv;
HOST_WIDE_INT h1, hv;
case FFS:
hv = 0;
- if (l1 == 0)
- {
- if (h1 == 0)
- lv = 0;
- else
- lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
- }
+ if (l1 != 0)
+ lv = ffs_hwi (l1);
+ else if (h1 != 0)
+ lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
else
- lv = exact_log2 (l1 & -l1) + 1;
+ lv = 0;
break;
case CLZ:
hv = 0;
if (h1 != 0)
- lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
+ lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
- HOST_BITS_PER_WIDE_INT;
else if (l1 != 0)
- lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
+ lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
- lv = GET_MODE_BITSIZE (mode);
+ lv = GET_MODE_PRECISION (mode);
break;
case CTZ:
hv = 0;
if (l1 != 0)
- lv = exact_log2 (l1 & -l1);
+ lv = ctz_hwi (l1);
else if (h1 != 0)
- lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
+ lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
- lv = GET_MODE_BITSIZE (mode);
+ lv = GET_MODE_PRECISION (mode);
break;
case POPCOUNT:
case ZERO_EXTEND:
gcc_assert (op_mode != VOIDmode);
- if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
+ if (op_width > HOST_BITS_PER_WIDE_INT)
return 0;
hv = 0;
case SIGN_EXTEND:
if (op_mode == VOIDmode
- || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
+ || op_width > HOST_BITS_PER_WIDE_INT)
return 0;
else
{
lv = l1 & GET_MODE_MASK (op_mode);
- if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
- && (lv & ((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
- lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
+ if (val_signbit_known_set_p (op_mode, lv))
+ lv |= ~GET_MODE_MASK (op_mode);
hv = HWI_SIGN_EXTEND (lv);
}
}
else if (GET_CODE (op) == CONST_DOUBLE
- && SCALAR_FLOAT_MODE_P (mode))
+ && SCALAR_FLOAT_MODE_P (mode)
+ && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
{
REAL_VALUE_TYPE d, t;
REAL_VALUE_FROM_CONST_DOUBLE (d, op);
d = t;
break;
case ABS:
- d = REAL_VALUE_ABS (d);
+ d = real_value_abs (&d);
break;
case NEG:
- d = REAL_VALUE_NEGATE (d);
+ d = real_value_negate (&d);
break;
case FLOAT_TRUNCATE:
d = real_value_truncate (mode, d);
break;
case FLOAT_EXTEND:
- /* All this does is change the mode. */
+ /* All this does is change the mode, unless changing
+ mode class. */
+ if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
+ real_convert (&d, mode, &d);
break;
case FIX:
real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
/* Test against the signed lower bound. */
if (width > HOST_BITS_PER_WIDE_INT)
{
- th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
+ th = (unsigned HOST_WIDE_INT) (-1)
+ << (width - HOST_BITS_PER_WIDE_INT - 1);
tl = 0;
}
else
{
th = -1;
- tl = (HOST_WIDE_INT) -1 << (width - 1);
+ tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
}
real_from_integer (&t, VOIDmode, tl, th, 0);
if (REAL_VALUES_LESS (x, t))
{
rtx tem, reversed, opleft, opright;
HOST_WIDE_INT val;
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
/* Even if we can't compute a constant result,
there are some cases worth simplifying. */
if ((GET_CODE (op0) == CONST
|| GET_CODE (op0) == SYMBOL_REF
|| GET_CODE (op0) == LABEL_REF)
- && GET_CODE (op1) == CONST_INT)
+ && CONST_INT_P (op1))
return plus_constant (op0, INTVAL (op1));
else if ((GET_CODE (op1) == CONST
|| GET_CODE (op1) == SYMBOL_REF
|| GET_CODE (op1) == LABEL_REF)
- && GET_CODE (op0) == CONST_INT)
+ && CONST_INT_P (op0))
return plus_constant (op1, INTVAL (op0));
/* See if this is something like X * C - X or vice versa or
if (SCALAR_INT_MODE_P (mode))
{
- HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
- unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
+ double_int coeff0, coeff1;
rtx lhs = op0, rhs = op1;
+ coeff0 = double_int_one;
+ coeff1 = double_int_one;
+
if (GET_CODE (lhs) == NEG)
{
- coeff0l = -1;
- coeff0h = -1;
+ coeff0 = double_int_minus_one;
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == MULT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (lhs, 1)))
{
- coeff0l = INTVAL (XEXP (lhs, 1));
- coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
+ coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == ASHIFT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT
- && INTVAL (XEXP (lhs, 1)) >= 0
+ && CONST_INT_P (XEXP (lhs, 1))
+ && INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
- coeff0h = 0;
+ coeff0 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
if (GET_CODE (rhs) == NEG)
{
- coeff1l = -1;
- coeff1h = -1;
+ coeff1 = double_int_minus_one;
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == MULT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (rhs, 1)))
{
- coeff1l = INTVAL (XEXP (rhs, 1));
- coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
+ coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == ASHIFT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (rhs, 1))
&& INTVAL (XEXP (rhs, 1)) >= 0
&& INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
- coeff1h = 0;
+ coeff1 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (rhs, 1)));
rhs = XEXP (rhs, 0);
}
{
rtx orig = gen_rtx_PLUS (mode, op0, op1);
rtx coeff;
- unsigned HOST_WIDE_INT l;
- HOST_WIDE_INT h;
+ double_int val;
bool speed = optimize_function_for_speed_p (cfun);
- add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
- coeff = immed_double_const (l, h, mode);
+ val = double_int_add (coeff0, coeff1);
+ coeff = immed_double_int_const (val, mode);
tem = simplify_gen_binary (MULT, mode, lhs, coeff);
- return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
+ return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
? tem : 0;
}
}
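
/* For example (illustrative): if op0 is (mult x 4) and op1 is
   (ashift x 3), then coeff0 == 4 and coeff1 == 8, so the candidate
   replacement is (mult x 12); it is kept only when set_src_cost says
   it is no more expensive than the original PLUS.  */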
/* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
- if ((GET_CODE (op1) == CONST_INT
+ if ((CONST_INT_P (op1)
|| GET_CODE (op1) == CONST_DOUBLE)
&& GET_CODE (op0) == XOR
- && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (CONST_INT_P (XEXP (op0, 1))
|| GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
&& mode_signbit_p (mode, op1))
return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
break;
case COMPARE:
-#ifdef HAVE_cc0
- /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
- using cc0, in which case we want to leave it as a COMPARE
- so we can distinguish it from a register-register-copy.
-
- In IEEE floating point, x-0 is not the same as x. */
- if (!(HONOR_SIGNED_ZEROS (mode)
- && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
- && trueop1 == CONST0_RTX (mode))
- return op0;
-#endif
-
/* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
|| (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
if (SCALAR_INT_MODE_P (mode))
{
- HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
- unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
+ double_int coeff0, negcoeff1;
rtx lhs = op0, rhs = op1;
+ coeff0 = double_int_one;
+ negcoeff1 = double_int_minus_one;
+
if (GET_CODE (lhs) == NEG)
{
- coeff0l = -1;
- coeff0h = -1;
+ coeff0 = double_int_minus_one;
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == MULT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (lhs, 1)))
{
- coeff0l = INTVAL (XEXP (lhs, 1));
- coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
+ coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == ASHIFT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (lhs, 1))
&& INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
- coeff0h = 0;
+ coeff0 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
if (GET_CODE (rhs) == NEG)
{
- negcoeff1l = 1;
- negcoeff1h = 0;
+ negcoeff1 = double_int_one;
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == MULT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (rhs, 1)))
{
- negcoeff1l = -INTVAL (XEXP (rhs, 1));
- negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
+ negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == ASHIFT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (rhs, 1))
&& INTVAL (XEXP (rhs, 1)) >= 0
&& INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
- negcoeff1h = -1;
+ negcoeff1 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (rhs, 1)));
+ negcoeff1 = double_int_neg (negcoeff1);
rhs = XEXP (rhs, 0);
}
{
rtx orig = gen_rtx_MINUS (mode, op0, op1);
rtx coeff;
- unsigned HOST_WIDE_INT l;
- HOST_WIDE_INT h;
+ double_int val;
bool speed = optimize_function_for_speed_p (cfun);
- add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
- coeff = immed_double_const (l, h, mode);
+ val = double_int_add (coeff0, negcoeff1);
+ coeff = immed_double_int_const (val, mode);
tem = simplify_gen_binary (MULT, mode, lhs, coeff);
- return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
+ return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
? tem : 0;
}
}
/* (-x - c) may be simplified as (-c - x). */
if (GET_CODE (op0) == NEG
- && (GET_CODE (op1) == CONST_INT
+ && (CONST_INT_P (op1)
|| GET_CODE (op1) == CONST_DOUBLE))
{
tem = simplify_unary_operation (NEG, mode, op1, mode);
}
/* Don't let a relocatable value get a negative coeff. */
- if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
+ if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
return simplify_gen_binary (PLUS, mode,
op0,
neg_const_int (mode, op1));
if (trueop1 == constm1_rtx)
return simplify_gen_unary (NEG, mode, op0, mode);
+ if (GET_CODE (op0) == NEG)
+ {
+ rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
+	  /* If op1 is a MULT as well and simplify_unary_operation
+	     just moved the NEG to the second operand, simplify_gen_binary
+	     below could, through simplify_associative_operation, move
+	     the NEG around again and recurse endlessly.  */
+ if (temp
+ && GET_CODE (op1) == MULT
+ && GET_CODE (temp) == MULT
+ && XEXP (op1, 0) == XEXP (temp, 0)
+ && GET_CODE (XEXP (temp, 1)) == NEG
+ && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
+ temp = NULL_RTX;
+ if (temp)
+ return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
+ }
+ if (GET_CODE (op1) == NEG)
+ {
+ rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
+	  /* If op0 is a MULT as well and simplify_unary_operation
+	     just moved the NEG to the second operand, simplify_gen_binary
+	     below could, through simplify_associative_operation, move
+	     the NEG around again and recurse endlessly.  */
+ if (temp
+ && GET_CODE (op0) == MULT
+ && GET_CODE (temp) == MULT
+ && XEXP (op0, 0) == XEXP (temp, 0)
+ && GET_CODE (XEXP (temp, 1)) == NEG
+ && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
+ temp = NULL_RTX;
+ if (temp)
+ return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
+ }
+
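+      /* Illustrative case for the guards above: for
+	 (mult (neg A) (mult B C)), negating (mult B C) yields
+	 (mult B (neg C)); accepting that form back would let
+	 simplify_associative_operation bounce the NEG between the
+	 operands indefinitely, so it is rejected.  */
+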
/* Maybe simplify x * 0 to 0. The reduction is not valid if
x is NaN, since x * 0 is then also NaN. Nor is it valid
when the mode has signed zeros, since multiplying a negative
/* Convert multiply by constant power of two into shift unless
we are still generating RTL. This test is a kludge. */
- if (GET_CODE (trueop1) == CONST_INT
- && (val = exact_log2 (INTVAL (trueop1))) >= 0
+ if (CONST_INT_P (trueop1)
+ && (val = exact_log2 (UINTVAL (trueop1))) >= 0
/* If the mode is larger than the host word size, and the
uppermost bit is set, then this isn't a power of two due
to implicit sign extension. */
/* x*2 is x+x and x*(-1) is -x */
if (GET_CODE (trueop1) == CONST_DOUBLE
&& SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
+ && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
&& GET_MODE (op0) == mode)
{
REAL_VALUE_TYPE d;
break;
case IOR:
- if (trueop1 == const0_rtx)
+ if (trueop1 == CONST0_RTX (mode))
return op0;
- if (GET_CODE (trueop1) == CONST_INT
- && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
- == GET_MODE_MASK (mode)))
+ if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
return op0;
return constm1_rtx;
/* (ior A C) is C if all bits of A that might be nonzero are on in C. */
- if (GET_CODE (op1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
+ if (CONST_INT_P (op1)
+ && HWI_COMPUTABLE_MODE_P (mode)
+ && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
return op1;
-
+
/* Canonicalize (X & C1) | C2. */
if (GET_CODE (op0) == AND
- && GET_CODE (trueop1) == CONST_INT
- && GET_CODE (XEXP (op0, 1)) == CONST_INT)
+ && CONST_INT_P (trueop1)
+ && CONST_INT_P (XEXP (op0, 1)))
{
HOST_WIDE_INT mask = GET_MODE_MASK (mode);
HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
&& rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
- && GET_CODE (XEXP (opleft, 1)) == CONST_INT
- && GET_CODE (XEXP (opright, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (opleft, 1))
+ && CONST_INT_P (XEXP (opright, 1))
&& (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
/* Same, but for ashift that has been "simplified" to a wider mode
< GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
&& rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
SUBREG_REG (XEXP (opright, 0)))
- && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
- && GET_CODE (XEXP (opright, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
+ && CONST_INT_P (XEXP (opright, 1))
&& (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
return gen_rtx_ROTATE (mode, XEXP (opright, 0),
XEXP (SUBREG_REG (opleft), 1));
/* If we have (ior (and (X C1) C2)), simplify this by making
C1 as small as possible if C1 actually changes. */
- if (GET_CODE (op1) == CONST_INT
- && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ if (CONST_INT_P (op1)
+ && (HWI_COMPUTABLE_MODE_P (mode)
|| INTVAL (op1) > 0)
&& GET_CODE (op0) == AND
- && GET_CODE (XEXP (op0, 1)) == CONST_INT
- && GET_CODE (op1) == CONST_INT
- && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
+ && CONST_INT_P (XEXP (op0, 1))
+ && CONST_INT_P (op1)
+ && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
return simplify_gen_binary (IOR, mode,
simplify_gen_binary
(AND, mode, XEXP (op0, 0),
- GEN_INT (INTVAL (XEXP (op0, 1))
- & ~INTVAL (op1))),
+ GEN_INT (UINTVAL (XEXP (op0, 1))
+ & ~UINTVAL (op1))),
op1);
/* If OP0 is (ashiftrt (plus ...) C), it might actually be
the PLUS does not affect any of the bits in OP1: then we can do
the IOR as a PLUS and we can associate. This is valid if OP1
can be safely shifted left C bits. */
- if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
+ if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
&& GET_CODE (XEXP (op0, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
- && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
+ && CONST_INT_P (XEXP (op0, 1))
&& INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
{
int count = INTVAL (XEXP (op0, 1));
break;
case XOR:
- if (trueop1 == const0_rtx)
+ if (trueop1 == CONST0_RTX (mode))
return op0;
- if (GET_CODE (trueop1) == CONST_INT
- && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
- == GET_MODE_MASK (mode)))
+ if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
return simplify_gen_unary (NOT, mode, op0, mode);
if (rtx_equal_p (trueop0, trueop1)
&& ! side_effects_p (op0)
return CONST0_RTX (mode);
/* Canonicalize XOR of the most significant bit to PLUS. */
- if ((GET_CODE (op1) == CONST_INT
+ if ((CONST_INT_P (op1)
|| GET_CODE (op1) == CONST_DOUBLE)
&& mode_signbit_p (mode, op1))
return simplify_gen_binary (PLUS, mode, op0, op1);
/* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
- if ((GET_CODE (op1) == CONST_INT
+ if ((CONST_INT_P (op1)
|| GET_CODE (op1) == CONST_DOUBLE)
&& GET_CODE (op0) == PLUS
- && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (CONST_INT_P (XEXP (op0, 1))
|| GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
&& mode_signbit_p (mode, XEXP (op0, 1)))
return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
convert them into an IOR. This helps to detect rotation encoded
using those methods and possibly other simplifications. */
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ if (HWI_COMPUTABLE_MODE_P (mode)
&& (nonzero_bits (op0, mode)
& nonzero_bits (op1, mode)) == 0)
return (simplify_gen_binary (IOR, mode, op0, op1));
XEXP (op0, 1), mode),
op1);
+ /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
+ we can transform like this:
+ (A&B)^C == ~(A&B)&C | ~C&(A&B)
+ == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
+ == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
+ Attempt a few simplifications when B and C are both constants. */
+ if (GET_CODE (op0) == AND
+ && CONST_INT_P (op1)
+ && CONST_INT_P (XEXP (op0, 1)))
+ {
+ rtx a = XEXP (op0, 0);
+ rtx b = XEXP (op0, 1);
+ rtx c = op1;
+ HOST_WIDE_INT bval = INTVAL (b);
+ HOST_WIDE_INT cval = INTVAL (c);
+
+ rtx na_c
+ = simplify_binary_operation (AND, mode,
+ simplify_gen_unary (NOT, mode, a, mode),
+ c);
+ if ((~cval & bval) == 0)
+ {
+ /* Try to simplify ~A&C | ~B&C. */
+ if (na_c != NULL_RTX)
+ return simplify_gen_binary (IOR, mode, na_c,
+ GEN_INT (~bval & cval));
+ }
+ else
+ {
+ /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
+ if (na_c == const0_rtx)
+ {
+ rtx a_nc_b = simplify_gen_binary (AND, mode, a,
+ GEN_INT (~cval & bval));
+ return simplify_gen_binary (IOR, mode, a_nc_b,
+ GEN_INT (~bval & cval));
+ }
+ }
+ }
+
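+      /* Worked example (illustrative, eight-bit values): for
+	 (xor (and A 0x0f) 0xff) we have bval == 0x0f and cval == 0xff,
+	 so (~cval & bval) == 0 and the result is
+	 (ior (and (not A) 0xff) 0xf0), which matches
+	 (A & 0x0f) ^ 0xff == ~A | 0xf0 in eight bits.  */
+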
/* (xor (comparison foo bar) (const_int 1)) can become the reversed
comparison if STORE_FLAG_VALUE is 1. */
if (STORE_FLAG_VALUE == 1
if (STORE_FLAG_VALUE == 1
&& trueop1 == const1_rtx
&& GET_CODE (op0) == LSHIFTRT
- && GET_CODE (XEXP (op0, 1)) == CONST_INT
- && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && CONST_INT_P (XEXP (op0, 1))
+ && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
/* (xor (comparison foo bar) (const_int sign-bit))
when STORE_FLAG_VALUE is the sign bit. */
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
- == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
+ if (val_signbit_p (mode, STORE_FLAG_VALUE)
&& trueop1 == const_true_rtx
&& COMPARISON_P (op0)
&& (reversed = reversed_comparison (op0, mode)))
case AND:
if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
return trueop1;
- if (GET_CODE (trueop1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
+ return op0;
+ if (HWI_COMPUTABLE_MODE_P (mode))
{
HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
- HOST_WIDE_INT val1 = INTVAL (trueop1);
- /* If we are turning off bits already known off in OP0, we need
- not do an AND. */
- if ((nzop0 & ~val1) == 0)
- return op0;
+ HOST_WIDE_INT nzop1;
+ if (CONST_INT_P (trueop1))
+ {
+ HOST_WIDE_INT val1 = INTVAL (trueop1);
+ /* If we are turning off bits already known off in OP0, we need
+ not do an AND. */
+ if ((nzop0 & ~val1) == 0)
+ return op0;
+ }
+ nzop1 = nonzero_bits (trueop1, mode);
/* If we are clearing all the nonzero bits, the result is zero. */
- if ((val1 & nzop0) == 0 && !side_effects_p (op0))
+ if ((nzop1 & nzop0) == 0
+ && !side_effects_p (op0) && !side_effects_p (op1))
return CONST0_RTX (mode);
}
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
there are no nonzero bits of C outside of X's mode. */
if ((GET_CODE (op0) == SIGN_EXTEND
|| GET_CODE (op0) == ZERO_EXTEND)
- && GET_CODE (trueop1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && CONST_INT_P (trueop1)
+ && HWI_COMPUTABLE_MODE_P (mode)
&& (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
- & INTVAL (trueop1)) == 0)
+ & UINTVAL (trueop1)) == 0)
{
enum machine_mode imode = GET_MODE (XEXP (op0, 0));
tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
}
+ /* Transform (and (truncate X) C) into (truncate (and X C)). This way
+ we might be able to further simplify the AND with X and potentially
+ remove the truncation altogether. */
+ if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
+ {
+ rtx x = XEXP (op0, 0);
+ enum machine_mode xmode = GET_MODE (x);
+ tem = simplify_gen_binary (AND, xmode, x,
+ gen_int_mode (INTVAL (trueop1), xmode));
+ return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
+ }
+
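+      /* E.g. (illustrative): (and:QI (truncate:QI (reg:SI r))
+	 (const_int 15)) becomes (truncate:QI (and:SI (reg:SI r)
+	 (const_int 15))), where the wider AND may simplify further
+	 and possibly make the truncation disappear.  */
+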
/* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
if (GET_CODE (op0) == IOR
- && GET_CODE (trueop1) == CONST_INT
- && GET_CODE (XEXP (op0, 1)) == CONST_INT)
+ && CONST_INT_P (trueop1)
+ && CONST_INT_P (XEXP (op0, 1)))
{
HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
return simplify_gen_binary (IOR, mode,
and for - instead of + and/or ^ instead of |.
Also, if (N & M) == 0, then
(A +- N) & M -> A & M. */
- if (GET_CODE (trueop1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ~INTVAL (trueop1)
- && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
+ if (CONST_INT_P (trueop1)
+ && HWI_COMPUTABLE_MODE_P (mode)
+ && ~UINTVAL (trueop1)
+ && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
&& (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
{
rtx pmop[2];
pmop[0] = XEXP (op0, 0);
pmop[1] = XEXP (op0, 1);
- if (GET_CODE (pmop[1]) == CONST_INT
- && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
+ if (CONST_INT_P (pmop[1])
+ && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
return simplify_gen_binary (AND, mode, pmop[0], op1);
for (which = 0; which < 2; which++)
switch (GET_CODE (tem))
{
case AND:
- if (GET_CODE (XEXP (tem, 1)) == CONST_INT
- && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
- == INTVAL (trueop1))
+ if (CONST_INT_P (XEXP (tem, 1))
+ && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
+ == UINTVAL (trueop1))
pmop[which] = XEXP (tem, 0);
break;
case IOR:
case XOR:
- if (GET_CODE (XEXP (tem, 1)) == CONST_INT
- && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
+ if (CONST_INT_P (XEXP (tem, 1))
+ && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
pmop[which] = XEXP (tem, 0);
break;
default:
if (trueop1 == CONST1_RTX (mode))
return rtl_hooks.gen_lowpart_no_emit (mode, op0);
/* Convert divide by power of two into shift. */
- if (GET_CODE (trueop1) == CONST_INT
- && (val = exact_log2 (INTVAL (trueop1))) > 0)
+ if (CONST_INT_P (trueop1)
+ && (val = exact_log2 (UINTVAL (trueop1))) > 0)
return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
break;
}
}
}
- else
+ else if (SCALAR_INT_MODE_P (mode))
{
/* 0/x is 0 (or x&0 if x has side-effects). */
- if (trueop0 == CONST0_RTX (mode))
+ if (trueop0 == CONST0_RTX (mode)
+ && !cfun->can_throw_non_call_exceptions)
{
if (side_effects_p (op1))
return simplify_gen_binary (AND, mode, op1, trueop0);
return CONST0_RTX (mode);
}
/* Implement modulus by power of two as AND. */
- if (GET_CODE (trueop1) == CONST_INT
- && exact_log2 (INTVAL (trueop1)) > 0)
+ if (CONST_INT_P (trueop1)
+ && exact_log2 (UINTVAL (trueop1)) > 0)
return simplify_gen_binary (AND, mode, op0,
GEN_INT (INTVAL (op1) - 1));
break;
if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
return op0;
/* Rotating ~0 always results in ~0. */
- if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
- && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
+ if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
+ && UINTVAL (trueop0) == GET_MODE_MASK (mode)
&& ! side_effects_p (op1))
return op0;
canonicalize_shift:
- if (SHIFT_COUNT_TRUNCATED && GET_CODE (op1) == CONST_INT)
+ if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
{
val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
if (val != INTVAL (op1))
return op0;
/* Optimize (lshiftrt (clz X) C) as (eq X 0). */
if (GET_CODE (op0) == CLZ
- && GET_CODE (trueop1) == CONST_INT
+ && CONST_INT_P (trueop1)
&& STORE_FLAG_VALUE == 1
&& INTVAL (trueop1) < (HOST_WIDE_INT)width)
{
unsigned HOST_WIDE_INT zero_val = 0;
if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
- && zero_val == GET_MODE_BITSIZE (imode)
+ && zero_val == GET_MODE_PRECISION (imode)
&& INTVAL (trueop1) == exact_log2 (zero_val))
return simplify_gen_relational (EQ, mode, imode,
XEXP (op0, 0), const0_rtx);
case SMIN:
if (width <= HOST_BITS_PER_WIDE_INT
- && GET_CODE (trueop1) == CONST_INT
- && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
+ && mode_signbit_p (mode, trueop1)
&& ! side_effects_p (op0))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
case SMAX:
if (width <= HOST_BITS_PER_WIDE_INT
- && GET_CODE (trueop1) == CONST_INT
- && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
- == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
+ && CONST_INT_P (trueop1)
+ && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
&& ! side_effects_p (op0))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
gcc_assert (GET_CODE (trueop1) == PARALLEL);
gcc_assert (XVECLEN (trueop1, 0) == 1);
- gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
+ gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
if (GET_CODE (trueop0) == CONST_VECTOR)
return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
return tmp;
}
+ if (GET_CODE (trueop0) == VEC_DUPLICATE
+ && GET_MODE (XEXP (trueop0, 0)) == mode)
+ return XEXP (trueop0, 0);
}
else
{
{
rtx x = XVECEXP (trueop1, 0, i);
- gcc_assert (GET_CODE (x) == CONST_INT);
+ gcc_assert (CONST_INT_P (x));
RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
INTVAL (x));
}
}
if (XVECLEN (trueop1, 0) == 1
- && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
+ && CONST_INT_P (XVECEXP (trueop1, 0, 0))
&& GET_CODE (trueop0) == VEC_CONCAT)
{
rtx vec = trueop0;
gcc_assert (GET_MODE_INNER (mode) == op1_mode);
if ((GET_CODE (trueop0) == CONST_VECTOR
- || GET_CODE (trueop0) == CONST_INT
+ || CONST_INT_P (trueop0)
|| GET_CODE (trueop0) == CONST_DOUBLE)
&& (GET_CODE (trueop1) == CONST_VECTOR
- || GET_CODE (trueop1) == CONST_INT
+ || CONST_INT_P (trueop1)
|| GET_CODE (trueop1) == CONST_DOUBLE))
{
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
{
HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
HOST_WIDE_INT val;
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
if (VECTOR_MODE_P (mode)
&& code != VEC_CONCAT
/* We can fold some multi-word operations. */
if (GET_MODE_CLASS (mode) == MODE_INT
- && width == HOST_BITS_PER_WIDE_INT * 2
- && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
- && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
+ && width == HOST_BITS_PER_DOUBLE_INT
+ && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
+ && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
{
- unsigned HOST_WIDE_INT l1, l2, lv, lt;
- HOST_WIDE_INT h1, h2, hv, ht;
+ double_int o0, o1, res, tmp;
- if (GET_CODE (op0) == CONST_DOUBLE)
- l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
- else
- l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
-
- if (GET_CODE (op1) == CONST_DOUBLE)
- l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
- else
- l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
+ o0 = rtx_to_double_int (op0);
+ o1 = rtx_to_double_int (op1);
switch (code)
{
case MINUS:
/* A - B == A + (-B). */
- neg_double (l2, h2, &lv, &hv);
- l2 = lv, h2 = hv;
+ o1 = double_int_neg (o1);
/* Fall through.... */
case PLUS:
- add_double (l1, h1, l2, h2, &lv, &hv);
+ res = double_int_add (o0, o1);
break;
case MULT:
- mul_double (l1, h1, l2, h2, &lv, &hv);
+ res = double_int_mul (o0, o1);
break;
case DIV:
- if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
- &lv, &hv, <, &ht))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 0,
+ o0.low, o0.high, o1.low, o1.high,
+ &res.low, &res.high,
+ &tmp.low, &tmp.high))
return 0;
break;
case MOD:
- if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
- <, &ht, &lv, &hv))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 0,
+ o0.low, o0.high, o1.low, o1.high,
+ &tmp.low, &tmp.high,
+ &res.low, &res.high))
return 0;
break;
case UDIV:
- if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
- &lv, &hv, <, &ht))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 1,
+ o0.low, o0.high, o1.low, o1.high,
+ &res.low, &res.high,
+ &tmp.low, &tmp.high))
return 0;
break;
case UMOD:
- if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
- <, &ht, &lv, &hv))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 1,
+ o0.low, o0.high, o1.low, o1.high,
+ &tmp.low, &tmp.high,
+ &res.low, &res.high))
return 0;
break;
case AND:
- lv = l1 & l2, hv = h1 & h2;
+ res = double_int_and (o0, o1);
break;
case IOR:
- lv = l1 | l2, hv = h1 | h2;
+ res = double_int_ior (o0, o1);
break;
case XOR:
- lv = l1 ^ l2, hv = h1 ^ h2;
+ res = double_int_xor (o0, o1);
break;
case SMIN:
- if (h1 < h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- < (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_smin (o0, o1);
break;
case SMAX:
- if (h1 > h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- > (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_smax (o0, o1);
break;
case UMIN:
- if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- < (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_umin (o0, o1);
break;
case UMAX:
- if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- > (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_umax (o0, o1);
break;
case LSHIFTRT: case ASHIFTRT:
case ASHIFT:
case ROTATE: case ROTATERT:
- if (SHIFT_COUNT_TRUNCATED)
- l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
-
- if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
- return 0;
-
- if (code == LSHIFTRT || code == ASHIFTRT)
- rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
- code == ASHIFTRT);
- else if (code == ASHIFT)
- lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
- else if (code == ROTATE)
- lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
- else /* code == ROTATERT */
- rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
+ {
+ unsigned HOST_WIDE_INT cnt;
+
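+	    /* With SHIFT_COUNT_TRUNCATED only the low bits of the count
+	       survive (it is zero-extended from the mode's precision);
+	       a count that is still out of range makes us give up
+	       rather than guess at target behavior.  */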
+ if (SHIFT_COUNT_TRUNCATED)
+ o1 = double_int_zext (o1, GET_MODE_PRECISION (mode));
+
+ if (!double_int_fits_in_uhwi_p (o1)
+ || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
+ return 0;
+
+ cnt = double_int_to_uhwi (o1);
+
+ if (code == LSHIFTRT || code == ASHIFTRT)
+ res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
+ code == ASHIFTRT);
+ else if (code == ASHIFT)
+ res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
+ true);
+ else if (code == ROTATE)
+ res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
+ else /* code == ROTATERT */
+ res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
+ }
break;
default:
return 0;
}
- return immed_double_const (lv, hv, mode);
+ return immed_double_int_const (res, mode);
}
- if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
+ if (CONST_INT_P (op0) && CONST_INT_P (op1)
&& width <= HOST_BITS_PER_WIDE_INT && width != 0)
{
/* Get the integer argument values in two forms:
if (width < HOST_BITS_PER_WIDE_INT)
{
- arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
- arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
+ arg0 &= GET_MODE_MASK (mode);
+ arg1 &= GET_MODE_MASK (mode);
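+	  /* Then sign-extend the masked values: when the sign bit of
+	     the mode is set, fill the bits above the mode mask with
+	     ones.  */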
arg0s = arg0;
- if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
- arg0s |= ((HOST_WIDE_INT) (-1) << width);
+ if (val_signbit_known_set_p (mode, arg0s))
+ arg0s |= ~GET_MODE_MASK (mode);
- arg1s = arg1;
- if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
- arg1s |= ((HOST_WIDE_INT) (-1) << width);
+ arg1s = arg1;
+ if (val_signbit_known_set_p (mode, arg1s))
+ arg1s |= ~GET_MODE_MASK (mode);
}
else
{
arg0s = arg0;
arg1s = arg1;
}
-
+
/* Compute the value of the arithmetic. */
-
+
switch (code)
{
case PLUS:
val = arg0s + arg1s;
break;
-
+
case MINUS:
val = arg0s - arg1s;
break;
-
+
case MULT:
val = arg0s * arg1s;
break;
-
+
case DIV:
if (arg1s == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = arg0s / arg1s;
break;
-
+
case MOD:
if (arg1s == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = arg0s % arg1s;
break;
-
+
case UDIV:
if (arg1 == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = (unsigned HOST_WIDE_INT) arg0 / arg1;
break;
-
+
case UMOD:
if (arg1 == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = (unsigned HOST_WIDE_INT) arg0 % arg1;
break;
-
+
case AND:
val = arg0 & arg1;
break;
-
+
case IOR:
val = arg0 | arg1;
break;
-
+
case XOR:
val = arg0 ^ arg1;
break;
-
+
case LSHIFTRT:
case ASHIFT:
case ASHIFTRT:
arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
return 0;
-
+
val = (code == ASHIFT
? ((unsigned HOST_WIDE_INT) arg0) << arg1
: ((unsigned HOST_WIDE_INT) arg0) >> arg1);
-
+
/* Sign-extend the result for arithmetic right shifts. */
if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
- val |= ((HOST_WIDE_INT) -1) << (width - arg1);
+ val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
break;
-
+
case ROTATERT:
if (arg1 < 0)
return 0;
-
+
arg1 %= width;
val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
| (((unsigned HOST_WIDE_INT) arg0) >> arg1));
break;
-
+
case ROTATE:
if (arg1 < 0)
return 0;
-
+
arg1 %= width;
val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
| (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
break;
-
+
case COMPARE:
/* Do nothing here. */
return 0;
-
+
case SMIN:
val = arg0s <= arg1s ? arg0s : arg1s;
break;
-
+
case UMIN:
val = ((unsigned HOST_WIDE_INT) arg0
<= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
break;
-
+
case SMAX:
val = arg0s > arg1s ? arg0s : arg1s;
break;
-
+
case UMAX:
val = ((unsigned HOST_WIDE_INT) arg0
> (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
break;
-
+
case SS_PLUS:
case US_PLUS:
case SS_MINUS:
case US_ASHIFT:
/* ??? There are simplifications that can be done. */
return 0;
-
+
default:
gcc_unreachable ();
}
else if (swap_commutative_operands_p (lhs, rhs))
tem = lhs, lhs = rhs, rhs = tem;
- if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
- && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
+ if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
+ && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
{
rtx tem_lhs, tem_rhs;
}
else
tem = simplify_binary_operation (ncode, mode, lhs, rhs);
-
+
/* Reject "simplifications" that just wrap the two
arguments in a CONST. Failure to do so can result
in infinite recursion with simplify_binary_operation
lneg &= rneg;
if (GET_CODE (tem) == NEG)
tem = XEXP (tem, 0), lneg = !lneg;
- if (GET_CODE (tem) == CONST_INT && lneg)
+ if (CONST_INT_P (tem) && lneg)
tem = neg_const_int (mode, tem), lneg = 0;
ops[i].op = tem;
/* Create (minus -C X) instead of (neg (const (plus X C))). */
if (n_ops == 2
- && GET_CODE (ops[1].op) == CONST_INT
+ && CONST_INT_P (ops[1].op)
&& CONSTANT_P (ops[0].op)
&& ops[0].neg)
return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
-
+
/* We suppressed creation of trivial CONST expressions in the
combination loop to avoid recursion. Create one manually now.
The combination loop should have ensured that there is exactly
in the array and that any other constant will be next-to-last. */
if (n_ops > 1
- && GET_CODE (ops[n_ops - 1].op) == CONST_INT
+ && CONST_INT_P (ops[n_ops - 1].op)
&& CONSTANT_P (ops[n_ops - 2].op))
{
rtx value = ops[n_ops - 1].op;
}
#else
return NULL_RTX;
-#endif
+#endif
}
if (VECTOR_MODE_P (mode))
{
/* If op0 is a compare, extract the comparison arguments from it. */
if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
- return simplify_relational_operation (code, mode, VOIDmode,
- XEXP (op0, 0), XEXP (op0, 1));
+ return simplify_gen_relational (code, mode, VOIDmode,
+ XEXP (op0, 0), XEXP (op0, 1));
if (GET_MODE_CLASS (cmp_mode) == MODE_CC
|| CC0_P (op0))
}
}
+ /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
+ (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
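+     E.g. (ltu (plus a 7) 7) holds exactly when the addition wraps
+     around, which is (geu a -7).  */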
+ if ((code == LTU || code == GEU)
+ && GET_CODE (op0) == PLUS
+ && CONST_INT_P (XEXP (op0, 1))
+ && (rtx_equal_p (op1, XEXP (op0, 0))
+ || rtx_equal_p (op1, XEXP (op0, 1))))
+ {
+ rtx new_cmp
+ = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
+ return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
+ cmp_mode, XEXP (op0, 0), new_cmp);
+ }
+
/* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
if ((code == LTU || code == GEU)
&& GET_CODE (op0) == PLUS
&& rtx_equal_p (op1, XEXP (op0, 1))
/* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
&& !rtx_equal_p (op1, XEXP (op0, 0)))
- return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));
+ return simplify_gen_relational (code, mode, cmp_mode, op0,
+ copy_rtx (XEXP (op0, 0)));
if (op1 == const0_rtx)
{
{
rtx x = XEXP (op0, 0);
rtx c = XEXP (op0, 1);
+ enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
+ rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
+
+ /* Detect infinite recursion, where this simplification would
+    oscillate between
+      A + B == C  <--->  C - B == A,
+    with A, B and C all non-simplifiable constant expressions,
+    usually SYMBOL_REFs.  */
+ if (GET_CODE (tem) == invcode
+ && CONSTANT_P (x)
+ && rtx_equal_p (c, XEXP (tem, 1)))
+ return NULL_RTX;
- c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
- cmp_mode, op1, c);
- return simplify_gen_relational (code, mode, cmp_mode, x, c);
+ return simplify_gen_relational (code, mode, cmp_mode, x, tem);
}
/* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
/* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
if ((code == EQ || code == NE)
&& op0code == XOR
- && (GET_CODE (op1) == CONST_INT
+ && (CONST_INT_P (op1)
|| GET_CODE (op1) == CONST_DOUBLE)
- && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (CONST_INT_P (XEXP (op0, 1))
|| GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
simplify_gen_binary (XOR, cmp_mode,
return NULL_RTX;
}
-enum
+enum
{
CMP_EQ = 1,
CMP_LT = 2,
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
- For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
+ For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
For floating-point comparisons, assume that the operands were ordered. */
if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
&& (code == EQ || code == NE)
- && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
- && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
+ && ! ((REG_P (op0) || CONST_INT_P (trueop0))
+ && (REG_P (op1) || CONST_INT_P (trueop1)))
&& 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
/* We cannot do this if tem is a nonzero address. */
&& ! nonzero_address_p (tem))
/* Otherwise, see if the operands are both integers. */
if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
&& (GET_CODE (trueop0) == CONST_DOUBLE
- || GET_CODE (trueop0) == CONST_INT)
+ || CONST_INT_P (trueop0))
&& (GET_CODE (trueop1) == CONST_DOUBLE
- || GET_CODE (trueop1) == CONST_INT))
+ || CONST_INT_P (trueop1)))
{
- int width = GET_MODE_BITSIZE (mode);
+ int width = GET_MODE_PRECISION (mode);
HOST_WIDE_INT l0s, h0s, l1s, h1s;
unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
we have to sign or zero-extend the values. */
if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
{
- l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
- l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
+ l0u &= GET_MODE_MASK (mode);
+ l1u &= GET_MODE_MASK (mode);
- if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
- l0s |= ((HOST_WIDE_INT) (-1) << width);
+ if (val_signbit_known_set_p (mode, l0s))
+ l0s |= ~GET_MODE_MASK (mode);
- if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
- l1s |= ((HOST_WIDE_INT) (-1) << width);
+ if (val_signbit_known_set_p (mode, l1s))
+ l1s |= ~GET_MODE_MASK (mode);
}
if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
}
/* Optimize comparisons with upper and lower bounds. */
- if (SCALAR_INT_MODE_P (mode)
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && GET_CODE (trueop1) == CONST_INT)
+ if (HWI_COMPUTABLE_MODE_P (mode)
+ && CONST_INT_P (trueop1))
{
int sign;
unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
if (GET_CODE (op0) == IOR)
{
rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
- if (GET_CODE (inner_const) == CONST_INT && inner_const != const0_rtx)
+ if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
{
- int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
+ int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
- && (INTVAL (inner_const)
- & ((HOST_WIDE_INT) 1 << sign_bitnum)));
+ && (UINTVAL (inner_const)
+ & ((unsigned HOST_WIDE_INT) 1
+ << sign_bitnum)));
switch (code)
{
enum machine_mode op0_mode, rtx op0, rtx op1,
rtx op2)
{
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
+ bool any_change = false;
+ rtx tem;
/* VOIDmode means "infinite" precision. */
if (width == 0)
switch (code)
{
+ case FMA:
+ /* Simplify negations around the multiplication. */
+ /* -a * -b + c => a * b + c. */
+ if (GET_CODE (op0) == NEG)
+ {
+ tem = simplify_unary_operation (NEG, mode, op1, mode);
+ if (tem)
+ op1 = tem, op0 = XEXP (op0, 0), any_change = true;
+ }
+ else if (GET_CODE (op1) == NEG)
+ {
+ tem = simplify_unary_operation (NEG, mode, op0, mode);
+ if (tem)
+ op0 = tem, op1 = XEXP (op1, 0), any_change = true;
+ }
+
+ /* Canonicalize the two multiplication operands. */
+ /* a * -b + c => -b * a + c. */
+ if (swap_commutative_operands_p (op0, op1))
+ tem = op0, op0 = op1, op1 = tem, any_change = true;
+
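+      /* Rebuild the FMA only if something actually changed, so that
+	 repeated simplification terminates.  */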
+ if (any_change)
+ return gen_rtx_FMA (mode, op0, op1, op2);
+ return NULL_RTX;
+
case SIGN_EXTRACT:
case ZERO_EXTRACT:
- if (GET_CODE (op0) == CONST_INT
- && GET_CODE (op1) == CONST_INT
- && GET_CODE (op2) == CONST_INT
+ if (CONST_INT_P (op0)
+ && CONST_INT_P (op1)
+ && CONST_INT_P (op2)
&& ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
&& width <= (unsigned) HOST_BITS_PER_WIDE_INT)
{
/* Extracting a bit-field from a constant */
- HOST_WIDE_INT val = INTVAL (op0);
-
+ unsigned HOST_WIDE_INT val = UINTVAL (op0);
+ HOST_WIDE_INT op1val = INTVAL (op1);
+ HOST_WIDE_INT op2val = INTVAL (op2);
if (BITS_BIG_ENDIAN)
- val >>= (GET_MODE_BITSIZE (op0_mode)
- - INTVAL (op2) - INTVAL (op1));
+ val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
else
- val >>= INTVAL (op2);
+ val >>= op2val;
- if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
+ if (HOST_BITS_PER_WIDE_INT != op1val)
{
/* First zero-extend. */
- val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
+ val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
/* If desired, propagate sign bit. */
if (code == SIGN_EXTRACT
- && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
- val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
+ && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
+ != 0)
+ val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
}
- /* Clear the bits that don't belong in our mode,
- unless they and our sign bit are all one.
- So we get either a reasonable negative value or a reasonable
- unsigned value for this mode. */
- if (width < HOST_BITS_PER_WIDE_INT
- && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
- != ((HOST_WIDE_INT) (-1) << (width - 1))))
- val &= ((HOST_WIDE_INT) 1 << width) - 1;
-
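+	      /* gen_int_mode truncates VAL to MODE, which makes the
+		 explicit bit-clearing that used to live here
+		 unnecessary.  */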
return gen_int_mode (val, mode);
}
break;
case IF_THEN_ELSE:
- if (GET_CODE (op0) == CONST_INT)
+ if (CONST_INT_P (op0))
return op0 != const0_rtx ? op1 : op2;
/* Convert c ? a : a into "a". */
rtx temp;
/* Look for happy constants in op1 and op2. */
- if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
+ if (CONST_INT_P (op1) && CONST_INT_P (op2))
{
HOST_WIDE_INT t = INTVAL (op1);
HOST_WIDE_INT f = INTVAL (op2);
/* See if any simplifications were possible. */
if (temp)
{
- if (GET_CODE (temp) == CONST_INT)
+ if (CONST_INT_P (temp))
return temp == const0_rtx ? op2 : op1;
else if (temp)
return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
gcc_assert (GET_MODE (op1) == mode);
gcc_assert (VECTOR_MODE_P (mode));
op2 = avoid_constant_pool_reference (op2);
- if (GET_CODE (op2) == CONST_INT)
+ if (CONST_INT_P (op2))
{
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
and then repacking them again for OUTERMODE. */
static rtx
-simplify_immed_subreg (enum machine_mode outermode, rtx op,
+simplify_immed_subreg (enum machine_mode outermode, rtx op,
enum machine_mode innermode, unsigned int byte)
{
/* We support up to 512-bit values (for V8DFmode). */
enum machine_mode outer_submode;
/* Some ports misuse CCmode. */
- if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
+ if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
return op;
/* We have no way to represent a complex constant at the rtl level. */
gcc_assert (BITS_PER_UNIT % value_bit == 0);
/* I don't know how to handle endianness of sub-units. */
gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
-
+
for (elem = 0; elem < num_elem; elem++)
{
unsigned char * vp;
rtx el = elems[elem];
-
+
/* Vectors are kept in target memory order. (This is probably
a mistake.) */
{
unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
- unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
+ unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
/ BITS_PER_UNIT);
unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
+ (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
vp = value + (bytele * BITS_PER_UNIT) / value_bit;
}
-
+
switch (GET_CODE (el))
{
case CONST_INT:
for (i = 0;
- i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
+ i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
i += value_bit)
*vp++ = INTVAL (el) >> i;
/* CONST_INTs are always logically sign-extended. */
for (; i < elem_bitsize; i += value_bit)
*vp++ = INTVAL (el) < 0 ? -1 : 0;
break;
-
+
case CONST_DOUBLE:
if (GET_MODE (el) == VOIDmode)
{
ibase = i;
*vp++ = tmp[ibase / 32] >> i % 32;
}
-
+
/* It shouldn't matter what's done here, so fill it with
zero. */
for (; i < elem_bitsize; i += value_bit)
*vp++ = 0;
}
break;
-
+
default:
gcc_unreachable ();
}
will already have offset 0. */
if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
{
- unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
+ unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
- byte);
unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
value_start = byte * (BITS_PER_UNIT / value_bit);
/* Re-pack the value. */
-
+
if (VECTOR_MODE_P (outermode))
{
num_elem = GET_MODE_NUNITS (outermode);
for (elem = 0; elem < num_elem; elem++)
{
unsigned char *vp;
-
+
/* Vectors are stored in target memory order. (This is probably
a mistake.) */
{
unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
- unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
+ unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
/ BITS_PER_UNIT);
unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
for (i = 0;
i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
i += value_bit)
- lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
+ lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
for (; i < elem_bitsize; i += value_bit)
- hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
- << (i - HOST_BITS_PER_WIDE_INT));
-
+ hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
+ << (i - HOST_BITS_PER_WIDE_INT);
+
/* immed_double_const doesn't call trunc_int_for_mode. I don't
know why. */
if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
return NULL_RTX;
}
break;
-
+
case MODE_FLOAT:
case MODE_DECIMAL_FLOAT:
{
REAL_VALUE_TYPE r;
long tmp[max_bitsize / 32];
-
+
/* real_from_target wants its input in words affected by
FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
and use WORDS_BIG_ENDIAN instead; see the documentation
for (i = 0;
i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
i += value_bit)
- f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
+ f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
for (; i < elem_bitsize; i += value_bit)
- f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
+ f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
<< (i - HOST_BITS_PER_WIDE_INT));
elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
}
break;
-
+
default:
gcc_unreachable ();
}
if (outermode == innermode && !byte)
return op;
- if (GET_CODE (op) == CONST_INT
+ if (CONST_INT_P (op)
|| GET_CODE (op) == CONST_DOUBLE
|| GET_CODE (op) == CONST_FIXED
|| GET_CODE (op) == CONST_VECTOR)
/* Optimize SUBREG truncations of zero and sign extended values. */
if ((GET_CODE (op) == ZERO_EXTEND
|| GET_CODE (op) == SIGN_EXTEND)
- && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
+ && SCALAR_INT_MODE_P (innermode)
+ && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
{
unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
enum machine_mode origmode = GET_MODE (XEXP (op, 0));
if (outermode == origmode)
return XEXP (op, 0);
- if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
+ if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
subreg_lowpart_offset (outermode,
origmode));
/* A SUBREG resulting from a zero extension may fold to zero if
it extracts higher bits than the ZERO_EXTEND's source bits.  */
if (GET_CODE (op) == ZERO_EXTEND
- && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
+ && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
return CONST0_RTX (outermode);
}
if ((GET_CODE (op) == LSHIFTRT
|| GET_CODE (op) == ASHIFTRT)
&& SCALAR_INT_MODE_P (outermode)
+ && SCALAR_INT_MODE_P (innermode)
/* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
to avoid the possibility that an outer LSHIFTRT shifts by more
than the sign extension's sign_bit_copies and introduces zeros
into the high bits of the result. */
- && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
+ && CONST_INT_P (XEXP (op, 1))
&& GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
&& GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
- && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
&& subreg_lsb_1 (outermode, innermode, byte) == 0)
return simplify_gen_binary (ASHIFTRT, outermode,
XEXP (XEXP (op, 0), 0), XEXP (op, 1));
if ((GET_CODE (op) == LSHIFTRT
|| GET_CODE (op) == ASHIFTRT)
&& SCALAR_INT_MODE_P (outermode)
- && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && SCALAR_INT_MODE_P (innermode)
+ && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
+ && CONST_INT_P (XEXP (op, 1))
&& GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
&& GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
- && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
&& subreg_lsb_1 (outermode, innermode, byte) == 0)
return simplify_gen_binary (LSHIFTRT, outermode,
XEXP (XEXP (op, 0), 0), XEXP (op, 1));
the outer subreg is effectively a truncation to the original mode. */
if (GET_CODE (op) == ASHIFT
&& SCALAR_INT_MODE_P (outermode)
- && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && SCALAR_INT_MODE_P (innermode)
+ && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
+ && CONST_INT_P (XEXP (op, 1))
&& (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
|| GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
&& GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
- && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
&& subreg_lsb_1 (outermode, innermode, byte) == 0)
return simplify_gen_binary (ASHIFT, outermode,
XEXP (XEXP (op, 0), 0), XEXP (op, 1));
/* Recognize a word extraction from a multi-word subreg. */
if ((GET_CODE (op) == LSHIFTRT
|| GET_CODE (op) == ASHIFTRT)
- && SCALAR_INT_MODE_P (outermode)
- && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
- && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
- && GET_CODE (XEXP (op, 1)) == CONST_INT
- && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
- && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
+ && SCALAR_INT_MODE_P (innermode)
+ && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
+ && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
+ && CONST_INT_P (XEXP (op, 1))
+ && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
+ && INTVAL (XEXP (op, 1)) >= 0
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
&& byte == subreg_lowpart_offset (outermode, innermode))
{
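+      /* The count is a non-negative multiple of the outer mode's
+	 width, so the shift selects a whole word; convert it to a
+	 byte offset into the inner value.  */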
int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
(WORDS_BIG_ENDIAN
- ? byte - shifted_bytes : byte + shifted_bytes));
+ ? byte - shifted_bytes
+ : byte + shifted_bytes));
+ }
+
+ /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
+ and try replacing the SUBREG and shift with it. Don't do this if
+ the MEM has a mode-dependent address or if we would be widening it. */
+
+ if ((GET_CODE (op) == LSHIFTRT
+ || GET_CODE (op) == ASHIFTRT)
+ && SCALAR_INT_MODE_P (innermode)
+ && MEM_P (XEXP (op, 0))
+ && CONST_INT_P (XEXP (op, 1))
+ && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
+ && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
+ && INTVAL (XEXP (op, 1)) > 0
+ && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
+ && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
+ && ! MEM_VOLATILE_P (XEXP (op, 0))
+ && byte == subreg_lowpart_offset (outermode, innermode)
+ && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
+ || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
+ {
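+      /* adjust_address_nv builds the new MEM at the shifted offset
+	 without re-validating the address; the mode-dependent-address
+	 and volatility checks above make that safe.  */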
+ int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
+ return adjust_address_nv (XEXP (op, 0), outermode,
+ (WORDS_BIG_ENDIAN
+ ? byte - shifted_bytes
+ : byte + shifted_bytes));
}
return NULL_RTX;