X-Git-Url: http://git.sourceforge.jp/view?p=pf3gnuchains%2Fgcc-fork.git;a=blobdiff_plain;f=gcc%2Fsimplify-rtx.c;h=6733b84d572d41228add2c9bfd991b0ded5d885b;hp=40fedde9a0ba2a799f89eabbada09d5522a46c17;hb=a4d16d3046583e0b59ec8c4b712b1c827f7b98c3;hpb=1f3b83af524f926a42b312b954966ca349d9b69f
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index 40fedde9a0b..6733b84d572 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -1,13 +1,13 @@
/* RTL simplification functions for GNU compiler.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
- Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+ 2011 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
@@ -16,9 +16,8 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA. */
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
#include "config.h"
@@ -31,12 +30,11 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
-#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
@@ -50,8 +48,8 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
#define HWI_SIGN_EXTEND(low) \
((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
-static rtx neg_const_int (enum machine_mode, rtx);
-static bool plus_minus_operand_p (rtx);
+static rtx neg_const_int (enum machine_mode, const_rtx);
+static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
@@ -67,7 +65,7 @@ static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
/* Negate a CONST_INT rtx, truncating (because a conversion from a
maximally negative number can overflow). */
static rtx
-neg_const_int (enum machine_mode mode, rtx i)
+neg_const_int (enum machine_mode mode, const_rtx i)
{
return gen_int_mode (- INTVAL (i), mode);
}
@@ -76,7 +74,7 @@ neg_const_int (enum machine_mode mode, rtx i)
the most significant bit of machine mode MODE. */
bool
-mode_signbit_p (enum machine_mode mode, rtx x)
+mode_signbit_p (enum machine_mode mode, const_rtx x)
{
unsigned HOST_WIDE_INT val;
unsigned int width;
@@ -84,12 +82,12 @@ mode_signbit_p (enum machine_mode mode, rtx x)
if (GET_MODE_CLASS (mode) != MODE_INT)
return false;
- width = GET_MODE_BITSIZE (mode);
+ width = GET_MODE_PRECISION (mode);
if (width == 0)
return false;
-
+
if (width <= HOST_BITS_PER_WIDE_INT
- && GET_CODE (x) == CONST_INT)
+ && CONST_INT_P (x))
val = INTVAL (x);
else if (width <= 2 * HOST_BITS_PER_WIDE_INT
&& GET_CODE (x) == CONST_DOUBLE
@@ -105,6 +103,62 @@ mode_signbit_p (enum machine_mode mode, rtx x)
val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
+
+/* Test whether VAL is equal to the most significant bit of mode MODE
+ (after masking with the mode mask of MODE). Returns false if the
+ precision of MODE is too large to handle. */
+
+bool
+val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
+{
+ unsigned int width;
+
+ if (GET_MODE_CLASS (mode) != MODE_INT)
+ return false;
+
+ width = GET_MODE_PRECISION (mode);
+ if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
+ return false;
+
+ val &= GET_MODE_MASK (mode);
+ return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
+}
+
+/* Test whether the most significant bit of mode MODE is set in VAL.
+ Returns false if the precision of MODE is too large to handle. */
+bool
+val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
+{
+ unsigned int width;
+
+ if (GET_MODE_CLASS (mode) != MODE_INT)
+ return false;
+
+ width = GET_MODE_PRECISION (mode);
+ if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
+ return false;
+
+ val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
+ return val != 0;
+}
+
+/* Test whether the most significant bit of mode MODE is clear in VAL.
+ Returns false if the precision of MODE is too large to handle. */
+bool
+val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
+{
+ unsigned int width;
+
+ if (GET_MODE_CLASS (mode) != MODE_INT)
+ return false;
+
+ width = GET_MODE_PRECISION (mode);
+ if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
+ return false;
+
+ val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
+ return val == 0;
+}
/* Make a binary operation by properly ordering the operands and
seeing if the expression folds. */
@@ -170,7 +224,7 @@ avoid_constant_pool_reference (rtx x)
/* Split the address into a base and integer offset. */
if (GET_CODE (addr) == CONST
&& GET_CODE (XEXP (addr, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
{
offset = INTVAL (XEXP (XEXP (addr, 0), 1));
addr = XEXP (XEXP (addr, 0), 0);
@@ -203,6 +257,106 @@ avoid_constant_pool_reference (rtx x)
return x;
}
+/* Simplify a MEM based on its attributes. This is the default
+ delegitimize_address target hook, and it's recommended that every
+ overrider call it. */
+
+rtx
+delegitimize_mem_from_attrs (rtx x)
+{
+ /* MEMs without MEM_OFFSETs may have been offset, so we can't just
+ use their base addresses as equivalent. */
+ if (MEM_P (x)
+ && MEM_EXPR (x)
+ && MEM_OFFSET_KNOWN_P (x))
+ {
+ tree decl = MEM_EXPR (x);
+ enum machine_mode mode = GET_MODE (x);
+ HOST_WIDE_INT offset = 0;
+
+ switch (TREE_CODE (decl))
+ {
+ default:
+ decl = NULL;
+ break;
+
+ case VAR_DECL:
+ break;
+
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ case COMPONENT_REF:
+ case BIT_FIELD_REF:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ case VIEW_CONVERT_EXPR:
+ {
+ HOST_WIDE_INT bitsize, bitpos;
+ tree toffset;
+ int unsignedp = 0, volatilep = 0;
+
+ decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
+ &mode, &unsignedp, &volatilep, false);
+ if (bitsize != GET_MODE_BITSIZE (mode)
+ || (bitpos % BITS_PER_UNIT)
+ || (toffset && !host_integerp (toffset, 0)))
+ decl = NULL;
+ else
+ {
+ offset += bitpos / BITS_PER_UNIT;
+ if (toffset)
+ offset += TREE_INT_CST_LOW (toffset);
+ }
+ break;
+ }
+ }
+
+ if (decl
+ && mode == GET_MODE (x)
+ && TREE_CODE (decl) == VAR_DECL
+ && (TREE_STATIC (decl)
+ || DECL_THREAD_LOCAL_P (decl))
+ && DECL_RTL_SET_P (decl)
+ && MEM_P (DECL_RTL (decl)))
+ {
+ rtx newx;
+
+ offset += MEM_OFFSET (x);
+
+ newx = DECL_RTL (decl);
+
+ if (MEM_P (newx))
+ {
+ rtx n = XEXP (newx, 0), o = XEXP (x, 0);
+
+ /* Avoid creating a new MEM needlessly if we already had
+ the same address. We do if there's no OFFSET and the
+ old address X is identical to NEWX, or if X is of the
+ form (plus NEWX OFFSET), or the NEWX is of the form
+ (plus Y (const_int Z)) and X is that with the offset
+ added: (plus Y (const_int Z+OFFSET)). */
+ if (!((offset == 0
+ || (GET_CODE (o) == PLUS
+ && GET_CODE (XEXP (o, 1)) == CONST_INT
+ && (offset == INTVAL (XEXP (o, 1))
+ || (GET_CODE (n) == PLUS
+ && GET_CODE (XEXP (n, 1)) == CONST_INT
+ && (INTVAL (XEXP (n, 1)) + offset
+ == INTVAL (XEXP (o, 1)))
+ && (n = XEXP (n, 0))))
+ && (o = XEXP (o, 0))))
+ && rtx_equal_p (o, n)))
+ x = adjust_address_nv (newx, mode, offset);
+ }
+ else if (GET_MODE (x) == GET_MODE (newx)
+ && offset == 0)
+ x = newx;
+ }
+ }
+
+ return x;
+}
+
/* Make a unary operation by first seeing if it folds and otherwise making
the specified operation. */
@@ -251,38 +405,46 @@ simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
return gen_rtx_fmt_ee (code, mode, op0, op1);
}
-/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
- resulting RTX. Return a new RTX which is as simplified as possible. */
+/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
+ and simplify the result. If FN is non-NULL, call this callback on each
+ X, if it returns non-NULL, replace X with its return value and simplify the
+ result. */
rtx
-simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
+simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
+ rtx (*fn) (rtx, const_rtx, void *), void *data)
{
enum rtx_code code = GET_CODE (x);
enum machine_mode mode = GET_MODE (x);
enum machine_mode op_mode;
- rtx op0, op1, op2;
-
- /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
- to build a new expression substituting recursively. If we can't do
- anything, return our input. */
+ const char *fmt;
+ rtx op0, op1, op2, newx, op;
+ rtvec vec, newvec;
+ int i, j;
- if (x == old_rtx)
- return new_rtx;
+ if (__builtin_expect (fn != NULL, 0))
+ {
+ newx = fn (x, old_rtx, data);
+ if (newx)
+ return newx;
+ }
+ else if (rtx_equal_p (x, old_rtx))
+ return copy_rtx ((rtx) data);
switch (GET_RTX_CLASS (code))
{
case RTX_UNARY:
op0 = XEXP (x, 0);
op_mode = GET_MODE (op0);
- op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
if (op0 == XEXP (x, 0))
return x;
return simplify_gen_unary (code, mode, op0, op_mode);
case RTX_BIN_ARITH:
case RTX_COMM_ARITH:
- op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
- op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
+ op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
return x;
return simplify_gen_binary (code, mode, op0, op1);
@@ -292,8 +454,8 @@ simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
op0 = XEXP (x, 0);
op1 = XEXP (x, 1);
op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
- op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
- op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
+ op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
return x;
return simplify_gen_relational (code, mode, op_mode, op0, op1);
@@ -302,9 +464,9 @@ simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
case RTX_BITFIELD_OPS:
op0 = XEXP (x, 0);
op_mode = GET_MODE (op0);
- op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
- op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
- op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
+ op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
+ op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
return x;
if (op_mode == VOIDmode)
@@ -312,10 +474,9 @@ simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
case RTX_EXTRA:
- /* The only case we try to handle is a SUBREG. */
if (code == SUBREG)
{
- op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
if (op0 == SUBREG_REG (x))
return x;
op0 = simplify_gen_subreg (GET_MODE (x), op0,
@@ -328,15 +489,15 @@ simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
case RTX_OBJ:
if (code == MEM)
{
- op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
if (op0 == XEXP (x, 0))
return x;
return replace_equiv_address_nv (x, op0);
}
else if (code == LO_SUM)
{
- op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
- op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
+ op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
+ op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
/* (lo_sum (high x) x) -> x */
if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
@@ -346,17 +507,61 @@ simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
return x;
return gen_rtx_LO_SUM (mode, op0, op1);
}
- else if (code == REG)
- {
- if (rtx_equal_p (x, old_rtx))
- return new_rtx;
- }
break;
default:
break;
}
- return x;
+
+ newx = x;
+ fmt = GET_RTX_FORMAT (code);
+ for (i = 0; fmt[i]; i++)
+ switch (fmt[i])
+ {
+ case 'E':
+ vec = XVEC (x, i);
+ newvec = XVEC (newx, i);
+ for (j = 0; j < GET_NUM_ELEM (vec); j++)
+ {
+ op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
+ old_rtx, fn, data);
+ if (op != RTVEC_ELT (vec, j))
+ {
+ if (newvec == vec)
+ {
+ newvec = shallow_copy_rtvec (vec);
+ if (x == newx)
+ newx = shallow_copy_rtx (x);
+ XVEC (newx, i) = newvec;
+ }
+ RTVEC_ELT (newvec, j) = op;
+ }
+ }
+ break;
+
+ case 'e':
+ if (XEXP (x, i))
+ {
+ op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
+ if (op != XEXP (x, i))
+ {
+ if (x == newx)
+ newx = shallow_copy_rtx (x);
+ XEXP (newx, i) = op;
+ }
+ }
+ break;
+ }
+ return newx;
+}
+
+/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
+ resulting RTX. Return a new RTX which is as simplified as possible. */
+
+rtx
+simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
+{
+ return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
/* Try to simplify a unary operation CODE whose output mode is to be
@@ -368,9 +573,6 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
{
rtx trueop, tem;
- if (GET_CODE (op) == CONST)
- op = XEXP (op, 0);
-
trueop = avoid_constant_pool_reference (op);
tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
@@ -414,14 +616,14 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
/* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
if (GET_CODE (op) == XOR
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (op, 1))
&& (temp = simplify_unary_operation (NOT, mode,
XEXP (op, 1), mode)) != 0)
return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
/* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
if (GET_CODE (op) == PLUS
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (op, 1))
&& mode_signbit_p (mode, XEXP (op, 1))
&& (temp = simplify_unary_operation (NOT, mode,
XEXP (op, 1), mode)) != 0)
@@ -443,11 +645,11 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
/* (not (ashiftrt foo C)) where C is the number of bits in FOO
minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
so we can perform the above simplification. */
-
+
if (STORE_FLAG_VALUE == -1
&& GET_CODE (op) == ASHIFTRT
- && GET_CODE (XEXP (op, 1)) == CONST_INT
- && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && CONST_INT_P (XEXP (op, 1))
+ && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
return simplify_gen_relational (GE, mode, VOIDmode,
XEXP (op, 0), const0_rtx);
@@ -507,11 +709,11 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
if (GET_CODE (op) == PLUS
&& XEXP (op, 1) == const1_rtx)
return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
-
+
/* Similarly, (neg (not X)) is (plus X 1). */
if (GET_CODE (op) == NOT)
return plus_constant (XEXP (op, 0), 1);
-
+
/* (neg (minus X Y)) can become (minus Y X). This transformation
isn't safe for modes with signed zeros, since if X and Y are
both +0, (minus Y X) is the same as (minus X Y). If the
@@ -521,13 +723,13 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
&& !HONOR_SIGNED_ZEROS (mode)
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
-
+
if (GET_CODE (op) == PLUS
&& !HONOR_SIGNED_ZEROS (mode)
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
{
/* (neg (plus A C)) is simplified to (minus -C A). */
- if (GET_CODE (XEXP (op, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (op, 1))
|| GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
{
temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
@@ -540,13 +742,13 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
}
- /* (neg (mult A B)) becomes (mult (neg A) B).
+ /* (neg (mult A B)) becomes (mult A (neg B)).
This works even for floating-point values. */
if (GET_CODE (op) == MULT
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
{
- temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
- return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
+ temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
+ return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
}
/* NEG commutes with ASHIFT since it is multiplication. Only do
@@ -562,19 +764,19 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
/* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
C is equal to the width of MODE minus 1. */
if (GET_CODE (op) == ASHIFTRT
- && GET_CODE (XEXP (op, 1)) == CONST_INT
- && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && CONST_INT_P (XEXP (op, 1))
+ && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
return simplify_gen_binary (LSHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
/* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
C is equal to the width of MODE minus 1. */
if (GET_CODE (op) == LSHIFTRT
- && GET_CODE (XEXP (op, 1)) == CONST_INT
- && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && CONST_INT_P (XEXP (op, 1))
+ && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
return simplify_gen_binary (ASHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
-
+
/* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
if (GET_CODE (op) == XOR
&& XEXP (op, 1) == const1_rtx
@@ -584,17 +786,18 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
/* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
/* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
if (GET_CODE (op) == LT
- && XEXP (op, 1) == const0_rtx)
+ && XEXP (op, 1) == const0_rtx
+ && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
{
enum machine_mode inner = GET_MODE (XEXP (op, 0));
- int isize = GET_MODE_BITSIZE (inner);
+ int isize = GET_MODE_PRECISION (inner);
if (STORE_FLAG_VALUE == 1)
{
temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
GEN_INT (isize - 1));
if (mode == inner)
return temp;
- if (GET_MODE_BITSIZE (mode) > isize)
+ if (GET_MODE_PRECISION (mode) > isize)
return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
return simplify_gen_unary (TRUNCATE, mode, temp, inner);
}
@@ -604,7 +807,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
GEN_INT (isize - 1));
if (mode == inner)
return temp;
- if (GET_MODE_BITSIZE (mode) > isize)
+ if (GET_MODE_PRECISION (mode) > isize)
return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
return simplify_gen_unary (TRUNCATE, mode, temp, inner);
}
@@ -646,14 +849,13 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
replace the TRUNCATE with a SUBREG. Note that this is also
valid if TRULY_NOOP_TRUNCATION is false for the corresponding
modes we just have to apply a different definition for
- truncation. But don't do this for an (LSHIFTRT (MULT ...))
+ truncation. But don't do this for an (LSHIFTRT (MULT ...))
since this will cause problems with the umulXi3_highpart
patterns. */
- if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
- GET_MODE_BITSIZE (GET_MODE (op)))
+ if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
? (num_sign_bit_copies (op, GET_MODE (op))
- > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
- - GET_MODE_BITSIZE (mode)))
+ > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
+ - GET_MODE_PRECISION (mode)))
: truncated_to_mode (mode, op))
&& ! (GET_CODE (op) == LSHIFTRT
&& GET_CODE (XEXP (op, 0)) == MULT))
@@ -663,9 +865,9 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
STORE_FLAG_VALUE permits. This is like the previous test,
but it works even if the comparison is done in a mode larger
than HOST_BITS_PER_WIDE_INT. */
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ if (HWI_COMPUTABLE_MODE_P (mode)
&& COMPARISON_P (op)
- && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
+ && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
return rtl_hooks.gen_lowpart_no_emit (mode, op);
break;
@@ -702,7 +904,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
&& (flag_unsafe_math_optimizations
|| (SCALAR_FLOAT_MODE_P (GET_MODE (op))
&& ((unsigned)significand_size (GET_MODE (op))
- >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
+ >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
- num_sign_bit_copies (XEXP (op, 0),
GET_MODE (XEXP (op, 0))))))))
return simplify_gen_unary (FLOAT, mode,
@@ -739,7 +941,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
|| (GET_CODE (op) == FLOAT
&& SCALAR_FLOAT_MODE_P (GET_MODE (op))
&& ((unsigned)significand_size (GET_MODE (op))
- >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
+ >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
- num_sign_bit_copies (XEXP (op, 0),
GET_MODE (XEXP (op, 0)))))))
return simplify_gen_unary (GET_CODE (op), mode,
@@ -761,16 +963,12 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
/* If operand is something known to be positive, ignore the ABS. */
if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
- || ((GET_MODE_BITSIZE (GET_MODE (op))
- <= HOST_BITS_PER_WIDE_INT)
- && ((nonzero_bits (op, GET_MODE (op))
- & ((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
- == 0)))
+ || val_signbit_known_clear_p (GET_MODE (op),
+ nonzero_bits (op, GET_MODE (op))))
return op;
/* If operand is known to be only -1 or 0, convert ABS to NEG. */
- if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
+ if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
return gen_rtx_NEG (mode, op);
break;
@@ -853,17 +1051,99 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
&& GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
return XEXP (op, 0);
+ /* Extending a widening multiplication should be canonicalized to
+ a wider widening multiplication. */
+ if (GET_CODE (op) == MULT)
+ {
+ rtx lhs = XEXP (op, 0);
+ rtx rhs = XEXP (op, 1);
+ enum rtx_code lcode = GET_CODE (lhs);
+ enum rtx_code rcode = GET_CODE (rhs);
+
+ /* Widening multiplies usually extend both operands, but sometimes
+ they use a shift to extract a portion of a register. */
+ if ((lcode == SIGN_EXTEND
+ || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
+ && (rcode == SIGN_EXTEND
+ || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
+ {
+ enum machine_mode lmode = GET_MODE (lhs);
+ enum machine_mode rmode = GET_MODE (rhs);
+ int bits;
+
+ if (lcode == ASHIFTRT)
+ /* Number of bits not shifted off the end. */
+ bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
+ else /* lcode == SIGN_EXTEND */
+ /* Size of inner mode. */
+ bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
+
+ if (rcode == ASHIFTRT)
+ bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
+ else /* rcode == SIGN_EXTEND */
+ bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
+
+ /* We can only widen multiplies if the result is mathematically
+ equivalent. I.e. if overflow was impossible. */
+ if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
+ return simplify_gen_binary
+ (MULT, mode,
+ simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
+ simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
+ }
+ }
+
/* Check for a sign extension of a subreg of a promoted
variable, where the promotion is sign-extended, and the
target mode is the same as the variable's promotion. */
if (GET_CODE (op) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op)
&& ! SUBREG_PROMOTED_UNSIGNED_P (op)
- && GET_MODE (XEXP (op, 0)) == mode)
- return XEXP (op, 0);
+ && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
+ return rtl_hooks.gen_lowpart_no_emit (mode, op);
+
+ /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
+ (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
+ if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
+ {
+ gcc_assert (GET_MODE_BITSIZE (mode)
+ > GET_MODE_BITSIZE (GET_MODE (op)));
+ return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)));
+ }
+
+ /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
+ is (sign_extend:M (subreg:O <X>)) if there is mode with
+ GET_MODE_BITSIZE (N) - I bits.
+ (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
+ is similarly (zero_extend:M (subreg:O <X>)). */
+ if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
+ && GET_CODE (XEXP (op, 0)) == ASHIFT
+ && CONST_INT_P (XEXP (op, 1))
+ && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
+ && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
+ {
+ enum machine_mode tmode
+ = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
+ - INTVAL (XEXP (op, 1)), MODE_INT, 1);
+ gcc_assert (GET_MODE_BITSIZE (mode)
+ > GET_MODE_BITSIZE (GET_MODE (op)));
+ if (tmode != BLKmode)
+ {
+ rtx inner =
+ rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
+ return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
+ ? SIGN_EXTEND : ZERO_EXTEND,
+ mode, inner, tmode);
+ }
+ }
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
- if (! POINTERS_EXTEND_UNSIGNED
+ /* As we do not know which address space the pointer is referring to,
+ we can do this only if the target does not support different pointer
+ or address modes depending on the address space. */
+ if (target_default_pointer_address_modes_p ()
+ && ! POINTERS_EXTEND_UNSIGNED
&& mode == Pmode && GET_MODE (op) == ptr_mode
&& (CONSTANT_P (op)
|| (GET_CODE (op) == SUBREG
@@ -881,11 +1161,82 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
if (GET_CODE (op) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op)
&& SUBREG_PROMOTED_UNSIGNED_P (op) > 0
- && GET_MODE (XEXP (op, 0)) == mode)
- return XEXP (op, 0);
+ && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
+ return rtl_hooks.gen_lowpart_no_emit (mode, op);
+
+ /* Extending a widening multiplication should be canonicalized to
+ a wider widening multiplication. */
+ if (GET_CODE (op) == MULT)
+ {
+ rtx lhs = XEXP (op, 0);
+ rtx rhs = XEXP (op, 1);
+ enum rtx_code lcode = GET_CODE (lhs);
+ enum rtx_code rcode = GET_CODE (rhs);
+
+ /* Widening multiplies usually extend both operands, but sometimes
+ they use a shift to extract a portion of a register. */
+ if ((lcode == ZERO_EXTEND
+ || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
+ && (rcode == ZERO_EXTEND
+ || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
+ {
+ enum machine_mode lmode = GET_MODE (lhs);
+ enum machine_mode rmode = GET_MODE (rhs);
+ int bits;
+
+ if (lcode == LSHIFTRT)
+ /* Number of bits not shifted off the end. */
+ bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
+ else /* lcode == ZERO_EXTEND */
+ /* Size of inner mode. */
+ bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
+
+ if (rcode == LSHIFTRT)
+ bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
+ else /* rcode == ZERO_EXTEND */
+ bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
+
+ /* We can only widen multiplies if the result is mathematically
+ equivalent. I.e. if overflow was impossible. */
+ if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
+ return simplify_gen_binary
+ (MULT, mode,
+ simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
+ simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
+ }
+ }
+
+ /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
+ if (GET_CODE (op) == ZERO_EXTEND)
+ return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)));
+
+ /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
+ is (zero_extend:M (subreg:O <X>)) if there is mode with
+ GET_MODE_BITSIZE (N) - I bits. */
+ if (GET_CODE (op) == LSHIFTRT
+ && GET_CODE (XEXP (op, 0)) == ASHIFT
+ && CONST_INT_P (XEXP (op, 1))
+ && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
+ && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
+ {
+ enum machine_mode tmode
+ = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
+ - INTVAL (XEXP (op, 1)), MODE_INT, 1);
+ if (tmode != BLKmode)
+ {
+ rtx inner =
+ rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
+ return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
+ }
+ }
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
- if (POINTERS_EXTEND_UNSIGNED > 0
+ /* As we do not know which address space the pointer is referring to,
+ we can do this only if the target does not support different pointer
+ or address modes depending on the address space. */
+ if (target_default_pointer_address_modes_p ()
+ && POINTERS_EXTEND_UNSIGNED > 0
&& mode == Pmode && GET_MODE (op) == ptr_mode
&& (CONSTANT_P (op)
|| (GET_CODE (op) == SUBREG
@@ -899,7 +1250,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
default:
break;
}
-
+
return 0;
}
@@ -910,7 +1261,8 @@ rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
rtx op, enum machine_mode op_mode)
{
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
+ unsigned int op_width = GET_MODE_PRECISION (op_mode);
if (code == VEC_DUPLICATE)
{
@@ -923,7 +1275,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
(GET_MODE (op)));
}
- if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
+ if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
|| GET_CODE (op) == CONST_VECTOR)
{
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
@@ -977,12 +1329,12 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
such as FIX. At some point, this should be simplified. */
if (code == FLOAT && GET_MODE (op) == VOIDmode
- && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
+ && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
{
HOST_WIDE_INT hv, lv;
REAL_VALUE_TYPE d;
- if (GET_CODE (op) == CONST_INT)
+ if (CONST_INT_P (op))
lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
else
lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
@@ -993,12 +1345,12 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
}
else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
&& (GET_CODE (op) == CONST_DOUBLE
- || GET_CODE (op) == CONST_INT))
+ || CONST_INT_P (op)))
{
HOST_WIDE_INT hv, lv;
REAL_VALUE_TYPE d;
- if (GET_CODE (op) == CONST_INT)
+ if (CONST_INT_P (op))
lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
else
lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
@@ -1010,7 +1362,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
if (hv < 0)
return 0;
}
- else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
+ else if (GET_MODE_PRECISION (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
;
else
hv = 0, lv &= GET_MODE_MASK (op_mode);
@@ -1020,7 +1372,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
}
- if (GET_CODE (op) == CONST_INT
+ if (CONST_INT_P (op)
&& width <= HOST_BITS_PER_WIDE_INT && width > 0)
{
HOST_WIDE_INT arg0 = INTVAL (op);
@@ -1041,10 +1393,8 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
break;
case FFS:
- /* Don't use ffs here. Instead, get low order bit and then its
- number. If arg0 is zero, this will return 0, as desired. */
arg0 &= GET_MODE_MASK (mode);
- val = exact_log2 (arg0 & (- arg0)) + 1;
+ val = ffs_hwi (arg0);
break;
case CLZ:
@@ -1052,7 +1402,17 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
;
else
- val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
+ val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
+ break;
+
+ case CLRSB:
+ arg0 &= GET_MODE_MASK (mode);
+ if (arg0 == 0)
+ val = GET_MODE_PRECISION (mode) - 1;
+ else if (arg0 >= 0)
+ val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
+ else if (arg0 < 0)
+ val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
break;
case CTZ:
@@ -1062,10 +1422,10 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
/* Even if the value at zero is undefined, we have to come
up with some replacement. Seems good enough. */
if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
- val = GET_MODE_BITSIZE (mode);
+ val = GET_MODE_PRECISION (mode);
}
else
- val = exact_log2 (arg0 & -arg0);
+ val = ctz_hwi (arg0);
break;
case POPCOUNT:
@@ -1106,16 +1466,16 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
/* When zero-extending a CONST_INT, we need to know its
original mode. */
gcc_assert (op_mode != VOIDmode);
- if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
+ if (op_width == HOST_BITS_PER_WIDE_INT)
{
/* If we were really extending the mode,
we would have to distinguish between zero-extension
and sign-extension. */
- gcc_assert (width == GET_MODE_BITSIZE (op_mode));
+ gcc_assert (width == op_width);
val = arg0;
}
else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
- val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
+ val = arg0 & GET_MODE_MASK (op_mode);
else
return 0;
break;
@@ -1123,21 +1483,20 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
case SIGN_EXTEND:
if (op_mode == VOIDmode)
op_mode = mode;
- if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
+ op_width = GET_MODE_PRECISION (op_mode);
+ if (op_width == HOST_BITS_PER_WIDE_INT)
{
/* If we were really extending the mode,
we would have to distinguish between zero-extension
and sign-extension. */
- gcc_assert (width == GET_MODE_BITSIZE (op_mode));
+ gcc_assert (width == op_width);
val = arg0;
}
- else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
+ else if (op_width < HOST_BITS_PER_WIDE_INT)
{
- val
- = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
- if (val
- & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
- val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
+ val = arg0 & GET_MODE_MASK (op_mode);
+ if (val_signbit_known_set_p (op_mode, val))
+ val |= ~GET_MODE_MASK (op_mode);
}
else
return 0;
@@ -1149,6 +1508,8 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
case SS_TRUNCATE:
case US_TRUNCATE:
case SS_NEG:
+ case US_NEG:
+ case SS_ABS:
return 0;
default:
@@ -1163,7 +1524,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
else if (GET_MODE (op) == VOIDmode
&& width <= HOST_BITS_PER_WIDE_INT * 2
&& (GET_CODE (op) == CONST_DOUBLE
- || GET_CODE (op) == CONST_INT))
+ || CONST_INT_P (op)))
{
unsigned HOST_WIDE_INT l1, lv;
HOST_WIDE_INT h1, hv;
@@ -1193,36 +1554,33 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
case FFS:
hv = 0;
- if (l1 == 0)
- {
- if (h1 == 0)
- lv = 0;
- else
- lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
- }
+ if (l1 != 0)
+ lv = ffs_hwi (l1);
+ else if (h1 != 0)
+ lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
else
- lv = exact_log2 (l1 & -l1) + 1;
+ lv = 0;
break;
case CLZ:
hv = 0;
if (h1 != 0)
- lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
+ lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
- HOST_BITS_PER_WIDE_INT;
else if (l1 != 0)
- lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
+ lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
- lv = GET_MODE_BITSIZE (mode);
+ lv = GET_MODE_PRECISION (mode);
break;
case CTZ:
hv = 0;
if (l1 != 0)
- lv = exact_log2 (l1 & -l1);
+ lv = ctz_hwi (l1);
else if (h1 != 0)
- lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
+ lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
- lv = GET_MODE_BITSIZE (mode);
+ lv = GET_MODE_PRECISION (mode);
break;
case POPCOUNT:
@@ -1276,7 +1634,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
case ZERO_EXTEND:
gcc_assert (op_mode != VOIDmode);
- if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
+ if (op_width > HOST_BITS_PER_WIDE_INT)
return 0;
hv = 0;
@@ -1285,15 +1643,13 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
case SIGN_EXTEND:
if (op_mode == VOIDmode
- || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
+ || op_width > HOST_BITS_PER_WIDE_INT)
return 0;
else
{
lv = l1 & GET_MODE_MASK (op_mode);
- if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
- && (lv & ((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
- lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
+ if (val_signbit_known_set_p (op_mode, lv))
+ lv |= ~GET_MODE_MASK (op_mode);
hv = HWI_SIGN_EXTEND (lv);
}
@@ -1310,7 +1666,8 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
}
else if (GET_CODE (op) == CONST_DOUBLE
- && SCALAR_FLOAT_MODE_P (mode))
+ && SCALAR_FLOAT_MODE_P (mode)
+ && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
{
REAL_VALUE_TYPE d, t;
REAL_VALUE_FROM_CONST_DOUBLE (d, op);
@@ -1324,16 +1681,19 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
d = t;
break;
case ABS:
- d = REAL_VALUE_ABS (d);
+ d = real_value_abs (&d);
break;
case NEG:
- d = REAL_VALUE_NEGATE (d);
+ d = real_value_negate (&d);
break;
case FLOAT_TRUNCATE:
d = real_value_truncate (mode, d);
break;
case FLOAT_EXTEND:
- /* All this does is change the mode. */
+ /* All this does is change the mode, unless changing
+ mode class. */
+ if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
+ real_convert (&d, mode, &d);
break;
case FIX:
real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
@@ -1399,13 +1759,14 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
/* Test against the signed lower bound. */
if (width > HOST_BITS_PER_WIDE_INT)
{
- th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
+ th = (unsigned HOST_WIDE_INT) (-1)
+ << (width - HOST_BITS_PER_WIDE_INT - 1);
tl = 0;
}
else
{
th = -1;
- tl = (HOST_WIDE_INT) -1 << (width - 1);
+ tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
}
real_from_integer (&t, VOIDmode, tl, th, 0);
if (REAL_VALUES_LESS (x, t))
@@ -1559,7 +1920,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
{
rtx tem, reversed, opleft, opright;
HOST_WIDE_INT val;
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
/* Even if we can't compute a constant result,
there are some cases worth simplifying. */
@@ -1593,11 +1954,15 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
to CONST_INT since overflow won't be computed properly if wider
than HOST_BITS_PER_WIDE_INT. */
- if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
- && GET_CODE (op1) == CONST_INT)
+ if ((GET_CODE (op0) == CONST
+ || GET_CODE (op0) == SYMBOL_REF
+ || GET_CODE (op0) == LABEL_REF)
+ && CONST_INT_P (op1))
return plus_constant (op0, INTVAL (op1));
- else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
- && GET_CODE (op0) == CONST_INT)
+ else if ((GET_CODE (op1) == CONST
+ || GET_CODE (op1) == SYMBOL_REF
+ || GET_CODE (op1) == LABEL_REF)
+ && CONST_INT_P (op0))
return plus_constant (op1, INTVAL (op0));
/* See if this is something like X * C - X or vice versa or
@@ -1608,53 +1973,51 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
if (SCALAR_INT_MODE_P (mode))
{
- HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
- unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
+ double_int coeff0, coeff1;
rtx lhs = op0, rhs = op1;
+ coeff0 = double_int_one;
+ coeff1 = double_int_one;
+
if (GET_CODE (lhs) == NEG)
{
- coeff0l = -1;
- coeff0h = -1;
+ coeff0 = double_int_minus_one;
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == MULT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (lhs, 1)))
{
- coeff0l = INTVAL (XEXP (lhs, 1));
- coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
+ coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == ASHIFT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT
- && INTVAL (XEXP (lhs, 1)) >= 0
+ && CONST_INT_P (XEXP (lhs, 1))
+ && INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
- coeff0h = 0;
+ coeff0 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
if (GET_CODE (rhs) == NEG)
{
- coeff1l = -1;
- coeff1h = -1;
+ coeff1 = double_int_minus_one;
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == MULT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (rhs, 1)))
{
- coeff1l = INTVAL (XEXP (rhs, 1));
- coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
+ coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == ASHIFT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (rhs, 1))
&& INTVAL (XEXP (rhs, 1)) >= 0
&& INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
- coeff1h = 0;
+ coeff1 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (rhs, 1)));
rhs = XEXP (rhs, 0);
}
@@ -1662,23 +2025,23 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
{
rtx orig = gen_rtx_PLUS (mode, op0, op1);
rtx coeff;
- unsigned HOST_WIDE_INT l;
- HOST_WIDE_INT h;
+ double_int val;
+ bool speed = optimize_function_for_speed_p (cfun);
- add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
- coeff = immed_double_const (l, h, mode);
+ val = double_int_add (coeff0, coeff1);
+ coeff = immed_double_int_const (val, mode);
tem = simplify_gen_binary (MULT, mode, lhs, coeff);
- return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
+ return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
? tem : 0;
}
}
/* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
- if ((GET_CODE (op1) == CONST_INT
+ if ((CONST_INT_P (op1)
|| GET_CODE (op1) == CONST_DOUBLE)
&& GET_CODE (op0) == XOR
- && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (CONST_INT_P (XEXP (op0, 1))
|| GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
&& mode_signbit_p (mode, op1))
return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
@@ -1722,9 +2085,9 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
return tem;
/* Reassociate floating point addition only when the user
- specifies unsafe math optimizations. */
+ specifies associative math operations. */
if (FLOAT_MODE_P (mode)
- && flag_unsafe_math_optimizations)
+ && flag_associative_math)
{
tem = simplify_associative_operation (code, mode, op0, op1);
if (tem)
@@ -1733,19 +2096,6 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
break;
case COMPARE:
-#ifdef HAVE_cc0
- /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
- using cc0, in which case we want to leave it as a COMPARE
- so we can distinguish it from a register-register-copy.
-
- In IEEE floating point, x-0 is not the same as x. */
-
- if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
- || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
- && trueop1 == CONST0_RTX (mode))
- return op0;
-#endif
-
/* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
|| (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
@@ -1770,14 +2120,10 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
case MINUS:
/* We can't assume x-x is 0 even with non-IEEE floating point,
but since it is zero except in very strange circumstances, we
- will treat it as zero with -funsafe-math-optimizations and
- -ffinite-math-only. */
+ will treat it as zero with -ffinite-math-only. */
if (rtx_equal_p (trueop0, trueop1)
&& ! side_effects_p (op0)
- && (! FLOAT_MODE_P (mode)
- || (flag_unsafe_math_optimizations
- && !HONOR_NANS (mode)
- && !HONOR_INFINITIES (mode))))
+ && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
return CONST0_RTX (mode);
/* Change subtraction from zero into negation. (0 - x) is the
@@ -1807,53 +2153,52 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
if (SCALAR_INT_MODE_P (mode))
{
- HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
- unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
+ double_int coeff0, negcoeff1;
rtx lhs = op0, rhs = op1;
+ coeff0 = double_int_one;
+ negcoeff1 = double_int_minus_one;
+
if (GET_CODE (lhs) == NEG)
{
- coeff0l = -1;
- coeff0h = -1;
+ coeff0 = double_int_minus_one;
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == MULT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (lhs, 1)))
{
- coeff0l = INTVAL (XEXP (lhs, 1));
- coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
+ coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == ASHIFT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (lhs, 1))
&& INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
- coeff0h = 0;
+ coeff0 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
if (GET_CODE (rhs) == NEG)
{
- negcoeff1l = 1;
- negcoeff1h = 0;
+ negcoeff1 = double_int_one;
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == MULT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (rhs, 1)))
{
- negcoeff1l = -INTVAL (XEXP (rhs, 1));
- negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
+ negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == ASHIFT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (rhs, 1))
&& INTVAL (XEXP (rhs, 1)) >= 0
&& INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
- negcoeff1h = -1;
+ negcoeff1 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (rhs, 1)));
+ negcoeff1 = double_int_neg (negcoeff1);
rhs = XEXP (rhs, 0);
}
@@ -1861,14 +2206,14 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
{
rtx orig = gen_rtx_MINUS (mode, op0, op1);
rtx coeff;
- unsigned HOST_WIDE_INT l;
- HOST_WIDE_INT h;
+ double_int val;
+ bool speed = optimize_function_for_speed_p (cfun);
- add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
- coeff = immed_double_const (l, h, mode);
+ val = double_int_add (coeff0, negcoeff1);
+ coeff = immed_double_int_const (val, mode);
tem = simplify_gen_binary (MULT, mode, lhs, coeff);
- return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
+ return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
? tem : 0;
}
}
@@ -1879,7 +2224,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
/* (-x - c) may be simplified as (-c - x). */
if (GET_CODE (op0) == NEG
- && (GET_CODE (op1) == CONST_INT
+ && (CONST_INT_P (op1)
|| GET_CODE (op1) == CONST_DOUBLE))
{
tem = simplify_unary_operation (NEG, mode, op1, mode);
@@ -1888,7 +2233,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
}
/* Don't let a relocatable value get a negative coeff. */
- if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
+ if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
return simplify_gen_binary (PLUS, mode,
op0,
neg_const_int (mode, op1));
@@ -1967,6 +2312,41 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
if (trueop1 == constm1_rtx)
return simplify_gen_unary (NEG, mode, op0, mode);
+ if (GET_CODE (op0) == NEG)
+ {
+ rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
+ /* If op1 is a MULT as well and simplify_unary_operation
+ just moved the NEG to the second operand, simplify_gen_binary
+ below could through simplify_associative_operation move
+ the NEG around again and recurse endlessly. */
+ if (temp
+ && GET_CODE (op1) == MULT
+ && GET_CODE (temp) == MULT
+ && XEXP (op1, 0) == XEXP (temp, 0)
+ && GET_CODE (XEXP (temp, 1)) == NEG
+ && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
+ temp = NULL_RTX;
+ if (temp)
+ return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
+ }
+ if (GET_CODE (op1) == NEG)
+ {
+ rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
+ /* If op0 is a MULT as well and simplify_unary_operation
+ just moved the NEG to the second operand, simplify_gen_binary
+ below could through simplify_associative_operation move
+ the NEG around again and recurse endlessly. */
+ if (temp
+ && GET_CODE (op0) == MULT
+ && GET_CODE (temp) == MULT
+ && XEXP (op0, 0) == XEXP (temp, 0)
+ && GET_CODE (XEXP (temp, 1)) == NEG
+ && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
+ temp = NULL_RTX;
+ if (temp)
+ return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
+ }
+
/* Maybe simplify x * 0 to 0. The reduction is not valid if
x is NaN, since x * 0 is then also NaN. Nor is it valid
when the mode has signed zeros, since multiplying a negative
@@ -1985,8 +2365,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
/* Convert multiply by constant power of two into shift unless
we are still generating RTL. This test is a kludge. */
- if (GET_CODE (trueop1) == CONST_INT
- && (val = exact_log2 (INTVAL (trueop1))) >= 0
+ if (CONST_INT_P (trueop1)
+ && (val = exact_log2 (UINTVAL (trueop1))) >= 0
/* If the mode is larger than the host word size, and the
uppermost bit is set, then this isn't a power of two due
to implicit sign extension. */
@@ -2007,6 +2387,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
/* x*2 is x+x and x*(-1) is -x */
if (GET_CODE (trueop1) == CONST_DOUBLE
&& SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
+ && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
&& GET_MODE (op0) == mode)
{
REAL_VALUE_TYPE d;
@@ -2048,11 +2429,9 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
break;
case IOR:
- if (trueop1 == const0_rtx)
+ if (trueop1 == CONST0_RTX (mode))
return op0;
- if (GET_CODE (trueop1) == CONST_INT
- && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
- == GET_MODE_MASK (mode)))
+ if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
return op0;
@@ -2064,15 +2443,15 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
return constm1_rtx;
/* (ior A C) is C if all bits of A that might be nonzero are on in C. */
- if (GET_CODE (op1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
+ if (CONST_INT_P (op1)
+ && HWI_COMPUTABLE_MODE_P (mode)
+ && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
return op1;
-
+
/* Canonicalize (X & C1) | C2. */
if (GET_CODE (op0) == AND
- && GET_CODE (trueop1) == CONST_INT
- && GET_CODE (XEXP (op0, 1)) == CONST_INT)
+ && CONST_INT_P (trueop1)
+ && CONST_INT_P (XEXP (op0, 1)))
{
HOST_WIDE_INT mask = GET_MODE_MASK (mode);
HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
@@ -2121,10 +2500,10 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
&& rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
- && GET_CODE (XEXP (opleft, 1)) == CONST_INT
- && GET_CODE (XEXP (opright, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (opleft, 1))
+ && CONST_INT_P (XEXP (opright, 1))
&& (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
/* Same, but for ashift that has been "simplified" to a wider mode
@@ -2140,27 +2519,27 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
< GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
&& rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
SUBREG_REG (XEXP (opright, 0)))
- && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
- && GET_CODE (XEXP (opright, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
+ && CONST_INT_P (XEXP (opright, 1))
&& (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
return gen_rtx_ROTATE (mode, XEXP (opright, 0),
XEXP (SUBREG_REG (opleft), 1));
/* If we have (ior (and (X C1) C2)), simplify this by making
C1 as small as possible if C1 actually changes. */
- if (GET_CODE (op1) == CONST_INT
- && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ if (CONST_INT_P (op1)
+ && (HWI_COMPUTABLE_MODE_P (mode)
|| INTVAL (op1) > 0)
&& GET_CODE (op0) == AND
- && GET_CODE (XEXP (op0, 1)) == CONST_INT
- && GET_CODE (op1) == CONST_INT
- && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
+ && CONST_INT_P (XEXP (op0, 1))
+ && CONST_INT_P (op1)
+ && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
return simplify_gen_binary (IOR, mode,
simplify_gen_binary
(AND, mode, XEXP (op0, 0),
- GEN_INT (INTVAL (XEXP (op0, 1))
- & ~INTVAL (op1))),
+ GEN_INT (UINTVAL (XEXP (op0, 1))
+ & ~UINTVAL (op1))),
op1);
/* If OP0 is (ashiftrt (plus ...) C), it might actually be
@@ -2168,10 +2547,10 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
the PLUS does not affect any of the bits in OP1: then we can do
the IOR as a PLUS and we can associate. This is valid if OP1
can be safely shifted left C bits. */
- if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
+ if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
&& GET_CODE (XEXP (op0, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
- && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
+ && CONST_INT_P (XEXP (op0, 1))
&& INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
{
int count = INTVAL (XEXP (op0, 1));
@@ -2190,11 +2569,9 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
break;
case XOR:
- if (trueop1 == const0_rtx)
+ if (trueop1 == CONST0_RTX (mode))
return op0;
- if (GET_CODE (trueop1) == CONST_INT
- && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
- == GET_MODE_MASK (mode)))
+ if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
return simplify_gen_unary (NOT, mode, op0, mode);
if (rtx_equal_p (trueop0, trueop1)
&& ! side_effects_p (op0)
@@ -2202,15 +2579,15 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
return CONST0_RTX (mode);
/* Canonicalize XOR of the most significant bit to PLUS. */
- if ((GET_CODE (op1) == CONST_INT
+ if ((CONST_INT_P (op1)
|| GET_CODE (op1) == CONST_DOUBLE)
&& mode_signbit_p (mode, op1))
return simplify_gen_binary (PLUS, mode, op0, op1);
/* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
- if ((GET_CODE (op1) == CONST_INT
+ if ((CONST_INT_P (op1)
|| GET_CODE (op1) == CONST_DOUBLE)
&& GET_CODE (op0) == PLUS
- && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (CONST_INT_P (XEXP (op0, 1))
|| GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
&& mode_signbit_p (mode, XEXP (op0, 1)))
return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
@@ -2221,7 +2598,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
convert them into an IOR. This helps to detect rotation encoded
using those methods and possibly other simplifications. */
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ if (HWI_COMPUTABLE_MODE_P (mode)
&& (nonzero_bits (op0, mode)
& nonzero_bits (op1, mode)) == 0)
return (simplify_gen_binary (IOR, mode, op0, op1));
@@ -2265,6 +2642,46 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
XEXP (op0, 1), mode),
op1);
+ /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
+ we can transform like this:
+ (A&B)^C == ~(A&B)&C | ~C&(A&B)
+ == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
+ == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
+ Attempt a few simplifications when B and C are both constants. */
+ if (GET_CODE (op0) == AND
+ && CONST_INT_P (op1)
+ && CONST_INT_P (XEXP (op0, 1)))
+ {
+ rtx a = XEXP (op0, 0);
+ rtx b = XEXP (op0, 1);
+ rtx c = op1;
+ HOST_WIDE_INT bval = INTVAL (b);
+ HOST_WIDE_INT cval = INTVAL (c);
+
+ rtx na_c
+ = simplify_binary_operation (AND, mode,
+ simplify_gen_unary (NOT, mode, a, mode),
+ c);
+ if ((~cval & bval) == 0)
+ {
+ /* Try to simplify ~A&C | ~B&C. */
+ if (na_c != NULL_RTX)
+ return simplify_gen_binary (IOR, mode, na_c,
+ GEN_INT (~bval & cval));
+ }
+ else
+ {
+ /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
+ if (na_c == const0_rtx)
+ {
+ rtx a_nc_b = simplify_gen_binary (AND, mode, a,
+ GEN_INT (~cval & bval));
+ return simplify_gen_binary (IOR, mode, a_nc_b,
+ GEN_INT (~bval & cval));
+ }
+ }
+ }
+
/* (xor (comparison foo bar) (const_int 1)) can become the reversed
comparison if STORE_FLAG_VALUE is 1. */
if (STORE_FLAG_VALUE == 1
@@ -2280,22 +2697,18 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
if (STORE_FLAG_VALUE == 1
&& trueop1 == const1_rtx
&& GET_CODE (op0) == LSHIFTRT
- && GET_CODE (XEXP (op0, 1)) == CONST_INT
- && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && CONST_INT_P (XEXP (op0, 1))
+ && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
/* (xor (comparison foo bar) (const_int sign-bit))
when STORE_FLAG_VALUE is the sign bit. */
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
- == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
+ if (val_signbit_p (mode, STORE_FLAG_VALUE)
&& trueop1 == const_true_rtx
&& COMPARISON_P (op0)
&& (reversed = reversed_comparison (op0, mode)))
return reversed;
- break;
-
tem = simplify_associative_operation (code, mode, op0, op1);
if (tem)
return tem;
@@ -2304,12 +2717,26 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
case AND:
if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
return trueop1;
- /* If we are turning off bits already known off in OP0, we need
- not do an AND. */
- if (GET_CODE (trueop1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
+ if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
return op0;
+ if (HWI_COMPUTABLE_MODE_P (mode))
+ {
+ HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
+ HOST_WIDE_INT nzop1;
+ if (CONST_INT_P (trueop1))
+ {
+ HOST_WIDE_INT val1 = INTVAL (trueop1);
+ /* If we are turning off bits already known off in OP0, we need
+ not do an AND. */
+ if ((nzop0 & ~val1) == 0)
+ return op0;
+ }
+ nzop1 = nonzero_bits (trueop1, mode);
+ /* If we are clearing all the nonzero bits, the result is zero. */
+ if ((nzop1 & nzop0) == 0
+ && !side_effects_p (op0) && !side_effects_p (op1))
+ return CONST0_RTX (mode);
+ }
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
&& GET_MODE_CLASS (mode) != MODE_CC)
return op0;
@@ -2324,10 +2751,10 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
there are no nonzero bits of C outside of X's mode. */
if ((GET_CODE (op0) == SIGN_EXTEND
|| GET_CODE (op0) == ZERO_EXTEND)
- && GET_CODE (trueop1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && CONST_INT_P (trueop1)
+ && HWI_COMPUTABLE_MODE_P (mode)
&& (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
- & INTVAL (trueop1)) == 0)
+ & UINTVAL (trueop1)) == 0)
{
enum machine_mode imode = GET_MODE (XEXP (op0, 0));
tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
@@ -2336,10 +2763,22 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
}
+ /* Transform (and (truncate X) C) into (truncate (and X C)). This way
+ we might be able to further simplify the AND with X and potentially
+ remove the truncation altogether. */
+ if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
+ {
+ rtx x = XEXP (op0, 0);
+ enum machine_mode xmode = GET_MODE (x);
+ tem = simplify_gen_binary (AND, xmode, x,
+ gen_int_mode (INTVAL (trueop1), xmode));
+ return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
+ }
+
/* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
if (GET_CODE (op0) == IOR
- && GET_CODE (trueop1) == CONST_INT
- && GET_CODE (XEXP (op0, 1)) == CONST_INT)
+ && CONST_INT_P (trueop1)
+ && CONST_INT_P (XEXP (op0, 1)))
{
HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
return simplify_gen_binary (IOR, mode,
@@ -2391,11 +2830,13 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
((A & N) + B) & M -> (A + B) & M
Similarly if (N & M) == 0,
((A | N) + B) & M -> (A + B) & M
- and for - instead of + and/or ^ instead of |. */
- if (GET_CODE (trueop1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ~INTVAL (trueop1)
- && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
+ and for - instead of + and/or ^ instead of |.
+ Also, if (N & M) == 0, then
+ (A +- N) & M -> A & M. */
+ if (CONST_INT_P (trueop1)
+ && HWI_COMPUTABLE_MODE_P (mode)
+ && ~UINTVAL (trueop1)
+ && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
&& (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
{
rtx pmop[2];
@@ -2404,21 +2845,25 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
pmop[0] = XEXP (op0, 0);
pmop[1] = XEXP (op0, 1);
+ if (CONST_INT_P (pmop[1])
+ && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
+ return simplify_gen_binary (AND, mode, pmop[0], op1);
+
for (which = 0; which < 2; which++)
{
tem = pmop[which];
switch (GET_CODE (tem))
{
case AND:
- if (GET_CODE (XEXP (tem, 1)) == CONST_INT
- && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
- == INTVAL (trueop1))
+ if (CONST_INT_P (XEXP (tem, 1))
+ && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
+ == UINTVAL (trueop1))
pmop[which] = XEXP (tem, 0);
break;
case IOR:
case XOR:
- if (GET_CODE (XEXP (tem, 1)) == CONST_INT
- && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
+ if (CONST_INT_P (XEXP (tem, 1))
+ && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
pmop[which] = XEXP (tem, 0);
break;
default:
@@ -2433,6 +2878,19 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
return simplify_gen_binary (code, mode, tem, op1);
}
}
+
+ /* (and X (ior (not X) Y) -> (and X Y) */
+ if (GET_CODE (op1) == IOR
+ && GET_CODE (XEXP (op1, 0)) == NOT
+ && op0 == XEXP (XEXP (op1, 0), 0))
+ return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
+
+ /* (and (ior (not X) Y) X) -> (and X Y) */
+ if (GET_CODE (op0) == IOR
+ && GET_CODE (XEXP (op0, 0)) == NOT
+ && op1 == XEXP (XEXP (op0, 0), 0))
+ return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
+
tem = simplify_associative_operation (code, mode, op0, op1);
if (tem)
return tem;
@@ -2450,8 +2908,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
if (trueop1 == CONST1_RTX (mode))
return rtl_hooks.gen_lowpart_no_emit (mode, op0);
/* Convert divide by power of two into shift. */
- if (GET_CODE (trueop1) == CONST_INT
- && (val = exact_log2 (INTVAL (trueop1))) > 0)
+ if (CONST_INT_P (trueop1)
+ && (val = exact_log2 (UINTVAL (trueop1))) > 0)
return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
break;
@@ -2485,8 +2943,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
return simplify_gen_unary (NEG, mode, op0, mode);
/* Change FP division by a constant into multiplication.
- Only do this with -funsafe-math-optimizations. */
- if (flag_unsafe_math_optimizations
+ Only do this with -freciprocal-math. */
+ if (flag_reciprocal_math
&& !REAL_VALUES_EQUAL (d, dconst0))
{
REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
@@ -2495,10 +2953,11 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
}
}
}
- else
+ else if (SCALAR_INT_MODE_P (mode))
{
/* 0/x is 0 (or x&0 if x has side-effects). */
- if (trueop0 == CONST0_RTX (mode))
+ if (trueop0 == CONST0_RTX (mode)
+ && !cfun->can_throw_non_call_exceptions)
{
if (side_effects_p (op1))
return simplify_gen_binary (AND, mode, op1, trueop0);
@@ -2532,8 +2991,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
return CONST0_RTX (mode);
}
/* Implement modulus by power of two as AND. */
- if (GET_CODE (trueop1) == CONST_INT
- && exact_log2 (INTVAL (trueop1)) > 0)
+ if (CONST_INT_P (trueop1)
+ && exact_log2 (UINTVAL (trueop1)) > 0)
return simplify_gen_binary (AND, mode, op0,
GEN_INT (INTVAL (op1) - 1));
break;
@@ -2563,19 +3022,27 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
return op0;
/* Rotating ~0 always results in ~0. */
- if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
- && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
+ if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
+ && UINTVAL (trueop0) == GET_MODE_MASK (mode)
&& ! side_effects_p (op1))
return op0;
+ canonicalize_shift:
+ if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
+ {
+ val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
+ if (val != INTVAL (op1))
+ return simplify_gen_binary (code, mode, op0, GEN_INT (val));
+ }
break;
case ASHIFT:
case SS_ASHIFT:
+ case US_ASHIFT:
if (trueop1 == CONST0_RTX (mode))
return op0;
if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
return op0;
- break;
+ goto canonicalize_shift;
case LSHIFTRT:
if (trueop1 == CONST0_RTX (mode))
@@ -2584,7 +3051,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
return op0;
/* Optimize (lshiftrt (clz X) C) as (eq X 0). */
if (GET_CODE (op0) == CLZ
- && GET_CODE (trueop1) == CONST_INT
+ && CONST_INT_P (trueop1)
&& STORE_FLAG_VALUE == 1
&& INTVAL (trueop1) < (HOST_WIDE_INT)width)
{
@@ -2592,17 +3059,16 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
unsigned HOST_WIDE_INT zero_val = 0;
if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
- && zero_val == GET_MODE_BITSIZE (imode)
+ && zero_val == GET_MODE_PRECISION (imode)
&& INTVAL (trueop1) == exact_log2 (zero_val))
return simplify_gen_relational (EQ, mode, imode,
XEXP (op0, 0), const0_rtx);
}
- break;
+ goto canonicalize_shift;
case SMIN:
if (width <= HOST_BITS_PER_WIDE_INT
- && GET_CODE (trueop1) == CONST_INT
- && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
+ && mode_signbit_p (mode, trueop1)
&& ! side_effects_p (op0))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
@@ -2614,9 +3080,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
case SMAX:
if (width <= HOST_BITS_PER_WIDE_INT
- && GET_CODE (trueop1) == CONST_INT
- && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
- == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
+ && CONST_INT_P (trueop1)
+ && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
&& ! side_effects_p (op0))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
@@ -2650,6 +3115,10 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
case US_PLUS:
case SS_MINUS:
case US_MINUS:
+ case SS_MULT:
+ case US_MULT:
+ case SS_DIV:
+ case US_DIV:
/* ??? There are simplifications that can be done. */
return 0;
@@ -2660,11 +3129,93 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
gcc_assert (GET_CODE (trueop1) == PARALLEL);
gcc_assert (XVECLEN (trueop1, 0) == 1);
- gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
+ gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
if (GET_CODE (trueop0) == CONST_VECTOR)
return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
(trueop1, 0, 0)));
+
+ /* Extract a scalar element from a nested VEC_SELECT expression
+ (with optional nested VEC_CONCAT expression). Some targets
+ (i386) extract scalar element from a vector using chain of
+ nested VEC_SELECT expressions. When input operand is a memory
+ operand, this operation can be simplified to a simple scalar
+ load from an offset memory address. */
+ if (GET_CODE (trueop0) == VEC_SELECT)
+ {
+ rtx op0 = XEXP (trueop0, 0);
+ rtx op1 = XEXP (trueop0, 1);
+
+ enum machine_mode opmode = GET_MODE (op0);
+ int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
+ int n_elts = GET_MODE_SIZE (opmode) / elt_size;
+
+ int i = INTVAL (XVECEXP (trueop1, 0, 0));
+ int elem;
+
+ rtvec vec;
+ rtx tmp_op, tmp;
+
+ gcc_assert (GET_CODE (op1) == PARALLEL);
+ gcc_assert (i < n_elts);
+
+ /* Select element, pointed by nested selector. */
+ elem = INTVAL (XVECEXP (op1, 0, i));
+
+ /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
+ if (GET_CODE (op0) == VEC_CONCAT)
+ {
+ rtx op00 = XEXP (op0, 0);
+ rtx op01 = XEXP (op0, 1);
+
+ enum machine_mode mode00, mode01;
+ int n_elts00, n_elts01;
+
+ mode00 = GET_MODE (op00);
+ mode01 = GET_MODE (op01);
+
+ /* Find out number of elements of each operand. */
+ if (VECTOR_MODE_P (mode00))
+ {
+ elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
+ n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
+ }
+ else
+ n_elts00 = 1;
+
+ if (VECTOR_MODE_P (mode01))
+ {
+ elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
+ n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
+ }
+ else
+ n_elts01 = 1;
+
+ gcc_assert (n_elts == n_elts00 + n_elts01);
+
+ /* Select correct operand of VEC_CONCAT
+ and adjust selector. */
+ if (elem < n_elts01)
+ tmp_op = op00;
+ else
+ {
+ tmp_op = op01;
+ elem -= n_elts00;
+ }
+ }
+ else
+ tmp_op = op0;
+
+ vec = rtvec_alloc (1);
+ RTVEC_ELT (vec, 0) = GEN_INT (elem);
+
+ tmp = gen_rtx_fmt_ee (code, mode,
+ tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
+ return tmp;
+ }
+ if (GET_CODE (trueop0) == VEC_DUPLICATE
+ && GET_MODE (XEXP (trueop0, 0)) == mode)
+ return XEXP (trueop0, 0);
}
else
{
@@ -2685,7 +3236,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
{
rtx x = XVECEXP (trueop1, 0, i);
- gcc_assert (GET_CODE (x) == CONST_INT);
+ gcc_assert (CONST_INT_P (x));
RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
INTVAL (x));
}
@@ -2695,7 +3246,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
}
if (XVECLEN (trueop1, 0) == 1
- && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
+ && CONST_INT_P (XVECEXP (trueop1, 0, 0))
&& GET_CODE (trueop0) == VEC_CONCAT)
{
rtx vec = trueop0;
@@ -2747,10 +3298,10 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
gcc_assert (GET_MODE_INNER (mode) == op1_mode);
if ((GET_CODE (trueop0) == CONST_VECTOR
- || GET_CODE (trueop0) == CONST_INT
+ || CONST_INT_P (trueop0)
|| GET_CODE (trueop0) == CONST_DOUBLE)
&& (GET_CODE (trueop1) == CONST_VECTOR
- || GET_CODE (trueop1) == CONST_INT
+ || CONST_INT_P (trueop1)
|| GET_CODE (trueop1) == CONST_DOUBLE))
{
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
@@ -2798,7 +3349,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
{
HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
HOST_WIDE_INT val;
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
if (VECTOR_MODE_P (mode)
&& code != VEC_CONCAT
@@ -2830,7 +3381,12 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
if (VECTOR_MODE_P (mode)
&& code == VEC_CONCAT
- && CONSTANT_P (op0) && CONSTANT_P (op1))
+ && (CONST_INT_P (op0)
+ || GET_CODE (op0) == CONST_DOUBLE
+ || GET_CODE (op0) == CONST_FIXED)
+ && (CONST_INT_P (op1)
+ || GET_CODE (op1) == CONST_DOUBLE
+ || GET_CODE (op1) == CONST_FIXED))
{
unsigned n_elts = GET_MODE_NUNITS (mode);
rtvec v = rtvec_alloc (n_elts);
@@ -2976,8 +3532,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
is unable to accurately represent the result. */
if ((flag_rounding_math
- || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
- && !flag_unsafe_math_optimizations))
+ || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
&& (inexact || !real_identical (&result, &value)))
return NULL_RTX;
@@ -2987,144 +3542,127 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
/* We can fold some multi-word operations. */
if (GET_MODE_CLASS (mode) == MODE_INT
- && width == HOST_BITS_PER_WIDE_INT * 2
- && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
- && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
+ && width == HOST_BITS_PER_DOUBLE_INT
+ && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
+ && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
{
- unsigned HOST_WIDE_INT l1, l2, lv, lt;
- HOST_WIDE_INT h1, h2, hv, ht;
-
- if (GET_CODE (op0) == CONST_DOUBLE)
- l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
- else
- l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
+ double_int o0, o1, res, tmp;
- if (GET_CODE (op1) == CONST_DOUBLE)
- l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
- else
- l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
+ o0 = rtx_to_double_int (op0);
+ o1 = rtx_to_double_int (op1);
switch (code)
{
case MINUS:
/* A - B == A + (-B). */
- neg_double (l2, h2, &lv, &hv);
- l2 = lv, h2 = hv;
+ o1 = double_int_neg (o1);
/* Fall through.... */
case PLUS:
- add_double (l1, h1, l2, h2, &lv, &hv);
+ res = double_int_add (o0, o1);
break;
case MULT:
- mul_double (l1, h1, l2, h2, &lv, &hv);
+ res = double_int_mul (o0, o1);
break;
case DIV:
- if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
- &lv, &hv, <, &ht))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 0,
+ o0.low, o0.high, o1.low, o1.high,
+ &res.low, &res.high,
+ &tmp.low, &tmp.high))
return 0;
break;
case MOD:
- if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
- <, &ht, &lv, &hv))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 0,
+ o0.low, o0.high, o1.low, o1.high,
+ &tmp.low, &tmp.high,
+ &res.low, &res.high))
return 0;
break;
case UDIV:
- if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
- &lv, &hv, <, &ht))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 1,
+ o0.low, o0.high, o1.low, o1.high,
+ &res.low, &res.high,
+ &tmp.low, &tmp.high))
return 0;
break;
case UMOD:
- if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
- <, &ht, &lv, &hv))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 1,
+ o0.low, o0.high, o1.low, o1.high,
+ &tmp.low, &tmp.high,
+ &res.low, &res.high))
return 0;
break;
case AND:
- lv = l1 & l2, hv = h1 & h2;
+ res = double_int_and (o0, o1);
break;
case IOR:
- lv = l1 | l2, hv = h1 | h2;
+ res = double_int_ior (o0, o1);
break;
case XOR:
- lv = l1 ^ l2, hv = h1 ^ h2;
+ res = double_int_xor (o0, o1);
break;
case SMIN:
- if (h1 < h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- < (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_smin (o0, o1);
break;
case SMAX:
- if (h1 > h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- > (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_smax (o0, o1);
break;
case UMIN:
- if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- < (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_umin (o0, o1);
break;
case UMAX:
- if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- > (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_umax (o0, o1);
break;
case LSHIFTRT: case ASHIFTRT:
case ASHIFT:
case ROTATE: case ROTATERT:
- if (SHIFT_COUNT_TRUNCATED)
- l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
-
- if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
- return 0;
-
- if (code == LSHIFTRT || code == ASHIFTRT)
- rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
- code == ASHIFTRT);
- else if (code == ASHIFT)
- lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
- else if (code == ROTATE)
- lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
- else /* code == ROTATERT */
- rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
+ {
+ unsigned HOST_WIDE_INT cnt;
+
+ if (SHIFT_COUNT_TRUNCATED)
+ o1 = double_int_zext (o1, GET_MODE_PRECISION (mode));
+
+ if (!double_int_fits_in_uhwi_p (o1)
+ || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
+ return 0;
+
+ cnt = double_int_to_uhwi (o1);
+
+ if (code == LSHIFTRT || code == ASHIFTRT)
+ res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
+ code == ASHIFTRT);
+ else if (code == ASHIFT)
+ res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
+ true);
+ else if (code == ROTATE)
+ res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
+ else /* code == ROTATERT */
+ res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
+ }
break;
default:
return 0;
}
- return immed_double_const (lv, hv, mode);
+ return immed_double_int_const (res, mode);
}
- if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
+ if (CONST_INT_P (op0) && CONST_INT_P (op1)
&& width <= HOST_BITS_PER_WIDE_INT && width != 0)
{
/* Get the integer argument values in two forms:
@@ -3135,83 +3673,87 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
if (width < HOST_BITS_PER_WIDE_INT)
{
- arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
- arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
+ arg0 &= GET_MODE_MASK (mode);
+ arg1 &= GET_MODE_MASK (mode);
arg0s = arg0;
- if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
- arg0s |= ((HOST_WIDE_INT) (-1) << width);
+ if (val_signbit_known_set_p (mode, arg0s))
+ arg0s |= ~GET_MODE_MASK (mode);
- arg1s = arg1;
- if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
- arg1s |= ((HOST_WIDE_INT) (-1) << width);
+ arg1s = arg1;
+ if (val_signbit_known_set_p (mode, arg1s))
+ arg1s |= ~GET_MODE_MASK (mode);
}
else
{
arg0s = arg0;
arg1s = arg1;
}
-
+
/* Compute the value of the arithmetic. */
-
+
switch (code)
{
case PLUS:
val = arg0s + arg1s;
break;
-
+
case MINUS:
val = arg0s - arg1s;
break;
-
+
case MULT:
val = arg0s * arg1s;
break;
-
+
case DIV:
if (arg1s == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = arg0s / arg1s;
break;
-
+
case MOD:
if (arg1s == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = arg0s % arg1s;
break;
-
+
case UDIV:
if (arg1 == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = (unsigned HOST_WIDE_INT) arg0 / arg1;
break;
-
+
case UMOD:
if (arg1 == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = (unsigned HOST_WIDE_INT) arg0 % arg1;
break;
-
+
case AND:
val = arg0 & arg1;
break;
-
+
case IOR:
val = arg0 | arg1;
break;
-
+
case XOR:
val = arg0 ^ arg1;
break;
-
+
case LSHIFTRT:
case ASHIFT:
case ASHIFTRT:
@@ -3226,64 +3768,69 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
return 0;
-
+
val = (code == ASHIFT
? ((unsigned HOST_WIDE_INT) arg0) << arg1
: ((unsigned HOST_WIDE_INT) arg0) >> arg1);
-
+
/* Sign-extend the result for arithmetic right shifts. */
if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
- val |= ((HOST_WIDE_INT) -1) << (width - arg1);
+ val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
break;
-
+
case ROTATERT:
if (arg1 < 0)
return 0;
-
+
arg1 %= width;
val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
| (((unsigned HOST_WIDE_INT) arg0) >> arg1));
break;
-
+
case ROTATE:
if (arg1 < 0)
return 0;
-
+
arg1 %= width;
val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
| (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
break;
-
+
case COMPARE:
/* Do nothing here. */
return 0;
-
+
case SMIN:
val = arg0s <= arg1s ? arg0s : arg1s;
break;
-
+
case UMIN:
val = ((unsigned HOST_WIDE_INT) arg0
<= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
break;
-
+
case SMAX:
val = arg0s > arg1s ? arg0s : arg1s;
break;
-
+
case UMAX:
val = ((unsigned HOST_WIDE_INT) arg0
> (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
break;
-
+
case SS_PLUS:
case US_PLUS:
case SS_MINUS:
case US_MINUS:
+ case SS_MULT:
+ case US_MULT:
+ case SS_DIV:
+ case US_DIV:
case SS_ASHIFT:
+ case US_ASHIFT:
/* ??? There are simplifications that can be done. */
return 0;
-
+
default:
gcc_unreachable ();
}
@@ -3478,10 +4025,6 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
ops[j + 1] = save;
}
- /* This is only useful the first time through. */
- if (!canonicalized)
- return NULL_RTX;
-
changed = 0;
for (i = n_ops - 1; i > 0; i--)
for (j = i - 1; j >= 0; j--)
@@ -3502,8 +4045,8 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
else if (swap_commutative_operands_p (lhs, rhs))
tem = lhs, lhs = rhs, rhs = tem;
- if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
- && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
+ if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
+ && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
{
rtx tem_lhs, tem_rhs;
@@ -3516,7 +4059,7 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
}
else
tem = simplify_binary_operation (ncode, mode, lhs, rhs);
-
+
/* Reject "simplifications" that just wrap the two
arguments in a CONST. Failure to do so can result
in infinite recursion with simplify_binary_operation
@@ -3530,17 +4073,22 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
lneg &= rneg;
if (GET_CODE (tem) == NEG)
tem = XEXP (tem, 0), lneg = !lneg;
- if (GET_CODE (tem) == CONST_INT && lneg)
+ if (CONST_INT_P (tem) && lneg)
tem = neg_const_int (mode, tem), lneg = 0;
ops[i].op = tem;
ops[i].neg = lneg;
ops[j].op = NULL_RTX;
changed = 1;
+ canonicalized = 1;
}
}
}
+ /* If nothing changed, fail. */
+ if (!canonicalized)
+ return NULL_RTX;
+
/* Pack all the operands to the lower-numbered entries. */
for (i = 0, j = 0; j < n_ops; j++)
if (ops[j].op)
@@ -3554,11 +4102,11 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
/* Create (minus -C X) instead of (neg (const (plus X C))). */
if (n_ops == 2
- && GET_CODE (ops[1].op) == CONST_INT
+ && CONST_INT_P (ops[1].op)
&& CONSTANT_P (ops[0].op)
&& ops[0].neg)
return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
-
+
/* We suppressed creation of trivial CONST expressions in the
combination loop to avoid recursion. Create one manually now.
The combination loop should have ensured that there is exactly
@@ -3566,7 +4114,7 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
in the array and that any other constant will be next-to-last. */
if (n_ops > 1
- && GET_CODE (ops[n_ops - 1].op) == CONST_INT
+ && CONST_INT_P (ops[n_ops - 1].op)
&& CONSTANT_P (ops[n_ops - 2].op))
{
rtx value = ops[n_ops - 1].op;
@@ -3601,7 +4149,7 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
/* Check whether an operand is suitable for calling simplify_plus_minus. */
static bool
-plus_minus_operand_p (rtx x)
+plus_minus_operand_p (const_rtx x)
{
return GET_CODE (x) == PLUS
|| GET_CODE (x) == MINUS
@@ -3645,7 +4193,7 @@ simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
}
#else
return NULL_RTX;
-#endif
+#endif
}
if (VECTOR_MODE_P (mode))
{
@@ -3683,8 +4231,8 @@ simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
/* If op0 is a compare, extract the comparison arguments from it. */
if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
- return simplify_relational_operation (code, mode, VOIDmode,
- XEXP (op0, 0), XEXP (op0, 1));
+ return simplify_gen_relational (code, mode, VOIDmode,
+ XEXP (op0, 0), XEXP (op0, 1));
if (GET_MODE_CLASS (cmp_mode) == MODE_CC
|| CC0_P (op0))
@@ -3729,6 +4277,29 @@ simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
}
}
+ /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
+ (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
+ if ((code == LTU || code == GEU)
+ && GET_CODE (op0) == PLUS
+ && CONST_INT_P (XEXP (op0, 1))
+ && (rtx_equal_p (op1, XEXP (op0, 0))
+ || rtx_equal_p (op1, XEXP (op0, 1))))
+ {
+ rtx new_cmp
+ = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
+ return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
+ cmp_mode, XEXP (op0, 0), new_cmp);
+ }
+
+ /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
+ if ((code == LTU || code == GEU)
+ && GET_CODE (op0) == PLUS
+ && rtx_equal_p (op1, XEXP (op0, 1))
+ /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
+ && !rtx_equal_p (op1, XEXP (op0, 0)))
+ return simplify_gen_relational (code, mode, cmp_mode, op0,
+ copy_rtx (XEXP (op0, 0)));
+
if (op1 == const0_rtx)
{
/* Canonicalize (GTU x 0) as (NE x 0). */
@@ -3781,10 +4352,20 @@ simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
{
rtx x = XEXP (op0, 0);
rtx c = XEXP (op0, 1);
+ enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
+ rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
+
+ /* Detect an infinite recursive condition, where we oscillate at this
+ simplification case between:
+ A + B == C <---> C - B == A,
+ where A, B, and C are all constants with non-simplifiable expressions,
+ usually SYMBOL_REFs. */
+ if (GET_CODE (tem) == invcode
+ && CONSTANT_P (x)
+ && rtx_equal_p (c, XEXP (tem, 1)))
+ return NULL_RTX;
- c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
- cmp_mode, op1, c);
- return simplify_gen_relational (code, mode, cmp_mode, x, c);
+ return simplify_gen_relational (code, mode, cmp_mode, x, tem);
}
/* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
@@ -3828,9 +4409,9 @@ simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
/* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
if ((code == EQ || code == NE)
&& op0code == XOR
- && (GET_CODE (op1) == CONST_INT
+ && (CONST_INT_P (op1)
|| GET_CODE (op1) == CONST_DOUBLE)
- && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (CONST_INT_P (XEXP (op0, 1))
|| GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
simplify_gen_binary (XOR, cmp_mode,
@@ -3860,6 +4441,67 @@ simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
return NULL_RTX;
}
+enum
+{
+ CMP_EQ = 1,
+ CMP_LT = 2,
+ CMP_GT = 4,
+ CMP_LTU = 8,
+ CMP_GTU = 16
+};
+
+
+/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
+ KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
+ For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
+ logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
+ For floating-point comparisons, assume that the operands were ordered. */
+
+static rtx
+comparison_result (enum rtx_code code, int known_results)
+{
+ switch (code)
+ {
+ case EQ:
+ case UNEQ:
+ return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
+ case NE:
+ case LTGT:
+ return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
+
+ case LT:
+ case UNLT:
+ return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
+ case GE:
+ case UNGE:
+ return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
+
+ case GT:
+ case UNGT:
+ return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
+ case LE:
+ case UNLE:
+ return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
+
+ case LTU:
+ return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
+ case GEU:
+ return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
+
+ case GTU:
+ return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
+ case LEU:
+ return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
+
+ case ORDERED:
+ return const_true_rtx;
+ case UNORDERED:
+ return const0_rtx;
+ default:
+ gcc_unreachable ();
+ }
+}
+
/* Check if the given comparison (done in the given MODE) is actually a
tautology or a contradiction.
If no simplification is possible, this function returns zero.
@@ -3870,7 +4512,6 @@ simplify_const_relational_operation (enum rtx_code code,
enum machine_mode mode,
rtx op0, rtx op1)
{
- int equal, op0lt, op0ltu, op1lt, op1ltu;
rtx tem;
rtx trueop0;
rtx trueop1;
@@ -3920,8 +4561,8 @@ simplify_const_relational_operation (enum rtx_code code,
if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
&& (code == EQ || code == NE)
- && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
- && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
+ && ! ((REG_P (op0) || CONST_INT_P (trueop0))
+ && (REG_P (op1) || CONST_INT_P (trueop1)))
&& 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
/* We cannot do this if tem is a nonzero address. */
&& ! nonzero_address_p (tem))
@@ -3935,17 +4576,22 @@ simplify_const_relational_operation (enum rtx_code code,
return const0_rtx;
/* For modes without NaNs, if the two operands are equal, we know the
- result except if they have side-effects. */
- if (! HONOR_NANS (GET_MODE (trueop0))
+ result except if they have side-effects. Even with NaNs we know
+ the result of unordered comparisons and, if signaling NaNs are
+ irrelevant, also the result of LT/GT/LTGT. */
+ if ((! HONOR_NANS (GET_MODE (trueop0))
+ || code == UNEQ || code == UNLE || code == UNGE
+ || ((code == LT || code == GT || code == LTGT)
+ && ! HONOR_SNANS (GET_MODE (trueop0))))
&& rtx_equal_p (trueop0, trueop1)
&& ! side_effects_p (trueop0))
- equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
+ return comparison_result (code, CMP_EQ);
/* If the operands are floating-point constants, see if we can fold
the result. */
- else if (GET_CODE (trueop0) == CONST_DOUBLE
- && GET_CODE (trueop1) == CONST_DOUBLE
- && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
+ if (GET_CODE (trueop0) == CONST_DOUBLE
+ && GET_CODE (trueop1) == CONST_DOUBLE
+ && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
{
REAL_VALUE_TYPE d0, d1;
@@ -3976,19 +4622,19 @@ simplify_const_relational_operation (enum rtx_code code,
return 0;
}
- equal = REAL_VALUES_EQUAL (d0, d1);
- op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
- op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
+ return comparison_result (code,
+ (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
+ REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
}
/* Otherwise, see if the operands are both integers. */
- else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
- && (GET_CODE (trueop0) == CONST_DOUBLE
- || GET_CODE (trueop0) == CONST_INT)
- && (GET_CODE (trueop1) == CONST_DOUBLE
- || GET_CODE (trueop1) == CONST_INT))
+ if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
+ && (GET_CODE (trueop0) == CONST_DOUBLE
+ || CONST_INT_P (trueop0))
+ && (GET_CODE (trueop1) == CONST_DOUBLE
+ || CONST_INT_P (trueop1)))
{
- int width = GET_MODE_BITSIZE (mode);
+ int width = GET_MODE_PRECISION (mode);
HOST_WIDE_INT l0s, h0s, l1s, h1s;
unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
@@ -4019,204 +4665,244 @@ simplify_const_relational_operation (enum rtx_code code,
we have to sign or zero-extend the values. */
if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
{
- l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
- l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
+ l0u &= GET_MODE_MASK (mode);
+ l1u &= GET_MODE_MASK (mode);
- if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
- l0s |= ((HOST_WIDE_INT) (-1) << width);
+ if (val_signbit_known_set_p (mode, l0s))
+ l0s |= ~GET_MODE_MASK (mode);
- if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
- l1s |= ((HOST_WIDE_INT) (-1) << width);
+ if (val_signbit_known_set_p (mode, l1s))
+ l1s |= ~GET_MODE_MASK (mode);
}
if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
- equal = (h0u == h1u && l0u == l1u);
- op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
- op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
- op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
- op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
+ if (h0u == h1u && l0u == l1u)
+ return comparison_result (code, CMP_EQ);
+ else
+ {
+ int cr;
+ cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
+ cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
+ return comparison_result (code, cr);
+ }
}
- /* Otherwise, there are some code-specific tests we can make. */
- else
+ /* Optimize comparisons with upper and lower bounds. */
+ if (HWI_COMPUTABLE_MODE_P (mode)
+ && CONST_INT_P (trueop1))
{
- /* Optimize comparisons with upper and lower bounds. */
- if (SCALAR_INT_MODE_P (mode)
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
- {
- rtx mmin, mmax;
- int sign;
-
- if (code == GEU
- || code == LEU
- || code == GTU
- || code == LTU)
- sign = 0;
- else
- sign = 1;
+ int sign;
+ unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
+ HOST_WIDE_INT val = INTVAL (trueop1);
+ HOST_WIDE_INT mmin, mmax;
+
+ if (code == GEU
+ || code == LEU
+ || code == GTU
+ || code == LTU)
+ sign = 0;
+ else
+ sign = 1;
- get_mode_bounds (mode, sign, mode, &mmin, &mmax);
+ /* Get a reduced range if the sign bit is zero. */
+ if (nonzero <= (GET_MODE_MASK (mode) >> 1))
+ {
+ mmin = 0;
+ mmax = nonzero;
+ }
+ else
+ {
+ rtx mmin_rtx, mmax_rtx;
+ get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
- tem = NULL_RTX;
- switch (code)
+ mmin = INTVAL (mmin_rtx);
+ mmax = INTVAL (mmax_rtx);
+ if (sign)
{
- case GEU:
- case GE:
- /* x >= min is always true. */
- if (rtx_equal_p (trueop1, mmin))
- tem = const_true_rtx;
- else
- break;
+ unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
- case LEU:
- case LE:
- /* x <= max is always true. */
- if (rtx_equal_p (trueop1, mmax))
- tem = const_true_rtx;
- break;
-
- case GTU:
- case GT:
- /* x > max is always false. */
- if (rtx_equal_p (trueop1, mmax))
- tem = const0_rtx;
- break;
-
- case LTU:
- case LT:
- /* x < min is always false. */
- if (rtx_equal_p (trueop1, mmin))
- tem = const0_rtx;
- break;
-
- default:
- break;
+ mmin >>= (sign_copies - 1);
+ mmax >>= (sign_copies - 1);
}
- if (tem == const0_rtx
- || tem == const_true_rtx)
- return tem;
}
switch (code)
{
+ /* x >= y is always true for y <= mmin, always false for y > mmax. */
+ case GEU:
+ if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
+ return const_true_rtx;
+ if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
+ return const0_rtx;
+ break;
+ case GE:
+ if (val <= mmin)
+ return const_true_rtx;
+ if (val > mmax)
+ return const0_rtx;
+ break;
+
+ /* x <= y is always true for y >= mmax, always false for y < mmin. */
+ case LEU:
+ if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
+ return const_true_rtx;
+ if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
+ return const0_rtx;
+ break;
+ case LE:
+ if (val >= mmax)
+ return const_true_rtx;
+ if (val < mmin)
+ return const0_rtx;
+ break;
+
case EQ:
- if (trueop1 == const0_rtx && nonzero_address_p (op0))
+ /* x == y is always false for y out of range. */
+ if (val < mmin || val > mmax)
return const0_rtx;
break;
- case NE:
- if (trueop1 == const0_rtx && nonzero_address_p (op0))
+ /* x > y is always false for y >= mmax, always true for y < mmin. */
+ case GTU:
+ if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
+ return const0_rtx;
+ if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
+ return const_true_rtx;
+ break;
+ case GT:
+ if (val >= mmax)
+ return const0_rtx;
+ if (val < mmin)
return const_true_rtx;
break;
+ /* x < y is always false for y <= mmin, always true for y > mmax. */
+ case LTU:
+ if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
+ return const0_rtx;
+ if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
+ return const_true_rtx;
+ break;
case LT:
- /* Optimize abs(x) < 0.0. */
- if (trueop1 == CONST0_RTX (mode)
- && !HONOR_SNANS (mode)
- && (!INTEGRAL_MODE_P (mode)
- || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
+ if (val <= mmin)
+ return const0_rtx;
+ if (val > mmax)
+ return const_true_rtx;
+ break;
+
+ case NE:
+ /* x != y is always true for y out of range. */
+ if (val < mmin || val > mmax)
+ return const_true_rtx;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ /* Optimize integer comparisons with zero. */
+ if (trueop1 == const0_rtx)
+ {
+ /* Some addresses are known to be nonzero. We don't know
+ their sign, but equality comparisons are known. */
+ if (nonzero_address_p (trueop0))
+ {
+ if (code == EQ || code == LEU)
+ return const0_rtx;
+ if (code == NE || code == GTU)
+ return const_true_rtx;
+ }
+
+ /* See if the first operand is an IOR with a constant. If so, we
+ may be able to determine the result of this comparison. */
+ if (GET_CODE (op0) == IOR)
+ {
+ rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
+ if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
{
- tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
- : trueop0;
- if (GET_CODE (tem) == ABS)
+ int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
+ int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
+ && (UINTVAL (inner_const)
+ & ((unsigned HOST_WIDE_INT) 1
+ << sign_bitnum)));
+
+ switch (code)
{
- if (INTEGRAL_MODE_P (mode)
- && (issue_strict_overflow_warning
- (WARN_STRICT_OVERFLOW_CONDITIONAL)))
- warning (OPT_Wstrict_overflow,
- ("assuming signed overflow does not occur when "
- "assuming abs (x) < 0 is false"));
+ case EQ:
+ case LEU:
return const0_rtx;
+ case NE:
+ case GTU:
+ return const_true_rtx;
+ case LT:
+ case LE:
+ if (has_sign)
+ return const_true_rtx;
+ break;
+ case GT:
+ case GE:
+ if (has_sign)
+ return const0_rtx;
+ break;
+ default:
+ break;
}
}
+ }
+ }
- /* Optimize popcount (x) < 0. */
- if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
- return const_true_rtx;
+ /* Optimize comparison of ABS with zero. */
+ if (trueop1 == CONST0_RTX (mode)
+ && (GET_CODE (trueop0) == ABS
+ || (GET_CODE (trueop0) == FLOAT_EXTEND
+ && GET_CODE (XEXP (trueop0, 0)) == ABS)))
+ {
+ switch (code)
+ {
+ case LT:
+ /* Optimize abs(x) < 0.0. */
+ if (!HONOR_SNANS (mode)
+ && (!INTEGRAL_MODE_P (mode)
+ || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
+ {
+ if (INTEGRAL_MODE_P (mode)
+ && (issue_strict_overflow_warning
+ (WARN_STRICT_OVERFLOW_CONDITIONAL)))
+ warning (OPT_Wstrict_overflow,
+ ("assuming signed overflow does not occur when "
+ "assuming abs (x) < 0 is false"));
+ return const0_rtx;
+ }
break;
case GE:
/* Optimize abs(x) >= 0.0. */
- if (trueop1 == CONST0_RTX (mode)
- && !HONOR_NANS (mode)
+ if (!HONOR_NANS (mode)
&& (!INTEGRAL_MODE_P (mode)
|| (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
{
- tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
- : trueop0;
- if (GET_CODE (tem) == ABS)
- {
- if (INTEGRAL_MODE_P (mode)
- && (issue_strict_overflow_warning
- (WARN_STRICT_OVERFLOW_CONDITIONAL)))
- warning (OPT_Wstrict_overflow,
- ("assuming signed overflow does not occur when "
- "assuming abs (x) >= 0 is true"));
- return const_true_rtx;
- }
+ if (INTEGRAL_MODE_P (mode)
+ && (issue_strict_overflow_warning
+ (WARN_STRICT_OVERFLOW_CONDITIONAL)))
+ warning (OPT_Wstrict_overflow,
+ ("assuming signed overflow does not occur when "
+ "assuming abs (x) >= 0 is true"));
+ return const_true_rtx;
}
-
- /* Optimize popcount (x) >= 0. */
- if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
- return const_true_rtx;
break;
case UNGE:
/* Optimize ! (abs(x) < 0.0). */
- if (trueop1 == CONST0_RTX (mode))
- {
- tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
- : trueop0;
- if (GET_CODE (tem) == ABS)
- return const_true_rtx;
- }
- break;
+ return const_true_rtx;
default:
break;
}
-
- return 0;
}
- /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
- as appropriate. */
- switch (code)
- {
- case EQ:
- case UNEQ:
- return equal ? const_true_rtx : const0_rtx;
- case NE:
- case LTGT:
- return ! equal ? const_true_rtx : const0_rtx;
- case LT:
- case UNLT:
- return op0lt ? const_true_rtx : const0_rtx;
- case GT:
- case UNGT:
- return op1lt ? const_true_rtx : const0_rtx;
- case LTU:
- return op0ltu ? const_true_rtx : const0_rtx;
- case GTU:
- return op1ltu ? const_true_rtx : const0_rtx;
- case LE:
- case UNLE:
- return equal || op0lt ? const_true_rtx : const0_rtx;
- case GE:
- case UNGE:
- return equal || op1lt ? const_true_rtx : const0_rtx;
- case LEU:
- return equal || op0ltu ? const_true_rtx : const0_rtx;
- case GEU:
- return equal || op1ltu ? const_true_rtx : const0_rtx;
- case ORDERED:
- return const_true_rtx;
- case UNORDERED:
- return const0_rtx;
- default:
- gcc_unreachable ();
- }
+ return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
@@ -4228,7 +4914,9 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
enum machine_mode op0_mode, rtx op0, rtx op1,
rtx op2)
{
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
+ bool any_change = false;
+ rtx tem;
/* VOIDmode means "infinite" precision. */
if (width == 0)
@@ -4236,48 +4924,65 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
switch (code)
{
+ case FMA:
+ /* Simplify negations around the multiplication. */
+ /* -a * -b + c => a * b + c. */
+ if (GET_CODE (op0) == NEG)
+ {
+ tem = simplify_unary_operation (NEG, mode, op1, mode);
+ if (tem)
+ op1 = tem, op0 = XEXP (op0, 0), any_change = true;
+ }
+ else if (GET_CODE (op1) == NEG)
+ {
+ tem = simplify_unary_operation (NEG, mode, op0, mode);
+ if (tem)
+ op0 = tem, op1 = XEXP (op1, 0), any_change = true;
+ }
+
+ /* Canonicalize the two multiplication operands. */
+ /* a * -b + c => -b * a + c. */
+ if (swap_commutative_operands_p (op0, op1))
+ tem = op0, op0 = op1, op1 = tem, any_change = true;
+
+ if (any_change)
+ return gen_rtx_FMA (mode, op0, op1, op2);
+ return NULL_RTX;
+
case SIGN_EXTRACT:
case ZERO_EXTRACT:
- if (GET_CODE (op0) == CONST_INT
- && GET_CODE (op1) == CONST_INT
- && GET_CODE (op2) == CONST_INT
+ if (CONST_INT_P (op0)
+ && CONST_INT_P (op1)
+ && CONST_INT_P (op2)
&& ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
&& width <= (unsigned) HOST_BITS_PER_WIDE_INT)
{
/* Extracting a bit-field from a constant */
- HOST_WIDE_INT val = INTVAL (op0);
-
+ unsigned HOST_WIDE_INT val = UINTVAL (op0);
+ HOST_WIDE_INT op1val = INTVAL (op1);
+ HOST_WIDE_INT op2val = INTVAL (op2);
if (BITS_BIG_ENDIAN)
- val >>= (GET_MODE_BITSIZE (op0_mode)
- - INTVAL (op2) - INTVAL (op1));
+ val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
else
- val >>= INTVAL (op2);
+ val >>= op2val;
- if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
+ if (HOST_BITS_PER_WIDE_INT != op1val)
{
/* First zero-extend. */
- val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
+ val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
/* If desired, propagate sign bit. */
if (code == SIGN_EXTRACT
- && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
- val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
+ && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
+ != 0)
+ val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
}
- /* Clear the bits that don't belong in our mode,
- unless they and our sign bit are all one.
- So we get either a reasonable negative value or a reasonable
- unsigned value for this mode. */
- if (width < HOST_BITS_PER_WIDE_INT
- && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
- != ((HOST_WIDE_INT) (-1) << (width - 1))))
- val &= ((HOST_WIDE_INT) 1 << width) - 1;
-
return gen_int_mode (val, mode);
}
break;
case IF_THEN_ELSE:
- if (GET_CODE (op0) == CONST_INT)
+ if (CONST_INT_P (op0))
return op0 != const0_rtx ? op1 : op2;
/* Convert c ? a : a into "a". */
@@ -4314,7 +5019,7 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
rtx temp;
/* Look for happy constants in op1 and op2. */
- if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
+ if (CONST_INT_P (op1) && CONST_INT_P (op2))
{
HOST_WIDE_INT t = INTVAL (op1);
HOST_WIDE_INT f = INTVAL (op2);
@@ -4345,7 +5050,7 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
/* See if any simplifications were possible. */
if (temp)
{
- if (GET_CODE (temp) == CONST_INT)
+ if (CONST_INT_P (temp))
return temp == const0_rtx ? op2 : op1;
else if (temp)
return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
@@ -4358,7 +5063,7 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
gcc_assert (GET_MODE (op1) == mode);
gcc_assert (VECTOR_MODE_P (mode));
op2 = avoid_constant_pool_reference (op2);
- if (GET_CODE (op2) == CONST_INT)
+ if (CONST_INT_P (op2))
{
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
@@ -4393,15 +5098,16 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
return 0;
}
-/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
- returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
+/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
+ or CONST_VECTOR,
+ returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
Works by unpacking OP into a collection of 8-bit values
represented as a little-endian array of 'unsigned char', selecting by BYTE,
and then repacking them again for OUTERMODE. */
static rtx
-simplify_immed_subreg (enum machine_mode outermode, rtx op,
+simplify_immed_subreg (enum machine_mode outermode, rtx op,
enum machine_mode innermode, unsigned int byte)
{
/* We support up to 512-bit values (for V8DFmode). */
@@ -4424,7 +5130,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
enum machine_mode outer_submode;
/* Some ports misuse CCmode. */
- if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
+ if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
return op;
/* We have no way to represent a complex constant at the rtl level. */
@@ -4449,17 +5155,17 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
gcc_assert (BITS_PER_UNIT % value_bit == 0);
/* I don't know how to handle endianness of sub-units. */
gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
-
+
for (elem = 0; elem < num_elem; elem++)
{
unsigned char * vp;
rtx el = elems[elem];
-
+
/* Vectors are kept in target memory order. (This is probably
a mistake.) */
{
unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
- unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
+ unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
/ BITS_PER_UNIT);
unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
@@ -4467,19 +5173,19 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
+ (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
vp = value + (bytele * BITS_PER_UNIT) / value_bit;
}
-
+
switch (GET_CODE (el))
{
case CONST_INT:
for (i = 0;
- i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
+ i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
i += value_bit)
*vp++ = INTVAL (el) >> i;
/* CONST_INTs are always logically sign-extended. */
for (; i < elem_bitsize; i += value_bit)
*vp++ = INTVAL (el) < 0 ? -1 : 0;
break;
-
+
case CONST_DOUBLE:
if (GET_MODE (el) == VOIDmode)
{
@@ -4525,14 +5231,33 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
ibase = i;
*vp++ = tmp[ibase / 32] >> i % 32;
}
-
+
/* It shouldn't matter what's done here, so fill it with
zero. */
for (; i < elem_bitsize; i += value_bit)
*vp++ = 0;
}
break;
-
+
+ case CONST_FIXED:
+ if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
+ {
+ for (i = 0; i < elem_bitsize; i += value_bit)
+ *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
+ }
+ else
+ {
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
+ *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
+ for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
+ i += value_bit)
+ *vp++ = CONST_FIXED_VALUE_HIGH (el)
+ >> (i - HOST_BITS_PER_WIDE_INT);
+ for (; i < elem_bitsize; i += value_bit)
+ *vp++ = 0;
+ }
+ break;
+
default:
gcc_unreachable ();
}
@@ -4544,7 +5269,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
will already have offset 0. */
if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
{
- unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
+ unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
- byte);
unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
@@ -4560,7 +5285,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
value_start = byte * (BITS_PER_UNIT / value_bit);
/* Re-pack the value. */
-
+
if (VECTOR_MODE_P (outermode))
{
num_elem = GET_MODE_NUNITS (outermode);
@@ -4584,12 +5309,12 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
for (elem = 0; elem < num_elem; elem++)
{
unsigned char *vp;
-
+
/* Vectors are stored in target memory order. (This is probably
a mistake.) */
{
unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
- unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
+ unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
/ BITS_PER_UNIT);
unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
@@ -4608,11 +5333,11 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
for (i = 0;
i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
i += value_bit)
- lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
+ lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
for (; i < elem_bitsize; i += value_bit)
- hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
- << (i - HOST_BITS_PER_WIDE_INT));
-
+ hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
+ << (i - HOST_BITS_PER_WIDE_INT);
+
/* immed_double_const doesn't call trunc_int_for_mode. I don't
know why. */
if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
@@ -4623,13 +5348,13 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
return NULL_RTX;
}
break;
-
+
case MODE_FLOAT:
case MODE_DECIMAL_FLOAT:
{
REAL_VALUE_TYPE r;
long tmp[max_bitsize / 32];
-
+
/* real_from_target wants its input in words affected by
FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
and use WORDS_BIG_ENDIAN instead; see the documentation
@@ -4650,7 +5375,29 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
}
break;
-
+
+ case MODE_FRACT:
+ case MODE_UFRACT:
+ case MODE_ACCUM:
+ case MODE_UACCUM:
+ {
+ FIXED_VALUE_TYPE f;
+ f.data.low = 0;
+ f.data.high = 0;
+ f.mode = outer_submode;
+
+ for (i = 0;
+ i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
+ i += value_bit)
+ f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
+ for (; i < elem_bitsize; i += value_bit)
+ f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
+ << (i - HOST_BITS_PER_WIDE_INT));
+
+ elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
+ }
+ break;
+
default:
gcc_unreachable ();
}
@@ -4682,8 +5429,9 @@ simplify_subreg (enum machine_mode outermode, rtx op,
if (outermode == innermode && !byte)
return op;
- if (GET_CODE (op) == CONST_INT
+ if (CONST_INT_P (op)
|| GET_CODE (op) == CONST_DOUBLE
+ || GET_CODE (op) == CONST_FIXED
|| GET_CODE (op) == CONST_VECTOR)
return simplify_immed_subreg (outermode, op, innermode, byte);
@@ -4756,7 +5504,22 @@ simplify_subreg (enum machine_mode outermode, rtx op,
return newx;
if (validate_subreg (outermode, innermostmode,
SUBREG_REG (op), final_offset))
- return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
+ {
+ newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
+ if (SUBREG_PROMOTED_VAR_P (op)
+ && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
+ && GET_MODE_CLASS (outermode) == MODE_INT
+ && IN_RANGE (GET_MODE_SIZE (outermode),
+ GET_MODE_SIZE (innermode),
+ GET_MODE_SIZE (innermostmode))
+ && subreg_lowpart_p (newx))
+ {
+ SUBREG_PROMOTED_VAR_P (newx) = 1;
+ SUBREG_PROMOTED_UNSIGNED_SET
+ (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
+ }
+ return newx;
+ }
return NULL_RTX;
}
@@ -4773,35 +5536,13 @@ simplify_subreg (enum machine_mode outermode, rtx op,
suppress this simplification. If the hard register is the stack,
frame, or argument pointer, leave this as a SUBREG. */
- if (REG_P (op)
- && REGNO (op) < FIRST_PSEUDO_REGISTER
-#ifdef CANNOT_CHANGE_MODE_CLASS
- && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
- && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
- && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
-#endif
- && ((reload_completed && !frame_pointer_needed)
- || (REGNO (op) != FRAME_POINTER_REGNUM
-#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
- && REGNO (op) != HARD_FRAME_POINTER_REGNUM
-#endif
- ))
-#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
- && REGNO (op) != ARG_POINTER_REGNUM
-#endif
- && REGNO (op) != STACK_POINTER_REGNUM
- && subreg_offset_representable_p (REGNO (op), innermode,
- byte, outermode))
+ if (REG_P (op) && HARD_REGISTER_P (op))
{
- unsigned int regno = REGNO (op);
- unsigned int final_regno
- = regno + subreg_regno_offset (regno, innermode, byte, outermode);
-
- /* ??? We do allow it if the current REG is not valid for
- its mode. This is a kludge to work around how float/complex
- arguments are passed on 32-bit SPARC and should be fixed. */
- if (HARD_REGNO_MODE_OK (final_regno, outermode)
- || ! HARD_REGNO_MODE_OK (regno, innermode))
+ unsigned int regno, final_regno;
+
+ regno = REGNO (op);
+ final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
+ if (HARD_REGISTER_NUM_P (final_regno))
{
rtx x;
int final_offset = byte;
@@ -4878,7 +5619,8 @@ simplify_subreg (enum machine_mode outermode, rtx op,
/* Optimize SUBREG truncations of zero and sign extended values. */
if ((GET_CODE (op) == ZERO_EXTEND
|| GET_CODE (op) == SIGN_EXTEND)
- && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
+ && SCALAR_INT_MODE_P (innermode)
+ && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
{
unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
@@ -4894,7 +5636,7 @@ simplify_subreg (enum machine_mode outermode, rtx op,
enum machine_mode origmode = GET_MODE (XEXP (op, 0));
if (outermode == origmode)
return XEXP (op, 0);
- if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
+ if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
subreg_lowpart_offset (outermode,
origmode));
@@ -4906,7 +5648,7 @@ simplify_subreg (enum machine_mode outermode, rtx op,
/* A SUBREG resulting from a zero extension may fold to zero if
it extracts higher bits that the ZERO_EXTEND's source bits. */
if (GET_CODE (op) == ZERO_EXTEND
- && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
+ && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
return CONST0_RTX (outermode);
}
@@ -4916,15 +5658,16 @@ simplify_subreg (enum machine_mode outermode, rtx op,
if ((GET_CODE (op) == LSHIFTRT
|| GET_CODE (op) == ASHIFTRT)
&& SCALAR_INT_MODE_P (outermode)
+ && SCALAR_INT_MODE_P (innermode)
/* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
to avoid the possibility that an outer LSHIFTRT shifts by more
than the sign extension's sign_bit_copies and introduces zeros
into the high bits of the result. */
- && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
+ && CONST_INT_P (XEXP (op, 1))
&& GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
&& GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
- && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
&& subreg_lsb_1 (outermode, innermode, byte) == 0)
return simplify_gen_binary (ASHIFTRT, outermode,
XEXP (XEXP (op, 0), 0), XEXP (op, 1));
@@ -4935,11 +5678,12 @@ simplify_subreg (enum machine_mode outermode, rtx op,
if ((GET_CODE (op) == LSHIFTRT
|| GET_CODE (op) == ASHIFTRT)
&& SCALAR_INT_MODE_P (outermode)
- && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && SCALAR_INT_MODE_P (innermode)
+ && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
+ && CONST_INT_P (XEXP (op, 1))
&& GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
&& GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
- && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
&& subreg_lsb_1 (outermode, innermode, byte) == 0)
return simplify_gen_binary (LSHIFTRT, outermode,
XEXP (XEXP (op, 0), 0), XEXP (op, 1));
@@ -4949,16 +5693,62 @@ simplify_subreg (enum machine_mode outermode, rtx op,
the outer subreg is effectively a truncation to the original mode. */
if (GET_CODE (op) == ASHIFT
&& SCALAR_INT_MODE_P (outermode)
- && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
- && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && SCALAR_INT_MODE_P (innermode)
+ && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
+ && CONST_INT_P (XEXP (op, 1))
&& (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
|| GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
&& GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
- && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
&& subreg_lsb_1 (outermode, innermode, byte) == 0)
return simplify_gen_binary (ASHIFT, outermode,
XEXP (XEXP (op, 0), 0), XEXP (op, 1));
+ /* Recognize a word extraction from a multi-word subreg. */
+ if ((GET_CODE (op) == LSHIFTRT
+ || GET_CODE (op) == ASHIFTRT)
+ && SCALAR_INT_MODE_P (innermode)
+ && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
+ && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
+ && CONST_INT_P (XEXP (op, 1))
+ && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
+ && INTVAL (XEXP (op, 1)) >= 0
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
+ && byte == subreg_lowpart_offset (outermode, innermode))
+ {
+ int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
+ return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
+ (WORDS_BIG_ENDIAN
+ ? byte - shifted_bytes
+ : byte + shifted_bytes));
+ }
+
+ /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
+ and try replacing the SUBREG and shift with it. Don't do this if
+ the MEM has a mode-dependent address or if we would be widening it. */
+
+ if ((GET_CODE (op) == LSHIFTRT
+ || GET_CODE (op) == ASHIFTRT)
+ && SCALAR_INT_MODE_P (innermode)
+ && MEM_P (XEXP (op, 0))
+ && CONST_INT_P (XEXP (op, 1))
+ && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
+ && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
+ && INTVAL (XEXP (op, 1)) > 0
+ && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
+ && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
+ && ! MEM_VOLATILE_P (XEXP (op, 0))
+ && byte == subreg_lowpart_offset (outermode, innermode)
+ && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
+ || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
+ {
+ int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
+ return adjust_address_nv (XEXP (op, 0), outermode,
+ (WORDS_BIG_ENDIAN
+ ? byte - shifted_bytes
+ : byte + shifted_bytes));
+ }
+
return NULL_RTX;
}
@@ -5026,10 +5816,10 @@ simplify_gen_subreg (enum machine_mode outermode, rtx op,
simplification and 1 for tree simplification. */
rtx
-simplify_rtx (rtx x)
+simplify_rtx (const_rtx x)
{
- enum rtx_code code = GET_CODE (x);
- enum machine_mode mode = GET_MODE (x);
+ const enum rtx_code code = GET_CODE (x);
+ const enum machine_mode mode = GET_MODE (x);
switch (GET_RTX_CLASS (code))
{