#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
-#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
+#include "diagnostic-core.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
rtx
delegitimize_mem_from_attrs (rtx x)
{
+ /* MEMs without MEM_OFFSETs may have been offset, so we can't just
+ use their base addresses as equivalent. */
if (MEM_P (x)
&& MEM_EXPR (x)
- && (!MEM_OFFSET (x)
- || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
+ && MEM_OFFSET (x))
{
tree decl = MEM_EXPR (x);
enum machine_mode mode = GET_MODE (x);
{
rtx newx;
- if (MEM_OFFSET (x))
- offset += INTVAL (MEM_OFFSET (x));
+ offset += INTVAL (MEM_OFFSET (x));
newx = DECL_RTL (decl);
than HOST_BITS_PER_WIDE_INT. */
if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
&& COMPARISON_P (op)
- && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
+ && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
return rtl_hooks.gen_lowpart_no_emit (mode, op);
break;
|| ((GET_MODE_BITSIZE (GET_MODE (op))
<= HOST_BITS_PER_WIDE_INT)
&& ((nonzero_bits (op, GET_MODE (op))
- & ((HOST_WIDE_INT) 1
+ & ((unsigned HOST_WIDE_INT) 1
<< (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
== 0)))
return op;
&& GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
return rtl_hooks.gen_lowpart_no_emit (mode, op);
+ /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
+ (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
+ if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
+ {
+ gcc_assert (GET_MODE_BITSIZE (mode)
+ > GET_MODE_BITSIZE (GET_MODE (op)));
+ return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)));
+ }
+
+ /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
+    is (sign_extend:M (subreg:O <X>)) if there is a mode with
+ GET_MODE_BITSIZE (N) - I bits.
+ (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
+ is similarly (zero_extend:M (subreg:O <X>)). */
+ if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
+ && GET_CODE (XEXP (op, 0)) == ASHIFT
+ && CONST_INT_P (XEXP (op, 1))
+ && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
+ && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
+ {
+ enum machine_mode tmode
+ = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
+ - INTVAL (XEXP (op, 1)), MODE_INT, 1);
+ gcc_assert (GET_MODE_BITSIZE (mode)
+ > GET_MODE_BITSIZE (GET_MODE (op)));
+ if (tmode != BLKmode)
+ {
+ rtx inner =
+ rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
+ return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
+ ? SIGN_EXTEND : ZERO_EXTEND,
+ mode, inner, tmode);
+ }
+ }
+
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
/* As we do not know which address space the pointer is referring to,
we can do this only if the target does not support different pointer
&& GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
return rtl_hooks.gen_lowpart_no_emit (mode, op);
+ /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
+ if (GET_CODE (op) == ZERO_EXTEND)
+ return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)));
+
+ /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
+    is (zero_extend:M (subreg:O <X>)) if there is a mode with
+ GET_MODE_BITSIZE (N) - I bits. */
+ if (GET_CODE (op) == LSHIFTRT
+ && GET_CODE (XEXP (op, 0)) == ASHIFT
+ && CONST_INT_P (XEXP (op, 1))
+ && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
+ && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
+ {
+ enum machine_mode tmode
+ = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
+ - INTVAL (XEXP (op, 1)), MODE_INT, 1);
+ if (tmode != BLKmode)
+ {
+ rtx inner =
+ rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
+ return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
+ }
+ }
+
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
/* As we do not know which address space the pointer is referring to,
we can do this only if the target does not support different pointer
break;
case FFS:
- /* Don't use ffs here. Instead, get low order bit and then its
- number. If arg0 is zero, this will return 0, as desired. */
arg0 &= GET_MODE_MASK (mode);
- val = exact_log2 (arg0 & (- arg0)) + 1;
+ val = ffs_hwi (arg0);
break;
case CLZ:
val = GET_MODE_BITSIZE (mode);
}
else
- val = exact_log2 (arg0 & -arg0);
+ val = ctz_hwi (arg0);
break;
case POPCOUNT:
val = arg0;
}
else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
- val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
+ val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
+ << GET_MODE_BITSIZE (op_mode));
else
return 0;
break;
else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
{
val
- = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
- if (val
- & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
- val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
+ = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
+ << GET_MODE_BITSIZE (op_mode));
+ if (val & ((unsigned HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (op_mode) - 1)))
+ val
+ -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
}
else
return 0;
case FFS:
hv = 0;
- if (l1 == 0)
- {
- if (h1 == 0)
- lv = 0;
- else
- lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
- }
+ if (l1 != 0)
+ lv = ffs_hwi (l1);
+ else if (h1 != 0)
+ lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
else
- lv = exact_log2 (l1 & -l1) + 1;
+ lv = 0;
break;
case CLZ:
case CTZ:
hv = 0;
if (l1 != 0)
- lv = exact_log2 (l1 & -l1);
+ lv = ctz_hwi (l1);
else if (h1 != 0)
- lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
+ lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
lv = GET_MODE_BITSIZE (mode);
break;
{
lv = l1 & GET_MODE_MASK (op_mode);
if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
- && (lv & ((HOST_WIDE_INT) 1
+ && (lv & ((unsigned HOST_WIDE_INT) 1
<< (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
- lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
+ lv -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
hv = HWI_SIGN_EXTEND (lv);
}
d = t;
break;
case ABS:
- d = REAL_VALUE_ABS (d);
+ d = real_value_abs (&d);
break;
case NEG:
- d = REAL_VALUE_NEGATE (d);
+ d = real_value_negate (&d);
break;
case FLOAT_TRUNCATE:
d = real_value_truncate (mode, d);
/* Test against the signed lower bound. */
if (width > HOST_BITS_PER_WIDE_INT)
{
- th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
+ th = (unsigned HOST_WIDE_INT) (-1)
+ << (width - HOST_BITS_PER_WIDE_INT - 1);
tl = 0;
}
else
{
th = -1;
- tl = (HOST_WIDE_INT) -1 << (width - 1);
+ tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
}
real_from_integer (&t, VOIDmode, tl, th, 0);
if (REAL_VALUES_LESS (x, t))
if (SCALAR_INT_MODE_P (mode))
{
- HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
- unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
+ double_int coeff0, coeff1;
rtx lhs = op0, rhs = op1;
+ coeff0 = double_int_one;
+ coeff1 = double_int_one;
+
if (GET_CODE (lhs) == NEG)
{
- coeff0l = -1;
- coeff0h = -1;
+ coeff0 = double_int_minus_one;
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == MULT
&& CONST_INT_P (XEXP (lhs, 1)))
{
- coeff0l = INTVAL (XEXP (lhs, 1));
- coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
+ coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == ASHIFT
&& CONST_INT_P (XEXP (lhs, 1))
- && INTVAL (XEXP (lhs, 1)) >= 0
+ && INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
- coeff0h = 0;
+ coeff0 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
if (GET_CODE (rhs) == NEG)
{
- coeff1l = -1;
- coeff1h = -1;
+ coeff1 = double_int_minus_one;
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == MULT
&& CONST_INT_P (XEXP (rhs, 1)))
{
- coeff1l = INTVAL (XEXP (rhs, 1));
- coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
+ coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == ASHIFT
&& INTVAL (XEXP (rhs, 1)) >= 0
&& INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
- coeff1h = 0;
+ coeff1 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (rhs, 1)));
rhs = XEXP (rhs, 0);
}
{
rtx orig = gen_rtx_PLUS (mode, op0, op1);
rtx coeff;
- unsigned HOST_WIDE_INT l;
- HOST_WIDE_INT h;
+ double_int val;
bool speed = optimize_function_for_speed_p (cfun);
- add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
- coeff = immed_double_const (l, h, mode);
+ val = double_int_add (coeff0, coeff1);
+ coeff = immed_double_int_const (val, mode);
tem = simplify_gen_binary (MULT, mode, lhs, coeff);
return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
if (SCALAR_INT_MODE_P (mode))
{
- HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
- unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
+ double_int coeff0, negcoeff1;
rtx lhs = op0, rhs = op1;
+ coeff0 = double_int_one;
+ negcoeff1 = double_int_minus_one;
+
if (GET_CODE (lhs) == NEG)
{
- coeff0l = -1;
- coeff0h = -1;
+ coeff0 = double_int_minus_one;
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == MULT
&& CONST_INT_P (XEXP (lhs, 1)))
{
- coeff0l = INTVAL (XEXP (lhs, 1));
- coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
+ coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == ASHIFT
&& INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
- coeff0h = 0;
+ coeff0 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
if (GET_CODE (rhs) == NEG)
{
- negcoeff1l = 1;
- negcoeff1h = 0;
+ negcoeff1 = double_int_one;
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == MULT
&& CONST_INT_P (XEXP (rhs, 1)))
{
- negcoeff1l = -INTVAL (XEXP (rhs, 1));
- negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
+ negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == ASHIFT
&& INTVAL (XEXP (rhs, 1)) >= 0
&& INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
- negcoeff1h = -1;
+ negcoeff1 = double_int_setbit (double_int_zero,
+ INTVAL (XEXP (rhs, 1)));
+ negcoeff1 = double_int_neg (negcoeff1);
rhs = XEXP (rhs, 0);
}
{
rtx orig = gen_rtx_MINUS (mode, op0, op1);
rtx coeff;
- unsigned HOST_WIDE_INT l;
- HOST_WIDE_INT h;
+ double_int val;
bool speed = optimize_function_for_speed_p (cfun);
- add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
- coeff = immed_double_const (l, h, mode);
+ val = double_int_add (coeff0, negcoeff1);
+ coeff = immed_double_int_const (val, mode);
tem = simplify_gen_binary (MULT, mode, lhs, coeff);
return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
if (trueop1 == constm1_rtx)
return simplify_gen_unary (NEG, mode, op0, mode);
+ if (GET_CODE (op0) == NEG)
+ {
+ rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
+ if (temp)
+ return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
+ }
+ if (GET_CODE (op1) == NEG)
+ {
+ rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
+ if (temp)
+ return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
+ }
+
/* Maybe simplify x * 0 to 0. The reduction is not valid if
x is NaN, since x * 0 is then also NaN. Nor is it valid
when the mode has signed zeros, since multiplying a negative
/* Convert multiply by constant power of two into shift unless
we are still generating RTL. This test is a kludge. */
if (CONST_INT_P (trueop1)
- && (val = exact_log2 (INTVAL (trueop1))) >= 0
+ && (val = exact_log2 (UINTVAL (trueop1))) >= 0
/* If the mode is larger than the host word size, and the
uppermost bit is set, then this isn't a power of two due
to implicit sign extension. */
break;
case IOR:
- if (trueop1 == const0_rtx)
+ if (trueop1 == CONST0_RTX (mode))
return op0;
if (CONST_INT_P (trueop1)
- && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
+ && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
== GET_MODE_MASK (mode)))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
/* (ior A C) is C if all bits of A that might be nonzero are on in C. */
if (CONST_INT_P (op1)
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
+ && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
return op1;
/* Canonicalize (X & C1) | C2. */
&& GET_CODE (op0) == AND
&& CONST_INT_P (XEXP (op0, 1))
&& CONST_INT_P (op1)
- && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
+ && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
return simplify_gen_binary (IOR, mode,
simplify_gen_binary
(AND, mode, XEXP (op0, 0),
- GEN_INT (INTVAL (XEXP (op0, 1))
- & ~INTVAL (op1))),
+ GEN_INT (UINTVAL (XEXP (op0, 1))
+ & ~UINTVAL (op1))),
op1);
/* If OP0 is (ashiftrt (plus ...) C), it might actually be
break;
case XOR:
- if (trueop1 == const0_rtx)
+ if (trueop1 == CONST0_RTX (mode))
return op0;
if (CONST_INT_P (trueop1)
- && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
+ && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
== GET_MODE_MASK (mode)))
return simplify_gen_unary (NOT, mode, op0, mode);
if (rtx_equal_p (trueop0, trueop1)
&& CONST_INT_P (trueop1)
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
&& (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
- & INTVAL (trueop1)) == 0)
+ & UINTVAL (trueop1)) == 0)
{
enum machine_mode imode = GET_MODE (XEXP (op0, 0));
tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
(A +- N) & M -> A & M. */
if (CONST_INT_P (trueop1)
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ~INTVAL (trueop1)
- && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
+ && ~UINTVAL (trueop1)
+ && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
&& (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
{
rtx pmop[2];
pmop[1] = XEXP (op0, 1);
if (CONST_INT_P (pmop[1])
- && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
+ && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
return simplify_gen_binary (AND, mode, pmop[0], op1);
for (which = 0; which < 2; which++)
{
case AND:
if (CONST_INT_P (XEXP (tem, 1))
- && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
- == INTVAL (trueop1))
+ && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
+ == UINTVAL (trueop1))
pmop[which] = XEXP (tem, 0);
break;
case IOR:
case XOR:
if (CONST_INT_P (XEXP (tem, 1))
- && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
+ && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
pmop[which] = XEXP (tem, 0);
break;
default:
return rtl_hooks.gen_lowpart_no_emit (mode, op0);
/* Convert divide by power of two into shift. */
if (CONST_INT_P (trueop1)
- && (val = exact_log2 (INTVAL (trueop1))) > 0)
+ && (val = exact_log2 (UINTVAL (trueop1))) > 0)
return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
break;
else
{
/* 0/x is 0 (or x&0 if x has side-effects). */
- if (trueop0 == CONST0_RTX (mode))
+ if (trueop0 == CONST0_RTX (mode)
+ && !cfun->can_throw_non_call_exceptions)
{
if (side_effects_p (op1))
return simplify_gen_binary (AND, mode, op1, trueop0);
}
/* Implement modulus by power of two as AND. */
if (CONST_INT_P (trueop1)
- && exact_log2 (INTVAL (trueop1)) > 0)
+ && exact_log2 (UINTVAL (trueop1)) > 0)
return simplify_gen_binary (AND, mode, op0,
GEN_INT (INTVAL (op1) - 1));
break;
return op0;
/* Rotating ~0 always results in ~0. */
if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
- && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
+ && UINTVAL (trueop0) == GET_MODE_MASK (mode)
&& ! side_effects_p (op1))
return op0;
canonicalize_shift:
case SMIN:
if (width <= HOST_BITS_PER_WIDE_INT
&& CONST_INT_P (trueop1)
- && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
+ && UINTVAL (trueop1) == (unsigned HOST_WIDE_INT) 1 << (width -1)
&& ! side_effects_p (op0))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
case SMAX:
if (width <= HOST_BITS_PER_WIDE_INT
&& CONST_INT_P (trueop1)
- && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
- == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
+ && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
&& ! side_effects_p (op0))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
/* We can fold some multi-word operations. */
if (GET_MODE_CLASS (mode) == MODE_INT
- && width == HOST_BITS_PER_WIDE_INT * 2
- && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
- && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
+ && width == HOST_BITS_PER_DOUBLE_INT
+ && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
+ && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
{
- unsigned HOST_WIDE_INT l1, l2, lv, lt;
- HOST_WIDE_INT h1, h2, hv, ht;
+ double_int o0, o1, res, tmp;
- if (GET_CODE (op0) == CONST_DOUBLE)
- l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
- else
- l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
-
- if (GET_CODE (op1) == CONST_DOUBLE)
- l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
- else
- l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
+ o0 = rtx_to_double_int (op0);
+ o1 = rtx_to_double_int (op1);
switch (code)
{
case MINUS:
/* A - B == A + (-B). */
- neg_double (l2, h2, &lv, &hv);
- l2 = lv, h2 = hv;
+ o1 = double_int_neg (o1);
/* Fall through.... */
case PLUS:
- add_double (l1, h1, l2, h2, &lv, &hv);
+ res = double_int_add (o0, o1);
break;
case MULT:
- mul_double (l1, h1, l2, h2, &lv, &hv);
+ res = double_int_mul (o0, o1);
break;
case DIV:
- if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
- &lv, &hv, <, &ht))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 0,
+ o0.low, o0.high, o1.low, o1.high,
+ &res.low, &res.high,
+ &tmp.low, &tmp.high))
return 0;
break;
case MOD:
- if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
- <, &ht, &lv, &hv))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 0,
+ o0.low, o0.high, o1.low, o1.high,
+ &tmp.low, &tmp.high,
+ &res.low, &res.high))
return 0;
break;
case UDIV:
- if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
- &lv, &hv, <, &ht))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 1,
+ o0.low, o0.high, o1.low, o1.high,
+ &res.low, &res.high,
+ &tmp.low, &tmp.high))
return 0;
break;
case UMOD:
- if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
- <, &ht, &lv, &hv))
+ if (div_and_round_double (TRUNC_DIV_EXPR, 1,
+ o0.low, o0.high, o1.low, o1.high,
+ &tmp.low, &tmp.high,
+ &res.low, &res.high))
return 0;
break;
case AND:
- lv = l1 & l2, hv = h1 & h2;
+ res = double_int_and (o0, o1);
break;
case IOR:
- lv = l1 | l2, hv = h1 | h2;
+ res = double_int_ior (o0, o1);
break;
case XOR:
- lv = l1 ^ l2, hv = h1 ^ h2;
+ res = double_int_xor (o0, o1);
break;
case SMIN:
- if (h1 < h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- < (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_smin (o0, o1);
break;
case SMAX:
- if (h1 > h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- > (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_smax (o0, o1);
break;
case UMIN:
- if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- < (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_umin (o0, o1);
break;
case UMAX:
- if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
- || (h1 == h2
- && ((unsigned HOST_WIDE_INT) l1
- > (unsigned HOST_WIDE_INT) l2)))
- lv = l1, hv = h1;
- else
- lv = l2, hv = h2;
+ res = double_int_umax (o0, o1);
break;
case LSHIFTRT: case ASHIFTRT:
case ASHIFT:
case ROTATE: case ROTATERT:
- if (SHIFT_COUNT_TRUNCATED)
- l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
-
- if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
- return 0;
-
- if (code == LSHIFTRT || code == ASHIFTRT)
- rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
- code == ASHIFTRT);
- else if (code == ASHIFT)
- lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
- else if (code == ROTATE)
- lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
- else /* code == ROTATERT */
- rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
+ {
+ unsigned HOST_WIDE_INT cnt;
+
+ if (SHIFT_COUNT_TRUNCATED)
+ o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));
+
+ if (!double_int_fits_in_uhwi_p (o1)
+ || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
+ return 0;
+
+ cnt = double_int_to_uhwi (o1);
+
+ if (code == LSHIFTRT || code == ASHIFTRT)
+ res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode),
+ code == ASHIFTRT);
+ else if (code == ASHIFT)
+ res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode),
+ true);
+ else if (code == ROTATE)
+ res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode));
+ else /* code == ROTATERT */
+ res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode));
+ }
break;
default:
return 0;
}
- return immed_double_const (lv, hv, mode);
+ return immed_double_int_const (res, mode);
}
if (CONST_INT_P (op0) && CONST_INT_P (op1)
if (width < HOST_BITS_PER_WIDE_INT)
{
- arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
- arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
+ arg0 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
+ arg1 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
arg0s = arg0;
- if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
- arg0s |= ((HOST_WIDE_INT) (-1) << width);
+ if (arg0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
+ arg0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
arg1s = arg1;
- if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
- arg1s |= ((HOST_WIDE_INT) (-1) << width);
+ if (arg1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
+ arg1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
}
else
{
case DIV:
if (arg1s == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = arg0s / arg1s;
case MOD:
if (arg1s == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = arg0s % arg1s;
case UDIV:
if (arg1 == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = (unsigned HOST_WIDE_INT) arg0 / arg1;
case UMOD:
if (arg1 == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ || ((unsigned HOST_WIDE_INT) arg0s
+ == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
&& arg1s == -1))
return 0;
val = (unsigned HOST_WIDE_INT) arg0 % arg1;
/* Sign-extend the result for arithmetic right shifts. */
if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
- val |= ((HOST_WIDE_INT) -1) << (width - arg1);
+ val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
break;
case ROTATERT:
we have to sign or zero-extend the values. */
if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
{
- l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
- l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
+ l0u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
+ l1u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
- if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
- l0s |= ((HOST_WIDE_INT) (-1) << width);
+ if (l0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
+ l0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
- if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
- l1s |= ((HOST_WIDE_INT) (-1) << width);
+ if (l1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
+ l1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
}
if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
{
int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
- && (INTVAL (inner_const)
- & ((HOST_WIDE_INT) 1 << sign_bitnum)));
+ && (UINTVAL (inner_const)
+ & ((unsigned HOST_WIDE_INT) 1
+ << sign_bitnum)));
switch (code)
{
rtx op2)
{
unsigned int width = GET_MODE_BITSIZE (mode);
+ bool any_change = false;
+ rtx tem;
/* VOIDmode means "infinite" precision. */
if (width == 0)
switch (code)
{
+ case FMA:
+ /* Simplify negations around the multiplication. */
+ /* -a * -b + c => a * b + c. */
+ if (GET_CODE (op0) == NEG)
+ {
+ tem = simplify_unary_operation (NEG, mode, op1, mode);
+ if (tem)
+ op1 = tem, op0 = XEXP (op0, 0), any_change = true;
+ }
+ else if (GET_CODE (op1) == NEG)
+ {
+ tem = simplify_unary_operation (NEG, mode, op0, mode);
+ if (tem)
+ op0 = tem, op1 = XEXP (op1, 0), any_change = true;
+ }
+
+ /* Canonicalize the two multiplication operands. */
+ /* a * -b + c => -b * a + c. */
+ if (swap_commutative_operands_p (op0, op1))
+ tem = op0, op0 = op1, op1 = tem, any_change = true;
+
+ if (any_change)
+ return gen_rtx_FMA (mode, op0, op1, op2);
+ return NULL_RTX;
+
case SIGN_EXTRACT:
case ZERO_EXTRACT:
if (CONST_INT_P (op0)
&& width <= (unsigned) HOST_BITS_PER_WIDE_INT)
{
/* Extracting a bit-field from a constant */
- HOST_WIDE_INT val = INTVAL (op0);
+ unsigned HOST_WIDE_INT val = UINTVAL (op0);
if (BITS_BIG_ENDIAN)
- val >>= (GET_MODE_BITSIZE (op0_mode)
- - INTVAL (op2) - INTVAL (op1));
+ val >>= GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1);
else
val >>= INTVAL (op2);
if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
{
/* First zero-extend. */
- val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
+ val &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
/* If desired, propagate sign bit. */
if (code == SIGN_EXTRACT
- && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
- val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
+ && (val & ((unsigned HOST_WIDE_INT) 1 << (INTVAL (op1) - 1)))
+ != 0)
+ val |= ~ (((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
}
/* Clear the bits that don't belong in our mode,
So we get either a reasonable negative value or a reasonable
unsigned value for this mode. */
if (width < HOST_BITS_PER_WIDE_INT
- && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
- != ((HOST_WIDE_INT) (-1) << (width - 1))))
- val &= ((HOST_WIDE_INT) 1 << width) - 1;
+ && ((val & ((unsigned HOST_WIDE_INT) (-1) << (width - 1)))
+ != ((unsigned HOST_WIDE_INT) (-1) << (width - 1))))
+ val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
return gen_int_mode (val, mode);
}
for (i = 0;
i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
i += value_bit)
- lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
+ lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
for (; i < elem_bitsize; i += value_bit)
- hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
- << (i - HOST_BITS_PER_WIDE_INT));
+ hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
+ << (i - HOST_BITS_PER_WIDE_INT);
/* immed_double_const doesn't call trunc_int_for_mode. I don't
know why. */
for (i = 0;
i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
i += value_bit)
- f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
+ f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
for (; i < elem_bitsize; i += value_bit)
- f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
+ f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
<< (i - HOST_BITS_PER_WIDE_INT));
elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
: byte + shifted_bytes));
}
+ /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
+ and try replacing the SUBREG and shift with it. Don't do this if
+ the MEM has a mode-dependent address or if we would be widening it. */
+
+ if ((GET_CODE (op) == LSHIFTRT
+ || GET_CODE (op) == ASHIFTRT)
+ && MEM_P (XEXP (op, 0))
+ && CONST_INT_P (XEXP (op, 1))
+ && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
+ && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
+ && INTVAL (XEXP (op, 1)) > 0
+ && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
+ && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
+ && ! MEM_VOLATILE_P (XEXP (op, 0))
+ && byte == subreg_lowpart_offset (outermode, innermode)
+ && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
+ || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
+ {
+ int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
+ return adjust_address_nv (XEXP (op, 0), outermode,
+ (WORDS_BIG_ENDIAN
+ ? byte - shifted_bytes
+ : byte + shifted_bytes));
+ }
+
return NULL_RTX;
}