return x;
}
-
-/* Return true if X is a MEM referencing the constant pool. */
-
-bool
-constant_pool_reference_p (rtx x)
-{
- return avoid_constant_pool_reference (x) != x;
-}
\f
/* Make a unary operation by first seeing if it folds and otherwise making
the specified operation. */
/* (float_truncate (float x)) is (float x) */
if (GET_CODE (op) == FLOAT
&& (flag_unsafe_math_optimizations
- || ((unsigned)significand_size (GET_MODE (op))
- >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
- - num_sign_bit_copies (XEXP (op, 0),
- GET_MODE (XEXP (op, 0)))))))
+ || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
+ && ((unsigned)significand_size (GET_MODE (op))
+ >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
+ - num_sign_bit_copies (XEXP (op, 0),
+ GET_MODE (XEXP (op, 0))))))))
return simplify_gen_unary (FLOAT, mode,
XEXP (op, 0),
GET_MODE (XEXP (op, 0)));
*/
if (GET_CODE (op) == FLOAT_EXTEND
|| (GET_CODE (op) == FLOAT
+ && SCALAR_FLOAT_MODE_P (GET_MODE (op))
&& ((unsigned)significand_size (GET_MODE (op))
>= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
- num_sign_bit_copies (XEXP (op, 0),
XEXP (op0, 1)));
/* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
- if (GET_CODE (op0) == MULT
+ if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
+ && GET_CODE (op0) == MULT
&& GET_CODE (XEXP (op0, 0)) == NEG)
{
rtx in1, in2;
case MINUS:
/* We can't assume x-x is 0 even with non-IEEE floating point,
but since it is zero except in very strange circumstances, we
- will treat it as zero with -funsafe-math-optimizations. */
+ will treat it as zero with -funsafe-math-optimizations and
+ -ffinite-math-only. */
if (rtx_equal_p (trueop0, trueop1)
&& ! side_effects_p (op0)
- && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
+ && (! FLOAT_MODE_P (mode)
+ || (flag_unsafe_math_optimizations
+ && !HONOR_NANS (mode)
+ && !HONOR_INFINITIES (mode))))
return CONST0_RTX (mode);
/* Change subtraction from zero into negation. (0 - x) is the
return reversed;
/* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
- if (GET_CODE (op1) == MULT
+ if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
+ && GET_CODE (op1) == MULT
&& GET_CODE (XEXP (op1, 0)) == NEG)
{
rtx in1, in2;
/* Canonicalize (minus (neg A) (mult B C)) to
(minus (mult (neg B) C) A). */
- if (GET_CODE (op1) == MULT
+ if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
+ && GET_CODE (op1) == MULT
&& GET_CODE (op0) == NEG)
{
rtx in1, in2;
&& (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
return op1;
+ /* Canonicalize (X & C1) | C2. */
+ if (GET_CODE (op0) == AND
+ && GET_CODE (trueop1) == CONST_INT
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT)
+ {
+ HOST_WIDE_INT mask = GET_MODE_MASK (mode);
+ HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
+ HOST_WIDE_INT c2 = INTVAL (trueop1);
+
+ /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
+ if ((c1 & c2) == c1
+ && !side_effects_p (XEXP (op0, 0)))
+ return trueop1;
+
+ /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
+ if (((c1|c2) & mask) == mask)
+ return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
+
+ /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
+ if (((c1 & ~c2) & mask) != (c1 & mask))
+ {
+ tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
+ gen_int_mode (c1 & ~c2, mode));
+ return simplify_gen_binary (IOR, mode, tem, op1);
+ }
+ }
+
/* Convert (A & B) | A to A. */
if (GET_CODE (op0) == AND
&& (rtx_equal_p (XEXP (op0, 0), op1)
return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
}
+ /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
+ if (GET_CODE (op0) == IOR
+ && GET_CODE (trueop1) == CONST_INT
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT)
+ {
+ HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
+ return simplify_gen_binary (IOR, mode,
+ simplify_gen_binary (AND, mode,
+ XEXP (op0, 0), op1),
+ gen_int_mode (tmp, mode));
+ }
+
/* Convert (A ^ B) & A to A & (~B) since the latter is often a single
insn (and may simplify more). */
if (GET_CODE (op0) == XOR
simplify_gen_binary (XOR, cmp_mode,
XEXP (op0, 1), op1));
+ if (op0code == POPCOUNT && op1 == const0_rtx)
+ switch (code)
+ {
+ case EQ:
+ case LE:
+ case LEU:
+ /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
+ return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
+ XEXP (op0, 0), const0_rtx);
+
+ case NE:
+ case GT:
+ case GTU:
+ /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
+ return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
+ XEXP (op0, 0), const0_rtx);
+
+ default:
+ break;
+ }
+
return NULL_RTX;
}
tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
: trueop0;
if (GET_CODE (tem) == ABS)
- return const0_rtx;
+ {
+ if (INTEGRAL_MODE_P (mode)
+ && (issue_strict_overflow_warning
+ (WARN_STRICT_OVERFLOW_CONDITIONAL)))
+ warning (OPT_Wstrict_overflow,
+ ("assuming signed overflow does not occur when "
+ "assuming abs (x) < 0 is false"));
+ return const0_rtx;
+ }
}
+
+ /* Optimize popcount (x) < 0: popcount is never negative, so the
+ comparison is always false. */
+ if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
+ return const0_rtx;
break;
case GE:
tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
: trueop0;
if (GET_CODE (tem) == ABS)
- return const_true_rtx;
+ {
+ if (INTEGRAL_MODE_P (mode)
+ && (issue_strict_overflow_warning
+ (WARN_STRICT_OVERFLOW_CONDITIONAL)))
+ warning (OPT_Wstrict_overflow,
+ ("assuming signed overflow does not occur when "
+ "assuming abs (x) >= 0 is true"));
+ return const_true_rtx;
+ }
}
+
+ /* Optimize popcount (x) >= 0. */
+ if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
+ return const_true_rtx;
break;
case UNGE: