static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
rtx, int);
+static bool associative_constant_p (rtx);
+static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
+ rtx, rtx);
\f
/* Negate a CONST_INT rtx, truncating (because a conversion from a
maximally negative number can overflow). */
addr = XEXP (x, 0);
- /* Call target hook to avoid the effects of -fpic etc... */
+ /* Call target hook to avoid the effects of -fpic etc.... */
addr = (*targetm.delegitimize_address) (addr);
if (GET_CODE (addr) == LO_SUM)
case CLZ:
hv = 0;
- if (h1 == 0)
- lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
- else
+ if (h1 != 0)
lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
- HOST_BITS_PER_WIDE_INT;
+ else if (l1 != 0)
+ lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
+ else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
+ lv = GET_MODE_BITSIZE (mode);
break;
case CTZ:
hv = 0;
- if (l1 == 0)
- {
- if (h1 == 0)
- lv = GET_MODE_BITSIZE (mode);
- else
- lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
- }
- else
+ if (l1 != 0)
lv = exact_log2 (l1 & -l1);
+ else if (h1 != 0)
+ lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
+ else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
+ lv = GET_MODE_BITSIZE (mode);
break;
case POPCOUNT:
else
{
enum rtx_code reversed;
+ rtx temp;
+
/* There are some simplifications we can do even if the operands
aren't constant. */
switch (code)
if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
&& ((reversed = reversed_comparison_code (op, NULL_RTX))
!= UNKNOWN))
- return gen_rtx_fmt_ee (reversed,
- op_mode, XEXP (op, 0), XEXP (op, 1));
+ return simplify_gen_relational (reversed, op_mode, op_mode,
+ XEXP (op, 0), XEXP (op, 1));
+
+ /* (not (plus X -1)) can become (neg X). */
+ if (GET_CODE (op) == PLUS
+ && XEXP (op, 1) == constm1_rtx)
+ return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
+
+ /* Similarly, (not (neg X)) is (plus X -1). */
+ if (GET_CODE (op) == NEG)
+ return plus_constant (XEXP (op, 0), -1);
+
+ /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
+ if (GET_CODE (op) == XOR
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && (temp = simplify_unary_operation (NOT, mode,
+ XEXP (op, 1),
+ mode)) != 0)
+ return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
+
+
+ /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
+ operands other than 1, but that is not valid. We could do a
+ similar simplification for (not (lshiftrt C X)) where C is
+ just the sign bit, but this doesn't seem common enough to
+ bother with. */
+ if (GET_CODE (op) == ASHIFT
+ && XEXP (op, 0) == const1_rtx)
+ {
+ temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
+ return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
+ }
+
+ /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
+ by reversing the comparison code if valid. */
+ if (STORE_FLAG_VALUE == -1
+ && GET_RTX_CLASS (GET_CODE (op)) == '<'
+ && (reversed = reversed_comparison_code (op, NULL_RTX))
+ != UNKNOWN)
+ return simplify_gen_relational (reversed, op_mode, op_mode,
+ XEXP (op, 0), XEXP (op, 1));
+
+ /* (not (ashiftrt foo C)) where C is the number of bits in FOO
+ minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
+ so we can perform the above simplification. */
+
+ if (STORE_FLAG_VALUE == -1
+ && GET_CODE (op) == ASHIFTRT
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ return simplify_gen_relational (GE, mode, mode, XEXP (op, 0),
+ const0_rtx);
+
break;
case NEG:
/* (neg (neg X)) == X. */
if (GET_CODE (op) == NEG)
return XEXP (op, 0);
+
+ /* (neg (plus X 1)) can become (not X). */
+ if (GET_CODE (op) == PLUS
+ && XEXP (op, 1) == const1_rtx)
+ return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
+
+ /* Similarly, (neg (not X)) is (plus X 1). */
+ if (GET_CODE (op) == NOT)
+ return plus_constant (XEXP (op, 0), 1);
+
+ /* (neg (minus X Y)) can become (minus Y X). This transformation
+ isn't safe for modes with signed zeros, since if X and Y are
+ both +0, (minus Y X) is the same as (minus X Y). If the
+ rounding mode is towards +infinity (or -infinity) then the two
+ expressions will be rounded differently. */
+ if (GET_CODE (op) == MINUS
+ && !HONOR_SIGNED_ZEROS (mode)
+ && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
+ return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
+ XEXP (op, 0));
+
+ /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
+ if (GET_CODE (op) == PLUS
+ && !HONOR_SIGNED_ZEROS (mode)
+ && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
+ {
+ temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
+ return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
+ }
+
+ /* (neg (mult A B)) becomes (mult (neg A) B).
+ This works even for floating-point values. */
+ if (GET_CODE (op) == MULT
+ && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
+ {
+ temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
+ return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
+ }
+
+ /* NEG commutes with ASHIFT since it is multiplication. Only do
+ this if we can then eliminate the NEG (e.g., if the operand
+ is a constant). */
+ if (GET_CODE (op) == ASHIFT)
+ {
+ temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
+ mode);
+ if (temp)
+ return simplify_gen_binary (ASHIFT, mode, temp,
+ XEXP (op, 1));
+ }
+
break;
case SIGN_EXTEND:
}
}
\f
+/* Subroutine of simplify_associative_operation.  Return true if rtx OP
+   is a suitable integer or floating point immediate constant; that is,
+   a CONST_INT or CONST_DOUBLE, either directly or reachable through a
+   constant-pool reference.  */
+static bool
+associative_constant_p (rtx op)
+{
+  /* Fast path: OP is itself an immediate constant.  */
+  if (GET_CODE (op) == CONST_INT
+      || GET_CODE (op) == CONST_DOUBLE)
+    return true;
+  /* Otherwise look through a MEM that refers into the constant pool;
+     the value behind it may still be an immediate constant.  */
+  op = avoid_constant_pool_reference (op);
+  return GET_CODE (op) == CONST_INT
+	 || GET_CODE (op) == CONST_DOUBLE;
+}
+
+/* Subroutine of simplify_binary_operation to simplify an associative
+   binary operation CODE with result mode MODE, operating on OP0 and OP1.
+   Reassociates the expression so that constant operands are folded
+   together and pulled to the outermost (second) position, matching the
+   usual RTL canonical form.  Return 0 if no simplification is possible.
+
+   NOTE(review): callers are responsible for gating floating-point modes
+   on flag_unsafe_math_optimizations, since reassociation is not exact
+   in FP arithmetic -- this function itself does not check.  */
+static rtx
+simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
+				rtx op0, rtx op1)
+{
+  rtx tem;
+
+  /* Simplify (x op c1) op c2 as x op (c1 op c2).  Only proceed when the
+     two constants actually fold; otherwise leave the expression alone.  */
+  if (GET_CODE (op0) == code
+      && associative_constant_p (op1)
+      && associative_constant_p (XEXP (op0, 1)))
+    {
+      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
+      if (! tem)
+	return tem;
+      return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
+    }
+
+  /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2).
+     Again, require the constant half to fold before rewriting.  */
+  if (GET_CODE (op0) == code
+      && GET_CODE (op1) == code
+      && associative_constant_p (XEXP (op0, 1))
+      && associative_constant_p (XEXP (op1, 1)))
+    {
+      rtx c = simplify_binary_operation (code, mode,
+					 XEXP (op0, 1), XEXP (op1, 1));
+      if (! c)
+	return 0;
+      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
+      return simplify_gen_binary (code, mode, tem, c);
+    }
+
+  /* Canonicalize (x op c) op y as (x op y) op c.  This moves the
+     constant outward so a later pass (or the two cases above) can
+     combine it with another constant.  */
+  if (GET_CODE (op0) == code
+      && associative_constant_p (XEXP (op0, 1)))
+    {
+      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
+      return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
+    }
+
+  /* Canonicalize x op (y op c) as (x op y) op c, for the same reason.  */
+  if (GET_CODE (op1) == code
+      && associative_constant_p (XEXP (op1, 1)))
+    {
+      tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
+      return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
+    }
+
+  /* No reassociation opportunity found.  */
+  return 0;
+}
+
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
and OP1. Return 0 if no simplification is possible.
neg_double (l2, h2, &lv, &hv);
l2 = lv, h2 = hv;
- /* .. fall through ... */
+ /* Fall through.... */
case PLUS:
add_double (l1, h1, l2, h2, &lv, &hv);
&& GET_CODE (XEXP (op1, 0)) == PLUS))
&& (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
return tem;
+
+ /* Reassociate floating point addition only when the user
+ specifies unsafe math optimizations. */
+ if (FLOAT_MODE_P (mode)
+ && flag_unsafe_math_optimizations)
+ {
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
+ }
break;
case COMPARE:
if (REAL_VALUES_EQUAL (d, dconstm1))
return simplify_gen_unary (NEG, mode, op0, mode);
}
+
+ /* Reassociate multiplication, but for floating point MULTs
+ only when the user specifies unsafe math optimizations. */
+ if (! FLOAT_MODE_P (mode)
+ || flag_unsafe_math_optimizations)
+ {
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
+ }
break;
case IOR:
&& ! side_effects_p (op0)
&& GET_MODE_CLASS (mode) != MODE_CC)
return constm1_rtx;
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
break;
case XOR:
if (trueop0 == trueop1 && ! side_effects_p (op0)
&& GET_MODE_CLASS (mode) != MODE_CC)
return const0_rtx;
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
break;
case AND:
&& ! side_effects_p (op0)
&& GET_MODE_CLASS (mode) != MODE_CC)
return const0_rtx;
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
break;
case UDIV:
&& (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
- /* ... fall through ... */
+ /* Fall through.... */
case DIV:
if (trueop1 == CONST1_RTX (mode))
return simplify_gen_binary (AND, mode, op0,
GEN_INT (INTVAL (op1) - 1));
- /* ... fall through ... */
+ /* Fall through.... */
case MOD:
if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
&& ! side_effects_p (op1))
return op0;
- /* ... fall through ... */
+ /* Fall through.... */
case ASHIFT:
case LSHIFTRT:
break;
case SMIN:
- if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
+ if (width <= HOST_BITS_PER_WIDE_INT
+ && GET_CODE (trueop1) == CONST_INT
&& INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
&& ! side_effects_p (op0))
return op1;
- else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
+ if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
return op0;
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
break;
case SMAX:
- if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
+ if (width <= HOST_BITS_PER_WIDE_INT
+ && GET_CODE (trueop1) == CONST_INT
&& ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
== (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
&& ! side_effects_p (op0))
return op1;
- else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
+ if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
return op0;
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
break;
case UMIN:
if (trueop1 == const0_rtx && ! side_effects_p (op0))
return op1;
- else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
+ if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
return op0;
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
break;
case UMAX:
if (trueop1 == constm1_rtx && ! side_effects_p (op0))
return op1;
- else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
+ if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
return op0;
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
break;
case SS_PLUS:
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
&& REGNO (op) != ARG_POINTER_REGNUM
#endif
- && REGNO (op) != STACK_POINTER_REGNUM)
+ && REGNO (op) != STACK_POINTER_REGNUM
+ && subreg_offset_representable_p (REGNO (op), innermode,
+ byte, outermode))
{
- int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
- 0);
+ rtx tem = gen_rtx_SUBREG (outermode, op, byte);
+ int final_regno = subreg_hard_regno (tem, 0);
/* ??? We do allow it if the current REG is not valid for
its mode. This is a kludge to work around how float/complex
if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
- /* ... fall through ... */
+ /* Fall through.... */
case '2':
return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));