\f
#ifndef LOGICAL_OP_NON_SHORT_CIRCUIT
-#define LOGICAL_OP_NON_SHORT_CIRCUIT (BRANCH_COST >= 2)
+#define LOGICAL_OP_NON_SHORT_CIRCUIT \
+ (BRANCH_COST (!cfun || optimize_function_for_speed_p (cfun), \
+ false) >= 2)
#endif
/* EXP is some logical combination of boolean tests. See if we can
that can be merged. Avoid doing this if the RHS is a floating-point
comparison since those can trap. */
- if (BRANCH_COST >= 2
+ if (BRANCH_COST (!cfun || optimize_function_for_speed_p (cfun),
+ false) >= 2
&& ! FLOAT_TYPE_P (TREE_TYPE (rl_arg))
&& simple_operand_p (rl_arg)
&& simple_operand_p (rr_arg))
(C * 8) % 4 since we know that's zero. */
if ((code == TRUNC_MOD_EXPR || code == CEIL_MOD_EXPR
|| code == FLOOR_MOD_EXPR || code == ROUND_MOD_EXPR)
+ /* If the multiplication can overflow we cannot optimize this.
+ ??? Until we can properly mark individual operations as
+ not overflowing we need to treat sizetype special here as
+ stor-layout relies on this optimization to make
+ DECL_FIELD_BIT_OFFSET always a constant. */
+ && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t))
+ || (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE
+ && TYPE_IS_SIZETYPE (TREE_TYPE (t))))
&& TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST
&& integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0)))
- return omit_one_operand (type, integer_zero_node, op0);
+ {
+ *strict_overflow_p = true;
+ return omit_one_operand (type, integer_zero_node, op0);
+ }
/* ... fall through ... */
if (TREE_CONSTANT (arg0))
return 1;
- if (optimize_size)
+ if (cfun && optimize_function_for_size_p (cfun))
return 0;
if (reorder && flag_evaluation_order
arg0 = op0;
if (arg0)
{
- if (code == NOP_EXPR || code == CONVERT_EXPR
+ if (CONVERT_EXPR_CODE_P (code)
|| code == FLOAT_EXPR || code == ABS_EXPR)
{
/* Don't use STRIP_NOPS, because signedness of argument type
so we don't get into an infinite recursion loop taking the
conversion out and then back in. */
- if ((code == NOP_EXPR || code == CONVERT_EXPR
+ if ((CONVERT_EXPR_CODE_P (code)
|| code == NON_LVALUE_EXPR)
&& TREE_CODE (tem) == COND_EXPR
&& TREE_CODE (TREE_OPERAND (tem, 1)) == code
int sgn0;
bool swap = false;
- /* Match A +- CST code arg1 and CST code arg1. */
- if (!(((code0 == MINUS_EXPR
- || code0 == PLUS_EXPR)
+ /* Match A +- CST code arg1 and CST code arg1. We can change the
+ first form only if overflow is undefined. */
+ if (!((TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (arg0))
+ /* In principle pointers also have undefined overflow behavior,
+ but that causes problems elsewhere. */
+ && !POINTER_TYPE_P (TREE_TYPE (arg0))
+ && (code0 == MINUS_EXPR
+ || code0 == PLUS_EXPR)
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
|| code0 == INTEGER_CST))
return NULL_TREE;
*strict_overflow_p = true;
}
- /* Now build the constant reduced in magnitude. */
+ /* Now build the constant reduced in magnitude. But not if that
+ would produce one outside of its type's range. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (cst0))
+ && ((sgn0 == 1
+ && TYPE_MIN_VALUE (TREE_TYPE (cst0))
+ && tree_int_cst_equal (cst0, TYPE_MIN_VALUE (TREE_TYPE (cst0))))
+ || (sgn0 == -1
+ && TYPE_MAX_VALUE (TREE_TYPE (cst0))
+ && tree_int_cst_equal (cst0, TYPE_MAX_VALUE (TREE_TYPE (cst0))))))
+ /* We cannot swap the comparison here as that would cause us to
+ endlessly recurse. */
+ return NULL_TREE;
+
t = int_const_binop (sgn0 == -1 ? PLUS_EXPR : MINUS_EXPR,
- cst0, build_int_cst (TREE_TYPE (cst0), 1), 0);
+ cst0, build_int_cst (TREE_TYPE (cst0), 1), 0);
if (code0 != INTEGER_CST)
t = fold_build2 (code0, TREE_TYPE (arg0), TREE_OPERAND (arg0, 0), t);
const char * const warnmsg = G_("assuming signed overflow does not occur "
"when reducing constant in comparison");
- /* In principle pointers also have undefined overflow behavior,
- but that causes problems elsewhere. */
- if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (arg0))
- || POINTER_TYPE_P (TREE_TYPE (arg0)))
- return NULL_TREE;
-
/* Try canonicalization by simplifying arg0. */
strict_overflow_p = false;
t = maybe_canonicalize_comparison_1 (code, type, arg0, arg1,
/* Convert ~A + 1 to -A. */
if (TREE_CODE (arg0) == BIT_NOT_EXPR
&& integer_onep (arg1))
- return fold_build1 (NEGATE_EXPR, type, TREE_OPERAND (arg0, 0));
+ return fold_build1 (NEGATE_EXPR, type,
+ fold_convert (type, TREE_OPERAND (arg0, 0)));
/* ~X + X is -1. */
if (TREE_CODE (arg0) == BIT_NOT_EXPR
}
/* Optimize x*x as pow(x,2.0), which is expanded as x*x. */
- if (! optimize_size
+ if (optimize_function_for_speed_p (cfun)
&& operand_equal_p (arg0, arg1, 0))
{
tree powfn = mathfn_built_in (type, BUILT_IN_POW);
&& TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
- tree tmp1 = fold_convert (TREE_TYPE (arg0), arg1);
- tree tmp2 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
- TREE_OPERAND (arg0, 0), tmp1);
- tree tmp3 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
- TREE_OPERAND (arg0, 1), tmp1);
+ tree tmp1 = fold_convert (type, arg1);
+ tree tmp2 = fold_convert (type, TREE_OPERAND (arg0, 0));
+ tree tmp3 = fold_convert (type, TREE_OPERAND (arg0, 1));
+ tmp2 = fold_build2 (BIT_AND_EXPR, type, tmp2, tmp1);
+ tmp3 = fold_build2 (BIT_AND_EXPR, type, tmp3, tmp1);
return fold_convert (type,
- fold_build2 (BIT_IOR_EXPR, TREE_TYPE (arg0),
- tmp2, tmp3));
+ fold_build2 (BIT_IOR_EXPR, type, tmp2, tmp3));
}
/* (X | Y) & Y is (X, Y). */
}
}
- /* Change X >= C to X > (C - 1) and X < C to X <= (C - 1) if C > 0.
- This transformation affects the cases which are handled in later
- optimizations involving comparisons with non-negative constants. */
- if (TREE_CODE (arg1) == INTEGER_CST
- && TREE_CODE (arg0) != INTEGER_CST
- && tree_int_cst_sgn (arg1) > 0)
- {
- if (code == GE_EXPR)
- {
- arg1 = const_binop (MINUS_EXPR, arg1,
- build_int_cst (TREE_TYPE (arg1), 1), 0);
- return fold_build2 (GT_EXPR, type, arg0,
- fold_convert (TREE_TYPE (arg0), arg1));
- }
- if (code == LT_EXPR)
- {
- arg1 = const_binop (MINUS_EXPR, arg1,
- build_int_cst (TREE_TYPE (arg1), 1), 0);
- return fold_build2 (LE_EXPR, type, arg0,
- fold_convert (TREE_TYPE (arg0), arg1));
- }
- }
-
/* Comparisons with the highest or lowest possible integer of
the specified precision will have known values. */
{