#include "hashtab.h"
#include "langhooks.h"
#include "md5.h"
+#include "gimple.h"
/* Nonzero if we are folding constants inside an initializer; zero
otherwise. */
static enum tree_code compcode_to_comparison (enum comparison_code);
static tree combine_comparisons (enum tree_code, enum tree_code,
enum tree_code, tree, tree, tree);
-static int truth_value_p (enum tree_code);
static int operand_equal_for_comparison_p (tree, tree, tree);
static int twoval_comparison_p (tree, tree *, tree *, int *);
static tree eval_subst (tree, tree, tree, tree, tree);
static tree pedantic_omit_one_operand (tree, tree, tree);
static tree distribute_bit_expr (enum tree_code, tree, tree, tree);
+static tree make_bit_field_ref (tree, tree, HOST_WIDE_INT, HOST_WIDE_INT, int);
+static tree optimize_bit_field_compare (enum tree_code, tree, tree, tree);
static tree decode_field_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *,
enum machine_mode *, int *, int *,
tree *, tree *);
+static int all_ones_mask_p (const_tree, int);
static tree sign_bit_p (tree, const_tree);
static int simple_operand_p (const_tree);
static tree range_binop (enum tree_code, tree, tree, int, tree, int);
if (hden < 0)
neg_double (lden, hden, &labs_den, &habs_den);
- /* If (2 * abs (lrem) >= abs (lden)) */
+ /* If (2 * abs (lrem) >= abs (lden)), adjust the quotient. */
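+ /* (Illustration, assuming round-to-nearest division: 7 / 2 gives
+ quo == 3 and rem == 1; since 2 * 1 >= 2 the quotient is bumped
+ to 4.) */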
mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
labs_rem, habs_rem, &ltwice, &htwice);
< (unsigned HOST_WIDE_INT) htwice)
|| (((unsigned HOST_WIDE_INT) habs_den
== (unsigned HOST_WIDE_INT) htwice)
- && (labs_den < ltwice)))
+ && (labs_den <= ltwice)))
{
if (*hquo < 0)
/* quo = quo - 1; */
deferred code. */
void
-fold_undefer_overflow_warnings (bool issue, const_tree stmt, int code)
+fold_undefer_overflow_warnings (bool issue, const_gimple stmt, int code)
{
const char *warnmsg;
location_t locus;
if (!issue || warnmsg == NULL)
return;
- if (stmt != NULL_TREE && TREE_NO_WARNING (stmt))
+ if (gimple_no_warning_p (stmt))
return;
/* Use the smallest code level when deciding to issue the
if (!issue_strict_overflow_warning (code))
return;
- if (stmt == NULL_TREE || !expr_has_location (stmt))
+ if (stmt == NULL)
locus = input_location;
else
- locus = expr_location (stmt);
+ locus = gimple_location (stmt);
warning (OPT_Wstrict_overflow, "%H%s", &locus, warnmsg);
}
void
fold_undefer_and_ignore_overflow_warnings (void)
{
- fold_undefer_overflow_warnings (false, NULL_TREE, 0);
+ fold_undefer_overflow_warnings (false, NULL, 0);
}
/* Whether we are deferring overflow warnings. */
flag_rounding_math is set, or if GCC's software emulation
is unable to accurately represent the result. */
if ((flag_rounding_math
- || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
- && !flag_unsafe_math_optimizations))
+ || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
&& (inexact || !real_identical (&result, &value)))
return NULL_TREE;
if (TREE_TYPE (arg1) == type)
return arg1;
- if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type))
+ if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type)
+ || TREE_CODE (type) == OFFSET_TYPE)
{
if (TREE_CODE (arg1) == INTEGER_CST)
return fold_convert_const_int_from_int (type, arg1);
case VOID_TYPE:
tem = fold_ignored_result (arg);
- if (TREE_CODE (tem) == GIMPLE_MODIFY_STMT)
+ if (TREE_CODE (tem) == MODIFY_EXPR)
return tem;
return fold_build1 (NOP_EXPR, type, tem);
case WITH_CLEANUP_EXPR:
case COMPOUND_EXPR:
case MODIFY_EXPR:
- case GIMPLE_MODIFY_STMT:
case TARGET_EXPR:
case COND_EXPR:
case BIND_EXPR:
return fold_build2 (compcode_to_comparison (compcode),
truth_type, ll_arg, lr_arg);
}
-
-/* Return nonzero if CODE is a tree code that represents a truth value. */
-
-static int
-truth_value_p (enum tree_code code)
-{
- return (TREE_CODE_CLASS (code) == tcc_comparison
- || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
- || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
- || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
-}
\f
/* Return nonzero if two operands (typically of the same tree node)
are necessarily equal. If either argument has side-effects this
twoval_comparison_p (tree arg, tree *cval1, tree *cval2, int *save_p)
{
enum tree_code code = TREE_CODE (arg);
- enum tree_code_class class = TREE_CODE_CLASS (code);
+ enum tree_code_class tclass = TREE_CODE_CLASS (code);
/* We can handle some of the tcc_expression cases here. */
- if (class == tcc_expression && code == TRUTH_NOT_EXPR)
- class = tcc_unary;
- else if (class == tcc_expression
+ if (tclass == tcc_expression && code == TRUTH_NOT_EXPR)
+ tclass = tcc_unary;
+ else if (tclass == tcc_expression
&& (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
|| code == COMPOUND_EXPR))
- class = tcc_binary;
+ tclass = tcc_binary;
- else if (class == tcc_expression && code == SAVE_EXPR
+ else if (tclass == tcc_expression && code == SAVE_EXPR
&& ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg, 0)))
{
/* If we've already found a CVAL1 or CVAL2, this expression is
if (*cval1 || *cval2)
return 0;
- class = tcc_unary;
+ tclass = tcc_unary;
*save_p = 1;
}
- switch (class)
+ switch (tclass)
{
case tcc_unary:
return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
{
tree type = TREE_TYPE (arg);
enum tree_code code = TREE_CODE (arg);
- enum tree_code_class class = TREE_CODE_CLASS (code);
+ enum tree_code_class tclass = TREE_CODE_CLASS (code);
/* We can handle some of the tcc_expression cases here. */
- if (class == tcc_expression && code == TRUTH_NOT_EXPR)
- class = tcc_unary;
- else if (class == tcc_expression
+ if (tclass == tcc_expression && code == TRUTH_NOT_EXPR)
+ tclass = tcc_unary;
+ else if (tclass == tcc_expression
&& (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
- class = tcc_binary;
+ tclass = tcc_binary;
- switch (class)
+ switch (tclass)
{
case tcc_unary:
return fold_build1 (code, type,
else
return 0;
+ common = fold_convert (type, common);
+ left = fold_convert (type, left);
+ right = fold_convert (type, right);
return fold_build2 (TREE_CODE (arg0), type, common,
fold_build2 (code, type, left, right));
}
return NULL_TREE;
}
\f
+/* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
+ starting at BITPOS. The field is unsigned if UNSIGNEDP is nonzero. */
+
+static tree
+make_bit_field_ref (tree inner, tree type, HOST_WIDE_INT bitsize,
+ HOST_WIDE_INT bitpos, int unsignedp)
+{
+ tree result, bftype;
+
+ if (bitpos == 0)
+ {
+ tree size = TYPE_SIZE (TREE_TYPE (inner));
+ if ((INTEGRAL_TYPE_P (TREE_TYPE (inner))
+ || POINTER_TYPE_P (TREE_TYPE (inner)))
+ && host_integerp (size, 0)
+ && tree_low_cst (size, 0) == bitsize)
+ return fold_convert (type, inner);
+ }
+
+ bftype = type;
+ if (TYPE_PRECISION (bftype) != bitsize
+ || TYPE_UNSIGNED (bftype) == !unsignedp)
+ bftype = build_nonstandard_integer_type (bitsize, 0);
+
+ result = build3 (BIT_FIELD_REF, bftype, inner,
+ size_int (bitsize), bitsize_int (bitpos));
+
+ if (bftype != type)
+ result = fold_convert (type, result);
+
+ return result;
+}
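+
+/* For example (illustrative): given a 32-bit unsigned INNER,
+ make_bit_field_ref (inner, type, 8, 16, 1) builds
+ BIT_FIELD_REF <inner, 8, 16>, i.e. the byte at bit offset 16;
+ with BITPOS == 0 and a full-width BITSIZE on an integral INNER
+ it degenerates into a plain conversion. */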
+
+/* Optimize a bit-field compare.
+
+ There are two cases: First is a compare against a constant and the
+ second is a comparison of two items where the fields are at the same
+ bit position relative to the start of a chunk (byte, halfword, word)
+ large enough to contain it. In these cases we can avoid the shift
+ implicit in bitfield extractions.
+
+ For constants, we emit a compare of the shifted constant with the
+ BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
+ compared. For two fields at the same position, we do the ANDs with the
+ similar mask and compare the result of the ANDs.
+
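+ For instance (illustrative; the exact layout is target dependent),
+ comparing a 3-bit field at bit offset 4 of a byte against the
+ constant 5 can become ((byte & 0x70) == 0x50).
+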
+ CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
+ COMPARE_TYPE is the type of the comparison, and LHS and RHS
+ are the left and right operands of the comparison, respectively.
+
+ If the optimization described above can be done, we return the resulting
+ tree. Otherwise we return zero. */
+
+static tree
+optimize_bit_field_compare (enum tree_code code, tree compare_type,
+ tree lhs, tree rhs)
+{
+ HOST_WIDE_INT lbitpos, lbitsize, rbitpos, rbitsize, nbitpos, nbitsize;
+ tree type = TREE_TYPE (lhs);
+ tree signed_type, unsigned_type;
+ int const_p = TREE_CODE (rhs) == INTEGER_CST;
+ enum machine_mode lmode, rmode, nmode;
+ int lunsignedp, runsignedp;
+ int lvolatilep = 0, rvolatilep = 0;
+ tree linner, rinner = NULL_TREE;
+ tree mask;
+ tree offset;
+
+ /* Get all the information about the extractions being done. If the bit size
+ is the same as the size of the underlying object, we aren't doing an
+ extraction at all and so can do nothing. We also don't want to
+ do anything if the inner expression is a PLACEHOLDER_EXPR since we
+ then will no longer be able to replace it. */
+ linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
+ &lunsignedp, &lvolatilep, false);
+ if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
+ || offset != 0 || TREE_CODE (linner) == PLACEHOLDER_EXPR)
+ return 0;
+
+ if (!const_p)
+ {
+ /* If this is not a constant, we can only do something if bit positions,
+ sizes, and signedness are the same. */
+ rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset, &rmode,
+ &runsignedp, &rvolatilep, false);
+
+ if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
+ || lunsignedp != runsignedp || offset != 0
+ || TREE_CODE (rinner) == PLACEHOLDER_EXPR)
+ return 0;
+ }
+
+ /* See if we can find a mode to refer to this field. We should be able to,
+ but fail if we can't. */
+ nmode = get_best_mode (lbitsize, lbitpos,
+ const_p ? TYPE_ALIGN (TREE_TYPE (linner))
+ : MIN (TYPE_ALIGN (TREE_TYPE (linner)),
+ TYPE_ALIGN (TREE_TYPE (rinner))),
+ word_mode, lvolatilep || rvolatilep);
+ if (nmode == VOIDmode)
+ return 0;
+
+ /* Set signed and unsigned types of the precision of this mode for the
+ shifts below. */
+ signed_type = lang_hooks.types.type_for_mode (nmode, 0);
+ unsigned_type = lang_hooks.types.type_for_mode (nmode, 1);
+
+ /* Compute the bit position and size for the new reference and our offset
+ within it. If the new reference is the same size as the original, we
+ won't optimize anything, so return zero. */
+ nbitsize = GET_MODE_BITSIZE (nmode);
+ nbitpos = lbitpos & ~ (nbitsize - 1);
+ lbitpos -= nbitpos;
+ if (nbitsize == lbitsize)
+ return 0;
+
+ if (BYTES_BIG_ENDIAN)
+ lbitpos = nbitsize - lbitsize - lbitpos;
+
+ /* Make the mask to be used against the extracted field. */
+ mask = build_int_cst_type (unsigned_type, -1);
+ mask = const_binop (LSHIFT_EXPR, mask, size_int (nbitsize - lbitsize), 0);
+ mask = const_binop (RSHIFT_EXPR, mask,
+ size_int (nbitsize - lbitsize - lbitpos), 0);
+
+ if (! const_p)
+ /* If not comparing with constant, just rework the comparison
+ and return. */
+ return fold_build2 (code, compare_type,
+ fold_build2 (BIT_AND_EXPR, unsigned_type,
+ make_bit_field_ref (linner,
+ unsigned_type,
+ nbitsize, nbitpos,
+ 1),
+ mask),
+ fold_build2 (BIT_AND_EXPR, unsigned_type,
+ make_bit_field_ref (rinner,
+ unsigned_type,
+ nbitsize, nbitpos,
+ 1),
+ mask));
+
+ /* Otherwise, we are handling the constant case. See if the constant is too
+ big for the field. Warn and return a tree for 0 (false) if so. We do
+ this not only for its own sake, but to avoid having to test for this
+ error case below. If we didn't, we might generate wrong code.
+
+ For unsigned fields, the constant shifted right by the field length should
+ be all zero. For signed fields, the high-order bits should agree with
+ the sign bit. */
+
+ if (lunsignedp)
+ {
+ if (! integer_zerop (const_binop (RSHIFT_EXPR,
+ fold_convert (unsigned_type, rhs),
+ size_int (lbitsize), 0)))
+ {
+ warning (0, "comparison is always %d due to width of bit-field",
+ code == NE_EXPR);
+ return constant_boolean_node (code == NE_EXPR, compare_type);
+ }
+ }
+ else
+ {
+ tree tem = const_binop (RSHIFT_EXPR, fold_convert (signed_type, rhs),
+ size_int (lbitsize - 1), 0);
+ if (! integer_zerop (tem) && ! integer_all_onesp (tem))
+ {
+ warning (0, "comparison is always %d due to width of bit-field",
+ code == NE_EXPR);
+ return constant_boolean_node (code == NE_EXPR, compare_type);
+ }
+ }
+
+ /* Single-bit compares should always be against zero. */
+ if (lbitsize == 1 && ! integer_zerop (rhs))
+ {
+ code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
+ rhs = build_int_cst (type, 0);
+ }
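+ /* (E.g., illustratively, "b == 1" on a one-bit unsigned field
+ becomes "b != 0".) */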
+
+ /* Make a new bitfield reference, shift the constant over the
+ appropriate number of bits and mask it with the computed mask
+ (in case this was a signed field). If we changed it, make a new one. */
+ lhs = make_bit_field_ref (linner, unsigned_type, nbitsize, nbitpos, 1);
+ if (lvolatilep)
+ {
+ TREE_SIDE_EFFECTS (lhs) = 1;
+ TREE_THIS_VOLATILE (lhs) = 1;
+ }
+
+ rhs = const_binop (BIT_AND_EXPR,
+ const_binop (LSHIFT_EXPR,
+ fold_convert (unsigned_type, rhs),
+ size_int (lbitpos), 0),
+ mask, 0);
+
+ return build2 (code, compare_type,
+ build2 (BIT_AND_EXPR, unsigned_type, lhs, mask),
+ rhs);
+}
+\f
/* Subroutine for fold_truthop: decode a field reference.
If EXP is a comparison reference, we return the innermost reference.
return inner;
}
+/* Return nonzero if MASK represents a mask of SIZE ones in the low-order
+ bit positions. */
+
+static int
+all_ones_mask_p (const_tree mask, int size)
+{
+ tree type = TREE_TYPE (mask);
+ unsigned int precision = TYPE_PRECISION (type);
+ tree tmask;
+
+ tmask = build_int_cst_type (signed_type_for (type), -1);
+
+ return
+ tree_int_cst_equal (mask,
+ const_binop (RSHIFT_EXPR,
+ const_binop (LSHIFT_EXPR, tmask,
+ size_int (precision - size),
+ 0),
+ size_int (precision - size), 0));
+}
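+
+/* E.g. (illustrative) for SIZE == 4 this accepts a mask value of
+ 0xf: four ones in the low-order bit positions. */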
+
/* Subroutine for fold: determine if VAL is the INTEGER_CONST that
represents the sign bit of EXP's type. If EXP represents a sign
or zero extension, also test VAL against the unextended type.
{
if (TYPE_UNSIGNED (etype))
{
- etype = signed_type_for (etype);
+ tree signed_etype = signed_type_for (etype);
+ if (TYPE_PRECISION (signed_etype) != TYPE_PRECISION (etype))
+ etype
+ = build_nonstandard_integer_type (TYPE_PRECISION (etype), 0);
+ else
+ etype = signed_etype;
exp = fold_convert (etype, exp);
}
return fold_build2 (GT_EXPR, type, exp,
\f
#ifndef LOGICAL_OP_NON_SHORT_CIRCUIT
-#define LOGICAL_OP_NON_SHORT_CIRCUIT (BRANCH_COST >= 2)
+#define LOGICAL_OP_NON_SHORT_CIRCUIT \
+ (BRANCH_COST (optimize_function_for_speed_p (cfun), \
+ false) >= 2)
#endif
/* EXP is some logical combination of boolean tests. See if we can
tree ll_inner, lr_inner, rl_inner, rr_inner;
HOST_WIDE_INT ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
HOST_WIDE_INT rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
- HOST_WIDE_INT xll_bitpos, xrl_bitpos;
- HOST_WIDE_INT lnbitsize, lnbitpos;
+ HOST_WIDE_INT xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
+ HOST_WIDE_INT lnbitsize, lnbitpos, rnbitsize, rnbitpos;
int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
- enum machine_mode lnmode;
+ enum machine_mode lnmode, rnmode;
tree ll_mask, lr_mask, rl_mask, rr_mask;
tree ll_and_mask, lr_and_mask, rl_and_mask, rr_and_mask;
tree l_const, r_const;
- tree lntype, result;
- int first_bit, end_bit;
+ tree lntype, rntype, result;
+ HOST_WIDE_INT first_bit, end_bit;
int volatilep;
tree orig_lhs = lhs, orig_rhs = rhs;
enum tree_code orig_code = code;
that can be merged. Avoid doing this if the RHS is a floating-point
comparison since those can trap. */
- if (BRANCH_COST >= 2
+ if (BRANCH_COST (optimize_function_for_speed_p (cfun),
+ false) >= 2
&& ! FLOAT_TYPE_P (TREE_TYPE (rl_arg))
&& simple_operand_p (rl_arg)
&& simple_operand_p (rr_arg))
}
}
+ /* If the right sides are not constant, do the same for them. Also,
+ disallow this optimization if a size or signedness mismatch occurs
+ between the left and right sides. */
+ if (l_const == 0)
+ {
+ if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
+ || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
+ /* Make sure the two fields on the right
+ correspond to the left without being swapped. */
+ || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
+ return 0;
+
+ first_bit = MIN (lr_bitpos, rr_bitpos);
+ end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
+ rnmode = get_best_mode (end_bit - first_bit, first_bit,
+ TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
+ volatilep);
+ if (rnmode == VOIDmode)
+ return 0;
+
+ rnbitsize = GET_MODE_BITSIZE (rnmode);
+ rnbitpos = first_bit & ~ (rnbitsize - 1);
+ rntype = lang_hooks.types.type_for_size (rnbitsize, 1);
+ xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
+ xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
+ }
+
+ lr_mask = const_binop (LSHIFT_EXPR, fold_convert (rntype, lr_mask),
+ size_int (xlr_bitpos), 0);
+ rr_mask = const_binop (LSHIFT_EXPR, fold_convert (rntype, rr_mask),
+ size_int (xrr_bitpos), 0);
+
+ /* Make a mask that corresponds to both fields being compared.
+ Do this for both items being compared. If the operands are the
+ same size and the bits being compared are in the same position
+ then we can do this by masking both and comparing the masked
+ results. */
+ ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
+ lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
+ if (lnbitsize == rnbitsize && xll_bitpos == xlr_bitpos)
+ {
+ lhs = make_bit_field_ref (ll_inner, lntype, lnbitsize, lnbitpos,
+ ll_unsignedp || rl_unsignedp);
+ if (! all_ones_mask_p (ll_mask, lnbitsize))
+ lhs = build2 (BIT_AND_EXPR, lntype, lhs, ll_mask);
+
+ rhs = make_bit_field_ref (lr_inner, rntype, rnbitsize, rnbitpos,
+ lr_unsignedp || rr_unsignedp);
+ if (! all_ones_mask_p (lr_mask, rnbitsize))
+ rhs = build2 (BIT_AND_EXPR, rntype, rhs, lr_mask);
+
+ return build2 (wanted_code, truth_type, lhs, rhs);
+ }
+
+ /* There is still another way we can do something: If both pairs of
+ fields being compared are adjacent, we may be able to make a wider
+ field containing them both.
+
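+ (For instance, illustratively: "s.a == t.a && s.b == t.b" with a
+ and b occupying adjacent bits can merge into a single comparison
+ of the two combined fields.)
+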
+ Note that we still must mask the lhs/rhs expressions. Furthermore,
+ the mask must be shifted to account for the shift done by
+ make_bit_field_ref. */
+ if ((ll_bitsize + ll_bitpos == rl_bitpos
+ && lr_bitsize + lr_bitpos == rr_bitpos)
+ || (ll_bitpos == rl_bitpos + rl_bitsize
+ && lr_bitpos == rr_bitpos + rr_bitsize))
+ {
+ tree type;
+
+ lhs = make_bit_field_ref (ll_inner, lntype, ll_bitsize + rl_bitsize,
+ MIN (ll_bitpos, rl_bitpos), ll_unsignedp);
+ rhs = make_bit_field_ref (lr_inner, rntype, lr_bitsize + rr_bitsize,
+ MIN (lr_bitpos, rr_bitpos), lr_unsignedp);
+
+ ll_mask = const_binop (RSHIFT_EXPR, ll_mask,
+ size_int (MIN (xll_bitpos, xrl_bitpos)), 0);
+ lr_mask = const_binop (RSHIFT_EXPR, lr_mask,
+ size_int (MIN (xlr_bitpos, xrr_bitpos)), 0);
+
+ /* Convert to the smaller type before masking out unwanted bits. */
+ type = lntype;
+ if (lntype != rntype)
+ {
+ if (lnbitsize > rnbitsize)
+ {
+ lhs = fold_convert (rntype, lhs);
+ ll_mask = fold_convert (rntype, ll_mask);
+ type = rntype;
+ }
+ else if (lnbitsize < rnbitsize)
+ {
+ rhs = fold_convert (lntype, rhs);
+ lr_mask = fold_convert (lntype, lr_mask);
+ type = lntype;
+ }
+ }
+
+ if (! all_ones_mask_p (ll_mask, ll_bitsize + rl_bitsize))
+ lhs = build2 (BIT_AND_EXPR, type, lhs, ll_mask);
+
+ if (! all_ones_mask_p (lr_mask, lr_bitsize + rr_bitsize))
+ rhs = build2 (BIT_AND_EXPR, type, rhs, lr_mask);
+
+ return build2 (wanted_code, truth_type, lhs, rhs);
+ }
+
+ return 0;
+ }
+
/* Handle the case of comparisons with constants. If there is something in
common between the masks, those bits of the constants must be the same.
If not, the condition is always false. Test for this to avoid generating
}
}
- return NULL_TREE;
+ /* Construct the expression we will return. First get the component
+ reference we will make. Unless the mask is all ones the width of
+ that field, perform the mask operation. Then compare with the
+ merged constant. */
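+ /* (Illustratively: "x.a == 1 && x.b == 2" on bit-fields in the
+ same word can end up as "(word & mask) == merged-constant".) */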
+ result = make_bit_field_ref (ll_inner, lntype, lnbitsize, lnbitpos,
+ ll_unsignedp || rl_unsignedp);
+
+ ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
+ if (! all_ones_mask_p (ll_mask, lnbitsize))
+ result = build2 (BIT_AND_EXPR, lntype, result, ll_mask);
+
+ return build2 (wanted_code, truth_type, result,
+ const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
}
\f
/* Optimize T, which is a comparison of a MIN_EXPR or MAX_EXPR with a
(C * 8) % 4 since we know that's zero. */
if ((code == TRUNC_MOD_EXPR || code == CEIL_MOD_EXPR
|| code == FLOOR_MOD_EXPR || code == ROUND_MOD_EXPR)
+ /* If the multiplication can overflow we cannot optimize this.
+ ??? Until we can properly mark individual operations as
+ not overflowing we need to treat sizetype special here as
+ stor-layout relies on this optimization to make
+ DECL_FIELD_BIT_OFFSET always a constant. */
+ && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t))
+ || (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE
+ && TYPE_IS_SIZETYPE (TREE_TYPE (t))))
&& TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST
&& integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0)))
- return omit_one_operand (type, integer_zero_node, op0);
+ {
+ *strict_overflow_p = true;
+ return omit_one_operand (type, integer_zero_node, op0);
+ }
/* ... fall through ... */
if (TREE_CONSTANT (arg0))
return 1;
- if (optimize_size)
+ if (optimize_function_for_size_p (cfun))
return 0;
if (reorder && flag_evaluation_order
if ((code == EQ_EXPR || code == NE_EXPR
|| TYPE_UNSIGNED (TREE_TYPE (arg0)) == TYPE_UNSIGNED (shorter_type))
&& (TREE_TYPE (arg1_unw) == shorter_type
- || (TYPE_PRECISION (shorter_type)
- > TYPE_PRECISION (TREE_TYPE (arg1_unw)))
|| ((TYPE_PRECISION (shorter_type)
- == TYPE_PRECISION (TREE_TYPE (arg1_unw)))
+ >= TYPE_PRECISION (TREE_TYPE (arg1_unw)))
&& (TYPE_UNSIGNED (shorter_type)
== TYPE_UNSIGNED (TREE_TYPE (arg1_unw))))
|| (TREE_CODE (arg1_unw) == INTEGER_CST
if (total_bytes > len)
return 0;
- words = 32 / UNITS_PER_WORD;
+ words = (32 / BITS_PER_UNIT) / UNITS_PER_WORD;
real_to_target (tmp, TREE_REAL_CST_PTR (expr), TYPE_MODE (type));
}
+/* Subroutine of native_encode_expr. Encode the STRING_CST
+ specified by EXPR into the buffer PTR of length LEN bytes.
+ Return the number of bytes placed in the buffer, or zero
+ upon failure. */
+
+static int
+native_encode_string (const_tree expr, unsigned char *ptr, int len)
+{
+ tree type = TREE_TYPE (expr);
+ HOST_WIDE_INT total_bytes;
+
+ if (TREE_CODE (type) != ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (type)) != INTEGER_TYPE
+ || GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (type))) != BITS_PER_UNIT
+ || !host_integerp (TYPE_SIZE_UNIT (type), 0))
+ return 0;
+ total_bytes = tree_low_cst (TYPE_SIZE_UNIT (type), 0);
+ if (total_bytes > len)
+ return 0;
+ if (TREE_STRING_LENGTH (expr) < total_bytes)
+ {
+ memcpy (ptr, TREE_STRING_POINTER (expr), TREE_STRING_LENGTH (expr));
+ memset (ptr + TREE_STRING_LENGTH (expr), 0,
+ total_bytes - TREE_STRING_LENGTH (expr));
+ }
+ else
+ memcpy (ptr, TREE_STRING_POINTER (expr), total_bytes);
+ return total_bytes;
+}
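+
+/* Illustrative example: encoding the STRING_CST "ab" (length 3,
+ counting the trailing NUL) of type char[4] stores 'a', 'b', '\0'
+ plus one zero pad byte and returns 4. */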
+
+
/* Subroutine of fold_view_convert_expr. Encode the INTEGER_CST,
REAL_CST, COMPLEX_CST or VECTOR_CST specified by EXPR into the
buffer PTR of length LEN bytes. Return the number of bytes
case VECTOR_CST:
return native_encode_vector (expr, ptr, len);
+ case STRING_CST:
+ return native_encode_string (expr, ptr, len);
+
default:
return 0;
}
total_bytes = GET_MODE_SIZE (TYPE_MODE (type));
if (total_bytes > len || total_bytes > 24)
return NULL_TREE;
- words = 32 / UNITS_PER_WORD;
+ words = (32 / BITS_PER_UNIT) / UNITS_PER_WORD;
memset (tmp, 0, sizeof (tmp));
for (bitpos = 0; bitpos < total_bytes * BITS_PER_UNIT;
arg0 = op0;
if (arg0)
{
- if (code == NOP_EXPR || code == CONVERT_EXPR
+ if (CONVERT_EXPR_CODE_P (code)
|| code == FLOAT_EXPR || code == ABS_EXPR)
{
/* Don't use STRIP_NOPS, because signedness of argument type
so we don't get into an infinite recursion loop taking the
conversion out and then back in. */
- if ((code == NOP_EXPR || code == CONVERT_EXPR
+ if ((CONVERT_EXPR_CODE_P (code)
|| code == NON_LVALUE_EXPR)
&& TREE_CODE (tem) == COND_EXPR
&& TREE_CODE (TREE_OPERAND (tem, 1)) == code
return fold_convert (type, fold_addr_expr (base));
}
- if ((TREE_CODE (op0) == MODIFY_EXPR
- || TREE_CODE (op0) == GIMPLE_MODIFY_STMT)
- && TREE_CONSTANT (GENERIC_TREE_OPERAND (op0, 1))
+ if (TREE_CODE (op0) == MODIFY_EXPR
+ && TREE_CONSTANT (TREE_OPERAND (op0, 1))
/* Detect assigning a bitfield. */
- && !(TREE_CODE (GENERIC_TREE_OPERAND (op0, 0)) == COMPONENT_REF
+ && !(TREE_CODE (TREE_OPERAND (op0, 0)) == COMPONENT_REF
&& DECL_BIT_FIELD
- (TREE_OPERAND (GENERIC_TREE_OPERAND (op0, 0), 1))))
+ (TREE_OPERAND (TREE_OPERAND (op0, 0), 1))))
{
/* Don't leave an assignment inside a conversion
unless assigning a bitfield. */
- tem = fold_build1 (code, type, GENERIC_TREE_OPERAND (op0, 1));
+ tem = fold_build1 (code, type, TREE_OPERAND (op0, 1));
/* First do the assignment, then return converted constant. */
tem = build2 (COMPOUND_EXPR, TREE_TYPE (tem), op0, tem);
TREE_NO_WARNING (tem) = 1;
transformation effectively doesn't preserve non-maximal ranges. */
if (TREE_CODE (type) == INTEGER_TYPE
&& TREE_CODE (op0) == BIT_AND_EXPR
- && TREE_CODE (TREE_OPERAND (op0, 1)) == INTEGER_CST)
+ && TREE_CODE (TREE_OPERAND (op0, 1)) == INTEGER_CST
+ /* Not if the conversion is to the sub-type. */
+ && TREE_TYPE (type) != TREE_TYPE (op0))
{
tree and = op0;
tree and0 = TREE_OPERAND (and, 0), and1 = TREE_OPERAND (and, 1);
mult_type = type;
else
mult_type = unsigned_type_for (type);
-
- tem = fold_build2 (MULT_EXPR, mult_type,
- fold_convert (mult_type, TREE_OPERAND (op0, 0)),
- fold_convert (mult_type, TREE_OPERAND (op0, 1)));
- return fold_convert (type, tem);
+
+ if (TYPE_PRECISION (mult_type) < TYPE_PRECISION (TREE_TYPE (op0)))
+ {
+ tem = fold_build2 (MULT_EXPR, mult_type,
+ fold_convert (mult_type,
+ TREE_OPERAND (op0, 0)),
+ fold_convert (mult_type,
+ TREE_OPERAND (op0, 1)));
+ return fold_convert (type, tem);
+ }
}
tem = fold_convert_const (code, type, op0);
int sgn0;
bool swap = false;
- /* Match A +- CST code arg1 and CST code arg1. */
- if (!(((code0 == MINUS_EXPR
- || code0 == PLUS_EXPR)
+ /* Match A +- CST code arg1 and CST code arg1. We can change the
+ first form only if overflow is undefined. */
+ if (!((TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (arg0))
+ /* In principle pointers also have undefined overflow behavior,
+ but that causes problems elsewhere. */
+ && !POINTER_TYPE_P (TREE_TYPE (arg0))
+ && (code0 == MINUS_EXPR
+ || code0 == PLUS_EXPR)
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
|| code0 == INTEGER_CST))
return NULL_TREE;
*strict_overflow_p = true;
}
- /* Now build the constant reduced in magnitude. */
+ /* Now build the constant reduced in magnitude. But not if that
+ would produce one outside of its type's range. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (cst0))
+ && ((sgn0 == 1
+ && TYPE_MIN_VALUE (TREE_TYPE (cst0))
+ && tree_int_cst_equal (cst0, TYPE_MIN_VALUE (TREE_TYPE (cst0))))
+ || (sgn0 == -1
+ && TYPE_MAX_VALUE (TREE_TYPE (cst0))
+ && tree_int_cst_equal (cst0, TYPE_MAX_VALUE (TREE_TYPE (cst0))))))
+ /* We cannot swap the comparison here as that would cause us to
+ endlessly recurse. */
+ return NULL_TREE;
+
t = int_const_binop (sgn0 == -1 ? PLUS_EXPR : MINUS_EXPR,
- cst0, build_int_cst (TREE_TYPE (cst0), 1), 0);
+ cst0, build_int_cst (TREE_TYPE (cst0), 1), 0);
if (code0 != INTEGER_CST)
t = fold_build2 (code0, TREE_TYPE (arg0), TREE_OPERAND (arg0, 0), t);
const char * const warnmsg = G_("assuming signed overflow does not occur "
"when reducing constant in comparison");
- /* In principle pointers also have undefined overflow behavior,
- but that causes problems elsewhere. */
- if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (arg0))
- || POINTER_TYPE_P (TREE_TYPE (arg0)))
- return NULL_TREE;
-
/* Try canonicalization by simplifying arg0. */
strict_overflow_p = false;
t = maybe_canonicalize_comparison_1 (code, type, arg0, arg1,
tree t1 = NULL_TREE;
bool strict_overflow_p;
- gcc_assert ((IS_EXPR_CODE_CLASS (kind)
- || IS_GIMPLE_STMT_CODE_CLASS (kind))
+ gcc_assert (IS_EXPR_CODE_CLASS (kind)
&& TREE_CODE_LENGTH (code) == 2
&& op0 != NULL_TREE
&& op1 != NULL_TREE);
/* Convert ~A + 1 to -A. */
if (TREE_CODE (arg0) == BIT_NOT_EXPR
&& integer_onep (arg1))
- return fold_build1 (NEGATE_EXPR, type, TREE_OPERAND (arg0, 0));
+ return fold_build1 (NEGATE_EXPR, type,
+ fold_convert (type, TREE_OPERAND (arg0, 0)));
/* ~X + X is -1. */
if (TREE_CODE (arg0) == BIT_NOT_EXPR
}
/* Optimize x*x as pow(x,2.0), which is expanded as x*x. */
- if (! optimize_size
+ if (optimize_function_for_speed_p (cfun)
&& operand_equal_p (arg0, arg1, 0))
{
tree powfn = mathfn_built_in (type, BUILT_IN_POW);
&& TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
- tree tmp1 = fold_convert (TREE_TYPE (arg0), arg1);
- tree tmp2 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
- TREE_OPERAND (arg0, 0), tmp1);
- tree tmp3 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
- TREE_OPERAND (arg0, 1), tmp1);
+ tree tmp1 = fold_convert (type, arg1);
+ tree tmp2 = fold_convert (type, TREE_OPERAND (arg0, 0));
+ tree tmp3 = fold_convert (type, TREE_OPERAND (arg0, 1));
+ tmp2 = fold_build2 (BIT_AND_EXPR, type, tmp2, tmp1);
+ tmp3 = fold_build2 (BIT_AND_EXPR, type, tmp3, tmp1);
return fold_convert (type,
- fold_build2 (BIT_IOR_EXPR, TREE_TYPE (arg0),
- tmp2, tmp3));
+ fold_build2 (BIT_IOR_EXPR, type, tmp2, tmp3));
}
/* (X | Y) & Y is (X, Y). */
{
if (strict_overflow_p)
fold_overflow_warning (("assuming signed overflow does not occur "
- "when simplifying modulos"),
+ "when simplifying modulus"),
WARN_STRICT_OVERFLOW_MISC);
return fold_convert (type, tem);
}
case RSHIFT_EXPR:
/* Optimize -1 >> x for arithmetic right shifts. */
- if (integer_all_onesp (arg0) && !TYPE_UNSIGNED (type))
+ if (integer_all_onesp (arg0) && !TYPE_UNSIGNED (type)
+ && tree_expr_nonnegative_p (arg1))
return omit_one_operand (type, arg0, arg1);
/* ... fall through ... */
return omit_one_operand (type, rslt, arg0);
}
+ /* If this is a comparison of a field, we may be able to simplify it. */
+ if ((TREE_CODE (arg0) == COMPONENT_REF
+ || TREE_CODE (arg0) == BIT_FIELD_REF)
+ /* Handle the constant case even without -O
+ to make sure the warnings are given. */
+ && (optimize || TREE_CODE (arg1) == INTEGER_CST))
+ {
+ t1 = optimize_bit_field_compare (code, type, arg0, arg1);
+ if (t1)
+ return t1;
+ }
+
/* Optimize comparisons of strlen vs zero to a compare of the
first character of the string vs zero. To wit,
strlen(ptr) == 0 => *ptr == 0
}
}
- /* Change X >= C to X > (C - 1) and X < C to X <= (C - 1) if C > 0.
- This transformation affects the cases which are handled in later
- optimizations involving comparisons with non-negative constants. */
- if (TREE_CODE (arg1) == INTEGER_CST
- && TREE_CODE (arg0) != INTEGER_CST
- && tree_int_cst_sgn (arg1) > 0)
- {
- if (code == GE_EXPR)
- {
- arg1 = const_binop (MINUS_EXPR, arg1,
- build_int_cst (TREE_TYPE (arg1), 1), 0);
- return fold_build2 (GT_EXPR, type, arg0,
- fold_convert (TREE_TYPE (arg0), arg1));
- }
- if (code == LT_EXPR)
- {
- arg1 = const_binop (MINUS_EXPR, arg1,
- build_int_cst (TREE_TYPE (arg1), 1), 0);
- return fold_build2 (LE_EXPR, type, arg0,
- fold_convert (TREE_TYPE (arg0), arg1));
- }
- }
-
/* Comparisons with the highest or lowest possible integer of
the specified precision will have known values. */
{
unsigned int width = TYPE_PRECISION (arg1_type);
if (TREE_CODE (arg1) == INTEGER_CST
- && !TREE_OVERFLOW (arg1)
&& width <= 2 * HOST_BITS_PER_WIDE_INT
&& (INTEGRAL_TYPE_P (arg1_type) || POINTER_TYPE_P (arg1_type)))
{
return fold_convert (type, integer_zero_node);
}
}
+
+ /* A bit-field-ref that referenced the full argument can be stripped. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (arg0))
+ && TYPE_PRECISION (TREE_TYPE (arg0)) == tree_low_cst (arg1, 1)
+ && integer_zerop (op2))
+ return fold_convert (type, arg0);
+
return NULL_TREE;
default:
return expr;
}
- if (IS_EXPR_CODE_CLASS (kind)
- || IS_GIMPLE_STMT_CODE_CLASS (kind))
+ if (IS_EXPR_CODE_CLASS (kind))
{
tree type = TREE_TYPE (t);
tree op0, op1, op2;
{
const void **slot;
enum tree_code code;
- struct tree_function_decl buf;
+ union tree_node buf;
int i, len;
recursive_label:
expr = (tree) &buf;
}
else if (TREE_CODE_CLASS (code) == tcc_type
- && (TYPE_POINTER_TO (expr) || TYPE_REFERENCE_TO (expr)
+ && (TYPE_POINTER_TO (expr)
+ || TYPE_REFERENCE_TO (expr)
|| TYPE_CACHED_VALUES_P (expr)
- || TYPE_CONTAINS_PLACEHOLDER_INTERNAL (expr)))
+ || TYPE_CONTAINS_PLACEHOLDER_INTERNAL (expr)
+ || TYPE_NEXT_VARIANT (expr)))
{
/* Allow these fields to be modified. */
tree tmp;
TYPE_CONTAINS_PLACEHOLDER_INTERNAL (tmp) = 0;
TYPE_POINTER_TO (tmp) = NULL;
TYPE_REFERENCE_TO (tmp) = NULL;
+ TYPE_NEXT_VARIANT (tmp) = NULL;
if (TYPE_CACHED_VALUES_P (tmp))
{
TYPE_CACHED_VALUES_P (tmp) = 0;
/* zero_extend(x) * zero_extend(y) is non-negative if x and y are
both unsigned and their combined width is less than the result's. */
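/* (For instance, illustratively: a product computed in a 32-bit int
of two values zero-extended from 8-bit types cannot be negative,
since 8 + 8 < 32. The new code below also admits a non-negative
INTEGER_CST, using its minimal unsigned precision.) */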
if (TREE_CODE (type) == INTEGER_TYPE
- && TREE_CODE (op0) == NOP_EXPR
- && TREE_CODE (op1) == NOP_EXPR)
+ && (TREE_CODE (op0) == NOP_EXPR || TREE_CODE (op0) == INTEGER_CST)
+ && (TREE_CODE (op1) == NOP_EXPR || TREE_CODE (op1) == INTEGER_CST))
{
- tree inner1 = TREE_TYPE (TREE_OPERAND (op0, 0));
- tree inner2 = TREE_TYPE (TREE_OPERAND (op1, 0));
- if (TREE_CODE (inner1) == INTEGER_TYPE && TYPE_UNSIGNED (inner1)
- && TREE_CODE (inner2) == INTEGER_TYPE && TYPE_UNSIGNED (inner2))
- return TYPE_PRECISION (inner1) + TYPE_PRECISION (inner2)
- < TYPE_PRECISION (type);
+ tree inner0 = (TREE_CODE (op0) == NOP_EXPR)
+ ? TREE_TYPE (TREE_OPERAND (op0, 0))
+ : TREE_TYPE (op0);
+ tree inner1 = (TREE_CODE (op1) == NOP_EXPR)
+ ? TREE_TYPE (TREE_OPERAND (op1, 0))
+ : TREE_TYPE (op1);
+
+ bool unsigned0 = TYPE_UNSIGNED (inner0);
+ bool unsigned1 = TYPE_UNSIGNED (inner1);
+
+ if (TREE_CODE (op0) == INTEGER_CST)
+ unsigned0 = unsigned0 || tree_int_cst_sgn (op0) >= 0;
+
+ if (TREE_CODE (op1) == INTEGER_CST)
+ unsigned1 = unsigned1 || tree_int_cst_sgn (op1) >= 0;
+
+ if (TREE_CODE (inner0) == INTEGER_TYPE && unsigned0
+ && TREE_CODE (inner1) == INTEGER_TYPE && unsigned1)
+ {
+ unsigned int precision0 = (TREE_CODE (op0) == INTEGER_CST)
+ ? tree_int_cst_min_precision (op0, /*unsignedp=*/true)
+ : TYPE_PRECISION (inner0);
+
+ unsigned int precision1 = (TREE_CODE (op1) == INTEGER_CST)
+ ? tree_int_cst_min_precision (op1, /*unsignedp=*/true)
+ : TYPE_PRECISION (inner1);
+
+ return precision0 + precision1 < TYPE_PRECISION (type);
+ }
}
return false;
*STRICT_OVERFLOW_P. */
bool
-tree_call_nonnegative_warnv_p (enum tree_code code, tree type, tree fndecl,
+tree_call_nonnegative_warnv_p (tree type, tree fndecl,
tree arg0, tree arg1, bool *strict_overflow_p)
{
if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
default:
break;
}
- return tree_simple_nonnegative_warnv_p (code,
+ return tree_simple_nonnegative_warnv_p (CALL_EXPR,
type);
}
else
break;
}
- if ((TREE_CODE (t) == MODIFY_EXPR
- || TREE_CODE (t) == GIMPLE_MODIFY_STMT)
- && GENERIC_TREE_OPERAND (t, 0) == temp)
- return tree_expr_nonnegative_warnv_p (GENERIC_TREE_OPERAND (t, 1),
+ if (TREE_CODE (t) == MODIFY_EXPR
+ && TREE_OPERAND (t, 0) == temp)
+ return tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 1),
strict_overflow_p);
return false;
tree arg0 = call_expr_nargs (t) > 0 ? CALL_EXPR_ARG (t, 0) : NULL_TREE;
tree arg1 = call_expr_nargs (t) > 1 ? CALL_EXPR_ARG (t, 1) : NULL_TREE;
- return tree_call_nonnegative_warnv_p (TREE_CODE (t),
- TREE_TYPE (t),
+ return tree_call_nonnegative_warnv_p (TREE_TYPE (t),
get_callee_fndecl (t),
arg0,
arg1,
}
case COMPOUND_EXPR:
case MODIFY_EXPR:
- case GIMPLE_MODIFY_STMT:
- return tree_expr_nonnegative_warnv_p (GENERIC_TREE_OPERAND (t, 1),
+ return tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 1),
strict_overflow_p);
case BIND_EXPR:
return tree_expr_nonnegative_warnv_p (expr_last (TREE_OPERAND (t, 1)),
case COMPOUND_EXPR:
case MODIFY_EXPR:
- case GIMPLE_MODIFY_STMT:
case BIND_EXPR:
- return tree_expr_nonzero_warnv_p (GENERIC_TREE_OPERAND (t, 1),
+ return tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 1),
strict_overflow_p);
case SAVE_EXPR:
with constant folding. (E.g. suppose the lower bound is 1,
and its mode is QI. Without the conversion, (ARRAY
+(INDEX-(unsigned char)1)) becomes ((ARRAY+(-(unsigned char)1))
- +INDEX), which becomes (ARRAY+255+INDEX). Opps!) */
+ +INDEX), which becomes (ARRAY+255+INDEX). Oops!) */
if (! integer_zerop (low_bound))
index = size_diffop (index, fold_convert (sizetype, low_bound));