@@ This would also make life easier when this technology is used
@@ for cross-compilers. */
-/* The entry points in this file are fold, size_int_wide, size_binop
- and force_fit_type_double.
+/* The entry points in this file are fold, size_int_wide and size_binop.
fold takes a tree as argument and returns a simplified tree.
size_int takes an integer value, and creates a tree constant
with type from `sizetype'.
- force_fit_type_double takes a constant, an overflowable flag and a
- prior overflow indicator. It forces the value to fit the type and
- sets TREE_OVERFLOW.
-
Note: Since the folders get called on non-gimple code as well as
gimple code, we need to handle GIMPLE tuples as well as their
corresponding tree equivalents. */
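For orientation, a minimal sketch of how these surviving entry points are typically called; the wrapper function here is hypothetical, not part of the patch:

/* Hypothetical caller showing the entry points in use.  */
static tree
example_fold_size (tree expr)
{
  /* fold: simplify an arbitrary expression tree.  */
  tree simplified = fold (expr);

  /* size_int / size_binop: build and combine sizetype constants.  */
  tree total = size_binop (PLUS_EXPR, size_int (4), size_int (8));

  return simplified ? simplified : total;
}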
#include "expr.h"
#include "tm_p.h"
#include "target.h"
+#include "diagnostic-core.h"
#include "toplev.h"
#include "intl.h"
#include "ggc.h"
#include "langhooks.h"
#include "md5.h"
#include "gimple.h"
+#include "tree-flow.h"
/* Nonzero if we are folding constants inside an initializer; zero
otherwise. */
static tree negate_expr (tree);
static tree split_tree (tree, enum tree_code, tree *, tree *, tree *, int);
static tree associate_trees (location_t, tree, tree, enum tree_code, tree);
-static tree const_binop (enum tree_code, tree, tree, int);
+static tree const_binop (enum tree_code, tree, tree);
static enum comparison_code comparison_to_compcode (enum tree_code);
static enum tree_code compcode_to_comparison (enum comparison_code);
static int operand_equal_for_comparison_p (tree, tree, tree);
static tree fold_div_compare (location_t, enum tree_code, tree, tree, tree);
static bool reorder_operands_p (const_tree, const_tree);
static tree fold_negate_const (tree, tree);
-static tree fold_not_const (tree, tree);
+static tree fold_not_const (const_tree, tree);
static tree fold_relational_const (enum tree_code, tree, tree, tree);
static tree fold_convert_const (enum tree_code, tree, tree);
}
}
else
- t = force_fit_type_double (TREE_TYPE (arg1), res.low, res.high, 1,
+ t = force_fit_type_double (TREE_TYPE (arg1), res, 1,
((!uns || is_sizetype) && overflow)
| TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
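The signature change in the hunk above recurs throughout the patch: force_fit_type_double now takes one double_int instead of separate low/high HOST_WIDE_INT halves. A minimal before/after sketch of a call site, using names from this hunk (OVF abbreviates the overflow expression shown above):

/* Before: the two HOST_WIDE_INT halves were passed separately.
   t = force_fit_type_double (TREE_TYPE (arg1), res.low, res.high, 1, OVF);
   After: the pair travels together as one double_int value.  */
t = force_fit_type_double (TREE_TYPE (arg1), res, 1, OVF);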
/* Combine two constants ARG1 and ARG2 under operation CODE to produce a new
constant. We assume ARG1 and ARG2 have the same data type, or at least
are the same kind of constant and the same machine mode. Return zero if
- combining the constants is not allowed in the current operating mode.
-
- If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
+ combining the constants is not allowed in the current operating mode. */
static tree
-const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
+const_binop (enum tree_code code, tree arg1, tree arg2)
{
/* Sanity check for the recursive cases. */
if (!arg1 || !arg2)
STRIP_NOPS (arg2);
if (TREE_CODE (arg1) == INTEGER_CST)
- return int_const_binop (code, arg1, arg2, notrunc);
+ return int_const_binop (code, arg1, arg2, 0);
if (TREE_CODE (arg1) == REAL_CST)
{
{
case PLUS_EXPR:
case MINUS_EXPR:
- real = const_binop (code, r1, r2, notrunc);
- imag = const_binop (code, i1, i2, notrunc);
+ real = const_binop (code, r1, r2);
+ imag = const_binop (code, i1, i2);
break;
case MULT_EXPR:
mpc_mul);
real = const_binop (MINUS_EXPR,
- const_binop (MULT_EXPR, r1, r2, notrunc),
- const_binop (MULT_EXPR, i1, i2, notrunc),
- notrunc);
+ const_binop (MULT_EXPR, r1, r2),
+ const_binop (MULT_EXPR, i1, i2));
imag = const_binop (PLUS_EXPR,
- const_binop (MULT_EXPR, r1, i2, notrunc),
- const_binop (MULT_EXPR, i1, r2, notrunc),
- notrunc);
+ const_binop (MULT_EXPR, r1, i2),
+ const_binop (MULT_EXPR, i1, r2));
break;
case RDIV_EXPR:
*/
tree magsquared
= const_binop (PLUS_EXPR,
- const_binop (MULT_EXPR, r2, r2, notrunc),
- const_binop (MULT_EXPR, i2, i2, notrunc),
- notrunc);
+ const_binop (MULT_EXPR, r2, r2),
+ const_binop (MULT_EXPR, i2, i2));
tree t1
= const_binop (PLUS_EXPR,
- const_binop (MULT_EXPR, r1, r2, notrunc),
- const_binop (MULT_EXPR, i1, i2, notrunc),
- notrunc);
+ const_binop (MULT_EXPR, r1, r2),
+ const_binop (MULT_EXPR, i1, i2));
tree t2
= const_binop (MINUS_EXPR,
- const_binop (MULT_EXPR, i1, r2, notrunc),
- const_binop (MULT_EXPR, r1, i2, notrunc),
- notrunc);
+ const_binop (MULT_EXPR, i1, r2),
+ const_binop (MULT_EXPR, r1, i2));
- real = const_binop (code, t1, magsquared, notrunc);
- imag = const_binop (code, t2, magsquared, notrunc);
+ real = const_binop (code, t1, magsquared);
+ imag = const_binop (code, t2, magsquared);
}
else
{
ti = (ai * ratio) - ar;
tr = tr / div;
ti = ti / div; */
- tree ratio = const_binop (code, r2, i2, notrunc);
+ tree ratio = const_binop (code, r2, i2);
tree div = const_binop (PLUS_EXPR, i2,
- const_binop (MULT_EXPR, r2, ratio,
- notrunc),
- notrunc);
- real = const_binop (MULT_EXPR, r1, ratio, notrunc);
- real = const_binop (PLUS_EXPR, real, i1, notrunc);
- real = const_binop (code, real, div, notrunc);
-
- imag = const_binop (MULT_EXPR, i1, ratio, notrunc);
- imag = const_binop (MINUS_EXPR, imag, r1, notrunc);
- imag = const_binop (code, imag, div, notrunc);
+ const_binop (MULT_EXPR, r2, ratio));
+ real = const_binop (MULT_EXPR, r1, ratio);
+ real = const_binop (PLUS_EXPR, real, i1);
+ real = const_binop (code, real, div);
+
+ imag = const_binop (MULT_EXPR, i1, ratio);
+ imag = const_binop (MINUS_EXPR, imag, r1);
+ imag = const_binop (code, imag, div);
}
else
{
ti = b - (a * ratio);
tr = tr / div;
ti = ti / div; */
- tree ratio = const_binop (code, i2, r2, notrunc);
+ tree ratio = const_binop (code, i2, r2);
tree div = const_binop (PLUS_EXPR, r2,
- const_binop (MULT_EXPR, i2, ratio,
- notrunc),
- notrunc);
+ const_binop (MULT_EXPR, i2, ratio));
- real = const_binop (MULT_EXPR, i1, ratio, notrunc);
- real = const_binop (PLUS_EXPR, real, r1, notrunc);
- real = const_binop (code, real, div, notrunc);
+ real = const_binop (MULT_EXPR, i1, ratio);
+ real = const_binop (PLUS_EXPR, real, r1);
+ real = const_binop (code, real, div);
- imag = const_binop (MULT_EXPR, r1, ratio, notrunc);
- imag = const_binop (MINUS_EXPR, i1, imag, notrunc);
- imag = const_binop (code, imag, div, notrunc);
+ imag = const_binop (MULT_EXPR, r1, ratio);
+ imag = const_binop (MINUS_EXPR, i1, imag);
+ imag = const_binop (code, imag, div);
}
}
break;
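The ratio/div formulas in the comments above are Smith's algorithm for complex division; the real code picks a branch by comparing the magnitudes of the divisor's parts and builds trees with const_binop. A self-contained sketch in plain doubles, illustrative only:

#include <math.h>

/* Smith's algorithm: (ar + i*ai) / (br + i*bi), scaling by whichever
   of br, bi is larger in magnitude to avoid overflow in br*br + bi*bi.  */
static void
smith_div (double ar, double ai, double br, double bi,
	   double *tr, double *ti)
{
  if (fabs (br) >= fabs (bi))
    {
      double ratio = bi / br;
      double div = br + bi * ratio;
      *tr = (ar + ai * ratio) / div;
      *ti = (ai - ar * ratio) / div;
    }
  else
    {
      double ratio = br / bi;
      double div = bi + br * ratio;
      *tr = (ar * ratio + ai) / div;
      *ti = (ai * ratio - ar) / div;
    }
}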
elements2 = TREE_CHAIN (elements2);
}
- elem = const_binop (code, elem1, elem2, notrunc);
+ elem = const_binop (code, elem1, elem2);
	    /* It is possible that const_binop cannot handle the given
	       code and returns NULL_TREE.  */
/* Given an integer constant, make new constant with new type,
appropriately sign-extended or truncated. */
- t = force_fit_type_double (type, TREE_INT_CST_LOW (arg1),
- TREE_INT_CST_HIGH (arg1),
+ t = force_fit_type_double (type, tree_to_double_int (arg1),
!POINTER_TYPE_P (TREE_TYPE (arg1)),
(TREE_INT_CST_HIGH (arg1) < 0
&& (TYPE_UNSIGNED (type)
if (! overflow)
real_to_integer2 ((HOST_WIDE_INT *) &val.low, &val.high, &r);
- t = force_fit_type_double (type, val.low, val.high, -1,
- overflow | TREE_OVERFLOW (arg1));
+ t = force_fit_type_double (type, val, -1, overflow | TREE_OVERFLOW (arg1));
return t;
}
/* Given a fixed-point constant, make new constant with new type,
appropriately sign-extended or truncated. */
- t = force_fit_type_double (type, temp.low, temp.high, -1,
+ t = force_fit_type_double (type, temp, -1,
(double_int_negative_p (temp)
&& (TYPE_UNSIGNED (type)
< TYPE_UNSIGNED (TREE_TYPE (arg1))))
static tree
build_zero_vector (tree type)
{
- tree elem, list;
- int i, units;
-
- elem = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
- units = TYPE_VECTOR_SUBPARTS (type);
+ tree t;
- list = NULL_TREE;
- for (i = 0; i < units; i++)
- list = tree_cons (NULL_TREE, elem, list);
- return build_vector (type, list);
+ t = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
+ return build_vector_from_val (type, t);
}
/* Returns true, if ARG is convertible to TYPE using a NOP_EXPR. */
case SSA_NAME:
case COMPONENT_REF:
+ case MEM_REF:
case INDIRECT_REF:
- case ALIGN_INDIRECT_REF:
- case MISALIGNED_INDIRECT_REF:
case ARRAY_REF:
case ARRAY_RANGE_REF:
case BIT_FIELD_REF:
{
if (pedantic_lvalues)
return non_lvalue_loc (loc, x);
- protected_set_expr_location (x, loc);
+
+ if (CAN_HAVE_LOCATION_P (x)
+ && EXPR_LOCATION (x) != loc
+ && !(TREE_CODE (x) == SAVE_EXPR
+ || TREE_CODE (x) == TARGET_EXPR
+ || TREE_CODE (x) == BIND_EXPR))
+ {
+ x = copy_node (x);
+ SET_EXPR_LOCATION (x, loc);
+ }
return x;
}
\f
switch (TREE_CODE (arg0))
{
case INDIRECT_REF:
- case ALIGN_INDIRECT_REF:
- case MISALIGNED_INDIRECT_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
return OP_SAME (0);
+ case MEM_REF:
+ /* Require equal access sizes, and similar pointer types.
+ We can have incomplete types for array references of
+	 variable-sized arrays from the Fortran frontend
+ though. */
+ return ((TYPE_SIZE (TREE_TYPE (arg0)) == TYPE_SIZE (TREE_TYPE (arg1))
+ || (TYPE_SIZE (TREE_TYPE (arg0))
+ && TYPE_SIZE (TREE_TYPE (arg1))
+ && operand_equal_p (TYPE_SIZE (TREE_TYPE (arg0)),
+ TYPE_SIZE (TREE_TYPE (arg1)), flags)))
+ && (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (arg0, 1)))
+ == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (arg1, 1))))
+ && OP_SAME (0) && OP_SAME (1));
+
case ARRAY_REF:
case ARRAY_RANGE_REF:
/* Operands 2 and 3 may be null.
case TRUTH_ORIF_EXPR:
return OP_SAME (0) && OP_SAME (1);
+ case FMA_EXPR:
+ case WIDEN_MULT_PLUS_EXPR:
+ case WIDEN_MULT_MINUS_EXPR:
+ if (!OP_SAME (2))
+ return 0;
+      /* The multiplication operands are commutative. */
+ /* FALLTHRU */
+
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
TREE_OPERAND (arg1, 0), flags));
case COND_EXPR:
+ case VEC_COND_EXPR:
+ case DOT_PROD_EXPR:
return OP_SAME (0) && OP_SAME (1) && OP_SAME (2);
default:
/* Make the mask to be used against the extracted field. */
mask = build_int_cst_type (unsigned_type, -1);
- mask = const_binop (LSHIFT_EXPR, mask, size_int (nbitsize - lbitsize), 0);
+ mask = const_binop (LSHIFT_EXPR, mask, size_int (nbitsize - lbitsize));
mask = const_binop (RSHIFT_EXPR, mask,
- size_int (nbitsize - lbitsize - lbitpos), 0);
+ size_int (nbitsize - lbitsize - lbitpos));
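A worked instance of the two shifts above, with assumed values nbitsize = 32, lbitsize = 8, lbitpos = 16 (unsigned arithmetic throughout):

unsigned int mask = ~0u;   /* 0xffffffff */
mask <<= 32 - 8;           /* 0xff000000 */
mask >>= 32 - 8 - 16;      /* 0x00ff0000: lbitsize ones at bit lbitpos */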
if (! const_p)
/* If not comparing with constant, just rework the comparison
if (! integer_zerop (const_binop (RSHIFT_EXPR,
fold_convert_loc (loc,
unsigned_type, rhs),
- size_int (lbitsize), 0)))
+ size_int (lbitsize))))
{
warning (0, "comparison is always %d due to width of bit-field",
code == NE_EXPR);
{
tree tem = const_binop (RSHIFT_EXPR,
fold_convert_loc (loc, signed_type, rhs),
- size_int (lbitsize - 1), 0);
+ size_int (lbitsize - 1));
if (! integer_zerop (tem) && ! integer_all_onesp (tem))
{
warning (0, "comparison is always %d due to width of bit-field",
rhs = const_binop (BIT_AND_EXPR,
const_binop (LSHIFT_EXPR,
fold_convert_loc (loc, unsigned_type, rhs),
- size_int (lbitpos), 0),
- mask, 0);
+ size_int (lbitpos)),
+ mask);
lhs = build2 (code, compare_type,
build2 (BIT_AND_EXPR, unsigned_type, lhs, mask),
mask = build_int_cst_type (unsigned_type, -1);
- mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
- mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
+ mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize));
+ mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize));
/* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
if (and_mask != 0)
tree_int_cst_equal (mask,
const_binop (RSHIFT_EXPR,
const_binop (LSHIFT_EXPR, tmask,
- size_int (precision - size),
- 0),
- size_int (precision - size), 0));
+ size_int (precision - size)),
+ size_int (precision - size)));
}
/* Subroutine for fold: determine if VAL is the INTEGER_CONST that
n_high = range_binop (MINUS_EXPR, exp_type,
build_int_cst (exp_type, 0),
0, low, 0);
- low = n_low, high = n_high;
- exp = arg0;
- continue;
+ if (n_high != 0 && TREE_OVERFLOW (n_high))
+ break;
+ goto normalize;
case BIT_NOT_EXPR:
/* ~ X -> -X - 1 */
if (TYPE_OVERFLOW_UNDEFINED (arg0_type))
*strict_overflow_p = true;
+ normalize:
/* Check for an unsigned range which has wrapped around the maximum
value thus making n_high < n_low, and normalize it. */
if (n_low && n_high && tree_int_cst_lt (n_high, n_low))
low = fold_convert_loc (loc, etype, low);
exp = fold_convert_loc (loc, etype, exp);
- value = const_binop (MINUS_EXPR, high, low, 0);
+ value = const_binop (MINUS_EXPR, high, low);
if (POINTER_TYPE_P (etype))
OEP_ONLY_CONST)
&& operand_equal_p (arg01,
const_binop (PLUS_EXPR, arg2,
- build_int_cst (type, 1), 0),
+ build_int_cst (type, 1)),
OEP_ONLY_CONST))
{
tem = fold_build2_loc (loc, MIN_EXPR, TREE_TYPE (arg00), arg00,
OEP_ONLY_CONST)
&& operand_equal_p (arg01,
const_binop (MINUS_EXPR, arg2,
- build_int_cst (type, 1), 0),
+ build_int_cst (type, 1)),
OEP_ONLY_CONST))
{
tem = fold_build2_loc (loc, MIN_EXPR, TREE_TYPE (arg00), arg00,
OEP_ONLY_CONST)
&& operand_equal_p (arg01,
const_binop (MINUS_EXPR, arg2,
- build_int_cst (type, 1), 0),
+ build_int_cst (type, 1)),
OEP_ONLY_CONST))
{
tem = fold_build2_loc (loc, MAX_EXPR, TREE_TYPE (arg00), arg00,
OEP_ONLY_CONST)
&& operand_equal_p (arg01,
const_binop (PLUS_EXPR, arg2,
- build_int_cst (type, 1), 0),
+ build_int_cst (type, 1)),
OEP_ONLY_CONST))
{
tem = fold_build2_loc (loc, MAX_EXPR, TREE_TYPE (arg00), arg00,
/* We work by getting just the sign bit into the low-order bit, then
into the high-order bit, then sign-extend. We then XOR that value
with C. */
- temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1), 0);
- temp = const_binop (BIT_AND_EXPR, temp, size_int (1), 0);
+ temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1));
+ temp = const_binop (BIT_AND_EXPR, temp, size_int (1));
/* We must use a signed type in order to get an arithmetic right shift.
However, we must also avoid introducing accidental overflows, so that
if (TYPE_UNSIGNED (type))
temp = fold_convert (signed_type_for (type), temp);
- temp = const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1), 0);
- temp = const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1), 0);
+ temp = const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1));
+ temp = const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1));
if (mask != 0)
temp = const_binop (BIT_AND_EXPR, temp,
- fold_convert (TREE_TYPE (c), mask),
- 0);
+ fold_convert (TREE_TYPE (c), mask));
/* If necessary, convert the type back to match the type of C. */
if (TYPE_UNSIGNED (type))
temp = fold_convert (type, temp);
- return fold_convert (type,
- const_binop (BIT_XOR_EXPR, c, temp, 0));
+ return fold_convert (type, const_binop (BIT_XOR_EXPR, c, temp));
}
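A worked instance of the shift/XOR sequence above, with assumed p = 8, modesize = 32, c = 0x80; arithmetic right shift on signed int and the implementation-defined int cast behave here as they do on GCC targets:

unsigned int c = 0x80;
unsigned int bit = (c >> (8 - 1)) & 1;        /* sign bit of c: 1 */
int temp = (int) (bit << (32 - 1));           /* 0x80000000 */
temp >>= 32 - 8 - 1;                          /* 0xffffff00, arithmetic shift */
unsigned int ext = c ^ (unsigned int) temp;   /* 0xffffff80: c sign-extended
						 from 8 bits */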
\f
/* For an expression that has the form
}
ll_mask = const_binop (LSHIFT_EXPR, fold_convert_loc (loc, lntype, ll_mask),
- size_int (xll_bitpos), 0);
+ size_int (xll_bitpos));
rl_mask = const_binop (LSHIFT_EXPR, fold_convert_loc (loc, lntype, rl_mask),
- size_int (xrl_bitpos), 0);
+ size_int (xrl_bitpos));
if (l_const)
{
l_const = fold_convert_loc (loc, lntype, l_const);
l_const = unextend (l_const, ll_bitsize, ll_unsignedp, ll_and_mask);
- l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0);
+ l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos));
if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const,
fold_build1_loc (loc, BIT_NOT_EXPR,
- lntype, ll_mask),
- 0)))
+ lntype, ll_mask))))
{
warning (0, "comparison is always %d", wanted_code == NE_EXPR);
{
r_const = fold_convert_loc (loc, lntype, r_const);
r_const = unextend (r_const, rl_bitsize, rl_unsignedp, rl_and_mask);
- r_const = const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos), 0);
+ r_const = const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos));
if (! integer_zerop (const_binop (BIT_AND_EXPR, r_const,
fold_build1_loc (loc, BIT_NOT_EXPR,
- lntype, rl_mask),
- 0)))
+ lntype, rl_mask))))
{
warning (0, "comparison is always %d", wanted_code == NE_EXPR);
lr_mask = const_binop (LSHIFT_EXPR, fold_convert_loc (loc,
rntype, lr_mask),
- size_int (xlr_bitpos), 0);
+ size_int (xlr_bitpos));
rr_mask = const_binop (LSHIFT_EXPR, fold_convert_loc (loc,
rntype, rr_mask),
- size_int (xrr_bitpos), 0);
+ size_int (xrr_bitpos));
/* Make a mask that corresponds to both fields being compared.
Do this for both items being compared. If the operands are the
same size and the bits being compared are in the same position
then we can do this by masking both and comparing the masked
results. */
- ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
- lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
+ ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask);
+ lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask);
if (lnbitsize == rnbitsize && xll_bitpos == xlr_bitpos)
{
lhs = make_bit_field_ref (loc, ll_inner, lntype, lnbitsize, lnbitpos,
MIN (lr_bitpos, rr_bitpos), lr_unsignedp);
ll_mask = const_binop (RSHIFT_EXPR, ll_mask,
- size_int (MIN (xll_bitpos, xrl_bitpos)), 0);
+ size_int (MIN (xll_bitpos, xrl_bitpos)));
lr_mask = const_binop (RSHIFT_EXPR, lr_mask,
- size_int (MIN (xlr_bitpos, xrr_bitpos)), 0);
+ size_int (MIN (xlr_bitpos, xrr_bitpos)));
/* Convert to the smaller type before masking out unwanted bits. */
type = lntype;
common between the masks, those bits of the constants must be the same.
If not, the condition is always false. Test for this to avoid generating
incorrect code below. */
- result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
+ result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask);
if (! integer_zerop (result)
- && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
- const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
+ && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const),
+ const_binop (BIT_AND_EXPR, result, r_const)) != 1)
{
if (wanted_code == NE_EXPR)
{
result = make_bit_field_ref (loc, ll_inner, lntype, lnbitsize, lnbitpos,
ll_unsignedp || rl_unsignedp);
- ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
+ ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask);
if (! all_ones_mask_p (ll_mask, lnbitsize))
{
result = build2 (BIT_AND_EXPR, lntype, result, ll_mask);
}
result = build2 (wanted_code, truth_type, result,
- const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
+ const_binop (BIT_IOR_EXPR, l_const, r_const));
fold_truthop_exit:
SET_EXPR_LOCATION (result, loc);
/* For a constant, we can always simplify if we are a multiply
or (for divide and modulus) if it is a multiple of our constant. */
if (code == MULT_EXPR
- || integer_zerop (const_binop (TRUNC_MOD_EXPR, t, c, 0)))
+ || integer_zerop (const_binop (TRUNC_MOD_EXPR, t, c)))
return const_binop (code, fold_convert (ctype, t),
- fold_convert (ctype, c), 0);
+ fold_convert (ctype, c));
break;
CASE_CONVERT: case NON_LVALUE_EXPR:
&& 0 != (t1 = fold_convert (ctype,
const_binop (LSHIFT_EXPR,
size_one_node,
- op1, 0)))
+ op1)))
&& !TREE_OVERFLOW (t1))
return extract_muldiv (build2 (tcode == LSHIFT_EXPR
? MULT_EXPR : FLOOR_DIV_EXPR,
/* If it's a multiply or a division/modulus operation of a multiple
of our constant, do the operation and verify it doesn't overflow. */
if (code == MULT_EXPR
- || integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0)))
+ || integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c)))
{
op1 = const_binop (code, fold_convert (ctype, op1),
- fold_convert (ctype, c), 0);
+ fold_convert (ctype, c));
/* We allow the constant to overflow with wrapping semantics. */
if (op1 == 0
|| (TREE_OVERFLOW (op1) && !TYPE_OVERFLOW_WRAPS (ctype)))
|| (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE
&& TYPE_IS_SIZETYPE (TREE_TYPE (t))))
&& TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST
- && integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0)))
+ && integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c)))
{
*strict_overflow_p = true;
return omit_one_operand (type, integer_zero_node, op0);
&& 0 != (t1 = int_const_binop (MULT_EXPR,
fold_convert (ctype, op1),
fold_convert (ctype, c), 1))
- && 0 != (t1 = force_fit_type_double (ctype, TREE_INT_CST_LOW (t1),
- TREE_INT_CST_HIGH (t1),
+ && 0 != (t1 = force_fit_type_double (ctype, tree_to_double_int (t1),
(TYPE_UNSIGNED (ctype)
&& tcode != MULT_EXPR) ? -1 : 1,
TREE_OVERFLOW (t1)))
&& code != FLOOR_MOD_EXPR && code != ROUND_MOD_EXPR
&& code != MULT_EXPR)))
{
- if (integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0)))
+ if (integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c)))
{
if (TYPE_OVERFLOW_UNDEFINED (ctype))
*strict_overflow_p = true;
return fold_build2 (tcode, ctype, fold_convert (ctype, op0),
fold_convert (ctype,
const_binop (TRUNC_DIV_EXPR,
- op1, c, 0)));
+ op1, c)));
}
- else if (integer_zerop (const_binop (TRUNC_MOD_EXPR, c, op1, 0)))
+ else if (integer_zerop (const_binop (TRUNC_MOD_EXPR, c, op1)))
{
if (TYPE_OVERFLOW_UNDEFINED (ctype))
*strict_overflow_p = true;
return fold_build2 (code, ctype, fold_convert (ctype, op0),
fold_convert (ctype,
const_binop (TRUNC_DIV_EXPR,
- c, op1, 0)));
+ c, op1)));
}
}
break;
tree prod, tmp, hi, lo;
tree arg00 = TREE_OPERAND (arg0, 0);
tree arg01 = TREE_OPERAND (arg0, 1);
- unsigned HOST_WIDE_INT lpart;
- HOST_WIDE_INT hpart;
+ double_int val;
bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
bool neg_overflow;
int overflow;
TREE_INT_CST_HIGH (arg01),
TREE_INT_CST_LOW (arg1),
TREE_INT_CST_HIGH (arg1),
- &lpart, &hpart, unsigned_p);
- prod = force_fit_type_double (TREE_TYPE (arg00), lpart, hpart,
- -1, overflow);
+ &val.low, &val.high, unsigned_p);
+ prod = force_fit_type_double (TREE_TYPE (arg00), val, -1, overflow);
neg_overflow = false;
if (unsigned_p)
TREE_INT_CST_HIGH (prod),
TREE_INT_CST_LOW (tmp),
TREE_INT_CST_HIGH (tmp),
- &lpart, &hpart, unsigned_p);
- hi = force_fit_type_double (TREE_TYPE (arg00), lpart, hpart,
+ &val.low, &val.high, unsigned_p);
+ hi = force_fit_type_double (TREE_TYPE (arg00), val,
-1, overflow | TREE_OVERFLOW (prod));
}
else if (tree_int_cst_sgn (arg01) >= 0)
return NULL_TREE;
if (TREE_CODE (arg1) == INTEGER_CST)
- arg1 = force_fit_type_double (inner_type, TREE_INT_CST_LOW (arg1),
- TREE_INT_CST_HIGH (arg1), 0,
- TREE_OVERFLOW (arg1));
+ arg1 = force_fit_type_double (inner_type, tree_to_double_int (arg1),
+ 0, TREE_OVERFLOW (arg1));
else
arg1 = fold_convert_loc (loc, inner_type, arg1);
if (TREE_CODE (t) == WITH_SIZE_EXPR)
t = TREE_OPERAND (t, 0);
- /* Note: doesn't apply to ALIGN_INDIRECT_REF */
- if (TREE_CODE (t) == INDIRECT_REF
- || TREE_CODE (t) == MISALIGNED_INDIRECT_REF)
+ if (TREE_CODE (t) == INDIRECT_REF)
{
t = TREE_OPERAND (t, 0);
SET_EXPR_LOCATION (t, loc);
}
}
+ else if (TREE_CODE (t) == MEM_REF
+ && integer_zerop (TREE_OPERAND (t, 1)))
+ return TREE_OPERAND (t, 0);
else if (TREE_CODE (t) == VIEW_CONVERT_EXPR)
{
t = build_fold_addr_expr_loc (loc, TREE_OPERAND (t, 0));
}
if (change)
{
- tem = force_fit_type_double (type, TREE_INT_CST_LOW (and1),
- TREE_INT_CST_HIGH (and1), 0,
- TREE_OVERFLOW (and1));
+ tem = force_fit_type_double (type, tree_to_double_int (and1),
+ 0, TREE_OVERFLOW (and1));
return fold_build2_loc (loc, BIT_AND_EXPR, type,
fold_convert_loc (loc, type, and0), tem);
}
if (TREE_CODE (op0) == VIEW_CONVERT_EXPR)
return fold_build1_loc (loc, VIEW_CONVERT_EXPR,
type, TREE_OPERAND (op0, 0));
+ if (TREE_CODE (op0) == MEM_REF)
+ return fold_build2_loc (loc, MEM_REF, type,
+ TREE_OPERAND (op0, 0), TREE_OPERAND (op0, 1));
/* For integral conversions with the same precision or pointer
conversions use a NOP_EXPR instead. */
case IMAGPART_EXPR:
if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
- return fold_convert_loc (loc, type, integer_zero_node);
+ return build_zero_cst (type);
if (TREE_CODE (arg0) == COMPLEX_EXPR)
return omit_one_operand_loc (loc, type, TREE_OPERAND (arg0, 1),
TREE_OPERAND (arg0, 0));
else if (TREE_CODE (arg0) == POINTER_PLUS_EXPR)
{
base0 = TREE_OPERAND (arg0, 0);
+ STRIP_SIGN_NOPS (base0);
+ if (TREE_CODE (base0) == ADDR_EXPR)
+ {
+ base0 = TREE_OPERAND (base0, 0);
+ indirect_base0 = true;
+ }
offset0 = TREE_OPERAND (arg0, 1);
}
else if (TREE_CODE (arg1) == POINTER_PLUS_EXPR)
{
base1 = TREE_OPERAND (arg1, 0);
+ STRIP_SIGN_NOPS (base1);
+ if (TREE_CODE (base1) == ADDR_EXPR)
+ {
+ base1 = TREE_OPERAND (base1, 0);
+ indirect_base1 = true;
+ }
offset1 = TREE_OPERAND (arg1, 1);
}
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == REAL_CST
&& 0 != (tem = const_binop (TREE_CODE (arg0) == PLUS_EXPR
? MINUS_EXPR : PLUS_EXPR,
- arg1, TREE_OPERAND (arg0, 1), 0))
+ arg1, TREE_OPERAND (arg0, 1)))
&& !TREE_OVERFLOW (tem))
return fold_build2_loc (loc, code, type, TREE_OPERAND (arg0, 0), tem);
&& TREE_CODE (arg0) == MINUS_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 0)) == REAL_CST
&& 0 != (tem = const_binop (MINUS_EXPR, TREE_OPERAND (arg0, 0),
- arg1, 0))
+ arg1))
&& !TREE_OVERFLOW (tem))
return fold_build2_loc (loc, swap_tree_comparison (code), type,
TREE_OPERAND (arg0, 1), tem);
fold_build2_loc (loc, MULT_EXPR, itype, rpart, rpart),
fold_build2_loc (loc, MULT_EXPR, itype, ipart, ipart));
return fold_build2_loc (loc, COMPLEX_EXPR, type, tem,
- fold_convert_loc (loc, itype, integer_zero_node));
+ build_zero_cst (itype));
}
/* Make sure type and arg0 have the same saturating flag. */
gcc_assert (TYPE_SATURATING (type)
== TYPE_SATURATING (TREE_TYPE (arg0)));
- tem = const_binop (code, arg0, arg1, 0);
+ tem = const_binop (code, arg0, arg1);
}
else if (kind == tcc_comparison)
tem = fold_relational_const (code, type, arg0, arg1);
switch (code)
{
+ case MEM_REF:
+ /* MEM[&MEM[p, CST1], CST2] -> MEM[p, CST1 + CST2]. */
+ if (TREE_CODE (arg0) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (arg0, 0)) == MEM_REF)
+ {
+ tree iref = TREE_OPERAND (arg0, 0);
+ return fold_build2 (MEM_REF, type,
+ TREE_OPERAND (iref, 0),
+ int_const_binop (PLUS_EXPR, arg1,
+ TREE_OPERAND (iref, 1), 0));
+ }
+
+ /* MEM[&a.b, CST2] -> MEM[&a, offsetof (a, b) + CST2]. */
+ if (TREE_CODE (arg0) == ADDR_EXPR
+ && handled_component_p (TREE_OPERAND (arg0, 0)))
+ {
+ tree base;
+ HOST_WIDE_INT coffset;
+ base = get_addr_base_and_unit_offset (TREE_OPERAND (arg0, 0),
+ &coffset);
+ if (!base)
+ return NULL_TREE;
+ return fold_build2 (MEM_REF, type,
+ build_fold_addr_expr (base),
+ int_const_binop (PLUS_EXPR, arg1,
+ size_int (coffset), 0));
+ }
+
+ return NULL_TREE;
+
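Source-level shape of the second fold above, with a hypothetical struct; the fold rebases the access from &a.b to &a plus a constant byte offset:

#include <stddef.h>

struct S { int a; int b[4]; };

int
load_elt (struct S *s)
{
  /* MEM[&s->b, 8] is rewritten as MEM[&*s, offsetof (struct S, b) + 8],
     i.e. the same object as s->b[2].  */
  return *(int *) ((char *) &s->b[0] + 8);
}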
case POINTER_PLUS_EXPR:
/* 0 +p index -> (type)index */
if (integer_zerop (arg0))
&& TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
&& integer_zerop (const_binop (BIT_AND_EXPR,
TREE_OPERAND (arg0, 1),
- TREE_OPERAND (arg1, 1), 0)))
+ TREE_OPERAND (arg1, 1))))
{
code = BIT_IOR_EXPR;
goto bit_ior;
if ((!FLOAT_TYPE_P (type) || !HONOR_NANS (TYPE_MODE (type)))
&& operand_equal_p (arg0, arg1, 0))
- return fold_convert_loc (loc, type, integer_zero_node);
+ return build_zero_cst (type);
/* A - B -> A + (-B) if B is easily negatable. */
if (negate_expr_p (arg1)
&& TREE_CODE (TREE_OPERAND (arg0, 0)) == REAL_CST)
{
tree tem = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 0),
- arg1, 0);
+ arg1);
if (tem)
return fold_build2_loc (loc, RDIV_EXPR, type, tem,
TREE_OPERAND (arg0, 1));
if (TREE_CODE (arg0) == BIT_NOT_EXPR
&& operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0))
{
- t1 = fold_convert_loc (loc, type, integer_zero_node);
+ t1 = build_zero_cst (type);
t1 = fold_unary_loc (loc, BIT_NOT_EXPR, type, t1);
return omit_one_operand_loc (loc, type, t1, arg1);
}
if (TREE_CODE (arg1) == BIT_NOT_EXPR
&& operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0))
{
- t1 = fold_convert_loc (loc, type, integer_zero_node);
+ t1 = build_zero_cst (type);
t1 = fold_unary_loc (loc, BIT_NOT_EXPR, type, t1);
return omit_one_operand_loc (loc, type, t1, arg0);
}
if (TREE_CODE (arg0) == BIT_NOT_EXPR
&& operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0))
{
- t1 = fold_convert_loc (loc, type, integer_zero_node);
+ t1 = build_zero_cst (type);
t1 = fold_unary_loc (loc, BIT_NOT_EXPR, type, t1);
return omit_one_operand_loc (loc, type, t1, arg1);
}
if (TREE_CODE (arg1) == BIT_NOT_EXPR
&& operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0))
{
- t1 = fold_convert_loc (loc, type, integer_zero_node);
+ t1 = build_zero_cst (type);
t1 = fold_unary_loc (loc, BIT_NOT_EXPR, type, t1);
return omit_one_operand_loc (loc, type, t1, arg0);
}
&& TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
&& integer_zerop (const_binop (BIT_AND_EXPR,
TREE_OPERAND (arg0, 1),
- TREE_OPERAND (arg1, 1), 0)))
+ TREE_OPERAND (arg1, 1))))
{
code = BIT_IOR_EXPR;
goto bit_ior;
fold_convert_loc (loc, type, arg0));
}
+ /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
+ ((A & N) + B) & M -> (A + B) & M
+ Similarly if (N & M) == 0,
+ ((A | N) + B) & M -> (A + B) & M
+ and for - instead of + (or unary - instead of +)
+ and/or ^ instead of |.
+ If B is constant and (B & M) == 0, fold into A & M. */
+ if (host_integerp (arg1, 1))
+ {
+ unsigned HOST_WIDE_INT cst1 = tree_low_cst (arg1, 1);
+ if (~cst1 && (cst1 & (cst1 + 1)) == 0
+ && INTEGRAL_TYPE_P (TREE_TYPE (arg0))
+ && (TREE_CODE (arg0) == PLUS_EXPR
+ || TREE_CODE (arg0) == MINUS_EXPR
+ || TREE_CODE (arg0) == NEGATE_EXPR)
+ && (TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
+ || TREE_CODE (TREE_TYPE (arg0)) == INTEGER_TYPE))
+ {
+ tree pmop[2];
+ int which = 0;
+ unsigned HOST_WIDE_INT cst0;
+
+ /* Now we know that arg0 is (C + D) or (C - D) or
+ -C and arg1 (M) is == (1LL << cst) - 1.
+ Store C into PMOP[0] and D into PMOP[1]. */
+ pmop[0] = TREE_OPERAND (arg0, 0);
+ pmop[1] = NULL;
+ if (TREE_CODE (arg0) != NEGATE_EXPR)
+ {
+ pmop[1] = TREE_OPERAND (arg0, 1);
+ which = 1;
+ }
+
+ if (!host_integerp (TYPE_MAX_VALUE (TREE_TYPE (arg0)), 1)
+ || (tree_low_cst (TYPE_MAX_VALUE (TREE_TYPE (arg0)), 1)
+ & cst1) != cst1)
+ which = -1;
+
+ for (; which >= 0; which--)
+ switch (TREE_CODE (pmop[which]))
+ {
+ case BIT_AND_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ if (TREE_CODE (TREE_OPERAND (pmop[which], 1))
+ != INTEGER_CST)
+ break;
+ /* tree_low_cst not used, because we don't care about
+ the upper bits. */
+ cst0 = TREE_INT_CST_LOW (TREE_OPERAND (pmop[which], 1));
+ cst0 &= cst1;
+ if (TREE_CODE (pmop[which]) == BIT_AND_EXPR)
+ {
+ if (cst0 != cst1)
+ break;
+ }
+ else if (cst0 != 0)
+ break;
+ /* If C or D is of the form (A & N) where
+ (N & M) == M, or of the form (A | N) or
+ (A ^ N) where (N & M) == 0, replace it with A. */
+ pmop[which] = TREE_OPERAND (pmop[which], 0);
+ break;
+ case INTEGER_CST:
+ /* If C or D is a N where (N & M) == 0, it can be
+		/* If C or D is an N where (N & M) == 0, it can be
+ if ((TREE_CODE (arg0) == PLUS_EXPR
+ || (TREE_CODE (arg0) == MINUS_EXPR && which == 0))
+ && (TREE_INT_CST_LOW (pmop[which]) & cst1) == 0)
+ pmop[which] = NULL;
+ break;
+ default:
+ break;
+ }
+
+ /* Only build anything new if we optimized one or both arguments
+ above. */
+ if (pmop[0] != TREE_OPERAND (arg0, 0)
+ || (TREE_CODE (arg0) != NEGATE_EXPR
+ && pmop[1] != TREE_OPERAND (arg0, 1)))
+ {
+ tree utype = TREE_TYPE (arg0);
+ if (! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0)))
+ {
+ /* Perform the operations in a type that has defined
+ overflow behavior. */
+ utype = unsigned_type_for (TREE_TYPE (arg0));
+ if (pmop[0] != NULL)
+ pmop[0] = fold_convert_loc (loc, utype, pmop[0]);
+ if (pmop[1] != NULL)
+ pmop[1] = fold_convert_loc (loc, utype, pmop[1]);
+ }
+
+ if (TREE_CODE (arg0) == NEGATE_EXPR)
+ tem = fold_build1_loc (loc, NEGATE_EXPR, utype, pmop[0]);
+ else if (TREE_CODE (arg0) == PLUS_EXPR)
+ {
+ if (pmop[0] != NULL && pmop[1] != NULL)
+ tem = fold_build2_loc (loc, PLUS_EXPR, utype,
+ pmop[0], pmop[1]);
+ else if (pmop[0] != NULL)
+ tem = pmop[0];
+ else if (pmop[1] != NULL)
+ tem = pmop[1];
+ else
+ return build_int_cst (type, 0);
+ }
+ else if (pmop[0] == NULL)
+ tem = fold_build1_loc (loc, NEGATE_EXPR, utype, pmop[1]);
+ else
+ tem = fold_build2_loc (loc, MINUS_EXPR, utype,
+ pmop[0], pmop[1]);
+ /* TEM is now the new binary +, - or unary - replacement. */
+ tem = fold_build2_loc (loc, BIT_AND_EXPR, utype, tem,
+ fold_convert_loc (loc, utype, arg1));
+ return fold_convert_loc (loc, type, tem);
+ }
+ }
+ }
+
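A worked instance of the new rule, with assumed M = 7 and N = 0xff (so (N & M) == M): carries in an addition only propagate upward, so clearing bits above M in one addend never changes the low bits of the sum.

unsigned int a = 0x1234, b = 0x5678;
unsigned int lhs = ((a & 0xff) + b) & 7;   /* ((A & N) + B) & M */
unsigned int rhs = (a + b) & 7;            /* (A + B) & M */
/* lhs == rhs (== 4 here) for every a and b.  */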
t1 = distribute_bit_expr (loc, code, type, arg0, arg1);
if (t1 != NULL_TREE)
return t1;
{
if (flag_reciprocal_math
&& 0 != (tem = const_binop (code, build_real (type, dconst1),
- arg1, 0)))
+ arg1)))
return fold_build2_loc (loc, MULT_EXPR, type, arg0, tem);
/* Find the reciprocal if optimizing and the result is exact. */
if (optimize)
&& TREE_CODE (TREE_OPERAND (arg1, 1)) == REAL_CST)
{
tree tem = const_binop (RDIV_EXPR, arg0,
- TREE_OPERAND (arg1, 1), 0);
+ TREE_OPERAND (arg1, 1));
if (tem)
return fold_build2_loc (loc, RDIV_EXPR, type, tem,
TREE_OPERAND (arg1, 0));
return NULL_TREE;
case TRUNC_DIV_EXPR:
+ /* Optimize (X & (-A)) / A where A is a power of 2,
+ to X >> log2(A) */
+ if (TREE_CODE (arg0) == BIT_AND_EXPR
+ && !TYPE_UNSIGNED (type) && TREE_CODE (arg1) == INTEGER_CST
+ && integer_pow2p (arg1) && tree_int_cst_sgn (arg1) > 0)
+ {
+ tree sum = fold_binary_loc (loc, PLUS_EXPR, TREE_TYPE (arg1),
+ arg1, TREE_OPERAND (arg0, 1));
+	  if (sum && integer_zerop (sum))
+	    {
+	      unsigned long pow2;
+
+	      if (TREE_INT_CST_LOW (arg1))
+		pow2 = exact_log2 (TREE_INT_CST_LOW (arg1));
+	      else
+		pow2 = exact_log2 (TREE_INT_CST_HIGH (arg1))
+		       + HOST_BITS_PER_WIDE_INT;
+
+	      return fold_build2_loc (loc, RSHIFT_EXPR, type,
+				      TREE_OPERAND (arg0, 0),
+				      build_int_cst (NULL_TREE, pow2));
+	    }
+ }
+
+ /* Fall thru */
+
case FLOOR_DIV_EXPR:
/* Simplify A / (B << N) where A and B are positive and B is
a power of 2, to A >> (N + log2(B)). */
if (integer_pow2p (sval) && tree_int_cst_sgn (sval) > 0)
{
tree sh_cnt = TREE_OPERAND (arg1, 1);
- unsigned long pow2 = exact_log2 (TREE_INT_CST_LOW (sval));
+ unsigned long pow2;
+
+ if (TREE_INT_CST_LOW (sval))
+ pow2 = exact_log2 (TREE_INT_CST_LOW (sval));
+ else
+ pow2 = exact_log2 (TREE_INT_CST_HIGH (sval))
+ + HOST_BITS_PER_WIDE_INT;
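When the low HOST_WIDE_INT of the divisor is zero, the power of two lives in the high word, hence the exact_log2 (high) + HOST_BITS_PER_WIDE_INT branch. Worked instances of the two folds, with assumed values; the shifts match the divisions because the masked or power-of-two operands make them exact:

int x = -100;
int q1 = (x & -8) / 8;            /* (X & (-A)) / A: folds to x >> 3 == -13 */

unsigned int a = 1000, b = 4, n = 2;
unsigned int q2 = a / (b << n);   /* A / (B << N): folds to
				     a >> (n + log2 (b)) == 1000 >> 4 == 62 */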
if (strict_overflow_p)
fold_overflow_warning (("assuming signed overflow does not "
{
tree tem = build_int_cst (TREE_TYPE (arg1),
TYPE_PRECISION (type));
- tem = const_binop (MINUS_EXPR, tem, arg1, 0);
+ tem = const_binop (MINUS_EXPR, tem, arg1);
return fold_build2_loc (loc, RROTATE_EXPR, type, op0, tem);
}
? MINUS_EXPR : PLUS_EXPR,
fold_convert_loc (loc, TREE_TYPE (arg0),
arg1),
- TREE_OPERAND (arg0, 1), 0))
+ TREE_OPERAND (arg0, 1)))
&& !TREE_OVERFLOW (tem))
return fold_build2_loc (loc, code, type, TREE_OPERAND (arg0, 0), tem);
{
case GT_EXPR:
arg1 = const_binop (PLUS_EXPR, arg1,
- build_int_cst (TREE_TYPE (arg1), 1), 0);
+ build_int_cst (TREE_TYPE (arg1), 1));
return fold_build2_loc (loc, EQ_EXPR, type,
fold_convert_loc (loc,
TREE_TYPE (arg1), arg0),
arg1);
case LE_EXPR:
arg1 = const_binop (PLUS_EXPR, arg1,
- build_int_cst (TREE_TYPE (arg1), 1), 0);
+ build_int_cst (TREE_TYPE (arg1), 1));
return fold_build2_loc (loc, NE_EXPR, type,
fold_convert_loc (loc, TREE_TYPE (arg1),
arg0),
switch (code)
{
case GE_EXPR:
- arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
+ arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node);
return fold_build2_loc (loc, NE_EXPR, type,
fold_convert_loc (loc,
TREE_TYPE (arg1), arg0),
arg1);
case LT_EXPR:
- arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
+ arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node);
return fold_build2_loc (loc, EQ_EXPR, type,
fold_convert_loc (loc, TREE_TYPE (arg1),
arg0),
tree
fold_ternary_loc (location_t loc, enum tree_code code, tree type,
- tree op0, tree op1, tree op2)
+ tree op0, tree op1, tree op2)
{
tree tem;
- tree arg0 = NULL_TREE, arg1 = NULL_TREE;
+ tree arg0 = NULL_TREE, arg1 = NULL_TREE, arg2 = NULL_TREE;
enum tree_code_class kind = TREE_CODE_CLASS (code);
gcc_assert (IS_EXPR_CODE_CLASS (kind)
STRIP_NOPS (arg1);
}
+ if (op2)
+ {
+ arg2 = op2;
+ STRIP_NOPS (arg2);
+ }
+
switch (code)
{
case COMPONENT_REF:
if (elements)
return TREE_VALUE (elements);
else
- return fold_convert_loc (loc, type, integer_zero_node);
+ return build_zero_cst (type);
}
}
return NULL_TREE;
+ case FMA_EXPR:
+ /* For integers we can decompose the FMA if possible. */
+ if (TREE_CODE (arg0) == INTEGER_CST
+ && TREE_CODE (arg1) == INTEGER_CST)
+ return fold_build2_loc (loc, PLUS_EXPR, type,
+ const_binop (MULT_EXPR, arg0, arg1), arg2);
+ if (integer_zerop (arg2))
+ return fold_build2_loc (loc, MULT_EXPR, type, arg0, arg1);
+
+ return fold_fma (loc, type, arg0, arg1, arg2);
+
default:
return NULL_TREE;
} /* switch (code) */
static void
fold_checksum_tree (const_tree expr, struct md5_ctx *ctx, htab_t ht)
{
- const void **slot;
+ void **slot;
enum tree_code code;
union tree_node buf;
int i, len;
&& sizeof (struct tree_type) <= sizeof (struct tree_function_decl));
if (expr == NULL)
return;
- slot = (const void **) htab_find_slot (ht, expr, INSERT);
+ slot = (void **) htab_find_slot (ht, expr, INSERT);
if (*slot != NULL)
return;
- *slot = expr;
+ *slot = CONST_CAST_TREE (expr);
code = TREE_CODE (expr);
if (TREE_CODE_CLASS (code) == tcc_declaration
&& DECL_ASSEMBLER_NAME_SET_P (expr))
&& 0 != (t1 = fold_convert (type,
const_binop (LSHIFT_EXPR,
size_one_node,
- op1, 0)))
+ op1)))
&& !TREE_OVERFLOW (t1))
return multiple_of_p (type, t1, bottom);
}
{
case INTEGER_CST:
{
- unsigned HOST_WIDE_INT low;
- HOST_WIDE_INT high;
- int overflow = neg_double (TREE_INT_CST_LOW (arg0),
- TREE_INT_CST_HIGH (arg0),
- &low, &high);
- t = force_fit_type_double (type, low, high, 1,
+ double_int val = tree_to_double_int (arg0);
+ int overflow = neg_double (val.low, val.high, &val.low, &val.high);
+
+ t = force_fit_type_double (type, val, 1,
(overflow | TREE_OVERFLOW (arg0))
&& !TYPE_UNSIGNED (type));
break;
switch (TREE_CODE (arg0))
{
case INTEGER_CST:
- /* If the value is unsigned, then the absolute value is
- the same as the ordinary value. */
- if (TYPE_UNSIGNED (type))
- t = arg0;
- /* Similarly, if the value is non-negative. */
- else if (INT_CST_LT (integer_minus_one_node, arg0))
- t = arg0;
- /* If the value is negative, then the absolute value is
- its negation. */
- else
- {
- unsigned HOST_WIDE_INT low;
- HOST_WIDE_INT high;
- int overflow = neg_double (TREE_INT_CST_LOW (arg0),
- TREE_INT_CST_HIGH (arg0),
- &low, &high);
- t = force_fit_type_double (type, low, high, -1,
- overflow | TREE_OVERFLOW (arg0));
- }
+ {
+ double_int val = tree_to_double_int (arg0);
+
+ /* If the value is unsigned or non-negative, then the absolute value
+ is the same as the ordinary value. */
+ if (TYPE_UNSIGNED (type)
+ || !double_int_negative_p (val))
+ t = arg0;
+
+ /* If the value is negative, then the absolute value is
+ its negation. */
+ else
+ {
+ int overflow;
+
+ overflow = neg_double (val.low, val.high, &val.low, &val.high);
+ t = force_fit_type_double (type, val, -1,
+ overflow | TREE_OVERFLOW (arg0));
+ }
+ }
break;
case REAL_CST:
constant. TYPE is the type of the result. */
static tree
-fold_not_const (tree arg0, tree type)
+fold_not_const (const_tree arg0, tree type)
{
- tree t = NULL_TREE;
+ double_int val;
gcc_assert (TREE_CODE (arg0) == INTEGER_CST);
- t = force_fit_type_double (type, ~TREE_INT_CST_LOW (arg0),
- ~TREE_INT_CST_HIGH (arg0), 0,
- TREE_OVERFLOW (arg0));
-
- return t;
+ val = double_int_not (tree_to_double_int (arg0));
+ return force_fit_type_double (type, val, 0, TREE_OVERFLOW (arg0));
}
/* Given CODE, a relational operator, the target type, TYPE and two
}
}
- /* ((foo*)&vectorfoo)[1] => BIT_FIELD_REF<vectorfoo,...> */
if (TREE_CODE (sub) == POINTER_PLUS_EXPR
&& TREE_CODE (TREE_OPERAND (sub, 1)) == INTEGER_CST)
{
tree op00 = TREE_OPERAND (sub, 0);
tree op01 = TREE_OPERAND (sub, 1);
- tree op00type;
STRIP_NOPS (op00);
- op00type = TREE_TYPE (op00);
- if (TREE_CODE (op00) == ADDR_EXPR
- && TREE_CODE (TREE_TYPE (op00type)) == VECTOR_TYPE
- && type == TREE_TYPE (TREE_TYPE (op00type)))
+ if (TREE_CODE (op00) == ADDR_EXPR)
{
- HOST_WIDE_INT offset = tree_low_cst (op01, 0);
- tree part_width = TYPE_SIZE (type);
- unsigned HOST_WIDE_INT part_widthi = tree_low_cst (part_width, 0)/BITS_PER_UNIT;
- unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT;
- tree index = bitsize_int (indexi);
+ tree op00type;
+ op00 = TREE_OPERAND (op00, 0);
+ op00type = TREE_TYPE (op00);
- if (offset/part_widthi <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (op00type)))
- return fold_build3_loc (loc,
- BIT_FIELD_REF, type, TREE_OPERAND (op00, 0),
- part_width, index);
-
- }
- }
+ /* ((foo*)&vectorfoo)[1] => BIT_FIELD_REF<vectorfoo,...> */
+ if (TREE_CODE (op00type) == VECTOR_TYPE
+ && type == TREE_TYPE (op00type))
+ {
+ HOST_WIDE_INT offset = tree_low_cst (op01, 0);
+ tree part_width = TYPE_SIZE (type);
+ unsigned HOST_WIDE_INT part_widthi = tree_low_cst (part_width, 0)/BITS_PER_UNIT;
+ unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT;
+ tree index = bitsize_int (indexi);
+ if (offset/part_widthi <= TYPE_VECTOR_SUBPARTS (op00type))
+ return fold_build3_loc (loc,
+ BIT_FIELD_REF, type, op00,
+ part_width, index);
- /* ((foo*)&complexfoo)[1] => __imag__ complexfoo */
- if (TREE_CODE (sub) == POINTER_PLUS_EXPR
- && TREE_CODE (TREE_OPERAND (sub, 1)) == INTEGER_CST)
- {
- tree op00 = TREE_OPERAND (sub, 0);
- tree op01 = TREE_OPERAND (sub, 1);
- tree op00type;
-
- STRIP_NOPS (op00);
- op00type = TREE_TYPE (op00);
- if (TREE_CODE (op00) == ADDR_EXPR
- && TREE_CODE (TREE_TYPE (op00type)) == COMPLEX_TYPE
- && type == TREE_TYPE (TREE_TYPE (op00type)))
- {
- tree size = TYPE_SIZE_UNIT (type);
- if (tree_int_cst_equal (size, op01))
- return fold_build1_loc (loc, IMAGPART_EXPR, type,
- TREE_OPERAND (op00, 0));
+ }
+ /* ((foo*)&complexfoo)[1] => __imag__ complexfoo */
+ else if (TREE_CODE (op00type) == COMPLEX_TYPE
+ && type == TREE_TYPE (op00type))
+ {
+ tree size = TYPE_SIZE_UNIT (type);
+ if (tree_int_cst_equal (size, op01))
+ return fold_build1_loc (loc, IMAGPART_EXPR, type, op00);
+ }
+ /* ((foo *)&fooarray)[1] => fooarray[1] */
+ else if (TREE_CODE (op00type) == ARRAY_TYPE
+ && type == TREE_TYPE (op00type))
+ {
+ tree type_domain = TYPE_DOMAIN (op00type);
+ tree min_val = size_zero_node;
+ if (type_domain && TYPE_MIN_VALUE (type_domain))
+ min_val = TYPE_MIN_VALUE (type_domain);
+ op01 = size_binop_loc (loc, EXACT_DIV_EXPR, op01,
+ TYPE_SIZE_UNIT (type));
+ op01 = size_binop_loc (loc, PLUS_EXPR, op01, min_val);
+ op0 = build4 (ARRAY_REF, type, op00, op01,
+ NULL_TREE, NULL_TREE);
+ SET_EXPR_LOCATION (op0, loc);
+ return op0;
+ }
}
}
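Source-level shapes of the three folds above, using GCC's vector extension for the first; all three avoid a round trip through memory (illustrative):

typedef float v4sf __attribute__ ((vector_size (16)));

int fooarray[4];

float
lane1 (v4sf v)
{
  /* ((foo*)&vectorfoo)[1] becomes BIT_FIELD_REF <v, 32, 32>.  */
  return ((float *) &v)[1];
}

float
imag_part (_Complex float c)
{
  /* ((foo*)&complexfoo)[1] becomes __imag__ c.  */
  return ((float *) &c)[1];
}

int
elt1 (void)
{
  /* ((foo *)&fooarray)[1] becomes fooarray[1].  */
  return ((int *) &fooarray)[1];
}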
{
if (TREE_CODE (value) == INTEGER_CST)
{
- unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (value);
- unsigned HOST_WIDE_INT high;
+ double_int val = tree_to_double_int (value);
bool overflow_p;
- if ((low & (divisor - 1)) == 0)
+ if ((val.low & (divisor - 1)) == 0)
return value;
overflow_p = TREE_OVERFLOW (value);
- high = TREE_INT_CST_HIGH (value);
- low &= ~(divisor - 1);
- low += divisor;
- if (low == 0)
+ val.low &= ~(divisor - 1);
+ val.low += divisor;
+ if (val.low == 0)
{
- high++;
- if (high == 0)
+ val.high++;
+ if (val.high == 0)
overflow_p = true;
}
- return force_fit_type_double (TREE_TYPE (value), low, high,
+ return force_fit_type_double (TREE_TYPE (value), val,
-1, overflow_p);
}
else