#include "tree-dump.h"
#include "timevar.h"
#include "diagnostic.h"
+#include "toplev.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
/* Local functions. */
static int compare_values (tree val1, tree val2);
+static void vrp_meet (value_range_t *, value_range_t *);
/* Location information for ASSERT_EXPRs. Each instance of this
structure describes an ASSERT_EXPR for an SSA name. Since a single
gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
+ /* The static chain decl is always non-null. */
+ if (arg == cfun->static_chain_decl)
+ return true;
+
fntype = TREE_TYPE (current_function_decl);
attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype));
}
+/* Set value range VR to a range of a truthvalue of type TYPE.
+
+   For a 1-bit type the range [0, 1] already covers every representable
+   value, so VR is simply marked varying; otherwise VR is set to the
+   range [0, 1] in TYPE, preserving VR's existing equivalence set. */
+
+static inline void
+set_value_range_to_truthvalue (value_range_t *vr, tree type)
+{
+ if (TYPE_PRECISION (type) == 1)
+ set_value_range_to_varying (vr);
+ else
+ set_value_range (vr, VR_RANGE,
+ build_int_cst (type, 0), build_int_cst (type, 1),
+ vr->equiv);
+}
+
+
/* Set value range VR to VR_UNDEFINED. */
static inline void
return vr;
/* Create a default value range. */
- vr_value[ver] = vr = XNEW (value_range_t);
- memset (vr, 0, sizeof (*vr));
+ vr_value[ver] = vr = XCNEW (value_range_t);
/* Allocate an equivalence set. */
vr->equiv = BITMAP_ALLOC (NULL);
/* If VAR is a default definition, the variable can take any value
in VAR's type. */
sym = SSA_NAME_VAR (var);
- if (var == default_def (sym))
+ if (SSA_NAME_IS_DEFAULT_DEF (var))
{
/* Try to use the "nonnull" attribute to create ~[0, 0]
anti-ranges for pointers. Note that this is only valid with
return is_gimple_min_invariant (expr);
}
+/* Compare VAL and VAL2 using the '<' relation.  Return
+     1 if VAL < VAL2,
+     0 if !(VAL < VAL2),
+    -2 if VAL and VAL2 cannot be compared at compile time. */
+static inline int
+operand_less_p (tree val, tree val2)
+{
+ tree tcmp;
+ /* LT is folded faster than GE and others. Inline the common case. */
+ if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
+ {
+ if (TYPE_UNSIGNED (TREE_TYPE (val)))
+ return INT_CST_LT_UNSIGNED (val, val2);
+ else
+ return INT_CST_LT (val, val2);
+ }
+ else
+ tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
+ /* Only reached via the ELSE branch above; TCMP is NULL_TREE when
+ the comparison did not fold to a constant. */
+ if (!tcmp)
+ return -2;
+ return !integer_zerop (tcmp);
+}
+
/* Compare two values VAL1 and VAL2. Return
-2 if VAL1 and VAL2 cannot be compared at compile-time,
return 0;
/* If VAL1 is a lower address than VAL2, return -1. */
- t = fold_binary (LT_EXPR, boolean_type_node, val1, val2);
- if (t == boolean_true_node)
+ if (operand_less_p (val1, val2) == 1)
return -1;
/* If VAL1 is a higher address than VAL2, return +1. */
- t = fold_binary (GT_EXPR, boolean_type_node, val1, val2);
- if (t == boolean_true_node)
+ if (operand_less_p (val2, val1) == 1)
return 1;
- /* If VAL1 is different than VAL2, return +2. */
- t = fold_binary (NE_EXPR, boolean_type_node, val1, val2);
- if (t == boolean_true_node)
- return 2;
+ /* If VAL1 is different than VAL2, return +2.
+ For integer constants we either have already returned -1 or 1
+ or they are equivalent. We still might succeed in proving
+ something about non-trivial operands. */
+ if (TREE_CODE (val1) != INTEGER_CST
+ || TREE_CODE (val2) != INTEGER_CST)
+ {
+ t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
+ if (t && tree_expr_nonzero_p (t))
+ return 2;
+ }
return -2;
}
This also applies to value_ranges_intersect_p and
range_includes_zero_p. The semantics of VR_RANGE and
VR_ANTI_RANGE should be encoded here, but that also means
- adapting the users of these functions to the new semantics. */
+ adapting the users of these functions to the new semantics.
+
+ Benchmark compile/20001226-1.c compilation time after changing this
+ function. */
static inline int
-value_inside_range (tree val, value_range_t *vr)
+value_inside_range (tree val, value_range_t * vr)
{
- tree cmp1, cmp2;
+ int cmp1, cmp2;
- cmp1 = fold_binary_to_constant (GE_EXPR, boolean_type_node, val, vr->min);
- if (!cmp1)
+ cmp1 = operand_less_p (val, vr->min);
+ if (cmp1 == -2)
return -2;
+ /* VAL is strictly below VR->MIN, hence outside the range. */
+ if (cmp1 == 1)
+ return 0;
- cmp2 = fold_binary_to_constant (LE_EXPR, boolean_type_node, val, vr->max);
- if (!cmp2)
+ cmp2 = operand_less_p (vr->max, val);
+ if (cmp2 == -2)
return -2;
+ /* Inside iff VAL is not above VR->MAX (we already know it is not
+ below VR->MIN). */
+ return !cmp2;
}
/* Return true if value ranges VR0 and VR1 have a non-empty
-   intersection. */
+   intersection.
+
+   Benchmark compile/20001226-1.c compilation time after changing this
+   function.
+ */
static inline bool
value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
{
- return (value_inside_range (vr1->min, vr0) == 1
- || value_inside_range (vr1->max, vr0) == 1
- || value_inside_range (vr0->min, vr1) == 1
- || value_inside_range (vr0->max, vr1) == 1);
+ /* The value ranges do not intersect if the maximum of the first range is
+ less than the minimum of the second range or vice versa.
+ When those relations are unknown, we can't do any better.
+ Note operand_less_p returns -2 for "incomparable", so the != 0
+ tests below treat both "definitely less" and "unknown" as
+ non-intersecting. */
+ if (operand_less_p (vr0->max, vr1->min) != 0)
+ return false;
+ if (operand_less_p (vr1->max, vr0->min) != 0)
+ return false;
+ return true;
}
else
{
tree min, max, anti_min, anti_max, real_min, real_max;
+ int cmp;
/* We want to compute the logical AND of the two ranges;
there are three cases to consider.
/* Case 3a, the anti-range extends into the low
part of the real range. Thus creating a new
low for the real range. */
- else if ((compare_values (anti_max, real_min) == 1
- || compare_values (anti_max, real_min) == 0)
+ else if (((cmp = compare_values (anti_max, real_min)) == 1
+ || cmp == 0)
&& compare_values (anti_max, real_max) == -1)
{
min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
part of the real range. Thus creating a new
higher for the real range. */
else if (compare_values (anti_min, real_min) == 1
- && (compare_values (anti_min, real_max) == -1
- || compare_values (anti_min, real_max) == 0))
+ && ((cmp = compare_values (anti_min, real_max)) == -1
+ || cmp == 0))
{
max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
anti_min,
else if (code == MULT_EXPR && !integer_zerop (val1))
{
tree tmp = int_const_binop (TRUNC_DIV_EXPR,
- TYPE_MAX_VALUE (TREE_TYPE (val1)),
+ res,
val1, 0);
int check = compare_values (tmp, val2);
}
else
{
- set_value_range_to_varying (vr);
+ /* The result of a TRUTH_*_EXPR is always true or false. */
+ set_value_range_to_truthvalue (vr, TREE_TYPE (expr));
return;
}
}
/* Refuse to operate on certain unary expressions for which we
cannot easily determine a resulting range. */
if (code == FIX_TRUNC_EXPR
- || code == FIX_CEIL_EXPR
- || code == FIX_FLOOR_EXPR
- || code == FIX_ROUND_EXPR
|| code == FLOAT_EXPR
|| code == BIT_NOT_EXPR
|| code == NON_LVALUE_EXPR
&& is_gimple_val (new_max)
&& tree_int_cst_equal (new_min, orig_min)
&& tree_int_cst_equal (new_max, orig_max)
- && compare_values (new_min, new_max) <= 0
- && compare_values (new_min, new_max) >= -1)
+ && (cmp = compare_values (new_min, new_max)) <= 0
+ && cmp >= -1)
{
set_value_range (vr, VR_RANGE, new_min, new_max, vr->equiv);
return;
}
+/* Extract range information from a conditional expression EXPR based on
+ the ranges of each of its operands and the expression code.  Stores
+ the result in *VR. */
+
+static void
+extract_range_from_cond_expr (value_range_t *vr, tree expr)
+{
+ tree op0, op1;
+ value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
+ value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
+
+ /* Get value ranges for each operand. For constant operands, create
+ a new value range with the operand to simplify processing. */
+ op0 = COND_EXPR_THEN (expr);
+ if (TREE_CODE (op0) == SSA_NAME)
+ vr0 = *(get_value_range (op0));
+ else if (is_gimple_min_invariant (op0))
+ set_value_range (&vr0, VR_RANGE, op0, op0, NULL);
+ else
+ set_value_range_to_varying (&vr0);
+
+ op1 = COND_EXPR_ELSE (expr);
+ if (TREE_CODE (op1) == SSA_NAME)
+ vr1 = *(get_value_range (op1));
+ else if (is_gimple_min_invariant (op1))
+ set_value_range (&vr1, VR_RANGE, op1, op1, NULL);
+ else
+ set_value_range_to_varying (&vr1);
+
+ /* The resulting value range is the union of the operand ranges;
+ vrp_meet may conservatively widen it. */
+ vrp_meet (&vr0, &vr1);
+ copy_value_range (vr, &vr0);
+}
+
+
/* Extract range information from a comparison expression EXPR based
on the range of its operand and the expression code. */
set_value_range (vr, VR_RANGE, val, val, vr->equiv);
}
else
- set_value_range_to_varying (vr);
+ /* The result of a comparison is always true or false. */
+ set_value_range_to_truthvalue (vr, TREE_TYPE (expr));
}
extract_range_from_binary_expr (vr, expr);
else if (TREE_CODE_CLASS (code) == tcc_unary)
extract_range_from_unary_expr (vr, expr);
+ else if (code == COND_EXPR)
+ extract_range_from_cond_expr (vr, expr);
else if (TREE_CODE_CLASS (code) == tcc_comparison)
extract_range_from_comparison (vr, expr);
else if (is_gimple_min_invariant (expr))
or decreases, ... */
dir == EV_DIR_UNKNOWN
/* ... or if it may wrap. */
- || scev_probably_wraps_p (init, step, stmt,
- current_loops->parray[CHREC_VARIABLE (chrec)],
+ || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
true))
return;
if (COMPARISON_CLASS_P (cond))
{
tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
- assertion = build2 (MODIFY_EXPR, TREE_TYPE (v), n, a);
+ assertion = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (v), n, a);
}
else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
{
/* Given !V, build the assignment N = false. */
tree op0 = TREE_OPERAND (cond, 0);
gcc_assert (op0 == v);
- assertion = build2 (MODIFY_EXPR, TREE_TYPE (v), n, boolean_false_node);
+ assertion = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (v), n,
+ boolean_false_node);
}
else if (TREE_CODE (cond) == SSA_NAME)
{
/* Given V, build the assignment N = true. */
gcc_assert (v == cond);
- assertion = build2 (MODIFY_EXPR, TREE_TYPE (v), n, boolean_true_node);
+ assertion = build2 (GIMPLE_MODIFY_STMT,
+ TREE_TYPE (v), n, boolean_true_node);
}
else
gcc_unreachable ();
bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
}
+/* COND is a predicate which uses NAME. Extract a suitable test code
+ and value and store them into *CODE_P and *VAL_P so the predicate
+ is normalized to NAME *CODE_P *VAL_P.
-/* Try to register an edge assertion for SSA name NAME on edge E for
- the conditional jump pointed to by SI. Return true if an assertion
- for NAME could be registered. */
+ If no extraction was possible, return FALSE, otherwise return TRUE.
+
+ If INVERT is true, then we invert the result stored into *CODE_P. */
static bool
-register_edge_assert_for (tree name, edge e, block_stmt_iterator si)
+extract_code_and_val_from_cond (tree name, tree cond, bool invert,
+ enum tree_code *code_p, tree *val_p)
{
- tree val, stmt;
enum tree_code comp_code;
+ tree val;
- stmt = bsi_stmt (si);
+ /* Predicates may be a single SSA name or NAME OP VAL. */
+ if (cond == name)
+ {
+ /* If the predicate is a name, it must be NAME, in which
+ case we create the predicate NAME == true or
+ NAME == false accordingly. */
+ comp_code = EQ_EXPR;
+ val = invert ? boolean_false_node : boolean_true_node;
+ }
+ else
+ {
+ /* Otherwise, we have a comparison of the form NAME COMP VAL
+ or VAL COMP NAME. */
+ if (name == TREE_OPERAND (cond, 1))
+ {
+ /* If the predicate is of the form VAL COMP NAME, flip
+ COMP around because we need to register NAME as the
+ first operand in the predicate. */
+ comp_code = swap_tree_comparison (TREE_CODE (cond));
+ val = TREE_OPERAND (cond, 0);
+ }
+ else
+ {
+ /* The comparison is of the form NAME COMP VAL, so the
+ comparison code remains unchanged. */
+ comp_code = TREE_CODE (cond);
+ val = TREE_OPERAND (cond, 1);
+ }
- /* Do not attempt to infer anything in names that flow through
- abnormal edges. */
- if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
- return false;
+ /* Invert the comparison code as necessary. */
+ if (invert)
+ comp_code = invert_tree_comparison (comp_code, 0);
+
+ /* VRP does not handle float types. */
+ if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
+ return false;
+
+ /* Do not register always-false predicates.
+ FIXME: this works around a limitation in fold() when dealing with
+ enumerations. Given 'enum { N1, N2 } x;', fold will not
+ fold 'if (x > N2)' to 'if (0)'. */
+ if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
+ && INTEGRAL_TYPE_P (TREE_TYPE (val)))
+ {
+ tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
+ tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
+
+ if (comp_code == GT_EXPR
+ && (!max
+ || compare_values (val, max) == 0))
+ return false;
+
+ if (comp_code == LT_EXPR
+ && (!min
+ || compare_values (val, min) == 0))
+ return false;
+ }
+ }
+ /* Success: hand the normalized test back to the caller. */
+ *code_p = comp_code;
+ *val_p = val;
+ return true;
+}
+
+/* OP is an operand of a truth value expression which is known to have
+ a particular value. Register any asserts for OP and for any
+ operands in OP's defining statement.
+
+ If CODE is EQ_EXPR, then we want to register OP is zero (false),
+ if CODE is NE_EXPR, then we want to register OP is nonzero (true).
+
+ Returns true if any assertion was registered for OP or for an
+ operand reached through OP's definition chain. */
+
+static bool
+register_edge_assert_for_1 (tree op, enum tree_code code,
+ edge e, block_stmt_iterator bsi)
+{
+ bool retval = false;
+ tree op_def, rhs, val;
- /* If NAME was not found in the sub-graph reachable from E, then
- there's nothing to do. */
- if (!TEST_BIT (found_in_subgraph, SSA_NAME_VERSION (name)))
+ /* We only care about SSA_NAMEs. */
+ if (TREE_CODE (op) != SSA_NAME)
return false;
- /* We found a use of NAME in the sub-graph rooted at E->DEST.
- Register an assertion for NAME according to the value that NAME
- takes on edge E. */
- if (TREE_CODE (stmt) == COND_EXPR)
+ /* We know that OP will have a zero or nonzero value. If OP is used
+ more than once go ahead and register an assert for OP.
+
+ The FOUND_IN_SUBGRAPH support is not helpful in this situation as
+ it will always be set for OP (because OP is used in a COND_EXPR in
+ the subgraph). */
+ if (!has_single_use (op))
+ {
+ val = build_int_cst (TREE_TYPE (op), 0);
+ register_new_assert_for (op, code, val, NULL, e, bsi);
+ retval = true;
+ }
+
+ /* Now look at how OP is set. If it's set from a comparison,
+ a truth operation or some bit operations, then we may be able
+ to register information about the operands of that assignment. */
+ op_def = SSA_NAME_DEF_STMT (op);
+ if (TREE_CODE (op_def) != GIMPLE_MODIFY_STMT)
+ return retval;
+
+ rhs = GIMPLE_STMT_OPERAND (op_def, 1);
+
+ if (COMPARISON_CLASS_P (rhs))
{
- /* If BB ends in a COND_EXPR then NAME then we should insert
- the original predicate on EDGE_TRUE_VALUE and the
- opposite predicate on EDGE_FALSE_VALUE. */
- tree cond = COND_EXPR_COND (stmt);
- bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
+ /* OP == 0 means the comparison is known false, so invert it. */
+ bool invert = (code == EQ_EXPR ? true : false);
+ tree op0 = TREE_OPERAND (rhs, 0);
+ tree op1 = TREE_OPERAND (rhs, 1);
- /* Predicates may be a single SSA name or NAME OP VAL. */
- if (cond == name)
+ /* Conditionally register an assert for each SSA_NAME in the
+ comparison. */
+ if (TREE_CODE (op0) == SSA_NAME
+ && !has_single_use (op0)
+ && extract_code_and_val_from_cond (op0, rhs,
+ invert, &code, &val))
{
- /* If the predicate is a name, it must be NAME, in which
- case we create the predicate NAME == true or
- NAME == false accordingly. */
- comp_code = EQ_EXPR;
- val = (is_else_edge) ? boolean_false_node : boolean_true_node;
+ register_new_assert_for (op0, code, val, NULL, e, bsi);
+ retval = true;
}
- else
+
+ /* Similarly for the second operand of the comparison. */
+ if (TREE_CODE (op1) == SSA_NAME
+ && !has_single_use (op1)
+ && extract_code_and_val_from_cond (op1, rhs,
+ invert, &code, &val))
{
- /* Otherwise, we have a comparison of the form NAME COMP VAL
- or VAL COMP NAME. */
- if (name == TREE_OPERAND (cond, 1))
- {
- /* If the predicate is of the form VAL COMP NAME, flip
- COMP around because we need to register NAME as the
- first operand in the predicate. */
- comp_code = swap_tree_comparison (TREE_CODE (cond));
- val = TREE_OPERAND (cond, 0);
- }
- else
- {
- /* The comparison is of the form NAME COMP VAL, so the
- comparison code remains unchanged. */
- comp_code = TREE_CODE (cond);
- val = TREE_OPERAND (cond, 1);
- }
+ register_new_assert_for (op1, code, val, NULL, e, bsi);
+ retval = true;
+ }
+ }
+ else if ((code == NE_EXPR
+ && (TREE_CODE (rhs) == TRUTH_AND_EXPR
+ || TREE_CODE (rhs) == BIT_AND_EXPR))
+ || (code == EQ_EXPR
+ && (TREE_CODE (rhs) == TRUTH_OR_EXPR
+ || TREE_CODE (rhs) == BIT_IOR_EXPR)))
+ {
+ /* Recurse on each operand. */
+ retval |= register_edge_assert_for_1 (TREE_OPERAND (rhs, 0),
+ code, e, bsi);
+ retval |= register_edge_assert_for_1 (TREE_OPERAND (rhs, 1),
+ code, e, bsi);
+ }
+ else if (TREE_CODE (rhs) == TRUTH_NOT_EXPR)
+ {
+ /* Recurse, flipping CODE. */
+ code = invert_tree_comparison (code, false);
+ retval |= register_edge_assert_for_1 (TREE_OPERAND (rhs, 0),
+ code, e, bsi);
+ }
+ else if (TREE_CODE (rhs) == SSA_NAME)
+ {
+ /* Recurse through the copy. */
+ retval |= register_edge_assert_for_1 (rhs, code, e, bsi);
+ }
+ else if (TREE_CODE (rhs) == NOP_EXPR
+ || TREE_CODE (rhs) == CONVERT_EXPR
+ || TREE_CODE (rhs) == VIEW_CONVERT_EXPR
+ || TREE_CODE (rhs) == NON_LVALUE_EXPR)
+ {
+ /* Recurse through the type conversion. */
+ retval |= register_edge_assert_for_1 (TREE_OPERAND (rhs, 0),
+ code, e, bsi);
+ }
- /* If we are inserting the assertion on the ELSE edge, we
- need to invert the sign comparison. */
- if (is_else_edge)
- comp_code = invert_tree_comparison (comp_code, 0);
-
- /* Do not register always-false predicates. FIXME, this
- works around a limitation in fold() when dealing with
- enumerations. Given 'enum { N1, N2 } x;', fold will not
- fold 'if (x > N2)' to 'if (0)'. */
- if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
- && (INTEGRAL_TYPE_P (TREE_TYPE (val))
- || SCALAR_FLOAT_TYPE_P (TREE_TYPE (val))))
- {
- tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
- tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
+ return retval;
+}
- if (comp_code == GT_EXPR && compare_values (val, max) == 0)
- return false;
+/* Try to register an edge assertion for SSA name NAME on edge E for
+ the condition COND contributing to the conditional jump pointed to by SI.
+ Return true if an assertion for NAME could be registered. */
- if (comp_code == LT_EXPR && compare_values (val, min) == 0)
- return false;
- }
+static bool
+register_edge_assert_for (tree name, edge e, block_stmt_iterator si, tree cond)
+{
+ tree val;
+ enum tree_code comp_code;
+ bool retval = false;
+ /* On the FALSE edge the condition is known not to hold, so the
+ extracted test code must be inverted. */
+ bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
+
+ /* Do not attempt to infer anything in names that flow through
+ abnormal edges. */
+ if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
+ return false;
+
+ if (!extract_code_and_val_from_cond (name, cond, is_else_edge,
+ &comp_code, &val))
+ return false;
+
+ /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
+ reachable from E. */
+ if (TEST_BIT (found_in_subgraph, SSA_NAME_VERSION (name)))
+ {
+ register_new_assert_for (name, comp_code, val, NULL, e, si);
+ retval = true;
+ }
+
+ /* If COND is effectively an equality test of an SSA_NAME against
+ the value zero or one, then we may be able to assert values
+ for SSA_NAMEs which flow into COND. */
+
+ /* In the case of NAME == 1 or NAME != 0, for TRUTH_AND_EXPR defining
+ statement of NAME we can assert both operands of the TRUTH_AND_EXPR
+ have nonzero value. */
+ if (((comp_code == EQ_EXPR && integer_onep (val))
+ || (comp_code == NE_EXPR && integer_zerop (val))))
+ {
+ tree def_stmt = SSA_NAME_DEF_STMT (name);
+
+ if (TREE_CODE (def_stmt) == GIMPLE_MODIFY_STMT
+ && (TREE_CODE (GIMPLE_STMT_OPERAND (def_stmt, 1)) == TRUTH_AND_EXPR
+ || TREE_CODE (GIMPLE_STMT_OPERAND (def_stmt, 1)) == BIT_AND_EXPR))
+ {
+ tree op0 = TREE_OPERAND (GIMPLE_STMT_OPERAND (def_stmt, 1), 0);
+ tree op1 = TREE_OPERAND (GIMPLE_STMT_OPERAND (def_stmt, 1), 1);
+ retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
+ retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
}
}
- else
+
+ /* In the case of NAME == 0 or NAME != 1, for TRUTH_OR_EXPR defining
+ statement of NAME we can assert both operands of the TRUTH_OR_EXPR
+ have zero value. */
+ if (((comp_code == EQ_EXPR && integer_zerop (val))
+ || (comp_code == NE_EXPR && integer_onep (val))))
{
- /* FIXME. Handle SWITCH_EXPR. */
- gcc_unreachable ();
+ tree def_stmt = SSA_NAME_DEF_STMT (name);
+
+ if (TREE_CODE (def_stmt) == GIMPLE_MODIFY_STMT
+ && (TREE_CODE (GIMPLE_STMT_OPERAND (def_stmt, 1)) == TRUTH_OR_EXPR
+ || TREE_CODE (GIMPLE_STMT_OPERAND (def_stmt, 1)) == BIT_IOR_EXPR))
+ {
+ tree op0 = TREE_OPERAND (GIMPLE_STMT_OPERAND (def_stmt, 1), 0);
+ tree op1 = TREE_OPERAND (GIMPLE_STMT_OPERAND (def_stmt, 1), 1);
+ retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
+ retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
+ }
}
- register_new_assert_for (name, comp_code, val, NULL, e, si);
- return true;
+ return retval;
}
static bool find_assert_locations (basic_block bb);
/* Determine whether the outgoing edges of BB should receive an
- ASSERT_EXPR for each of the operands of BB's last statement. The
- last statement of BB must be a COND_EXPR or a SWITCH_EXPR.
+ ASSERT_EXPR for each of the operands of BB's LAST statement.
+ The last statement of BB must be a COND_EXPR or a SWITCH_EXPR.
If any of the sub-graphs rooted at BB have an interesting use of
the predicate operands, an assert location node is added to the
list of assertions for the corresponding operands. */
static bool
-find_conditional_asserts (basic_block bb)
+find_conditional_asserts (basic_block bb, tree last)
{
bool need_assert;
- block_stmt_iterator last_si;
- tree op, last;
+ block_stmt_iterator bsi;
+ tree op;
edge_iterator ei;
edge e;
ssa_op_iter iter;
need_assert = false;
- last_si = bsi_last (bb);
- last = bsi_stmt (last_si);
+ bsi = bsi_for_stmt (last);
/* Look for uses of the operands in each of the sub-graphs
rooted at BB. We need to check each of the outgoing edges
/* Register the necessary assertions for each operand in the
conditional predicate. */
FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
- need_assert |= register_edge_assert_for (op, e, last_si);
+ need_assert |= register_edge_assert_for (op, e, bsi,
+ COND_EXPR_COND (last));
}
/* Finally, indicate that we have found the operands in the
tree t = op;
tree def_stmt = SSA_NAME_DEF_STMT (t);
- while (TREE_CODE (def_stmt) == MODIFY_EXPR
- && TREE_CODE (TREE_OPERAND (def_stmt, 1)) == NOP_EXPR
- && TREE_CODE (TREE_OPERAND (TREE_OPERAND (def_stmt, 1), 0)) == SSA_NAME
- && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (def_stmt, 1), 0))))
+ while (TREE_CODE (def_stmt) == GIMPLE_MODIFY_STMT
+ && TREE_CODE
+ (GIMPLE_STMT_OPERAND (def_stmt, 1)) == NOP_EXPR
+ && TREE_CODE
+ (TREE_OPERAND (GIMPLE_STMT_OPERAND (def_stmt, 1),
+ 0)) == SSA_NAME
+ && POINTER_TYPE_P
+ (TREE_TYPE (TREE_OPERAND
+ (GIMPLE_STMT_OPERAND (def_stmt,
+ 1), 0))))
{
- t = TREE_OPERAND (TREE_OPERAND (def_stmt, 1), 0);
+ t = TREE_OPERAND (GIMPLE_STMT_OPERAND (def_stmt, 1), 0);
def_stmt = SSA_NAME_DEF_STMT (t);
/* Note we want to register the assert for the
&& TREE_CODE (last) == COND_EXPR
&& !fp_predicate (COND_EXPR_COND (last))
&& !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
- need_assert |= find_conditional_asserts (bb);
+ need_assert |= find_conditional_asserts (bb, last);
/* Recurse into the dominator children of BB. */
for (son = first_dom_son (CDI_DOMINATORS, bb);
sbitmap_zero (blocks_visited);
need_assert_for = BITMAP_ALLOC (NULL);
- asserts_for = XNEWVEC (assert_locus_t, num_ssa_names);
- memset (asserts_for, 0, num_ssa_names * sizeof (assert_locus_t));
+ asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
calculate_dominance_info (CDI_DOMINATORS);
BITMAP_FREE (need_assert_for);
}
+/* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
+ and "struct" hacks. If VRP can determine that the
+ array subscript is a constant, check if it is outside valid
+ range. If the array subscript is a RANGE, warn if it is
+ non-overlapping with valid range.
+ IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR,
+ where taking the address one past the end of the array is valid. */
+
+static void
+check_array_ref (tree ref, location_t* locus, bool ignore_off_by_one)
+{
+ value_range_t* vr = NULL;
+ tree low_sub, up_sub;
+ tree low_bound, up_bound = array_ref_up_bound (ref);
+
+ low_sub = up_sub = TREE_OPERAND (ref, 1);
+
+ if (!up_bound || !locus || TREE_NO_WARNING (ref)
+ || TREE_CODE (up_bound) != INTEGER_CST
+ /* Cannot check flexible arrays. */
+ || (TYPE_SIZE (TREE_TYPE (ref)) == NULL_TREE
+ && TYPE_DOMAIN (TREE_TYPE (ref)) != NULL_TREE
+ && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (ref))) == NULL_TREE)
+ /* Accesses after the end of arrays of size 0 (gcc
+ extension) and 1 are likely intentional ("struct
+ hack"). */
+ || compare_tree_int (up_bound, 1) <= 0)
+ return;
+
+ low_bound = array_ref_low_bound (ref);
+
+ /* For an SSA_NAME subscript, substitute the bounds of its value
+ range so the checks below can work on constants. */
+ if (TREE_CODE (low_sub) == SSA_NAME)
+ {
+ vr = get_value_range (low_sub);
+ if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
+ {
+ low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
+ up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
+ }
+ }
+
+ if (vr && vr->type == VR_ANTI_RANGE)
+ {
+ /* The anti-range covers the whole valid index range, so every
+ possible value of the subscript is out of bounds. */
+ if (TREE_CODE (up_sub) == INTEGER_CST
+ && tree_int_cst_lt (up_bound, up_sub)
+ && TREE_CODE (low_sub) == INTEGER_CST
+ && tree_int_cst_lt (low_sub, low_bound))
+ {
+ warning (OPT_Warray_bounds,
+ "%Harray subscript is outside array bounds", locus);
+ TREE_NO_WARNING (ref) = 1;
+ }
+ }
+ else if (TREE_CODE (up_sub) == INTEGER_CST
+ && tree_int_cst_lt (up_bound, up_sub)
+ && !tree_int_cst_equal (up_bound, up_sub)
+ && (!ignore_off_by_one
+ || !tree_int_cst_equal (int_const_binop (PLUS_EXPR,
+ up_bound,
+ integer_one_node,
+ 0),
+ up_sub)))
+ {
+ warning (OPT_Warray_bounds, "%Harray subscript is above array bounds",
+ locus);
+ TREE_NO_WARNING (ref) = 1;
+ }
+ else if (TREE_CODE (low_sub) == INTEGER_CST
+ && tree_int_cst_lt (low_sub, low_bound))
+ {
+ warning (OPT_Warray_bounds, "%Harray subscript is below array bounds",
+ locus);
+ TREE_NO_WARNING (ref) = 1;
+ }
+}
+
+/* walk_tree() callback that checks if *TP is
+ an ARRAY_REF inside an ADDR_EXPR (in which an array
+ subscript one outside the valid range is allowed). Call
+ check_array_ref for each ARRAY_REF found. The location is
+ passed in DATA, which is the statement being walked. */
+
+static tree
+check_array_bounds (tree *tp, int *walk_subtree, void *data)
+{
+ tree t = *tp;
+ tree stmt = (tree)data;
+ location_t *location = EXPR_LOCUS (stmt);
+
+ *walk_subtree = TRUE;
+
+ if (TREE_CODE (t) == ARRAY_REF)
+ check_array_ref (t, location, false /*ignore_off_by_one*/);
+ else if (TREE_CODE (t) == ADDR_EXPR)
+ {
+ use_operand_p op;
+ tree use_stmt;
+ t = TREE_OPERAND (t, 0);
+
+ /* Don't warn on statements like
+
+ ssa_name = 500 + &array[-200]
+
+ or
+
+ ssa_name = &array[-200]
+ other_name = ssa_name + 300;
+
+ which are sometimes
+ produced by other optimizing passes. */
+
+ if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT
+ && BINARY_CLASS_P (GIMPLE_STMT_OPERAND (stmt, 1)))
+ *walk_subtree = FALSE;
+
+ if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT
+ && TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) == SSA_NAME
+ && single_imm_use (GIMPLE_STMT_OPERAND (stmt, 0), &op, &use_stmt)
+ && TREE_CODE (use_stmt) == GIMPLE_MODIFY_STMT
+ && BINARY_CLASS_P (GIMPLE_STMT_OPERAND (use_stmt, 1)))
+ *walk_subtree = FALSE;
+
+ /* Check the component chain under the ADDR_EXPR by hand, with
+ the off-by-one allowance for ARRAY_REFs. */
+ while (*walk_subtree && handled_component_p (t))
+ {
+ if (TREE_CODE (t) == ARRAY_REF)
+ check_array_ref (t, location, true /*ignore_off_by_one*/);
+ t = TREE_OPERAND (t, 0);
+ }
+ /* The ADDR_EXPR operand was handled above; don't walk into it
+ a second time. */
+ *walk_subtree = FALSE;
+ }
+
+ return NULL_TREE;
+}
+
+/* Walk over all statements of all reachable BBs and call check_array_bounds
+ on them. */
+
+static void
+check_all_array_refs (void)
+{
+ basic_block bb;
+ block_stmt_iterator si;
+
+ FOR_EACH_BB (bb)
+ {
+ /* Skip bb's that are clearly unreachable: a single-predecessor
+ block whose predecessor ends in a constant COND_EXPR that
+ always takes the other edge. */
+ if (single_pred_p (bb))
+ {
+ basic_block pred_bb = EDGE_PRED (bb, 0)->src;
+ tree ls = NULL_TREE;
+
+ if (!bsi_end_p (bsi_last (pred_bb)))
+ ls = bsi_stmt (bsi_last (pred_bb));
+
+ if (ls && TREE_CODE (ls) == COND_EXPR
+ && ((COND_EXPR_COND (ls) == boolean_false_node
+ && (EDGE_PRED (bb, 0)->flags & EDGE_TRUE_VALUE))
+ || (COND_EXPR_COND (ls) == boolean_true_node
+ && (EDGE_PRED (bb, 0)->flags & EDGE_FALSE_VALUE))))
+ continue;
+ }
+ for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
+ walk_tree (bsi_stmt_ptr (si), check_array_bounds,
+ bsi_stmt (si), NULL);
+ }
+}
/* Convert range assertion expressions into the implied copies and
copy propagate away the copies. Doing the trivial copy propagation
tree stmt = bsi_stmt (si);
tree use_stmt;
- if (TREE_CODE (stmt) == MODIFY_EXPR
- && TREE_CODE (TREE_OPERAND (stmt, 1)) == ASSERT_EXPR)
+ if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT
+ && TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1)) == ASSERT_EXPR)
{
- tree rhs = TREE_OPERAND (stmt, 1), var;
+ tree rhs = GIMPLE_STMT_OPERAND (stmt, 1), var;
tree cond = fold (ASSERT_EXPR_COND (rhs));
use_operand_p use_p;
imm_use_iterator iter;
/* Propagate the RHS into every use of the LHS. */
var = ASSERT_EXPR_VAR (rhs);
- FOR_EACH_IMM_USE_STMT (use_stmt, iter, TREE_OPERAND (stmt, 0))
+ FOR_EACH_IMM_USE_STMT (use_stmt, iter,
+ GIMPLE_STMT_OPERAND (stmt, 0))
FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
{
SET_USE (use_p, var);
/* And finally, remove the copy, it is not needed. */
bsi_remove (&si, true);
+ release_defs (stmt);
}
else
bsi_next (&si);
&& (INTEGRAL_TYPE_P (TREE_TYPE (PHI_RESULT (stmt)))
|| POINTER_TYPE_P (TREE_TYPE (PHI_RESULT (stmt)))))
return true;
- else if (TREE_CODE (stmt) == MODIFY_EXPR)
+ else if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
{
- tree lhs = TREE_OPERAND (stmt, 0);
- tree rhs = TREE_OPERAND (stmt, 1);
+ tree lhs = GIMPLE_STMT_OPERAND (stmt, 0);
+ tree rhs = GIMPLE_STMT_OPERAND (stmt, 1);
/* In general, assignments with virtual operands are not useful
for deriving ranges, with the obvious exception of calls to
{
basic_block bb;
- vr_value = XNEWVEC (value_range_t *, num_ssa_names);
- memset (vr_value, 0, num_ssa_names * sizeof (value_range_t *));
+ vr_value = XCNEWVEC (value_range_t *, num_ssa_names);
FOR_EACH_BB (bb)
{
tree lhs, rhs, def;
ssa_op_iter iter;
- lhs = TREE_OPERAND (stmt, 0);
- rhs = TREE_OPERAND (stmt, 1);
+ lhs = GIMPLE_STMT_OPERAND (stmt, 0);
+ rhs = GIMPLE_STMT_OPERAND (stmt, 1);
/* We only keep track of ranges in integral and pointer types. */
if (TREE_CODE (lhs) == SSA_NAME
}
ann = stmt_ann (stmt);
- if (TREE_CODE (stmt) == MODIFY_EXPR)
+ if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
{
- tree rhs = TREE_OPERAND (stmt, 1);
+ tree rhs = GIMPLE_STMT_OPERAND (stmt, 1);
/* In general, assignments with virtual operands are not useful
for deriving ranges, with the obvious exception of calls to
/* Meet operation for value ranges. Given two value ranges VR0 and
- VR1, store in VR0 the result of meeting VR0 and VR1.
-
- The meeting rules are as follows:
-
- 1- If VR0 and VR1 have an empty intersection, set VR0 to VR_VARYING.
-
- 2- If VR0 and VR1 have a non-empty intersection, set VR0 to the
- union of VR0 and VR1. */
+ VR1, store in VR0 a range that contains both VR0 and VR1. This
+ may not be the smallest possible such range. */
static void
vrp_meet (value_range_t *vr0, value_range_t *vr1)
if (vr0->type == VR_RANGE && vr1->type == VR_RANGE)
{
- /* If VR0 and VR1 have a non-empty intersection, compute the
- union of both ranges. */
- if (value_ranges_intersect_p (vr0, vr1))
- {
- int cmp;
- tree min, max;
-
- /* The lower limit of the new range is the minimum of the
- two ranges. If they cannot be compared, the result is
- VARYING. */
- cmp = compare_values (vr0->min, vr1->min);
- if (cmp == 0 || cmp == 1)
- min = vr1->min;
- else if (cmp == -1)
- min = vr0->min;
- else
- {
- set_value_range_to_varying (vr0);
- return;
- }
+ int cmp;
+ tree min, max;
+
+ /* Compute the convex hull of the ranges. The lower limit of
+ the new range is the minimum of the two ranges. If they
+ cannot be compared, then give up. */
+ cmp = compare_values (vr0->min, vr1->min);
+ if (cmp == 0 || cmp == 1)
+ min = vr1->min;
+ else if (cmp == -1)
+ min = vr0->min;
+ else
+ goto give_up;
+
+ /* Similarly, the upper limit of the new range is the maximum
+ of the two ranges. If they cannot be compared, then
+ give up. */
+ cmp = compare_values (vr0->max, vr1->max);
+ if (cmp == 0 || cmp == -1)
+ max = vr1->max;
+ else if (cmp == 1)
+ max = vr0->max;
+ else
+ goto give_up;
- /* Similarly, the upper limit of the new range is the
- maximum of the two ranges. If they cannot be compared,
- the result is VARYING. */
- cmp = compare_values (vr0->max, vr1->max);
- if (cmp == 0 || cmp == -1)
- max = vr1->max;
- else if (cmp == 1)
- max = vr0->max;
- else
- {
- set_value_range_to_varying (vr0);
- return;
- }
+ /* The resulting set of equivalences is the intersection of
+ the two sets. */
+ if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
+ bitmap_and_into (vr0->equiv, vr1->equiv);
+ else if (vr0->equiv && !vr1->equiv)
+ bitmap_clear (vr0->equiv);
- /* The resulting set of equivalences is the intersection of
- the two sets. */
- if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
- bitmap_and_into (vr0->equiv, vr1->equiv);
- else if (vr0->equiv && !vr1->equiv)
- bitmap_clear (vr0->equiv);
-
- set_value_range (vr0, vr0->type, min, max, vr0->equiv);
- }
- else
- goto no_meet;
+ set_value_range (vr0, vr0->type, min, max, vr0->equiv);
}
else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
{
- /* Two anti-ranges meet only if they are both identical. */
+ /* Two anti-ranges meet only if their complements intersect.
+ Only handle the case of identical ranges. */
if (compare_values (vr0->min, vr1->min) == 0
&& compare_values (vr0->max, vr1->max) == 0
&& compare_values (vr0->min, vr0->max) == 0)
bitmap_clear (vr0->equiv);
}
else
- goto no_meet;
+ goto give_up;
}
else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
{
- /* A numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4]
- meet only if the ranges have an empty intersection. The
- result of the meet operation is the anti-range. */
+ /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4],
+ only handle the case where the ranges have an empty intersection.
+ The result of the meet operation is the anti-range. */
if (!symbolic_range_p (vr0)
&& !symbolic_range_p (vr1)
&& !value_ranges_intersect_p (vr0, vr1))
bitmap_clear (vr0->equiv);
}
else
- goto no_meet;
+ goto give_up;
}
else
gcc_unreachable ();
return;
-no_meet:
- /* The two range VR0 and VR1 do not meet. Before giving up and
- setting the result to VARYING, see if we can at least derive a
- useful anti-range. FIXME, all this nonsense about distinguishing
+give_up:
+ /* Failed to find an efficient meet. Before giving up and setting
+ the result to VARYING, see if we can at least derive a useful
+ anti-range. FIXME, all this nonsense about distinguishing
anti-ranges from ranges is necessary because of the odd
semantics of range_includes_zero_p and friends. */
if (!symbolic_range_p (vr0)
tree lhs = PHI_RESULT (phi);
value_range_t *lhs_vr = get_value_range (lhs);
value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
+ bool all_const = true;
copy_value_range (&vr_result, lhs_vr);
value_range_t vr_arg;
if (TREE_CODE (arg) == SSA_NAME)
- vr_arg = *(get_value_range (arg));
+ {
+ vr_arg = *(get_value_range (arg));
+ all_const = false;
+ }
else
{
vr_arg.type = VR_RANGE;
/* To prevent infinite iterations in the algorithm, derive ranges
when the new value is slightly bigger or smaller than the
previous one. */
- if (lhs_vr->type == VR_RANGE && vr_result.type == VR_RANGE)
+ if (lhs_vr->type == VR_RANGE && vr_result.type == VR_RANGE
+ && !all_const)
{
if (!POINTER_TYPE_P (TREE_TYPE (lhs)))
{
t = build2 (BIT_AND_EXPR, TREE_TYPE (op0), op0, t);
}
- TREE_OPERAND (stmt, 1) = t;
+ GIMPLE_STMT_OPERAND (stmt, 1) = t;
update_stmt (stmt);
}
}
else
t = op;
- TREE_OPERAND (stmt, 1) = t;
+ GIMPLE_STMT_OPERAND (stmt, 1) = t;
update_stmt (stmt);
}
}
void
simplify_stmt_using_ranges (tree stmt)
{
- if (TREE_CODE (stmt) == MODIFY_EXPR)
+ if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
{
- tree rhs = TREE_OPERAND (stmt, 1);
+ tree rhs = GIMPLE_STMT_OPERAND (stmt, 1);
enum tree_code rhs_code = TREE_CODE (rhs);
/* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
cfg_altered = thread_through_all_blocks ();
/* If we threaded jumps, then we need to recompute the dominance
- information, to safely do that we must clean up the CFG first. */
+ information. */
if (cfg_altered)
- {
- free_dominance_info (CDI_DOMINATORS);
- cleanup_tree_cfg ();
- calculate_dominance_info (CDI_DOMINATORS);
- }
+ free_dominance_info (CDI_DOMINATORS);
VEC_free (tree, heap, stack);
}
/* We may have ended with ranges that have exactly one value. Those
values can be substituted as any other copy/const propagated
value using substitute_and_fold. */
- single_val_range = XNEWVEC (prop_value_t, num_ssa_names);
- memset (single_val_range, 0, num_ssa_names * sizeof (*single_val_range));
+ single_val_range = XCNEWVEC (prop_value_t, num_ssa_names);
do_value_subst_p = false;
for (i = 0; i < num_ssa_names; i++)
substitute_and_fold (single_val_range, true);
+ if (warn_array_bounds)
+ check_all_array_refs ();
+
/* We must identify jump threading opportunities before we release
the datastructures built by VRP. */
identify_jump_threads ();
{
insert_range_assertions ();
- current_loops = loop_optimizer_init (LOOPS_NORMAL);
+ loop_optimizer_init (LOOPS_NORMAL);
if (current_loops)
- scev_initialize (current_loops);
+ scev_initialize ();
vrp_initialize ();
ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
if (current_loops)
{
scev_finalize ();
- loop_optimizer_finalize (current_loops);
- current_loops = NULL;
+ loop_optimizer_finalize ();
}
/* ASSERT_EXPRs must be removed before finalizing jump threads
TV_TREE_VRP, /* tv_id */
PROP_ssa | PROP_alias, /* properties_required */
0, /* properties_provided */
- PROP_smt_usage, /* properties_destroyed */
+ 0, /* properties_destroyed */
0, /* todo_flags_start */
TODO_cleanup_cfg
| TODO_ggc_collect