/* Minimum cost of an expensive expression. */
#define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
-/* The outermost loop for that execution of the header guarantees that the
+/* The outermost loop for which execution of the header guarantees that the
block will be executed. */
#define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
+#define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
static struct lim_aux_data *
init_lim_data (gimple stmt)
by the true edge of the predicate block and the other edge
dominated by the false edge. This ensures that the PHI argument
we are going to take is completely determined by the path we
- take from the predicate block. */
+ take from the predicate block.
+ We can only use BB dominance checks below if the destination of
+ the true/false edges are dominated by their edge, thus only
+ have a single predecessor. */
extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
tem = EDGE_PRED (bb, 0);
if (tem == true_edge
- || tem->src == true_edge->dest
- || dominated_by_p (CDI_DOMINATORS,
- tem->src, true_edge->dest))
+ || (single_pred_p (true_edge->dest)
+ && (tem->src == true_edge->dest
+ || dominated_by_p (CDI_DOMINATORS,
+ tem->src, true_edge->dest))))
arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
else if (tem == false_edge
- || tem->src == false_edge->dest
- || dominated_by_p (CDI_DOMINATORS,
- tem->src, false_edge->dest))
+ || (single_pred_p (false_edge->dest)
+ && (tem->src == false_edge->dest
+ || dominated_by_p (CDI_DOMINATORS,
+ tem->src, false_edge->dest))))
arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
else
return false;
tem = EDGE_PRED (bb, 1);
if (tem == true_edge
- || tem->src == true_edge->dest
- || dominated_by_p (CDI_DOMINATORS,
- tem->src, true_edge->dest))
+ || (single_pred_p (true_edge->dest)
+ && (tem->src == true_edge->dest
+ || dominated_by_p (CDI_DOMINATORS,
+ tem->src, true_edge->dest))))
arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
else if (tem == false_edge
- || tem->src == false_edge->dest
- || dominated_by_p (CDI_DOMINATORS,
- tem->src, false_edge->dest))
+ || (single_pred_p (false_edge->dest)
+ && (tem->src == false_edge->dest
+ || dominated_by_p (CDI_DOMINATORS,
+ tem->src, false_edge->dest))))
arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
else
return false;
add_referenced_var (var);
DECL_GIMPLE_REG_P (var) = 1;
- /* For vectors, create a VECTOR_CST full of 1's. */
- if (TREE_CODE (type) == VECTOR_TYPE)
- {
- int i, len;
- tree list = NULL_TREE;
- real_one = build_real (TREE_TYPE (type), dconst1);
- len = TYPE_VECTOR_SUBPARTS (type);
- for (i = 0; i < len; i++)
- list = tree_cons (NULL, real_one, list);
- real_one = build_vector (type, list);
- }
- else
- real_one = build_real (type, dconst1);
+ real_one = build_one_cst (type);
stmt1 = gimple_build_assign_with_ops (RDIV_EXPR,
var, real_one, gimple_assign_rhs2 (stmt));
gcc_assert (arg0 && arg1);
t = build2 (gimple_cond_code (cond), boolean_type_node,
gimple_cond_lhs (cond), gimple_cond_rhs (cond));
- t = build3 (COND_EXPR, TREE_TYPE (gimple_phi_result (stmt)),
- t, arg0, arg1);
- new_stmt = gimple_build_assign_with_ops (COND_EXPR,
- gimple_phi_result (stmt),
- t, NULL_TREE);
+ new_stmt = gimple_build_assign_with_ops3 (COND_EXPR,
+ gimple_phi_result (stmt),
+ t, arg0, arg1);
SSA_NAME_DEF_STMT (gimple_phi_result (stmt)) = new_stmt;
*((unsigned int *)(dw_data->global_data)) |= TODO_cleanup_cfg;
}
create_vop_ref_mapping ();
}
-/* Returns true if a region of size SIZE1 at position 0 and a region of
- size SIZE2 at position DIFF cannot overlap. */
-
-static bool
-cannot_overlap_p (aff_tree *diff, double_int size1, double_int size2)
-{
- double_int d, bound;
-
- /* Unless the difference is a constant, we fail. */
- if (diff->n != 0)
- return false;
-
- d = diff->offset;
- if (double_int_negative_p (d))
- {
- /* The second object is before the first one, we succeed if the last
- element of the second object is before the start of the first one. */
- bound = double_int_add (d, double_int_add (size2, double_int_minus_one));
- return double_int_negative_p (bound);
- }
- else
- {
- /* We succeed if the second object starts after the first one ends. */
- return double_int_scmp (size1, d) <= 0;
- }
-}
-
/* Returns true if MEM1 and MEM2 may alias. TTAE_CACHE is used as a cache in
tree_to_aff_combination_expand. */
aff_combination_scale (&off1, double_int_minus_one);
aff_combination_add (&off2, &off1);
- if (cannot_overlap_p (&off2, size1, size2))
+ if (aff_comb_cannot_overlap_p (&off2, size1, size2))
return false;
return true;
switch (TREE_CODE (ref))
{
case MEM_REF:
+ case TARGET_MEM_REF:
gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
lsm_tmp_name_add ("_");
break;
|| !for_each_index (&ref->mem, may_move_till, loop))
return false;
+ /* If it can throw, fail: we do not properly update EH info.  */
+ if (tree_could_throw_p (ref->mem))
+ return false;
+
/* If it can trap, it must be always executed in LOOP.
Readonly memory locations may trap when storing to them, but
tree_could_trap_p is a predicate for rvalues, so check that
edge ex;
FOR_EACH_VEC_ELT (edge, exits, i, ex)
- if (ex->flags & EDGE_ABNORMAL)
+ if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
return false;
return true;
edge e;
struct loop *inn_loop = loop;
- if (!loop->header->aux)
+ if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
{
bbs = get_loop_body_in_dom_order (loop);
while (1)
{
- last->aux = loop;
+ SET_ALWAYS_EXECUTED_IN (last, loop);
if (last == loop->header)
break;
last = get_immediate_dominator (CDI_DOMINATORS, last);
htab_t h;
FOR_EACH_BB (bb)
- {
- bb->aux = NULL;
- }
+ SET_ALWAYS_EXECUTED_IN (bb, NULL);
pointer_map_destroy (lim_aux_data_map);