/* Induction variable optimizations.
- Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software
- Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ Free Software Foundation, Inc.
This file is part of GCC.
/* The currently optimized loop. */
struct loop *current_loop;
- /* Are we optimizing for speed? */
- bool speed;
+ /* Numbers of iterations for all exits of the current loop. */
+ struct pointer_map_t *niters;
  /* Number of registers used in the current loop.  */
  unsigned regs_used;
- /* Numbers of iterations for all exits of the current loop. */
- struct pointer_map_t *niters;
-
/* The size of version_info array allocated. */
unsigned version_info_size;
/* The bitmap of indices in version_info whose value was changed. */
bitmap relevant;
- /* The maximum invariant id. */
- unsigned max_inv_id;
-
/* The uses of induction variables. */
VEC(iv_use_p,heap) *iv_uses;
/* A bitmap of important candidates. */
bitmap important_candidates;
+ /* The maximum invariant id. */
+ unsigned max_inv_id;
+
  /* Whether to consider all candidates when replacing a use, as opposed
     to just the related and important ones.  */
bool consider_all_candidates;
+
+ /* Are we optimizing for speed? */
+ bool speed;
};
/* An assignment of iv candidates to uses. */
idx_contains_abnormal_ssa_name_p (tree base, tree *index,
void *data ATTRIBUTE_UNUSED)
{
- if (TREE_CODE (base) == ARRAY_REF)
+ if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
{
if (abnormal_ssa_name_p (TREE_OPERAND (base, 2)))
return false;
if (!is_gimple_reg (name))
return NULL_TREE;
- if (!simple_iv (loop, phi, name, &iv, true))
+ if (!simple_iv (loop, loop, name, &iv, true))
return NULL_TREE;
return integer_zerop (iv.step) ? NULL_TREE : iv.step;
if (TREE_CODE (lhs) != SSA_NAME)
return false;
- if (!simple_iv (loop, stmt, lhs, iv, true))
+ if (!simple_iv (loop, loop_containing_stmt (stmt), lhs, iv, true))
return false;
iv->base = expand_simple_operations (iv->base);
reference out of the loop (in order to take its address in strength
reduction). In order for this to work we need both lower bound
and step to be loop invariants. */
- if (TREE_CODE (base) == ARRAY_REF)
+ if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
{
+ /* Moreover, for a range, the size needs to be invariant as well. */
+ if (TREE_CODE (base) == ARRAY_RANGE_REF
+ && !expr_invariant_in_loop_p (loop, TYPE_SIZE (TREE_TYPE (base))))
+ return false;
+
step = array_ref_element_size (base);
lbound = array_ref_low_bound (base);
if (integer_zerop (iv->step))
return true;
- if (TREE_CODE (base) == ARRAY_REF)
+ if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
{
step = array_ref_element_size (base);
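
For illustration (not part of the patch): with a variable-length array the
element size checked here is a run-time value, and the step test only
succeeds when that value is loop-invariant.  A minimal C99 test case of
that shape:

/* The element size of a[i] is the run-time value m * sizeof (int); it is
   invariant in both loops, so it can serve as the step of the address
   induction variable.  */
long
sum2d (long n, long m, int a[n][m])
{
  long i, j, s = 0;
  for (i = 0; i < n; i++)
    for (j = 0; j < m; j++)
      s += a[i][j];
  return s;
}
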
{
struct ivopts_data *data = (struct ivopts_data *) vdata;
find_interesting_uses_op (data, *idx);
- if (TREE_CODE (base) == ARRAY_REF)
+ if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
{
find_interesting_uses_op (data, array_ref_element_size (base));
find_interesting_uses_op (data, array_ref_low_bound (base));
non-addressability may be uncovered again, causing ADDR_EXPRs
of inappropriate objects to be built. */
if (is_gimple_reg (TREE_OPERAND (expr, 0))
- || is_gimple_min_invariant (TREE_OPERAND (expr, 0)))
+ || !is_gimple_addressable (TREE_OPERAND (expr, 0)))
return true;
/* ... fall through ... */
return fold_convert (orig_type, expr);
+ case MULT_EXPR:
+ op1 = TREE_OPERAND (expr, 1);
+ if (!cst_and_fits_in_hwi (op1))
+ return orig_expr;
+
+ op0 = TREE_OPERAND (expr, 0);
+ op0 = strip_offset_1 (op0, false, false, &off0);
+ if (op0 == TREE_OPERAND (expr, 0))
+ return orig_expr;
+
+ *offset = off0 * int_cst_value (op1);
+ if (integer_zerop (op0))
+ expr = op0;
+ else
+ expr = fold_build2 (MULT_EXPR, type, op0, op1);
+
+ return fold_convert (orig_type, expr);
+
case ARRAY_REF:
+ case ARRAY_RANGE_REF:
if (!inside_addr)
return orig_expr;
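
A standalone sketch (not GCC code) of the arithmetic the new MULT_EXPR case
performs, on plain integers rather than trees; "inner" stands for the
loop-variant part left over after the recursive strip_offset_1 call:

#include <stdio.h>

/* Illustrative only: given expr = (inner + off0) * c, the stripped
   offset is off0 * c and the remaining expression is inner * c.  */
static long
strip_mult_offset (long inner, long off0, long c, long *offset)
{
  *offset = off0 * c;		/* off0 * int_cst_value (op1) */
  return inner * c;		/* fold_build2 (MULT_EXPR, ...) */
}

int
main (void)
{
  long off, rest = strip_mult_offset (5, 2, 4, &off);
  /* (5 + 2) * 4 == 28 == 20 + 8.  */
  printf ("rest = %ld, offset = %ld\n", rest, off);
  return 0;
}
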
{
orig_type = TREE_TYPE (base);
type = generic_type_for (orig_type);
- /* Don't convert the base to the generic type for pointers as the generic
- type is an integer type with the same size as the pointer type. */
- if (type != orig_type && !POINTER_TYPE_P (orig_type))
+ if (type != orig_type)
{
base = fold_convert (type, base);
step = fold_convert (type, step);
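
The effect of dropping the pointer special case, sketched at the source
level under the assumption (stated by the deleted comment) that the generic
type for a pointer is an unsigned integer of the same width:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  int buf[4] = { 1, 2, 3, 4 };
  /* The pointer base and step are now converted to the generic integer
     type, roughly like this.  */
  uintptr_t base = (uintptr_t) buf;	/* fold_convert (type, base) */
  uintptr_t step = sizeof (int);	/* fold_convert (type, step) */
  for (unsigned i = 0; i < 4; i++)
    printf ("%d\n", *(int *) (base + i * step));
  return 0;
}
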
add_candidate (data, iv->base, iv->step, true, NULL);
/* The same, but with initial value zero. */
- add_candidate (data,
- build_int_cst (TREE_TYPE (iv->base), 0),
- iv->step, true, NULL);
+ if (POINTER_TYPE_P (TREE_TYPE (iv->base)))
+ add_candidate (data, size_int (0), iv->step, true, NULL);
+ else
+ add_candidate (data, build_int_cst (TREE_TYPE (iv->base), 0),
+ iv->step, true, NULL);
phi = SSA_NAME_DEF_STMT (iv->ssa_name);
if (gimple_code (phi) == GIMPLE_PHI)
break;
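
The reason for using size_int (0) here: a zero of pointer type would be a
null pointer constant, which is not a valid base for pointer arithmetic, so
the zero-based candidate for a pointer IV is created in sizetype instead.
A source-level sketch of the candidate that results:

#include <stdio.h>

int
main (void)
{
  int buf[4] = { 10, 20, 30, 40 };
  char *base = (char *) buf;
  /* The candidate is an integer IV starting at 0; the original pointer
     value is recovered as base + i0.  */
  unsigned long i0;
  for (i0 = 0; i0 < 4 * sizeof (int); i0 += sizeof (int))
    printf ("%d\n", *(int *) (base + i0));
  return 0;
}
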
+ case NEGATE_EXPR:
+ op0 = TREE_OPERAND (expr, 0);
+ STRIP_NOPS (op0);
+ op1 = NULL_TREE;
+
+ if (is_gimple_val (op0))
+ cost0 = zero_cost;
+ else
+ cost0 = force_expr_to_var_cost (op0, speed);
+
+ cost1 = zero_cost;
+ break;
+
default:
/* Just an arbitrary value, FIXME. */
return new_cost (target_spill_cost[speed], 0);
case POINTER_PLUS_EXPR:
case PLUS_EXPR:
case MINUS_EXPR:
+ case NEGATE_EXPR:
cost = new_cost (add_cost (mode, speed), 0);
break;
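
A test case of the kind that exercises the new NEGATE_EXPR handling; the
address computation for p[-i] contains a negation that previously fell
through to the arbitrary spill-cost estimate:

/* Illustrative only: the negated index -i is now costed as an expression
   (one negation, i.e. one add-class operation, plus the scaling by the
   element size).  */
long
sum_backwards (const long *p, long n)
{
  long s = 0, i;
  for (i = 0; i < n; i++)
    s += p[-i];
  return s;
}
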
unsigned HOST_WIDE_INT *offset, bitmap *depends_on)
{
HOST_WIDE_INT diff = 0;
- comp_cost cost;
- bool speed = optimize_loop_for_speed_p (data->current_loop);
+ aff_tree aff_e1, aff_e2;
+ tree type;
gcc_assert (TREE_CODE (e1) == ADDR_EXPR);
*symbol_present = false;
*var_present = true;
-
- cost = force_var_cost (data, e1, depends_on);
- cost = add_costs (cost, force_var_cost (data, e2, depends_on));
- cost.cost += add_cost (Pmode, speed);
- return cost;
+ type = signed_type_for (TREE_TYPE (e1));
+ tree_to_aff_combination (e1, type, &aff_e1);
+ tree_to_aff_combination (e2, type, &aff_e2);
+ aff_combination_scale (&aff_e2, double_int_minus_one);
+ aff_combination_add (&aff_e1, &aff_e2);
+
+ return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on);
}
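
A minimal standalone sketch of what the affine-combination calls above
achieve, using a single-variable {coefficient, offset} pair in place of
aff_tree: e1 - e2 often simplifies (here to a constant), so costing the
combined tree is cheaper than costing e1 and e2 separately plus an
addition.

#include <stdio.h>

struct aff { long coef; long off; };	/* represents coef * x + off */

static void
aff_scale (struct aff *a, long s)
{
  a->coef *= s;
  a->off *= s;
}

static void
aff_add (struct aff *a, const struct aff *b)
{
  a->coef += b->coef;
  a->off += b->off;
}

int
main (void)
{
  struct aff e1 = { 1, 16 };	/* x + 16, e.g. &a[4] with 4-byte elements */
  struct aff e2 = { 1, 4 };	/* x + 4,  e.g. &a[1] */
  aff_scale (&e2, -1);		/* aff_combination_scale (..., minus_one) */
  aff_add (&e1, &e2);		/* aff_combination_add */
  printf ("%ld * x + %ld\n", e1.coef, e1.off);	/* prints 0 * x + 12 */
  return 0;
}
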
/* Estimates cost of expressing difference E1 - E2 as
tree e1, tree e2, bool *symbol_present, bool *var_present,
unsigned HOST_WIDE_INT *offset, bitmap *depends_on)
{
- comp_cost cost;
enum machine_mode mode = TYPE_MODE (TREE_TYPE (e1));
unsigned HOST_WIDE_INT off1, off2;
+ aff_tree aff_e1, aff_e2;
+ tree type;
e1 = strip_offset (e1, &off1);
e2 = strip_offset (e2, &off2);
STRIP_NOPS (e2);
if (TREE_CODE (e1) == ADDR_EXPR)
- return ptr_difference_cost (data, e1, e2, symbol_present, var_present, offset,
- depends_on);
+ return ptr_difference_cost (data, e1, e2, symbol_present, var_present,
+ offset, depends_on);
*symbol_present = false;
if (operand_equal_p (e1, e2, 0))
*var_present = false;
return zero_cost;
}
+
*var_present = true;
+
if (integer_zerop (e2))
return force_var_cost (data, e1, depends_on);
if (integer_zerop (e1))
{
- cost = force_var_cost (data, e2, depends_on);
+ comp_cost cost = force_var_cost (data, e2, depends_on);
cost.cost += multiply_by_cost (-1, mode, data->speed);
-
return cost;
}
-  cost = force_var_cost (data, e1, depends_on);
-  cost = add_costs (cost, force_var_cost (data, e2, depends_on));
-  cost.cost += add_cost (mode, data->speed);
-  return cost;
+  type = signed_type_for (TREE_TYPE (e1));
+  tree_to_aff_combination (e1, type, &aff_e1);
+  tree_to_aff_combination (e2, type, &aff_e2);
+  aff_combination_scale (&aff_e2, double_int_minus_one);
+  aff_combination_add (&aff_e1, &aff_e2);
+
+  return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on);
}
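
For illustration, the same idea with the constant offsets stripped first,
as difference_cost does before falling back to the affine combination;
plain integers stand in for the trees:

#include <stdio.h>

int
main (void)
{
  long i = 7, size = 8;
  long e1 = (i + 4) * size;		/* e.g. address of a[i + 4] */
  long e2 = (i + 1) * size;		/* e.g. address of a[i + 1] */
  long off1 = 4 * size, off2 = 1 * size;	/* strip_offset results */
  long var1 = e1 - off1, var2 = e2 - off2;
  /* The variable parts are equal, so only the constant offset
     off1 - off2 remains; operand_equal_p detects exactly this case.  */
  printf ("vars equal: %d, offset: %ld\n", var1 == var2, off1 - off2);
  return 0;
}
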
/* Determines the cost of the computation by that USE is expressed
HOST_WIDE_INT ratio, aratio;
bool var_present, symbol_present;
comp_cost cost;
- unsigned n_sums;
double_int rat;
bool speed = optimize_bb_for_speed_p (gimple_bb (at));
return infinite_cost;
}
- if (TYPE_PRECISION (utype) != TYPE_PRECISION (ctype))
+ if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
{
/* TODO -- add direct handling of this case. */
goto fallback;
else
return infinite_cost;
+ STRIP_NOPS (cbase);
+ ctype = TREE_TYPE (cbase);
+
/* use = ubase + ratio * (var - cbase). If either cbase is a constant
or ratio == 1, it is better to handle this like
&symbol_present, &var_present, &offset,
depends_on);
}
+ else if (address_p
+ && !POINTER_TYPE_P (ctype)
+ && multiplier_allowed_in_address_p (ratio,
+ TYPE_MODE (TREE_TYPE (utype))))
+ {
+ cbase
+ = fold_build2 (MULT_EXPR, ctype, cbase, build_int_cst (ctype, ratio));
+ cost = difference_cost (data,
+ ubase, cbase,
+ &symbol_present, &var_present, &offset,
+ depends_on);
+ }
else
{
cost = force_var_cost (data, cbase, depends_on);
offset -= ratio * cstepi;
/* Now the computation is in shape symbol + var1 + const + ratio * var2.
- (symbol/var/const parts may be omitted). If we are looking for an address,
- find the cost of addressing this. */
+ (symbol/var1/const parts may be omitted). If we are looking for an
+ address, find the cost of addressing this. */
if (address_p)
- return add_costs (cost, get_address_cost (symbol_present, var_present,
- offset, ratio,
- TYPE_MODE (TREE_TYPE (*use->op_p)), speed));
+ return add_costs (cost,
+ get_address_cost (symbol_present, var_present,
+ offset, ratio,
+ TYPE_MODE (TREE_TYPE (utype)), speed));
/* Otherwise estimate the costs for computing the expression. */
- aratio = ratio > 0 ? ratio : -ratio;
if (!symbol_present && !var_present && !offset)
{
if (ratio != 1)
cost.cost += multiply_by_cost (ratio, TYPE_MODE (ctype), speed);
-
return cost;
}
- if (aratio != 1)
- cost.cost += multiply_by_cost (aratio, TYPE_MODE (ctype), speed);
-
- n_sums = 1;
- if (var_present
- /* Symbol + offset should be compile-time computable. */
- && (symbol_present || offset))
- n_sums++;
+  /* Symbol + offset should be compile-time computable, so their addition
+     to the variable (if present) can be done just once outside the loop;
+     amortize its cost over the expected number of iterations.  */
+ if (var_present && (symbol_present || offset))
+ cost.cost += add_cost (TYPE_MODE (ctype), speed)
+ / AVG_LOOP_NITER (data->current_loop);
/* Having offset does not affect runtime cost in case it is added to
symbol, but it increases complexity. */
if (offset)
cost.complexity++;
- cost.cost += n_sums * add_cost (TYPE_MODE (ctype), speed);
- return cost;
+ cost.cost += add_cost (TYPE_MODE (ctype), speed);
+
+ aratio = ratio > 0 ? ratio : -ratio;
+ if (aratio != 1)
+ cost.cost += multiply_by_cost (aratio, TYPE_MODE (ctype), speed);
fallback:
{
return false;
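
The algebra behind the new multiplier_allowed_in_address_p branch, checked
standalone: ubase + ratio * (var - cbase) is rearranged into
(ubase - ratio * cbase) + ratio * var, so when the target addressing mode
multiplies the index by ratio for free, only the difference of the
constant-folded parts still needs to be costed.

#include <stdio.h>

int
main (void)
{
  long ubase = 100, cbase = 3, var = 17, ratio = 4;
  long direct = ubase + ratio * (var - cbase);
  long rearranged = (ubase - ratio * cbase) + ratio * var;
  /* Both forms are equal; the second exposes ratio * var, which an
     x86-style base + index*4 addressing mode computes for free.  */
  printf ("%ld == %ld\n", direct, rearranged);
  return 0;
}
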
cand_value_at (loop, cand, use->stmt, nit, &bnd);
+
*bound = aff_combination_to_tree (&bnd);
+ /* It is unlikely that computing the number of iterations using division
+ would be more profitable than keeping the original induction variable. */
+ if (expression_expensive_p (*bound))
+ return false;
return true;
}
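
A test case of the shape this guards: here the exit test on i can be
replaced by a comparison of a pointer candidate against a cheap bound
computed before the loop; a bound whose computation would contain a
division is now rejected by expression_expensive_p, keeping the original
induction variable instead.

/* Illustrative only: ivopts may rewrite the exit test as a comparison of
   a pointer IV against the bound q + n.  */
void
scale (float *p, const float *q, long n)
{
  long i;
  for (i = 0; i < n; i++)
    p[i] = q[i] * 2.0f;
}
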
static comp_cost
iv_ca_cost (struct iv_ca *ivs)
{
- return (ivs->bad_uses ? infinite_cost : ivs->cost);
+ /* This was a conditional expression but it triggered a bug in
+ Sun C 5.5. */
+ if (ivs->bad_uses)
+ return infinite_cost;
+ else
+ return ivs->cost;
}
/* Returns true if all dependences of CP are among invariants in IVS. */
if (TREE_CODE (*idx) == SSA_NAME)
*idx = SSA_NAME_VAR (*idx);
- if (TREE_CODE (base) == ARRAY_REF)
+ if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
{
op = &TREE_OPERAND (base, 2);
if (*op
return ref;
}
-/* Extract the alias analysis info for the memory reference REF. There are
- several ways how this information may be stored and what precisely is
- its semantics depending on the type of the reference, but there always is
- somewhere hidden one _DECL node that is used to determine the set of
- virtual operands for the reference. The code below deciphers this jungle
- and extracts this single useful piece of information. */
-
-static tree
-get_ref_tag (tree ref, tree orig)
-{
- tree var = get_base_address (ref);
- tree aref = NULL_TREE, tag, sv;
- HOST_WIDE_INT offset, size, maxsize;
-
- for (sv = orig; handled_component_p (sv); sv = TREE_OPERAND (sv, 0))
- {
- aref = get_ref_base_and_extent (sv, &offset, &size, &maxsize);
- if (ref)
- break;
- }
-
- if (!var)
- return NULL_TREE;
-
- if (TREE_CODE (var) == INDIRECT_REF)
- {
- /* If the base is a dereference of a pointer, first check its name memory
- tag. If it does not have one, use its symbol memory tag. */
- var = TREE_OPERAND (var, 0);
- if (TREE_CODE (var) != SSA_NAME)
- return NULL_TREE;
-
- if (SSA_NAME_PTR_INFO (var))
- {
- tag = SSA_NAME_PTR_INFO (var)->name_mem_tag;
- if (tag)
- return tag;
- }
-
- var = SSA_NAME_VAR (var);
- tag = symbol_mem_tag (var);
- gcc_assert (tag != NULL_TREE);
- return tag;
- }
- else
- {
- if (!DECL_P (var))
- return NULL_TREE;
-
- tag = symbol_mem_tag (var);
- if (tag)
- return tag;
-
- return var;
- }
-}
-
/* Copies the reference information from OLD_REF to NEW_REF. */
static void
if (TREE_CODE (old_ref) == TARGET_MEM_REF)
copy_mem_ref_info (new_ref, old_ref);
else
- {
- TMR_ORIGINAL (new_ref) = unshare_and_remove_ssa_names (old_ref);
- TMR_TAG (new_ref) = get_ref_tag (old_ref, TMR_ORIGINAL (new_ref));
- }
+ TMR_ORIGINAL (new_ref) = unshare_and_remove_ssa_names (old_ref);
}
/* Rewrites USE (address that is an iv) using candidate CAND. */
{
tree var = var_at_stmt (data->current_loop, cand, use->stmt);
tree var_type = TREE_TYPE (var);
+ gimple_seq stmts;
compare = iv_elimination_compare (data, use);
bound = unshare_expr (fold_convert (var_type, bound));
- op = force_gimple_operand_gsi (&bsi, bound, true, NULL_TREE,
- true, GSI_SAME_STMT);
+ op = force_gimple_operand (bound, &stmts, true, NULL_TREE);
+ if (stmts)
+    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (data->current_loop),
+                                      stmts);
gimple_cond_set_lhs (use->stmt, var);
gimple_cond_set_code (use->stmt, compare);
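
The effect of forcing the statements onto the preheader edge, sketched at
the source level: the bound is evaluated once before the loop rather than
next to the comparison.

/* Illustrative only: the rewritten loop computes the bound in the
   preheader and tests the candidate IV against it.  */
long
sum (const long *a, long n)
{
  long s = 0;
  const long *p = a;
  const long *bound = a + n;	/* inserted on the preheader edge */
  while (p != bound)		/* gimple_cond rewritten to use var */
    s += *p++;
  return s;
}
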
static void
rewrite_use (struct ivopts_data *data, struct iv_use *use, struct iv_cand *cand)
{
- push_stmt_changes (&use->stmt);
-
switch (use->type)
{
case USE_NONLINEAR_EXPR:
default:
gcc_unreachable ();
}
-
- pop_stmt_changes (&use->stmt);
+
+ update_stmt (use->stmt);
}
/* Rewrite the uses using the selected induction variables. */