#include "coretypes.h"
#include "tm.h"
#include "tree.h"
-#include "rtl.h"
#include "tm_p.h"
-#include "hard-reg-set.h"
#include "basic-block.h"
#include "output.h"
-#include "diagnostic.h"
+#include "tree-pretty-print.h"
+#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
-#include "varray.h"
-#include "expr.h"
#include "tree-pass.h"
#include "ggc.h"
#include "insn-config.h"
#include "langhooks.h"
#include "tree-affine.h"
#include "target.h"
+#include "tree-ssa-propagate.h"
+
+/* FIXME: Expressions are expanded to RTL in this pass to determine the
+ cost of different addressing modes. This should be moved to a TBD
+ interface between the GIMPLE and RTL worlds. */
+#include "expr.h"
/* The infinite cost. */
#define INFTY 10000000
/* Are we optimizing for speed? */
bool speed;
+
+ /* Whether the loop body includes any function calls. */
+ bool body_includes_call;
};
/* An assignment of iv candidates to uses. */
idx_contains_abnormal_ssa_name_p,
NULL);
+ if (code == COND_EXPR)
+ return contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 0))
+ || contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 1))
+ || contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 2));
+
switch (codeclass)
{
case tcc_binary:
EXIT of DATA->current_loop, or NULL if something goes wrong. */
static tree
-niter_for_exit (struct ivopts_data *data, edge exit)
+niter_for_exit (struct ivopts_data *data, edge exit,
+ struct tree_niter_desc **desc_p)
{
- struct tree_niter_desc desc;
+ struct tree_niter_desc* desc = NULL;
tree niter;
void **slot;
being zero). Also, we cannot safely work with ssa names that
appear in phi nodes on abnormal edges, so that we do not create
overlapping life ranges for them (PR 27283). */
+ desc = XNEW (struct tree_niter_desc);
if (number_of_iterations_exit (data->current_loop,
- exit, &desc, true)
- && integer_zerop (desc.may_be_zero)
- && !contains_abnormal_ssa_name_p (desc.niter))
- niter = desc.niter;
+ exit, desc, true)
+ && integer_zerop (desc->may_be_zero)
+ && !contains_abnormal_ssa_name_p (desc->niter))
+ niter = desc->niter;
else
niter = NULL_TREE;
- *pointer_map_insert (data->niters, exit) = niter;
+ desc->niter = niter;
+ slot = pointer_map_insert (data->niters, exit);
+ *slot = desc;
}
else
- niter = (tree) *slot;
+ niter = ((struct tree_niter_desc *) *slot)->niter;
+ if (desc_p)
+ *desc_p = (struct tree_niter_desc *) *slot;
return niter;
}
if (!exit)
return NULL;
- return niter_for_exit (data, exit);
+ return niter_for_exit (data, exit, NULL);
}
/* Initializes data structures used by the iv optimization pass, stored
if (!base)
return expr;
- if (TREE_CODE (base) == INDIRECT_REF)
+ if (TREE_CODE (base) == MEM_REF)
return determine_base_object (TREE_OPERAND (base, 0));
return fold_convert (ptr_type_node,
tree step, iv_base, iv_step, lbound, off;
struct loop *loop = dta->ivopts_data->current_loop;
- if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
- || TREE_CODE (base) == ALIGN_INDIRECT_REF)
+ if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF)
return false;
/* If base is a component ref, require that the offset of the reference
}
else
/* The step for pointer arithmetics already is 1 byte. */
- step = build_int_cst (sizetype, 1);
+ step = size_one_node;
iv_base = iv->base;
iv_step = iv->step;
if (mode != BLKmode)
{
- double_int mul;
- tree al = build_int_cst (TREE_TYPE (step),
- GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT);
+ unsigned mode_align = GET_MODE_ALIGNMENT (mode);
- if (base_align < GET_MODE_ALIGNMENT (mode)
- || bitpos % GET_MODE_ALIGNMENT (mode) != 0
- || bitpos % BITS_PER_UNIT != 0)
+ if (base_align < mode_align
+ || (bitpos % mode_align) != 0
+ || (bitpos % BITS_PER_UNIT) != 0)
return true;
- if (!constant_multiple_of (step, al, &mul))
+ if (toffset
+ && (highest_pow2_factor (toffset) * BITS_PER_UNIT) < mode_align)
+ return true;
+
+ if ((highest_pow2_factor (step) * BITS_PER_UNIT) < mode_align)
return true;
}
static void
find_interesting_uses_address (struct ivopts_data *data, gimple stmt, tree *op_p)
{
- tree base = *op_p, step = build_int_cst (sizetype, 0);
+ tree base = *op_p, step = size_zero_node;
struct iv *civ;
struct ifs_ivopts_data ifs_ivopts_data;
{
ifs_ivopts_data.ivopts_data = data;
ifs_ivopts_data.stmt = stmt;
- ifs_ivopts_data.step = build_int_cst (sizetype, 0);
+ ifs_ivopts_data.step = size_zero_node;
if (!for_each_index (&base, idx_find_step, &ifs_ivopts_data)
|| integer_zerop (ifs_ivopts_data.step))
goto fail;
step = ifs_ivopts_data.step;
- gcc_assert (TREE_CODE (base) != ALIGN_INDIRECT_REF);
gcc_assert (TREE_CODE (base) != MISALIGNED_INDIRECT_REF);
/* Check that the base expression is addressable. This needs
tree *ref = &TREE_OPERAND (base, 0);
while (handled_component_p (*ref))
ref = &TREE_OPERAND (*ref, 0);
- if (TREE_CODE (*ref) == INDIRECT_REF)
+ if (TREE_CODE (*ref) == MEM_REF)
{
- tree tem = gimple_fold_indirect_ref (TREE_OPERAND (*ref, 0));
+ tree tem = fold_binary (MEM_REF, TREE_TYPE (*ref),
+ TREE_OPERAND (*ref, 0),
+ TREE_OPERAND (*ref, 1));
if (tem)
*ref = tem;
}
expr = build_fold_addr_expr (op0);
return fold_convert (orig_type, expr);
- case INDIRECT_REF:
+ case MEM_REF:
+ /* ??? Offset operand? */
inside_addr = false;
break;
return get_computation_at (loop, use, cand, use->stmt);
}
+/* Adjust the cost COST for being in loop setup rather than loop body.
+   If we're optimizing for space, the loop setup overhead is constant;
+   if we're optimizing for speed, amortize it over the average number of
+   iterations, since the setup code executes only once per loop.  */
+static unsigned
+adjust_setup_cost (struct ivopts_data *data, unsigned cost)
+{
+  /* Keep the infinite-cost sentinel intact so callers can still
+     recognize it.  */
+  if (cost == INFTY)
+    return cost;
+  else if (optimize_loop_for_speed_p (data->current_loop))
+    return cost / AVG_LOOP_NITER (data->current_loop);
+  else
+    return cost;
+}
+
/* Returns cost of addition in MODE. */
static unsigned
/* Symbol + offset should be compile-time computable so consider that they
are added once to the variable, if present. */
if (var_present && (symbol_present || offset))
- cost.cost += add_cost (TYPE_MODE (ctype), speed)
- / AVG_LOOP_NITER (data->current_loop);
+ cost.cost += adjust_setup_cost (data,
+ add_cost (TYPE_MODE (ctype), speed));
/* Having offset does not affect runtime cost in case it is added to
symbol, but it increases complexity. */
aratio = ratio > 0 ? ratio : -ratio;
if (aratio != 1)
cost.cost += multiply_by_cost (aratio, TYPE_MODE (ctype), speed);
+ return cost;
fallback:
if (can_autoinc)
return infinite_cost;
if (address_p)
- comp = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (comp)), comp);
+ comp = build_simple_mem_ref (comp);
return new_cost (computation_cost (comp, speed), 0);
}
gcc_assert (step && TREE_CODE (step) == INTEGER_CST);
- /* Period of the iv is gcd (step, type range). Since type range is power
- of two, it suffices to determine the maximum power of two that divides
- step. */
- pow2div = num_ending_zeros (step);
type = unsigned_type_for (TREE_TYPE (step));
+  /* The period of the iv is lcm (step, type_range) / step - 1, i.e.,
+     N * type_range / step - 1 where N = step / gcd (step, type_range).
+     Since type_range is a power of two, gcd (step, type_range) is
+     2^num_ending_zeros (step), so the final result is
+
+       (type_range >> num_ending_zeros (step)) - 1.  */
+ pow2div = num_ending_zeros (step);
period = build_low_bits_mask (type,
- (TYPE_PRECISION (type)
- - tree_low_cst (pow2div, 1)));
+ (TYPE_PRECISION (type)
+ - tree_low_cst (pow2div, 1)));
return period;
}
tree nit, period;
struct loop *loop = data->current_loop;
aff_tree bnd;
+ struct tree_niter_desc *desc = NULL;
if (TREE_CODE (cand->iv->step) != INTEGER_CST)
return false;
if (flow_bb_inside_loop_p (loop, exit->dest))
return false;
- nit = niter_for_exit (data, exit);
+ nit = niter_for_exit (data, exit, &desc);
if (!nit)
return false;
/* If the number of iterations is constant, compare against it directly. */
if (TREE_CODE (nit) == INTEGER_CST)
{
- if (!tree_int_cst_lt (nit, period))
- return false;
+ /* See cand_value_at. */
+ if (stmt_after_increment (loop, cand, use->stmt))
+ {
+ if (!tree_int_cst_lt (nit, period))
+ return false;
+ }
+ else
+ {
+ if (tree_int_cst_lt (period, nit))
+ return false;
+ }
}
/* If not, and if this is the only possible exit of the loop, see whether
we can get a conservative estimate on the number of iterations of the
entire loop and compare against that instead. */
- else if (loop_only_exit_p (loop, exit))
+ else
{
double_int period_value, max_niter;
- if (!estimated_loop_iterations (loop, true, &max_niter))
- return false;
+
+ max_niter = desc->max;
+ if (stmt_after_increment (loop, cand, use->stmt))
+ max_niter = double_int_add (max_niter, double_int_one);
period_value = tree_to_double_int (period);
- if (double_int_ucmp (max_niter, period_value) >= 0)
- return false;
+ if (double_int_ucmp (max_niter, period_value) > 0)
+ {
+	  /* See if we can take advantage of inferred loop bound information. */
+ if (loop_only_exit_p (loop, exit))
+ {
+ if (!estimated_loop_iterations (loop, true, &max_niter))
+ return false;
+ /* The loop bound is already adjusted by adding 1. */
+ if (double_int_ucmp (max_niter, period_value) > 0)
+ return false;
+ }
+ else
+ return false;
+ }
}
- /* Otherwise, punt. */
- else
- return false;
-
cand_value_at (loop, cand, use->stmt, nit, &bnd);
*bound = aff_combination_to_tree (&bnd);
return true;
}
+
/* Determines cost of basing replacement of USE on CAND in a condition. */
static bool
elim_cost = force_var_cost (data, bound, &depends_on_elim);
/* The bound is a loop invariant, so it will be only computed
once. */
- elim_cost.cost /= AVG_LOOP_NITER (data->current_loop);
+ elim_cost.cost = adjust_setup_cost (data, elim_cost.cost);
}
else
elim_cost = infinite_cost;
cost_base = force_var_cost (data, base, NULL);
cost_step = add_cost (TYPE_MODE (TREE_TYPE (base)), data->speed);
- cost = cost_step + cost_base.cost / AVG_LOOP_NITER (current_loop);
+ cost = cost_step + adjust_setup_cost (data, cost_base.cost);
/* Prefer the original ivs unless we may gain something by replacing it.
The reason is to make debugging simpler; so this is not relevant for
{
/* We add size to the cost, so that we prefer eliminating ivs
if possible. */
- return size + estimate_reg_pressure_cost (size, data->regs_used, data->speed);
+ return size + estimate_reg_pressure_cost (size, data->regs_used, data->speed,
+ data->body_includes_call);
}
/* For each size of the induction variable set determine the penalty. */
struct loop *loop = data->current_loop;
bitmap_iterator bi;
- /* We use the following model (definitely improvable, especially the
- cost function -- TODO):
-
- We estimate the number of registers available (using MD data), name it A.
-
- We estimate the number of registers used by the loop, name it U. This
- number is obtained as the number of loop phi nodes (not counting virtual
- registers and bivs) + the number of variables from outside of the loop.
-
- We set a reserve R (free regs that are used for temporary computations,
- etc.). For now the reserve is a constant 3.
-
- Let I be the number of induction variables.
-
- -- if U + I + R <= A, the cost is I * SMALL_COST (just not to encourage
- make a lot of ivs without a reason).
- -- if A - R < U + I <= A, the cost is I * PRES_COST
- -- if U + I > A, the cost is I * PRES_COST and
- number of uses * SPILL_COST * (U + I - A) / (U + I) is added. */
-
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Global costs:\n");
fprintf (dump_file, " target_avail_regs %d\n", target_avail_regs);
+ fprintf (dump_file, " target_clobbered_regs %d\n", target_clobbered_regs);
fprintf (dump_file, " target_reg_cost %d\n", target_reg_cost[data->speed]);
fprintf (dump_file, " target_spill_cost %d\n", target_spill_cost[data->speed]);
}
}
/* Tries to extend the sets IVS in the best possible way in order
- to express the USE. */
+ to express the USE. If ORIGINALP is true, prefer candidates from
+ the original set of IVs, otherwise favor important candidates not
+ based on any memory object. */
static bool
try_add_cand_for (struct ivopts_data *data, struct iv_ca *ivs,
- struct iv_use *use)
+ struct iv_use *use, bool originalp)
{
comp_cost best_cost, act_cost;
unsigned i;
iv_ca_set_no_cp (data, ivs, use);
}
- /* First try important candidates not based on any memory object. Only if
+ /* If ORIGINALP is true, try to find the original IV for the use. Otherwise
+ first try important candidates not based on any memory object. Only if
this fails, try the specific ones. Rationale -- in loops with many
variables the best choice often is to use just one generic biv. If we
added here many ivs specific to the uses, the optimization algorithm later
{
cand = iv_cand (data, i);
- if (cand->iv->base_object != NULL_TREE)
+ if (originalp && cand->pos !=IP_ORIGINAL)
+ continue;
+
+ if (!originalp && cand->iv->base_object != NULL_TREE)
continue;
if (iv_ca_cand_used_p (ivs, cand))
continue;
/* Already tried this. */
- if (cand->important && cand->iv->base_object == NULL_TREE)
- continue;
+ if (cand->important)
+ {
+ if (originalp && cand->pos == IP_ORIGINAL)
+ continue;
+ if (!originalp && cand->iv->base_object == NULL_TREE)
+ continue;
+ }
if (iv_ca_cand_used_p (ivs, cand))
continue;
/* Finds an initial assignment of candidates to uses. */
static struct iv_ca *
-get_initial_solution (struct ivopts_data *data)
+get_initial_solution (struct ivopts_data *data, bool originalp)
{
struct iv_ca *ivs = iv_ca_new (data);
unsigned i;
for (i = 0; i < n_iv_uses (data); i++)
- if (!try_add_cand_for (data, ivs, iv_use (data, i)))
+ if (!try_add_cand_for (data, ivs, iv_use (data, i), originalp))
{
iv_ca_free (&ivs);
return NULL;
solution and remove the unused ivs while this improves the cost. */
static struct iv_ca *
-find_optimal_iv_set (struct ivopts_data *data)
+find_optimal_iv_set_1 (struct ivopts_data *data, bool originalp)
{
- unsigned i;
struct iv_ca *set;
- struct iv_use *use;
/* Get the initial solution. */
- set = get_initial_solution (data);
+ set = get_initial_solution (data, originalp);
if (!set)
{
if (dump_file && (dump_flags & TDF_DETAILS))
}
}
+ return set;
+}
+
+static struct iv_ca *
+find_optimal_iv_set (struct ivopts_data *data)
+{
+ unsigned i;
+ struct iv_ca *set, *origset;
+ struct iv_use *use;
+ comp_cost cost, origcost;
+
+ /* Determine the cost based on a strategy that starts with original IVs,
+ and try again using a strategy that prefers candidates not based
+ on any IVs. */
+ origset = find_optimal_iv_set_1 (data, true);
+ set = find_optimal_iv_set_1 (data, false);
+
+ if (!origset && !set)
+ return NULL;
+
+ origcost = origset ? iv_ca_cost (origset) : infinite_cost;
+ cost = set ? iv_ca_cost (set) : infinite_cost;
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
- comp_cost cost = iv_ca_cost (set);
- fprintf (dump_file, "Final cost %d (complexity %d)\n\n", cost.cost, cost.complexity);
+ fprintf (dump_file, "Original cost %d (complexity %d)\n\n",
+ origcost.cost, origcost.complexity);
+ fprintf (dump_file, "Final cost %d (complexity %d)\n\n",
+ cost.cost, cost.complexity);
+ }
+
+ /* Choose the one with the best cost. */
+ if (compare_costs (origcost, cost) <= 0)
+ {
+ if (set)
+ iv_ca_free (&set);
+ set = origset;
}
+ else if (origset)
+ iv_ca_free (&origset);
for (i = 0; i < n_iv_uses (data); i++)
{
gcc_unreachable ();
}
- op = force_gimple_operand_gsi (&bsi, comp, false, SSA_NAME_VAR (tgt),
- true, GSI_SAME_STMT);
+ if (!valid_gimple_rhs_p (comp)
+ || (gimple_code (use->stmt) != GIMPLE_PHI
+ /* We can't allow re-allocating the stmt as it might be pointed
+ to still. */
+ && (get_gimple_rhs_num_ops (TREE_CODE (comp))
+ >= gimple_num_ops (gsi_stmt (bsi)))))
+ {
+ comp = force_gimple_operand_gsi (&bsi, comp, true, NULL_TREE,
+ true, GSI_SAME_STMT);
+ if (POINTER_TYPE_P (TREE_TYPE (tgt)))
+ duplicate_ssa_name_ptr_info (comp, SSA_NAME_PTR_INFO (tgt));
+ }
if (gimple_code (use->stmt) == GIMPLE_PHI)
{
- ass = gimple_build_assign (tgt, op);
+ ass = gimple_build_assign (tgt, comp);
gsi_insert_before (&bsi, ass, GSI_SAME_STMT);
bsi = gsi_for_stmt (use->stmt);
}
else
{
- gimple_assign_set_rhs_from_tree (&bsi, op);
+ gimple_assign_set_rhs_from_tree (&bsi, comp);
use->stmt = gsi_stmt (bsi);
}
}
static void
copy_ref_info (tree new_ref, tree old_ref)
{
-  if (TREE_CODE (old_ref) == TARGET_MEM_REF)
-    copy_mem_ref_info (new_ref, old_ref);
-  else
+  tree new_ptr_base = NULL_TREE;
+
+  /* Carry over the pre-lowering form of the reference when the new
+     reference is a TARGET_MEM_REF.  */
+  if (TREE_CODE (old_ref) == TARGET_MEM_REF
+      && TREE_CODE (new_ref) == TARGET_MEM_REF)
+    TMR_ORIGINAL (new_ref) = TMR_ORIGINAL (old_ref);
+  else if (TREE_CODE (new_ref) == TARGET_MEM_REF)
+    TMR_ORIGINAL (new_ref) = unshare_and_remove_ssa_names (old_ref);
+
+  TREE_SIDE_EFFECTS (new_ref) = TREE_SIDE_EFFECTS (old_ref);
+  TREE_THIS_VOLATILE (new_ref) = TREE_THIS_VOLATILE (old_ref);
+
+  /* Locate the pointer base of the new reference, if any.  */
+  if (TREE_CODE (new_ref) == TARGET_MEM_REF)
+    new_ptr_base = TMR_BASE (new_ref);
+  else if (TREE_CODE (new_ref) == MEM_REF)
+    new_ptr_base = TREE_OPERAND (new_ref, 0);
+
+  /* We can transfer points-to information from an old pointer
+     or decl base to the new one, but only if the new base does not
+     already carry SSA_NAME_PTR_INFO of its own.  */
+  if (new_ptr_base
+      && TREE_CODE (new_ptr_base) == SSA_NAME
+      && POINTER_TYPE_P (TREE_TYPE (new_ptr_base))
+      && !SSA_NAME_PTR_INFO (new_ptr_base))
    {
-      TMR_ORIGINAL (new_ref) = unshare_and_remove_ssa_names (old_ref);
-      TREE_SIDE_EFFECTS (new_ref) = TREE_SIDE_EFFECTS (old_ref);
-      TREE_THIS_VOLATILE (new_ref) = TREE_THIS_VOLATILE (old_ref);
+      tree base = get_base_address (old_ref);
+      if (!base)
+	;
+      else if ((INDIRECT_REF_P (base)
+		|| TREE_CODE (base) == MEM_REF)
+	       && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
+	/* Old base was a dereferenced SSA pointer: copy its ptr_info.  */
+	duplicate_ssa_name_ptr_info
+	  (new_ptr_base, SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0)));
+      else if (TREE_CODE (base) == VAR_DECL
+	       || TREE_CODE (base) == PARM_DECL
+	       || TREE_CODE (base) == RESULT_DECL)
+	{
+	  /* Old base was a decl: the new pointer points to that decl.  */
+	  struct ptr_info_def *pi = get_ptr_info (new_ptr_base);
+	  pt_solution_set_var (&pi->pt, base);
+	}
    }
}
if (cand->iv->base_object)
base_hint = var_at_stmt (data->current_loop, cand, use->stmt);
- ref = create_mem_ref (&bsi, TREE_TYPE (*use->op_p), &aff, base_hint,
- data->speed);
+ ref = create_mem_ref (&bsi, TREE_TYPE (*use->op_p),
+ reference_alias_ptr_type (*use->op_p),
+ &aff, base_hint, data->speed);
copy_ref_info (ref, *use->op_p);
*use->op_p = ref;
}
BITMAP_FREE (toremove);
}
+/* Frees the struct tree_niter_desc stored in *VALUE.  Callback for
+   pointer_map_traverse, used when tearing down DATA->niters.
+   NOTE(review): assumes each slot was allocated with XNEW (plain
+   malloc) in niter_for_exit, so free is the matching deallocator --
+   confirm if the allocation site changes.  */
+
+static bool
+free_tree_niter_desc (const void *key ATTRIBUTE_UNUSED, void **value,
+		      void *data ATTRIBUTE_UNUSED)
+{
+  struct tree_niter_desc *const niter = (struct tree_niter_desc *) *value;
+
+  free (niter);
+  /* Return true so pointer_map_traverse keeps iterating over all
+     remaining entries.  */
+  return true;
+}
+
/* Frees data allocated by the optimization of a single loop. */
static void
if (data->niters)
{
+ pointer_map_traverse (data->niters, free_tree_niter_desc, NULL);
pointer_map_destroy (data->niters);
data->niters = NULL;
}
VEC_free (iv_cand_p, heap, data->iv_candidates);
}
+/* Returns true if the loop body BODY (an array of NUM_NODES basic
+   blocks) includes any function calls, ignoring calls to builtins
+   classified as inexpensive.  */
+
+static bool
+loop_body_includes_call (basic_block *body, unsigned num_nodes)
+{
+  gimple_stmt_iterator gsi;
+  unsigned i;
+
+  for (i = 0; i < num_nodes; i++)
+    for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
+      {
+	gimple stmt = gsi_stmt (gsi);
+	if (is_gimple_call (stmt)
+	    /* NOTE(review): gimple_call_fndecl is NULL for indirect
+	       calls; this relies on is_inexpensive_builtin rejecting
+	       NULL so indirect calls count as calls -- confirm.  */
+	    && !is_inexpensive_builtin (gimple_call_fndecl (stmt)))
+	  return true;
+      }
+  return false;
+}
+
/* Optimizes the LOOP. Returns true if anything changed. */
static bool
}
body = get_loop_body (loop);
+ data->body_includes_call = loop_body_includes_call (body, loop->num_nodes);
renumber_gimple_stmt_uids_in_blocks (body, loop->num_nodes);
free (body);