/* Array prefetching.
- Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+ Copyright (C) 2005, 2007, 2008, 2009, 2010, 2011
+ Free Software Foundation, Inc.
This file is part of GCC.
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "diagnostic-core.h"
-#include "toplev.h"
#include "params.h"
#include "langhooks.h"
#include "tree-inline.h"
(2) has PREFETCH_MOD 64
(3) has PREFETCH_MOD 4
(4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
- the cache line accessed by (4) is the same with probability only
+ the cache line accessed by (5) is the same with probability only
7/32.
+ (The two references are 50 bytes apart; with 64-byte cache lines they
+ can only share a line when (4) falls within the first 14 bytes of
+ one, i.e. with probability 14/64 = 7/32.)
(5) has PREFETCH_MOD 1 as well.
prefetch instructions with guards in cases where 5) was not sufficient
to satisfy the constraints?
- The function is_loop_prefetching_profitable() implements a cost model
- to determine if prefetching is profitable for a given loop. The cost
- model has two heuristcs:
- 1. A heuristic that determines whether the given loop has enough CPU
- ops that can be overlapped with cache missing memory ops.
- If not, the loop won't benefit from prefetching. This is implemented
- by requirung the ratio between the instruction count and the mem ref
- count to be above a certain minimum.
- 2. A heuristic that disables prefetching in a loop with an unknown trip
- count if the prefetching cost is above a certain limit. The relative
- prefetching cost is estimated by taking the ratio between the
- prefetch count and the total intruction count (this models the I-cache
- cost).
+ A cost model is implemented to determine whether or not prefetching is
+ profitable for a given loop. The cost model has three heuristics:
+
+ 1. Function trip_count_to_ahead_ratio_too_small_p implements a
+ heuristic that determines whether or not the loop has too few
+ iterations (compared to ahead). Prefetching is not likely to be
+ beneficial if the trip count to ahead ratio is below a certain
+ minimum.
+
+ 2. Function mem_ref_count_reasonable_p implements a heuristic that
+ determines whether the given loop has enough CPU ops that can be
+ overlapped with cache missing memory ops. If not, the loop
+ won't benefit from prefetching. In the implementation,
+ prefetching is not considered beneficial if the ratio between
+ the instruction count and the mem ref count is below a certain
+ minimum.
+
+ 3. Function insn_to_prefetch_ratio_too_small_p implements a
+ heuristic that disables prefetching in a loop if the prefetching
+ cost is above a certain limit. The relative prefetching cost is
+ estimated by taking the ratio between the prefetch count and the
+ total instruction count (this models the I-cache cost).
+
The limits used in these heuristics are defined as parameters with
reasonable default values. Machine-specific default values will be
added later.
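+
+   As a rough sketch (the driver is loop_prefetch_arrays below), the
+   three checks gate prefetching in this order:
+
+     if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
+       return false;
+     ... gather the memory references ...
+     if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
+       goto fail;
+     ... estimate reuse, unroll factor and prefetch count ...
+     if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
+                                             unroll_factor))
+       goto fail;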
#define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
#endif
+/* Some of the prefetch computations have quadratic complexity. We want to
+ avoid huge compile times and, therefore, want to limit the number of
+ memory references per loop for which we consider prefetching. */
+
+#ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
+#define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
+#endif
+
/* The memory reference. */
struct mem_ref
HOST_WIDE_INT idelta = 0, imult = 1;
affine_iv iv;
- if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF)
- return false;
-
if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
*index, &iv, true))
return false;
*step = NULL_TREE;
*delta = 0;
- /* First strip off the component references. Ignore bitfields. */
- if (TREE_CODE (ref) == COMPONENT_REF
- && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
- ref = TREE_OPERAND (ref, 0);
+ /* First strip off the component references. Ignore bitfields.
+ Also strip off the real and imaginary parts of a complex, so that
+ they can have the same base. */
+ if (TREE_CODE (ref) == REALPART_EXPR
+ || TREE_CODE (ref) == IMAGPART_EXPR
+ || (TREE_CODE (ref) == COMPONENT_REF
+ && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
+ {
+ if (TREE_CODE (ref) == IMAGPART_EXPR)
+ *delta += int_size_in_bytes (TREE_TYPE (ref));
+ ref = TREE_OPERAND (ref, 0);
+ }
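+
+  /* E.g. for REF == __imag *p with *p of type _Complex double, the
+     code above strips REF down to *p and advances *delta by the size
+     of a double (8 bytes on typical targets), so that __real *p and
+     __imag *p end up with the same base.  */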
*ref_p = ref;
if (step == NULL_TREE)
return false;
+ /* Stop if the address of BASE could not be taken. */
+ if (may_be_nonaddressable_p (base))
+ return false;
+
/* Limit non-constant step prefetching only to the innermost loops. */
if (!cst_and_fits_in_hwi (step) && loop->inner != NULL)
return false;
prefetch_before = (hit_from - delta_r + step - 1) / step;
/* Do not reduce prefetch_before if we meet beyond cache size. */
- if (prefetch_before > (unsigned) abs (L2_CACHE_SIZE_BYTES / step))
+ if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step))
prefetch_before = PREFETCH_ALL;
if (prefetch_before < ref->prefetch_before)
ref->prefetch_before = prefetch_before;
addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
true, NULL, true, GSI_SAME_STMT);
write_p = ref->write_p ? integer_one_node : integer_zero_node;
- local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);
+ local = nontemporal ? integer_zero_node : integer_three_node;
for (ap = 0; ap < n_prefetches; ap++)
{
/* Determine the address to prefetch. */
delta = (ahead + ap * ref->prefetch_mod) *
int_cst_value (ref->group->step);
- addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
- addr_base, size_int (delta));
+ addr = fold_build_pointer_plus_hwi (addr_base, delta);
addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
true, GSI_SAME_STMT);
}
forward = fold_build2 (MULT_EXPR, sizetype,
fold_convert (sizetype, ref->group->step),
fold_convert (sizetype, size_int (ahead)));
- addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, addr_base,
- forward);
+ addr = fold_build_pointer_plus (addr_base, forward);
addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
NULL, true, GSI_SAME_STMT);
}
/* Create the prefetch instruction. */
- prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
+ prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
3, addr, write_p, local);
gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
}
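+
+/* An illustration with assumed values: with ahead == 4,
+   ref->prefetch_mod == 2, a constant step of 16 and n_prefetches == 2,
+   delta is (4 + ap * 2) * 16 for ap == 0 and 1, so the calls emitted
+   above are equivalent to
+
+     __builtin_prefetch (addr_base + 64, write_p, 3);
+     __builtin_prefetch (addr_base + 96, write_p, 3);
+
+   with the last argument being 3 because the reference is not
+   nontemporal.  */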
gimple_stmt_iterator bsi;
unsigned i;
- for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
+ FOR_EACH_VEC_ELT (edge, exits, i, exit)
{
call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
unsigned i;
edge exit;
- for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
+ FOR_EACH_VEC_ELT (edge, exits, i, exit)
if ((exit->flags & EDGE_ABNORMAL)
&& exit->dest == EXIT_BLOCK_PTR)
ret = false;
strides = XCNEWVEC (HOST_WIDE_INT, n);
access_fns = DR_ACCESS_FNS (dr);
- for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
+ FOR_EACH_VEC_ELT (tree, access_fns, i, access_fn)
{
/* Keep track of the reference corresponding to the subscript, so that we
know its stride. */
continue;
aloop = VEC_index (loop_p, vloops, i);
- vol = estimated_loop_iterations_int (aloop, false);
+ vol = max_stmt_executions_int (aloop, false);
if (vol < 0)
vol = expected_loop_iterations (aloop);
volume *= vol;
for (gr = refs; gr; gr = gr->next)
for (ref = gr->refs; ref; ref = ref->next)
{
- dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);
+ dr = create_data_ref (nest, loop_containing_stmt (ref->stmt),
+ ref->mem, ref->stmt, !ref->write_p);
if (dr)
{
no_other_refs = false;
}
- for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
+ FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
{
dist = self_reuse_distance (dr, loop_data_size, n, loop);
ref = (struct mem_ref *) dr->aux;
compute_all_dependences (datarefs, &dependences, vloops, true);
- for (i = 0; VEC_iterate (ddr_p, dependences, i, dep); i++)
+ FOR_EACH_VEC_ELT (ddr_p, dependences, i, dep)
{
if (DDR_ARE_DEPENDENT (dep) == chrec_known)
continue;
}
}
-/* Do a cost-benefit analysis to determine if prefetching is profitable
- for the current loop given the following parameters:
+/* Determine whether or not the trip count to ahead ratio is too small based
+ on profitability considerations.
AHEAD: the iteration ahead distance,
- EST_NITER: the estimated trip count,
+ EST_NITER: the estimated trip count. */
+
+static bool
+trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
+{
+ /* Assume trip count to ahead ratio is big enough if the trip count could not
+ be estimated at compile time. */
+ if (est_niter < 0)
+ return false;
+
+ if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file,
+ "Not prefetching -- loop estimated to roll only %d times\n",
+ (int) est_niter);
+ return true;
+ }
+
+ return false;
+}
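+
+/* For instance, assuming TRIP_COUNT_TO_AHEAD_RATIO == 4 and
+   ahead == 10, a loop estimated to roll only 30 times is rejected
+   above, since 30 < 4 * 10.  */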
+
+/* Determine whether or not the number of memory references in the loop is
+ reasonable based on profitability and compilation time considerations.
NINSNS: estimated number of instructions in the loop,
- PREFETCH_COUNT: an estimate of the number of prefetches
MEM_REF_COUNT: total number of memory references in the loop. */
static bool
-is_loop_prefetching_profitable (unsigned ahead, HOST_WIDE_INT est_niter,
- unsigned ninsns, unsigned prefetch_count,
- unsigned mem_ref_count, unsigned unroll_factor)
+mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
{
- int insn_to_mem_ratio, insn_to_prefetch_ratio;
+ int insn_to_mem_ratio;
if (mem_ref_count == 0)
return false;
+ /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
+ (compute_all_dependences) are of quadratic complexity and thus costly.
+ To avoid huge compile times, we give up prefetching if mem_ref_count
+ is too large. */
+ if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
+ return false;
+
/* Prefetching improves performance by overlapping cache missing
memory accesses with CPU operations. If the loop does not have
enough CPU operations to overlap with memory operations, prefetching
return false;
}
+ return true;
+}
+
+/* Determine whether or not the instruction to prefetch ratio in the loop is
+ too small based on profitability considerations.
+ NINSNS: estimated number of instructions in the loop,
+ PREFETCH_COUNT: an estimate of the number of prefetches,
+ UNROLL_FACTOR: the factor to unroll the loop if prefetching. */
+
+static bool
+insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
+ unsigned unroll_factor)
+{
+ int insn_to_prefetch_ratio;
+
/* Prefetching most likely causes performance degradation when the instruction
to prefetch ratio is too small. Too many prefetch instructions in a loop
may reduce the I-cache performance.
fprintf (dump_file,
"Not prefetching -- instruction to prefetch ratio (%d) too small\n",
insn_to_prefetch_ratio);
- return false;
+ return true;
}
- /* Could not do further estimation if the trip count is unknown. Just assume
- prefetching is profitable. Too aggressive??? */
- if (est_niter < 0)
- return true;
-
- if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file,
- "Not prefetching -- loop estimated to roll only %d times\n",
- (int) est_niter);
- return false;
- }
- return true;
+ return false;
}
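+
+/* For instance, assuming MIN_INSN_TO_PREFETCH_RATIO == 9: with
+   ninsns == 16, unroll_factor == 2 and prefetch_count == 4, the
+   ratio is (2 * 16) / 4 == 8, below the minimum, so prefetching is
+   rejected.  */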
return false;
}
+ /* FIXME: the time should be weighted by the probabilities of the blocks in
+ the loop body. */
+ time = tree_num_loop_insns (loop, &eni_time_weights);
+ if (time == 0)
+ return false;
+
+ ahead = (PREFETCH_LATENCY + time - 1) / time;
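+  /* E.g. assuming PREFETCH_LATENCY == 200 and time == 30, this gives
+     ahead == (200 + 29) / 30 == 7, i.e. data is prefetched about seven
+     iterations before its use.  */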
+ est_niter = max_stmt_executions_int (loop, false);
+
+ /* Prefetching is not likely to be profitable if the trip count to ahead
+ ratio is too small. */
+ if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
+ return false;
+
+ ninsns = tree_num_loop_insns (loop, &eni_size_weights);
+
/* Step 1: gather the memory references. */
refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
+ /* Give up prefetching if the number of memory references in the
+ loop is not reasonable based on profitability and compilation time
+ considerations. */
+ if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
+ goto fail;
+
/* Step 2: estimate the reuse effects. */
prune_by_reuse (refs);
determine_loop_nest_reuse (loop, refs, no_other_refs);
- /* Step 3: determine the ahead and unroll factor. */
-
- /* FIXME: the time should be weighted by the probabilities of the blocks in
- the loop body. */
- time = tree_num_loop_insns (loop, &eni_time_weights);
- ahead = (PREFETCH_LATENCY + time - 1) / time;
- est_niter = estimated_loop_iterations_int (loop, false);
-
- ninsns = tree_num_loop_insns (loop, &eni_size_weights);
+ /* Step 3: determine unroll factor. */
unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
est_niter);
ahead, unroll_factor, est_niter,
ninsns, mem_ref_count, prefetch_count);
- if (!is_loop_prefetching_profitable (ahead, est_niter, ninsns, prefetch_count,
- mem_ref_count, unroll_factor))
+ /* Prefetching is not likely to be profitable if the instruction to prefetch
+ ratio is too small. */
+ if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
+ unroll_factor))
goto fail;
mark_nontemporal_stores (loop, refs);
initialize_original_copy_tables ();
- if (!built_in_decls[BUILT_IN_PREFETCH])
+ if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
{
- tree type = build_function_type (void_type_node,
- tree_cons (NULL_TREE,
- const_ptr_type_node,
- NULL_TREE));
+ tree type = build_function_type_list (void_type_node,
+ const_ptr_type_node, NULL_TREE);
tree decl = add_builtin_function ("__builtin_prefetch", type,
BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
NULL, NULL_TREE);
DECL_IS_NOVOPS (decl) = true;
- built_in_decls[BUILT_IN_PREFETCH] = decl;
+ set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
}
/* We assume that size of cache line is a power of two, so verify this