/* Array prefetching.
Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
-#include "varray.h"
#include "expr.h"
#include "tree-pass.h"
#include "ggc.h"
while still within this bound (starting with those with lowest
prefetch_mod, since they are responsible for most of the cache
misses).
-
+
5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
and PREFETCH_BEFORE requirements (within some bounds), and to avoid
prefetching nonaccessed memory.
TODO -- actually implement peeling.
-
+
6) We actually emit the prefetch instructions. ??? Perhaps emit the
prefetch instructions with guards in cases where 5) was not sufficient
to satisfy the constraints?
+ The function is_loop_prefetching_profitable() implements a cost model
+ to determine if prefetching is profitable for a given loop. The cost
+ model has two heuristics:
+ 1. A heuristic that determines whether the given loop has enough CPU
+ ops that can be overlapped with cache-missing memory ops.
+ If not, the loop won't benefit from prefetching. This is implemented
+ by requiring the ratio between the instruction count and the mem ref
+ count to be above a certain minimum.
+ 2. A heuristic that disables prefetching in a loop with an unknown trip
+ count if the prefetching cost is above a certain limit. The relative
+ prefetching cost is estimated by taking the ratio between the
+ prefetch count and the total instruction count (this models the I-cache
+ cost).
+ The limits used in these heuristics are defined as parameters with
+ reasonable default values. Machine-specific default values will be
+ added later.
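+
+ As a purely illustrative example (the numbers are hypothetical, not the
+ parameter defaults): a loop body with 40 instructions and 10 memory
+ references has an instruction-to-memory-reference ratio of 40 / 10 = 4
+ for the first heuristic; if 5 prefetches are to be issued into a body
+ unrolled by a factor of 4, the instruction-to-prefetch ratio for the
+ second heuristic is (4 * 40) / 5 = 32.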
+
Some other TODO:
-- write and use more general reuse analysis (that could be also used
in other cache aimed loop optimizations)
#define FENCE_FOLLOWING_MOVNT NULL_TREE
#endif
+/* It is not profitable to prefetch when the trip count is not at
+ least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
+ For example, in a loop with a prefetch ahead distance of 10,
+ supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
+ profitable to prefetch when the trip count is greater than or equal to
+ 40. In that case, 30 out of the 40 iterations will benefit from
+ prefetching. */
+
+#ifndef TRIP_COUNT_TO_AHEAD_RATIO
+#define TRIP_COUNT_TO_AHEAD_RATIO 4
+#endif
+
/* The group of references between which reuse may occur. */
struct mem_ref_group
{
tree base; /* Base of the reference. */
- HOST_WIDE_INT step; /* Step of the reference. */
+ tree step; /* Step of the reference. */
struct mem_ref *refs; /* References in the group. */
struct mem_ref_group *next; /* Next group of references. */
};
#define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
+/* Do not generate a prefetch if the unroll factor is significantly less
+ than what is required by the prefetch. This is to avoid redundant
+ prefetches. For example, if prefetch_mod is 16 and unroll_factor is
+ 1, this means prefetching requires unrolling the loop 16 times, but
+ the loop is not going to be unrolled. In this case (ratio = 16),
+ prefetching is not likely to be beneficial. */
+
+#ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
+#define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 8
+#endif
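+
+/* For instance, with the default ratio of 8 above, a reference whose
+ prefetch_mod is 16 is only considered for prefetching once the loop
+ unroll factor reaches 2 (16 / 1 = 16 exceeds the ratio, while
+ 16 / 2 = 8 does not). */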
+
/* The memory reference. */
struct mem_ref
fprintf (file, " group %p (base ", (void *) ref->group);
print_generic_expr (file, ref->group->base, TDF_SLIM);
fprintf (file, ", step ");
- fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->group->step);
+ if (cst_and_fits_in_hwi (ref->group->step))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (ref->group->step));
+ else
+ print_generic_expr (file, ref->group->step, TDF_TREE);
fprintf (file, ")\n");
fprintf (file, " delta ");
exist. */
static struct mem_ref_group *
-find_or_create_group (struct mem_ref_group **groups, tree base,
- HOST_WIDE_INT step)
+find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
{
struct mem_ref_group *group;
for (; *groups; groups = &(*groups)->next)
{
- if ((*groups)->step == step
+ if (operand_equal_p ((*groups)->step, step, 0)
&& operand_equal_p ((*groups)->base, base, 0))
return *groups;
- /* Keep the list of groups sorted by decreasing step. */
- if ((*groups)->step < step)
+ /* If step is an integer constant, keep the list of groups sorted
+ by decreasing step. */
+ if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
+ && int_cst_value ((*groups)->step) < int_cst_value (step))
break;
}
{
struct loop *loop; /* Loop of the reference. */
gimple stmt; /* Statement of the reference. */
- HOST_WIDE_INT *step; /* Step of the memory reference. */
+ tree *step; /* Step of the memory reference. */
HOST_WIDE_INT *delta; /* Offset of the memory reference. */
};
{
struct ar_data *ar_data = (struct ar_data *) data;
tree ibase, step, stepsize;
- HOST_WIDE_INT istep, idelta = 0, imult = 1;
+ HOST_WIDE_INT idelta = 0, imult = 1;
affine_iv iv;
if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
return false;
if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
- *index, &iv, false))
+ *index, &iv, true))
return false;
ibase = iv.base;
step = iv.step;
- if (!cst_and_fits_in_hwi (step))
- return false;
- istep = int_cst_value (step);
-
if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
&& cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
{
ibase = build_int_cst (TREE_TYPE (ibase), 0);
}
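+ /* Accumulate this index's step into the step of the whole reference,
+ folding the sum as a sizetype expression since the step may no longer
+ be a compile-time constant. */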
+ if (*ar_data->step == NULL_TREE)
+ *ar_data->step = step;
+ else
+ *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
+ fold_convert (sizetype, *ar_data->step),
+ fold_convert (sizetype, step));
if (TREE_CODE (base) == ARRAY_REF)
{
stepsize = array_ref_element_size (base);
return false;
imult = int_cst_value (stepsize);
- istep *= imult;
+ *ar_data->step = fold_build2 (MULT_EXPR, sizetype,
+ fold_convert (sizetype, *ar_data->step),
+ fold_convert (sizetype, stepsize));
idelta *= imult;
}
- *ar_data->step += istep;
*ar_data->delta += idelta;
*index = ibase;
static bool
analyze_ref (struct loop *loop, tree *ref_p, tree *base,
- HOST_WIDE_INT *step, HOST_WIDE_INT *delta,
+ tree *step, HOST_WIDE_INT *delta,
gimple stmt)
{
struct ar_data ar_data;
HOST_WIDE_INT bit_offset;
tree ref = *ref_p;
- *step = 0;
+ *step = NULL_TREE;
*delta = 0;
/* First strip off the component references. Ignore bitfields. */
off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
bit_offset = TREE_INT_CST_LOW (off);
gcc_assert (bit_offset % BITS_PER_UNIT == 0);
-
+
*delta += bit_offset / BITS_PER_UNIT;
}
gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
tree ref, bool write_p, gimple stmt)
{
- tree base;
- HOST_WIDE_INT step, delta;
+ tree base, step;
+ HOST_WIDE_INT delta;
struct mem_ref_group *agrp;
if (get_base_address (ref) == NULL)
if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
return false;
+ /* If analyze_ref found no step, STEP keeps its default NULL_TREE value;
+ we can stop here. */
+ if (step == NULL_TREE)
+ return false;
- /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
- are integer constants. */
+ /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA is an
+ integer constant and STEP is loop invariant. */
true if there are no other memory references inside the loop. */
static struct mem_ref_group *
-gather_memory_references (struct loop *loop, bool *no_other_refs)
+gather_memory_references (struct loop *loop, bool *no_other_refs,
+ unsigned *ref_count)
{
basic_block *body = get_loop_body_in_dom_order (loop);
basic_block bb;
struct mem_ref_group *refs = NULL;
*no_other_refs = true;
+ *ref_count = 0;
/* Scan the loop body in order, so that the former references precede the
later ones. */
if (gimple_code (stmt) != GIMPLE_ASSIGN)
{
- if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS)
+ if (gimple_vuse (stmt)
|| (is_gimple_call (stmt)
&& !(gimple_call_flags (stmt) & ECF_CONST)))
*no_other_refs = false;
rhs = gimple_assign_rhs1 (stmt);
if (REFERENCE_CLASS_P (rhs))
+ {
*no_other_refs &= gather_memory_references_ref (loop, &refs,
rhs, false, stmt);
+ *ref_count += 1;
+ }
if (REFERENCE_CLASS_P (lhs))
+ {
*no_other_refs &= gather_memory_references_ref (loop, &refs,
lhs, true, stmt);
+ *ref_count += 1;
+ }
}
}
free (body);
static void
prune_ref_by_self_reuse (struct mem_ref *ref)
{
- HOST_WIDE_INT step = ref->group->step;
- bool backward = step < 0;
+ HOST_WIDE_INT step;
+ bool backward;
+
+ /* If the step size is non-constant, we cannot calculate prefetch_mod. */
+ if (!cst_and_fits_in_hwi (ref->group->step))
+ return;
+
+ step = int_cst_value (ref->group->step);
+
+ backward = step < 0;
if (step == 0)
{
return (x + by - 1) / by;
}
+/* Given a CACHE_LINE_SIZE and two inductive memory references
+ with a common STEP greater than CACHE_LINE_SIZE and an address
+ difference DELTA, compute the probability that they will fall
+ in different cache lines. The result is scaled by 1000, so 1000
+ means the two references never share a cache line.
+ DISTINCT_ITERS is the number of distinct iterations after
+ which the pattern repeats itself.
+ ALIGN_UNIT is the unit of alignment in bytes. */
+
+static int
+compute_miss_rate (unsigned HOST_WIDE_INT cache_line_size,
+ HOST_WIDE_INT step, HOST_WIDE_INT delta,
+ unsigned HOST_WIDE_INT distinct_iters,
+ int align_unit)
+{
+ unsigned align, iter;
+ int total_positions, miss_positions, miss_rate;
+ int address1, address2, cache_line1, cache_line2;
+
+ total_positions = 0;
+ miss_positions = 0;
+
+ /* Iterate through all possible alignments of the first
+ memory reference within its cache line. */
+ for (align = 0; align < cache_line_size; align += align_unit)
+
+ /* Iterate through all distinct iterations. */
+ for (iter = 0; iter < distinct_iters; iter++)
+ {
+ address1 = align + step * iter;
+ address2 = address1 + delta;
+ cache_line1 = address1 / cache_line_size;
+ cache_line2 = address2 / cache_line_size;
+ total_positions += 1;
+ if (cache_line1 != cache_line2)
+ miss_positions += 1;
+ }
+ miss_rate = 1000 * miss_positions / total_positions;
+ return miss_rate;
+}
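+
+/* Illustrative example (hypothetical values): with CACHE_LINE_SIZE = 64,
+ STEP = 96, DELTA = 32, DISTINCT_ITERS = 2 and ALIGN_UNIT = 4, the two
+ addresses share a cache line exactly when the first one falls in the
+ lower half of its line, so compute_miss_rate returns 500, i.e. a 50%
+ miss rate scaled by 1000. */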
+
/* Prune the prefetch candidate REF using the reuse with BY.
If BY_IS_BEFORE is true, BY is before REF in the loop. */
prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
bool by_is_before)
{
- HOST_WIDE_INT step = ref->group->step;
- bool backward = step < 0;
+ HOST_WIDE_INT step;
+ bool backward;
HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
HOST_WIDE_INT delta = delta_b - delta_r;
HOST_WIDE_INT hit_from;
unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
+ int miss_rate;
+ HOST_WIDE_INT reduced_step;
+ unsigned HOST_WIDE_INT reduced_prefetch_block;
+ tree ref_type;
+ int align_unit;
+
+ /* If the step is non-constant, we cannot calculate prefetch_before. */
+ if (!cst_and_fits_in_hwi (ref->group->step))
+ return;
+
+ step = int_cst_value (ref->group->step);
+
+ backward = step < 0;
+
if (delta == 0)
{
former. */
if (by_is_before)
ref->prefetch_before = 0;
-
+
return;
}
hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
prefetch_before = (hit_from - delta_r + step - 1) / step;
+ /* If more data than the L2 cache can hold is accessed before the reuse,
+ the prefetched line is likely evicted; do not reduce prefetch_before. */
+ if (prefetch_before > (unsigned) abs (L2_CACHE_SIZE_BYTES / step))
+ prefetch_before = PREFETCH_ALL;
if (prefetch_before < ref->prefetch_before)
ref->prefetch_before = prefetch_before;
return;
}
- /* A more complicated case. First let us ensure that size of cache line
- and step are coprime (here we assume that PREFETCH_BLOCK is a power
- of two. */
+ /* A more complicated case with step > prefetch_block. First reduce
+ the ratio between the step and the cache line size to its simplest
+ terms. The resulting denominator will then represent the number of
+ distinct iterations after which each address will go back to its
+ initial location within the cache line. This computation assumes
+ that PREFETCH_BLOCK is a power of two. */
prefetch_block = PREFETCH_BLOCK;
- while ((step & 1) == 0
- && prefetch_block > 1)
+ reduced_prefetch_block = prefetch_block;
+ reduced_step = step;
+ while ((reduced_step & 1) == 0
+ && reduced_prefetch_block > 1)
{
- step >>= 1;
- prefetch_block >>= 1;
- delta >>= 1;
+ reduced_step >>= 1;
+ reduced_prefetch_block >>= 1;
}
- /* Now step > prefetch_block, and step and prefetch_block are coprime.
- Determine the probability that the accesses hit the same cache line. */
-
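+ /* For example (illustrative values), a step of 96 with a 64-byte cache
+ line reduces to 3 / 2, so the alignment pattern repeats after 2 distinct
+ iterations. */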
prefetch_before = delta / step;
delta %= step;
- if ((unsigned HOST_WIDE_INT) delta
- <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
+ ref_type = TREE_TYPE (ref->mem);
+ align_unit = TYPE_ALIGN (ref_type) / 8;
+ miss_rate = compute_miss_rate (prefetch_block, step, delta,
+ reduced_prefetch_block, align_unit);
+ if (miss_rate <= ACCEPTABLE_MISS_RATE)
{
+ /* Likewise, do not reduce prefetch_before if the reuse distance exceeds
+ the L2 cache capacity. */
+ if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
+ prefetch_before = PREFETCH_ALL;
if (prefetch_before < ref->prefetch_before)
ref->prefetch_before = prefetch_before;
/* Try also the following iteration. */
prefetch_before++;
delta = step - delta;
- if ((unsigned HOST_WIDE_INT) delta
- <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
+ miss_rate = compute_miss_rate (prefetch_block, step, delta,
+ reduced_prefetch_block, align_unit);
+ if (miss_rate <= ACCEPTABLE_MISS_RATE)
{
if (prefetch_before < ref->prefetch_before)
ref->prefetch_before = prefetch_before;
/* For now do not issue prefetches for only first few of the
iterations. */
if (ref->prefetch_before != PREFETCH_ALL)
- return false;
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
+ (void *) ref);
+ return false;
+ }
/* Do not prefetch nontemporal stores. */
if (ref->storent_p)
- return false;
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
+ return false;
+ }
return true;
}
if (!should_issue_prefetch_p (ref))
continue;
+ /* The loop is far from being sufficiently unrolled for this
+ prefetch. Do not generate the prefetch, to avoid many redundant
+ prefetches. */
+ if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
+ continue;
+
/* If we need to prefetch the reference each PREFETCH_MOD iterations,
and we unroll the loop UNROLL_FACTOR times, we need to insert
ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
return any;
}
-/* Determine whether there is any reference suitable for prefetching
- in GROUPS. */
+/* Estimate the number of prefetches in the given GROUPS. */
-static bool
-anything_to_prefetch_p (struct mem_ref_group *groups)
+static int
+estimate_prefetch_count (struct mem_ref_group *groups)
{
struct mem_ref *ref;
+ int prefetch_count = 0;
for (; groups; groups = groups->next)
for (ref = groups->refs; ref; ref = ref->next)
if (should_issue_prefetch_p (ref))
- return true;
+ prefetch_count++;
- return false;
+ return prefetch_count;
}
/* Issue prefetches for the reference REF into loop as decided before.
issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
{
HOST_WIDE_INT delta;
- tree addr, addr_base, write_p, local;
+ tree addr, addr_base, write_p, local, forward;
gimple prefetch;
gimple_stmt_iterator bsi;
unsigned n_prefetches, ap;
for (ap = 0; ap < n_prefetches; ap++)
{
- /* Determine the address to prefetch. */
- delta = (ahead + ap * ref->prefetch_mod) * ref->group->step;
- addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
- addr_base, size_int (delta));
- addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
- true, GSI_SAME_STMT);
-
+ if (cst_and_fits_in_hwi (ref->group->step))
+ {
+ /* Determine the address to prefetch. */
+ delta = (ahead + ap * ref->prefetch_mod) *
+ int_cst_value (ref->group->step);
+ addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
+ addr_base, size_int (delta));
+ addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
+ true, GSI_SAME_STMT);
+ }
+ else
+ {
+ /* The step size is non-constant but loop-invariant. We use the
+ heuristic of simply prefetching AHEAD iterations ahead, i.e. the
+ address ADDR_BASE + AHEAD * STEP. */
+ forward = fold_build2 (MULT_EXPR, sizetype,
+ fold_convert (sizetype, ref->group->step),
+ fold_convert (sizetype, size_int (ahead)));
+ addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, addr_base,
+ forward);
+ addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
+ NULL, true, GSI_SAME_STMT);
+ }
/* Create the prefetch instruction. */
prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
3, addr, write_p, local);
know its stride. */
while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
ref = TREE_OPERAND (ref, 0);
-
+
if (TREE_CODE (ref) == ARRAY_REF)
{
stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
/* If the dependence cannot be analyzed, assume that there might be
a reuse. */
dist = 0;
-
+
ref->independent_p = false;
refb->independent_p = false;
}
}
}
+/* Do a cost-benefit analysis to determine if prefetching is profitable
+ for the current loop given the following parameters:
+ AHEAD: the iteration ahead distance,
+ EST_NITER: the estimated trip count,
+ NINSNS: estimated number of instructions in the loop,
+ PREFETCH_COUNT: an estimate of the number of prefetches,
+ MEM_REF_COUNT: total number of memory references in the loop,
+ UNROLL_FACTOR: the unroll factor determined for the loop. */
+
+static bool
+is_loop_prefetching_profitable (unsigned ahead, HOST_WIDE_INT est_niter,
+ unsigned ninsns, unsigned prefetch_count,
+ unsigned mem_ref_count, unsigned unroll_factor)
+{
+ int insn_to_mem_ratio, insn_to_prefetch_ratio;
+
+ if (mem_ref_count == 0)
+ return false;
+
+ /* Prefetching improves performance by overlapping cache missing
+ memory accesses with CPU operations. If the loop does not have
+ enough CPU operations to overlap with memory operations, prefetching
+ won't give a significant benefit. One approximate way of checking
+ this is to require the ratio of instructions to memory references to
+ be above a certain limit. This approximation works well in practice.
+ TODO: Implement a more precise computation by estimating the time
+ for each CPU or memory op in the loop. Time estimates for memory ops
+ should account for cache misses. */
+ insn_to_mem_ratio = ninsns / mem_ref_count;
+
+ if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file,
+ "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
+ insn_to_mem_ratio);
+ return false;
+ }
+
+ /* Prefetching most likely causes performance degradation when the instruction
+ to prefetch ratio is too small. Too many prefetch instructions in a loop
+ may reduce the I-cache performance.
+ (unroll_factor * ninsns) is used to estimate the number of instructions in
+ the unrolled loop. This implementation is a bit simplistic -- the number
+ of issued prefetch instructions is also affected by unrolling. So,
+ prefetch_mod and the unroll factor should be taken into account when
+ determining prefetch_count. Also, the number of insns of the unrolled
+ loop will usually be significantly smaller than the number of insns of the
+ original loop * unroll_factor (at least the induction variable increases
+ and the exit branches will get eliminated), so it might be better to use
+ tree_estimate_loop_size + estimated_unrolled_size. */
+ insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
+ if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file,
+ "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
+ insn_to_prefetch_ratio);
+ return false;
+ }
+
+ /* We cannot estimate any further if the trip count is unknown. Just assume
+ prefetching is profitable. Too aggressive??? */
+ if (est_niter < 0)
+ return true;
+
+ if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file,
+ "Not prefetching -- loop estimated to roll only %d times\n",
+ (int) est_niter);
+ return false;
+ }
+ return true;
+}
+
+
/* Issue prefetch instructions for array references in LOOP. Returns
true if the LOOP was unrolled. */
HOST_WIDE_INT est_niter;
struct tree_niter_desc desc;
bool unrolled = false, no_other_refs;
+ unsigned prefetch_count;
+ unsigned mem_ref_count;
if (optimize_loop_nest_for_size_p (loop))
{
}
/* Step 1: gather the memory references. */
- refs = gather_memory_references (loop, &no_other_refs);
+ refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
/* Step 2: estimate the reuse effects. */
prune_by_reuse (refs);
- if (!anything_to_prefetch_p (refs))
+ prefetch_count = estimate_prefetch_count (refs);
+ if (prefetch_count == 0)
goto fail;
determine_loop_nest_reuse (loop, refs, no_other_refs);
ahead = (PREFETCH_LATENCY + time - 1) / time;
est_niter = estimated_loop_iterations_int (loop, false);
- /* The prefetches will run for AHEAD iterations of the original loop. Unless
- the loop rolls at least AHEAD times, prefetching the references does not
- make sense. */
- if (est_niter >= 0 && est_niter <= (HOST_WIDE_INT) ahead)
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file,
- "Not prefetching -- loop estimated to roll only %d times\n",
- (int) est_niter);
- goto fail;
- }
-
- mark_nontemporal_stores (loop, refs);
-
ninsns = tree_num_loop_insns (loop, &eni_size_weights);
unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
est_niter);
if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Ahead %d, unroll factor %d\n", ahead, unroll_factor);
+ fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
+ HOST_WIDE_INT_PRINT_DEC "\n"
+ "insn count %d, mem ref count %d, prefetch count %d\n",
+ ahead, unroll_factor, est_niter,
+ ninsns, mem_ref_count, prefetch_count);
+
+ if (!is_loop_prefetching_profitable (ahead, est_niter, ninsns, prefetch_count,
+ mem_ref_count, unroll_factor))
+ goto fail;
+
+ mark_nontemporal_stores (loop, refs);
/* Step 4: what to prefetch? */
if (!schedule_prefetches (refs, unroll_factor, ahead))
L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
+ fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
+ MIN_INSN_TO_PREFETCH_RATIO);
+ fprintf (dump_file, " min insn-to-mem ratio: %d \n",
+ PREFETCH_MIN_INSN_TO_MEM_RATIO);
fprintf (dump_file, "\n");
}