/* Array prefetching.
- Copyright (C) 2005 Free Software Foundation, Inc.
-
+ Copyright (C) 2005, 2007, 2008, 2009, 2010, 2011
+ Free Software Foundation, Inc.
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
-Free Software Foundation; either version 2, or (at your option) any
+Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
-#include "rtl.h"
#include "tm_p.h"
-#include "hard-reg-set.h"
#include "basic-block.h"
#include "output.h"
-#include "diagnostic.h"
+#include "tree-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
-#include "varray.h"
-#include "expr.h"
#include "tree-pass.h"
-#include "ggc.h"
#include "insn-config.h"
#include "recog.h"
#include "hashtab.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "params.h"
#include "langhooks.h"
+#include "tree-inline.h"
+#include "tree-data-ref.h"
+
+
+/* FIXME: Needed for optabs, but this should all be moved to a TBD interface
+ between the GIMPLE and RTL worlds. */
+#include "expr.h"
+#include "optabs.h"
/* This pass inserts prefetch instructions to optimize cache usage during
accesses to arrays in loops. It processes loops sequentially and:
(2) has PREFETCH_MOD 64
(3) has PREFETCH_MOD 4
(4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
- the cache line accessed by (4) is the same with probability only
+ the cache line accessed by (5) is the same with probability only
7/32.
(5) has PREFETCH_MOD 1 as well.
+ Additionally, we use data dependence analysis to determine for each
+ reference the distance till the first reuse; this information is used
+ to determine the temporality of the issued prefetch instruction.
+
3) We determine how much ahead we need to prefetch. The number of
iterations needed is time to fetch / time spent in one iteration of
the loop. The problem is that we do not know either of these values,
while still within this bound (starting with those with lowest
prefetch_mod, since they are responsible for most of the cache
misses).
-
+
5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
and PREFETCH_BEFORE requirements (within some bounds), and to avoid
prefetching nonaccessed memory.
TODO -- actually implement peeling.
-
+
6) We actually emit the prefetch instructions. ??? Perhaps emit the
prefetch instructions with guards in cases where 5) was not sufficient
to satisfy the constraints?
+ A cost model is implemented to determine whether or not prefetching is
+ profitable for a given loop. The cost model has three heuristics:
+
+ 1. Function trip_count_to_ahead_ratio_too_small_p implements a
+ heuristic that determines whether or not the loop has too few
+ iterations (compared to ahead). Prefetching is not likely to be
+ beneficial if the trip count to ahead ratio is below a certain
+ minimum.
+
+ 2. Function mem_ref_count_reasonable_p implements a heuristic that
+ determines whether the given loop has enough CPU ops that can be
+ overlapped with cache missing memory ops. If not, the loop
+ won't benefit from prefetching. In the implementation,
+ prefetching is not considered beneficial if the ratio between
+ the instruction count and the mem ref count is below a certain
+ minimum.
+
+ 3. Function insn_to_prefetch_ratio_too_small_p implements a
+ heuristic that disables prefetching in a loop if the prefetching
+ cost is above a certain limit. The relative prefetching cost is
+ estimated by taking the ratio between the prefetch count and the
+ total instruction count (this models the I-cache cost).
+
+ The limits used in these heuristics are defined as parameters with
+ reasonable default values. Machine-specific default values will be
+ added later.
+
Some other TODO:
-- write and use more general reuse analysis (that could be also used
in other cache aimed loop optimizations)
/* Magic constants follow. These should be replaced by machine specific
numbers. */
-/* A number that should roughly correspond to the number of instructions
- executed before the prefetch is completed. */
-
-#ifndef PREFETCH_LATENCY
-#define PREFETCH_LATENCY 200
-#endif
-
-/* Number of prefetches that can run at the same time. */
-
-#ifndef SIMULTANEOUS_PREFETCHES
-#define SIMULTANEOUS_PREFETCHES 3
-#endif
-
/* True if write can be prefetched by a read prefetch. */
#ifndef WRITE_CAN_USE_READ_PREFETCH
#define READ_CAN_USE_WRITE_PREFETCH 0
#endif
-/* Cache line size. Assumed to be a power of two. */
+/* The size of the block loaded by a single prefetch. Usually, this is
+ the same as cache line size (at the moment, we only consider one level
+ of cache hierarchy). */
#ifndef PREFETCH_BLOCK
-#define PREFETCH_BLOCK 32
+#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
#endif
/* Do we have a forward hardware sequential prefetching? */
/* In some cases we are only able to determine that there is a certain
probability that the two accesses hit the same cache line. In this
case, we issue the prefetches for both of them if this probability
- is less then (1000 - ACCEPTABLE_MISS_RATE) promile. */
+ is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand. */
#ifndef ACCEPTABLE_MISS_RATE
#define ACCEPTABLE_MISS_RATE 50
#define HAVE_prefetch 0
#endif
+#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
+#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
+
+/* We consider a memory access nontemporal if it is not reused sooner than
+ after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
+ accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
+ so that we use nontemporal prefetches e.g. if a single memory location
+ is accessed several times in a single iteration of the loop. */
+#define NONTEMPORAL_FRACTION 16
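+
+/* E.g. (cache parameters assumed for illustration only): with L1_CACHE_SIZE 32
+ and L2_CACHE_SIZE 512 (in kB), a reference is considered nontemporal if more
+ than 512 kB of other data is accessed before its reuse, while reuses closer
+ than 32768 / 16 == 2048 bytes are disregarded. */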
+
+/* In case we have to emit a memory fence instruction after the loop that
+ uses nontemporal stores, this defines the builtin to use. */
+
+#ifndef FENCE_FOLLOWING_MOVNT
+#define FENCE_FOLLOWING_MOVNT NULL_TREE
+#endif
+
+/* It is not profitable to prefetch when the trip count is not at
+ least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
+ For example, in a loop with a prefetch ahead distance of 10,
+ supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
+ profitable to prefetch when the trip count is greater or equal to
+ 40. In that case, 30 out of the 40 iterations will benefit from
+ prefetching. */
+
+#ifndef TRIP_COUNT_TO_AHEAD_RATIO
+#define TRIP_COUNT_TO_AHEAD_RATIO 4
+#endif
+
/* The group of references between which reuse may occur. */
struct mem_ref_group
{
tree base; /* Base of the reference. */
- HOST_WIDE_INT step; /* Step of the reference. */
+ tree step; /* Step of the reference. */
struct mem_ref *refs; /* References in the group. */
struct mem_ref_group *next; /* Next group of references. */
};
#define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
+/* Do not generate a prefetch if the unroll factor is significantly less
+ than what is required by the prefetch. This is to avoid redundant
+ prefetches. For example, when prefetch_mod is 16 and unroll_factor is
+ 2, prefetching requires unrolling the loop 16 times, but
+ the loop is actually unrolled twice. In this case (ratio = 8),
+ prefetching is not likely to be beneficial. */
+
+#ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
+#define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
+#endif
+
+/* Some of the prefetch computations have quadratic complexity. We want to
+ avoid huge compile times and, therefore, want to limit the number of
+ memory references per loop where we consider prefetching. */
+
+#ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
+#define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
+#endif
+
/* The memory reference. */
struct mem_ref
{
- tree stmt; /* Statement in that the reference appears. */
+ gimple stmt; /* Statement in which the reference appears. */
tree mem; /* The reference. */
HOST_WIDE_INT delta; /* Constant offset of the reference. */
- bool write_p; /* Is it a write? */
struct mem_ref_group *group; /* The group of references it belongs to. */
unsigned HOST_WIDE_INT prefetch_mod;
/* Prefetch only each PREFETCH_MOD-th
unsigned HOST_WIDE_INT prefetch_before;
/* Prefetch only first PREFETCH_BEFORE
iterations. */
- bool issue_prefetch_p; /* Should we really issue the prefetch? */
+ unsigned reuse_distance; /* The amount of data accessed before the first
+ reuse of this value. */
struct mem_ref *next; /* The next reference in the group. */
+ unsigned write_p : 1; /* Is it a write? */
+ unsigned independent_p : 1; /* True if the reference is independent of
+ all other references inside the loop. */
+ unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */
+ unsigned storent_p : 1; /* True if we changed the store to a
+ nontemporal one. */
};
/* Dumps information about reference REF to FILE. */
fprintf (file, " group %p (base ", (void *) ref->group);
print_generic_expr (file, ref->group->base, TDF_SLIM);
fprintf (file, ", step ");
- fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->group->step);
+ if (cst_and_fits_in_hwi (ref->group->step))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (ref->group->step));
+ else
+ print_generic_expr (file, ref->group->step, TDF_TREE);
fprintf (file, ")\n");
- fprintf (dump_file, " delta ");
+ fprintf (file, " delta ");
fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
fprintf (file, "\n");
exist. */
static struct mem_ref_group *
-find_or_create_group (struct mem_ref_group **groups, tree base,
- HOST_WIDE_INT step)
+find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
{
struct mem_ref_group *group;
for (; *groups; groups = &(*groups)->next)
{
- if ((*groups)->step == step
+ if (operand_equal_p ((*groups)->step, step, 0)
&& operand_equal_p ((*groups)->base, base, 0))
return *groups;
- /* Keep the list of groups sorted by decreasing step. */
- if ((*groups)->step < step)
+ /* If step is an integer constant, keep the list of groups sorted
+ by decreasing step. */
+ if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
+ && int_cst_value ((*groups)->step) < int_cst_value (step))
break;
}
- group = xcalloc (1, sizeof (struct mem_ref_group));
+ group = XNEW (struct mem_ref_group);
group->base = base;
group->step = step;
group->refs = NULL;
WRITE_P. The reference occurs in statement STMT. */
static void
-record_ref (struct mem_ref_group *group, tree stmt, tree mem,
+record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
HOST_WIDE_INT delta, bool write_p)
{
struct mem_ref **aref;
return;
}
- (*aref) = xcalloc (1, sizeof (struct mem_ref));
+ (*aref) = XNEW (struct mem_ref);
(*aref)->stmt = stmt;
(*aref)->mem = mem;
(*aref)->delta = delta;
(*aref)->write_p = write_p;
(*aref)->prefetch_before = PREFETCH_ALL;
(*aref)->prefetch_mod = 1;
+ (*aref)->reuse_distance = 0;
(*aref)->issue_prefetch_p = false;
(*aref)->group = group;
(*aref)->next = NULL;
+ (*aref)->independent_p = false;
+ (*aref)->storent_p = false;
if (dump_file && (dump_flags & TDF_DETAILS))
dump_mem_ref (dump_file, *aref);
struct ar_data
{
struct loop *loop; /* Loop of the reference. */
- tree stmt; /* Statement of the reference. */
- HOST_WIDE_INT *step; /* Step of the memory reference. */
+ gimple stmt; /* Statement of the reference. */
+ tree *step; /* Step of the memory reference. */
HOST_WIDE_INT *delta; /* Offset of the memory reference. */
};
static bool
idx_analyze_ref (tree base, tree *index, void *data)
{
- struct ar_data *ar_data = data;
+ struct ar_data *ar_data = (struct ar_data *) data;
tree ibase, step, stepsize;
- HOST_WIDE_INT istep, idelta = 0, imult = 1;
+ HOST_WIDE_INT idelta = 0, imult = 1;
affine_iv iv;
- if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
- || TREE_CODE (base) == ALIGN_INDIRECT_REF)
- return false;
-
- if (!simple_iv (ar_data->loop, ar_data->stmt, *index, &iv, false))
+ if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
+ *index, &iv, true))
return false;
ibase = iv.base;
step = iv.step;
- if (zero_p (step))
- istep = 0;
- else
- {
- if (!cst_and_fits_in_hwi (step))
- return false;
- istep = int_cst_value (step);
- }
-
- if (TREE_CODE (ibase) == PLUS_EXPR
+ if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
&& cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
{
idelta = int_cst_value (TREE_OPERAND (ibase, 1));
if (!cst_and_fits_in_hwi (stepsize))
return false;
imult = int_cst_value (stepsize);
-
- istep *= imult;
+ step = fold_build2 (MULT_EXPR, sizetype,
+ fold_convert (sizetype, step),
+ fold_convert (sizetype, stepsize));
idelta *= imult;
}
- *ar_data->step += istep;
+ if (*ar_data->step == NULL_TREE)
+ *ar_data->step = step;
+ else
+ *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
+ fold_convert (sizetype, *ar_data->step),
+ fold_convert (sizetype, step));
*ar_data->delta += idelta;
*index = ibase;
static bool
analyze_ref (struct loop *loop, tree *ref_p, tree *base,
- HOST_WIDE_INT *step, HOST_WIDE_INT *delta,
- tree stmt)
+ tree *step, HOST_WIDE_INT *delta,
+ gimple stmt)
{
struct ar_data ar_data;
tree off;
HOST_WIDE_INT bit_offset;
tree ref = *ref_p;
- *step = 0;
+ *step = NULL_TREE;
*delta = 0;
- /* First strip off the component references. Ignore bitfields. */
- if (TREE_CODE (ref) == COMPONENT_REF
- && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
- ref = TREE_OPERAND (ref, 0);
+ /* First strip off the component references. Ignore bitfields.
+ Also strip off the real and imaginary parts of a complex, so that
+ they can have the same base. */
+ if (TREE_CODE (ref) == REALPART_EXPR
+ || TREE_CODE (ref) == IMAGPART_EXPR
+ || (TREE_CODE (ref) == COMPONENT_REF
+ && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
+ {
+ if (TREE_CODE (ref) == IMAGPART_EXPR)
+ *delta += int_size_in_bytes (TREE_TYPE (ref));
+ ref = TREE_OPERAND (ref, 0);
+ }
*ref_p = ref;
off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
bit_offset = TREE_INT_CST_LOW (off);
gcc_assert (bit_offset % BITS_PER_UNIT == 0);
-
+
*delta += bit_offset / BITS_PER_UNIT;
}
}
/* Record a memory reference REF to the list REFS. The reference occurs in
- LOOP in statement STMT and it is write if WRITE_P. */
+ LOOP in statement STMT and it is a write if WRITE_P. Returns true if the
+ reference was recorded, false otherwise. */
-static void
+static bool
gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
- tree ref, bool write_p, tree stmt)
+ tree ref, bool write_p, gimple stmt)
{
- tree base;
- HOST_WIDE_INT step, delta;
+ tree base, step;
+ HOST_WIDE_INT delta;
struct mem_ref_group *agrp;
+ if (get_base_address (ref) == NULL)
+ return false;
+
if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
- return;
+ return false;
+ /* If analyze_ref fails, the default is NULL_TREE. We can stop here. */
+ if (step == NULL_TREE)
+ return false;
+
+ /* Stop if the address of BASE could not be taken. */
+ if (may_be_nonaddressable_p (base))
+ return false;
+
+ /* Limit non-constant step prefetching only to the innermost loops. */
+ if (!cst_and_fits_in_hwi (step) && loop->inner != NULL)
+ return false;
/* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
are integer constants. */
agrp = find_or_create_group (refs, base, step);
record_ref (agrp, stmt, ref, delta, write_p);
+
+ return true;
}
-/* Record the suitable memory references in LOOP. */
+/* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
+ true if there are no other memory references inside the loop. REF_COUNT
+ is set to the number of memory references found in the loop. */
static struct mem_ref_group *
-gather_memory_references (struct loop *loop)
+gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
{
basic_block *body = get_loop_body_in_dom_order (loop);
basic_block bb;
unsigned i;
- block_stmt_iterator bsi;
- tree stmt, lhs, rhs;
+ gimple_stmt_iterator bsi;
+ gimple stmt;
+ tree lhs, rhs;
struct mem_ref_group *refs = NULL;
+ *no_other_refs = true;
+ *ref_count = 0;
+
/* Scan the loop body in order, so that the former references precede the
later ones. */
for (i = 0; i < loop->num_nodes; i++)
if (bb->loop_father != loop)
continue;
- for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
+ for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
{
- stmt = bsi_stmt (bsi);
- if (TREE_CODE (stmt) != MODIFY_EXPR)
- continue;
+ stmt = gsi_stmt (bsi);
+
+ if (gimple_code (stmt) != GIMPLE_ASSIGN)
+ {
+ if (gimple_vuse (stmt)
+ || (is_gimple_call (stmt)
+ && !(gimple_call_flags (stmt) & ECF_CONST)))
+ *no_other_refs = false;
+ continue;
+ }
- lhs = TREE_OPERAND (stmt, 0);
- rhs = TREE_OPERAND (stmt, 1);
+ lhs = gimple_assign_lhs (stmt);
+ rhs = gimple_assign_rhs1 (stmt);
if (REFERENCE_CLASS_P (rhs))
- gather_memory_references_ref (loop, &refs, rhs, false, stmt);
+ {
+ *no_other_refs &= gather_memory_references_ref (loop, &refs,
+ rhs, false, stmt);
+ *ref_count += 1;
+ }
if (REFERENCE_CLASS_P (lhs))
- gather_memory_references_ref (loop, &refs, lhs, true, stmt);
+ {
+ *no_other_refs &= gather_memory_references_ref (loop, &refs,
+ lhs, true, stmt);
+ *ref_count += 1;
+ }
}
}
free (body);
static void
prune_ref_by_self_reuse (struct mem_ref *ref)
{
- HOST_WIDE_INT step = ref->group->step;
- bool backward = step < 0;
+ HOST_WIDE_INT step;
+ bool backward;
+
+ /* If the step size is non-constant, we cannot calculate prefetch_mod. */
+ if (!cst_and_fits_in_hwi (ref->group->step))
+ return;
+
+ step = int_cst_value (ref->group->step);
+
+ backward = step < 0;
if (step == 0)
{
return (x + by - 1) / by;
}
+/* Given a CACHE_LINE_SIZE and two inductive memory references
+ with a common STEP greater than CACHE_LINE_SIZE and an address
+ difference DELTA, compute the probability that they will fall
+ in different cache lines. Return true if the computed miss rate
+ is not greater than the ACCEPTABLE_MISS_RATE. DISTINCT_ITERS is the
+ number of distinct iterations after which the pattern repeats itself.
+ ALIGN_UNIT is the unit of alignment in bytes. */
+
+static bool
+is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
+ HOST_WIDE_INT step, HOST_WIDE_INT delta,
+ unsigned HOST_WIDE_INT distinct_iters,
+ int align_unit)
+{
+ unsigned align, iter;
+ int total_positions, miss_positions, max_allowed_miss_positions;
+ int address1, address2, cache_line1, cache_line2;
+
+ /* It always misses if delta is greater than or equal to the cache
+ line size. */
+ if (delta >= (HOST_WIDE_INT) cache_line_size)
+ return false;
+
+ miss_positions = 0;
+ total_positions = (cache_line_size / align_unit) * distinct_iters;
+ max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;
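+ /* Illustration (values assumed, not mandated by this change): with
+ cache_line_size 64, align_unit 4 and distinct_iters 2 there are 32
+ positions to test; ACCEPTABLE_MISS_RATE 50 then allows at most
+ (50 * 32) / 1000 == 1 missing position. */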
+
+ /* Iterate through all possible alignments of the first
+ memory reference within its cache line. */
+ for (align = 0; align < cache_line_size; align += align_unit)
+
+ /* Iterate through all distinct iterations. */
+ for (iter = 0; iter < distinct_iters; iter++)
+ {
+ address1 = align + step * iter;
+ address2 = address1 + delta;
+ cache_line1 = address1 / cache_line_size;
+ cache_line2 = address2 / cache_line_size;
+ if (cache_line1 != cache_line2)
+ {
+ miss_positions += 1;
+ if (miss_positions > max_allowed_miss_positions)
+ return false;
+ }
+ }
+ return true;
+}
+
/* Prune the prefetch candidate REF using the reuse with BY.
If BY_IS_BEFORE is true, BY is before REF in the loop. */
prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
bool by_is_before)
{
- HOST_WIDE_INT step = ref->group->step;
- bool backward = step < 0;
+ HOST_WIDE_INT step;
+ bool backward;
HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
HOST_WIDE_INT delta = delta_b - delta_r;
HOST_WIDE_INT hit_from;
unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
+ HOST_WIDE_INT reduced_step;
+ unsigned HOST_WIDE_INT reduced_prefetch_block;
+ tree ref_type;
+ int align_unit;
+
+ /* If the step is non-constant, we cannot calculate prefetch_before. */
+ if (!cst_and_fits_in_hwi (ref->group->step))
+ return;
+
+ step = int_cst_value (ref->group->step);
+
+ backward = step < 0;
+
if (delta == 0)
{
former. */
if (by_is_before)
ref->prefetch_before = 0;
-
+
return;
}
hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
prefetch_before = (hit_from - delta_r + step - 1) / step;
+ /* Do not reduce prefetch_before if the reuse occurs beyond the cache size. */
+ if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step))
+ prefetch_before = PREFETCH_ALL;
if (prefetch_before < ref->prefetch_before)
ref->prefetch_before = prefetch_before;
return;
}
- /* A more complicated case. First let us ensure that size of cache line
- and step are coprime (here we assume that PREFETCH_BLOCK is a power
- of two. */
+ /* A more complicated case with step > prefetch_block. First reduce
+ the ratio between the step and the cache line size to its simplest
+ terms. The resulting denominator will then represent the number of
+ distinct iterations after which each address will go back to its
+ initial location within the cache line. This computation assumes
+ that PREFETCH_BLOCK is a power of two. */
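+ /* For illustration (values assumed): with PREFETCH_BLOCK 64 and step 96,
+ the reduction below yields reduced_step 3 and reduced_prefetch_block 2,
+ i.e. the pattern of cache line offsets repeats after 2 distinct
+ iterations (2 * 96 == 192 bytes, a multiple of the line size). */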
prefetch_block = PREFETCH_BLOCK;
- while ((step & 1) == 0
- && prefetch_block > 1)
+ reduced_prefetch_block = prefetch_block;
+ reduced_step = step;
+ while ((reduced_step & 1) == 0
+ && reduced_prefetch_block > 1)
{
- step >>= 1;
- prefetch_block >>= 1;
- delta >>= 1;
+ reduced_step >>= 1;
+ reduced_prefetch_block >>= 1;
}
- /* Now step > prefetch_block, and step and prefetch_block are coprime.
- Determine the probability that the accesses hit the same cache line. */
-
prefetch_before = delta / step;
delta %= step;
- if ((unsigned HOST_WIDE_INT) delta
- <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
+ ref_type = TREE_TYPE (ref->mem);
+ align_unit = TYPE_ALIGN (ref_type) / 8;
+ if (is_miss_rate_acceptable (prefetch_block, step, delta,
+ reduced_prefetch_block, align_unit))
{
+ /* Do not reduce prefetch_before if the reuse occurs beyond the cache size. */
+ if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
+ prefetch_before = PREFETCH_ALL;
if (prefetch_before < ref->prefetch_before)
ref->prefetch_before = prefetch_before;
/* Try also the following iteration. */
prefetch_before++;
delta = step - delta;
- if ((unsigned HOST_WIDE_INT) delta
- <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
+ if (is_miss_rate_acceptable (prefetch_block, step, delta,
+ reduced_prefetch_block, align_unit))
{
if (prefetch_before < ref->prefetch_before)
ref->prefetch_before = prefetch_before;
/* For now do not issue prefetches for only the first few of the
iterations. */
if (ref->prefetch_before != PREFETCH_ALL)
- return false;
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
+ (void *) ref);
+ return false;
+ }
+
+ /* Do not prefetch nontemporal stores. */
+ if (ref->storent_p)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
+ return false;
+ }
return true;
}
schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
unsigned ahead)
{
- unsigned max_prefetches, n_prefetches;
+ unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
+ unsigned slots_per_prefetch;
struct mem_ref *ref;
bool any = false;
- max_prefetches = (SIMULTANEOUS_PREFETCHES * unroll_factor) / ahead;
- if (max_prefetches > (unsigned) SIMULTANEOUS_PREFETCHES)
- max_prefetches = SIMULTANEOUS_PREFETCHES;
+ /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
+ remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
+ /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
+ AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
+ it will need a prefetch slot. */
+ slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
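+ /* E.g. (illustrative values): with ahead == 8 and unroll_factor == 4, each
+ issued prefetch stays in flight for roughly two iterations of the unrolled
+ loop and thus occupies (8 + 2) / 4 == 2 of the available slots. */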
if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Max prefetches to issue: %d.\n", max_prefetches);
-
- if (!max_prefetches)
- return false;
+ fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
+ slots_per_prefetch);
/* For now we just take memory references one by one and issue
prefetches for as many as possible. The groups are sorted
if (!should_issue_prefetch_p (ref))
continue;
- ref->issue_prefetch_p = true;
+ /* The loop is far from being sufficiently unrolled for this
+ prefetch. Do not generate the prefetch to avoid many redundant
+ prefetches. */
+ if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
+ continue;
- /* If prefetch_mod is less then unroll_factor, we need to insert
- several prefetches for the reference. */
+ /* If we need to prefetch the reference each PREFETCH_MOD iterations,
+ and we unroll the loop UNROLL_FACTOR times, we need to insert
+ ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
+ iteration. */
n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
/ ref->prefetch_mod);
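+ /* E.g. (illustrative): with unroll_factor 8 and prefetch_mod 4, two
+ prefetch instructions are needed for this reference in the unrolled
+ loop body. */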
- if (max_prefetches <= n_prefetches)
- return true;
+ prefetch_slots = n_prefetches * slots_per_prefetch;
+
+ /* If more than half of the prefetches would be lost anyway, do not
+ issue the prefetch. */
+ if (2 * remaining_prefetch_slots < prefetch_slots)
+ continue;
- max_prefetches -= n_prefetches;
+ ref->issue_prefetch_p = true;
+
+ if (remaining_prefetch_slots <= prefetch_slots)
+ return true;
+ remaining_prefetch_slots -= prefetch_slots;
any = true;
}
return any;
}
-/* Determine whether there is any reference suitable for prefetching
- in GROUPS. */
+/* Return TRUE if no prefetch is going to be generated in the given
+ GROUPS. */
static bool
-anything_to_prefetch_p (struct mem_ref_group *groups)
+nothing_to_prefetch_p (struct mem_ref_group *groups)
{
struct mem_ref *ref;
for (; groups; groups = groups->next)
for (ref = groups->refs; ref; ref = ref->next)
if (should_issue_prefetch_p (ref))
- return true;
+ return false;
- return false;
+ return true;
+}
+
+/* Estimate the number of prefetches in the given GROUPS.
+ UNROLL_FACTOR is the factor by which LOOP was unrolled. */
+
+static int
+estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
+{
+ struct mem_ref *ref;
+ unsigned n_prefetches;
+ int prefetch_count = 0;
+
+ for (; groups; groups = groups->next)
+ for (ref = groups->refs; ref; ref = ref->next)
+ if (should_issue_prefetch_p (ref))
+ {
+ n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
+ / ref->prefetch_mod);
+ prefetch_count += n_prefetches;
+ }
+
+ return prefetch_count;
}
/* Issue prefetches for the reference REF into loop as decided before.
issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
{
HOST_WIDE_INT delta;
- tree addr, addr_base, prefetch, params, write_p;
- block_stmt_iterator bsi;
+ tree addr, addr_base, write_p, local, forward;
+ gimple prefetch;
+ gimple_stmt_iterator bsi;
unsigned n_prefetches, ap;
+ bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Issued prefetch for %p.\n", (void *) ref);
+ fprintf (dump_file, "Issued%s prefetch for %p.\n",
+ nontemporal ? " nontemporal" : "",
+ (void *) ref);
- bsi = bsi_for_stmt (ref->stmt);
+ bsi = gsi_for_stmt (ref->stmt);
n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
/ ref->prefetch_mod);
addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
- addr_base = force_gimple_operand_bsi (&bsi, unshare_expr (addr_base), true, NULL);
+ addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
+ true, NULL, true, GSI_SAME_STMT);
+ write_p = ref->write_p ? integer_one_node : integer_zero_node;
+ local = nontemporal ? integer_zero_node : integer_three_node;
for (ap = 0; ap < n_prefetches; ap++)
{
- /* Determine the address to prefetch. */
- delta = (ahead + ap * ref->prefetch_mod) * ref->group->step;
- addr = fold_build2 (PLUS_EXPR, ptr_type_node,
- addr_base, build_int_cst (ptr_type_node, delta));
- addr = force_gimple_operand_bsi (&bsi, unshare_expr (addr), true, NULL);
-
+ if (cst_and_fits_in_hwi (ref->group->step))
+ {
+ /* Determine the address to prefetch. */
+ delta = (ahead + ap * ref->prefetch_mod) *
+ int_cst_value (ref->group->step);
+ addr = fold_build_pointer_plus_hwi (addr_base, delta);
+ addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
+ true, GSI_SAME_STMT);
+ }
+ else
+ {
+ /* The step size is non-constant but loop-invariant. We use the
+ heuristic of simply prefetching AHEAD iterations ahead. */
+ forward = fold_build2 (MULT_EXPR, sizetype,
+ fold_convert (sizetype, ref->group->step),
+ fold_convert (sizetype, size_int (ahead)));
+ addr = fold_build_pointer_plus (addr_base, forward);
+ addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
+ NULL, true, GSI_SAME_STMT);
+ }
/* Create the prefetch instruction. */
- write_p = ref->write_p ? integer_one_node : integer_zero_node;
- params = tree_cons (NULL_TREE, addr,
- tree_cons (NULL_TREE, write_p, NULL_TREE));
-
- prefetch = build_function_call_expr (built_in_decls[BUILT_IN_PREFETCH],
- params);
- bsi_insert_before (&bsi, prefetch, BSI_SAME_STMT);
+ prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
+ 3, addr, write_p, local);
+ gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
}
}
issue_prefetch_ref (ref, unroll_factor, ahead);
}
+/* Returns true if REF is a memory write for which a nontemporal store insn
+ can be used. */
+
+static bool
+nontemporal_store_p (struct mem_ref *ref)
+{
+ enum machine_mode mode;
+ enum insn_code code;
+
+ /* REF must be a write that is not reused. We require it to be independent
+ of all other memory references in the loop, as the nontemporal stores may
+ be reordered with respect to other memory references. */
+ if (!ref->write_p
+ || !ref->independent_p
+ || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
+ return false;
+
+ /* Check that we have the storent instruction for the mode. */
+ mode = TYPE_MODE (TREE_TYPE (ref->mem));
+ if (mode == BLKmode)
+ return false;
+
+ code = optab_handler (storent_optab, mode);
+ return code != CODE_FOR_nothing;
+}
+
+/* If REF is a nontemporal store, we mark the corresponding modify statement
+ and return true. Otherwise, we return false. */
+
+static bool
+mark_nontemporal_store (struct mem_ref *ref)
+{
+ if (!nontemporal_store_p (ref))
+ return false;
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
+ (void *) ref);
+
+ gimple_assign_set_nontemporal_move (ref->stmt, true);
+ ref->storent_p = true;
+
+ return true;
+}
+
+/* Issue a memory fence instruction after LOOP. */
+
+static void
+emit_mfence_after_loop (struct loop *loop)
+{
+ VEC (edge, heap) *exits = get_loop_exit_edges (loop);
+ edge exit;
+ gimple call;
+ gimple_stmt_iterator bsi;
+ unsigned i;
+
+ FOR_EACH_VEC_ELT (edge, exits, i, exit)
+ {
+ call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
+
+ if (!single_pred_p (exit->dest)
+ /* If possible, we prefer not to insert the fence on other paths
+ in the CFG. */
+ && !(exit->flags & EDGE_ABNORMAL))
+ split_loop_exit_edge (exit);
+ bsi = gsi_after_labels (exit->dest);
+
+ gsi_insert_before (&bsi, call, GSI_NEW_STMT);
+ mark_virtual_ops_for_renaming (call);
+ }
+
+ VEC_free (edge, heap, exits);
+ update_ssa (TODO_update_ssa_only_virtuals);
+}
+
+/* Returns true if we can use storent in loop, false otherwise. */
+
+static bool
+may_use_storent_in_loop_p (struct loop *loop)
+{
+ bool ret = true;
+
+ if (loop->inner != NULL)
+ return false;
+
+ /* If we must issue a mfence insn after using storent, check that there
+ is a suitable place for it at each of the loop exits. */
+ if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
+ {
+ VEC (edge, heap) *exits = get_loop_exit_edges (loop);
+ unsigned i;
+ edge exit;
+
+ FOR_EACH_VEC_ELT (edge, exits, i, exit)
+ if ((exit->flags & EDGE_ABNORMAL)
+ && exit->dest == EXIT_BLOCK_PTR)
+ ret = false;
+
+ VEC_free (edge, heap, exits);
+ }
+
+ return ret;
+}
+
+/* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
+ references in the loop. */
+
+static void
+mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
+{
+ struct mem_ref *ref;
+ bool any = false;
+
+ if (!may_use_storent_in_loop_p (loop))
+ return;
+
+ for (; groups; groups = groups->next)
+ for (ref = groups->refs; ref; ref = ref->next)
+ any |= mark_nontemporal_store (ref);
+
+ if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
+ emit_mfence_after_loop (loop);
+}
+
/* Determines whether we can profitably unroll LOOP FACTOR times, and if
this is the case, fill in DESC by the description of number of
iterations. */
/* Determine the coefficient by which to unroll LOOP, from the information
contained in the list of memory references REFS. Description of
- umber of iterations of LOOP is stored to DESC. AHEAD is the number
- of iterations ahead that we need to prefetch. NINSNS is number of
- insns of the LOOP. */
+ number of iterations of LOOP is stored to DESC. NINSNS is the number of
+ insns of the LOOP. EST_NITER is the estimated number of iterations of
+ the loop, or -1 if no estimate is available. */
static unsigned
determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
- unsigned ahead, unsigned ninsns,
- struct tree_niter_desc *desc)
+ unsigned ninsns, struct tree_niter_desc *desc,
+ HOST_WIDE_INT est_niter)
{
- unsigned upper_bound, size_factor, constraint_factor;
- unsigned factor, max_mod_constraint, ahead_factor;
+ unsigned upper_bound;
+ unsigned nfactor, factor, mod_constraint;
struct mem_ref_group *agp;
struct mem_ref *ref;
- upper_bound = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
-
- /* First check whether the loop is not too large to unroll. */
- size_factor = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
- if (size_factor <= 1)
+ /* First check whether the loop is not too large to unroll. We ignore
+ PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
+ from unrolling them enough to make exactly one cache line covered by each
+ iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
+ us from unrolling the loops too many times in cases where we only expect
+ gains from better scheduling and decreasing loop overhead, which is not
+ the case here. */
+ upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
+
+ /* If we unrolled the loop more times than it iterates, the unrolled version
+ of the loop would never be entered. */
+ if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
+ upper_bound = est_niter;
+
+ if (upper_bound <= 1)
return 1;
- if (size_factor < upper_bound)
- upper_bound = size_factor;
-
- max_mod_constraint = 1;
+ /* Choose the factor so that we may prefetch each cache line just once,
+ but bound the unrolling by UPPER_BOUND. */
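+ /* For example (illustrative values): references with prefetch_mod 4 and 6
+ lead to a least common multiple of 12, which becomes the factor provided
+ it does not exceed UPPER_BOUND. */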
+ factor = 1;
for (agp = refs; agp; agp = agp->next)
for (ref = agp->refs; ref; ref = ref->next)
- if (should_issue_prefetch_p (ref)
- && ref->prefetch_mod > max_mod_constraint)
- max_mod_constraint = ref->prefetch_mod;
-
- /* Set constraint_factor as large as needed to be able to satisfy the
- largest modulo constraint. */
- constraint_factor = max_mod_constraint;
-
- /* If ahead is too large in comparison with the number of available
- prefetches, unroll the loop as much as needed to be able to prefetch
- at least partially some of the references in the loop. */
- ahead_factor = ((ahead + SIMULTANEOUS_PREFETCHES - 1)
- / SIMULTANEOUS_PREFETCHES);
-
- /* Unroll as much as useful, but bound the code size growth. */
- if (constraint_factor < ahead_factor)
- factor = ahead_factor;
- else
- factor = constraint_factor;
- if (factor > upper_bound)
- factor = upper_bound;
+ if (should_issue_prefetch_p (ref))
+ {
+ mod_constraint = ref->prefetch_mod;
+ nfactor = least_common_multiple (mod_constraint, factor);
+ if (nfactor <= upper_bound)
+ factor = nfactor;
+ }
if (!should_unroll_loop_p (loop, desc, factor))
return 1;
return factor;
}
+/* Returns the total volume of the memory references REFS, taking into account
+ reuses in the innermost loop and cache line size. TODO -- we should also
+ take into account reuses across the iterations of the loops in the loop
+ nest. */
+
+static unsigned
+volume_of_references (struct mem_ref_group *refs)
+{
+ unsigned volume = 0;
+ struct mem_ref_group *gr;
+ struct mem_ref *ref;
+
+ for (gr = refs; gr; gr = gr->next)
+ for (ref = gr->refs; ref; ref = ref->next)
+ {
+ /* Almost always reuses another value? */
+ if (ref->prefetch_before != PREFETCH_ALL)
+ continue;
+
+ /* If several iterations access the same cache line, use the size of
+ the line divided by this number. Otherwise, a cache line is
+ accessed in each iteration. TODO -- in the latter case, we should
+ take the size of the reference into account, rounding it up to a
+ multiple of the cache line size. */
+ volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
+ }
+ return volume;
+}
+
+/* Returns the volume of memory references accessed across VEC iterations of
+ loops, whose sizes are described in the LOOP_SIZES array. N is the number
+ of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
+
+static unsigned
+volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
+{
+ unsigned i;
+
+ for (i = 0; i < n; i++)
+ if (vec[i] != 0)
+ break;
+
+ if (i == n)
+ return 0;
+
+ gcc_assert (vec[i] > 0);
+
+ /* We ignore the parts of the distance vector in subloops, since usually
+ the numbers of iterations are much smaller. */
+ return loop_sizes[i] * vec[i];
+}
+
+/* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
+ at the position corresponding to the loop of the step. N is the depth
+ of the considered loop nest, and LOOP is its innermost loop. */
+
+static void
+add_subscript_strides (tree access_fn, unsigned stride,
+ HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
+{
+ struct loop *aloop;
+ tree step;
+ HOST_WIDE_INT astep;
+ unsigned min_depth = loop_depth (loop) - n;
+
+ while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
+ {
+ aloop = get_chrec_loop (access_fn);
+ step = CHREC_RIGHT (access_fn);
+ access_fn = CHREC_LEFT (access_fn);
+
+ if ((unsigned) loop_depth (aloop) <= min_depth)
+ continue;
+
+ if (host_integerp (step, 0))
+ astep = tree_low_cst (step, 0);
+ else
+ astep = L1_CACHE_LINE_SIZE;
+
+ strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
+
+ }
+}
+
+/* Returns the volume of memory references accessed between two consecutive
+ self-reuses of the reference DR. We consider the subscripts of DR in N
+ loops, and LOOP_SIZES contains the volumes of accesses in each of the
+ loops. LOOP is the innermost loop of the current loop nest. */
+
+static unsigned
+self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
+ struct loop *loop)
+{
+ tree stride, access_fn;
+ HOST_WIDE_INT *strides, astride;
+ VEC (tree, heap) *access_fns;
+ tree ref = DR_REF (dr);
+ unsigned i, ret = ~0u;
+
+ /* In the following example:
+
+ for (i = 0; i < N; i++)
+ for (j = 0; j < N; j++)
+ use (a[j][i]);
+ the same cache line is accessed every N steps (except if the change from
+ i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
+ we cannot rely purely on the results of the data dependence analysis.
+
+ Instead, we compute the stride of the reference in each loop, and consider
+ the innermost loop in which the stride is less than the cache size. */
+
+ strides = XCNEWVEC (HOST_WIDE_INT, n);
+ access_fns = DR_ACCESS_FNS (dr);
+
+ FOR_EACH_VEC_ELT (tree, access_fns, i, access_fn)
+ {
+ /* Keep track of the reference corresponding to the subscript, so that we
+ know its stride. */
+ while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
+ ref = TREE_OPERAND (ref, 0);
+
+ if (TREE_CODE (ref) == ARRAY_REF)
+ {
+ stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
+ if (host_integerp (stride, 1))
+ astride = tree_low_cst (stride, 1);
+ else
+ astride = L1_CACHE_LINE_SIZE;
+
+ ref = TREE_OPERAND (ref, 0);
+ }
+ else
+ astride = 1;
+
+ add_subscript_strides (access_fn, astride, strides, n, loop);
+ }
+
+ for (i = n; i-- > 0; )
+ {
+ unsigned HOST_WIDE_INT s;
+
+ s = strides[i] < 0 ? -strides[i] : strides[i];
+
+ if (s < (unsigned) L1_CACHE_LINE_SIZE
+ && (loop_sizes[i]
+ > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
+ {
+ ret = loop_sizes[i];
+ break;
+ }
+ }
+
+ free (strides);
+ return ret;
+}
+
+/* Determines the distance till the first reuse of each reference in REFS
+ in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
+ memory references in the loop. */
+
+static void
+determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
+ bool no_other_refs)
+{
+ struct loop *nest, *aloop;
+ VEC (data_reference_p, heap) *datarefs = NULL;
+ VEC (ddr_p, heap) *dependences = NULL;
+ struct mem_ref_group *gr;
+ struct mem_ref *ref, *refb;
+ VEC (loop_p, heap) *vloops = NULL;
+ unsigned *loop_data_size;
+ unsigned i, j, n;
+ unsigned volume, dist, adist;
+ HOST_WIDE_INT vol;
+ data_reference_p dr;
+ ddr_p dep;
+
+ if (loop->inner)
+ return;
+
+ /* Find the outermost loop of the loop nest of loop (we require that
+ there are no sibling loops inside the nest). */
+ nest = loop;
+ while (1)
+ {
+ aloop = loop_outer (nest);
+
+ if (aloop == current_loops->tree_root
+ || aloop->inner->next)
+ break;
+
+ nest = aloop;
+ }
+
+ /* For each loop, determine the amount of data accessed in each iteration.
+ We use this to estimate whether the reference is evicted from the
+ cache before its reuse. */
+ find_loop_nest (nest, &vloops);
+ n = VEC_length (loop_p, vloops);
+ loop_data_size = XNEWVEC (unsigned, n);
+ volume = volume_of_references (refs);
+ i = n;
+ while (i-- != 0)
+ {
+ loop_data_size[i] = volume;
+ /* Bound the volume by the L2 cache size, since above this bound,
+ all dependence distances are equivalent. */
+ if (volume > L2_CACHE_SIZE_BYTES)
+ continue;
+
+ aloop = VEC_index (loop_p, vloops, i);
+ vol = max_stmt_executions_int (aloop, false);
+ if (vol < 0)
+ vol = expected_loop_iterations (aloop);
+ volume *= vol;
+ }
+
+ /* Prepare the references in the form suitable for data dependence
+ analysis. We ignore unanalyzable data references (the results
+ are used just as a heuristic to estimate the temporality of the
+ references, hence we do not need to worry about correctness). */
+ for (gr = refs; gr; gr = gr->next)
+ for (ref = gr->refs; ref; ref = ref->next)
+ {
+ dr = create_data_ref (nest, loop_containing_stmt (ref->stmt),
+ ref->mem, ref->stmt, !ref->write_p);
+
+ if (dr)
+ {
+ ref->reuse_distance = volume;
+ dr->aux = ref;
+ VEC_safe_push (data_reference_p, heap, datarefs, dr);
+ }
+ else
+ no_other_refs = false;
+ }
+
+ FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ {
+ dist = self_reuse_distance (dr, loop_data_size, n, loop);
+ ref = (struct mem_ref *) dr->aux;
+ if (ref->reuse_distance > dist)
+ ref->reuse_distance = dist;
+
+ if (no_other_refs)
+ ref->independent_p = true;
+ }
+
+ compute_all_dependences (datarefs, &dependences, vloops, true);
+
+ FOR_EACH_VEC_ELT (ddr_p, dependences, i, dep)
+ {
+ if (DDR_ARE_DEPENDENT (dep) == chrec_known)
+ continue;
+
+ ref = (struct mem_ref *) DDR_A (dep)->aux;
+ refb = (struct mem_ref *) DDR_B (dep)->aux;
+
+ if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
+ || DDR_NUM_DIST_VECTS (dep) == 0)
+ {
+ /* If the dependence cannot be analyzed, assume that there might be
+ a reuse. */
+ dist = 0;
+
+ ref->independent_p = false;
+ refb->independent_p = false;
+ }
+ else
+ {
+ /* The distance vectors are normalized to be always lexicographically
+ positive, hence we cannot tell just from them whether DDR_A comes
+ before DDR_B or vice versa. However, it is not important,
+ anyway -- if DDR_A is close to DDR_B, then it is either reused in
+ DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
+ in cache (and marking it as nontemporal would not affect
+ anything). */
+
+ dist = volume;
+ for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
+ {
+ adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
+ loop_data_size, n);
+
+ /* If this is a dependence in the innermost loop (i.e., the
+ distances in all superloops are zero) and it is not
+ the trivial self-dependence with distance zero, record that
+ the references are not completely independent. */
+ if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
+ && (ref != refb
+ || DDR_DIST_VECT (dep, j)[n-1] != 0))
+ {
+ ref->independent_p = false;
+ refb->independent_p = false;
+ }
+
+ /* Ignore accesses closer than
+ L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
+ so that we use nontemporal prefetches e.g. if single memory
+ location is accessed several times in a single iteration of
+ the loop. */
+ if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
+ continue;
+
+ if (adist < dist)
+ dist = adist;
+ }
+ }
+
+ if (ref->reuse_distance > dist)
+ ref->reuse_distance = dist;
+ if (refb->reuse_distance > dist)
+ refb->reuse_distance = dist;
+ }
+
+ free_dependence_relations (dependences);
+ free_data_refs (datarefs);
+ free (loop_data_size);
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Reuse distances:\n");
+ for (gr = refs; gr; gr = gr->next)
+ for (ref = gr->refs; ref; ref = ref->next)
+ fprintf (dump_file, " ref %p distance %u\n",
+ (void *) ref, ref->reuse_distance);
+ }
+}
+
+/* Determine whether or not the trip count to ahead ratio is too small based
+ on profitability considerations.
+ AHEAD: the iteration ahead distance,
+ EST_NITER: the estimated trip count. */
+
+static bool
+trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
+{
+ /* Assume trip count to ahead ratio is big enough if the trip count could not
+ be estimated at compile time. */
+ if (est_niter < 0)
+ return false;
+
+ if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file,
+ "Not prefetching -- loop estimated to roll only %d times\n",
+ (int) est_niter);
+ return true;
+ }
+
+ return false;
+}
+
+/* Determine whether or not the number of memory references in the loop is
+ reasonable based on profitability and compilation time considerations.
+ NINSNS: estimated number of instructions in the loop,
+ MEM_REF_COUNT: total number of memory references in the loop. */
+
+static bool
+mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
+{
+ int insn_to_mem_ratio;
+
+ if (mem_ref_count == 0)
+ return false;
+
+ /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
+ (compute_all_dependences) have high costs based on quadratic complexity.
+ To avoid huge compilation time, we give up prefetching if mem_ref_count
+ is too large. */
+ if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
+ return false;
+
+ /* Prefetching improves performance by overlapping cache missing
+ memory accesses with CPU operations. If the loop does not have
+ enough CPU operations to overlap with memory operations, prefetching
+ won't give a significant benefit. One approximate way of checking
+ this is to require the ratio of instructions to memory references to
+ be above a certain limit. This approximation works well in practice.
+ TODO: Implement a more precise computation by estimating the time
+ for each CPU or memory op in the loop. Time estimates for memory ops
+ should account for cache misses. */
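+ For illustration (numbers assumed): a loop body of 30 insns with 10
+ memory references has an insn-to-mem ratio of 3; prefetching is skipped
+ if that falls below PREFETCH_MIN_INSN_TO_MEM_RATIO. */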
+ insn_to_mem_ratio = ninsns / mem_ref_count;
+
+ if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file,
+ "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
+ insn_to_mem_ratio);
+ return false;
+ }
+
+ return true;
+}
+
+/* Determine whether or not the instruction to prefetch ratio in the loop is
+ too small based on profitability considerations.
+ NINSNS: estimated number of instructions in the loop,
+ PREFETCH_COUNT: an estimate of the number of prefetches,
+ UNROLL_FACTOR: the factor to unroll the loop if prefetching. */
+
+static bool
+insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
+ unsigned unroll_factor)
+{
+ int insn_to_prefetch_ratio;
+
+ /* Prefetching most likely causes performance degradation when the instruction
+ to prefetch ratio is too small. Too many prefetch instructions in a loop
+ may reduce the I-cache performance.
+ (unroll_factor * ninsns) is used to estimate the number of instructions in
+ the unrolled loop. This implementation is a bit simplistic -- the number
+ of issued prefetch instructions is also affected by unrolling. So,
+ prefetch_mod and the unroll factor should be taken into account when
+ determining prefetch_count. Also, the number of insns of the unrolled
+ loop will usually be significantly smaller than the number of insns of the
+ original loop * unroll_factor (at least the induction variable increases
+ and the exit branches will get eliminated), so it might be better to use
+ tree_estimate_loop_size + estimated_unrolled_size. */
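+ /* For illustration (numbers assumed): with ninsns == 10, unroll_factor == 4
+ and prefetch_count == 8, the estimated ratio is (4 * 10) / 8 == 5
+ instructions per prefetch, compared against MIN_INSN_TO_PREFETCH_RATIO
+ below. */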
+ insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
+ if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file,
+ "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
+ insn_to_prefetch_ratio);
+ return true;
+ }
+
+ return false;
+}
+
+
/* Issue prefetch instructions for array references in LOOP. Returns
- true if the LOOP was unrolled. LOOPS is the array containing all
- loops. */
+ true if the LOOP was unrolled. */
static bool
-loop_prefetch_arrays (struct loops *loops, struct loop *loop)
+loop_prefetch_arrays (struct loop *loop)
{
struct mem_ref_group *refs;
- unsigned ahead, ninsns, unroll_factor;
+ unsigned ahead, ninsns, time, unroll_factor;
+ HOST_WIDE_INT est_niter;
struct tree_niter_desc desc;
- bool unrolled = false;
+ bool unrolled = false, no_other_refs;
+ unsigned prefetch_count;
+ unsigned mem_ref_count;
+
+ if (optimize_loop_nest_for_size_p (loop))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " ignored (cold area)\n");
+ return false;
+ }
+
+ /* FIXME: the time should be weighted by the probabilities of the blocks in
+ the loop body. */
+ time = tree_num_loop_insns (loop, &eni_time_weights);
+ if (time == 0)
+ return false;
+
+ ahead = (PREFETCH_LATENCY + time - 1) / time;
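+ /* E.g. (illustrative values): with PREFETCH_LATENCY 200 and a loop body
+ costing about 25 time units per iteration, we prefetch
+ (200 + 25 - 1) / 25 == 8 iterations ahead. */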
+ est_niter = max_stmt_executions_int (loop, false);
+
+ /* Prefetching is not likely to be profitable if the trip count to ahead
+ ratio is too small. */
+ if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
+ return false;
+
+ ninsns = tree_num_loop_insns (loop, &eni_size_weights);
/* Step 1: gather the memory references. */
- refs = gather_memory_references (loop);
+ refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
+
+ /* Give up prefetching if the number of memory references in the
+ loop is not reasonable based on profitability and compilation time
+ considerations. */
+ if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
+ goto fail;
/* Step 2: estimate the reuse effects. */
prune_by_reuse (refs);
- if (!anything_to_prefetch_p (refs))
+ if (nothing_to_prefetch_p (refs))
goto fail;
- /* Step 3: determine the ahead and unroll factor. */
+ determine_loop_nest_reuse (loop, refs, no_other_refs);
- /* FIXME: We should use not size of the loop, but the average number of
- instructions executed per iteration of the loop. */
- ninsns = tree_num_loop_insns (loop);
- ahead = (PREFETCH_LATENCY + ninsns - 1) / ninsns;
- unroll_factor = determine_unroll_factor (loop, refs, ahead, ninsns,
- &desc);
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Ahead %d, unroll factor %d\n", ahead, unroll_factor);
+ /* Step 3: determine unroll factor. */
+ unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
+ est_niter);
+
+ /* Estimate prefetch count for the unrolled loop. */
+ prefetch_count = estimate_prefetch_count (refs, unroll_factor);
+ if (prefetch_count == 0)
+ goto fail;
- /* If the loop rolls less than the required unroll factor, prefetching
- is useless. */
- if (unroll_factor > 1
- && cst_and_fits_in_hwi (desc.niter)
- && (unsigned HOST_WIDE_INT) int_cst_value (desc.niter) < unroll_factor)
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
+ HOST_WIDE_INT_PRINT_DEC "\n"
+ "insn count %d, mem ref count %d, prefetch count %d\n",
+ ahead, unroll_factor, est_niter,
+ ninsns, mem_ref_count, prefetch_count);
+
+ /* Prefetching is not likely to be profitable if the instruction to prefetch
+ ratio is too small. */
+ if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
+ unroll_factor))
goto fail;
+ mark_nontemporal_stores (loop, refs);
+
/* Step 4: what to prefetch? */
if (!schedule_prefetches (refs, unroll_factor, ahead))
goto fail;
iterations so that we do not issue superfluous prefetches. */
if (unroll_factor != 1)
{
- tree_unroll_loop (loops, loop, unroll_factor,
+ tree_unroll_loop (loop, unroll_factor,
single_dom_exit (loop), &desc);
unrolled = true;
}
return unrolled;
}
-/* Issue prefetch instructions for array references in LOOPS. */
+/* Issue prefetch instructions for array references in loops. */
unsigned int
-tree_ssa_prefetch_arrays (struct loops *loops)
+tree_ssa_prefetch_arrays (void)
{
- unsigned i;
+ loop_iterator li;
struct loop *loop;
bool unrolled = false;
int todo_flags = 0;
|| PREFETCH_BLOCK == 0)
return 0;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Prefetching parameters:\n");
+ fprintf (dump_file, " simultaneous prefetches: %d\n",
+ SIMULTANEOUS_PREFETCHES);
+ fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
+ fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
+ fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
+ L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
+ fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
+ fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
+ fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
+ MIN_INSN_TO_PREFETCH_RATIO);
+ fprintf (dump_file, " min insn-to-mem ratio: %d \n",
+ PREFETCH_MIN_INSN_TO_MEM_RATIO);
+ fprintf (dump_file, "\n");
+ }
+
initialize_original_copy_tables ();
if (!built_in_decls[BUILT_IN_PREFETCH])
{
- tree type = build_function_type (void_type_node,
- tree_cons (NULL_TREE,
- const_ptr_type_node,
- NULL_TREE));
+ tree type = build_function_type_list (void_type_node,
+ const_ptr_type_node, NULL_TREE);
tree decl = add_builtin_function ("__builtin_prefetch", type,
BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
NULL, NULL_TREE);
here. */
gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
- for (i = loops->num - 1; i > 0; i--)
+ FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
{
- loop = loops->parray[i];
-
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Processing loop %d:\n", loop->num);
- if (loop)
- unrolled |= loop_prefetch_arrays (loops, loop);
+ unrolled |= loop_prefetch_arrays (loop);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "\n\n");