/* Array prefetching.
- Copyright (C) 2005 Free Software Foundation, Inc.
-
+ Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
-Free Software Foundation; either version 2, or (at your option) any
+Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
while still within this bound (starting with those with lowest
prefetch_mod, since they are responsible for most of the cache
misses).
-
+
5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
and PREFETCH_BEFORE requirements (within some bounds), and to avoid
prefetching nonaccessed memory.
TODO -- actually implement peeling.
-
+
6) We actually emit the prefetch instructions. ??? Perhaps emit the
prefetch instructions with guards in cases where 5) was not sufficient
to satisfy the constraints?
+ The function is_loop_prefetching_profitable() implements a cost model
+ to determine if prefetching is profitable for a given loop. The cost
+ model has two heuristics:
+ 1. A heuristic that determines whether the given loop has enough CPU
+ ops that can be overlapped with cache missing memory ops.
+ If not, the loop won't benefit from prefetching. This is implemented
+ by requiring the ratio between the instruction count and the mem ref
+ count to be above a certain minimum.
+ 2. A heuristic that disables prefetching in a loop with an unknown trip
+ count if the prefetching cost is above a certain limit. The relative
+ prefetching cost is estimated by taking the ratio between the
+ prefetch count and the total instruction count (this models the I-cache
+ cost).
+ The limits used in these heuristics are defined as parameters with
+ reasonable default values. Machine-specific default values will be
+ added later.
+
Some other TODO:
-- write and use more general reuse analysis (that could be also used
in other cache aimed loop optimizations)
/* In some cases we are only able to determine that there is a certain
probability that the two accesses hit the same cache line. In this
case, we issue the prefetches for both of them if this probability
- is less then (1000 - ACCEPTABLE_MISS_RATE) promile. */
+ is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand. */
#ifndef ACCEPTABLE_MISS_RATE
#define ACCEPTABLE_MISS_RATE 50
#define HAVE_prefetch 0
#endif
-#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * L1_CACHE_LINE_SIZE))
-/* TODO: Add parameter to specify L2 cache size. */
-#define L2_CACHE_SIZE_BYTES (8 * L1_CACHE_SIZE_BYTES)
+#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
+#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
/* We consider a memory access nontemporal if it is not reused sooner than
after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
struct mem_ref
{
- tree stmt; /* Statement in that the reference appears. */
+ gimple stmt; /* Statement in that the reference appears. */
tree mem; /* The reference. */
HOST_WIDE_INT delta; /* Constant offset of the reference. */
struct mem_ref_group *group; /* The group of references it belongs to. */
WRITE_P. The reference occurs in statement STMT. */
static void
-record_ref (struct mem_ref_group *group, tree stmt, tree mem,
+record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
HOST_WIDE_INT delta, bool write_p)
{
struct mem_ref **aref;
struct ar_data
{
struct loop *loop; /* Loop of the reference. */
- tree stmt; /* Statement of the reference. */
+ gimple stmt; /* Statement of the reference. */
HOST_WIDE_INT *step; /* Step of the memory reference. */
HOST_WIDE_INT *delta; /* Offset of the memory reference. */
};
|| TREE_CODE (base) == ALIGN_INDIRECT_REF)
return false;
- if (!simple_iv (ar_data->loop, ar_data->stmt, *index, &iv, false))
+ if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
+ *index, &iv, false))
return false;
ibase = iv.base;
step = iv.step;
static bool
analyze_ref (struct loop *loop, tree *ref_p, tree *base,
HOST_WIDE_INT *step, HOST_WIDE_INT *delta,
- tree stmt)
+ gimple stmt)
{
struct ar_data ar_data;
tree off;
off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
bit_offset = TREE_INT_CST_LOW (off);
gcc_assert (bit_offset % BITS_PER_UNIT == 0);
-
+
*delta += bit_offset / BITS_PER_UNIT;
}
static bool
gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
- tree ref, bool write_p, tree stmt)
+ tree ref, bool write_p, gimple stmt)
{
tree base;
HOST_WIDE_INT step, delta;
struct mem_ref_group *agrp;
+ if (get_base_address (ref) == NULL)
+ return false;
+
if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
return false;
true if there are no other memory references inside the loop. */
static struct mem_ref_group *
-gather_memory_references (struct loop *loop, bool *no_other_refs)
+gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
{
basic_block *body = get_loop_body_in_dom_order (loop);
basic_block bb;
unsigned i;
- block_stmt_iterator bsi;
- tree stmt, lhs, rhs, call;
+ gimple_stmt_iterator bsi;
+ gimple stmt;
+ tree lhs, rhs;
struct mem_ref_group *refs = NULL;
*no_other_refs = true;
+ *ref_count = 0;
/* Scan the loop body in order, so that the former references precede the
later ones. */
if (bb->loop_father != loop)
continue;
- for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
+ for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
{
- stmt = bsi_stmt (bsi);
- call = get_call_expr_in (stmt);
- if (call && !(call_expr_flags (call) & ECF_CONST))
- *no_other_refs = false;
+ stmt = gsi_stmt (bsi);
- if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
+ if (gimple_code (stmt) != GIMPLE_ASSIGN)
{
- if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
+ if (gimple_vuse (stmt)
+ || (is_gimple_call (stmt)
+ && !(gimple_call_flags (stmt) & ECF_CONST)))
*no_other_refs = false;
continue;
}
- lhs = GIMPLE_STMT_OPERAND (stmt, 0);
- rhs = GIMPLE_STMT_OPERAND (stmt, 1);
+ lhs = gimple_assign_lhs (stmt);
+ rhs = gimple_assign_rhs1 (stmt);
if (REFERENCE_CLASS_P (rhs))
+ {
*no_other_refs &= gather_memory_references_ref (loop, &refs,
rhs, false, stmt);
+ *ref_count += 1;
+ }
if (REFERENCE_CLASS_P (lhs))
+ {
*no_other_refs &= gather_memory_references_ref (loop, &refs,
lhs, true, stmt);
+ *ref_count += 1;
+ }
}
}
free (body);
return (x + by - 1) / by;
}
+/* Given a CACHE_LINE_SIZE and two inductive memory references
+   with a common STEP greater than CACHE_LINE_SIZE and an address
+   difference DELTA, compute the probability (in thousandths) that
+   they will fall into different cache lines.  DISTINCT_ITERS is the
+   number of distinct iterations after which the pattern repeats itself.
+   ALIGN_UNIT is the unit of alignment in bytes.  */
+
+static int
+compute_miss_rate (unsigned HOST_WIDE_INT cache_line_size,
+		   HOST_WIDE_INT step, HOST_WIDE_INT delta,
+		   unsigned HOST_WIDE_INT distinct_iters,
+		   int align_unit)
+{
+  unsigned align, iter;
+  int total_positions, miss_positions, miss_rate;
+  int address1, address2, cache_line1, cache_line2;
+
+  /* The caller computes ALIGN_UNIT as TYPE_ALIGN / 8, which truncates
+     to zero for a type aligned to less than one byte; that would make
+     the alignment loop below never terminate.  Guard against it.  */
+  if (align_unit <= 0)
+    align_unit = 1;
+
+  total_positions = 0;
+  miss_positions = 0;
+
+  /* Iterate through all possible alignments of the first
+     memory reference within its cache line.  */
+  for (align = 0; align < cache_line_size; align += align_unit)
+
+    /* Iterate through all distinct iterations.  */
+    for (iter = 0; iter < distinct_iters; iter++)
+      {
+	address1 = align + step * iter;
+	address2 = address1 + delta;
+	cache_line1 = address1 / cache_line_size;
+	cache_line2 = address2 / cache_line_size;
+	total_positions += 1;
+	if (cache_line1 != cache_line2)
+	  miss_positions += 1;
+      }
+
+  /* If no positions were examined (degenerate CACHE_LINE_SIZE of zero),
+     conservatively report a certain miss instead of dividing by zero.  */
+  if (total_positions == 0)
+    return 1000;
+
+  miss_rate = 1000 * miss_positions / total_positions;
+  return miss_rate;
+}
+
/* Prune the prefetch candidate REF using the reuse with BY.
If BY_IS_BEFORE is true, BY is before REF in the loop. */
HOST_WIDE_INT delta = delta_b - delta_r;
HOST_WIDE_INT hit_from;
unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
+ int miss_rate;
+ HOST_WIDE_INT reduced_step;
+ unsigned HOST_WIDE_INT reduced_prefetch_block;
+ tree ref_type;
+ int align_unit;
if (delta == 0)
{
former. */
if (by_is_before)
ref->prefetch_before = 0;
-
+
return;
}
return;
}
- /* A more complicated case. First let us ensure that size of cache line
- and step are coprime (here we assume that PREFETCH_BLOCK is a power
- of two. */
+ /* A more complicated case with step > prefetch_block. First reduce
+ the ratio between the step and the cache line size to its simplest
+ terms. The resulting denominator will then represent the number of
+ distinct iterations after which each address will go back to its
+ initial location within the cache line. This computation assumes
+ that PREFETCH_BLOCK is a power of two. */
prefetch_block = PREFETCH_BLOCK;
- while ((step & 1) == 0
- && prefetch_block > 1)
+ reduced_prefetch_block = prefetch_block;
+ reduced_step = step;
+ while ((reduced_step & 1) == 0
+ && reduced_prefetch_block > 1)
{
- step >>= 1;
- prefetch_block >>= 1;
- delta >>= 1;
+ reduced_step >>= 1;
+ reduced_prefetch_block >>= 1;
}
- /* Now step > prefetch_block, and step and prefetch_block are coprime.
- Determine the probability that the accesses hit the same cache line. */
-
prefetch_before = delta / step;
delta %= step;
- if ((unsigned HOST_WIDE_INT) delta
- <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
+ ref_type = TREE_TYPE (ref->mem);
+ align_unit = TYPE_ALIGN (ref_type) / 8;
+ miss_rate = compute_miss_rate(prefetch_block, step, delta,
+ reduced_prefetch_block, align_unit);
+ if (miss_rate <= ACCEPTABLE_MISS_RATE)
{
if (prefetch_before < ref->prefetch_before)
ref->prefetch_before = prefetch_before;
/* Try also the following iteration. */
prefetch_before++;
delta = step - delta;
- if ((unsigned HOST_WIDE_INT) delta
- <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
+ miss_rate = compute_miss_rate(prefetch_block, step, delta,
+ reduced_prefetch_block, align_unit);
+ if (miss_rate <= ACCEPTABLE_MISS_RATE)
{
if (prefetch_before < ref->prefetch_before)
ref->prefetch_before = prefetch_before;
return any;
}
-/* Determine whether there is any reference suitable for prefetching
- in GROUPS. */
+/* Estimate the number of prefetches in the given GROUPS. */
-static bool
-anything_to_prefetch_p (struct mem_ref_group *groups)
+static int
+estimate_prefetch_count (struct mem_ref_group *groups)
{
struct mem_ref *ref;
+ int prefetch_count = 0;
for (; groups; groups = groups->next)
for (ref = groups->refs; ref; ref = ref->next)
if (should_issue_prefetch_p (ref))
- return true;
+ prefetch_count++;
- return false;
+ return prefetch_count;
}
/* Issue prefetches for the reference REF into loop as decided before.
issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
{
HOST_WIDE_INT delta;
- tree addr, addr_base, prefetch, write_p, local;
- block_stmt_iterator bsi;
+ tree addr, addr_base, write_p, local;
+ gimple prefetch;
+ gimple_stmt_iterator bsi;
unsigned n_prefetches, ap;
bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
nontemporal ? " nontemporal" : "",
(void *) ref);
- bsi = bsi_for_stmt (ref->stmt);
+ bsi = gsi_for_stmt (ref->stmt);
n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
/ ref->prefetch_mod);
addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
- addr_base = force_gimple_operand_bsi (&bsi, unshare_expr (addr_base), true, NULL);
+ addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
+ true, NULL, true, GSI_SAME_STMT);
write_p = ref->write_p ? integer_one_node : integer_zero_node;
local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);
delta = (ahead + ap * ref->prefetch_mod) * ref->group->step;
addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
addr_base, size_int (delta));
- addr = force_gimple_operand_bsi (&bsi, unshare_expr (addr), true, NULL);
+ addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
+ true, GSI_SAME_STMT);
/* Create the prefetch instruction. */
- prefetch = build_call_expr (built_in_decls[BUILT_IN_PREFETCH],
- 3, addr, write_p, local);
- bsi_insert_before (&bsi, prefetch, BSI_SAME_STMT);
+ prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
+ 3, addr, write_p, local);
+ gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
}
}
if (mode == BLKmode)
return false;
- code = storent_optab->handlers[mode].insn_code;
+ code = optab_handler (storent_optab, mode)->insn_code;
return code != CODE_FOR_nothing;
}
fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
(void *) ref);
- MOVE_NONTEMPORAL (ref->stmt) = true;
+ gimple_assign_set_nontemporal_move (ref->stmt, true);
ref->storent_p = true;
return true;
{
VEC (edge, heap) *exits = get_loop_exit_edges (loop);
edge exit;
- tree call;
- block_stmt_iterator bsi;
+ gimple call;
+ gimple_stmt_iterator bsi;
unsigned i;
for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
{
- call = build_function_call_expr (FENCE_FOLLOWING_MOVNT, NULL_TREE);
+ call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
if (!single_pred_p (exit->dest)
/* If possible, we prefer not to insert the fence on other paths
in cfg. */
&& !(exit->flags & EDGE_ABNORMAL))
split_loop_exit_edge (exit);
- bsi = bsi_after_labels (exit->dest);
+ bsi = gsi_after_labels (exit->dest);
- bsi_insert_before (&bsi, call, BSI_NEW_STMT);
+ gsi_insert_before (&bsi, call, GSI_NEW_STMT);
mark_virtual_ops_for_renaming (call);
}
know its stride. */
while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
ref = TREE_OPERAND (ref, 0);
-
+
if (TREE_CODE (ref) == ARRAY_REF)
{
stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
{
dist = self_reuse_distance (dr, loop_data_size, n, loop);
- ref = dr->aux;
+ ref = (struct mem_ref *) dr->aux;
if (ref->reuse_distance > dist)
ref->reuse_distance = dist;
if (DDR_ARE_DEPENDENT (dep) == chrec_known)
continue;
- ref = DDR_A (dep)->aux;
- refb = DDR_B (dep)->aux;
+ ref = (struct mem_ref *) DDR_A (dep)->aux;
+ refb = (struct mem_ref *) DDR_B (dep)->aux;
if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
|| DDR_NUM_DIST_VECTS (dep) == 0)
/* If the dependence cannot be analyzed, assume that there might be
a reuse. */
dist = 0;
-
+
ref->independent_p = false;
refb->independent_p = false;
}
}
}
+/* Do a cost-benefit analysis to determine if prefetching is profitable
+   for the current loop given the following parameters:
+   AHEAD: the iteration ahead distance,
+   EST_NITER: the estimated trip count (negative when unknown),
+   NINSNS: estimated number of instructions in the loop,
+   PREFETCH_COUNT: an estimate of the number of prefetches,
+   MEM_REF_COUNT: total number of memory references in the loop.
+   Returns true when prefetching looks worthwhile.  */
+
+static bool
+is_loop_prefetching_profitable (unsigned ahead, HOST_WIDE_INT est_niter,
+				unsigned ninsns, unsigned prefetch_count,
+				unsigned mem_ref_count)
+{
+  /* A loop without memory references has nothing to prefetch.  */
+  if (mem_ref_count == 0)
+    return false;
+
+  /* Prefetching improves performance by overlapping cache missing
+     memory accesses with CPU operations.  If the loop does not have
+     enough CPU operations to overlap with memory operations, prefetching
+     won't give a significant benefit.  One approximate way of checking
+     this is to require the ratio of instructions to memory references to
+     be above a certain limit.  This approximation works well in practice.
+     TODO: Implement a more precise computation by estimating the time
+     for each CPU or memory op in the loop.  Time estimates for memory ops
+     should account for cache misses.  */
+  if ((int) (ninsns / mem_ref_count) < PREFETCH_MIN_INSN_TO_MEM_RATIO)
+    return false;
+
+  /* Profitability of prefetching is highly dependent on the trip count.
+     For a given AHEAD distance, the first AHEAD iterations do not benefit
+     from prefetching, and the last AHEAD iterations execute useless
+     prefetches.  So, if the trip count is not large enough relative to
+     AHEAD, prefetching may cause serious performance degradation.  */
+  if (est_niter >= 0)
+    {
+      if (est_niter > (HOST_WIDE_INT) ahead)
+	return true;
+
+      if (dump_file && (dump_flags & TDF_DETAILS))
+	fprintf (dump_file,
+		 "Not prefetching -- loop estimated to roll only %d times\n",
+		 (int) est_niter);
+      return false;
+    }
+
+  /* The trip count is unknown at compile time, so conservatively skip
+     loops with high prefetching costs.  For now, only the I-cache cost
+     is considered, estimated as the ratio between the number of
+     prefetches and the total number of instructions.  Since we use
+     integer arithmetic, the reciprocal of this ratio is computed.
+     TODO: Account for loop unrolling, which may reduce the costs of
+     shorter stride prefetches.  Note that not accounting for loop
+     unrolling over-estimates the cost and hence gives more conservative
+     results.  */
+  return (int) (ninsns / prefetch_count) >= MIN_INSN_TO_PREFETCH_RATIO;
+}
+
+
/* Issue prefetch instructions for array references in LOOP. Returns
true if the LOOP was unrolled. */
HOST_WIDE_INT est_niter;
struct tree_niter_desc desc;
bool unrolled = false, no_other_refs;
+ unsigned prefetch_count;
+ unsigned mem_ref_count;
- if (!maybe_hot_bb_p (loop->header))
+ if (optimize_loop_nest_for_size_p (loop))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " ignored (cold area)\n");
}
/* Step 1: gather the memory references. */
- refs = gather_memory_references (loop, &no_other_refs);
+ refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
/* Step 2: estimate the reuse effects. */
prune_by_reuse (refs);
- if (!anything_to_prefetch_p (refs))
+ prefetch_count = estimate_prefetch_count (refs);
+ if (prefetch_count == 0)
goto fail;
determine_loop_nest_reuse (loop, refs, no_other_refs);
ahead = (PREFETCH_LATENCY + time - 1) / time;
est_niter = estimated_loop_iterations_int (loop, false);
- /* The prefetches will run for AHEAD iterations of the original loop. Unless
- the loop rolls at least AHEAD times, prefetching the references does not
- make sense. */
- if (est_niter >= 0 && est_niter <= (HOST_WIDE_INT) ahead)
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file,
- "Not prefetching -- loop estimated to roll only %d times\n",
- (int) est_niter);
- goto fail;
- }
-
- mark_nontemporal_stores (loop, refs);
-
ninsns = tree_num_loop_insns (loop, &eni_size_weights);
unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
est_niter);
if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Ahead %d, unroll factor %d\n", ahead, unroll_factor);
+ fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
+ HOST_WIDE_INT_PRINT_DEC "\n"
+ "insn count %d, mem ref count %d, prefetch count %d\n",
+ ahead, unroll_factor, est_niter,
+ ninsns, mem_ref_count, prefetch_count);
+
+ if (!is_loop_prefetching_profitable (ahead, est_niter, ninsns,
+ prefetch_count, mem_ref_count))
+ goto fail;
+
+ mark_nontemporal_stores (loop, refs);
/* Step 4: what to prefetch? */
if (!schedule_prefetches (refs, unroll_factor, ahead))
SIMULTANEOUS_PREFETCHES);
fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
- fprintf (dump_file, " L1 cache size: %d lines, %d bytes\n",
- L1_CACHE_SIZE, L1_CACHE_SIZE_BYTES);
+ fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
+ L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
- fprintf (dump_file, " L2 cache size: %d bytes\n", L2_CACHE_SIZE_BYTES);
+ fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
+ fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
+ MIN_INSN_TO_PREFETCH_RATIO);
+ fprintf (dump_file, " min insn-to-mem ratio: %d \n",
+ PREFETCH_MIN_INSN_TO_MEM_RATIO);
fprintf (dump_file, "\n");
}