/* Array prefetching.
- Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
+ Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
This file is part of GCC.
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
-#include "rtl.h"
#include "tm_p.h"
-#include "hard-reg-set.h"
#include "basic-block.h"
#include "output.h"
#include "diagnostic.h"
+#include "tree-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "expr.h"
#include "tree-pass.h"
-#include "ggc.h"
#include "insn-config.h"
#include "recog.h"
#include "hashtab.h"
struct mem_ref_group
{
tree base; /* Base of the reference. */
- HOST_WIDE_INT step; /* Step of the reference. */
+  tree step;   /* Step of the reference; either an INTEGER_CST or a
+                  loop-invariant expression.  */
struct mem_ref *refs; /* References in the group. */
struct mem_ref_group *next; /* Next group of references. */
};
#define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
+/* Do not generate a prefetch if the unroll factor is significantly less
+ than what is required by the prefetch. This is to avoid redundant
+ prefetches. For example, if prefetch_mod is 16 and unroll_factor is
+ 1, this means prefetching requires unrolling the loop 16 times, but
+ the loop is not going to be unrolled. In this case (ratio = 16),
+ prefetching is not likely to be beneficial. */
+
+#ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
+#define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 8
+#endif
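+
+/* For example, with the default ratio of 8: a reference with
+   prefetch_mod == 16 in a loop that is not unrolled (unroll_factor == 1)
+   gives 16 / 1 == 16 > 8, so its prefetch is skipped, while unrolling the
+   loop twice gives 16 / 2 == 8, which does not exceed the ratio, so the
+   prefetch is still issued.  */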
+
/* The memory reference. */
struct mem_ref
fprintf (file, " group %p (base ", (void *) ref->group);
print_generic_expr (file, ref->group->base, TDF_SLIM);
fprintf (file, ", step ");
- fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->group->step);
+ if (cst_and_fits_in_hwi (ref->group->step))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (ref->group->step));
+ else
+ print_generic_expr (file, ref->group->step, TDF_TREE);
fprintf (file, ")\n");
fprintf (file, " delta ");
exist. */
static struct mem_ref_group *
-find_or_create_group (struct mem_ref_group **groups, tree base,
- HOST_WIDE_INT step)
+find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
{
struct mem_ref_group *group;
for (; *groups; groups = &(*groups)->next)
{
- if ((*groups)->step == step
+ if (operand_equal_p ((*groups)->step, step, 0)
&& operand_equal_p ((*groups)->base, base, 0))
return *groups;
- /* Keep the list of groups sorted by decreasing step. */
- if ((*groups)->step < step)
+ /* If step is an integer constant, keep the list of groups sorted
+ by decreasing step. */
+ if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
+ && int_cst_value ((*groups)->step) < int_cst_value (step))
break;
}
{
struct loop *loop; /* Loop of the reference. */
gimple stmt; /* Statement of the reference. */
- HOST_WIDE_INT *step; /* Step of the memory reference. */
+ tree *step; /* Step of the memory reference. */
HOST_WIDE_INT *delta; /* Offset of the memory reference. */
};
{
struct ar_data *ar_data = (struct ar_data *) data;
tree ibase, step, stepsize;
- HOST_WIDE_INT istep, idelta = 0, imult = 1;
+ HOST_WIDE_INT idelta = 0, imult = 1;
affine_iv iv;
if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
return false;
if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
- *index, &iv, false))
+ *index, &iv, true))
return false;
ibase = iv.base;
step = iv.step;
- if (!cst_and_fits_in_hwi (step))
- return false;
- istep = int_cst_value (step);
-
if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
&& cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
{
ibase = build_int_cst (TREE_TYPE (ibase), 0);
}
if (TREE_CODE (base) == ARRAY_REF)
{
stepsize = array_ref_element_size (base);
return false;
imult = int_cst_value (stepsize);
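+      /* Scale the step and the constant offset by the size of one array
+         element, so that both are expressed in bytes.  */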
-      istep *= imult;
+      step = fold_build2 (MULT_EXPR, sizetype,
+                          fold_convert (sizetype, step),
+                          fold_convert (sizetype, stepsize));
idelta *= imult;
}
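+
+  /* Accumulate the step of this index into the step of the whole reference;
+     *AR_DATA->STEP stays NULL_TREE until the first induction step has been
+     seen.  */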
-  *ar_data->step += istep;
+  if (*ar_data->step == NULL_TREE)
+    *ar_data->step = step;
+  else
+    *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
+                                  fold_convert (sizetype, *ar_data->step),
+                                  fold_convert (sizetype, step));
*ar_data->delta += idelta;
*index = ibase;
static bool
analyze_ref (struct loop *loop, tree *ref_p, tree *base,
- HOST_WIDE_INT *step, HOST_WIDE_INT *delta,
+ tree *step, HOST_WIDE_INT *delta,
gimple stmt)
{
struct ar_data ar_data;
HOST_WIDE_INT bit_offset;
tree ref = *ref_p;
- *step = 0;
+ *step = NULL_TREE;
*delta = 0;
/* First strip off the component references. Ignore bitfields. */
gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
tree ref, bool write_p, gimple stmt)
{
- tree base;
- HOST_WIDE_INT step, delta;
+ tree base, step;
+ HOST_WIDE_INT delta;
struct mem_ref_group *agrp;
if (get_base_address (ref) == NULL)
if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
return false;
+  /* ANALYZE_REF leaves STEP as NULL_TREE when it finds no induction step;
+     there is nothing to prefetch in that case, so stop here.  */
+ if (step == NULL_TREE)
+ return false;
-/* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
-   are integer constants.  */
+/* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA is a
+   constant and STEP is a loop-invariant tree, not necessarily a constant.  */
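+/* For instance, for a reference a[i * n] with loop counter i, STEP is the
+   loop-invariant expression n * sizeof (a[0]) rather than an integer
+   constant.  */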
static void
prune_ref_by_self_reuse (struct mem_ref *ref)
{
- HOST_WIDE_INT step = ref->group->step;
- bool backward = step < 0;
+ HOST_WIDE_INT step;
+ bool backward;
+
+  /* If the step size is non-constant, we cannot calculate prefetch_mod.  */
+ if (!cst_and_fits_in_hwi (ref->group->step))
+ return;
+
+ step = int_cst_value (ref->group->step);
+
+ backward = step < 0;
if (step == 0)
{
prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
bool by_is_before)
{
- HOST_WIDE_INT step = ref->group->step;
- bool backward = step < 0;
+ HOST_WIDE_INT step;
+ bool backward;
HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
HOST_WIDE_INT delta = delta_b - delta_r;
HOST_WIDE_INT hit_from;
tree ref_type;
int align_unit;
+  /* If the step is non-constant, we cannot calculate prefetch_before.  */
+  if (!cst_and_fits_in_hwi (ref->group->step))
+    return;
+
+  step = int_cst_value (ref->group->step);
+
+  backward = step < 0;
+
if (delta == 0)
{
/* If the references have the same address, only prefetch the
hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
prefetch_before = (hit_from - delta_r + step - 1) / step;
+      /* Do not reduce prefetch_before when the reuse is farther apart than
+         the L2 cache size; the data would be evicted before it is reused.  */
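+      /* E.g., with a 512 kB L2 cache and step == 8, a reuse more than
+         512 * 1024 / 8 == 65536 iterations away cannot still be cached.  */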
+ if (prefetch_before > (unsigned) abs (L2_CACHE_SIZE_BYTES / step))
+ prefetch_before = PREFETCH_ALL;
if (prefetch_before < ref->prefetch_before)
ref->prefetch_before = prefetch_before;
reduced_prefetch_block, align_unit);
if (miss_rate <= ACCEPTABLE_MISS_RATE)
{
+          /* Do not reduce prefetch_before when the reuse is farther apart
+             than the L2 cache size; the data would be evicted before it is
+             reused.  */
+ if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
+ prefetch_before = PREFETCH_ALL;
if (prefetch_before < ref->prefetch_before)
ref->prefetch_before = prefetch_before;
/* For now, do not issue prefetches that would be useful only for the
   first few iterations.  */
if (ref->prefetch_before != PREFETCH_ALL)
- return false;
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
+ (void *) ref);
+ return false;
+ }
/* Do not prefetch nontemporal stores. */
if (ref->storent_p)
- return false;
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
+ return false;
+ }
return true;
}
if (!should_issue_prefetch_p (ref))
continue;
+      /* The loop is far from being sufficiently unrolled for this
+         prefetch.  Do not generate the prefetch to avoid many redundant
+         prefetches.  */
+ if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
+ continue;
+
/* If we need to prefetch the reference each PREFETCH_MOD iterations,
and we unroll the loop UNROLL_FACTOR times, we need to insert
ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
{
HOST_WIDE_INT delta;
- tree addr, addr_base, write_p, local;
+ tree addr, addr_base, write_p, local, forward;
gimple prefetch;
gimple_stmt_iterator bsi;
unsigned n_prefetches, ap;
for (ap = 0; ap < n_prefetches; ap++)
{
- /* Determine the address to prefetch. */
- delta = (ahead + ap * ref->prefetch_mod) * ref->group->step;
- addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
- addr_base, size_int (delta));
- addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
- true, GSI_SAME_STMT);
-
+ if (cst_and_fits_in_hwi (ref->group->step))
+ {
+ /* Determine the address to prefetch. */
+ delta = (ahead + ap * ref->prefetch_mod) *
+ int_cst_value (ref->group->step);
+ addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
+ addr_base, size_int (delta));
+ addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
+ true, GSI_SAME_STMT);
+ }
+ else
+ {
+          /* The step size is non-constant but loop-invariant.  We use the
+             heuristic of simply prefetching AHEAD iterations ahead.  */
+ forward = fold_build2 (MULT_EXPR, sizetype,
+ fold_convert (sizetype, ref->group->step),
+ fold_convert (sizetype, size_int (ahead)));
+ addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, addr_base,
+ forward);
+ addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
+ NULL, true, GSI_SAME_STMT);
+ }
/* Create the prefetch instruction. */
prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
3, addr, write_p, local);
return false;
}
- /* Profitability of prefetching is highly dependent on the trip count.
- For a given AHEAD distance, the first AHEAD iterations do not benefit
- from prefetching, and the last AHEAD iterations execute useless
- prefetches. So, if the trip count is not large enough relative to AHEAD,
- prefetching may cause serious performance degradation. To avoid this
- problem when the trip count is not known at compile time, we
- conservatively skip loops with high prefetching costs. For now, only
- the I-cache cost is considered. The relative I-cache cost is estimated
- by taking the ratio between the number of prefetches and the total
- number of instructions. Since we are using integer arithmetic, we
- compute the reciprocal of this ratio.
+ /* Prefetching most likely causes performance degradation when the instruction
+ to prefetch ratio is too small. Too many prefetch instructions in a loop
+ may reduce the I-cache performance.
(unroll_factor * ninsns) is used to estimate the number of instructions in
the unrolled loop. This implementation is a bit simplistic -- the number
of issued prefetch instructions is also affected by unrolling.  So,
prefetch_mod and the unroll factor should be taken into account when
determining prefetch_count.  Also, the number of insns of the unrolled
loop will usually be significantly smaller than the number of insns of the
original loop * unroll_factor (at least the induction variable increases
and the exit branches will get eliminated), so it might be better to use
tree_estimate_loop_size + estimated_unrolled_size. */
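+
+  /* For example, a loop body of 8 insns unrolled 4 times with 4 prefetch
+     instructions gives insn_to_prefetch_ratio == (4 * 8) / 4 == 8; the loop
+     is not prefetched if that falls below MIN_INSN_TO_PREFETCH_RATIO.  */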
- if (est_niter < 0)
+ insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
+ if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
{
- insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
- return insn_to_prefetch_ratio >= MIN_INSN_TO_PREFETCH_RATIO;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file,
+ "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
+ insn_to_prefetch_ratio);
+ return false;
}
+  /* We cannot estimate further when the trip count is unknown; just assume
+     prefetching is profitable, which may be too aggressive.  */
+ if (est_niter < 0)
+ return true;
+
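+  /* From here on the trip count estimate is known.  The first AHEAD
+     iterations do not benefit from prefetching, and the last AHEAD
+     iterations execute useless prefetches, so require the estimated trip
+     count to be large relative to AHEAD.  */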
if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
{
if (dump_file && (dump_flags & TDF_DETAILS))