2 Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
22 #include "coretypes.h"
26 #include "basic-block.h"
28 #include "tree-pretty-print.h"
29 #include "tree-flow.h"
30 #include "tree-dump.h"
33 #include "tree-pass.h"
34 #include "insn-config.h"
37 #include "tree-chrec.h"
38 #include "tree-scalar-evolution.h"
41 #include "langhooks.h"
42 #include "tree-inline.h"
43 #include "tree-data-ref.h"
46 /* FIXME: Needed for optabs, but this should all be moved to a TBD interface
47 between the GIMPLE and RTL worlds. */
51 /* This pass inserts prefetch instructions to optimize cache usage during
52 accesses to arrays in loops. It processes loops sequentially and:
54 1) Gathers all memory references in the single loop.
55 2) For each of the references it decides when it is profitable to prefetch
56 it. To do it, we evaluate the reuse among the accesses, and determine
57 two values: PREFETCH_BEFORE (meaning that it only makes sense to do
58 prefetching in the first PREFETCH_BEFORE iterations of the loop) and
59 PREFETCH_MOD (meaning that it only makes sense to prefetch in the
60 iterations of the loop that are zero modulo PREFETCH_MOD). For example
61 (assuming cache line size is 64 bytes, char has size 1 byte and there
62 is no hardware sequential prefetch):
65 for (i = 0; i < max; i++)
66 {
67 a[255] = ...; (0)
68 a[i] = ...; (1)
69 a[i + 64] = ...; (2)
70 a[16*i] = ...; (3)
71 a[187*i] = ...; (4)
72 a[187*i + 50] = ...; (5)
73 }
75 (0) obviously has PREFETCH_BEFORE 1
76 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
77 location 64 iterations before it, and PREFETCH_MOD 64 (since
78 it hits the same cache line otherwise).
79 (2) has PREFETCH_MOD 64
80 (3) has PREFETCH_MOD 4
81 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
82 the cache line accessed by (4) is the same as the one accessed by (5) with probability only 7/32.
84 (5) has PREFETCH_MOD 1 as well.
86 Additionally, we use data dependence analysis to determine for each
87 reference the distance till the first reuse; this information is used
88 to determine the temporality of the issued prefetch instruction.
90 3) We determine how much ahead we need to prefetch. The number of
91 iterations needed is time to fetch / time spent in one iteration of
92 the loop. The problem is that we do not know either of these values,
93 so we just make a heuristic guess based on a magic (possibly)
94 target-specific constant and size of the loop.
96 4) Determine which of the references we prefetch. We take into account
97 that there is a maximum number of simultaneous prefetches (provided
98 by machine description). We issue prefetches for as many references as possible
99 while still within this bound (starting with those with the lowest
100 prefetch_mod, since they are responsible for most of the cache misses).
103 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
104 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
105 prefetching nonaccessed memory.
106 TODO -- actually implement peeling.
108 6) We actually emit the prefetch instructions. ??? Perhaps emit the
109 prefetch instructions with guards in cases where 5) was not sufficient
110 to satisfy the constraints?
112 The function is_loop_prefetching_profitable() implements a cost model
113 to determine if prefetching is profitable for a given loop. The cost
114 model has two heuristics:
115 1. A heuristic that determines whether the given loop has enough CPU
116 ops that can be overlapped with cache missing memory ops.
117 If not, the loop won't benefit from prefetching. This is implemented
118 by requiring the ratio between the instruction count and the mem ref
119 count to be above a certain minimum.
120 2. A heuristic that disables prefetching in a loop with an unknown trip
121 count if the prefetching cost is above a certain limit. The relative
122 prefetching cost is estimated by taking the ratio between the
123 prefetch count and the total instruction count (this models the I-cache cost).
125 The limits used in these heuristics are defined as parameters with
126 reasonable default values. Machine-specific default values will be added later if deemed necessary.
130 -- write and use more general reuse analysis (that could also be used
131 in other cache-aimed loop optimizations)
132 -- make it behave sanely together with the prefetches given by the user
133 (now we just ignore them; at the very least we should avoid
134 optimizing loops in which the user put his own prefetches)
135 -- we assume cache line size alignment of arrays; this could be improved. */
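/* As an illustration of steps 3) and 4) above (the numbers are invented for
   this example and do not come from any particular target): suppose
   PREFETCH_LATENCY is 200, one iteration of the loop costs about 20 time
   units, and the loop contains two prefetched references with PREFETCH_MOD 1
   and PREFETCH_MOD 8.  Then ahead = (200 + 20 - 1) / 20 = 10 iterations.
   If the loop is unrolled 8 times to satisfy PREFETCH_MOD 8, the first
   reference needs ceil (8 / 1) = 8 prefetch instructions per unrolled body
   and the second needs ceil (8 / 8) = 1, each of them occupying roughly
   10 / 8, i.e. one, of the SIMULTANEOUS_PREFETCHES slots.  */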
138 /* Magic constants follow. These should be replaced by machine-specific numbers. */
141 /* True if write can be prefetched by a read prefetch. */
143 #ifndef WRITE_CAN_USE_READ_PREFETCH
144 #define WRITE_CAN_USE_READ_PREFETCH 1
147 /* True if read can be prefetched by a write prefetch. */
149 #ifndef READ_CAN_USE_WRITE_PREFETCH
150 #define READ_CAN_USE_WRITE_PREFETCH 0
153 /* The size of the block loaded by a single prefetch. Usually, this is
154 the same as cache line size (at the moment, we only consider one level
155 of cache hierarchy). */
157 #ifndef PREFETCH_BLOCK
158 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
161 /* Do we have forward hardware sequential prefetching? */
163 #ifndef HAVE_FORWARD_PREFETCH
164 #define HAVE_FORWARD_PREFETCH 0
167 /* Do we have backward hardware sequential prefetching? */
169 #ifndef HAVE_BACKWARD_PREFETCH
170 #define HAVE_BACKWARD_PREFETCH 0
173 /* In some cases we are only able to determine that there is a certain
174 probability that the two accesses hit the same cache line. In this
175 case, we issue the prefetches for both of them if this probability
176 is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand. */
178 #ifndef ACCEPTABLE_MISS_RATE
179 #define ACCEPTABLE_MISS_RATE 50
182 #ifndef HAVE_prefetch
183 #define HAVE_prefetch 0
186 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
187 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
189 /* We consider a memory access nontemporal if it is not reused sooner than
190 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
191 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
192 so that we use nontemporal prefetches e.g. if a single memory location
193 is accessed several times in a single iteration of the loop. */
194 #define NONTEMPORAL_FRACTION 16
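/* For instance, assuming a 32 kB L1 cache and the default
   NONTEMPORAL_FRACTION of 16, reuses that are closer than
   32768 / 16 = 2048 bytes apart are ignored when deciding whether a
   reference may be prefetched nontemporally.  */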
196 /* In case we have to emit a memory fence instruction after the loop that
197 uses nontemporal stores, this defines the builtin to use. */
199 #ifndef FENCE_FOLLOWING_MOVNT
200 #define FENCE_FOLLOWING_MOVNT NULL_TREE
203 /* It is not profitable to prefetch when the trip count is not at
204 least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
205 For example, in a loop with a prefetch ahead distance of 10,
206 supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
207 profitable to prefetch when the trip count is greater than or equal to
208 40. In that case, 30 out of the 40 iterations will benefit from prefetching. */
211 #ifndef TRIP_COUNT_TO_AHEAD_RATIO
212 #define TRIP_COUNT_TO_AHEAD_RATIO 4
215 /* The group of references between which reuse may occur. */
219 tree base; /* Base of the reference. */
220 tree step; /* Step of the reference. */
221 struct mem_ref *refs; /* References in the group. */
222 struct mem_ref_group *next; /* Next group of references. */
225 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
227 #define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
229 /* Do not generate a prefetch if the unroll factor is significantly less
230 than what is required by the prefetch. This is to avoid redundant
231 prefetches. For example, when prefetch_mod is 16 and unroll_factor is
232 2, prefetching requires unrolling the loop 16 times, but
233 the loop is actually unrolled twice. In this case (ratio = 8),
234 prefetching is not likely to be beneficial. */
236 #ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
237 #define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
240 /* The memory reference. */
244 gimple stmt; /* Statement in which the reference appears. */
245 tree mem; /* The reference. */
246 HOST_WIDE_INT delta; /* Constant offset of the reference. */
247 struct mem_ref_group *group; /* The group of references it belongs to. */
248 unsigned HOST_WIDE_INT prefetch_mod;
249 /* Prefetch only each PREFETCH_MOD-th iteration. */
251 unsigned HOST_WIDE_INT prefetch_before;
252 /* Prefetch only the first PREFETCH_BEFORE iterations. */
254 unsigned reuse_distance; /* The amount of data accessed before the first
255 reuse of this value. */
256 struct mem_ref *next; /* The next reference in the group. */
257 unsigned write_p : 1; /* Is it a write? */
258 unsigned independent_p : 1; /* True if the reference is independent of
259 all other references inside the loop. */
260 unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */
261 unsigned storent_p : 1; /* True if we changed the store to a nontemporal one. */
265 /* Dumps information about reference REF to FILE. */
268 dump_mem_ref (FILE *file, struct mem_ref *ref)
270 fprintf (file, "Reference %p:\n", (void *) ref);
272 fprintf (file, " group %p (base ", (void *) ref->group);
273 print_generic_expr (file, ref->group->base, TDF_SLIM);
274 fprintf (file, ", step ");
275 if (cst_and_fits_in_hwi (ref->group->step))
276 fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (ref->group->step));
278 print_generic_expr (file, ref->group->step, TDF_TREE);
279 fprintf (file, ")\n");
281 fprintf (file, " delta ");
282 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
283 fprintf (file, "\n");
285 fprintf (file, " %s\n", ref->write_p ? "write" : "read");
287 fprintf (file, "\n");
290 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not exist. */
293 static struct mem_ref_group *
294 find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
296 struct mem_ref_group *group;
298 for (; *groups; groups = &(*groups)->next)
300 if (operand_equal_p ((*groups)->step, step, 0)
301 && operand_equal_p ((*groups)->base, base, 0))
304 /* If step is an integer constant, keep the list of groups sorted
305 by decreasing step. */
306 if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
307 && int_cst_value ((*groups)->step) < int_cst_value (step))
311 group = XNEW (struct mem_ref_group);
315 group->next = *groups;
321 /* Records a memory reference MEM in GROUP with offset DELTA and write status
322 WRITE_P. The reference occurs in statement STMT. */
325 record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
326 HOST_WIDE_INT delta, bool write_p)
328 struct mem_ref **aref;
330 /* Do not record the same address twice. */
331 for (aref = &group->refs; *aref; aref = &(*aref)->next)
333 /* It need not be possible for a write reference to reuse a read
334 prefetch, or vice versa. */
335 if (!WRITE_CAN_USE_READ_PREFETCH
337 && !(*aref)->write_p)
339 if (!READ_CAN_USE_WRITE_PREFETCH
344 if ((*aref)->delta == delta)
348 (*aref) = XNEW (struct mem_ref);
349 (*aref)->stmt = stmt;
351 (*aref)->delta = delta;
352 (*aref)->write_p = write_p;
353 (*aref)->prefetch_before = PREFETCH_ALL;
354 (*aref)->prefetch_mod = 1;
355 (*aref)->reuse_distance = 0;
356 (*aref)->issue_prefetch_p = false;
357 (*aref)->group = group;
358 (*aref)->next = NULL;
359 (*aref)->independent_p = false;
360 (*aref)->storent_p = false;
362 if (dump_file && (dump_flags & TDF_DETAILS))
363 dump_mem_ref (dump_file, *aref);
366 /* Release memory references in GROUPS. */
369 release_mem_refs (struct mem_ref_group *groups)
371 struct mem_ref_group *next_g;
372 struct mem_ref *ref, *next_r;
374 for (; groups; groups = next_g)
376 next_g = groups->next;
377 for (ref = groups->refs; ref; ref = next_r)
386 /* A structure used to pass arguments to idx_analyze_ref. */
390 struct loop *loop; /* Loop of the reference. */
391 gimple stmt; /* Statement of the reference. */
392 tree *step; /* Step of the memory reference. */
393 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
396 /* Analyzes a single INDEX of a memory reference to obtain information
397 described in analyze_ref. Callback for for_each_index. */
400 idx_analyze_ref (tree base, tree *index, void *data)
402 struct ar_data *ar_data = (struct ar_data *) data;
403 tree ibase, step, stepsize;
404 HOST_WIDE_INT idelta = 0, imult = 1;
407 if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
408 || TREE_CODE (base) == ALIGN_INDIRECT_REF)
411 if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
417 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
418 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
420 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
421 ibase = TREE_OPERAND (ibase, 0);
423 if (cst_and_fits_in_hwi (ibase))
425 idelta += int_cst_value (ibase);
426 ibase = build_int_cst (TREE_TYPE (ibase), 0);
429 if (TREE_CODE (base) == ARRAY_REF)
431 stepsize = array_ref_element_size (base);
432 if (!cst_and_fits_in_hwi (stepsize))
434 imult = int_cst_value (stepsize);
435 step = fold_build2 (MULT_EXPR, sizetype,
436 fold_convert (sizetype, step),
437 fold_convert (sizetype, stepsize));
441 if (*ar_data->step == NULL_TREE)
442 *ar_data->step = step;
444 *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
445 fold_convert (sizetype, *ar_data->step),
446 fold_convert (sizetype, step));
447 *ar_data->delta += idelta;
453 /* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
454 STEP are integer constants and iter is the number of iterations of LOOP. The
455 reference occurs in statement STMT. Strips nonaddressable component
456 references from REF_P. */
459 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
460 tree *step, HOST_WIDE_INT *delta,
463 struct ar_data ar_data;
465 HOST_WIDE_INT bit_offset;
471 /* First strip off the component references. Ignore bitfields. */
472 if (TREE_CODE (ref) == COMPONENT_REF
473 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
474 ref = TREE_OPERAND (ref, 0);
478 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
480 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
481 bit_offset = TREE_INT_CST_LOW (off);
482 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
484 *delta += bit_offset / BITS_PER_UNIT;
487 *base = unshare_expr (ref);
491 ar_data.delta = delta;
492 return for_each_index (base, idx_analyze_ref, &ar_data);
495 /* Record a memory reference REF to the list REFS. The reference occurs in
496 LOOP in statement STMT and it is write if WRITE_P. Returns true if the
497 reference was recorded, false otherwise. */
500 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
501 tree ref, bool write_p, gimple stmt)
505 struct mem_ref_group *agrp;
507 if (get_base_address (ref) == NULL)
510 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
512 /* If analyze_ref fails, the default is NULL_TREE. We can stop here. */
513 if (step == NULL_TREE)
516 /* Limit non-constant step prefetching only to the innermost loops. */
517 if (!cst_and_fits_in_hwi (step) && loop->inner != NULL)
520 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
521 are integer constants. */
522 agrp = find_or_create_group (refs, base, step);
523 record_ref (agrp, stmt, ref, delta, write_p);
528 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
529 true if there are no other memory references inside the loop. */
531 static struct mem_ref_group *
532 gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
534 basic_block *body = get_loop_body_in_dom_order (loop);
537 gimple_stmt_iterator bsi;
540 struct mem_ref_group *refs = NULL;
542 *no_other_refs = true;
545 /* Scan the loop body in order, so that the former references precede the later ones. */
547 for (i = 0; i < loop->num_nodes; i++)
550 if (bb->loop_father != loop)
553 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
555 stmt = gsi_stmt (bsi);
557 if (gimple_code (stmt) != GIMPLE_ASSIGN)
559 if (gimple_vuse (stmt)
560 || (is_gimple_call (stmt)
561 && !(gimple_call_flags (stmt) & ECF_CONST)))
562 *no_other_refs = false;
566 lhs = gimple_assign_lhs (stmt);
567 rhs = gimple_assign_rhs1 (stmt);
569 if (REFERENCE_CLASS_P (rhs))
571 *no_other_refs &= gather_memory_references_ref (loop, &refs,
575 if (REFERENCE_CLASS_P (lhs))
577 *no_other_refs &= gather_memory_references_ref (loop, &refs,
588 /* Prune the prefetch candidate REF using the self-reuse. */
591 prune_ref_by_self_reuse (struct mem_ref *ref)
596 /* If the step size is non-constant, we cannot calculate prefetch_mod. */
597 if (!cst_and_fits_in_hwi (ref->group->step))
600 step = int_cst_value (ref->group->step);
606 /* Prefetch references to an invariant address just once. */
607 ref->prefetch_before = 1;
614 if (step > PREFETCH_BLOCK)
617 if ((backward && HAVE_BACKWARD_PREFETCH)
618 || (!backward && HAVE_FORWARD_PREFETCH))
620 ref->prefetch_before = 1;
624 ref->prefetch_mod = PREFETCH_BLOCK / step;
627 /* Divides X by BY, rounding down. */
630 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
637 return (x + by - 1) / by;
640 /* Given a CACHE_LINE_SIZE and two inductive memory references
641 with a common STEP greater than CACHE_LINE_SIZE and an address
642 difference DELTA, compute the probability that they will fall
643 in different cache lines. Return true if the computed miss rate
644 is not greater than the ACCEPTABLE_MISS_RATE. DISTINCT_ITERS is the
645 number of distinct iterations after which the pattern repeats itself.
646 ALIGN_UNIT is the unit of alignment in bytes. */
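/* A worked example of the check below, with values chosen purely for
   illustration: cache_line_size = 64, align_unit = 16, step = 96, delta = 32
   and distinct_iters = 2.  There are (64 / 16) * 2 = 8 sampled positions and,
   with ACCEPTABLE_MISS_RATE = 50, at most 50 * 8 / 1000 = 0 of them may fall
   into different cache lines.  The position align = 0, iter = 1 gives
   addresses 96 and 128, which lie in cache lines 1 and 2, so the miss rate
   is already unacceptable and the function returns false.  */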
649 is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
650 HOST_WIDE_INT step, HOST_WIDE_INT delta,
651 unsigned HOST_WIDE_INT distinct_iters,
654 unsigned align, iter;
655 int total_positions, miss_positions, max_allowed_miss_positions;
656 int address1, address2, cache_line1, cache_line2;
658 /* It always misses if delta is greater than or equal to the cache line size. */
660 if (delta >= (HOST_WIDE_INT) cache_line_size)
664 total_positions = (cache_line_size / align_unit) * distinct_iters;
665 max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;
667 /* Iterate through all possible alignments of the first
668 memory reference within its cache line. */
669 for (align = 0; align < cache_line_size; align += align_unit)
671 /* Iterate through all distinct iterations. */
672 for (iter = 0; iter < distinct_iters; iter++)
674 address1 = align + step * iter;
675 address2 = address1 + delta;
676 cache_line1 = address1 / cache_line_size;
677 cache_line2 = address2 / cache_line_size;
678 if (cache_line1 != cache_line2)
681 if (miss_positions > max_allowed_miss_positions)
688 /* Prune the prefetch candidate REF using the reuse with BY.
689 If BY_IS_BEFORE is true, BY is before REF in the loop. */
692 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
697 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
698 HOST_WIDE_INT delta = delta_b - delta_r;
699 HOST_WIDE_INT hit_from;
700 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
701 HOST_WIDE_INT reduced_step;
702 unsigned HOST_WIDE_INT reduced_prefetch_block;
706 /* If the step is non-constant, we cannot calculate prefetch_before. */
707 if (!cst_and_fits_in_hwi (ref->group->step)) {
711 step = int_cst_value (ref->group->step);
718 /* If the references have the same address, only prefetch the first one. */
721 ref->prefetch_before = 0;
728 /* If the reference addresses are invariant and fall into the
729 same cache line, prefetch just the first one. */
733 if (ddown (ref->delta, PREFETCH_BLOCK)
734 != ddown (by->delta, PREFETCH_BLOCK))
737 ref->prefetch_before = 0;
741 /* Only prune the reference that is behind in the array. */
747 /* Transform the data so that we may assume that the accesses are forward. */
751 delta_r = PREFETCH_BLOCK - 1 - delta_r;
752 delta_b = PREFETCH_BLOCK - 1 - delta_b;
760 /* Check whether the two references are likely to hit the same cache
761 line, and how distant from each other the iterations in which this occurs are. */
764 if (step <= PREFETCH_BLOCK)
766 /* The accesses are sure to meet. Let us check when. */
767 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
768 prefetch_before = (hit_from - delta_r + step - 1) / step;
770 /* Do not reduce prefetch_before if we meet beyond cache size. */
771 if (prefetch_before > (unsigned) abs (L2_CACHE_SIZE_BYTES / step))
772 prefetch_before = PREFETCH_ALL;
773 if (prefetch_before < ref->prefetch_before)
774 ref->prefetch_before = prefetch_before;
779 /* A more complicated case with step > prefetch_block. First reduce
780 the ratio between the step and the cache line size to its simplest
781 terms. The resulting denominator will then represent the number of
782 distinct iterations after which each address will go back to its
783 initial location within the cache line. This computation assumes
784 that PREFETCH_BLOCK is a power of two. */
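/* For instance, with PREFETCH_BLOCK = 64 and step = 96, the ratio 96 / 64
   reduces to reduced_step = 3 over reduced_prefetch_block = 2: after 2
   distinct iterations (96 * 2 = 3 * 64) each address is back at its initial
   position within the cache line.  */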
785 prefetch_block = PREFETCH_BLOCK;
786 reduced_prefetch_block = prefetch_block;
788 while ((reduced_step & 1) == 0
789 && reduced_prefetch_block > 1)
792 reduced_prefetch_block >>= 1;
795 prefetch_before = delta / step;
797 ref_type = TREE_TYPE (ref->mem);
798 align_unit = TYPE_ALIGN (ref_type) / 8;
799 if (is_miss_rate_acceptable (prefetch_block, step, delta,
800 reduced_prefetch_block, align_unit))
802 /* Do not reduce prefetch_before if we meet beyond cache size. */
803 if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
804 prefetch_before = PREFETCH_ALL;
805 if (prefetch_before < ref->prefetch_before)
806 ref->prefetch_before = prefetch_before;
811 /* Try also the following iteration. */
813 delta = step - delta;
814 if (is_miss_rate_acceptable (prefetch_block, step, delta,
815 reduced_prefetch_block, align_unit))
817 if (prefetch_before < ref->prefetch_before)
818 ref->prefetch_before = prefetch_before;
823 /* REF probably does not reuse BY. */
827 /* Prune the prefetch candidate REF using the reuses with other references in REFS. */
831 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
833 struct mem_ref *prune_by;
836 prune_ref_by_self_reuse (ref);
838 for (prune_by = refs; prune_by; prune_by = prune_by->next)
846 if (!WRITE_CAN_USE_READ_PREFETCH
848 && !prune_by->write_p)
850 if (!READ_CAN_USE_WRITE_PREFETCH
852 && prune_by->write_p)
855 prune_ref_by_group_reuse (ref, prune_by, before);
859 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
862 prune_group_by_reuse (struct mem_ref_group *group)
864 struct mem_ref *ref_pruned;
866 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
868 prune_ref_by_reuse (ref_pruned, group->refs);
870 if (dump_file && (dump_flags & TDF_DETAILS))
872 fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
874 if (ref_pruned->prefetch_before == PREFETCH_ALL
875 && ref_pruned->prefetch_mod == 1)
876 fprintf (dump_file, " no restrictions");
877 else if (ref_pruned->prefetch_before == 0)
878 fprintf (dump_file, " do not prefetch");
879 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
880 fprintf (dump_file, " prefetch once");
883 if (ref_pruned->prefetch_before != PREFETCH_ALL)
885 fprintf (dump_file, " prefetch before ");
886 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
887 ref_pruned->prefetch_before);
889 if (ref_pruned->prefetch_mod != 1)
891 fprintf (dump_file, " prefetch mod ");
892 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
893 ref_pruned->prefetch_mod);
896 fprintf (dump_file, "\n");
901 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
904 prune_by_reuse (struct mem_ref_group *groups)
906 for (; groups; groups = groups->next)
907 prune_group_by_reuse (groups);
910 /* Returns true if we should issue prefetch for REF. */
913 should_issue_prefetch_p (struct mem_ref *ref)
915 /* For now, do not issue prefetches for only the first few iterations. */
917 if (ref->prefetch_before != PREFETCH_ALL)
919 if (dump_file && (dump_flags & TDF_DETAILS))
920 fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
925 /* Do not prefetch nontemporal stores. */
928 if (dump_file && (dump_flags & TDF_DETAILS))
929 fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
936 /* Decide which of the prefetch candidates in GROUPS to prefetch.
937 AHEAD is the number of iterations to prefetch ahead (which corresponds
938 to the number of simultaneous instances of one prefetch running at a
939 time). UNROLL_FACTOR is the factor by which the loop is going to be
940 unrolled. Returns true if there is anything to prefetch. */
943 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
946 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
947 unsigned slots_per_prefetch;
951 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
952 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
954 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
955 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
956 it will need a prefetch slot. */
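/* For example, with AHEAD = 8 and UNROLL_FACTOR = 2, a prefetch issued in
   the unrolled body stays in flight for about 8 / 2 = 4 iterations of the
   unrolled loop, so it takes (8 + 1) / 2 = 4 of the SIMULTANEOUS_PREFETCHES
   slots (the expression below rounds to the nearest integer).  */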
957 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
958 if (dump_file && (dump_flags & TDF_DETAILS))
959 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
962 /* For now we just take memory references one by one and issue
963 prefetches for as many as possible. The groups are sorted
964 starting with the largest step, since the references with
965 large step are more likely to cause many cache misses. */
967 for (; groups; groups = groups->next)
968 for (ref = groups->refs; ref; ref = ref->next)
970 if (!should_issue_prefetch_p (ref))
973 /* The loop is far from being sufficiently unrolled for this
974 prefetch. Do not generate the prefetch to avoid many redundant prefetches. */
976 if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
979 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
980 and we unroll the loop UNROLL_FACTOR times, we need to insert
981 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each iteration. */
983 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
984 / ref->prefetch_mod);
985 prefetch_slots = n_prefetches * slots_per_prefetch;
987 /* If more than half of the prefetches would be lost anyway, do not
988 issue the prefetch. */
989 if (2 * remaining_prefetch_slots < prefetch_slots)
992 ref->issue_prefetch_p = true;
994 if (remaining_prefetch_slots <= prefetch_slots)
996 remaining_prefetch_slots -= prefetch_slots;
1003 /* Return TRUE if no prefetch is going to be generated in the given GROUPS. */
1007 nothing_to_prefetch_p (struct mem_ref_group *groups)
1009 struct mem_ref *ref;
1011 for (; groups; groups = groups->next)
1012 for (ref = groups->refs; ref; ref = ref->next)
1013 if (should_issue_prefetch_p (ref))
1019 /* Estimate the number of prefetches in the given GROUPS.
1020 UNROLL_FACTOR is the factor by which LOOP was unrolled. */
1023 estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
1025 struct mem_ref *ref;
1026 unsigned n_prefetches;
1027 int prefetch_count = 0;
1029 for (; groups; groups = groups->next)
1030 for (ref = groups->refs; ref; ref = ref->next)
1031 if (should_issue_prefetch_p (ref))
1033 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1034 / ref->prefetch_mod);
1035 prefetch_count += n_prefetches;
1038 return prefetch_count;
1041 /* Issue prefetches for the reference REF into loop as decided before.
1042 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR
1043 is the factor by which LOOP was unrolled. */
1046 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
1048 HOST_WIDE_INT delta;
1049 tree addr, addr_base, write_p, local, forward;
1051 gimple_stmt_iterator bsi;
1052 unsigned n_prefetches, ap;
1053 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
1055 if (dump_file && (dump_flags & TDF_DETAILS))
1056 fprintf (dump_file, "Issued%s prefetch for %p.\n",
1057 nontemporal ? " nontemporal" : "",
1060 bsi = gsi_for_stmt (ref->stmt);
1062 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1063 / ref->prefetch_mod);
1064 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
1065 addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
1066 true, NULL, true, GSI_SAME_STMT);
1067 write_p = ref->write_p ? integer_one_node : integer_zero_node;
1068 local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);
1070 for (ap = 0; ap < n_prefetches; ap++)
1072 if (cst_and_fits_in_hwi (ref->group->step))
1074 /* Determine the address to prefetch. */
1075 delta = (ahead + ap * ref->prefetch_mod) *
1076 int_cst_value (ref->group->step);
1077 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
1078 addr_base, size_int (delta));
1079 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
1080 true, GSI_SAME_STMT);
1084 /* The step size is non-constant but loop-invariant. We use the
1085 heuristic of simply prefetching AHEAD iterations ahead. */
1086 forward = fold_build2 (MULT_EXPR, sizetype,
1087 fold_convert (sizetype, ref->group->step),
1088 fold_convert (sizetype, size_int (ahead)));
1089 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, addr_base,
1091 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
1092 NULL, true, GSI_SAME_STMT);
1094 /* Create the prefetch instruction. */
1095 prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
1096 3, addr, write_p, local);
1097 gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
1101 /* Issue prefetches for the references in GROUPS into loop as decided before.
1102 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR is the
1103 factor by which LOOP was unrolled. */
1106 issue_prefetches (struct mem_ref_group *groups,
1107 unsigned unroll_factor, unsigned ahead)
1109 struct mem_ref *ref;
1111 for (; groups; groups = groups->next)
1112 for (ref = groups->refs; ref; ref = ref->next)
1113 if (ref->issue_prefetch_p)
1114 issue_prefetch_ref (ref, unroll_factor, ahead);
1117 /* Returns true if REF is a memory write for which a nontemporal store insn can be used. */
1121 nontemporal_store_p (struct mem_ref *ref)
1123 enum machine_mode mode;
1124 enum insn_code code;
1126 /* REF must be a write that is not reused. We require it to be independent
1127 of all other memory references in the loop, as the nontemporal stores may
1128 be reordered with respect to other memory references. */
1130 || !ref->independent_p
1131 || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
1134 /* Check that we have the storent instruction for the mode. */
1135 mode = TYPE_MODE (TREE_TYPE (ref->mem));
1136 if (mode == BLKmode)
1139 code = optab_handler (storent_optab, mode);
1140 return code != CODE_FOR_nothing;
1143 /* If REF is a nontemporal store, we mark the corresponding modify statement
1144 and return true. Otherwise, we return false. */
1147 mark_nontemporal_store (struct mem_ref *ref)
1149 if (!nontemporal_store_p (ref))
1152 if (dump_file && (dump_flags & TDF_DETAILS))
1153 fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
1156 gimple_assign_set_nontemporal_move (ref->stmt, true);
1157 ref->storent_p = true;
1162 /* Issue a memory fence instruction after LOOP. */
1165 emit_mfence_after_loop (struct loop *loop)
1167 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1170 gimple_stmt_iterator bsi;
1173 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
1175 call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
1177 if (!single_pred_p (exit->dest)
1178 /* If possible, we prefer not to insert the fence on other paths
1180 && !(exit->flags & EDGE_ABNORMAL))
1181 split_loop_exit_edge (exit);
1182 bsi = gsi_after_labels (exit->dest);
1184 gsi_insert_before (&bsi, call, GSI_NEW_STMT);
1185 mark_virtual_ops_for_renaming (call);
1188 VEC_free (edge, heap, exits);
1189 update_ssa (TODO_update_ssa_only_virtuals);
1192 /* Returns true if we can use storent in loop, false otherwise. */
1195 may_use_storent_in_loop_p (struct loop *loop)
1199 if (loop->inner != NULL)
1202 /* If we must issue an mfence insn after using storent, check that there
1203 is a suitable place for it at each of the loop exits. */
1204 if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1206 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1210 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
1211 if ((exit->flags & EDGE_ABNORMAL)
1212 && exit->dest == EXIT_BLOCK_PTR)
1215 VEC_free (edge, heap, exits);
1221 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1222 references in the loop. */
1225 mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1227 struct mem_ref *ref;
1230 if (!may_use_storent_in_loop_p (loop))
1233 for (; groups; groups = groups->next)
1234 for (ref = groups->refs; ref; ref = ref->next)
1235 any |= mark_nontemporal_store (ref);
1237 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1238 emit_mfence_after_loop (loop);
1241 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
1242 this is the case, fill in DESC with the description of the number of iterations. */
1246 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1249 if (!can_unroll_loop_p (loop, factor, desc))
1252 /* We only consider loops without control flow for unrolling. This is not
1253 a hard restriction -- tree_unroll_loop works with arbitrary loops
1254 as well; but the unrolling/prefetching is usually more profitable for
1255 loops consisting of a single basic block, and we want to limit the code growth. */
1257 if (loop->num_nodes > 2)
1263 /* Determine the coefficient by which to unroll LOOP, from the information
1264 contained in the list of memory references REFS. The description of the
1265 number of iterations of LOOP is stored to DESC. NINSNS is the number of
1266 insns of the LOOP. EST_NITER is the estimated number of iterations of
1267 the loop, or -1 if no estimate is available. */
1270 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
1271 unsigned ninsns, struct tree_niter_desc *desc,
1272 HOST_WIDE_INT est_niter)
1274 unsigned upper_bound;
1275 unsigned nfactor, factor, mod_constraint;
1276 struct mem_ref_group *agp;
1277 struct mem_ref *ref;
1279 /* First check whether the loop is not too large to unroll. We ignore
1280 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1281 from unrolling them enough to make exactly one cache line covered by each
1282 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1283 us from unrolling the loops too many times in cases where we only expect
1284 gains from better scheduling and decreasing loop overhead, which is not the case here. */
1286 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
1288 /* If we unrolled the loop more times than it iterates, the unrolled version
1289 of the loop would never be entered. */
1290 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1291 upper_bound = est_niter;
1293 if (upper_bound <= 1)
1296 /* Choose the factor so that we may prefetch each cache line just once,
1297 but bound the unrolling by UPPER_BOUND. */
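/* For example, if the prefetched references have PREFETCH_MOD 4 and
   PREFETCH_MOD 16, the factor grows from 1 to lcm (4, 1) = 4 and then to
   lcm (16, 4) = 16, provided 16 does not exceed UPPER_BOUND; otherwise the
   last step is skipped and the factor stays at 4.  */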
1299 for (agp = refs; agp; agp = agp->next)
1300 for (ref = agp->refs; ref; ref = ref->next)
1301 if (should_issue_prefetch_p (ref))
1303 mod_constraint = ref->prefetch_mod;
1304 nfactor = least_common_multiple (mod_constraint, factor);
1305 if (nfactor <= upper_bound)
1309 if (!should_unroll_loop_p (loop, desc, factor))
1315 /* Returns the total volume of the memory references REFS, taking into account
1316 reuses in the innermost loop and cache line size. TODO -- we should also
1317 take into account reuses across the iterations of the loops in the loop nest. */
1321 volume_of_references (struct mem_ref_group *refs)
1323 unsigned volume = 0;
1324 struct mem_ref_group *gr;
1325 struct mem_ref *ref;
1327 for (gr = refs; gr; gr = gr->next)
1328 for (ref = gr->refs; ref; ref = ref->next)
1330 /* Almost always reuses another value? */
1331 if (ref->prefetch_before != PREFETCH_ALL)
1334 /* If several iterations access the same cache line, use the size of
1335 the line divided by this number. Otherwise, a cache line is
1336 accessed in each iteration. TODO -- in the latter case, we should
1337 take the size of the reference into account, rounding it up to a
1338 multiple of the cache line size. */
1339 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1344 /* Returns the volume of memory references accessed across VEC iterations of
1345 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1346 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1349 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1353 for (i = 0; i < n; i++)
1360 gcc_assert (vec[i] > 0);
1362 /* We ignore the parts of the distance vector in subloops, since usually
1363 the numbers of iterations are much smaller. */
1364 return loop_sizes[i] * vec[i];
1367 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
1368 at the position corresponding to the loop of the step. N is the depth
1369 of the considered loop nest, and LOOP is its innermost loop. */
1372 add_subscript_strides (tree access_fn, unsigned stride,
1373 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1377 HOST_WIDE_INT astep;
1378 unsigned min_depth = loop_depth (loop) - n;
1380 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1382 aloop = get_chrec_loop (access_fn);
1383 step = CHREC_RIGHT (access_fn);
1384 access_fn = CHREC_LEFT (access_fn);
1386 if ((unsigned) loop_depth (aloop) <= min_depth)
1389 if (host_integerp (step, 0))
1390 astep = tree_low_cst (step, 0);
1392 astep = L1_CACHE_LINE_SIZE;
1394 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1399 /* Returns the volume of memory references accessed between two consecutive
1400 self-reuses of the reference DR. We consider the subscripts of DR in N
1401 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1402 loops. LOOP is the innermost loop of the current loop nest. */
1405 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1408 tree stride, access_fn;
1409 HOST_WIDE_INT *strides, astride;
1410 VEC (tree, heap) *access_fns;
1411 tree ref = DR_REF (dr);
1412 unsigned i, ret = ~0u;
1414 /* In the following example:
1416 for (i = 0; i < N; i++)
1417 for (j = 0; j < N; j++)
1418 use (a[j][i]);
1419 the same cache line is accessed each N steps (except if the change from
1420 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1421 we cannot rely purely on the results of the data dependence analysis.
1423 Instead, we compute the stride of the reference in each loop, and consider
1424 the innermost loop in which the stride is less than the cache size. */
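/* In the example above, assuming 4-byte elements and N = 100, the stride of
   a[j][i] is 400 bytes in the j loop and 4 bytes in the i loop.  The i loop
   is thus the innermost loop whose stride stays below the cache line size,
   and the resulting reuse distance is (roughly) the volume of data touched
   by one of its iterations, i.e. by a full sweep of the j loop.  */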
1426 strides = XCNEWVEC (HOST_WIDE_INT, n);
1427 access_fns = DR_ACCESS_FNS (dr);
1429 for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
1431 /* Keep track of the reference corresponding to the subscript, so that we know its stride. */
1433 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1434 ref = TREE_OPERAND (ref, 0);
1436 if (TREE_CODE (ref) == ARRAY_REF)
1438 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1439 if (host_integerp (stride, 1))
1440 astride = tree_low_cst (stride, 1);
1442 astride = L1_CACHE_LINE_SIZE;
1444 ref = TREE_OPERAND (ref, 0);
1449 add_subscript_strides (access_fn, astride, strides, n, loop);
1452 for (i = n; i-- > 0; )
1454 unsigned HOST_WIDE_INT s;
1456 s = strides[i] < 0 ? -strides[i] : strides[i];
1458 if (s < (unsigned) L1_CACHE_LINE_SIZE
1460 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1462 ret = loop_sizes[i];
1471 /* Determines the distance till the first reuse of each reference in REFS
1472 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1473 memory references in the loop. */
1476 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1479 struct loop *nest, *aloop;
1480 VEC (data_reference_p, heap) *datarefs = NULL;
1481 VEC (ddr_p, heap) *dependences = NULL;
1482 struct mem_ref_group *gr;
1483 struct mem_ref *ref, *refb;
1484 VEC (loop_p, heap) *vloops = NULL;
1485 unsigned *loop_data_size;
1487 unsigned volume, dist, adist;
1489 data_reference_p dr;
1495 /* Find the outermost loop of the loop nest of loop (we require that
1496 there are no sibling loops inside the nest). */
1500 aloop = loop_outer (nest);
1502 if (aloop == current_loops->tree_root
1503 || aloop->inner->next)
1509 /* For each loop, determine the amount of data accessed in each iteration.
1510 We use this to estimate whether the reference is evicted from the
1511 cache before its reuse. */
1512 find_loop_nest (nest, &vloops);
1513 n = VEC_length (loop_p, vloops);
1514 loop_data_size = XNEWVEC (unsigned, n);
1515 volume = volume_of_references (refs);
1519 loop_data_size[i] = volume;
1520 /* Bound the volume by the L2 cache size, since above this bound,
1521 all dependence distances are equivalent. */
1522 if (volume > L2_CACHE_SIZE_BYTES)
1525 aloop = VEC_index (loop_p, vloops, i);
1526 vol = estimated_loop_iterations_int (aloop, false);
1528 vol = expected_loop_iterations (aloop);
1532 /* Prepare the references in the form suitable for data dependence
1533 analysis. We ignore unanalyzable data references (the results
1534 are used just as a heuristic to estimate the temporality of the
1535 references, hence we do not need to worry about correctness). */
1536 for (gr = refs; gr; gr = gr->next)
1537 for (ref = gr->refs; ref; ref = ref->next)
1539 dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);
1543 ref->reuse_distance = volume;
1545 VEC_safe_push (data_reference_p, heap, datarefs, dr);
1548 no_other_refs = false;
1551 for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
1553 dist = self_reuse_distance (dr, loop_data_size, n, loop);
1554 ref = (struct mem_ref *) dr->aux;
1555 if (ref->reuse_distance > dist)
1556 ref->reuse_distance = dist;
1559 ref->independent_p = true;
1562 compute_all_dependences (datarefs, &dependences, vloops, true);
1564 for (i = 0; VEC_iterate (ddr_p, dependences, i, dep); i++)
1566 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1569 ref = (struct mem_ref *) DDR_A (dep)->aux;
1570 refb = (struct mem_ref *) DDR_B (dep)->aux;
1572 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1573 || DDR_NUM_DIST_VECTS (dep) == 0)
1575 /* If the dependence cannot be analyzed, assume that there might be a reuse. */
1579 ref->independent_p = false;
1580 refb->independent_p = false;
1584 /* The distance vectors are normalized to be always lexicographically
1585 positive, hence we cannot tell just from them whether DDR_A comes
1586 before DDR_B or vice versa. However, it is not important,
1587 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1588 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1589 in cache (and marking it as nontemporal would not affect anything). */
1593 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1595 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1598 /* If this is a dependence in the innermost loop (i.e., the
1599 distances in all superloops are zero) and it is not
1600 the trivial self-dependence with distance zero, record that
1601 the references are not completely independent. */
1602 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1604 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1606 ref->independent_p = false;
1607 refb->independent_p = false;
1610 /* Ignore accesses closer than
1611 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1612 so that we use nontemporal prefetches e.g. if a single memory
1613 location is accessed several times in a single iteration of the loop. */
1615 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1623 if (ref->reuse_distance > dist)
1624 ref->reuse_distance = dist;
1625 if (refb->reuse_distance > dist)
1626 refb->reuse_distance = dist;
1629 free_dependence_relations (dependences);
1630 free_data_refs (datarefs);
1631 free (loop_data_size);
1633 if (dump_file && (dump_flags & TDF_DETAILS))
1635 fprintf (dump_file, "Reuse distances:\n");
1636 for (gr = refs; gr; gr = gr->next)
1637 for (ref = gr->refs; ref; ref = ref->next)
1638 fprintf (dump_file, " ref %p distance %u\n",
1639 (void *) ref, ref->reuse_distance);
1643 /* Do a cost-benefit analysis to determine if prefetching is profitable
1644 for the current loop given the following parameters:
1645 AHEAD: the iteration ahead distance,
1646 EST_NITER: the estimated trip count,
1647 NINSNS: estimated number of instructions in the loop,
1648 PREFETCH_COUNT: an estimate of the number of prefetches,
1649 MEM_REF_COUNT: total number of memory references in the loop. */
1652 is_loop_prefetching_profitable (unsigned ahead, HOST_WIDE_INT est_niter,
1653 unsigned ninsns, unsigned prefetch_count,
1654 unsigned mem_ref_count, unsigned unroll_factor)
1656 int insn_to_mem_ratio, insn_to_prefetch_ratio;
1658 if (mem_ref_count == 0)
1661 /* Prefetching improves performance by overlapping cache missing
1662 memory accesses with CPU operations. If the loop does not have
1663 enough CPU operations to overlap with memory operations, prefetching
1664 won't give a significant benefit. One approximate way of checking
1665 this is to require the ratio of instructions to memory references to
1666 be above a certain limit. This approximation works well in practice.
1667 TODO: Implement a more precise computation by estimating the time
1668 for each CPU or memory op in the loop. Time estimates for memory ops
1669 should account for cache misses. */
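/* For instance, a loop with ninsns = 40 and mem_ref_count = 10 has
   insn_to_mem_ratio = 4; whether that is enough depends on the
   PREFETCH_MIN_INSN_TO_MEM_RATIO parameter of the target.  */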
1670 insn_to_mem_ratio = ninsns / mem_ref_count;
1672 if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1674 if (dump_file && (dump_flags & TDF_DETAILS))
1676 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1681 /* Prefetching most likely causes performance degradation when the instruction
1682 to prefetch ratio is too small. Too many prefetch instructions in a loop
1683 may reduce the I-cache performance.
1684 (unroll_factor * ninsns) is used to estimate the number of instructions in
1685 the unrolled loop. This implementation is a bit simplistic -- the number
1686 of issued prefetch instructions is also affected by unrolling. So,
1687 prefetch_mod and the unroll factor should be taken into account when
1688 determining prefetch_count. Also, the number of insns of the unrolled
1689 loop will usually be significantly smaller than the number of insns of the
1690 original loop * unroll_factor (at least the induction variable increases
1691 and the exit branches will get eliminated), so it might be better to use
1692 tree_estimate_loop_size + estimated_unrolled_size. */
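/* An illustration with invented numbers: for ninsns = 30, unroll_factor = 4
   and prefetch_count = 8, the unrolled loop is estimated at 30 * 4 = 120
   insns, giving insn_to_prefetch_ratio = 120 / 8 = 15; the loop is rejected
   only if this falls below MIN_INSN_TO_PREFETCH_RATIO.  */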
1693 insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
1694 if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
1696 if (dump_file && (dump_flags & TDF_DETAILS))
1698 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1699 insn_to_prefetch_ratio);
1703 /* We cannot do further estimation if the trip count is unknown. Just assume
1704 prefetching is profitable. Too aggressive??? */
1708 if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
1710 if (dump_file && (dump_flags & TDF_DETAILS))
1712 "Not prefetching -- loop estimated to roll only %d times\n",
1720 /* Issue prefetch instructions for array references in LOOP. Returns
1721 true if the LOOP was unrolled. */
1724 loop_prefetch_arrays (struct loop *loop)
1726 struct mem_ref_group *refs;
1727 unsigned ahead, ninsns, time, unroll_factor;
1728 HOST_WIDE_INT est_niter;
1729 struct tree_niter_desc desc;
1730 bool unrolled = false, no_other_refs;
1731 unsigned prefetch_count;
1732 unsigned mem_ref_count;
1734 if (optimize_loop_nest_for_size_p (loop))
1736 if (dump_file && (dump_flags & TDF_DETAILS))
1737 fprintf (dump_file, " ignored (cold area)\n");
1741 /* Step 1: gather the memory references. */
1742 refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
1744 /* Step 2: estimate the reuse effects. */
1745 prune_by_reuse (refs);
1747 if (nothing_to_prefetch_p (refs))
1750 determine_loop_nest_reuse (loop, refs, no_other_refs);
1752 /* Step 3: determine the ahead and unroll factor. */
1754 /* FIXME: the time should be weighted by the probabilities of the blocks in the loop body. */
1756 time = tree_num_loop_insns (loop, &eni_time_weights);
1757 ahead = (PREFETCH_LATENCY + time - 1) / time;
1758 est_niter = estimated_loop_iterations_int (loop, false);
1760 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1761 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1764 /* Estimate prefetch count for the unrolled loop. */
1765 prefetch_count = estimate_prefetch_count (refs, unroll_factor);
1766 if (prefetch_count == 0)
1769 if (dump_file && (dump_flags & TDF_DETAILS))
1770 fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
1771 HOST_WIDE_INT_PRINT_DEC "\n"
1772 "insn count %d, mem ref count %d, prefetch count %d\n",
1773 ahead, unroll_factor, est_niter,
1774 ninsns, mem_ref_count, prefetch_count);
1776 if (!is_loop_prefetching_profitable (ahead, est_niter, ninsns, prefetch_count,
1777 mem_ref_count, unroll_factor))
1780 mark_nontemporal_stores (loop, refs);
1782 /* Step 4: what to prefetch? */
1783 if (!schedule_prefetches (refs, unroll_factor, ahead))
1786 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1787 iterations so that we do not issue superfluous prefetches. */
1788 if (unroll_factor != 1)
1790 tree_unroll_loop (loop, unroll_factor,
1791 single_dom_exit (loop), &desc);
1795 /* Step 6: issue the prefetches. */
1796 issue_prefetches (refs, unroll_factor, ahead);
1799 release_mem_refs (refs);
1803 /* Issue prefetch instructions for array references in loops. */
1806 tree_ssa_prefetch_arrays (void)
1810 bool unrolled = false;
1814 /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
1815 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
1816 of processor costs and i486 does not have prefetch, but
1817 -march=pentium4 causes HAVE_prefetch to be true. Ugh. */
1818 || PREFETCH_BLOCK == 0)
1821 if (dump_file && (dump_flags & TDF_DETAILS))
1823 fprintf (dump_file, "Prefetching parameters:\n");
1824 fprintf (dump_file, " simultaneous prefetches: %d\n",
1825 SIMULTANEOUS_PREFETCHES);
1826 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
1827 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
1828 fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
1829 L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
1830 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1831 fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
1832 fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
1833 MIN_INSN_TO_PREFETCH_RATIO);
1834 fprintf (dump_file, " min insn-to-mem ratio: %d \n",
1835 PREFETCH_MIN_INSN_TO_MEM_RATIO);
1836 fprintf (dump_file, "\n");
1839 initialize_original_copy_tables ();
1841 if (!built_in_decls[BUILT_IN_PREFETCH])
1843 tree type = build_function_type (void_type_node,
1844 tree_cons (NULL_TREE,
1845 const_ptr_type_node,
1847 tree decl = add_builtin_function ("__builtin_prefetch", type,
1848 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1850 DECL_IS_NOVOPS (decl) = true;
1851 built_in_decls[BUILT_IN_PREFETCH] = decl;
1854 /* We assume that the cache line size is a power of two, so verify this here. */
1856 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1858 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1860 if (dump_file && (dump_flags & TDF_DETAILS))
1861 fprintf (dump_file, "Processing loop %d:\n", loop->num);
1863 unrolled |= loop_prefetch_arrays (loop);
1865 if (dump_file && (dump_flags & TDF_DETAILS))
1866 fprintf (dump_file, "\n\n");
1872 todo_flags |= TODO_cleanup_cfg;
1875 free_original_copy_tables ();