2 Copyright (C) 2005 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
23 #include "coretypes.h"
28 #include "hard-reg-set.h"
29 #include "basic-block.h"
31 #include "diagnostic.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
38 #include "tree-pass.h"
40 #include "insn-config.h"
43 #include "tree-chrec.h"
44 #include "tree-scalar-evolution.h"
47 #include "langhooks.h"
48 #include "tree-inline.h"
49 #include "tree-data-ref.h"
52 /* This pass inserts prefetch instructions to optimize cache usage during
53 accesses to arrays in loops. It processes loops sequentially and:
55 1) Gathers all memory references in the single loop.
2) For each of the references we decide whether it is profitable to prefetch
   it.  To do that, we evaluate the reuse among the accesses and determine
   two values: PREFETCH_BEFORE (meaning that it only makes sense to do
   prefetching in the first PREFETCH_BEFORE iterations of the loop) and
   PREFETCH_MOD (meaning that it only makes sense to prefetch in the
   iterations of the loop that are zero modulo PREFETCH_MOD).  For example
62 (assuming cache line size is 64 bytes, char has size 1 byte and there
63 is no hardware sequential prefetch):
   char *a;
   for (i = 0; i < max; i++)
     {
       a[255] = ...;		(0)
       a[i] = ...;		(1)
       a[i + 64] = ...;		(2)
       a[16*i] = ...;		(3)
       a[187*i] = ...;		(4)
       a[187*i + 50] = ...;	(5)
     }
76 (0) obviously has PREFETCH_BEFORE 1
77 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
78 location 64 iterations before it, and PREFETCH_MOD 64 (since
79 it hits the same cache line otherwise).
80 (2) has PREFETCH_MOD 64
81 (3) has PREFETCH_MOD 4
82 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
    the cache line accessed by (4) is the same with probability only
    7/32.
85 (5) has PREFETCH_MOD 1 as well.
Additionally, we use data dependence analysis to determine for each
reference the distance to its first reuse; this information is used
to determine the temporality of the issued prefetch instruction.
3) We determine how far ahead we need to prefetch.  The number of
   iterations needed is (time to fetch a cache line) / (time spent in one
   iteration of the loop).  The problem is that we do not know either of
   these values, so we just make a heuristic guess based on a magic
   (possibly) target-specific constant and the size of the loop; a small
   worked sketch follows this comment.
4) Determine which of the references we prefetch.  We take into account
   that there is a maximum number of simultaneous prefetches (provided
   by the machine description).  We issue as many prefetches as possible
   while still staying within this bound (starting with those with the lowest
   prefetch_mod, since they are responsible for most of the cache
   misses).
104 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
105 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
106 prefetching nonaccessed memory.
107 TODO -- actually implement peeling.
109 6) We actually emit the prefetch instructions. ??? Perhaps emit the
110 prefetch instructions with guards in cases where 5) was not sufficient
111 to satisfy the constraints?
-- write and use more general reuse analysis (that could also be used
   in other cache-aimed loop optimizations)
-- make it behave sanely together with the prefetches given by the user
   (now we just ignore them; at the very least we should avoid
   optimizing loops in which the user put his own prefetches)
-- we assume cache line size alignment of arrays; this could be
   improved.  */
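/* Illustrative sketch (not from the original sources): with the 64-byte
   cache line assumed in the example above, the self-reuse values of step 2)
   work out as

     a[i]      step 1    PREFETCH_MOD = 64 / 1  = 64
     a[16*i]   step 16   PREFETCH_MOD = 64 / 16 = 4
     a[187*i]  step 187  step exceeds the line size, so PREFETCH_MOD = 1

   and the lookahead of step 3) is roughly

     ahead = ceil (prefetch latency / time per loop iteration)

   so a 200-cycle latency and a 10-cycle loop body (made-up numbers) give
   ahead = 20, i.e. the iteration working on a[i] would prefetch the line
   containing a[i + 20].  */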
122 /* Magic constants follow. These should be replaced by machine specific
125 /* True if write can be prefetched by a read prefetch. */
127 #ifndef WRITE_CAN_USE_READ_PREFETCH
128 #define WRITE_CAN_USE_READ_PREFETCH 1
131 /* True if read can be prefetched by a write prefetch. */
133 #ifndef READ_CAN_USE_WRITE_PREFETCH
134 #define READ_CAN_USE_WRITE_PREFETCH 0
137 /* The size of the block loaded by a single prefetch. Usually, this is
138 the same as cache line size (at the moment, we only consider one level
139 of cache hierarchy). */
141 #ifndef PREFETCH_BLOCK
142 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
/* Do we have forward hardware sequential prefetching?  */
147 #ifndef HAVE_FORWARD_PREFETCH
148 #define HAVE_FORWARD_PREFETCH 0
/* Do we have backward hardware sequential prefetching?  */
153 #ifndef HAVE_BACKWARD_PREFETCH
154 #define HAVE_BACKWARD_PREFETCH 0
/* In some cases we are only able to determine that there is a certain
   probability that the two accesses hit the same cache line.  In this
   case, we issue the prefetches for both of them if this probability
   is less than (1000 - ACCEPTABLE_MISS_RATE) per mille.  */
162 #ifndef ACCEPTABLE_MISS_RATE
163 #define ACCEPTABLE_MISS_RATE 50
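/* Illustrative arithmetic (not from the original sources): with a 64-byte
   PREFETCH_BLOCK and ACCEPTABLE_MISS_RATE of 50 per mille, the group-reuse
   pruning below only treats two accesses as reliably hitting the same line
   when their reduced distance is at most 64 * 50 / 1000 = 3 bytes; anything
   farther apart keeps its own prefetch.  */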
166 #ifndef HAVE_prefetch
167 #define HAVE_prefetch 0
170 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * L1_CACHE_LINE_SIZE))
171 /* TODO: Add parameter to specify L2 cache size. */
172 #define L2_CACHE_SIZE_BYTES (8 * L1_CACHE_SIZE_BYTES)
/* We consider a memory access nontemporal if it is not reused sooner than
   after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
   accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
   so that we use nontemporal prefetches e.g. if a single memory location
   is accessed several times in a single iteration of the loop.  */
179 #define NONTEMPORAL_FRACTION 16
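/* Illustrative example with made-up cache parameters: if L1_CACHE_SIZE_BYTES
   were 32768 (so L2_CACHE_SIZE_BYTES would be 262144), a reference whose next
   reuse only happens after more than 262144 bytes of other data have been
   touched is treated as nontemporal, while reuses within 32768 / 16 = 2048
   bytes are ignored when computing the reuse distance, so a location touched
   several times within one iteration can still end up nontemporal.  */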
181 /* In case we have to emit a memory fence instruction after the loop that
182 uses nontemporal stores, this defines the builtin to use. */
184 #ifndef FENCE_FOLLOWING_MOVNT
185 #define FENCE_FOLLOWING_MOVNT NULL_TREE
/* The group of references between which reuse may occur.  */
192 tree base; /* Base of the reference. */
193 HOST_WIDE_INT step; /* Step of the reference. */
194 struct mem_ref *refs; /* References in the group. */
195 struct mem_ref_group *next; /* Next group of references. */
198 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
200 #define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
202 /* The memory reference. */
  tree stmt;			/* Statement in which the reference appears.  */
207 tree mem; /* The reference. */
208 HOST_WIDE_INT delta; /* Constant offset of the reference. */
209 struct mem_ref_group *group; /* The group of references it belongs to. */
210 unsigned HOST_WIDE_INT prefetch_mod;
				/* Prefetch only each PREFETCH_MOD-th
				   iteration.  */
213 unsigned HOST_WIDE_INT prefetch_before;
				/* Prefetch only the first PREFETCH_BEFORE
				   iterations.  */
216 unsigned reuse_distance; /* The amount of data accessed before the first
217 reuse of this value. */
218 struct mem_ref *next; /* The next reference in the group. */
219 unsigned write_p : 1; /* Is it a write? */
  unsigned independent_p : 1;	/* True if the reference is independent of
				   all other references inside the loop.  */
222 unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */
  unsigned storent_p : 1;	/* True if we changed the store to a
				   nontemporal one.  */
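/* Example instance (illustrative only): for the loop in the comment at the
   top of the file, a[i] and a[i + 64] end up in a single mem_ref_group with
   base a and step 1, holding two mem_ref entries with delta 0 and delta 64
   (both with prefetch_mod 64), while a[16*i] forms a separate group with
   step 16.  */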
227 /* Dumps information about reference REF to FILE. */
230 dump_mem_ref (FILE *file, struct mem_ref *ref)
232 fprintf (file, "Reference %p:\n", (void *) ref);
234 fprintf (file, " group %p (base ", (void *) ref->group);
235 print_generic_expr (file, ref->group->base, TDF_SLIM);
236 fprintf (file, ", step ");
237 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->group->step);
238 fprintf (file, ")\n");
240 fprintf (file, " delta ");
241 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
242 fprintf (file, "\n");
244 fprintf (file, " %s\n", ref->write_p ? "write" : "read");
246 fprintf (file, "\n");
/* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
   exist.  */
252 static struct mem_ref_group *
253 find_or_create_group (struct mem_ref_group **groups, tree base,
256 struct mem_ref_group *group;
258 for (; *groups; groups = &(*groups)->next)
260 if ((*groups)->step == step
261 && operand_equal_p ((*groups)->base, base, 0))
264 /* Keep the list of groups sorted by decreasing step. */
265 if ((*groups)->step < step)
269 group = XNEW (struct mem_ref_group);
273 group->next = *groups;
279 /* Records a memory reference MEM in GROUP with offset DELTA and write status
280 WRITE_P. The reference occurs in statement STMT. */
283 record_ref (struct mem_ref_group *group, tree stmt, tree mem,
284 HOST_WIDE_INT delta, bool write_p)
286 struct mem_ref **aref;
288 /* Do not record the same address twice. */
289 for (aref = &group->refs; *aref; aref = &(*aref)->next)
      /* It need not be possible for a write reference to reuse a read
	 prefetch, or vice versa.  */
293 if (!WRITE_CAN_USE_READ_PREFETCH
295 && !(*aref)->write_p)
297 if (!READ_CAN_USE_WRITE_PREFETCH
302 if ((*aref)->delta == delta)
306 (*aref) = XNEW (struct mem_ref);
307 (*aref)->stmt = stmt;
309 (*aref)->delta = delta;
310 (*aref)->write_p = write_p;
311 (*aref)->prefetch_before = PREFETCH_ALL;
312 (*aref)->prefetch_mod = 1;
313 (*aref)->reuse_distance = 0;
314 (*aref)->issue_prefetch_p = false;
315 (*aref)->group = group;
316 (*aref)->next = NULL;
317 (*aref)->independent_p = false;
318 (*aref)->storent_p = false;
320 if (dump_file && (dump_flags & TDF_DETAILS))
321 dump_mem_ref (dump_file, *aref);
324 /* Release memory references in GROUPS. */
327 release_mem_refs (struct mem_ref_group *groups)
329 struct mem_ref_group *next_g;
330 struct mem_ref *ref, *next_r;
332 for (; groups; groups = next_g)
334 next_g = groups->next;
335 for (ref = groups->refs; ref; ref = next_r)
344 /* A structure used to pass arguments to idx_analyze_ref. */
348 struct loop *loop; /* Loop of the reference. */
349 tree stmt; /* Statement of the reference. */
350 HOST_WIDE_INT *step; /* Step of the memory reference. */
351 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
/* Analyzes a single INDEX of a memory reference to obtain the information
   described in analyze_ref.  Callback for for_each_index.  */
358 idx_analyze_ref (tree base, tree *index, void *data)
360 struct ar_data *ar_data = (struct ar_data *) data;
361 tree ibase, step, stepsize;
362 HOST_WIDE_INT istep, idelta = 0, imult = 1;
365 if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
366 || TREE_CODE (base) == ALIGN_INDIRECT_REF)
369 if (!simple_iv (ar_data->loop, ar_data->stmt, *index, &iv, false))
374 if (!cst_and_fits_in_hwi (step))
376 istep = int_cst_value (step);
378 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
379 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
381 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
382 ibase = TREE_OPERAND (ibase, 0);
384 if (cst_and_fits_in_hwi (ibase))
386 idelta += int_cst_value (ibase);
387 ibase = build_int_cst (TREE_TYPE (ibase), 0);
390 if (TREE_CODE (base) == ARRAY_REF)
392 stepsize = array_ref_element_size (base);
393 if (!cst_and_fits_in_hwi (stepsize))
395 imult = int_cst_value (stepsize);
401 *ar_data->step += istep;
402 *ar_data->delta += idelta;
/* Tries to express REF_P in the shape &BASE + STEP * iter + DELTA, where DELTA
   and STEP are integer constants and iter is the number of iterations of LOOP.
   The reference occurs in statement STMT.  Strips nonaddressable component
   references from REF_P.  */
414 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
415 HOST_WIDE_INT *step, HOST_WIDE_INT *delta,
418 struct ar_data ar_data;
420 HOST_WIDE_INT bit_offset;
426 /* First strip off the component references. Ignore bitfields. */
427 if (TREE_CODE (ref) == COMPONENT_REF
428 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
429 ref = TREE_OPERAND (ref, 0);
433 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
435 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
436 bit_offset = TREE_INT_CST_LOW (off);
437 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
439 *delta += bit_offset / BITS_PER_UNIT;
442 *base = unshare_expr (ref);
446 ar_data.delta = delta;
447 return for_each_index (base, idx_analyze_ref, &ar_data);
/* Record a memory reference REF to the list REFS.  The reference occurs in
   LOOP in statement STMT and it is a write if WRITE_P is true.  Returns true
   if the reference was recorded, false otherwise.  */
455 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
456 tree ref, bool write_p, tree stmt)
459 HOST_WIDE_INT step, delta;
460 struct mem_ref_group *agrp;
462 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
465 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
466 are integer constants. */
467 agrp = find_or_create_group (refs, base, step);
468 record_ref (agrp, stmt, ref, delta, write_p);
473 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
474 true if there are no other memory references inside the loop. */
476 static struct mem_ref_group *
477 gather_memory_references (struct loop *loop, bool *no_other_refs)
479 basic_block *body = get_loop_body_in_dom_order (loop);
482 block_stmt_iterator bsi;
483 tree stmt, lhs, rhs, call;
484 struct mem_ref_group *refs = NULL;
486 *no_other_refs = true;
  /* Scan the loop body in order, so that the former references precede the
     later ones.  */
490 for (i = 0; i < loop->num_nodes; i++)
493 if (bb->loop_father != loop)
496 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
498 stmt = bsi_stmt (bsi);
499 call = get_call_expr_in (stmt);
500 if (call && !(call_expr_flags (call) & ECF_CONST))
501 *no_other_refs = false;
503 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
505 if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
506 *no_other_refs = false;
510 lhs = GIMPLE_STMT_OPERAND (stmt, 0);
511 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
513 if (REFERENCE_CLASS_P (rhs))
514 *no_other_refs &= gather_memory_references_ref (loop, &refs,
516 if (REFERENCE_CLASS_P (lhs))
517 *no_other_refs &= gather_memory_references_ref (loop, &refs,
526 /* Prune the prefetch candidate REF using the self-reuse. */
529 prune_ref_by_self_reuse (struct mem_ref *ref)
531 HOST_WIDE_INT step = ref->group->step;
532 bool backward = step < 0;
536 /* Prefetch references to invariant address just once. */
537 ref->prefetch_before = 1;
544 if (step > PREFETCH_BLOCK)
547 if ((backward && HAVE_BACKWARD_PREFETCH)
548 || (!backward && HAVE_FORWARD_PREFETCH))
550 ref->prefetch_before = 1;
554 ref->prefetch_mod = PREFETCH_BLOCK / step;
557 /* Divides X by BY, rounding down. */
560 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
  if (x >= 0)
    return x / by;
  else
    return (x + by - 1) / by;
570 /* Prune the prefetch candidate REF using the reuse with BY.
571 If BY_IS_BEFORE is true, BY is before REF in the loop. */
574 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
577 HOST_WIDE_INT step = ref->group->step;
578 bool backward = step < 0;
579 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
580 HOST_WIDE_INT delta = delta_b - delta_r;
581 HOST_WIDE_INT hit_from;
582 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
      /* If the references have the same address, only prefetch the
	 one that comes first in the loop body.  */
589 ref->prefetch_before = 0;
596 /* If the reference addresses are invariant and fall into the
597 same cache line, prefetch just the first one. */
601 if (ddown (ref->delta, PREFETCH_BLOCK)
602 != ddown (by->delta, PREFETCH_BLOCK))
605 ref->prefetch_before = 0;
609 /* Only prune the reference that is behind in the array. */
615 /* Transform the data so that we may assume that the accesses
619 delta_r = PREFETCH_BLOCK - 1 - delta_r;
620 delta_b = PREFETCH_BLOCK - 1 - delta_b;
  /* Check whether the two references are likely to hit the same cache
     line, and how far apart the iterations in which that happens are from
     each other.  */
632 if (step <= PREFETCH_BLOCK)
634 /* The accesses are sure to meet. Let us check when. */
635 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
636 prefetch_before = (hit_from - delta_r + step - 1) / step;
638 if (prefetch_before < ref->prefetch_before)
639 ref->prefetch_before = prefetch_before;
  /* A more complicated case.  First let us ensure that the size of the cache
     line and the step are coprime (here we assume that PREFETCH_BLOCK is a
     power of two).  */
647 prefetch_block = PREFETCH_BLOCK;
  while ((step & 1) == 0
	 && prefetch_block > 1)
    {
      step >>= 1;
      prefetch_block >>= 1;
      delta >>= 1;
    }
656 /* Now step > prefetch_block, and step and prefetch_block are coprime.
657 Determine the probability that the accesses hit the same cache line. */
659 prefetch_before = delta / step;
661 if ((unsigned HOST_WIDE_INT) delta
662 <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
664 if (prefetch_before < ref->prefetch_before)
665 ref->prefetch_before = prefetch_before;
670 /* Try also the following iteration. */
672 delta = step - delta;
673 if ((unsigned HOST_WIDE_INT) delta
674 <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
676 if (prefetch_before < ref->prefetch_before)
677 ref->prefetch_before = prefetch_before;
  /* The ref probably does not reuse BY.  */
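/* Worked example (illustrative, using the accesses from the comment at the
   top of the file): for REF = a[187*i] and BY = a[187*i + 50] we have
   step = 187 and delta = 50.  The step is odd, so the reduction loop above
   leaves prefetch_block at 64, and 50 > 64 * ACCEPTABLE_MISS_RATE / 1000 = 3
   in both the current and the following iteration, so no pruning happens and
   both references keep their own prefetches, matching the ~7/32 same-line
   probability mentioned in the overview.  */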
/* Prune the prefetch candidate REF using the reuses with other references
   in REFS.  */
690 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
692 struct mem_ref *prune_by;
695 prune_ref_by_self_reuse (ref);
697 for (prune_by = refs; prune_by; prune_by = prune_by->next)
705 if (!WRITE_CAN_USE_READ_PREFETCH
707 && !prune_by->write_p)
709 if (!READ_CAN_USE_WRITE_PREFETCH
711 && prune_by->write_p)
714 prune_ref_by_group_reuse (ref, prune_by, before);
718 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
721 prune_group_by_reuse (struct mem_ref_group *group)
723 struct mem_ref *ref_pruned;
725 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
727 prune_ref_by_reuse (ref_pruned, group->refs);
729 if (dump_file && (dump_flags & TDF_DETAILS))
731 fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
733 if (ref_pruned->prefetch_before == PREFETCH_ALL
734 && ref_pruned->prefetch_mod == 1)
735 fprintf (dump_file, " no restrictions");
736 else if (ref_pruned->prefetch_before == 0)
737 fprintf (dump_file, " do not prefetch");
738 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
739 fprintf (dump_file, " prefetch once");
742 if (ref_pruned->prefetch_before != PREFETCH_ALL)
744 fprintf (dump_file, " prefetch before ");
745 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
746 ref_pruned->prefetch_before);
748 if (ref_pruned->prefetch_mod != 1)
750 fprintf (dump_file, " prefetch mod ");
751 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
752 ref_pruned->prefetch_mod);
755 fprintf (dump_file, "\n");
760 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
763 prune_by_reuse (struct mem_ref_group *groups)
765 for (; groups; groups = groups->next)
766 prune_group_by_reuse (groups);
769 /* Returns true if we should issue prefetch for REF. */
772 should_issue_prefetch_p (struct mem_ref *ref)
  /* For now do not issue prefetches for only the first few iterations
     of the loop.  */
776 if (ref->prefetch_before != PREFETCH_ALL)
779 /* Do not prefetch nontemporal stores. */
786 /* Decide which of the prefetch candidates in GROUPS to prefetch.
787 AHEAD is the number of iterations to prefetch ahead (which corresponds
788 to the number of simultaneous instances of one prefetch running at a
   time).  UNROLL_FACTOR is the factor by which the loop is going to be
790 unrolled. Returns true if there is anything to prefetch. */
793 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
796 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
797 unsigned slots_per_prefetch;
801 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
802 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
804 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
805 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
806 it will need a prefetch slot. */
807 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
808 if (dump_file && (dump_flags & TDF_DETAILS))
809 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
812 /* For now we just take memory references one by one and issue
813 prefetches for as many as possible. The groups are sorted
814 starting with the largest step, since the references with
815 large step are more likely to cause many cache misses. */
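  /* Illustrative arithmetic (made-up numbers): with AHEAD = 8 and
     UNROLL_FACTOR = 4, each prefetch occupies (8 + 2) / 4 = 2 slots; a
     reference with PREFETCH_MOD 1 then needs ceil (4 / 1) = 4 prefetch
     instructions per unrolled body, i.e. 8 slots.  If 6 of the
     SIMULTANEOUS_PREFETCHES slots remain, 2 * 6 >= 8, so the prefetch is
     still issued and the remaining slot count drops to zero.  */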
817 for (; groups; groups = groups->next)
818 for (ref = groups->refs; ref; ref = ref->next)
820 if (!should_issue_prefetch_p (ref))
      /* If we need to prefetch the reference each PREFETCH_MOD iterations,
	 and we unroll the loop UNROLL_FACTOR times, we need to insert
	 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
	 iteration of the unrolled loop.  */
827 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
828 / ref->prefetch_mod);
829 prefetch_slots = n_prefetches * slots_per_prefetch;
831 /* If more than half of the prefetches would be lost anyway, do not
832 issue the prefetch. */
833 if (2 * remaining_prefetch_slots < prefetch_slots)
836 ref->issue_prefetch_p = true;
838 if (remaining_prefetch_slots <= prefetch_slots)
840 remaining_prefetch_slots -= prefetch_slots;
/* Determine whether there is any reference suitable for prefetching
   in GROUPS.  */
851 anything_to_prefetch_p (struct mem_ref_group *groups)
855 for (; groups; groups = groups->next)
856 for (ref = groups->refs; ref; ref = ref->next)
857 if (should_issue_prefetch_p (ref))
/* Issue prefetches for the reference REF into the loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
   is the factor by which LOOP was unrolled.  */
868 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
871 tree addr, addr_base, prefetch, write_p, local;
872 block_stmt_iterator bsi;
873 unsigned n_prefetches, ap;
874 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
876 if (dump_file && (dump_flags & TDF_DETAILS))
877 fprintf (dump_file, "Issued%s prefetch for %p.\n",
878 nontemporal ? " nontemporal" : "",
881 bsi = bsi_for_stmt (ref->stmt);
883 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
884 / ref->prefetch_mod);
885 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
886 addr_base = force_gimple_operand_bsi (&bsi, unshare_expr (addr_base),
887 true, NULL, true, BSI_SAME_STMT);
888 write_p = ref->write_p ? integer_one_node : integer_zero_node;
889 local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);
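  /* Illustration of what the loop below emits (not taken from a real dump):
     for a read of a[i] from the char array in the overview, with step 1,
     prefetch_mod 1, AHEAD = 20 and UNROLL_FACTOR = 4, n_prefetches is 4 and
     the generated statements are roughly

       __builtin_prefetch (&a[i] + 20, 0, 3);
       __builtin_prefetch (&a[i] + 21, 0, 3);
       __builtin_prefetch (&a[i] + 22, 0, 3);
       __builtin_prefetch (&a[i] + 23, 0, 3);

     with the last argument 0 instead of 3 for a nontemporal reference.  */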
891 for (ap = 0; ap < n_prefetches; ap++)
893 /* Determine the address to prefetch. */
894 delta = (ahead + ap * ref->prefetch_mod) * ref->group->step;
895 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
896 addr_base, size_int (delta));
897 addr = force_gimple_operand_bsi (&bsi, unshare_expr (addr), true, NULL,
898 true, BSI_SAME_STMT);
900 /* Create the prefetch instruction. */
901 prefetch = build_call_expr (built_in_decls[BUILT_IN_PREFETCH],
902 3, addr, write_p, local);
903 bsi_insert_before (&bsi, prefetch, BSI_SAME_STMT);
/* Issue prefetches for the references in GROUPS into the loop as decided
   before.  AHEAD is the number of iterations to prefetch ahead.
   UNROLL_FACTOR is the factor by which LOOP was unrolled.  */
912 issue_prefetches (struct mem_ref_group *groups,
913 unsigned unroll_factor, unsigned ahead)
917 for (; groups; groups = groups->next)
918 for (ref = groups->refs; ref; ref = ref->next)
919 if (ref->issue_prefetch_p)
920 issue_prefetch_ref (ref, unroll_factor, ahead);
/* Returns true if REF is a memory write for which a nontemporal store insn
   can be used.  */
927 nontemporal_store_p (struct mem_ref *ref)
929 enum machine_mode mode;
  /* REF must be a write that is not reused.  We require it to be independent
     of all other memory references in the loop, as the nontemporal stores may
     be reordered with respect to other memory references.  */
936 || !ref->independent_p
937 || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
940 /* Check that we have the storent instruction for the mode. */
941 mode = TYPE_MODE (TREE_TYPE (ref->mem));
945 code = storent_optab->handlers[mode].insn_code;
946 return code != CODE_FOR_nothing;
949 /* If REF is a nontemporal store, we mark the corresponding modify statement
950 and return true. Otherwise, we return false. */
953 mark_nontemporal_store (struct mem_ref *ref)
955 if (!nontemporal_store_p (ref))
958 if (dump_file && (dump_flags & TDF_DETAILS))
959 fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
962 MOVE_NONTEMPORAL (ref->stmt) = true;
963 ref->storent_p = true;
968 /* Issue a memory fence instruction after LOOP. */
971 emit_mfence_after_loop (struct loop *loop)
973 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
976 block_stmt_iterator bsi;
979 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
981 call = build_function_call_expr (FENCE_FOLLOWING_MOVNT, NULL_TREE);
983 if (!single_pred_p (exit->dest)
984 /* If possible, we prefer not to insert the fence on other paths
986 && !(exit->flags & EDGE_ABNORMAL))
987 split_loop_exit_edge (exit);
988 bsi = bsi_after_labels (exit->dest);
990 bsi_insert_before (&bsi, call, BSI_NEW_STMT);
991 mark_virtual_ops_for_renaming (call);
994 VEC_free (edge, heap, exits);
995 update_ssa (TODO_update_ssa_only_virtuals);
998 /* Returns true if we can use storent in loop, false otherwise. */
1001 may_use_storent_in_loop_p (struct loop *loop)
1005 if (loop->inner != NULL)
1008 /* If we must issue a mfence insn after using storent, check that there
1009 is a suitable place for it at each of the loop exits. */
1010 if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1012 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1016 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
1017 if ((exit->flags & EDGE_ABNORMAL)
1018 && exit->dest == EXIT_BLOCK_PTR)
1021 VEC_free (edge, heap, exits);
1027 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1028 references in the loop. */
1031 mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1033 struct mem_ref *ref;
1036 if (!may_use_storent_in_loop_p (loop))
1039 for (; groups; groups = groups->next)
1040 for (ref = groups->refs; ref; ref = ref->next)
1041 any |= mark_nontemporal_store (ref);
1043 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1044 emit_mfence_after_loop (loop);
/* Determines whether we can profitably unroll LOOP FACTOR times, and if
   this is the case, fills in DESC with the description of the number of
   iterations.  */
1052 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1055 if (!can_unroll_loop_p (loop, factor, desc))
1058 /* We only consider loops without control flow for unrolling. This is not
1059 a hard restriction -- tree_unroll_loop works with arbitrary loops
1060 as well; but the unrolling/prefetching is usually more profitable for
1061 loops consisting of a single basic block, and we want to limit the
1063 if (loop->num_nodes > 2)
/* Determine the factor by which to unroll LOOP, from the information
   contained in the list of memory references REFS.  The description of the
   number of iterations of LOOP is stored in DESC.  NINSNS is the number of
   insns of the LOOP.  EST_NITER is the estimated number of iterations of
   the loop, or -1 if no estimate is available.  */
1076 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
1077 unsigned ninsns, struct tree_niter_desc *desc,
1078 HOST_WIDE_INT est_niter)
1080 unsigned upper_bound;
1081 unsigned nfactor, factor, mod_constraint;
1082 struct mem_ref_group *agp;
1083 struct mem_ref *ref;
1085 /* First check whether the loop is not too large to unroll. We ignore
1086 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1087 from unrolling them enough to make exactly one cache line covered by each
1088 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1089 us from unrolling the loops too many times in cases where we only expect
     gains from better scheduling and decreasing loop overhead, which is not
     the case here.  */
1092 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
  /* If we unrolled the loop more times than it iterates, the unrolled version
     of the loop would never be entered.  */
1096 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1097 upper_bound = est_niter;
1099 if (upper_bound <= 1)
  /* Choose the factor so that we may prefetch each cache line just once,
     but bound the unrolling by UPPER_BOUND; an illustrative example follows
     this comment.  */
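  /* Illustrative run (made-up numbers): with ninsns = 10 and
     PARAM_MAX_UNROLLED_INSNS = 200, upper_bound is 20.  For references with
     prefetch_mod 4 and 64, the factor grows 1 -> lcm (4, 1) = 4, but
     lcm (64, 4) = 64 exceeds 20, so the final unroll factor stays 4.  */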
1105 for (agp = refs; agp; agp = agp->next)
1106 for (ref = agp->refs; ref; ref = ref->next)
1107 if (should_issue_prefetch_p (ref))
1109 mod_constraint = ref->prefetch_mod;
1110 nfactor = least_common_multiple (mod_constraint, factor);
1111 if (nfactor <= upper_bound)
1115 if (!should_unroll_loop_p (loop, desc, factor))
/* Returns the total volume of the memory references REFS, taking into account
   reuses in the innermost loop and cache line size.  TODO -- we should also
   take into account reuses across the iterations of the loops in the loop
   nest.  */
1127 volume_of_references (struct mem_ref_group *refs)
1129 unsigned volume = 0;
1130 struct mem_ref_group *gr;
1131 struct mem_ref *ref;
1133 for (gr = refs; gr; gr = gr->next)
1134 for (ref = gr->refs; ref; ref = ref->next)
1136 /* Almost always reuses another value? */
1137 if (ref->prefetch_before != PREFETCH_ALL)
      /* If several iterations access the same cache line, use the size of
	 the line divided by this number.  Otherwise, a cache line is
	 accessed in each iteration.  TODO -- in the latter case, we should
	 take the size of the reference into account, rounding it up to a
	 multiple of the cache line size.  */
1145 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1150 /* Returns the volume of memory references accessed across VEC iterations of
1151 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1152 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1155 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1159 for (i = 0; i < n; i++)
1166 gcc_assert (vec[i] > 0);
1168 /* We ignore the parts of the distance vector in subloops, since usually
1169 the numbers of iterations are much smaller. */
1170 return loop_sizes[i] * vec[i];
/* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
   at the position corresponding to the loop of the step.  N is the depth
   of the considered loop nest, and LOOP is its innermost loop.  */
1178 add_subscript_strides (tree access_fn, unsigned stride,
1179 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1183 HOST_WIDE_INT astep;
1184 unsigned min_depth = loop_depth (loop) - n;
1186 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1188 aloop = get_chrec_loop (access_fn);
1189 step = CHREC_RIGHT (access_fn);
1190 access_fn = CHREC_LEFT (access_fn);
1192 if ((unsigned) loop_depth (aloop) <= min_depth)
1195 if (host_integerp (step, 0))
1196 astep = tree_low_cst (step, 0);
1198 astep = L1_CACHE_LINE_SIZE;
1200 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1205 /* Returns the volume of memory references accessed between two consecutive
1206 self-reuses of the reference DR. We consider the subscripts of DR in N
1207 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1208 loops. LOOP is the innermost loop of the current loop nest. */
1211 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1214 tree stride, access_fn;
1215 HOST_WIDE_INT *strides, astride;
1216 VEC (tree, heap) *access_fns;
1217 tree ref = DR_REF (dr);
1218 unsigned i, ret = ~0u;
1220 /* In the following example:
1222 for (i = 0; i < N; i++)
       for (j = 0; j < N; j++)
         use (a[j][i]);
1225 the same cache line is accessed each N steps (except if the change from
1226 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1227 we cannot rely purely on the results of the data dependence analysis.
     Instead, we compute the stride of the reference in each loop, and consider
     the innermost loop in which the stride is less than the cache size.  */
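  /* Worked sketch (illustrative): for the a[j][i] nest above with N = 100 and
     4-byte elements, the stride in the j loop is 400 bytes (at least a cache
     line) while the stride in the i loop is 4 bytes (less than a line), so
     the self-reuse distance becomes the data volume of one iteration of the
     i loop, i.e. roughly the footprint of a whole pass over the j loop.  */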
1232 strides = XCNEWVEC (HOST_WIDE_INT, n);
1233 access_fns = DR_ACCESS_FNS (dr);
1235 for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
1237 /* Keep track of the reference corresponding to the subscript, so that we
1239 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1240 ref = TREE_OPERAND (ref, 0);
1242 if (TREE_CODE (ref) == ARRAY_REF)
1244 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1245 if (host_integerp (stride, 1))
1246 astride = tree_low_cst (stride, 1);
1248 astride = L1_CACHE_LINE_SIZE;
1250 ref = TREE_OPERAND (ref, 0);
1255 add_subscript_strides (access_fn, astride, strides, n, loop);
1258 for (i = n; i-- > 0; )
1260 unsigned HOST_WIDE_INT s;
1262 s = strides[i] < 0 ? -strides[i] : strides[i];
      if (s < (unsigned) L1_CACHE_LINE_SIZE
	  && (loop_sizes[i]
	      > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1268 ret = loop_sizes[i];
/* Determines the distance to the first reuse of each reference in REFS
1278 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1279 memory references in the loop. */
1282 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1285 struct loop *nest, *aloop;
1286 VEC (data_reference_p, heap) *datarefs = NULL;
1287 VEC (ddr_p, heap) *dependences = NULL;
1288 struct mem_ref_group *gr;
1289 struct mem_ref *ref, *refb;
1290 VEC (loop_p, heap) *vloops = NULL;
1291 unsigned *loop_data_size;
1293 unsigned volume, dist, adist;
1295 data_reference_p dr;
1301 /* Find the outermost loop of the loop nest of loop (we require that
1302 there are no sibling loops inside the nest). */
1306 aloop = loop_outer (nest);
1308 if (aloop == current_loops->tree_root
1309 || aloop->inner->next)
1315 /* For each loop, determine the amount of data accessed in each iteration.
1316 We use this to estimate whether the reference is evicted from the
1317 cache before its reuse. */
1318 find_loop_nest (nest, &vloops);
1319 n = VEC_length (loop_p, vloops);
1320 loop_data_size = XNEWVEC (unsigned, n);
1321 volume = volume_of_references (refs);
1325 loop_data_size[i] = volume;
1326 /* Bound the volume by the L2 cache size, since above this bound,
1327 all dependence distances are equivalent. */
1328 if (volume > L2_CACHE_SIZE_BYTES)
1331 aloop = VEC_index (loop_p, vloops, i);
1332 vol = estimated_loop_iterations_int (aloop, false);
1334 vol = expected_loop_iterations (aloop);
  /* Prepare the references in the form suitable for data dependence
     analysis.  We ignore unanalyzable data references (the results
     are used just as a heuristic to estimate temporality of the
     references, hence we do not need to worry about correctness).  */
1342 for (gr = refs; gr; gr = gr->next)
1343 for (ref = gr->refs; ref; ref = ref->next)
1345 dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);
1349 ref->reuse_distance = volume;
1351 VEC_safe_push (data_reference_p, heap, datarefs, dr);
1354 no_other_refs = false;
1357 for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
1359 dist = self_reuse_distance (dr, loop_data_size, n, loop);
1361 if (ref->reuse_distance > dist)
1362 ref->reuse_distance = dist;
1365 ref->independent_p = true;
1368 compute_all_dependences (datarefs, &dependences, vloops, true);
1370 for (i = 0; VEC_iterate (ddr_p, dependences, i, dep); i++)
1372 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1375 ref = DDR_A (dep)->aux;
1376 refb = DDR_B (dep)->aux;
1378 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1379 || DDR_NUM_DIST_VECTS (dep) == 0)
	  /* If the dependence cannot be analyzed, assume that there might be
	     a reuse.  */
1385 ref->independent_p = false;
1386 refb->independent_p = false;
1390 /* The distance vectors are normalized to be always lexicographically
1391 positive, hence we cannot tell just from them whether DDR_A comes
1392 before DDR_B or vice versa. However, it is not important,
1393 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1394 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
	 in cache (and marking it as nontemporal would not affect
	 anything).  */
1399 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1401 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1404 /* If this is a dependence in the innermost loop (i.e., the
1405 distances in all superloops are zero) and it is not
1406 the trivial self-dependence with distance zero, record that
1407 the references are not completely independent. */
1408 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1410 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1412 ref->independent_p = false;
1413 refb->independent_p = false;
1416 /* Ignore accesses closer than
1417 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
	     so that we use nontemporal prefetches e.g. if a single memory
	     location is accessed several times in a single iteration of
	     the loop.  */
1421 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1429 if (ref->reuse_distance > dist)
1430 ref->reuse_distance = dist;
1431 if (refb->reuse_distance > dist)
1432 refb->reuse_distance = dist;
1435 free_dependence_relations (dependences);
1436 free_data_refs (datarefs);
1437 free (loop_data_size);
1439 if (dump_file && (dump_flags & TDF_DETAILS))
1441 fprintf (dump_file, "Reuse distances:\n");
1442 for (gr = refs; gr; gr = gr->next)
1443 for (ref = gr->refs; ref; ref = ref->next)
1444 fprintf (dump_file, " ref %p distance %u\n",
1445 (void *) ref, ref->reuse_distance);
1449 /* Issue prefetch instructions for array references in LOOP. Returns
1450 true if the LOOP was unrolled. */
1453 loop_prefetch_arrays (struct loop *loop)
1455 struct mem_ref_group *refs;
1456 unsigned ahead, ninsns, time, unroll_factor;
1457 HOST_WIDE_INT est_niter;
1458 struct tree_niter_desc desc;
1459 bool unrolled = false, no_other_refs;
1461 if (!maybe_hot_bb_p (loop->header))
1463 if (dump_file && (dump_flags & TDF_DETAILS))
1464 fprintf (dump_file, " ignored (cold area)\n");
1468 /* Step 1: gather the memory references. */
1469 refs = gather_memory_references (loop, &no_other_refs);
1471 /* Step 2: estimate the reuse effects. */
1472 prune_by_reuse (refs);
1474 if (!anything_to_prefetch_p (refs))
1477 determine_loop_nest_reuse (loop, refs, no_other_refs);
1479 /* Step 3: determine the ahead and unroll factor. */
  /* FIXME: the time should be weighted by the probabilities of the blocks in
     the loop body.  */
1483 time = tree_num_loop_insns (loop, &eni_time_weights);
1484 ahead = (PREFETCH_LATENCY + time - 1) / time;
1485 est_niter = estimated_loop_iterations_int (loop, false);
  /* The prefetches will run for AHEAD iterations of the original loop.  Unless
     the loop rolls at least AHEAD times, prefetching the references does not
     make sense.  */
1490 if (est_niter >= 0 && est_niter <= (HOST_WIDE_INT) ahead)
1492 if (dump_file && (dump_flags & TDF_DETAILS))
1494 "Not prefetching -- loop estimated to roll only %d times\n",
1499 mark_nontemporal_stores (loop, refs);
1501 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1502 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1504 if (dump_file && (dump_flags & TDF_DETAILS))
1505 fprintf (dump_file, "Ahead %d, unroll factor %d\n", ahead, unroll_factor);
1507 /* Step 4: what to prefetch? */
1508 if (!schedule_prefetches (refs, unroll_factor, ahead))
1511 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1512 iterations so that we do not issue superfluous prefetches. */
1513 if (unroll_factor != 1)
1515 tree_unroll_loop (loop, unroll_factor,
1516 single_dom_exit (loop), &desc);
1520 /* Step 6: issue the prefetches. */
1521 issue_prefetches (refs, unroll_factor, ahead);
1524 release_mem_refs (refs);
1528 /* Issue prefetch instructions for array references in loops. */
1531 tree_ssa_prefetch_arrays (void)
1535 bool unrolled = false;
  /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
     -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
     of the processor costs and i486 does not have prefetch, but
     -march=pentium4 causes HAVE_prefetch to be true.  Ugh.  */
1543 || PREFETCH_BLOCK == 0)
1546 if (dump_file && (dump_flags & TDF_DETAILS))
1548 fprintf (dump_file, "Prefetching parameters:\n");
1549 fprintf (dump_file, " simultaneous prefetches: %d\n",
1550 SIMULTANEOUS_PREFETCHES);
1551 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
1552 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
1553 fprintf (dump_file, " L1 cache size: %d lines, %d bytes\n",
1554 L1_CACHE_SIZE, L1_CACHE_SIZE_BYTES);
1555 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1556 fprintf (dump_file, " L2 cache size: %d bytes\n", L2_CACHE_SIZE_BYTES);
1557 fprintf (dump_file, "\n");
1560 initialize_original_copy_tables ();
1562 if (!built_in_decls[BUILT_IN_PREFETCH])
1564 tree type = build_function_type (void_type_node,
1565 tree_cons (NULL_TREE,
1566 const_ptr_type_node,
1568 tree decl = add_builtin_function ("__builtin_prefetch", type,
1569 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1571 DECL_IS_NOVOPS (decl) = true;
1572 built_in_decls[BUILT_IN_PREFETCH] = decl;
  /* We assume that the cache line size is a power of two, so verify this
     here.  */
1577 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1579 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1581 if (dump_file && (dump_flags & TDF_DETAILS))
1582 fprintf (dump_file, "Processing loop %d:\n", loop->num);
1584 unrolled |= loop_prefetch_arrays (loop);
1586 if (dump_file && (dump_flags & TDF_DETAILS))
1587 fprintf (dump_file, "\n\n");
1593 todo_flags |= TODO_cleanup_cfg;
1596 free_original_copy_tables ();