/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"

#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
#define CHECK (false)
#endif
/* Return the major type present in the DS.  */
static enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}
/* Return equivalent dep_status.  */
static ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}
/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
static void
init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note kind, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_KIND (dep) = kind;
  DEP_STATUS (dep) = ds;
}
/* Init DEP with the arguments.
   While most of the scheduler (including targets) only need the major type
   of the dependency, it is convenient to hide full dep_status from them.  */
static void
init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST) != 0)
    ds = dk_to_ds (kind);
  else
    ds = -1;

  init_dep_1 (dep, pro, con, kind, ds);
}
/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}
/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Return true if dep_link L is consistent.  */
static bool
dep_link_consistent_p (dep_link_t l)
{
  dep_link_t next = DEP_LINK_NEXT (l);

  return (next == NULL
          || &DEP_LINK_NEXT (l) == DEP_LINK_PREV_NEXTP (next));
}
/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXTP.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
              && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}
/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));
}
/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  /* Though this property is not used anywhere but in the assert in
     attach_dep_link (), this can prevent latent errors.  */
  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}
/* Move LINK from whatever list it is now to L.  */
static void
move_dep_link (dep_link_t link, deps_list_t l)
{
  detach_dep_link (link);
  add_to_deps_list (link, l);
}
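
/* For illustration only, not part of the pass: because each link records
   PREV_NEXTP, the address of the pointer that points at it, a link can be
   spliced in or out in constant time without knowing which list it is on.
   Assuming hypothetical lists L1 and L2 and fresh links A and B:

     attach_dep_link (a, &DEPS_LIST_FIRST (l1));    L1: A
     attach_dep_link (b, &DEPS_LIST_FIRST (l1));    L1: B, A
     move_dep_link (a, l2);                         L1: B      L2: A  */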
/* Check L's and its successors' consistency.
   This is, potentially, an expensive check, hence it should be guarded by
   ENABLE_CHECKING at all times.  */
static bool
dep_links_consistent_p (dep_link_t l)
{
  while (l != NULL)
    {
      if (dep_link_consistent_p (l))
        l = DEP_LINK_NEXT (l);
      else
        return false;
    }

  return true;
}
/* Dump dep_nodes starting from L.  */
static void
dump_dep_links (FILE *dump, dep_link_t l)
{
  while (l != NULL)
    {
      dep_t d = DEP_LINK_DEP (l);

      fprintf (dump, "%d%c>%d ", INSN_UID (DEP_PRO (d)),
               dep_link_consistent_p (l) ? '-' : '!', INSN_UID (DEP_CON (d)));

      l = DEP_LINK_NEXT (l);
    }

  fprintf (dump, "\n");
}
/* Dump dep_nodes starting from L to stderr.  */
static void
debug_dep_links (dep_link_t l)
{
  dump_dep_links (stderr, l);
}
/* Obstack to allocate dep_nodes and deps_lists on.  */
static struct obstack deps_obstack;

/* Obstack to hold forward dependencies lists (deps_list_t).  */
static struct obstack *dl_obstack = &deps_obstack;

/* Obstack to hold all dependency nodes (dep_node_t).  */
static struct obstack *dn_obstack = &deps_obstack;
/* Functions to operate with dependences lists - deps_list_t.  */

/* Allocate deps_list.

   If ON_OBSTACK_P is true, allocate the list on the obstack.  This is done for
   INSN_FORW_DEPS lists because they should live till the end of scheduling.

   INSN_BACK_DEPS and INSN_RESOLVED_BACK_DEPS lists are allocated on the free
   store and are freed in haifa-sched.c: schedule_insn ().  */
static deps_list_t
alloc_deps_list (bool on_obstack_p)
{
  if (on_obstack_p)
    return obstack_alloc (dl_obstack, sizeof (struct _deps_list));
  else
    return xmalloc (sizeof (struct _deps_list));
}
/* Initialize deps_list L.  */
static void
init_deps_list (deps_list_t l)
{
  DEPS_LIST_FIRST (l) = NULL;
}
/* Create (allocate and init) deps_list.
   The meaning of ON_OBSTACK_P is the same as in alloc_deps_list ().  */
deps_list_t
create_deps_list (bool on_obstack_p)
{
  deps_list_t l = alloc_deps_list (on_obstack_p);

  init_deps_list (l);
  return l;
}
/* Free the dep_nodes present in L.  */
void
clear_deps_list (deps_list_t l)
{
  /* All dep_nodes are allocated on the dn_obstack.  They'll be freed with
     the obstack.  */

  DEPS_LIST_FIRST (l) = NULL;
}

/* Free deps_list L.  */
void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  free (l);
}
/* Delete (clear and free) deps_list L.  */
void
delete_deps_list (deps_list_t l)
{
  clear_deps_list (l);
  free_deps_list (l);
}

/* Return true if L is empty.  */
bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_FIRST (l) == NULL;
}

/* Check L's consistency.
   This is, potentially, an expensive check, hence it should be guarded by
   ENABLE_CHECKING at all times.  */
static bool
deps_list_consistent_p (deps_list_t l)
{
  dep_link_t first = DEPS_LIST_FIRST (l);

  return (first == NULL
          || (&DEPS_LIST_FIRST (l) == DEP_LINK_PREV_NEXTP (first)
              && dep_links_consistent_p (first)));
}
/* Dump deps_list L to file F.  */
static void
dump_deps_list (FILE *f, deps_list_t l)
{
  dump_dep_links (f, DEPS_LIST_FIRST (l));
}

/* Dump L to STDERR.  */
static void
debug_deps_list (deps_list_t l)
{
  dump_deps_list (stderr, l);
}
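
/* For illustration only: the debug_* routines above are intended to be
   called by hand from the debugger, e.g.

     (gdb) call debug_deps_list (INSN_BACK_DEPS (insn))

   Each dep is printed as "PRO->CON" using insn UIDs, with '!' in place
   of '-' marking a link that fails dep_link_consistent_p ().  */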
/* Add a dependency described by DEP to the list L.
   L should be either INSN_BACK_DEPS or INSN_RESOLVED_BACK_DEPS.  */
static void
add_back_dep_to_deps_list (deps_list_t l, dep_t dep_from)
{
  dep_node_t n = (dep_node_t) obstack_alloc (dn_obstack,
                                             sizeof (*n));
  dep_t dep_to = DEP_NODE_DEP (n);
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  copy_dep (dep_to, dep_from);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NODE (forw) = n;

  /* There is no particular need to initialize these four fields except to
     make the assert in attach_dep_link () happy.  */
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  add_to_deps_list (back, l);
}
/* Find the dep_link with producer PRO in deps_list L.  */
static dep_link_t
find_link_by_pro_in_deps_list (deps_list_t l, rtx pro)
{
  dep_link_t link;

  FOR_EACH_DEP_LINK (link, l)
    if (DEP_LINK_PRO (link) == pro)
      return link;

  return NULL;
}

/* Find the dep_link with consumer CON in deps_list L.  */
static dep_link_t
find_link_by_con_in_deps_list (deps_list_t l, rtx con)
{
  dep_link_t link;

  FOR_EACH_DEP_LINK (link, l)
    if (DEP_LINK_CON (link) == con)
      return link;

  return NULL;
}
/* Make a copy of FROM in TO, substituting CON for the consumer.
   TO and FROM should be RESOLVED_BACK_DEPS lists.  */
void
copy_deps_list_change_con (deps_list_t to, deps_list_t from, rtx con)
{
  dep_link_t l;

  gcc_assert (deps_list_empty_p (to));

  FOR_EACH_DEP_LINK (l, from)
    {
      add_back_dep_to_deps_list (to, DEP_LINK_DEP (l));
      DEP_LINK_CON (DEPS_LIST_FIRST (to)) = con;
    }
}
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;

/* The following enumeration values tell us what dependencies we
   should use to implement the barrier.  We use true-dependencies for
   TRUE_BARRIER and anti-dependencies for MOVE_BARRIER.  */
enum reg_pending_barrier_mode
{
  NOT_A_BARRIER = 0,
  MOVE_BARRIER,
  TRUE_BARRIER
};

static enum reg_pending_barrier_mode reg_pending_barrier;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmaps for the true dependency cache are
   allocated, then the other caches are allocated as well.  */
static bitmap_head *true_dependency_cache;
static bitmap_head *output_dependency_cache;
static bitmap_head *anti_dependency_cache;
static bitmap_head *spec_dependency_cache;
static int cache_size;
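
/* For illustration only: a cache query is just a pair of LUIDs used as
   bitmap indices.  A sketch of the test performed in
   add_or_update_back_dep_1 () below, for hypothetical insns INSN and ELEM:

     if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
                       INSN_LUID (elem)))
       ... the dependence is already known; skip the list walk ...  */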
/* To speed up checking consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   could be switching off checking duplication of insns in forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static bitmap_head *forward_dependency_cache;
#endif
static int deps_may_trap_p (rtx);
static void add_dependence_list (rtx, rtx, int, enum reg_note);
static void add_dependence_list_and_free (rtx, rtx *, int, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);

static void flush_pending_lists (struct deps *, rtx, int, int);
static void sched_analyze_1 (struct deps *, rtx, rtx);
static void sched_analyze_2 (struct deps *, rtx, rtx);
static void sched_analyze_insn (struct deps *, rtx, rtx);

static rtx sched_get_condition (rtx);
static int conditions_mutex_p (rtx, rtx);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_back_dep_1 (rtx, rtx,
                               enum reg_note, ds_t, rtx, rtx, dep_link_t **);
static enum DEPS_ADJUST_RESULT add_or_update_back_dep_1 (rtx, rtx,
                               enum reg_note, ds_t, rtx, rtx, dep_link_t **);
static void add_back_dep (rtx, rtx, enum reg_note, ds_t);

static void adjust_add_sorted_back_dep (rtx, dep_link_t, dep_link_t *);
static void adjust_back_add_forw_dep (rtx, dep_link_t *);
static void delete_forw_dep (dep_link_t);
static dw_t estimate_dep_weak (rtx, rtx);
#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
static void check_dep_status (enum reg_note, ds_t, bool);
#endif
#endif
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (rtx mem)
{
  rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      rtx t = get_reg_known_value (REGNO (addr));
      if (t)
        addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
/* Find the condition under which INSN is executed.  */

static rtx
sched_get_condition (rtx insn)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (pat == 0)
    return 0;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
        return 0;
      return gen_rtx_fmt_ee (revcode, GET_MODE (cond), XEXP (cond, 0),
                             XEXP (cond, 1));
    }

  return 0;
}
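
/* For illustration only, the two conditional-jump shapes handled above
   (a sketch; operands vary by target):

     (set (pc) (if_then_else (eq (reg 0) (const_int 0))
                             (label_ref 23)
                             (pc)))
       -> the condition is the EQ itself.

     (set (pc) (if_then_else (eq (reg 0) (const_int 0))
                             (pc)
                             (label_ref 23)))
       -> the branch is on the false arm, so the reversed
          condition (NE) is returned.  */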
/* Return nonzero if conditions COND1 and COND2 can never both be true.  */

static int
conditions_mutex_p (rtx cond1, rtx cond2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) == reversed_comparison_code (cond2, NULL)
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (rtx insn1, rtx insn2)
{
  rtx cond1, cond2;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition (insn1);
      cond2 = sched_get_condition (insn2);
      if (cond1 && cond2
          && conditions_mutex_p (cond1, cond2)
          /* Make sure first instruction doesn't affect condition of second
             instruction if switched.  */
          && !modified_in_p (cond1, insn2)
          /* Make sure second instruction doesn't affect condition of first
             instruction if switched.  */
          && !modified_in_p (cond2, insn1))
        return true;
    }
  return false;
}
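
/* For illustration only: two hypothetical COND_EXEC insns guarded by

     (cond_exec (eq (reg 100) (const_int 0)) (set (reg 1) ...))
     (cond_exec (ne (reg 100) (const_int 0)) (set (reg 1) ...))

   can never both execute, so no dependence is needed between them,
   provided neither insn modifies (reg 100) itself.  */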
/* Add ELEM wrapped in a dep_link with reg note kind DEP_TYPE to the
   INSN_BACK_DEPS (INSN), if it is not already there.  DEP_TYPE indicates the
   type of dependence that this link represents.  DS, if nonzero,
   indicates speculations, through which this dependence can be overcome.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.  The function returns a value indicating if an old entry
   has been changed or a new entry has been added to insn's backward deps.
   If an entry has been changed, *CHANGED_LINKPP is set to its address.
   See also the definition of enum DEPS_ADJUST_RESULT in sched-int.h.
   Actual manipulation of dependence data structures is performed in
   add_or_update_back_dep_1.  */

static enum DEPS_ADJUST_RESULT
maybe_add_or_update_back_dep_1 (rtx insn, rtx elem, enum reg_note dep_type,
                                ds_t ds, rtx mem1, rtx mem2,
                                dep_link_t **changed_linkpp)
{
  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
#ifdef INSN_SCHEDULING
      if (current_sched_info->flags & DO_SPECULATION)
        /* INSN has an internal dependence, which we can't overcome.  */
        HAS_INTERNAL_DEP (insn) = 1;
#endif
      return 0;
    }

  return add_or_update_back_dep_1 (insn, elem, dep_type,
                                   ds, mem1, mem2, changed_linkpp);
}
/* This function has the same meaning of parameters and return values
   as maybe_add_or_update_back_dep_1.  The only difference between these
   two functions is that INSN and ELEM are guaranteed not to be the same
   insn.  */
static enum DEPS_ADJUST_RESULT
add_or_update_back_dep_1 (rtx insn, rtx elem, enum reg_note dep_type,
                          ds_t ds ATTRIBUTE_UNUSED,
                          rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED,
                          dep_link_t **changed_linkpp ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true, present_p = false;

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
  check_dep_status (dep_type, ds, mem1 != NULL);
#endif

  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      enum reg_note present_dep_type;

      gcc_assert (output_dependency_cache);
      gcc_assert (anti_dependency_cache);
      if (!(current_sched_info->flags & USE_DEPS_LIST))
        {
          if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem)))
            present_dep_type = REG_DEP_TRUE;
          else if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
                                 INSN_LUID (elem)))
            present_dep_type = REG_DEP_OUTPUT;
          else if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
                                 INSN_LUID (elem)))
            present_dep_type = REG_DEP_ANTI;
          else
            maybe_present_p = false;

          if (maybe_present_p)
            {
              if ((int) dep_type >= (int) present_dep_type)
                return DEP_PRESENT;

              present_p = true;
            }
        }
      else
        {
          ds_t present_dep_types = 0;

          if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem)))
            present_dep_types |= DEP_TRUE;
          if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem)))
            present_dep_types |= DEP_OUTPUT;
          if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem)))
            present_dep_types |= DEP_ANTI;

          if (present_dep_types)
            {
              if (!(current_sched_info->flags & DO_SPECULATION)
                  || !bitmap_bit_p (&spec_dependency_cache[INSN_LUID (insn)],
                                    INSN_LUID (elem)))
                {
                  if ((present_dep_types | (ds & DEP_TYPES))
                      == present_dep_types)
                    /* We already have all these bits.  */
                    return DEP_PRESENT;
                }
              else
                {
                  /* Only true dependencies can be data speculative and
                     only anti dependencies can be control speculative.  */
                  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
                              == present_dep_types);

                  /* if (additional dep is SPECULATIVE) then
                       we should update DEP_STATUS
                     else
                       we should reset existing dep to non-speculative.  */
                }

              present_p = true;
            }
          else
            maybe_present_p = false;
        }
    }
#endif

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_link_t *linkp;

      for (linkp = &DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
           *linkp != NULL;
           linkp = &DEP_LINK_NEXT (*linkp))
        {
          dep_t link = DEP_LINK_DEP (*linkp);

          gcc_assert (true_dependency_cache == 0 || present_p);

          if (DEP_PRO (link) == elem)
            {
              enum DEPS_ADJUST_RESULT changed_p = DEP_PRESENT;

#ifdef INSN_SCHEDULING
              if (current_sched_info->flags & USE_DEPS_LIST)
                {
                  ds_t new_status = ds | DEP_STATUS (link);

                  if (new_status & SPECULATIVE)
                    {
                      if (!(ds & SPECULATIVE)
                          || !(DEP_STATUS (link) & SPECULATIVE))
                        /* Then this dep can't be speculative.  */
                        {
                          new_status &= ~SPECULATIVE;
                          if (true_dependency_cache
                              && (DEP_STATUS (link) & SPECULATIVE))
                            bitmap_clear_bit (&spec_dependency_cache
                                              [INSN_LUID (insn)],
                                              INSN_LUID (elem));
                        }
                      else
                        {
                          /* Both are speculative.  Merge the probabilities.  */
                          if (mem1)
                            {
                              dw_t dw;

                              dw = estimate_dep_weak (mem1, mem2);
                              ds = set_dep_weak (ds, BEGIN_DATA, dw);
                            }

                          new_status = ds_merge (DEP_STATUS (link), ds);
                        }
                    }

                  ds = new_status;
                }

              /* Clear the corresponding cache entry because the type of the
                 link may have changed.  Keep the entries if USE_DEPS_LIST
                 is set.  */
              if (true_dependency_cache != NULL
                  && !(current_sched_info->flags & USE_DEPS_LIST))
                {
                  enum reg_note kind = DEP_KIND (link);

                  switch (kind)
                    {
                    case REG_DEP_OUTPUT:
                      bitmap_clear_bit (&output_dependency_cache
                                        [INSN_LUID (insn)], INSN_LUID (elem));
                      break;
                    case REG_DEP_ANTI:
                      bitmap_clear_bit (&anti_dependency_cache
                                        [INSN_LUID (insn)], INSN_LUID (elem));
                      break;
                    default:
                      gcc_unreachable ();
                    }
                }

              if ((current_sched_info->flags & USE_DEPS_LIST)
                  && DEP_STATUS (link) != ds)
                {
                  DEP_STATUS (link) = ds;
                  changed_p = DEP_CHANGED;
                }
#endif

              /* If this is a more restrictive type of dependence than the
                 existing one, then change the existing dependence to this
                 type.  */
              if ((int) dep_type < (int) DEP_KIND (link))
                {
                  DEP_KIND (link) = dep_type;
                  changed_p = DEP_CHANGED;
                }

#ifdef INSN_SCHEDULING
              /* If we are adding a dependency to INSN's LOG_LINKs, then
                 note that in the bitmap caches of dependency information.  */
              if (true_dependency_cache != NULL)
                {
                  if (!(current_sched_info->flags & USE_DEPS_LIST))
                    {
                      if (DEP_KIND (link) == REG_DEP_TRUE)
                        bitmap_set_bit (&true_dependency_cache
                                        [INSN_LUID (insn)], INSN_LUID (elem));
                      else if (DEP_KIND (link) == REG_DEP_OUTPUT)
                        bitmap_set_bit (&output_dependency_cache
                                        [INSN_LUID (insn)], INSN_LUID (elem));
                      else if (DEP_KIND (link) == REG_DEP_ANTI)
                        bitmap_set_bit (&anti_dependency_cache
                                        [INSN_LUID (insn)], INSN_LUID (elem));
                    }
                  else
                    {
                      if (ds & DEP_TRUE)
                        bitmap_set_bit (&true_dependency_cache
                                        [INSN_LUID (insn)], INSN_LUID (elem));
                      if (ds & DEP_OUTPUT)
                        bitmap_set_bit (&output_dependency_cache
                                        [INSN_LUID (insn)], INSN_LUID (elem));
                      if (ds & DEP_ANTI)
                        bitmap_set_bit (&anti_dependency_cache
                                        [INSN_LUID (insn)], INSN_LUID (elem));
                      /* Note that a dep can become speculative only
                         at the moment of creation.  Thus, we don't need to
                         check for that here.  */
                    }
                }

              if (changed_linkpp && changed_p == DEP_CHANGED)
                *changed_linkpp = linkp;
#endif
              return changed_p;
            }
        }
      /* We didn't find a dep.  It shouldn't be present in the cache.  */
      gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_back_dep_1.
     Since we made it to add_or_update_back_dep_1, we must create
     (or update) a link.  */

  if (mem1)
    {
      gcc_assert (current_sched_info->flags & DO_SPECULATION);
      ds = set_dep_weak (ds, BEGIN_DATA, estimate_dep_weak (mem1, mem2));
    }

  add_back_dep (insn, elem, dep_type, ds);

  return DEP_CREATED;
}
/* This function creates a link between INSN and ELEM under any
   conditions.  DS describes speculative status of the link.  */
static void
add_back_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
{
  struct _dep _dep, *dep = &_dep;

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if (current_sched_info->flags & USE_DEPS_LIST)
    init_dep_1 (dep, elem, insn, dep_type, ds);
  else
    init_dep_1 (dep, elem, insn, dep_type, -1);

  add_back_dep_to_deps_list (INSN_BACK_DEPS (insn), dep);

#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
  check_dep_status (dep_type, ds, false);
#endif

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if (!(current_sched_info->flags & USE_DEPS_LIST))
        {
          if (dep_type == REG_DEP_TRUE)
            bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem));
          else if (dep_type == REG_DEP_OUTPUT)
            bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem));
          else if (dep_type == REG_DEP_ANTI)
            bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem));
        }
      else
        {
          if (ds & DEP_TRUE)
            bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem));
          if (ds & DEP_OUTPUT)
            bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem));
          if (ds & DEP_ANTI)
            bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem));

          if (ds & SPECULATIVE)
            {
              gcc_assert (current_sched_info->flags & DO_SPECULATION);
              bitmap_set_bit (&spec_dependency_cache[INSN_LUID (insn)],
                              INSN_LUID (elem));
            }
        }
    }
#endif
}
/* A convenience wrapper to operate on an entire list.  */

static void
add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
{
  for (; list; list = XEXP (list, 1))
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
        add_dependence (insn, XEXP (list, 0), dep_type);
    }
}
/* Similar, but free *LISTP at the same time.  */

static void
add_dependence_list_and_free (rtx insn, rtx *listp, int uncond,
                              enum reg_note dep_type)
{
  rtx list, next;
  for (list = *listp, *listp = NULL; list ; list = next)
    {
      next = XEXP (list, 1);
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
        add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
}
/* Clear all dependencies for an insn.  */

static void
delete_all_dependences (rtx insn)
{
  /* Clear caches, if they exist, as well as free the dependence.  */

#ifdef INSN_SCHEDULING
  if (true_dependency_cache != NULL)
    {
      bitmap_clear (&true_dependency_cache[INSN_LUID (insn)]);
      bitmap_clear (&output_dependency_cache[INSN_LUID (insn)]);
      bitmap_clear (&anti_dependency_cache[INSN_LUID (insn)]);
      /* We don't have to clear forward_dependency_cache here,
         because it is formed later.  */
      if (current_sched_info->flags & DO_SPECULATION)
        bitmap_clear (&spec_dependency_cache[INSN_LUID (insn)]);
    }
#endif

  clear_deps_list (INSN_BACK_DEPS (insn));
}
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
fixup_sched_groups (rtx insn)
{
  dep_link_t link;
  rtx prev_nonnote;

  FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
    {
      rtx i = insn;
      dep_t dep = DEP_LINK_DEP (link);
      rtx pro = DEP_PRO (dep);

      do
        {
          i = prev_nonnote_insn (i);

          if (pro == i)
            goto next_link;
        } while (SCHED_GROUP_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
        add_dependence (i, pro, DEP_KIND (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
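
/* For illustration only, a hypothetical insn sequence showing all four
   kinds for one memory location A:

     insn 1:  (set (mem A) (reg 1))
     insn 2:  (set (reg 2) (mem A))   <- (1) true dependence on insn 1
     insn 3:  (set (reg 3) (mem A))   <- (0) read dependence on insn 2
     insn 4:  (set (mem A) (reg 4))   <- (2) output dependence on insn 1,
                                         (3) anti dependence on insns 2, 3  */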
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps *deps, bool read_p,
                         rtx insn, rtx mem)
{
  rtx *insn_list;
  rtx *mem_list;
  rtx link;

  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (current_sched_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }
  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = link;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (struct deps *deps, rtx insn, int for_read,
                     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (insn, &deps->pending_read_insns, 1,
                                    REG_DEP_ANTI);
      free_EXPR_LIST_list (&deps->pending_read_mems);
      deps->pending_read_list_length = 0;
    }

  add_dependence_list_and_free (insn, &deps->pending_write_insns, 1,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  deps->pending_write_list_length = 0;

  add_dependence_list_and_free (insn, &deps->last_pending_memory_flush, 1,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
  deps->pending_flush_length = 1;
}
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */

static void
sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
                   enum rtx_code ref, rtx insn)
{
  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i = hard_regno_nregs[regno][mode];
      if (ref == SET)
        {
          while (--i >= 0)
            SET_REGNO_REG_SET (reg_pending_sets, regno + i);
        }
      else if (ref == USE)
        {
          while (--i >= 0)
            SET_REGNO_REG_SET (reg_pending_uses, regno + i);
        }
      else
        {
          while (--i >= 0)
            SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
        }
    }

  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
    {
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);
    }

  else
    {
      if (ref == SET)
        SET_REGNO_REG_SET (reg_pending_sets, regno);
      else if (ref == USE)
        SET_REGNO_REG_SET (reg_pending_uses, regno);
      else
        SET_REGNO_REG_SET (reg_pending_clobbers, regno);

      /* Pseudos that are REG_EQUIV to something may be replaced
         by that during reloading.  We need only add dependencies for
         the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
        {
          rtx t = get_reg_known_value (regno);
          if (MEM_P (t))
            sched_analyze_2 (deps, XEXP (t, 0), insn);
        }

      /* Don't let it cross a call after scheduling if it doesn't
         already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
        {
          if (ref == USE)
            deps->sched_before_next_call
              = alloc_INSN_LIST (insn, deps->sched_before_next_call);
          else
            add_dependence_list (insn, deps->last_function_call, 1,
                                 REG_DEP_ANTI);
        }
    }
}
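
/* For illustration only: on a hypothetical 32-bit target where
   hard_regno_nregs[0][DImode] == 2, a SET of (reg:DI 0) marks both hard
   registers 0 and 1 in reg_pending_sets above, exactly as if each word
   had been written separately.  */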
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
{
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (dest == 0)
    return;

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
          sched_analyze_1 (deps,
                           gen_rtx_CLOBBER (VOIDmode,
                                            XEXP (XVECEXP (dest, 0, i), 0)),
                           insn);

      if (GET_CODE (x) == SET)
        sched_analyze_2 (deps, SET_SRC (x), insn);
      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT)
    {
      if (GET_CODE (dest) == STRICT_LOW_PART
          || GET_CODE (dest) == ZERO_EXTRACT
          || df_read_modify_subreg_p (dest))
        {
          /* These both read and modify the result.  We must handle
             them as writes to get proper dependencies for following
             instructions.  We must handle them as reads to get proper
             dependencies from this to previous instructions.
             Thus we need to call sched_analyze_2.  */

          sched_analyze_2 (deps, XEXP (dest, 0), insn);
        }
      if (GET_CODE (dest) == ZERO_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
        }
      dest = XEXP (dest, 0);
    }

  if (REG_P (dest))
    {
      int regno = REGNO (dest);
      enum machine_mode mode = GET_MODE (dest);

      sched_analyze_reg (deps, regno, mode, code, insn);

#ifdef STACK_REGS
      /* Treat all writes to a stack register as modifying the TOS.  */
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
        {
          /* Avoid analyzing the same register twice.  */
          if (regno != FIRST_STACK_REG)
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
          sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
        }
#endif
    }
  else if (MEM_P (dest))
    {
      /* Writing memory.  */
      rtx t = dest;

      if (current_sched_info->use_cselib)
        {
          t = shallow_copy_rtx (dest);
          cselib_lookup (XEXP (t, 0), Pmode, 1);
          XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
        }
      t = canon_rtx (t);

      if ((deps->pending_read_list_length + deps->pending_write_list_length)
          > MAX_PENDING_LIST_LENGTH)
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the default value of 32.  */
          flush_pending_lists (deps, insn, false, true);
        }
      else
        {
          rtx pending, pending_mem;

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (anti_dependence (XEXP (pending_mem, 0), t)
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              if (output_dependence (XEXP (pending_mem, 0), t)
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
                               REG_DEP_ANTI);

          add_insn_mem_dependence (deps, false, insn, dest);
        }
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
}
/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      SCHED_GROUP_P (insn) = 1;
      /* Don't move CC0 setter to another block (it can set up the
         same flag for previous CC0 users which is safe).  */
      CANT_MOVE (prev_nonnote_insn (insn)) = 1;
      return;
#endif

    case REG:
      {
        int regno = REGNO (x);
        enum machine_mode mode = GET_MODE (x);

        sched_analyze_reg (deps, regno, mode, USE, insn);

#ifdef STACK_REGS
        /* Treat all reads of a stack register as modifying the TOS.  */
        if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
          {
            /* Avoid analyzing the same register twice.  */
            if (regno != FIRST_STACK_REG)
              sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
          }
#endif
        return;
      }

    case MEM:
      {
        /* Reading memory.  */
        rtx u;
        rtx pending, pending_mem;
        rtx t = x;

        if (current_sched_info->use_cselib)
          {
            t = shallow_copy_rtx (t);
            cselib_lookup (XEXP (t, 0), Pmode, 1);
            XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
          }
        t = canon_rtx (t);
        pending = deps->pending_read_insns;
        pending_mem = deps->pending_read_mems;
        while (pending)
          {
            if (read_dependence (XEXP (pending_mem, 0), t)
                && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
              add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        pending = deps->pending_write_insns;
        pending_mem = deps->pending_write_mems;
        while (pending)
          {
            if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
                                 t, rtx_varies_p)
                && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
              {
                if (current_sched_info->flags & DO_SPECULATION)
                  maybe_add_or_update_back_dep_1 (insn, XEXP (pending, 0),
                                                  REG_DEP_TRUE,
                                                  BEGIN_DATA | DEP_TRUE,
                                                  XEXP (pending_mem, 0), t, 0);
                else
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_TRUE);
              }

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
          if (! JUMP_P (XEXP (u, 0)) || deps_may_trap_p (x))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        add_insn_mem_dependence (deps, true, insn, x);

        /* Take advantage of tail recursion here.  */
        sched_analyze_2 (deps, XEXP (x, 0), insn);
        return;
      }

    /* Force pending stores to memory in case a trap handler needs them.  */
    case TRAP_IF:
      flush_pending_lists (deps, insn, true, false);
      break;

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
      {
        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.  */
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
          reg_pending_barrier = TRUE_BARRIER;

        /* For all ASM_OPERANDS, we must traverse the vector of input operands.
           We can not just fall through here since then we would be confused
           by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
           traditional asms unlike their normal usage.  */

        if (code == ASM_OPERANDS)
          {
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
            return;
          }
        break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }
}
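
/* For illustration only: an address like (post_inc (reg sp)) inside a MEM
   both reads and writes the stack pointer, so sched_analyze_2 () routes it
   through both analyzers:

     sched_analyze_2 (deps, XEXP (x, 0), insn);    - the read of sp
     sched_analyze_1 (deps, x, insn);              - the write of sp  */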
/* Analyze an INSN with pattern X to find all dependencies.  */
static void
sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
{
  RTX_CODE code = GET_CODE (x);
  rtx link;
  unsigned i;
  reg_set_iterator rsi;

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
         false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    {
      sched_analyze_1 (deps, x, insn);

      /* Bare clobber insns are used for letting life analysis, reg-stack
         and others know that a value is dead.  Depend on the last call
         instruction so that reg-stack won't get confused.  */
      if (code == CLOBBER)
        add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_OUTPUT);
    }
  else if (code == PARALLEL)
    {
      for (i = XVECLEN (x, 0); i--;)
        {
          rtx sub = XVECEXP (x, 0, i);
          code = GET_CODE (sub);

          if (code == COND_EXEC)
            {
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
              sub = COND_EXEC_CODE (sub);
              code = GET_CODE (sub);
            }
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (deps, sub, insn);
          else
            sched_analyze_2 (deps, sub, insn);
        }
    }
  else
    sched_analyze_2 (deps, x, insn);

  /* Mark registers CLOBBERED or used by called function.  */
  if (CALL_P (insn))
    {
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        {
          if (GET_CODE (XEXP (link, 0)) == CLOBBER)
            sched_analyze_1 (deps, XEXP (link, 0), insn);
          else
            sched_analyze_2 (deps, XEXP (link, 0), insn);
        }
      if (find_reg_note (insn, REG_SETJMP, NULL))
        reg_pending_barrier = MOVE_BARRIER;
    }

  if (JUMP_P (insn))
    {
      rtx next;
      next = next_nonnote_insn (insn);
      if (next && BARRIER_P (next))
        reg_pending_barrier = TRUE_BARRIER;
      else
        {
          rtx pending, pending_mem;
          regset_head tmp_uses, tmp_sets;
          INIT_REG_SET (&tmp_uses);
          INIT_REG_SET (&tmp_sets);

          (*current_sched_info->compute_jump_reg_dependencies)
            (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
          /* Make latency of jump equal to 0 by using anti-dependence.  */
          EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
              reg_last->uses_length++;
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
            }
          IOR_REG_SET (reg_pending_sets, &tmp_sets);

          CLEAR_REG_SET (&tmp_uses);
          CLEAR_REG_SET (&tmp_sets);

          /* All memory writes and volatile reads must happen before the
             jump.  Non-volatile reads must happen before the jump iff
             the result is needed by the above register used mask.  */

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
                               REG_DEP_ANTI);
        }
    }

  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  */
  if (can_throw_internal (insn))
    reg_pending_barrier = MOVE_BARRIER;

  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
    {
      /* In the case of a barrier, most of the added dependencies are not
         real, so we use anti-dependence here.  */
      if (sched_get_condition (insn))
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
              add_dependence_list
                (insn, reg_last->sets, 0,
                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
              add_dependence_list
                (insn, reg_last->clobbers, 0,
                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
            }
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (insn, &reg_last->uses, 0,
                                            REG_DEP_ANTI);
              add_dependence_list_and_free
                (insn, &reg_last->sets, 0,
                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
              add_dependence_list_and_free
                (insn, &reg_last->clobbers, 0,
                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
              reg_last->uses_length = 0;
              reg_last->clobbers_length = 0;
            }
        }

      for (i = 0; i < (unsigned)deps->max_reg; i++)
        {
          struct deps_reg *reg_last = &deps->reg_last[i];
          reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
        }

      flush_pending_lists (deps, insn, true, true);
      CLEAR_REG_SET (&deps->reg_conditional_sets);
      reg_pending_barrier = NOT_A_BARRIER;
    }
  else
    {
      /* If the current insn is conditional, we can't free any
         of the lists.  */
      if (sched_get_condition (insn))
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
              add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
              reg_last->uses_length++;
            }
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
              reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
              reg_last->clobbers_length++;
            }
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
              reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
              SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
            }
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
              add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
              reg_last->uses_length++;
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
            }
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
                  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
                {
                  add_dependence_list_and_free (insn, &reg_last->sets, 0,
                                                REG_DEP_OUTPUT);
                  add_dependence_list_and_free (insn, &reg_last->uses, 0,
                                                REG_DEP_ANTI);
                  add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
                                                REG_DEP_OUTPUT);
                  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
                  reg_last->clobbers_length = 0;
                  reg_last->uses_length = 0;
                }
              else
                {
                  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
                  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
                }
              reg_last->clobbers_length++;
              reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
            }
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (insn, &reg_last->sets, 0,
                                            REG_DEP_OUTPUT);
              add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
                                            REG_DEP_OUTPUT);
              add_dependence_list_and_free (insn, &reg_last->uses, 0,
                                            REG_DEP_ANTI);
              reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
              reg_last->uses_length = 0;
              reg_last->clobbers_length = 0;
              CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
            }
        }

      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
    }
  CLEAR_REG_SET (reg_pending_uses);
  CLEAR_REG_SET (reg_pending_clobbers);
  CLEAR_REG_SET (reg_pending_sets);

  /* If we are currently in a libcall scheduling group, then mark the
     current insn as being in a scheduling group and that it can not
     be moved into a different basic block.  */

  if (deps->libcall_block_tail_insn)
    {
      SCHED_GROUP_P (insn) = 1;
      CANT_MOVE (insn) = 1;
    }

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
        goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (REG_P (tmp))
        dest_regno = REGNO (tmp);
      else
        goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if ((GET_CODE (tmp) == PLUS
           || GET_CODE (tmp) == MINUS)
          && REG_P (XEXP (tmp, 0))
          && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
          && dest_regno == STACK_POINTER_REGNUM)
        src_regno = STACK_POINTER_REGNUM;
      else if (REG_P (tmp))
        src_regno = REGNO (tmp);
      else
        goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
          || dest_regno < FIRST_PSEUDO_REGISTER)
        {
          if (deps->in_post_call_group_p == post_call_initial)
            deps->in_post_call_group_p = post_call;

          SCHED_GROUP_P (insn) = 1;
          CANT_MOVE (insn) = 1;
        }
      else
        {
        end_call_group:
          deps->in_post_call_group_p = not_post_call;
        }
    }

  /* Fixup the dependencies in the sched group.  */
  if (SCHED_GROUP_P (insn))
    fixup_sched_groups (insn);
}
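
/* For illustration only, a hypothetical post-call group kept intact by the
   code above:

     (call_insn 10 ...)
     (insn 11 (set (reg:SI 130) (reg:SI 0)))   <- hard reg copied to pseudo

   Insn 11 gets SCHED_GROUP_P and CANT_MOVE, tying it to the call.  */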
/* Analyze every insn between HEAD and TAIL inclusive, creating backward
   dependencies for each insn.  */

void
sched_analyze (struct deps *deps, rtx head, rtx tail)
{
  rtx insn;

  if (current_sched_info->use_cselib)
    cselib_init (true);

  /* Before reload, if the previous block ended in a call, show that
     we are inside a post-call group, so as to keep the lifetimes of
     hard registers correct.  */
  if (! reload_completed && !LABEL_P (head))
    {
      insn = prev_nonnote_insn (head);
      if (insn && CALL_P (insn))
        deps->in_post_call_group_p = post_call_initial;
    }
  for (insn = head;; insn = NEXT_INSN (insn))
    {
      rtx link, end_seq, r0, set;

      if (INSN_P (insn))
        {
          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          /* These two lists will be freed in schedule_insn ().  */
          INSN_BACK_DEPS (insn) = create_deps_list (false);
          INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list (false);

          /* This one should be allocated on the obstack because it should live
             till the scheduling ends.  */
          INSN_FORW_DEPS (insn) = create_deps_list (true);
        }

      if (NONJUMP_INSN_P (insn) || JUMP_P (insn))
        {
          /* Make each JUMP_INSN a scheduling barrier for memory
             references.  */
          if (JUMP_P (insn))
            {
              /* Keep the list a reasonable size.  */
              if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
                flush_pending_lists (deps, insn, true, true);
              else
                deps->last_pending_memory_flush
                  = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
            }
          sched_analyze_insn (deps, PATTERN (insn), insn);
        }
      else if (CALL_P (insn))
        {
          int i;

          CANT_MOVE (insn) = 1;

          if (find_reg_note (insn, REG_SETJMP, NULL))
            {
              /* This is setjmp.  Assume that all registers, not just
                 hard registers, may be clobbered by this call.  */
              reg_pending_barrier = MOVE_BARRIER;
            }
          else
            {
              for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
                /* A call may read and modify global register variables.  */
                if (global_regs[i])
                  {
                    SET_REGNO_REG_SET (reg_pending_sets, i);
                    SET_REGNO_REG_SET (reg_pending_uses, i);
                  }
                /* Other call-clobbered hard regs may be clobbered.
                   Since we only have a choice between 'might be clobbered'
                   and 'definitely not clobbered', we must include all
                   partly call-clobbered registers here.  */
                else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
                         || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
                  SET_REGNO_REG_SET (reg_pending_clobbers, i);
                /* We don't know what set of fixed registers might be used
                   by the function, but it is certain that the stack pointer
                   is among them, so be conservative.  */
                else if (fixed_regs[i])
                  SET_REGNO_REG_SET (reg_pending_uses, i);
                /* The frame pointer is normally not used by the function
                   itself, but by the debugger.  */
                /* ??? MIPS o32 is an exception.  It uses the frame pointer
                   in the macro expansion of jal but does not represent this
                   fact in the call_insn rtl.  */
                else if (i == FRAME_POINTER_REGNUM
                         || (i == HARD_FRAME_POINTER_REGNUM
                             && (! reload_completed || frame_pointer_needed)))
                  SET_REGNO_REG_SET (reg_pending_uses, i);
            }

          /* For each insn which shouldn't cross a call, add a dependence
             between that insn and this call insn.  */
          add_dependence_list_and_free (insn, &deps->sched_before_next_call, 1,
                                        REG_DEP_ANTI);

          sched_analyze_insn (deps, PATTERN (insn), insn);

          /* In the absence of interprocedural alias analysis, we must flush
             all pending reads and writes, and start new dependencies starting
             from here.  But only flush writes for constant calls (which may
             be passed a pointer to something we haven't written yet).  */
          flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));

          /* Remember the last function call for limiting lifetimes.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = post_call;
        }

      /* EH_REGION insn notes can not appear until well after we complete
         scheduling.  */
      if (NOTE_P (insn))
        gcc_assert (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
                    && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END);

      if (current_sched_info->use_cselib)
        cselib_process_insn (insn);

      /* Now that we have completed handling INSN, check and see if it is
         a CLOBBER beginning a libcall block.  If it is, record the
         end of the libcall sequence.

         We want to schedule libcall blocks as a unit before reload.  While
         this restricts scheduling, it preserves the meaning of a libcall
         sequence.

         As a side effect, we may get better code due to decreased register
         pressure as well as less chance of a foreign insn appearing in
         a libcall block.  */
      if (!reload_completed
          /* Note we may have nested libcall sequences.  We only care about
             the outermost libcall sequence.  */
          && deps->libcall_block_tail_insn == 0
          /* The sequence must start with a clobber of a register.  */
          && NONJUMP_INSN_P (insn)
          && GET_CODE (PATTERN (insn)) == CLOBBER
          && (r0 = XEXP (PATTERN (insn), 0), REG_P (r0))
          && REG_P (XEXP (PATTERN (insn), 0))
          /* The CLOBBER must also have a REG_LIBCALL note attached.  */
          && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
          && (end_seq = XEXP (link, 0)) != 0
          /* The insn referenced by the REG_LIBCALL note must be a
             simple nop copy with the same destination as the register
             mentioned in the clobber.  */
          && (set = single_set (end_seq)) != 0
          && SET_DEST (set) == r0 && SET_SRC (set) == r0
          /* And finally the insn referenced by the REG_LIBCALL must
             also contain a REG_EQUAL note and a REG_RETVAL note.  */
          && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
          && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
        deps->libcall_block_tail_insn = XEXP (link, 0);

      /* If we have reached the end of a libcall block, then close the
         block.  */
      if (deps->libcall_block_tail_insn == insn)
        deps->libcall_block_tail_insn = 0;

      if (insn == tail)
        {
          if (current_sched_info->use_cselib)
            cselib_finish ();
          return;
        }
    }
  gcc_unreachable ();
}
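
/* For illustration only, the shape of a libcall block recognized above
   (a sketch; the notes carry rtx operands):

     (insn 10 (clobber (reg r0)))        with REG_LIBCALL pointing to 14
     (insn 11 ...)  ... body ...
     (insn 14 (set (reg r0) (reg r0)))   with REG_RETVAL pointing to 10,
                                         plus a REG_EQUAL note

   Before reload, insns 10 through 14 are scheduled as a unit.  */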
/* The following function adds forward dependence (FROM, TO) with
   given DEP_TYPE.  The forward dependence must not already exist.  */

void
add_forw_dep (dep_link_t link)
{
  dep_t dep = DEP_LINK_DEP (link);
  rtx to = DEP_CON (dep);
  rtx from = DEP_PRO (dep);

#ifdef ENABLE_CHECKING
  /* If add_dependence is working properly there should never
     be notes, deleted insns or duplicates in the backward
     links.  Thus we need not check for them here.

     However, if we have enabled checking we might as well go
     ahead and verify that add_dependence worked properly.  */
  gcc_assert (INSN_P (from));
  gcc_assert (!INSN_DELETED_P (from));
  if (true_dependency_cache)
    {
      gcc_assert (!bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
                                 INSN_LUID (to)));
      bitmap_set_bit (&forward_dependency_cache[INSN_LUID (from)],
                      INSN_LUID (to));
    }
  else
    gcc_assert (find_link_by_con_in_deps_list (INSN_FORW_DEPS (from), to)
                == NULL);
#endif

  add_to_deps_list (DEP_NODE_FORW (DEP_LINK_NODE (link)),
                    INSN_FORW_DEPS (from));

  INSN_DEP_COUNT (to) += 1;
}
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from INSN_BACK_DEPS list to build forward dependences in
   INSN_FORW_DEPS.  */

void
compute_forward_dependences (rtx head, rtx tail)
{
  rtx insn;
  rtx next_tail;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      dep_link_t link;

      if (! INSN_P (insn))
        continue;

      if (current_sched_info->flags & DO_SPECULATION)
        {
          /* We will add links, preserving order, from INSN_BACK_DEPS to
             NEW.  */
          dep_link_t new = NULL;

          link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));

          while (link != NULL)
            {
              dep_link_t next = DEP_LINK_NEXT (link);

              detach_dep_link (link);
              adjust_add_sorted_back_dep (insn, link, &new);

              link = next;
            }

          /* Attach NEW to be the list of backward dependencies.  */
          if (new != NULL)
            {
              DEP_LINK_PREV_NEXTP (new)
                = &DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));

              DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)) = new;
            }
        }

      FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
        add_forw_dep (link);
    }
}
/* Initialize variables for region data dependence analysis.
   n_bbs is the number of region blocks.  */

void
init_deps (struct deps *deps)
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
  INIT_REG_SET (&deps->reg_last_in_use);
  INIT_REG_SET (&deps->reg_conditional_sets);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_read_list_length = 0;
  deps->pending_write_list_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->sched_before_next_call = 0;
  deps->in_post_call_group_p = not_post_call;
  deps->libcall_block_tail_insn = 0;
}
/* Free insn lists found in DEPS.  */

void
free_deps (struct deps *deps)
{
  unsigned i;
  reg_set_iterator rsi;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
        free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
        free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->clobbers)
        free_INSN_LIST_list (&reg_last->clobbers);
    }
  CLEAR_REG_SET (&deps->reg_last_in_use);
  CLEAR_REG_SET (&deps->reg_conditional_sets);

  free (deps->reg_last);
}
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled;
   it is used in the estimate of profitability.  */
void
init_dependency_caches (int luid)
{
  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
  if (luid / n_basic_blocks > 100 * 5)
    {
      cache_size = 0;
      extend_dependency_caches (luid, true);
    }

  /* The lifetime of this obstack is the whole of function scheduling (not
     a single region's scheduling) because some dependencies can be manually
     generated for outside regions.  See dont_calc_deps in sched-{rgn,ebb}.c.

     A possible solution would be to have two obstacks:
     * the big one for regular dependencies with region scheduling lifetime,
     * and the small one for manually generated dependencies with function
       scheduling lifetime.  */
  gcc_obstack_init (&deps_obstack);
}
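
/* A worked example of the threshold above (the numbers are illustrative
   only): with 8 basic blocks, the caches are created only when LUID
   exceeds 8 * 500 == 4000, i.e. when a basic block holds more than
   500 insns on average.  */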
/* Create or extend (depending on CREATE_P) dependency caches to
   hold N more insns.  */
void
extend_dependency_caches (int n, bool create_p)
{
  if (create_p || true_dependency_cache)
    {
      int i, luid = cache_size + n;

      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
					  luid);
      output_dependency_cache = XRESIZEVEC (bitmap_head,
					    output_dependency_cache, luid);
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
					  luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = XRESIZEVEC (bitmap_head,
					     forward_dependency_cache, luid);
#endif
      if (current_sched_info->flags & DO_SPECULATION)
	spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
					    luid);

      for (i = cache_size; i < luid; i++)
	{
	  bitmap_initialize (&true_dependency_cache[i], 0);
	  bitmap_initialize (&output_dependency_cache[i], 0);
	  bitmap_initialize (&anti_dependency_cache[i], 0);
#ifdef ENABLE_CHECKING
	  bitmap_initialize (&forward_dependency_cache[i], 0);
#endif
	  if (current_sched_info->flags & DO_SPECULATION)
	    bitmap_initialize (&spec_dependency_cache[i], 0);
	}

      cache_size = luid;
    }
}
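
/* Illustrative sketch only (compiled out): the caches grown above are
   indexed by the luid of the consumer insn, with one bit per potential
   producer.  A hypothetical query would look like this.  */
#if 0
static bool
true_dep_cached_p (rtx pro, rtx con)
{
  /* Bit INSN_LUID (pro) in the consumer's bitmap means "a true
     dependence from PRO to CON has already been recorded".  */
  return (true_dependency_cache != NULL
	  && bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
			   INSN_LUID (pro)));
}
#endif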
/* Free the caches allocated in init_dependency_caches.  */
void
free_dependency_caches (void)
{
  obstack_free (&deps_obstack, NULL);

  if (true_dependency_cache)
    {
      int i;

      for (i = 0; i < cache_size; i++)
	{
	  bitmap_clear (&true_dependency_cache[i]);
	  bitmap_clear (&output_dependency_cache[i]);
	  bitmap_clear (&anti_dependency_cache[i]);
#ifdef ENABLE_CHECKING
	  bitmap_clear (&forward_dependency_cache[i]);
#endif
	  if (current_sched_info->flags & DO_SPECULATION)
	    bitmap_clear (&spec_dependency_cache[i]);
	}
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
      if (current_sched_info->flags & DO_SPECULATION)
	{
	  free (spec_dependency_cache);
	  spec_dependency_cache = NULL;
	}
    }
}
/* Initialize some global variables needed by the dependency analysis
   code.  */
void
init_deps_global (void)
{
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_barrier = NOT_A_BARRIER;
}
/* Free everything used by the dependency analysis code.  */
void
finish_deps_global (void)
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
}
/* Insert LINK into the dependence chain pointed to by LINKP and
   maintain the sort order.  */
static void
adjust_add_sorted_back_dep (rtx insn, dep_link_t link, dep_link_t *linkp)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION);

  /* If the insn cannot move speculatively, but the link is speculative,
     make it a hard dependence.  */
  if (HAS_INTERNAL_DEP (insn)
      && (DEP_LINK_STATUS (link) & SPECULATIVE))
    {
      DEP_LINK_STATUS (link) &= ~SPECULATIVE;

      if (true_dependency_cache)
	bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
			  INSN_LUID (DEP_LINK_PRO (link)));
    }

  /* Non-speculative links go at the head of deps_list, followed by
     speculative links.  */
  if (DEP_LINK_STATUS (link) & SPECULATIVE)
    while (*linkp && !(DEP_LINK_STATUS (*linkp) & SPECULATIVE))
      linkp = &DEP_LINK_NEXT (*linkp);

  attach_dep_link (link, linkp);

  if (CHECK)
    gcc_assert (deps_list_consistent_p (INSN_BACK_DEPS (insn)));
}
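
/* For example (illustrative only): if INSN_BACK_DEPS (insn) holds
   { hard1, hard2, spec1, spec2 }, inserting a non-speculative LINK
   yields { LINK, hard1, hard2, spec1, spec2 }, while inserting a
   speculative LINK yields { hard1, hard2, LINK, spec1, spec2 }.  */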
/* Move the dependence pointed to by LINKP to the back dependencies
   of INSN, and also add this dependence to the forward ones.  All dep_links,
   except the one pointed to by LINKP, must be sorted.  */
static void
adjust_back_add_forw_dep (rtx insn, dep_link_t *linkp)
{
  dep_link_t link;

  gcc_assert (current_sched_info->flags & DO_SPECULATION);

  link = *linkp;
  detach_dep_link (link);

  adjust_add_sorted_back_dep (insn, link,
			      &DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)));
  add_forw_dep (link);
}
/* Remove the forward dependence described by L.  */
static void
delete_forw_dep (dep_link_t l)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION);

#ifdef ENABLE_CHECKING
  if (true_dependency_cache)
    bitmap_clear_bit (&forward_dependency_cache[INSN_LUID (DEP_LINK_PRO (l))],
		      INSN_LUID (DEP_LINK_CON (l)));
#endif

  detach_dep_link (l);

  INSN_DEP_COUNT (DEP_LINK_CON (l))--;
}
/* Estimate the weakness of the dependence between MEM1 and MEM2.  */
static dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
  rtx r1, r2;

  if (mem1 == mem2)
    /* MEMs are the same - don't speculate.  */
    return MIN_DEP_WEAK;

  r1 = XEXP (mem1, 0);
  r2 = XEXP (mem2, 0);

  if (r1 == r2
      || (REG_P (r1) && REG_P (r2)
	  && REGNO (r1) == REGNO (r2)))
    /* Again, MEMs are the same.  */
    return MIN_DEP_WEAK;
  else if ((REG_P (r1) && !REG_P (r2))
	   || (!REG_P (r1) && REG_P (r2)))
    /* Different addressing modes - a reason to be more speculative
       than usual.  */
    return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
  else
    /* We can't say anything about the dependence.  */
    return UNCERTAIN_DEP_WEAK;
}
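
/* A worked example (the constants are illustrative; see sched-int.h for
   the real MIN/UNCERTAIN/NO_DEP_WEAK values): if NO_DEP_WEAK were 256
   and UNCERTAIN_DEP_WEAK were 192, two MEMs where exactly one address
   is a REG would get 256 - (256 - 192) / 2 == 224, i.e. closer to
   "surely independent" than the know-nothing answer of 192.  */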
/* Add or update a backward dependence between INSN and ELEM with type
   DEP_TYPE.  This function can handle the case where INSN == ELEM.
   It is a convenience wrapper.  */
void
add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
{
  ds_t ds;

  if (dep_type == REG_DEP_TRUE)
    ds = DEP_TRUE;
  else if (dep_type == REG_DEP_OUTPUT)
    ds = DEP_OUTPUT;
  else if (dep_type == REG_DEP_ANTI)
    ds = DEP_ANTI;
  else
    gcc_unreachable ();

  maybe_add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, 0);
}

/* Add or update a backward dependence between INSN and ELEM
   with the given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
add_or_update_back_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
{
  return add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, 0);
}
/* Add or update both backward and forward dependencies between INSN and ELEM
   with the given type DEP_TYPE and dep_status DS.  */
void
add_or_update_back_forw_dep (rtx insn, rtx elem, enum reg_note dep_type,
			     ds_t ds)
{
  enum DEPS_ADJUST_RESULT res;
  dep_link_t *linkp;

  res = add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, &linkp);

  if (res == DEP_CHANGED || res == DEP_CREATED)
    {
      if (res == DEP_CHANGED)
	delete_forw_dep (DEP_NODE_FORW (DEP_LINK_NODE (*linkp)));
      else if (res == DEP_CREATED)
	linkp = &DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));

      adjust_back_add_forw_dep (insn, linkp);
    }
}
/* Add both backward and forward dependencies between INSN and ELEM
   with the given type DEP_TYPE and dep_status DS.  */
void
add_back_forw_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
{
  add_back_dep (insn, elem, dep_type, ds);
  adjust_back_add_forw_dep (insn, &DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)));

  if (CHECK)
    gcc_assert (deps_list_consistent_p (INSN_BACK_DEPS (insn)));
}
/* Remove a dependency referred to by L.  */
void
delete_back_forw_dep (dep_link_t l)
{
  dep_node_t n = DEP_LINK_NODE (l);

  gcc_assert (current_sched_info->flags & DO_SPECULATION);

  if (true_dependency_cache != NULL)
    {
      dep_t dep = DEP_NODE_DEP (n);
      int elem_luid = INSN_LUID (DEP_PRO (dep));
      int insn_luid = INSN_LUID (DEP_CON (dep));

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  delete_forw_dep (DEP_NODE_FORW (n));
  detach_dep_link (DEP_NODE_BACK (n));
}
/* Return the weakness of speculative type TYPE in the dep_status DS.  */
dw_t
get_dep_weak (ds_t ds, ds_t type)
{
  ds = ds & type;
  switch (type)
    {
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }
  gcc_assert (MIN_DEP_WEAK <= ds && ds <= MAX_DEP_WEAK);
  return (dw_t) ds;
}
/* Return a dep_status that is identical to DS, except that the
   weakness of speculative type TYPE is set to DW.  */
ds_t
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
{
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);

  ds &= ~type;
  switch (type)
    {
    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }
  return ds;
}
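
/* Illustrative sketch only (compiled out): get_dep_weak and set_dep_weak
   form a round trip on each per-type weakness bit-field.  */
#if 0
static void
dep_weak_roundtrip_example (ds_t ds, dw_t dw)
{
  /* DW must lie in [MIN_DEP_WEAK, MAX_DEP_WEAK].  */
  gcc_assert (get_dep_weak (set_dep_weak (ds, BEGIN_DATA, dw), BEGIN_DATA)
	      == dw);
}
#endif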
/* Return the join of two dep_statuses DS1 and DS2.  */
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
  t = FIRST_SPEC_TYPE;
  do
    {
      if ((ds1 & t) && !(ds2 & t))
	ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
	ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
	{
	  /* Both are speculative wrt type T: multiply the weaknesses
	     and scale the product back into range.  */
	  ds_t dw;

	  dw = ((ds_t) get_dep_weak (ds1, t)) * ((ds_t) get_dep_weak (ds2, t));
	  dw /= MAX_DEP_WEAK;
	  if (dw < MIN_DEP_WEAK)
	    dw = MIN_DEP_WEAK;
	  ds = set_dep_weak (ds, t, (dw_t) dw);
	}
      if (t == LAST_SPEC_TYPE)
	break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}
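
/* Illustrative sketch only (compiled out); the weaknesses 200 and 100
   are made-up values, assumed to be below MAX_DEP_WEAK with the scaled
   product staying above MIN_DEP_WEAK.  A merged dependence is
   "stronger" (less attractive to speculate) than either input.  */
#if 0
static void
ds_merge_example (void)
{
  ds_t a = set_dep_weak (DEP_TRUE, BEGIN_DATA, 200);
  ds_t b = set_dep_weak (DEP_TRUE, BEGIN_DATA, 100);

  gcc_assert (get_dep_weak (ds_merge (a, b), BEGIN_DATA)
	      == (dw_t) (200 * 100 / MAX_DEP_WEAK));
}
#endif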
#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep_status (enum reg_note dt, ds_t ds, bool relaxed_p)
{
  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
		&& !(ds & DEP_TRUE));
  else
    gcc_assert ((dt == REG_DEP_ANTI)
		&& (ds & DEP_ANTI)
		&& !(ds & (DEP_OUTPUT | DEP_TRUE)));

  /* HARD_DEP cannot appear in the dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!(current_sched_info->flags & DO_SPECULATION))
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
	{
	  ds_t type = FIRST_SPEC_TYPE;

	  /* Check that dependence weakness is in the proper range.  */
	  do
	    {
	      if (ds & type)
		get_dep_weak (ds, type);

	      if (type == LAST_SPEC_TYPE)
		break;
	      type <<= SPEC_TYPE_SHIFT;
	    }
	  while (1);
	}

      if (ds & BEGIN_SPEC)
	{
	  /* Only a true dependence can be data speculative.  */
	  if (ds & BEGIN_DATA)
	    gcc_assert (ds & DEP_TRUE);

	  /* Control dependencies in the insn scheduler are represented by
	     anti-dependencies, therefore only an anti dependence can be
	     control speculative.  */
	  if (ds & BEGIN_CONTROL)
	    gcc_assert (ds & DEP_ANTI);
	}
      else
	{
	  /* Subsequent speculations should resolve true dependencies.  */
	  gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
	}

      /* Check that true and anti dependencies can't have other speculative
	 statuses.  */
      if (ds & DEP_TRUE)
	gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
	gcc_assert (ds & BEGIN_CONTROL);