/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"

#ifdef ENABLE_CHECKING
/* Return the major type present in the DS.  */

    return REG_DEP_OUTPUT;

  gcc_assert (ds & DEP_ANTI);

/* Return equivalent dep_status.  */
dk_to_ds (enum reg_note dk)

      gcc_assert (dk == REG_DEP_ANTI);
/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)

  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;

/* Init DEP with the arguments.
   While most of the scheduler (including targets) needs only the major type
   of the dependency, it is convenient to hide the full dep_status from it.  */
init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);

  init_dep_1 (dep, pro, con, kind, ds);

/* Make a copy of FROM in TO.  */
copy_dep (dep_t to, dep_t from)

  memcpy (to, from, sizeof (*to));
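
/* Usage sketch (illustrative only, not part of the original sources):
   deps are typically built on the stack and later copied into pooled
   dep_nodes by sd_add_dep, so no heap management is needed at creation
   points:

     dep_def _dep, *dep = &_dep;

     init_dep (dep, producer, consumer, REG_DEP_TRUE);
     sd_add_dep (dep, false);

   Here PRODUCER and CONSUMER stand for two hypothetical insns, with
   PRODUCER appearing earlier in the insn stream.  */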
static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
		      | DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
dump_dep (FILE *dump, dep_t dep, int flags)

  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      enum reg_note type = DEP_TYPE (dep);

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    if (current_sched_info->flags & USE_DEPS_LIST)
      dump_ds (dump, DEP_STATUS (dep));

/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
sd_debug_dep (dep_t dep)

  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXTP.  */
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)

  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }
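
/* Illustration (not from the original sources): DEP_LINK_PREV_NEXTP does
   not point to the previous link but to the previous link's NEXT field
   (or to the list's FIRST field for the head element).  After

     attach_dep_link (l, &DEPS_LIST_FIRST (list));

   the following invariants hold, which is what lets detach_dep_link work
   in constant time without knowing the containing list:

     *DEP_LINK_PREV_NEXTP (l) == l
     DEP_LINK_NEXT (l) == old head (or NULL)  */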
/* Add dep_link LINK to deps_list L.  */
add_to_deps_list (dep_link_t link, deps_list_t l)

  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  ++DEPS_LIST_N_LINKS (l);

/* Detach dep_link L from the list.  */
detach_dep_link (dep_link_t l)

  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;

/* Remove link LINK from list LIST.  */
remove_from_deps_list (dep_link_t link, deps_list_t list)

  detach_dep_link (link);

  --DEPS_LIST_N_LINKS (list);

/* Move link LINK from list FROM to list TO.  */
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)

  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);

/* Return true if LINK is not attached to any list.  */
dep_link_is_detached_p (dep_link_t link)

  return DEP_LINK_PREV_NEXTP (link) == NULL;
/* Pool to hold all dependency nodes (dep_node_t).  */
static alloc_pool dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
create_dep_node (void)

  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

/* Delete dep_node N.  N must not be connected to any deps_list.  */
delete_dep_node (dep_node_t n)

  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  pool_free (dn_pool, n);

/* Pool to hold dependencies lists (deps_list_t).  */
static alloc_pool dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;
/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
deps_list_empty_p (deps_list_t l)

  return DEPS_LIST_N_LINKS (l) == 0;

/* Create a new deps_list.  */
create_deps_list (void)

  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

/* Free deps_list L.  */
free_deps_list (deps_list_t l)

  gcc_assert (deps_list_empty_p (l));

  pool_free (dl_pool, l);

/* Return true if there are no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  */
deps_pools_are_empty_p (void)

  return dn_pool_diff == 0 && dl_pool_diff == 0;

/* Remove all elements from L.  */
clear_deps_list (deps_list_t l)

      dep_link_t link = DEPS_LIST_FIRST (l);

      remove_from_deps_list (link, l);
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;

/* The following enumeration values tell us what dependencies we
   should use to implement the barrier.  We use true-dependencies for
   TRUE_BARRIER and anti-dependencies for MOVE_BARRIER.  */
enum reg_pending_barrier_mode

static enum reg_pending_barrier_mode reg_pending_barrier;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches in typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap for the true-dependency cache is
   allocated, then the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache;
static bitmap_head *output_dependency_cache;
static bitmap_head *anti_dependency_cache;
static bitmap_head *spec_dependency_cache;
static int cache_size;
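
/* Illustration (assumed usage, matching the lookups later in this file):
   each cache is an array of bitmaps indexed by the luid of the consumer,
   in which the bit for the producer's luid is set.  So a true dependence
   PRO -> CON is tested roughly as:

     bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
		   INSN_LUID (pro))  */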
static int deps_may_trap_p (rtx);
static void add_dependence_list (rtx, rtx, int, enum reg_note);
static void add_dependence_list_and_free (rtx, rtx *, int, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);

static void flush_pending_lists (struct deps *, rtx, int, int);
static void sched_analyze_1 (struct deps *, rtx, rtx);
static void sched_analyze_2 (struct deps *, rtx, rtx);
static void sched_analyze_insn (struct deps *, rtx, rtx);

static rtx sched_get_condition (rtx);
static int conditions_mutex_p (rtx, rtx);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

static dw_t estimate_dep_weak (rtx, rtx);
#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */
deps_may_trap_p (rtx mem)

  rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      rtx t = get_reg_known_value (REGNO (addr));

  return rtx_addr_can_trap_p (addr);

/* Find the condition under which INSN is executed.  */
sched_get_condition (rtx insn)

  rtx pat = PATTERN (insn);

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      return gen_rtx_fmt_ee (revcode, GET_MODE (cond), XEXP (cond, 0),
			     XEXP (cond, 1));
    }
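
/* Example (illustrative): for a conditional jump such as

     (set (pc) (if_then_else (ne (reg 100) (const_int 0))
			     (label_ref 23)
			     (pc)))

   the fall-through is in arm 2, so the condition is returned as-is;
   had (pc) been in arm 1, the comparison would be reversed with
   reversed_comparison_code as above.  */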
/* Return nonzero if conditions COND1 and COND2 can never both be true.  */
conditions_mutex_p (rtx cond1, rtx cond2)

  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) == reversed_comparison_code (cond2, NULL)
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;

/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
sched_insns_conditions_mutex_p (rtx insn1, rtx insn2)

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition (insn1);
      cond2 = sched_get_condition (insn2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
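
/* Example (illustrative): two COND_EXEC insns guarded by

     (eq (reg 100) (const_int 0))   and   (ne (reg 100) (const_int 0))

   satisfy conditions_mutex_p, so no dependence is added between them,
   provided neither insn modifies (reg 100).  */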
/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
sd_next_list (rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)

  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
sd_lists_size (rtx insn, sd_list_types_def list_types)

  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      size += DEPS_LIST_N_LINKS (list);
    }

  return size;

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
sd_lists_empty_p (rtx insn, sd_list_types_def list_types)

  return sd_lists_size (insn, list_types) == 0;
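
/* Usage sketch (illustrative): list types can be OR-ed together, so a
   consumer's complete backward dependencies can be walked as:

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_HARD_BACK | SD_LIST_SPEC_BACK, sd_it, dep)
       process (dep);

   where process () is a placeholder.  SD_LIST_BACK is assumed to be a
   shorthand for exactly this combination.  */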
/* Initialize data for INSN.  */
sd_init_insn (rtx insn)

  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */

/* Free data for INSN.  */
sd_finish_insn (rtx insn)

  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
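
/* Lifecycle sketch (illustrative): every analyzed insn carries five
   deps_lists, created by sd_init_insn before analysis and released by
   sd_finish_insn when the region is done:

     sd_init_insn (insn);
     ... build deps with sd_add_dep () / sd_add_or_update_dep () ...
     ... schedule, moving deps to the resolved lists ...
     sd_finish_insn (insn);

   free_deps_list asserts emptiness, so all deps must be deleted (or
   resolved and then deleted) before sd_finish_insn is reached.  */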
/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)

  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of CON or forw list of PRO
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;

/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches, if they are available, to check whether a
   dependency is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none was found.  */
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)

  if (true_dependency_cache != NULL)
    {
      /* Avoiding the list walk below can cut compile times dramatically
	 for some code.  */
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      gcc_assert (output_dependency_cache != NULL
		  && anti_dependency_cache != NULL);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
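
/* Usage sketch (illustrative): the caches make a negative answer cheap,
   so a caller may probe before doing anything heavier:

     dep_t dep = sd_find_dep_between (pro, con, false);

     if (dep != NULL && DEP_TYPE (dep) == REG_DEP_TRUE)
       ... PRO computes a value that CON reads ...  */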
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks if producer and consumer are the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)

  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
#ifdef INSN_SCHEDULING
      if (current_sched_info->flags & DO_SPECULATION)
	/* INSN has an internal dependence, which we can't overcome.  */
	HAS_INTERNAL_DEP (insn) = 1;
#endif

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
#ifdef INSN_SCHEDULING
/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)

  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	       ..we should update DEP_STATUS
	     else
	       ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
/* Set dependency caches according to DEP.  */
set_dependency_caches (dep_t dep)

  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    switch (DEP_TYPE (dep))
      {
      case REG_DEP_TRUE:
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	break;

      case REG_DEP_OUTPUT:
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	break;

      case REG_DEP_ANTI:
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	break;

      default:
	gcc_unreachable ();
      }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
/* The type of dependence DEP has changed from OLD_TYPE.  Update dependency
   caches accordingly.  */
update_dependency_caches (dep_t dep, enum reg_note old_type)

  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear the corresponding cache entry because the type of the link
     may have changed.  Keep the entries if USE_DEPS_LIST is set.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    switch (old_type)
      {
      case REG_DEP_OUTPUT:
	bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	break;

      case REG_DEP_ANTI:
	bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	break;

      default:
	gcc_unreachable ();
      }

  set_dependency_caches (dep);
/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
change_spec_dep_to_hard (sd_iterator_def sd_it)

  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it, rtx mem1, rtx mem2)

  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

#ifdef INSN_SCHEDULING
  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	/* Either existing dep or a dep we're adding or both are
	   speculative.  */
	{
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    {
	      new_status &= ~SPECULATIVE;

	      if (dep_status & SPECULATIVE)
		/* The old dep was speculative, but now it
		   isn't.  */
		change_spec_dep_to_hard (sd_it);
	    }
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }
#endif

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)

  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);
#endif

  if (true_dependency_cache != NULL)
    switch (ask_dependency_caches (new_dep))
      {
      case DEP_PRESENT:
	return DEP_PRESENT;

      case DEP_CHANGED:
	maybe_present_p = true;
	present_p = true;
	break;

      case DEP_CREATED:
	maybe_present_p = false;
	present_p = false;
	break;

      default:
	gcc_unreachable ();
      }
#endif

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
    }
  else
    /* We didn't find a dep, so it shouldn't be present in the cache.  */
    gcc_assert (!present_p);

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (current_sched_info->flags & DO_SPECULATION);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)

  rtx con = DEP_CON (dep);

  if (!resolved_p)
    {
      if ((current_sched_info->flags & DO_SPECULATION)
	  && (DEP_STATUS (dep) & SPECULATIVE))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
sd_add_dep (dep_t dep, bool resolved_p)

  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION)
      && !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
  check_dep (dep, false);
#endif
#endif

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
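
/* Usage sketch (illustrative): creating an anti dependence between two
   hypothetical insns READER and WRITER from scratch:

     dep_def _dep, *dep = &_dep;

     init_dep (dep, reader, writer, REG_DEP_ANTI);
     sd_add_dep (dep, false);

   One dep_node now links WRITER's backward list and READER's forward
   list, and the bitmap caches have been updated by
   set_dependency_caches.  */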
/* Add or update a backward dependence described by DEP.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)

  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);

/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
sd_resolve_dep (sd_iterator_def sd_it)

  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);

  if ((current_sched_info->flags & DO_SPECULATION)
      && (DEP_STATUS (dep) & SPECULATIVE))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
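
/* Usage sketch (illustrative): a scheduler would resolve the forward
   dependencies of INSN once INSN itself has been issued, e.g.:

     sd_iterator_def sd_it;
     dep_t dep;

     for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
	  sd_iterator_cond (&sd_it, &dep);)
       sd_resolve_dep (sd_it);

   Since SD_IT advances past the moved element, no explicit step is
   needed, mirroring the deletion loop in delete_all_dependences.  */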
/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
sd_copy_back_deps (rtx to, rtx from, bool resolved_p)

  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
sd_delete_dep (sd_iterator_def sd_it)

  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
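
/* Usage sketch (illustrative): because SD_IT ends up pointing at the
   next dependence, deletion while iterating needs no explicit advance:

     for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
	  sd_iterator_cond (&sd_it, &dep);)
       sd_delete_dep (sd_it);

   This is exactly the pattern used by delete_all_dependences below.  */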
/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)

  sd_iterator_def sd_it;
  dep_t dep;
  int all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    FOR_EACH_DEP (insn, types, sd_it, dep)
      {
	dump_dep (dump, dep, dump_dep_flags | all);
	fprintf (dump, " ");
      }

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
sd_debug_lists (rtx insn, sd_list_types_def types)

  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
/* A convenience wrapper to operate on an entire list.  */

add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)

  for (; list; list = XEXP (list, 1))
    if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
      add_dependence (insn, XEXP (list, 0), dep_type);

/* Similar, but free *LISTP at the same time.  */

add_dependence_list_and_free (rtx insn, rtx *listp, int uncond,
			      enum reg_note dep_type)

  rtx list, next;

  for (list = *listp, *listp = NULL; list ; list = next)
    {
      next = XEXP (list, 1);
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
	add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
/* Clear all dependencies for an insn.  */
delete_all_dependences (rtx insn)

  sd_iterator_def sd_it;
  dep_t dep;

  /* The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     sd_delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

fixup_sched_groups (rtx insn)

  sd_iterator_def sd_it;
  dep_t dep;
  rtx prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx i = insn;
      rtx pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
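
/* Example (illustrative): for the sequence

     insn 1:  (set (mem:SI (reg A)) (reg B))   ; write
     insn 2:  (set (reg C) (mem:SI (reg A)))   ; read
     insn 3:  (set (mem:SI (reg A)) (reg D))   ; write

   insn 2 true-depends on insn 1 (read follows write), insn 3
   anti-depends on insn 2 (write follows read), and insn 3
   output-depends on insn 1 (write follows write).  */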
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

add_insn_mem_dependence (struct deps *deps, bool read_p,
			 rtx insn, rtx mem)

  rtx link;
  rtx *insn_list;
  rtx *mem_list;

  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (current_sched_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }

  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = link;
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

flush_pending_lists (struct deps *deps, rtx insn, int for_read,
		     int for_write)

  if (for_write)
    {
      add_dependence_list_and_free (insn, &deps->pending_read_insns, 1,
				    REG_DEP_ANTI);
      free_EXPR_LIST_list (&deps->pending_read_mems);
      deps->pending_read_list_length = 0;
    }

  add_dependence_list_and_free (insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  deps->pending_write_list_length = 0;

  add_dependence_list_and_free (insn, &deps->last_pending_memory_flush, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
  deps->pending_flush_length = 1;
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */

sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
		   enum rtx_code ref, rtx insn)

  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i = hard_regno_nregs[regno][mode];

      if (ref == SET)
	while (--i >= 0)
	  SET_REGNO_REG_SET (reg_pending_sets, regno + i);
      else if (ref == USE)
	while (--i >= 0)
	  SET_REGNO_REG_SET (reg_pending_uses, regno + i);
      else
	while (--i >= 0)
	  SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
    }

  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
    {
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);
    }
  else
    {
      if (ref == SET)
	SET_REGNO_REG_SET (reg_pending_sets, regno);
      else if (ref == USE)
	SET_REGNO_REG_SET (reg_pending_uses, regno);
      else
	SET_REGNO_REG_SET (reg_pending_clobbers, regno);

      /* Pseudos that are REG_EQUIV to something may be replaced
	 by that during reloading.  We need only add dependencies for
	 the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
	{
	  rtx t = get_reg_known_value (regno);

	  if (MEM_P (t))
	    sched_analyze_2 (deps, XEXP (t, 0), insn);
	}

      /* Don't let it cross a call after scheduling if it doesn't
	 already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
	{
	  if (ref == USE)
	    deps->sched_before_next_call
	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
	  else
	    add_dependence_list (insn, deps->last_function_call, 1,
				 REG_DEP_ANTI);
	}
    }
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

sched_analyze_1 (struct deps *deps, rtx x, rtx insn)

  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	  sched_analyze_1 (deps,
			   gen_rtx_CLOBBER (VOIDmode,
					    XEXP (XVECEXP (dest, 0, i), 0)),
			   insn);

      if (GET_CODE (x) == SET)
	sched_analyze_2 (deps, SET_SRC (x), insn);
      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT)
    {
      if (GET_CODE (dest) == STRICT_LOW_PART
	  || GET_CODE (dest) == ZERO_EXTRACT
	  || df_read_modify_subreg_p (dest))
	{
	  /* These both read and modify the result.  We must handle
	     them as writes to get proper dependencies for following
	     instructions.  We must handle them as reads to get proper
	     dependencies from this to previous instructions.
	     Thus we need to call sched_analyze_2.  */
	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
	}
      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
	}
      dest = XEXP (dest, 0);
    }
  if (REG_P (dest))
    {
      int regno = REGNO (dest);
      enum machine_mode mode = GET_MODE (dest);

      sched_analyze_reg (deps, regno, mode, code, insn);

#ifdef STACK_REGS
      /* Treat all writes to a stack register as modifying the TOS.  */
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
	{
	  /* Avoid analyzing the same register twice.  */
	  if (regno != FIRST_STACK_REG)
	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
	  sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
	}
#endif
    }
  else if (MEM_P (dest))
    {
      /* Writing memory.  */
      rtx t = dest;

      if (current_sched_info->use_cselib)
	{
	  t = shallow_copy_rtx (dest);
	  cselib_lookup (XEXP (t, 0), Pmode, 1);
	  XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	}
      t = canon_rtx (t);

      if ((deps->pending_read_list_length + deps->pending_write_list_length)
	  > MAX_PENDING_LIST_LENGTH)
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the default value of 32.  */
	  flush_pending_lists (deps, insn, false, true);
	}
      else
	{
	  rtx pending, pending_mem;

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (anti_dependence (XEXP (pending_mem, 0), t)
		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (output_dependence (XEXP (pending_mem, 0), t)
		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
			       REG_DEP_ANTI);

	  add_insn_mem_dependence (deps, false, insn, dest);
	}
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
/* Analyze the uses of memory and registers in rtx X in INSN.  */

sched_analyze_2 (struct deps *deps, rtx x, rtx insn)

  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
	 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
	 this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      SCHED_GROUP_P (insn) = 1;
      /* Don't move CC0 setter to another block (it can set up the
	 same flag for previous CC0 users which is safe).  */
      CANT_MOVE (prev_nonnote_insn (insn)) = 1;
      return;
#endif

    case REG:
      {
	int regno = REGNO (x);
	enum machine_mode mode = GET_MODE (x);

	sched_analyze_reg (deps, regno, mode, USE, insn);

#ifdef STACK_REGS
	/* Treat all reads of a stack register as modifying the TOS.  */
	if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
	  {
	    /* Avoid analyzing the same register twice.  */
	    if (regno != FIRST_STACK_REG)
	      sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
	  }
#endif
	return;
      }
    case MEM:
      {
	/* Reading memory.  */
	rtx u;
	rtx pending, pending_mem;
	rtx t = x;

	if (current_sched_info->use_cselib)
	  {
	    t = shallow_copy_rtx (t);
	    cselib_lookup (XEXP (t, 0), Pmode, 1);
	    XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	  }
	t = canon_rtx (t);

	pending = deps->pending_read_insns;
	pending_mem = deps->pending_read_mems;
	while (pending)
	  {
	    if (read_dependence (XEXP (pending_mem, 0), t)
		&& ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
	      add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	pending = deps->pending_write_insns;
	pending_mem = deps->pending_write_mems;
	while (pending)
	  {
	    if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
				 t, rtx_varies_p)
		&& ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
	      {
		if ((current_sched_info->flags & DO_SPECULATION)
		    && (spec_info->mask & BEGIN_DATA))
		  /* Create a data-speculative dependence between producer
		     and consumer.  */
		  {
		    dep_def _dep, *dep = &_dep;

		    init_dep_1 (dep, XEXP (pending, 0), insn, REG_DEP_TRUE,
				BEGIN_DATA | DEP_TRUE);

		    maybe_add_or_update_dep_1 (dep, false,
					       XEXP (pending_mem, 0), t);
		  }
		else
		  add_dependence (insn, XEXP (pending, 0), REG_DEP_TRUE);
	      }

	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
	  if (! JUMP_P (XEXP (u, 0)) || deps_may_trap_p (x))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	/* Always add these dependencies to pending_reads, since
	   this insn may be followed by a write.  */
	add_insn_mem_dependence (deps, true, insn, x);

	/* Take advantage of tail recursion here.  */
	sched_analyze_2 (deps, XEXP (x, 0), insn);
	return;
      }
    /* Force pending stores to memory in case a trap handler needs them.  */
    case TRAP_IF:
      flush_pending_lists (deps, insn, true, false);
      break;

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
      {
	/* Traditional and volatile asm instructions must be considered to use
	   and clobber all hard registers, all pseudo-registers and all of
	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

	   Consider for instance a volatile asm that changes the fpu rounding
	   mode.  An insn should not be moved across this even if it only uses
	   pseudo-regs because it might give an incorrectly rounded result.  */
	if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
	  reg_pending_barrier = TRUE_BARRIER;

	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
	   We cannot just fall through here since then we would be confused
	   by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
	   traditional asms unlike their normal usage.  */

	if (code == ASM_OPERANDS)
	  {
	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
	    return;
	  }
	break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
	 to get proper dependencies for following instructions.  We must handle
	 them as reads to get proper dependencies from this to previous
	 instructions.  Thus we need to pass them to both sched_analyze_1
	 and sched_analyze_2.  We must call sched_analyze_2 first in order
	 to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }
/* Analyze an INSN with pattern X to find all dependencies.  */

sched_analyze_insn (struct deps *deps, rtx x, rtx insn)

  RTX_CODE code = GET_CODE (x);
  rtx link;
  unsigned i;
  reg_set_iterator rsi;

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
	 false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    {
      sched_analyze_1 (deps, x, insn);

      /* Bare clobber insns are used for letting life analysis, reg-stack
	 and others know that a value is dead.  Depend on the last call
	 instruction so that reg-stack won't get confused.  */
      if (code == CLOBBER)
	add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_OUTPUT);
    }
  else if (code == PARALLEL)
    {
      for (i = XVECLEN (x, 0); i--;)
	{
	  rtx sub = XVECEXP (x, 0, i);
	  code = GET_CODE (sub);

	  if (code == COND_EXEC)
	    {
	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
	      sub = COND_EXEC_CODE (sub);
	      code = GET_CODE (sub);
	    }
	  if (code == SET || code == CLOBBER)
	    sched_analyze_1 (deps, sub, insn);
	  else
	    sched_analyze_2 (deps, sub, insn);
	}
    }
  else
    sched_analyze_2 (deps, x, insn);
  /* Mark registers CLOBBERED or used by called function.  */
  if (CALL_P (insn))
    {
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
	{
	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
	    sched_analyze_1 (deps, XEXP (link, 0), insn);
	  else
	    sched_analyze_2 (deps, XEXP (link, 0), insn);
	}
      if (find_reg_note (insn, REG_SETJMP, NULL))
	reg_pending_barrier = MOVE_BARRIER;
    }

  if (JUMP_P (insn))
    {
      rtx next;

      next = next_nonnote_insn (insn);
      if (next && BARRIER_P (next))
	reg_pending_barrier = TRUE_BARRIER;
      else
	{
	  rtx pending, pending_mem;
	  regset_head tmp_uses, tmp_sets;

	  INIT_REG_SET (&tmp_uses);
	  INIT_REG_SET (&tmp_sets);

	  (*current_sched_info->compute_jump_reg_dependencies)
	    (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
	  /* Make latency of jump equal to 0 by using anti-dependence.  */
	  EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];

	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
	      reg_last->uses_length++;
	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
	    }
	  IOR_REG_SET (reg_pending_sets, &tmp_sets);

	  CLEAR_REG_SET (&tmp_uses);
	  CLEAR_REG_SET (&tmp_sets);

	  /* All memory writes and volatile reads must happen before the
	     jump.  Non-volatile reads must happen before the jump iff
	     the result is needed by the above register used mask.  */

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
			       REG_DEP_ANTI);
	}
    }
  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  Same for
     non-jump instructions that define block boundaries.
     ??? Unclear whether this is still necessary in EBB mode.  If not,
     add_branch_dependences should be adjusted for RGN mode instead.  */
  if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
      || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
    reg_pending_barrier = MOVE_BARRIER;

  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
    {
      /* In the case of a barrier, most of the added dependencies are not
	 real, so we use anti-dependence here.  */
      if (sched_get_condition (insn))
	{
	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];

	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
	      add_dependence_list
		(insn, reg_last->sets, 0,
		 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
	      add_dependence_list
		(insn, reg_last->clobbers, 0,
		 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
	    }
	}
      else
	{
	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];

	      add_dependence_list_and_free (insn, &reg_last->uses, 0,
					    REG_DEP_ANTI);
	      add_dependence_list_and_free
		(insn, &reg_last->sets, 0,
		 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
	      add_dependence_list_and_free
		(insn, &reg_last->clobbers, 0,
		 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
	      reg_last->uses_length = 0;
	      reg_last->clobbers_length = 0;
	    }
	}

      for (i = 0; i < (unsigned)deps->max_reg; i++)
	{
	  struct deps_reg *reg_last = &deps->reg_last[i];

	  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
	}

      flush_pending_lists (deps, insn, true, true);
      CLEAR_REG_SET (&deps->reg_conditional_sets);
      reg_pending_barrier = NOT_A_BARRIER;
    }
  else
    {
      /* If the current insn is conditional, we can't free any
	 of the lists.  */
      if (sched_get_condition (insn))
	{
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];

	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
	      reg_last->uses_length++;
	    }
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];

	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
	      reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
	      reg_last->clobbers_length++;
	    }
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];

	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
	      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	      SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
	    }
	}
      else
	{
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];

	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
	      reg_last->uses_length++;
	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
	    }
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];

	      if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
		  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
		{
		  add_dependence_list_and_free (insn, &reg_last->sets, 0,
						REG_DEP_OUTPUT);
		  add_dependence_list_and_free (insn, &reg_last->uses, 0,
						REG_DEP_ANTI);
		  add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
						REG_DEP_OUTPUT);
		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
		  reg_last->clobbers_length = 0;
		  reg_last->uses_length = 0;
		}
	      else
		{
		  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
		  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
		}
	      reg_last->clobbers_length++;
	      reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
	    }
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];

	      add_dependence_list_and_free (insn, &reg_last->sets, 0,
					    REG_DEP_OUTPUT);
	      add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
					    REG_DEP_OUTPUT);
	      add_dependence_list_and_free (insn, &reg_last->uses, 0,
					    REG_DEP_ANTI);
	      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	      reg_last->uses_length = 0;
	      reg_last->clobbers_length = 0;
	      CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
	    }
	}

      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);

      CLEAR_REG_SET (reg_pending_uses);
      CLEAR_REG_SET (reg_pending_clobbers);
      CLEAR_REG_SET (reg_pending_sets);
    }
  /* If we are currently in a libcall scheduling group, then mark the
     current insn as being in a scheduling group and that it can not
     be moved into a different basic block.  */

  if (deps->libcall_block_tail_insn)
    {
      SCHED_GROUP_P (insn) = 1;
      CANT_MOVE (insn) = 1;
    }

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
	goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (REG_P (tmp))
	dest_regno = REGNO (tmp);
      else
	goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if ((GET_CODE (tmp) == PLUS
	   || GET_CODE (tmp) == MINUS)
	  && REG_P (XEXP (tmp, 0))
	  && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
	  && dest_regno == STACK_POINTER_REGNUM)
	src_regno = STACK_POINTER_REGNUM;
      else if (REG_P (tmp))
	src_regno = REGNO (tmp);
      else
	goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
	  || dest_regno < FIRST_PSEUDO_REGISTER)
	{
	  if (deps->in_post_call_group_p == post_call_initial)
	    deps->in_post_call_group_p = post_call;

	  SCHED_GROUP_P (insn) = 1;
	  CANT_MOVE (insn) = 1;
	}
      else
	{
	end_call_group:
	  deps->in_post_call_group_p = not_post_call;
	}
    }

  /* Fixup the dependencies in the sched group.  */
  if (SCHED_GROUP_P (insn))
    fixup_sched_groups (insn);

  if ((current_sched_info->flags & DO_SPECULATION)
      && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
    /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
       be speculated.  */
    {
      sd_iterator_def sd_it;
      dep_t dep;

      for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
	   sd_iterator_cond (&sd_it, &dep);)
	change_spec_dep_to_hard (sd_it);
    }
/* Analyze every insn between HEAD and TAIL inclusive, creating backward
   dependencies for each insn.  */

sched_analyze (struct deps *deps, rtx head, rtx tail)

  rtx insn;

  if (current_sched_info->use_cselib)
    cselib_init (true);

  /* Before reload, if the previous block ended in a call, show that
     we are inside a post-call group, so as to keep the lifetimes of
     hard registers correct.  */
  if (! reload_completed && !LABEL_P (head))
    {
      insn = prev_nonnote_insn (head);
      if (insn && CALL_P (insn))
	deps->in_post_call_group_p = post_call_initial;
    }
  for (insn = head;; insn = NEXT_INSN (insn))
    {
      rtx link, end_seq, r0, set;

      if (INSN_P (insn))
	/* And initialize deps_lists.  */
	sd_init_insn (insn);

      if (NONJUMP_INSN_P (insn) || JUMP_P (insn))
	{
	  /* Make each JUMP_INSN a scheduling barrier for memory
	     references.  */
	  if (JUMP_P (insn))
	    {
	      /* Keep the list a reasonable size.  */
	      if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
		flush_pending_lists (deps, insn, true, true);
	      else
		deps->last_pending_memory_flush
		  = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
	    }
	  sched_analyze_insn (deps, PATTERN (insn), insn);
	}
      else if (CALL_P (insn))
	{
	  int i;

	  CANT_MOVE (insn) = 1;

	  if (find_reg_note (insn, REG_SETJMP, NULL))
	    /* This is setjmp.  Assume that all registers, not just
	       hard registers, may be clobbered by this call.  */
	    reg_pending_barrier = MOVE_BARRIER;
	  else
	    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	      /* A call may read and modify global register variables.  */
	      if (global_regs[i])
		{
		  SET_REGNO_REG_SET (reg_pending_sets, i);
		  SET_REGNO_REG_SET (reg_pending_uses, i);
		}
	      /* Other call-clobbered hard regs may be clobbered.
		 Since we only have a choice between 'might be clobbered'
		 and 'definitely not clobbered', we must include all
		 partly call-clobbered registers here.  */
	      else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
		       || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
		SET_REGNO_REG_SET (reg_pending_clobbers, i);
	      /* We don't know what set of fixed registers might be used
		 by the function, but it is certain that the stack pointer
		 is among them; be conservative.  */
	      else if (fixed_regs[i])
		SET_REGNO_REG_SET (reg_pending_uses, i);
	      /* The frame pointer is normally not used by the function
		 itself, but by the debugger.  */
	      /* ??? MIPS o32 is an exception.  It uses the frame pointer
		 in the macro expansion of jal but does not represent this
		 fact in the call_insn rtl.  */
	      else if (i == FRAME_POINTER_REGNUM
		       || (i == HARD_FRAME_POINTER_REGNUM
			   && (! reload_completed || frame_pointer_needed)))
		SET_REGNO_REG_SET (reg_pending_uses, i);

	  /* For each insn which shouldn't cross a call, add a dependence
	     between that insn and this call insn.  */
	  add_dependence_list_and_free (insn, &deps->sched_before_next_call, 1,
					REG_DEP_ANTI);

	  sched_analyze_insn (deps, PATTERN (insn), insn);

	  /* In the absence of interprocedural alias analysis, we must flush
	     all pending reads and writes, and start new dependencies starting
	     from here.  But only flush writes for constant calls (which may
	     be passed a pointer to something we haven't written yet).  */
	  flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));

	  /* Remember the last function call for limiting lifetimes.  */
	  free_INSN_LIST_list (&deps->last_function_call);
	  deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

	  /* Before reload, begin a post-call group, so as to keep the
	     lifetimes of hard registers correct.  */
	  if (! reload_completed)
	    deps->in_post_call_group_p = post_call;
	}
      /* EH_REGION insn notes cannot appear until well after we complete
	 scheduling.  */
      if (NOTE_P (insn))
	gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
		    && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);

      if (current_sched_info->use_cselib)
	cselib_process_insn (insn);
      /* Now that we have completed handling INSN, check and see if it is
	 a CLOBBER beginning a libcall block.  If it is, record the
	 end of the libcall sequence.

	 We want to schedule libcall blocks as a unit before reload.  While
	 this restricts scheduling, it preserves the meaning of a libcall
	 sequence.

	 As a side effect, we may get better code due to decreased register
	 pressure as well as less chance of a foreign insn appearing in
	 a libcall block.  */
      if (!reload_completed
	  /* Note we may have nested libcall sequences.  We only care about
	     the outermost libcall sequence.  */
	  && deps->libcall_block_tail_insn == 0
	  /* The sequence must start with a clobber of a register.  */
	  && NONJUMP_INSN_P (insn)
	  && GET_CODE (PATTERN (insn)) == CLOBBER
	  && (r0 = XEXP (PATTERN (insn), 0), REG_P (r0))
	  /* The CLOBBER must also have a REG_LIBCALL note attached.  */
	  && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
	  && (end_seq = XEXP (link, 0)) != 0
	  /* The insn referenced by the REG_LIBCALL note must be a
	     simple nop copy with the same destination as the register
	     mentioned in the clobber.  */
	  && (set = single_set (end_seq)) != 0
	  && SET_DEST (set) == r0 && SET_SRC (set) == r0
	  /* And finally the insn referenced by the REG_LIBCALL must
	     also contain a REG_EQUAL note and a REG_RETVAL note.  */
	  && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
	  && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
	deps->libcall_block_tail_insn = XEXP (link, 0);
      /* If we have reached the end of a libcall block, then close the
	 block.  */
      if (deps->libcall_block_tail_insn == insn)
	deps->libcall_block_tail_insn = 0;

      if (insn == tail)
	{
	  if (current_sched_info->use_cselib)
	    cselib_finish ();
	  return;
	}
    }
  gcc_unreachable ();
}
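/* A minimal usage sketch (assumed, not part of this file): a scheduler
   would typically drive the analysis for one block with a caller-owned
   context, along these lines:  */
#if 0
static void
example_compute_block_dependencies (rtx head, rtx tail)
{
  struct deps tmp_deps;

  init_deps (&tmp_deps);			/* Set up an empty context.  */
  sched_analyze (&tmp_deps, head, tail);	/* Create backward deps.  */
  free_deps (&tmp_deps);			/* Release the insn lists.  */
}
#endif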
/* Helper for sched_free_deps ().
   Delete INSN's (RESOLVED_P) backward dependencies.  */
static void
delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  sd_list_types_def types;

  if (resolved_p)
    types = SD_LIST_RES_BACK;
  else
    types = SD_LIST_BACK;

  for (sd_it = sd_iterator_start (insn, types);
       sd_iterator_cond (&sd_it, &dep);)
    {
      dep_link_t link = *sd_it.linkp;
      dep_node_t node = DEP_LINK_NODE (link);
      deps_list_t back_list;
      deps_list_t forw_list;

      get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
      remove_from_deps_list (link, back_list);
      delete_dep_node (node);
    }
}
/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
   the insns' deps_lists.  */
void
sched_free_deps (rtx head, rtx tail, bool resolved_p)
{
  rtx insn;
  rtx next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
	/* Clear resolved back deps together with their dep_nodes.  */
	delete_dep_nodes_in_back_deps (insn, resolved_p);

	/* Clear forward deps and leave the dep_nodes to the
	   corresponding back_deps list.  */
	if (resolved_p)
	  clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
	else
	  clear_deps_list (INSN_FORW_DEPS (insn));

	sd_finish_insn (insn);
      }
}
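/* A companion sketch (assumed, not part of this file): once a region has
   been scheduled, its dependencies and deps_lists are torn down in one
   call; sd_finish_insn () above undoes the sd_init_insn () performed in
   sched_analyze ().  */
#if 0
  /* HEAD and TAIL delimit the scheduled block.  */
  sched_free_deps (head, tail, true /* resolved_p */);
#endif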
/* Initialize variables for region data dependence analysis.  */
void
init_deps (struct deps *deps)
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
  INIT_REG_SET (&deps->reg_last_in_use);
  INIT_REG_SET (&deps->reg_conditional_sets);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_read_list_length = 0;
  deps->pending_write_list_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->sched_before_next_call = 0;
  deps->in_post_call_group_p = not_post_call;
  deps->libcall_block_tail_insn = 0;
}
/* Free insn lists found in DEPS.  */
void
free_deps (struct deps *deps)
{
  unsigned i;
  reg_set_iterator rsi;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
	free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->clobbers)
	free_INSN_LIST_list (&reg_last->clobbers);
    }
  CLEAR_REG_SET (&deps->reg_last_in_use);
  CLEAR_REG_SET (&deps->reg_conditional_sets);

  free (deps->reg_last);
}
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled;
   it is used in the estimate of profitability.  */
void
init_dependency_caches (int luid)
{
  /* Average number of insns in the basic block.
     '+ 1' is used to make it nonzero.  */
  int insns_in_block = luid / n_basic_blocks + 1;

  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
  if (insns_in_block > 100 * 5)
    {
      cache_size = 0;
      extend_dependency_caches (luid, true);
    }

  dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
			       /* Allocate lists for one block at a time.  */
			       insns_in_block);

  dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
			       /* Allocate nodes for one block at a time.
				  We assume that an average insn has
				  5 deps.  */
			       5 * insns_in_block);
}
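/* Worked example of the profitability check above (values assumed for
   illustration): with luid == 10000 and n_basic_blocks == 4,
   insns_in_block == 10000 / 4 + 1 == 2501 > 500, so the bitmap caches
   are created.  With 42000 insns spread over 8000 small blocks,
   insns_in_block == 6, and the caches are skipped.  */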
/* Create or extend (depending on CREATE_P) dependency caches to
   size N.  */
void
extend_dependency_caches (int n, bool create_p)
{
  if (create_p || true_dependency_cache)
    {
      int i, luid = cache_size + n;

      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
					  luid);
      output_dependency_cache = XRESIZEVEC (bitmap_head,
					    output_dependency_cache, luid);
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
					  luid);

      if (current_sched_info->flags & DO_SPECULATION)
	spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
					    luid);

      for (i = cache_size; i < luid; i++)
	{
	  bitmap_initialize (&true_dependency_cache[i], 0);
	  bitmap_initialize (&output_dependency_cache[i], 0);
	  bitmap_initialize (&anti_dependency_cache[i], 0);

	  if (current_sched_info->flags & DO_SPECULATION)
	    bitmap_initialize (&spec_dependency_cache[i], 0);
	}
      cache_size = luid;
    }
}
/* Free the caches allocated in init_dependency_caches.  */
void
free_dependency_caches (void)
{
  gcc_assert (deps_pools_are_empty_p ());
  free_alloc_pool_if_empty (&dn_pool);
  free_alloc_pool_if_empty (&dl_pool);
  gcc_assert (dn_pool == NULL && dl_pool == NULL);

  if (true_dependency_cache)
    {
      int i;

      for (i = 0; i < cache_size; i++)
	{
	  bitmap_clear (&true_dependency_cache[i]);
	  bitmap_clear (&output_dependency_cache[i]);
	  bitmap_clear (&anti_dependency_cache[i]);

	  if (current_sched_info->flags & DO_SPECULATION)
	    bitmap_clear (&spec_dependency_cache[i]);
	}
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;

      if (current_sched_info->flags & DO_SPECULATION)
	{
	  free (spec_dependency_cache);
	  spec_dependency_cache = NULL;
	}
    }
}
/* Initialize some global variables needed by the dependency analysis
   code.  */
void
init_deps_global (void)
{
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_barrier = NOT_A_BARRIER;
}

/* Free everything used by the dependency analysis code.  */
void
finish_deps_global (void)
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
}
/* Estimate the weakness of the dependence between MEM1 and MEM2.  */
dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
  rtx r1, r2;

  if (mem1 == mem2)
    /* MEMs are the same - don't speculate.  */
    return MIN_DEP_WEAK;

  r1 = XEXP (mem1, 0);
  r2 = XEXP (mem2, 0);

  if (r1 == r2
      || (REG_P (r1) && REG_P (r2)
	  && REGNO (r1) == REGNO (r2)))
    /* Again, MEMs are the same.  */
    return MIN_DEP_WEAK;
  else if ((REG_P (r1) && !REG_P (r2))
	   || (!REG_P (r1) && REG_P (r2)))
    /* Different addressing modes - a reason to be more speculative
       than usual.  */
    return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
  else
    /* We can't say anything about the dependence.  */
    return UNCERTAIN_DEP_WEAK;
}
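/* The "different addressing modes" return value above is just the midpoint
   of the remaining range: NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK)
   / 2 == (NO_DEP_WEAK + UNCERTAIN_DEP_WEAK) / 2 (up to integer rounding),
   i.e. halfway between "uncertain" and "certainly no dependence".  */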
/* Add or update a backward dependence between INSN and ELEM with type
   DEP_TYPE.  This function can handle same INSN and ELEM (INSN == ELEM).
   It is a convenience wrapper.  */
void
add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
{
  dep_def _dep, *dep = &_dep;

  init_dep (dep, elem, insn, dep_type);
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}
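/* Typical use (illustrative): make INSN follow ELEM without implying a
   flow of data between them,

       add_dependence (insn, elem, REG_DEP_ANTI);

   which the wrapper routes through maybe_add_or_update_dep_1 () so an
   existing INSN/ELEM dependence is updated rather than duplicated.  */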
/* Return the weakness of speculative type TYPE in the dep_status DS.  */
static dw_t
get_dep_weak_1 (ds_t ds, ds_t type)
{
  ds &= type;
  switch (type)
    {
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }
  return (dw_t) ds;
}

/* Likewise, but assert that the weakness is in the valid range.  */
dw_t
get_dep_weak (ds_t ds, ds_t type)
{
  dw_t dw = get_dep_weak_1 (ds, type);

  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
  return dw;
}
/* Return a dep_status identical to DS, except that speculative type
   TYPE has weakness DW.  */
ds_t
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
{
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);

  ds &= ~type;
  switch (type)
    {
    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }
  return ds;
}
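/* The two routines above are inverses on a per-type basis: for any status
   DS and weakness DW in [MIN_DEP_WEAK, MAX_DEP_WEAK],

       get_dep_weak (set_dep_weak (ds, BEGIN_DATA, dw), BEGIN_DATA) == dw

   since set_dep_weak () first clears the type's weakness bits and then
   stores DW at the type's bit offset.  */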
/* Return the join of two dep_statuses DS1 and DS2.  */
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  t = FIRST_SPEC_TYPE;
  do
    {
      if ((ds1 & t) && !(ds2 & t))
	ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
	ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
	{
	  ds_t dw;

	  dw = ((ds_t) get_dep_weak (ds1, t)) * ((ds_t) get_dep_weak (ds2, t));
	  dw /= MAX_DEP_WEAK;
	  if (dw < MIN_DEP_WEAK)
	    dw = MIN_DEP_WEAK;

	  ds = set_dep_weak (ds, t, (dw_t) dw);
	}

      if (t == LAST_SPEC_TYPE)
	break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}
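/* Worked example of the merge rule above: if both DS1 and DS2 carry
   BEGIN_DATA speculation with weakness MAX_DEP_WEAK / 2 (a 50% chance
   each), the merged weakness is

       (MAX_DEP_WEAK / 2) * (MAX_DEP_WEAK / 2) / MAX_DEP_WEAK
	 == MAX_DEP_WEAK / 4,

   i.e. the probabilities multiply as if the two speculations were
   independent, clamped below at MIN_DEP_WEAK.  */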
/* Dump information about the dependence status S.  */
static void
dump_ds (FILE *f, ds_t s)
{
  fprintf (f, "{");
  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");
  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");
  fprintf (f, "}");
}

void
debug_ds (ds_t s)
{
  dump_ds (stderr, s);
  fprintf (stderr, "\n");
}
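/* debug_ds () is meant for interactive use, e.g. from the debugger:

       (gdb) call debug_ds (DEP_STATUS (dep))

   which prints something like "{BEGIN_DATA: 42; DEP_TRUE; }".  */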
#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep (dep_t dep, bool relaxed_p)
{
  enum reg_note dt = DEP_TYPE (dep);
  ds_t ds = DEP_STATUS (dep);

  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      gcc_assert (ds == -1);
      return;
    }

  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
		&& !(ds & DEP_TRUE));
  else
    gcc_assert ((dt == REG_DEP_ANTI)
		&& (ds & DEP_ANTI)
		&& !(ds & (DEP_OUTPUT | DEP_TRUE)));

  /* HARD_DEP cannot appear in the dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that the dependence status is set correctly when speculation
     is not supported.  */
  if (!(current_sched_info->flags & DO_SPECULATION))
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
	{
	  ds_t type = FIRST_SPEC_TYPE;

	  /* Check that dependence weakness is in proper range.  */
	  do
	    {
	      if (ds & type)
		get_dep_weak (ds, type);

	      if (type == LAST_SPEC_TYPE)
		break;
	      type <<= SPEC_TYPE_SHIFT;
	    }
	  while (1);
	}

      if (ds & BEGIN_SPEC)
	{
	  /* Only true dependence can be data speculative.  */
	  if (ds & BEGIN_DATA)
	    gcc_assert (ds & DEP_TRUE);

	  /* Control dependencies in the insn scheduler are represented by
	     anti-dependencies, therefore only anti dependence can be
	     control speculative.  */
	  if (ds & BEGIN_CONTROL)
	    gcc_assert (ds & DEP_ANTI);
	}
      else
	{
	  /* Subsequent speculations should resolve true dependencies.  */
	  gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
	}

      /* Check that true and anti dependencies can't have other
	 speculative statuses.  */
      if (ds & DEP_TRUE)
	gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
	gcc_assert (ds & BEGIN_CONTROL);
    }
}
#endif /* ENABLE_CHECKING */
#endif /* INSN_SCHEDULING */