/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"

#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING

/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
/* Return the major type present in the DS.  */
  return REG_DEP_OUTPUT;

  return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

/* Return equivalent dep_status.  */
dk_to_ds (enum reg_note dk)
      gcc_assert (dk == REG_DEP_ANTI);

/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;

/* Init DEP with the arguments.
   While most of the scheduler (including targets) only needs the major type
   of the dependency, it is convenient to hide the full dep_status from it.  */
init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);

  init_dep_1 (dep, pro, con, kind, ds);

/* Make a copy of FROM in TO.  */
copy_dep (dep_t to, dep_t from)
  memcpy (to, from, sizeof (*to));
static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
		      | DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
dump_dep (FILE *dump, dep_t dep, int flags)
    flags |= DUMP_DEP_ALL;

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
      enum reg_note type = DEP_TYPE (dep);
	case REG_DEP_CONTROL:
      fprintf (dump, "%c; ", t);

  if (flags & DUMP_DEP_STATUS)
    if (current_sched_info->flags & USE_DEPS_LIST)
      dump_ds (dump, DEP_STATUS (dep));

/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
sd_debug_dep (dep_t dep)
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
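
/* A minimal usage sketch (illustrative only, not part of the original
   file): dump just the producer and type of each backward dependence of
   INSN to stderr.  FOR_EACH_DEP and SD_LIST_BACK are the real iteration
   macros from sched-int.h; the helper itself is hypothetical.  */
#if 0
static void
debug_insn_back_deps (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    dump_dep (stderr, dep, DUMP_DEP_PRO | DUMP_DEP_TYPE);
}
#endif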
/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */
depl_on_debug_p (dep_link_t dep)
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));

/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXTP.  */
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);

/* Add dep_link LINK to deps_list L.  */
add_to_deps_list (dep_link_t link, deps_list_t l)
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);

/* Detach dep_link L from the list.  */
detach_dep_link (dep_link_t l)
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;

/* Remove link LINK from list LIST.  */
remove_from_deps_list (dep_link_t link, deps_list_t list)
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);

/* Move link LINK from list FROM to list TO.  */
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);

/* Return true if LINK is not attached to any list.  */
dep_link_is_detached_p (dep_link_t link)
  return DEP_LINK_PREV_NEXTP (link) == NULL;
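
/* Sketch of how the primitives above compose (illustrative only; LIST_A
   and LIST_B are hypothetical lists).  move_dep_link is literally
   remove + add, and a detached link is recognizable by its NULL
   back-pointer.  */
#if 0
  add_to_deps_list (link, list_a);		/* attach at head of LIST_A */
  move_dep_link (link, list_a, list_b);		/* now on LIST_B */
  remove_from_deps_list (link, list_b);
  gcc_assert (dep_link_is_detached_p (link));
#endif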
/* Pool to hold all dependency nodes (dep_node_t).  */
static alloc_pool dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
create_dep_node (void)
  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

/* Delete dep_node N.  N must not be connected to any deps_list.  */
delete_dep_node (dep_node_t n)
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  pool_free (dn_pool, n);

/* Pool to hold dependencies lists (deps_list_t).  */
static alloc_pool dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
deps_list_empty_p (deps_list_t l)
  return DEPS_LIST_N_LINKS (l) == 0;

/* Create a new deps_list.  */
create_deps_list (void)
  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

/* Free deps_list L.  */
free_deps_list (deps_list_t l)
  gcc_assert (deps_list_empty_p (l));

  pool_free (dl_pool, l);

/* Return true if there are no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  */
deps_pools_are_empty_p (void)
  return dn_pool_diff == 0 && dl_pool_diff == 0;
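
/* Illustrative check (an assumption, not code from this file): the two
   counters balance creations against deletions, so once a region's
   dependencies have been freed, a leaked dep_node or deps_list shows up
   as a nonzero diff.  */
#if 0
  gcc_assert (deps_pools_are_empty_p ());
#endif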
/* Remove all elements from L.  */
clear_deps_list (deps_list_t l)
      dep_link_t link = DEPS_LIST_FIRST (l);

      remove_from_deps_list (link, l);

/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
dep_spec_p (dep_t dep)
  if (current_sched_info->flags & DO_SPECULATION)
      if (DEP_STATUS (dep) & SPECULATIVE)
  if (current_sched_info->flags & DO_PREDICATION)
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or may be implicitly
   clobbered or used) by the currently analyzed insn.  For example,
   an insn may have a constraint with a single register class.  Even if
   there is currently no hard register in the insn, the particular hard
   register will be in the insn after the reload pass because the
   constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap for the true-dependency cache is
   allocated, then the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *control_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;

static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx, rtx, enum reg_note);
static void add_dependence_list (rtx, rtx, int, enum reg_note);
static void add_dependence_list_and_free (struct deps_desc *, rtx,
					  rtx *, int, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);

static void flush_pending_lists (struct deps_desc *, rtx, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx);

static bool sched_has_condition_p (const_rtx);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */
deps_may_trap_p (const_rtx mem)
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
      const_rtx t = get_reg_known_value (REGNO (addr));

  return rtx_addr_can_trap_p (addr);

/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
sched_get_condition_with_rev_uncached (const_rtx insn, bool *rev)
  rtx pat = PATTERN (insn);

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)

/* Return the condition under which INSN does not execute (i.e. the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
sched_get_reverse_condition_uncached (const_rtx insn)
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)

  enum rtx_code revcode = reversed_comparison_code (cond, insn);
  cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),

/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
sched_get_condition_with_rev (const_rtx insn, bool *rev)
  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)

  if (INSN_CACHED_COND (insn) != NULL_RTX)
      *rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
      INSN_CACHED_COND (insn) = const_true_rtx;

    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);

/* True when we can find a condition under which INSN is executed.  */
sched_has_condition_p (const_rtx insn)
  return !!sched_get_condition_with_rev (insn, NULL);
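
/* Usage sketch (illustrative only): REV tells the caller that the
   returned comparison must be reversed to obtain the actual guard.  */
#if 0
  bool rev;
  rtx cond = sched_get_condition_with_rev (insn, &rev);

  if (cond != NULL_RTX)
    fprintf (stderr, "insn %d executes when %s holds\n",
	     INSN_UID (insn), rev ? "the reverse of COND" : "COND");
#endif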
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	 (rev1 == rev2
	  ? reversed_comparison_code (cond2, NULL)
	  : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))

/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
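
/* Example (illustrative): two predicated writes guarded by opposite
   conditions on the same register,

     (cond_exec (eq (reg cc) (const_int 0)) (set (reg r) ...))
     (cond_exec (ne (reg cc) (const_int 0)) (set (reg r) ...))

   can never both execute, so no output dependence needs to be created
   between them, provided neither insn modifies CC.  */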
/* Return true if INSN can potentially be speculated with type DS.  */
sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
  if (HAS_INTERNAL_DEP (insn))

  if (!NONJUMP_INSN_P (insn))

  if (SCHED_GROUP_P (insn))

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))

  if (side_effects_p (PATTERN (insn)))

  /* The following instructions, which depend on a speculatively scheduled
     instruction, cannot be speculatively scheduled along with it.  */

  if (may_trap_or_fault_p (PATTERN (insn)))
    /* If the instruction might fault, it cannot be speculatively scheduled.
       For control speculation it's obvious why, and for data speculation
       it's because the insn might get wrong input if speculation
       wasn't successful.  */

  if ((ds & BE_IN_DATA)
      && sched_has_condition_p (insn))
    /* If this is a predicated instruction, then it cannot be
       speculatively scheduled.  See PR35659.  */

/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
  else if (types & SD_LIST_SPEC_BACK)
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
  else if (types & SD_LIST_FORW)
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
  else if (types & SD_LIST_RES_BACK)
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
  else if (types & SD_LIST_RES_FORW)
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;

      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
  while (list_types != SD_LIST_NONE)
      sd_next_list (insn, &list_types, &list, &resolved_p);

      size += DEPS_LIST_N_LINKS (list);

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
  while (list_types != SD_LIST_NONE)
      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
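
/* Usage sketch (illustrative only): list types can be OR'ed together, so
   both unresolved backward lists of INSN can be inspected at once.  */
#if 0
  if (!sd_lists_empty_p (insn, SD_LIST_HARD_BACK | SD_LIST_SPEC_BACK))
    fprintf (stderr, "insn %d still has %d unresolved back deps\n",
	     INSN_UID (insn),
	     sd_lists_size (insn, SD_LIST_HARD_BACK | SD_LIST_SPEC_BACK));
#endif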
/* Initialize data for INSN.  */
sd_init_insn (rtx insn)
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */

/* Free data for INSN.  */
sd_finish_insn (rtx insn)
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;

/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;

  bool found_p = false;

      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;

      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)

      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)

      if (sd_it_ptr != NULL)

/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check if the dependency is
   present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none is found.  */
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
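
/* Usage sketch (illustrative only): check whether CON already depends
   on PRO before creating a new link.  */
#if 0
  dep_t dep = sd_find_dep_between (pro, con, false);

  if (dep != NULL)
    gcc_assert (DEP_PRO (dep) == pro && DEP_CON (dep) == con);
#endif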
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks if producer and consumer are the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
      if (sched_deps_info->generate_spec_deps)
	/* INSN has an internal dependence, which we can't overcome.  */
	HAS_INTERNAL_DEP (insn) = 1;

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if a new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;

	/* There is no existing dep so it should be created.  */

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */

      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */

	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	       ..we should update DEP_STATUS
	     else
	       ..we should reset existing dep to non-speculative.  */
/* Set dependency caches according to DEP.  */
set_dependency_caches (dep_t dep)
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
      switch (DEP_TYPE (dep))
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);

	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);

	case REG_DEP_CONTROL:
	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      ds_t ds = DEP_STATUS (dep);

	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);

/* The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  */
update_dependency_caches (dep_t dep, enum reg_note old_type)
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear the corresponding cache entry because the type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
      case REG_DEP_OUTPUT:
	bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

	bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);

      case REG_DEP_CONTROL:
	bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);

  set_dependency_caches (dep);
/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
change_spec_dep_to_hard (sd_iterator_def sd_it)
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
      DEP_TYPE (dep) = DEP_TYPE (new_dep);

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	  /* Either the existing dep or the dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;

	      /* Both are speculative.  Merge probabilities.  */
		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);

	      new_status = ds_merge (dep_status, ds);

      if (dep_status != ds)
	  DEP_STATUS (dep) = ds;

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);

  if (true_dependency_cache != NULL)
      switch (ask_dependency_caches (new_dep))
	  maybe_present_p = true;

	  maybe_present_p = false;

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);

      /* We didn't find a dep, so it shouldn't be present in the cache.  */
      gcc_assert (!present_p);

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));

  sd_add_dep (new_dep, resolved_p);
/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
  rtx con = DEP_CON (dep);

      if (dep_spec_p (dep))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));

      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));

/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
sd_add_dep (dep_t dep, bool resolved_p)
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef ENABLE_CHECKING
  check_dep (dep, false);

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);

/* Add or update backward dependence between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
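
/* Canonical usage sketch (illustrative only; PRO and CON stand for
   hypothetical producer/consumer insns): a dep is built on the stack with
   init_dep and handed to sd_add_or_update_dep, which either creates a new
   link or merges the information into an existing one.  */
#if 0
  dep_def _dep, *dep = &_dep;

  init_dep (dep, pro, con, REG_DEP_ANTI);
  sd_add_or_update_dep (dep, false);
#endif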
/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
sd_resolve_dep (sd_iterator_def sd_it)
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));

/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
sd_unresolve_dep (sd_iterator_def sd_it)
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
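
/* Usage sketch (illustrative only): when INSN has been scheduled, all of
   its forward dependencies get resolved.  sd_resolve_dep advances the
   iterator itself, so the loop has no increment expression.  */
#if 0
  sd_iterator_def sd_it;
  dep_t dep;

  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
       sd_iterator_cond (&sd_it, &dep);)
    sd_resolve_dep (sd_it);
#endif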
/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
  sd_list_types_def list_type;
  sd_iterator_def sd_it;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);

/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
sd_delete_dep (sd_iterator_def sd_it)
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
  sd_iterator_def sd_it;

    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
      FOR_EACH_DEP (insn, types, sd_it, dep)
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
sd_debug_lists (rtx insn, sd_list_types_def types)
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
add_dependence (rtx con, rtx pro, enum reg_note dep_type)
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition register.  */
  if (dep_type == REG_DEP_CONTROL)
      rtx other = real_insn_for_shadow (real_pro);

      if (other != NULL_RTX)
	real_pro = other;

      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;

  if (dep_type == REG_DEP_CONTROL)
      if (sched_verbose >= 5)
	fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		 INSN_UID (real_pro));
      add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			   REG_DEP_TRUE);

  add_dependence_1 (con, pro, dep_type);

/* A convenience wrapper to operate on an entire list.  */

add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
  for (; list; list = XEXP (list, 1))
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
	add_dependence (insn, XEXP (list, 0), dep_type);
/* Similar, but free *LISTP at the same time, when the context
   allows it.  */
add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
			      int uncond, enum reg_note dep_type)
  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
      add_dependence_list (insn, *listp, uncond, dep_type);

  for (list = *listp, *listp = NULL; list; list = next)
      next = XEXP (list, 1);
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
	add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);

/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

remove_from_dependence_list (rtx insn, rtx *listp)
      if (XEXP (*listp, 0) == insn)
	  remove_free_INSN_LIST_node (listp);

      listp = &XEXP (*listp, 1);

/* Same as above, but process two lists at once.  */
remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
      if (XEXP (*listp, 0) == insn)
	  remove_free_INSN_LIST_node (listp);
	  remove_free_EXPR_LIST_node (exprp);

      listp = &XEXP (*listp, 1);
      exprp = &XEXP (*exprp, 1);
/* Clear all dependencies for an insn.  */
delete_all_dependences (rtx insn)
  sd_iterator_def sd_it;

  /* The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     sd_delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);

/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

fixup_sched_groups (rtx insn)
  sd_iterator_def sd_it;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
      rtx pro = DEP_PRO (dep);

	  i = prev_nonnote_insn (i);

      while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
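
/* Example (illustrative): in the RTL sequence

     (set (mem X) (reg 1))	;; insn 1
     (set (reg 2) (mem X))	;; insn 2 - true dependence on insn 1
     (set (mem X) (reg 3))	;; insn 3 - output dep on insn 1,
				;;          anti dep on insn 2

   a read following a read (kind 0) needs no link, which is why only
   kinds 1-3 generate dependencies below.  */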
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
			 rtx insn, rtx mem)
  gcc_assert (!deps->readonly);

      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;

      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;

  link = alloc_INSN_LIST (insn, *insn_list);

  if (sched_deps_info->use_cselib)
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0), GET_MODE (mem));

  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);

/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
		     int for_write)
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
				    1, REG_DEP_ANTI);
      if (!deps->readonly)
	  free_EXPR_LIST_list (&deps->pending_read_mems);
	  deps->pending_read_list_length = 0;

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);

  add_dependence_list_and_free (deps, insn,
				&deps->last_pending_memory_flush, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);

  add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
				REG_DEP_ANTI);

  if (!deps->readonly)
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
/* The instruction whose dependencies we are analyzing.  */
static rtx cur_insn = NULL_RTX;

/* Implement hooks for the Haifa scheduler.  */

haifa_start_insn (rtx insn)
  gcc_assert (insn && !cur_insn);

haifa_finish_insn (void)

haifa_note_reg_set (int regno)
  SET_REGNO_REG_SET (reg_pending_sets, regno);

haifa_note_reg_clobber (int regno)
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);

haifa_note_reg_use (int regno)
  SET_REGNO_REG_SET (reg_pending_uses, regno);

haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
  if (!(ds & SPECULATIVE))
      pending_mem = NULL_RTX;

    gcc_assert (ds & BEGIN_DATA);

    dep_def _dep, *dep = &_dep;

    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
		current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);

haifa_note_dep (rtx elem, ds_t ds)
  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);

note_reg_use (int r)
  if (sched_deps_info->note_reg_use)
    sched_deps_info->note_reg_use (r);

note_reg_set (int r)
  if (sched_deps_info->note_reg_set)
    sched_deps_info->note_reg_set (r);

note_reg_clobber (int r)
  if (sched_deps_info->note_reg_clobber)
    sched_deps_info->note_reg_clobber (r);

note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
  if (sched_deps_info->note_mem_dep)
    sched_deps_info->note_mem_dep (m1, m2, e, ds);

note_dep (rtx e, ds_t ds)
  if (sched_deps_info->note_dep)
    sched_deps_info->note_dep (e, ds);

/* Return the reg_note corresponding to DS.  */
    return REG_DEP_TRUE;
  else if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;
  else if (ds & DEP_ANTI)
    return REG_DEP_ANTI;

      gcc_assert (ds & DEP_CONTROL);
      return REG_DEP_CONTROL;
/* Functions for computation of info needed for register pressure
   sensitive insn scheduling.  */

/* Allocate and return a reg_use_data structure for REGNO and INSN.  */
static struct reg_use_data *
create_insn_reg_use (int regno, rtx insn)
  struct reg_use_data *use;

  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));

  use->next_insn_use = INSN_REG_USE_LIST (insn);
  INSN_REG_USE_LIST (insn) = use;

/* Allocate and return a reg_set_data structure for REGNO and INSN.  */
static struct reg_set_data *
create_insn_reg_set (int regno, rtx insn)
  struct reg_set_data *set;

  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));

  set->next_insn_set = INSN_REG_SET_LIST (insn);
  INSN_REG_SET_LIST (insn) = set;

/* Set up insn register uses for INSN and dependency context DEPS.  */
setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
  reg_set_iterator rsi;

  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
      if (i < FIRST_PSEUDO_REGISTER
	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
	/* Ignore a use that is not dying.  */

      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (list = reg_last->uses; list; list = XEXP (list, 1))
	  use2 = create_insn_reg_use (i, XEXP (list, 0));
	  next = use->next_regno_use;
	  use->next_regno_use = use2;
	  use2->next_regno_use = next;
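
/* The next_regno_use pointers form a cycle through all uses of register I
   (the singleton above points at itself), so every use can be visited
   starting from any one of them.  Traversal sketch (illustrative only):  */
#if 0
  struct reg_use_data *u = use;
  do
    {
      fprintf (stderr, "reg %d used in insn %d\n",
	       u->regno, INSN_UID (u->insn));
      u = u->next_regno_use;
    }
  while (u != use);
#endif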
/* Register pressure info for the currently processed insn.  */
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];

/* Return TRUE if INSN has the use structure for REGNO.  */
insn_use_p (rtx insn, int regno)
  struct reg_use_data *use;

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (use->regno == regno)

/* Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say, respectively, that
   the register is clobbered or unused after the insn.  */
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];

      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];

	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
	  reg_pressure_info[cl].clobber_increase = new_incr;

	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
	  reg_pressure_info[cl].unused_set_increase = new_incr;

	  new_incr = reg_pressure_info[cl].set_increase + incr;
	  reg_pressure_info[cl].set_increase = new_incr;
	  if (! insn_use_p (insn, regno))
	    reg_pressure_info[cl].change += incr;
	  create_insn_reg_set (regno, insn);

      gcc_assert (new_incr < (1 << INCREASE_BITS));

/* Like mark_insn_pseudo_birth except that NREGS says how many
   hard registers are involved in the birth.  */
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
			    bool clobber_p, bool unused_p)
  int new_incr, last = regno + nregs;

  while (regno < last)
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	  cl = sched_regno_pressure_class[regno];

	      new_incr = reg_pressure_info[cl].clobber_increase + 1;
	      reg_pressure_info[cl].clobber_increase = new_incr;

	      new_incr = reg_pressure_info[cl].unused_set_increase + 1;
	      reg_pressure_info[cl].unused_set_increase = new_incr;

	      new_incr = reg_pressure_info[cl].set_increase + 1;
	      reg_pressure_info[cl].set_increase = new_incr;
	      if (! insn_use_p (insn, regno))
		reg_pressure_info[cl].change += 1;
	      create_insn_reg_set (regno, insn);

	  gcc_assert (new_incr < (1 << INCREASE_BITS));

/* Update the register pressure info after birth of pseudo or hard
   register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say,
   respectively, that the register is clobbered or unused after the
   insn.  */
mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_insn_hard_regno_birth (insn, regno,
				hard_regno_nregs[regno][GET_MODE (reg)],
				clobber_p, unused_p);
  else
    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);

/* Update the register pressure info after death of pseudo register
   REGNO.  */
mark_pseudo_death (int regno)
  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];

      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      reg_pressure_info[cl].change -= incr;

/* Like mark_pseudo_death except that NREGS says how many hard
   registers are involved in the death.  */
mark_hard_regno_death (int regno, int nregs)
  int last = regno + nregs;

  while (regno < last)
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	  cl = sched_regno_pressure_class[regno];

	  reg_pressure_info[cl].change -= 1;

/* Update the register pressure info after death of pseudo or hard
   register REG.  */
mark_reg_death (rtx reg)
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
  else
    mark_pseudo_death (regno);
/* Process SETTER of REG.  DATA is an insn containing the setter.  */
mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
  if (setter != NULL_RTX && GET_CODE (setter) != SET)

  mark_insn_reg_birth
    ((rtx) data, reg, false,
     find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);

/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
  if (GET_CODE (setter) == CLOBBER)
    mark_insn_reg_birth ((rtx) data, reg, true, false);

/* Set up reg pressure info related to INSN.  */
init_insn_reg_pressure_info (rtx insn)
  static struct reg_pressure_data *pressure_info;

  gcc_assert (sched_pressure_p);

  if (! INSN_P (insn))

  for (i = 0; i < ira_pressure_classes_num; i++)
      cl = ira_pressure_classes[i];
      reg_pressure_info[cl].clobber_increase = 0;
      reg_pressure_info[cl].set_increase = 0;
      reg_pressure_info[cl].unused_set_increase = 0;
      reg_pressure_info[cl].change = 0;

  note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);

  note_stores (PATTERN (insn), mark_insn_reg_store, insn);

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_DEAD)
      mark_reg_death (XEXP (link, 0));

  len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
  pressure_info
    = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
  INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
						  * sizeof (int), 1);
  for (i = 0; i < ira_pressure_classes_num; i++)
      cl = ira_pressure_classes[i];
      pressure_info[i].clobber_increase
	= reg_pressure_info[cl].clobber_increase;
      pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
      pressure_info[i].unused_set_increase
	= reg_pressure_info[cl].unused_set_increase;
      pressure_info[i].change = reg_pressure_info[cl].change;

/* Internal variable for the sched_analyze_[12] () functions.
   If it is nonzero, this means that sched_analyze_[12] is looking
   at the toplevel SET.  */
static bool can_start_lhs_rhs_p;
/* Extend reg info for the deps context DEPS given that
   we have just generated a register numbered REGNO.  */
extend_deps_reg_info (struct deps_desc *deps, int regno)
  int max_regno = regno + 1;

  gcc_assert (!reload_completed);

  /* In a readonly context, it would not hurt to extend info,
     but it should not be needed.  */
  if (reload_completed && deps->readonly)
      deps->max_reg = max_regno;

  if (max_regno > deps->max_reg)
      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
				   max_regno);
      memset (&deps->reg_last[deps->max_reg],
	      0, (max_regno - deps->max_reg)
	      * sizeof (struct deps_reg));
      deps->max_reg = max_regno;

/* Extends REG_INFO_P if needed.  */
maybe_extend_reg_info_p (void)
  /* Extend REG_INFO_P, if needed.  */
  if ((unsigned int) max_regno - 1 >= reg_info_p_size)
      size_t new_reg_info_p_size = max_regno + 128;

      gcc_assert (!reload_completed && sel_sched_p ());

      reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
						    new_reg_info_p_size,
						    reg_info_p_size,
						    sizeof (*reg_info_p));
      reg_info_p_size = new_reg_info_p_size;
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */

sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
		   enum rtx_code ref, rtx insn)
  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
  if (!reload_completed && sel_sched_p ()
      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
    extend_deps_reg_info (deps, regno);

  maybe_extend_reg_info_p ();

  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
      int i = hard_regno_nregs[regno][mode];
	  note_reg_set (regno + i);
      else if (ref == USE)
	  note_reg_use (regno + i);
	  note_reg_clobber (regno + i);

  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);

	note_reg_set (regno);
      else if (ref == USE)
	note_reg_use (regno);
	note_reg_clobber (regno);

      /* Pseudos that are REG_EQUIV to something may be replaced
	 by that during reloading.  We need only add dependencies for
	 the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
	  rtx t = get_reg_known_value (regno);
	    sched_analyze_2 (deps, XEXP (t, 0), insn);

      /* Don't let it cross a call after scheduling if it doesn't
	 already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
	    deps->sched_before_next_call
	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
	  else
	    add_dependence_list (insn, deps->last_function_call, 1,
				 REG_DEP_ANTI);
2349 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2350 rtx, X, creating all dependencies generated by the write to the
2351 destination of X, and reads of everything mentioned. */
2354 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
2356 rtx dest = XEXP (x, 0);
2357 enum rtx_code code = GET_CODE (x);
2358 bool cslr_p = can_start_lhs_rhs_p;
2360 can_start_lhs_rhs_p = false;
2366 if (cslr_p && sched_deps_info->start_lhs)
2367 sched_deps_info->start_lhs (dest);
2369 if (GET_CODE (dest) == PARALLEL)
2373 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2374 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2375 sched_analyze_1 (deps,
2376 gen_rtx_CLOBBER (VOIDmode,
2377 XEXP (XVECEXP (dest, 0, i), 0)),
2380 if (cslr_p && sched_deps_info->finish_lhs)
2381 sched_deps_info->finish_lhs ();
2385 can_start_lhs_rhs_p = cslr_p;
2387 sched_analyze_2 (deps, SET_SRC (x), insn);
2389 can_start_lhs_rhs_p = false;
2395 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2396 || GET_CODE (dest) == ZERO_EXTRACT)
2398 if (GET_CODE (dest) == STRICT_LOW_PART
2399 || GET_CODE (dest) == ZERO_EXTRACT
2400 || df_read_modify_subreg_p (dest))
2402 /* These both read and modify the result. We must handle
2403 them as writes to get proper dependencies for following
2404 instructions. We must handle them as reads to get proper
2405 dependencies from this to previous instructions.
2406 Thus we need to call sched_analyze_2. */
2408 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2410 if (GET_CODE (dest) == ZERO_EXTRACT)
2412 /* The second and third arguments are values read by this insn. */
2413 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2414 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2416 dest = XEXP (dest, 0);
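/* Illustrative sketch (hypothetical pseudo numbers): for

     (set (zero_extract:SI (reg:SI 65) (const_int 8) (const_int 0))
          (reg:SI 66))

   the ZERO_EXTRACT destination is analyzed as a read of reg 65 (its
   remaining bits survive), as reads of the width and position operands,
   and then, after stripping, as a write of reg 65 itself.  */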
2419 if (REG_P (dest))
2421 int regno = REGNO (dest);
2422 enum machine_mode mode = GET_MODE (dest);
2424 sched_analyze_reg (deps, regno, mode, code, insn);
2427 /* Treat all writes to a stack register as modifying the TOS. */
2428 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2430 /* Avoid analyzing the same register twice. */
2431 if (regno != FIRST_STACK_REG)
2432 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2434 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2435 FIRST_STACK_REG);
2439 else if (MEM_P (dest))
2441 /* Writing memory. */
2442 rtx t = dest;
2444 if (sched_deps_info->use_cselib)
2446 enum machine_mode address_mode
2447 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
2449 t = shallow_copy_rtx (dest);
2450 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2451 GET_MODE (t), insn);
2452 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0), GET_MODE (t));
2454 t = canon_rtx (t);
2456 /* Pending lists can't get larger with a readonly context. */
2457 if (!deps->readonly
2458 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2459 > MAX_PENDING_LIST_LENGTH))
2461 /* Flush all pending reads and writes to prevent the pending lists
2462 from getting any larger. Insn scheduling runs too slowly when
2463 these lists get long. When compiling GCC with itself,
2464 this flush occurs 8 times for sparc, and 10 times for m88k using
2465 the default value of 32. */
2466 flush_pending_lists (deps, insn, false, true);
2470 rtx pending, pending_mem;
2472 pending = deps->pending_read_insns;
2473 pending_mem = deps->pending_read_mems;
2474 while (pending)
2476 if (anti_dependence (XEXP (pending_mem, 0), t)
2477 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2478 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2479 DEP_ANTI);
2481 pending = XEXP (pending, 1);
2482 pending_mem = XEXP (pending_mem, 1);
2485 pending = deps->pending_write_insns;
2486 pending_mem = deps->pending_write_mems;
2487 while (pending)
2489 if (output_dependence (XEXP (pending_mem, 0), t)
2490 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2491 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2492 DEP_OUTPUT);
2494 pending = XEXP (pending, 1);
2495 pending_mem = XEXP (pending_mem, 1);
2498 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2499 REG_DEP_ANTI);
2500 add_dependence_list (insn, deps->pending_jump_insns, 1,
2501 REG_DEP_CONTROL);
2503 if (!deps->readonly)
2504 add_insn_mem_dependence (deps, false, insn, dest);
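/* Illustrative summary (hypothetical pseudo numbers): for a store such as

     (set (mem:SI (reg:SI 200)) (reg:SI 201))

   the code above adds anti-dependencies on pending reads that may overlap,
   output dependencies on pending writes, dependencies on pending memory
   flushes and jumps, and finally queues the store itself on the pending
   write lists via add_insn_mem_dependence.  */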
2506 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2509 if (cslr_p && sched_deps_info->finish_lhs)
2510 sched_deps_info->finish_lhs ();
2512 /* Analyze reads. */
2513 if (GET_CODE (x) == SET)
2515 can_start_lhs_rhs_p = cslr_p;
2517 sched_analyze_2 (deps, SET_SRC (x), insn);
2519 can_start_lhs_rhs_p = false;
2523 /* Analyze the uses of memory and registers in rtx X in INSN. */
2524 static void
2525 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2527 int i;
2528 int j;
2529 enum rtx_code code;
2530 const char *fmt;
2531 bool cslr_p = can_start_lhs_rhs_p;
2533 can_start_lhs_rhs_p = false;
2539 if (cslr_p && sched_deps_info->start_rhs)
2540 sched_deps_info->start_rhs (x);
2542 code = GET_CODE (x);
2544 switch (code)
2546 case CONST_INT:
2547 case CONST_DOUBLE:
2548 case CONST_FIXED:
2549 case CONST_VECTOR:
2550 case SYMBOL_REF:
2551 case CONST:
2552 case LABEL_REF:
2553 /* Ignore constants. */
2554 if (cslr_p && sched_deps_info->finish_rhs)
2555 sched_deps_info->finish_rhs ();
2557 return;
2559 case CC0:
2561 /* User of CC0 depends on immediately preceding insn. */
2562 SCHED_GROUP_P (insn) = 1;
2563 /* Don't move CC0 setter to another block (it can set up the
2564 same flag for previous CC0 users which is safe). */
2565 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2567 if (cslr_p && sched_deps_info->finish_rhs)
2568 sched_deps_info->finish_rhs ();
2570 return;
2573 case REG:
2575 int regno = REGNO (x);
2576 enum machine_mode mode = GET_MODE (x);
2578 sched_analyze_reg (deps, regno, mode, USE, insn);
2581 /* Treat all reads of a stack register as modifying the TOS. */
2582 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2584 /* Avoid analyzing the same register twice. */
2585 if (regno != FIRST_STACK_REG)
2586 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2587 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2591 if (cslr_p && sched_deps_info->finish_rhs)
2592 sched_deps_info->finish_rhs ();
2594 return;
2597 case MEM:
2599 /* Reading memory. */
2600 rtx u;
2601 rtx pending, pending_mem;
2602 rtx t = x;
2604 if (sched_deps_info->use_cselib)
2606 enum machine_mode address_mode
2607 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
2609 t = shallow_copy_rtx (t);
2610 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2611 GET_MODE (t), insn);
2612 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0), GET_MODE (t));
2615 if (!DEBUG_INSN_P (insn))
2618 pending = deps->pending_read_insns;
2619 pending_mem = deps->pending_read_mems;
2620 while (pending)
2622 if (read_dependence (XEXP (pending_mem, 0), t)
2623 && ! sched_insns_conditions_mutex_p (insn,
2624 XEXP (pending, 0)))
2625 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2626 DEP_ANTI);
2628 pending = XEXP (pending, 1);
2629 pending_mem = XEXP (pending_mem, 1);
2632 pending = deps->pending_write_insns;
2633 pending_mem = deps->pending_write_mems;
2634 while (pending)
2636 if (true_dependence (XEXP (pending_mem, 0), VOIDmode, t)
2637 && ! sched_insns_conditions_mutex_p (insn,
2638 XEXP (pending, 0)))
2639 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2640 sched_deps_info->generate_spec_deps
2641 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2643 pending = XEXP (pending, 1);
2644 pending_mem = XEXP (pending_mem, 1);
2647 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2648 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2650 for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
2651 if (deps_may_trap_p (x))
2653 if ((sched_deps_info->generate_spec_deps)
2654 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2656 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2657 MAX_DEP_WEAK);
2659 note_dep (XEXP (u, 0), ds);
2661 else
2662 add_dependence (insn, XEXP (u, 0), REG_DEP_CONTROL);
2666 /* Always add these dependencies to pending_reads, since
2667 this insn may be followed by a write. */
2668 if (!deps->readonly)
2669 add_insn_mem_dependence (deps, true, insn, x);
2671 sched_analyze_2 (deps, XEXP (x, 0), insn);
2673 if (cslr_p && sched_deps_info->finish_rhs)
2674 sched_deps_info->finish_rhs ();
2676 return;
2678 case TRAP_IF:
2679 /* Force pending stores to memory in case a trap handler needs them. */
2681 flush_pending_lists (deps, insn, true, false);
2682 break;
2684 case PREFETCH:
2685 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2686 reg_pending_barrier = TRUE_BARRIER;
2687 break;
2689 case UNSPEC_VOLATILE:
2690 flush_pending_lists (deps, insn, true, true);
2691 /* FALLTHRU */
2693 case ASM_OPERANDS:
2694 case ASM_INPUT:
2696 /* Traditional and volatile asm instructions must be considered to use
2697 and clobber all hard registers, all pseudo-registers and all of
2698 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2700 Consider for instance a volatile asm that changes the fpu rounding
2701 mode. An insn should not be moved across this even if it only uses
2702 pseudo-regs because it might give an incorrectly rounded result. */
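/* A concrete (hypothetical) instance: after something like

     asm volatile ("fldcw %0" : : "m" (fpu_control));

   on x86, a following floating-point multiply that touches only
   pseudo-registers must still not be moved above the asm, because the
   rounding mode it depends on has changed.  */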
2703 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2704 reg_pending_barrier = TRUE_BARRIER;
2706 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2707 We cannot just fall through here since then we would be confused
2708 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2709 a traditional asm unlike its normal usage. */
2711 if (code == ASM_OPERANDS)
2713 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2714 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2716 if (cslr_p && sched_deps_info->finish_rhs)
2717 sched_deps_info->finish_rhs ();
2719 return;
2722 case PRE_DEC:
2723 case POST_DEC:
2724 case PRE_INC:
2725 case POST_INC:
2728 /* These both read and modify the result. We must handle them as writes
2729 to get proper dependencies for following instructions. We must handle
2730 them as reads to get proper dependencies from this to previous
2731 instructions. Thus we need to pass them to both sched_analyze_1
2732 and sched_analyze_2. We must call sched_analyze_2 first in order
2733 to get the proper antecedent for the read. */
2734 sched_analyze_2 (deps, XEXP (x, 0), insn);
2735 sched_analyze_1 (deps, x, insn);
2737 if (cslr_p && sched_deps_info->finish_rhs)
2738 sched_deps_info->finish_rhs ();
2740 return;
2742 case POST_MODIFY:
2743 case PRE_MODIFY:
2744 /* op0 = op0 + op1 */
2745 sched_analyze_2 (deps, XEXP (x, 0), insn);
2746 sched_analyze_2 (deps, XEXP (x, 1), insn);
2747 sched_analyze_1 (deps, x, insn);
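/* E.g. for the (hypothetical) address

     (pre_modify:SI (reg:SI sp) (plus:SI (reg:SI sp) (const_int -4)))

   sched_analyze_2 records the reads of sp and of the addend, and
   sched_analyze_1 then records the write of sp, as described above.  */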
2749 if (cslr_p && sched_deps_info->finish_rhs)
2750 sched_deps_info->finish_rhs ();
2752 return;
2754 default:
2755 break;
2758 /* Other cases: walk the insn. */
2759 fmt = GET_RTX_FORMAT (code);
2760 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2762 if (fmt[i] == 'e')
2763 sched_analyze_2 (deps, XEXP (x, i), insn);
2764 else if (fmt[i] == 'E')
2765 for (j = 0; j < XVECLEN (x, i); j++)
2766 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2769 if (cslr_p && sched_deps_info->finish_rhs)
2770 sched_deps_info->finish_rhs ();
2773 /* Analyze an INSN with pattern X to find all dependencies. */
2774 static void
2775 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2777 RTX_CODE code = GET_CODE (x);
2778 rtx link;
2779 unsigned i;
2780 reg_set_iterator rsi;
2782 if (! reload_completed)
2784 HARD_REG_SET temp;
2786 extract_insn (insn);
2787 preprocess_constraints ();
2788 ira_implicitly_set_insn_hard_regs (&temp);
2789 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2790 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2793 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2794 && code == SET);
2796 if (may_trap_p (x))
2797 /* Avoid moving trapping instructions across function calls that might
2798 not always return. */
2799 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2800 1, REG_DEP_ANTI);
2802 /* We must avoid creating a situation in which two successors of the
2803 current block have different unwind info after scheduling. If at any
2804 point the two paths re-join this leads to incorrect unwind info. */
2805 /* ??? There are certain situations involving a forced frame pointer in
2806 which, with extra effort, we could fix up the unwind info at a later
2807 CFG join. However, it seems better to notice these cases earlier
2808 during prologue generation and avoid marking the frame pointer setup
2809 as frame-related at all. */
2810 if (RTX_FRAME_RELATED_P (insn))
2812 /* Make sure prologue insn is scheduled before next jump. */
2813 deps->sched_before_next_jump
2814 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2816 /* Make sure epilogue insn is scheduled after preceding jumps. */
2817 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI);
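/* For instance, a frame-related prologue store like the hypothetical

     (insn/f ... (set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI bp)))

   is forced to issue before the next jump, so both successor paths of
   the block observe the same unwind state.  */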
2820 if (code == COND_EXEC)
2822 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2824 /* ??? Should be recording conditions so we reduce the number of
2825 false dependencies. */
2826 x = COND_EXEC_CODE (x);
2827 code = GET_CODE (x);
2829 if (code == SET || code == CLOBBER)
2831 sched_analyze_1 (deps, x, insn);
2833 /* Bare clobber insns are used for letting life analysis, reg-stack
2834 and others know that a value is dead. Depend on the last call
2835 instruction so that reg-stack won't get confused. */
2836 if (code == CLOBBER)
2837 add_dependence_list (insn, deps->last_function_call, 1,
2838 REG_DEP_ANTI);
2840 else if (code == PARALLEL)
2842 for (i = XVECLEN (x, 0); i--;)
2844 rtx sub = XVECEXP (x, 0, i);
2845 code = GET_CODE (sub);
2847 if (code == COND_EXEC)
2849 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2850 sub = COND_EXEC_CODE (sub);
2851 code = GET_CODE (sub);
2853 if (code == SET || code == CLOBBER)
2854 sched_analyze_1 (deps, sub, insn);
2855 else
2856 sched_analyze_2 (deps, sub, insn);
2859 else
2860 sched_analyze_2 (deps, x, insn);
2862 /* Mark registers CLOBBERED or used by called function. */
2863 if (CALL_P (insn))
2865 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2867 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2868 sched_analyze_1 (deps, XEXP (link, 0), insn);
2869 else if (GET_CODE (XEXP (link, 0)) != USE)
2870 sched_analyze_2 (deps, XEXP (link, 0), insn);
2872 /* Don't schedule anything after a tail call, tail call needs
2873 to use at least all call-saved registers. */
2874 if (SIBLING_CALL_P (insn))
2875 reg_pending_barrier = TRUE_BARRIER;
2876 else if (find_reg_note (insn, REG_SETJMP, NULL))
2877 reg_pending_barrier = MOVE_BARRIER;
2879 if (JUMP_P (insn))
2881 rtx next;
2883 next = next_nonnote_nondebug_insn (insn);
2884 if (next && BARRIER_P (next))
2885 reg_pending_barrier = MOVE_BARRIER;
2886 else
2888 rtx pending, pending_mem;
2890 if (sched_deps_info->compute_jump_reg_dependencies)
2892 (*sched_deps_info->compute_jump_reg_dependencies)
2893 (insn, reg_pending_control_uses);
2895 /* Make latency of jump equal to 0 by using anti-dependence. */
2896 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
2898 struct deps_reg *reg_last = &deps->reg_last[i];
2899 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2900 add_dependence_list (insn, reg_last->implicit_sets,
2901 0, REG_DEP_ANTI);
2902 add_dependence_list (insn, reg_last->clobbers, 0,
2903 REG_DEP_ANTI);
2907 /* All memory writes and volatile reads must happen before the
2908 jump. Non-volatile reads must happen before the jump iff
2909 the result is needed by the above register used mask. */
2911 pending = deps->pending_write_insns;
2912 pending_mem = deps->pending_write_mems;
2913 while (pending)
2915 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2916 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2917 pending = XEXP (pending, 1);
2918 pending_mem = XEXP (pending_mem, 1);
2921 pending = deps->pending_read_insns;
2922 pending_mem = deps->pending_read_mems;
2923 while (pending)
2925 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2926 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2927 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2928 pending = XEXP (pending, 1);
2929 pending_mem = XEXP (pending_mem, 1);
2932 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2933 REG_DEP_ANTI);
2934 add_dependence_list (insn, deps->pending_jump_insns, 1,
2935 REG_DEP_ANTI);
2939 /* If this instruction can throw an exception, then moving it changes
2940 where block boundaries fall. This is mighty confusing elsewhere.
2941 Therefore, prevent such an instruction from being moved. Same for
2942 non-jump instructions that define block boundaries.
2943 ??? Unclear whether this is still necessary in EBB mode. If not,
2944 add_branch_dependences should be adjusted for RGN mode instead. */
2945 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2946 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2947 reg_pending_barrier = MOVE_BARRIER;
2949 if (sched_pressure_p)
2951 setup_insn_reg_uses (deps, insn);
2952 init_insn_reg_pressure_info (insn);
2955 /* Add register dependencies for insn. */
2956 if (DEBUG_INSN_P (insn))
2958 rtx prev = deps->last_debug_insn;
2959 rtx u;
2961 if (!deps->readonly)
2962 deps->last_debug_insn = insn;
2964 if (prev)
2965 add_dependence (insn, prev, REG_DEP_ANTI);
2967 add_dependence_list (insn, deps->last_function_call, 1,
2968 REG_DEP_ANTI);
2970 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2971 if (!sel_sched_p ())
2972 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2974 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2976 struct deps_reg *reg_last = &deps->reg_last[i];
2977 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
2978 /* There's no point in making REG_DEP_CONTROL dependencies for
2979 debug insns. */
2980 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
2982 if (!deps->readonly)
2983 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2985 CLEAR_REG_SET (reg_pending_uses);
2987 /* Quite often, a debug insn will refer to stuff in the
2988 previous instruction, but the reason we want this
2989 dependency here is to make sure the scheduler doesn't
2990 gratuitously move a debug insn ahead. This could dirty
2991 DF flags and cause additional analysis that wouldn't have
2992 occurred in compilation without debug insns, and such
2993 additional analysis can modify the generated code. */
2994 prev = PREV_INSN (insn);
2996 if (prev && NONDEBUG_INSN_P (prev))
2997 add_dependence (insn, prev, REG_DEP_ANTI);
2999 else
3001 regset_head set_or_clobbered;
3003 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3005 struct deps_reg *reg_last = &deps->reg_last[i];
3006 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
3007 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
3008 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
3010 if (!deps->readonly)
3012 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3013 reg_last->uses_length++;
3017 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3018 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3020 struct deps_reg *reg_last = &deps->reg_last[i];
3021 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
3022 add_dependence_list (insn, reg_last->implicit_sets, 0,
3023 REG_DEP_ANTI);
3024 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
3026 if (!deps->readonly)
3028 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3029 reg_last->uses_length++;
3033 if (targetm.sched.exposed_pipeline)
3035 INIT_REG_SET (&set_or_clobbered);
3036 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3037 reg_pending_sets);
3038 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3040 struct deps_reg *reg_last = &deps->reg_last[i];
3041 rtx list;
3042 for (list = reg_last->uses; list; list = XEXP (list, 1))
3044 rtx other = XEXP (list, 0);
3045 if (INSN_CACHED_COND (other) != const_true_rtx
3046 && refers_to_regno_p (i, i + 1, INSN_CACHED_COND (other), NULL))
3047 INSN_CACHED_COND (other) = const_true_rtx;
3052 /* If the current insn is conditional, we can't free any
3053 of the lists. */
3054 if (sched_has_condition_p (insn))
3056 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3058 struct deps_reg *reg_last = &deps->reg_last[i];
3059 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
3060 add_dependence_list (insn, reg_last->implicit_sets, 0,
3061 REG_DEP_ANTI);
3062 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3063 add_dependence_list (insn, reg_last->control_uses, 0,
3064 REG_DEP_CONTROL);
3066 if (!deps->readonly)
3068 reg_last->clobbers
3069 = alloc_INSN_LIST (insn, reg_last->clobbers);
3070 reg_last->clobbers_length++;
3073 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3075 struct deps_reg *reg_last = &deps->reg_last[i];
3076 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
3077 add_dependence_list (insn, reg_last->implicit_sets, 0,
3078 REG_DEP_ANTI);
3079 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
3080 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3081 add_dependence_list (insn, reg_last->control_uses, 0,
3082 REG_DEP_CONTROL);
3084 if (!deps->readonly)
3085 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3088 else
3090 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3092 struct deps_reg *reg_last = &deps->reg_last[i];
3093 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
3094 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
3096 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3097 REG_DEP_OUTPUT);
3098 add_dependence_list_and_free (deps, insn,
3099 &reg_last->implicit_sets, 0,
3100 REG_DEP_ANTI);
3101 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3102 REG_DEP_ANTI);
3103 add_dependence_list_and_free (deps, insn,
3104 &reg_last->control_uses, 0,
3105 REG_DEP_ANTI);
3106 add_dependence_list_and_free
3107 (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
3109 if (!deps->readonly)
3111 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3112 reg_last->clobbers_length = 0;
3113 reg_last->uses_length = 0;
3116 else
3118 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
3119 add_dependence_list (insn, reg_last->implicit_sets, 0,
3120 REG_DEP_ANTI);
3121 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3122 add_dependence_list (insn, reg_last->control_uses, 0,
3123 REG_DEP_CONTROL);
3126 if (!deps->readonly)
3128 reg_last->clobbers_length++;
3129 reg_last->clobbers
3130 = alloc_INSN_LIST (insn, reg_last->clobbers);
3133 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3135 struct deps_reg *reg_last = &deps->reg_last[i];
3137 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3138 REG_DEP_OUTPUT);
3139 add_dependence_list_and_free (deps, insn,
3140 &reg_last->implicit_sets,
3141 0, REG_DEP_ANTI);
3142 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3143 REG_DEP_OUTPUT);
3144 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3145 REG_DEP_ANTI);
3146 add_dependence_list (insn, reg_last->control_uses, 0,
3147 REG_DEP_CONTROL);
3149 if (!deps->readonly)
3151 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3152 reg_last->uses_length = 0;
3153 reg_last->clobbers_length = 0;
3157 if (!deps->readonly)
3159 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3161 struct deps_reg *reg_last = &deps->reg_last[i];
3162 reg_last->control_uses
3163 = alloc_INSN_LIST (insn, reg_last->control_uses);
3168 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3169 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3171 struct deps_reg *reg_last = &deps->reg_last[i];
3172 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
3173 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
3174 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3175 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI);
3177 if (!deps->readonly)
3178 reg_last->implicit_sets
3179 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3182 if (!deps->readonly)
3184 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3185 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3186 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3187 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3188 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3189 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3190 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3192 /* Set up the pending barrier found. */
3193 deps->last_reg_pending_barrier = reg_pending_barrier;
3196 CLEAR_REG_SET (reg_pending_uses);
3197 CLEAR_REG_SET (reg_pending_clobbers);
3198 CLEAR_REG_SET (reg_pending_sets);
3199 CLEAR_REG_SET (reg_pending_control_uses);
3200 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3201 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3203 /* Add dependencies if a scheduling barrier was found. */
3204 if (reg_pending_barrier)
3206 /* In the case of a barrier, most of the added dependencies are not
3207 real, so we use anti-dependences here. */
3208 if (sched_has_condition_p (insn))
3210 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3212 struct deps_reg *reg_last = &deps->reg_last[i];
3213 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3214 add_dependence_list (insn, reg_last->sets, 0,
3215 reg_pending_barrier == TRUE_BARRIER
3216 ? REG_DEP_TRUE : REG_DEP_ANTI);
3217 add_dependence_list (insn, reg_last->implicit_sets, 0,
3218 REG_DEP_ANTI);
3219 add_dependence_list (insn, reg_last->clobbers, 0,
3220 reg_pending_barrier == TRUE_BARRIER
3221 ? REG_DEP_TRUE : REG_DEP_ANTI);
3224 else
3226 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3228 struct deps_reg *reg_last = &deps->reg_last[i];
3229 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3230 REG_DEP_ANTI);
3231 add_dependence_list_and_free (deps, insn,
3232 &reg_last->control_uses, 0,
3233 REG_DEP_CONTROL);
3234 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3235 reg_pending_barrier == TRUE_BARRIER
3236 ? REG_DEP_TRUE : REG_DEP_ANTI);
3237 add_dependence_list_and_free (deps, insn,
3238 &reg_last->implicit_sets, 0,
3239 REG_DEP_ANTI);
3240 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3241 reg_pending_barrier == TRUE_BARRIER
3242 ? REG_DEP_TRUE : REG_DEP_ANTI);
3244 if (!deps->readonly)
3246 reg_last->uses_length = 0;
3247 reg_last->clobbers_length = 0;
3252 if (!deps->readonly)
3253 for (i = 0; i < (unsigned)deps->max_reg; i++)
3255 struct deps_reg *reg_last = &deps->reg_last[i];
3256 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3257 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3260 /* Flush pending lists on jumps, but not on speculative checks. */
3261 if (JUMP_P (insn) && !(sel_sched_p ()
3262 && sel_insn_is_speculation_check (insn)))
3263 flush_pending_lists (deps, insn, true, true);
3265 reg_pending_barrier = NOT_A_BARRIER;
3268 /* If a post-call group is still open, see if it should remain so.
3269 This insn must be a simple move of a hard reg to a pseudo or
3270 vice-versa.
3272 We must avoid moving these insns for correctness on targets
3273 with small register classes, and for special registers like
3274 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3275 hard regs for all targets. */
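/* For example (hypothetical register numbers), the move

     (set (reg:SI 130) (reg:SI 0))

   that copies a call's hard return register into a pseudo is kept in
   the post-call group, while a pseudo-to-pseudo move would close it.  */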
3277 if (deps->in_post_call_group_p)
3279 rtx tmp, set = single_set (insn);
3280 int src_regno, dest_regno;
3282 if (set == NULL_RTX)
3284 if (DEBUG_INSN_P (insn))
3285 /* We don't want to mark debug insns as part of the same
3286 sched group. We know they really aren't, but if we use
3287 debug insns to tell that a call group is over, we'll
3288 get different code if debug insns are not there and
3289 instructions that follow seem like they should be part
3292 Also, if we did, fixup_sched_groups() would move the
3293 deps of the debug insn to the call insn, modifying
3294 non-debug post-dependency counts of the debug insn
3295 dependencies and otherwise messing with the scheduling
3296 order.
3298 Instead, let such debug insns be scheduled freely, but
3299 keep the call group open in case there are insns that
3300 should be part of it afterwards. Since we grant debug
3301 insns higher priority than even sched group insns, it
3302 will all turn out all right. */
3303 goto debug_dont_end_call_group;
3304 else
3305 goto end_call_group;
3308 tmp = SET_DEST (set);
3309 if (GET_CODE (tmp) == SUBREG)
3310 tmp = SUBREG_REG (tmp);
3311 if (REG_P (tmp))
3312 dest_regno = REGNO (tmp);
3313 else
3314 goto end_call_group;
3316 tmp = SET_SRC (set);
3317 if (GET_CODE (tmp) == SUBREG)
3318 tmp = SUBREG_REG (tmp);
3319 if ((GET_CODE (tmp) == PLUS
3320 || GET_CODE (tmp) == MINUS)
3321 && REG_P (XEXP (tmp, 0))
3322 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3323 && dest_regno == STACK_POINTER_REGNUM)
3324 src_regno = STACK_POINTER_REGNUM;
3325 else if (REG_P (tmp))
3326 src_regno = REGNO (tmp);
3327 else
3328 goto end_call_group;
3330 if (src_regno < FIRST_PSEUDO_REGISTER
3331 || dest_regno < FIRST_PSEUDO_REGISTER)
3333 if (!deps->readonly
3334 && deps->in_post_call_group_p == post_call_initial)
3335 deps->in_post_call_group_p = post_call;
3337 if (!sel_sched_p () || sched_emulate_haifa_p)
3339 SCHED_GROUP_P (insn) = 1;
3340 CANT_MOVE (insn) = 1;
3343 else
3345 end_call_group:
3346 if (!deps->readonly)
3347 deps->in_post_call_group_p = not_post_call;
3351 debug_dont_end_call_group:
3352 if ((current_sched_info->flags & DO_SPECULATION)
3353 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3354 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3355 be speculated. */
3357 if (sel_sched_p ())
3358 sel_mark_hard_insn (insn);
3359 else
3361 sd_iterator_def sd_it;
3362 dep_t dep;
3364 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3365 sd_iterator_cond (&sd_it, &dep);)
3366 change_spec_dep_to_hard (sd_it);
3371 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3372 longjmp, loop forever, ...). */
3373 static bool
3374 call_may_noreturn_p (rtx insn)
3376 rtx call;
3378 /* const or pure calls that aren't looping will always return. */
3379 if (RTL_CONST_OR_PURE_CALL_P (insn)
3380 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3381 return false;
3383 call = PATTERN (insn);
3384 if (GET_CODE (call) == PARALLEL)
3385 call = XVECEXP (call, 0, 0);
3386 if (GET_CODE (call) == SET)
3387 call = SET_SRC (call);
3388 if (GET_CODE (call) == CALL
3389 && MEM_P (XEXP (call, 0))
3390 && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3392 rtx symbol = XEXP (XEXP (call, 0), 0);
3393 if (SYMBOL_REF_DECL (symbol)
3394 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3396 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3397 == BUILT_IN_NORMAL)
3398 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3401 case BUILT_IN_BCOPY:
3402 case BUILT_IN_BZERO:
3403 case BUILT_IN_INDEX:
3404 case BUILT_IN_MEMCHR:
3405 case BUILT_IN_MEMCMP:
3406 case BUILT_IN_MEMCPY:
3407 case BUILT_IN_MEMMOVE:
3408 case BUILT_IN_MEMPCPY:
3409 case BUILT_IN_MEMSET:
3410 case BUILT_IN_RINDEX:
3411 case BUILT_IN_STPCPY:
3412 case BUILT_IN_STPNCPY:
3413 case BUILT_IN_STRCAT:
3414 case BUILT_IN_STRCHR:
3415 case BUILT_IN_STRCMP:
3416 case BUILT_IN_STRCPY:
3417 case BUILT_IN_STRCSPN:
3418 case BUILT_IN_STRLEN:
3419 case BUILT_IN_STRNCAT:
3420 case BUILT_IN_STRNCMP:
3421 case BUILT_IN_STRNCPY:
3422 case BUILT_IN_STRPBRK:
3423 case BUILT_IN_STRRCHR:
3424 case BUILT_IN_STRSPN:
3425 case BUILT_IN_STRSTR:
3426 /* Assume certain string/memory builtins always return. */
3427 return false;
3428 default:
3429 break;
3434 /* For all other calls assume that they might not always return. */
3435 return true;
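/* So a direct call to one of the builtins above, e.g. memcpy, is
   treated as always returning, while an indirect call such as the
   hypothetical (call (mem:QI (reg:SI 100)) ...) makes this predicate
   return true.  */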
3438 /* Analyze INSN with DEPS as a context. */
3440 deps_analyze_insn (struct deps_desc *deps, rtx insn)
3442 if (sched_deps_info->start_insn)
3443 sched_deps_info->start_insn (insn);
3445 /* Record the condition for this insn. */
3446 if (NONDEBUG_INSN_P (insn))
3448 rtx t;
3449 sched_get_condition_with_rev (insn, NULL);
3450 t = INSN_CACHED_COND (insn);
3451 INSN_COND_DEPS (insn) = NULL_RTX;
3452 if (reload_completed
3453 && (current_sched_info->flags & DO_PREDICATION)
3454 && COMPARISON_P (t)
3455 && REG_P (XEXP (t, 0))
3456 && CONSTANT_P (XEXP (t, 1)))
3458 unsigned int regno;
3459 int nregs;
3461 regno = REGNO (XEXP (t, 0));
3462 nregs = hard_regno_nregs[regno][GET_MODE (t)];
3463 t = NULL_RTX;
3465 while (nregs-- > 0)
3466 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3467 t = concat_INSN_LIST (reg_last->sets, t);
3468 t = concat_INSN_LIST (reg_last->clobbers, t);
3469 t = concat_INSN_LIST (reg_last->implicit_sets, t);
3471 INSN_COND_DEPS (insn) = t;
3475 if (JUMP_P (insn))
3477 /* Make each JUMP_INSN (but not a speculative check)
3478 a scheduling barrier for memory references. */
3479 if (!deps->readonly
3480 && !(sel_sched_p ()
3481 && sel_insn_is_speculation_check (insn)))
3483 /* Keep the list a reasonable size. */
3484 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3485 flush_pending_lists (deps, insn, true, true);
3487 deps->pending_jump_insns
3488 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3491 /* For each insn which shouldn't cross a jump, add a dependence. */
3492 add_dependence_list_and_free (deps, insn,
3493 &deps->sched_before_next_jump, 1,
3494 REG_DEP_ANTI);
3496 sched_analyze_insn (deps, PATTERN (insn), insn);
3498 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3500 sched_analyze_insn (deps, PATTERN (insn), insn);
3502 else if (CALL_P (insn))
3504 int i;
3506 CANT_MOVE (insn) = 1;
3508 if (find_reg_note (insn, REG_SETJMP, NULL))
3510 /* This is setjmp. Assume that all registers, not just
3511 hard registers, may be clobbered by this call. */
3512 reg_pending_barrier = MOVE_BARRIER;
3514 else
3516 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3517 /* A call may read and modify global register variables. */
3518 if (global_regs[i])
3520 SET_REGNO_REG_SET (reg_pending_sets, i);
3521 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3523 /* Other call-clobbered hard regs may be clobbered.
3524 Since we only have a choice between 'might be clobbered'
3525 and 'definitely not clobbered', we must include all
3526 partly call-clobbered registers here. */
3527 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3528 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3529 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3530 /* We don't know what set of fixed registers might be used
3531 by the function, but it is certain that the stack pointer
3532 is among them, so be conservative. */
3533 else if (fixed_regs[i])
3534 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3535 /* The frame pointer is normally not used by the function
3536 itself, but by the debugger. */
3537 /* ??? MIPS o32 is an exception. It uses the frame pointer
3538 in the macro expansion of jal but does not represent this
3539 fact in the call_insn rtl. */
3540 else if (i == FRAME_POINTER_REGNUM
3541 || (i == HARD_FRAME_POINTER_REGNUM
3542 && (! reload_completed || frame_pointer_needed)))
3543 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3546 /* For each insn which shouldn't cross a call, add a dependence
3547 between that insn and this call insn. */
3548 add_dependence_list_and_free (deps, insn,
3549 &deps->sched_before_next_call, 1,
3550 REG_DEP_ANTI);
3552 sched_analyze_insn (deps, PATTERN (insn), insn);
3554 /* If CALL would be in a sched group, then this will violate
3555 convention that sched group insns have dependencies only on the
3556 previous instruction.
3558 Of course one can say: "Hey! What about head of the sched group?"
3559 And I will answer: "Basic principles (one dep per insn) are always
3560 the same." */
3561 gcc_assert (!SCHED_GROUP_P (insn));
3563 /* In the absence of interprocedural alias analysis, we must flush
3564 all pending reads and writes, and start new dependencies starting
3565 from here. But only flush writes for constant calls (which may
3566 be passed a pointer to something we haven't written yet). */
3567 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3569 if (!deps->readonly)
3571 /* Remember the last function call for limiting lifetimes. */
3572 free_INSN_LIST_list (&deps->last_function_call);
3573 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3575 if (call_may_noreturn_p (insn))
3577 /* Remember the last function call that might not always return
3578 normally for limiting moves of trapping insns. */
3579 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3580 deps->last_function_call_may_noreturn
3581 = alloc_INSN_LIST (insn, NULL_RTX);
3584 /* Before reload, begin a post-call group, so as to keep the
3585 lifetimes of hard registers correct. */
3586 if (! reload_completed)
3587 deps->in_post_call_group_p = post_call;
3591 if (sched_deps_info->use_cselib)
3592 cselib_process_insn (insn);
3594 /* EH_REGION insn notes cannot appear until well after we complete
3595 scheduling. */
3596 if (NOTE_P (insn))
3598 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
3600 if (sched_deps_info->finish_insn)
3601 sched_deps_info->finish_insn ();
3603 /* Fixup the dependencies in the sched group. */
3604 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3605 && SCHED_GROUP_P (insn) && !sel_sched_p ())
3606 fixup_sched_groups (insn);
3609 /* Initialize DEPS for the new block beginning with HEAD. */
3611 deps_start_bb (struct deps_desc *deps, rtx head)
3613 gcc_assert (!deps->readonly);
3615 /* Before reload, if the previous block ended in a call, show that
3616 we are inside a post-call group, so as to keep the lifetimes of
3617 hard registers correct. */
3618 if (! reload_completed && !LABEL_P (head))
3620 rtx insn = prev_nonnote_nondebug_insn (head);
3622 if (insn && CALL_P (insn))
3623 deps->in_post_call_group_p = post_call_initial;
3627 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3628 dependencies for each insn. */
3629 void
3630 sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
3632 rtx insn;
3634 if (sched_deps_info->use_cselib)
3635 cselib_init (CSELIB_RECORD_MEMORY);
3637 deps_start_bb (deps, head);
3639 for (insn = head;; insn = NEXT_INSN (insn))
3641 if (INSN_P (insn))
3644 /* And initialize deps_lists. */
3645 sd_init_insn (insn);
3648 deps_analyze_insn (deps, insn);
3650 if (insn == tail)
3652 if (sched_deps_info->use_cselib)
3653 cselib_finish ();
3654 return;
3660 /* Helper for sched_free_deps ().
3661 Delete INSN's (RESOLVED_P) backward dependencies. */
3663 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3665 sd_iterator_def sd_it;
3666 dep_t dep;
3667 sd_list_types_def types;
3669 if (resolved_p)
3670 types = SD_LIST_RES_BACK;
3671 else
3672 types = SD_LIST_BACK;
3674 for (sd_it = sd_iterator_start (insn, types);
3675 sd_iterator_cond (&sd_it, &dep);)
3677 dep_link_t link = *sd_it.linkp;
3678 dep_node_t node = DEP_LINK_NODE (link);
3679 deps_list_t back_list;
3680 deps_list_t forw_list;
3682 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3683 remove_from_deps_list (link, back_list);
3684 delete_dep_node (node);
3688 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3689 deps_lists. */
3690 void
3691 sched_free_deps (rtx head, rtx tail, bool resolved_p)
3693 rtx insn;
3694 rtx next_tail = NEXT_INSN (tail);
3696 /* We make two passes since some insns may be scheduled before their
3697 dependencies are resolved. */
3698 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3699 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3701 /* Clear forward deps and leave the dep_nodes to the
3702 corresponding back_deps list. */
3703 if (resolved_p)
3704 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3705 else
3706 clear_deps_list (INSN_FORW_DEPS (insn));
3708 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3709 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3711 /* Clear resolved back deps together with their dep_nodes. */
3712 delete_dep_nodes_in_back_deps (insn, resolved_p);
3714 sd_finish_insn (insn);
3718 /* Initialize variables for region data dependence analysis.
3719 When LAZY_REG_LAST is true, do not allocate reg_last array
3720 of struct deps_desc immediately. */
3723 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3725 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3727 deps->max_reg = max_reg;
3728 if (lazy_reg_last)
3729 deps->reg_last = NULL;
3730 else
3731 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3732 INIT_REG_SET (&deps->reg_last_in_use);
3734 deps->pending_read_insns = 0;
3735 deps->pending_read_mems = 0;
3736 deps->pending_write_insns = 0;
3737 deps->pending_write_mems = 0;
3738 deps->pending_jump_insns = 0;
3739 deps->pending_read_list_length = 0;
3740 deps->pending_write_list_length = 0;
3741 deps->pending_flush_length = 0;
3742 deps->last_pending_memory_flush = 0;
3743 deps->last_function_call = 0;
3744 deps->last_function_call_may_noreturn = 0;
3745 deps->sched_before_next_call = 0;
3746 deps->sched_before_next_jump = 0;
3747 deps->in_post_call_group_p = not_post_call;
3748 deps->last_debug_insn = 0;
3749 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3753 /* Initialize only the reg_last field of DEPS, which was not allocated
3754 before because we inited DEPS lazily. */
3756 init_deps_reg_last (struct deps_desc *deps)
3758 gcc_assert (deps && deps->max_reg > 0);
3759 gcc_assert (deps->reg_last == NULL);
3761 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3765 /* Free insn lists found in DEPS. */
3768 free_deps (struct deps_desc *deps)
3770 unsigned i;
3771 reg_set_iterator rsi;
3773 /* We set max_reg to 0 when this context was already freed. */
3774 if (deps->max_reg == 0)
3776 gcc_assert (deps->reg_last == NULL);
3777 return;
3779 deps->max_reg = 0;
3781 free_INSN_LIST_list (&deps->pending_read_insns);
3782 free_EXPR_LIST_list (&deps->pending_read_mems);
3783 free_INSN_LIST_list (&deps->pending_write_insns);
3784 free_EXPR_LIST_list (&deps->pending_write_mems);
3785 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3787 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3788 times. For a testcase with 42000 regs and 8000 small basic blocks,
3789 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3790 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3792 struct deps_reg *reg_last = &deps->reg_last[i];
3793 if (reg_last->uses)
3794 free_INSN_LIST_list (&reg_last->uses);
3795 if (reg_last->sets)
3796 free_INSN_LIST_list (&reg_last->sets);
3797 if (reg_last->implicit_sets)
3798 free_INSN_LIST_list (&reg_last->implicit_sets);
3799 if (reg_last->control_uses)
3800 free_INSN_LIST_list (&reg_last->control_uses);
3801 if (reg_last->clobbers)
3802 free_INSN_LIST_list (&reg_last->clobbers);
3804 CLEAR_REG_SET (&deps->reg_last_in_use);
3806 /* As we initialize reg_last lazily, it is possible that we didn't allocate
3807 it at all. */
3808 free (deps->reg_last);
3809 deps->reg_last = NULL;
3814 /* Remove INSN from dependence contexts DEPS. */
3816 remove_from_deps (struct deps_desc *deps, rtx insn)
3818 int removed;
3819 unsigned i;
3820 reg_set_iterator rsi;
3822 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3823 &deps->pending_read_mems);
3824 if (!DEBUG_INSN_P (insn))
3825 deps->pending_read_list_length -= removed;
3826 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3827 &deps->pending_write_mems);
3828 deps->pending_write_list_length -= removed;
3830 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
3831 deps->pending_flush_length -= removed;
3832 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3833 deps->pending_flush_length -= removed;
3835 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3837 struct deps_reg *reg_last = &deps->reg_last[i];
3838 if (reg_last->uses)
3839 remove_from_dependence_list (insn, &reg_last->uses);
3840 if (reg_last->sets)
3841 remove_from_dependence_list (insn, &reg_last->sets);
3842 if (reg_last->implicit_sets)
3843 remove_from_dependence_list (insn, &reg_last->implicit_sets);
3844 if (reg_last->clobbers)
3845 remove_from_dependence_list (insn, &reg_last->clobbers);
3846 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3847 && !reg_last->clobbers)
3848 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3852 if (CALL_P (insn))
3853 remove_from_dependence_list (insn, &deps->last_function_call);
3854 remove_from_dependence_list (insn,
3855 &deps->last_function_call_may_noreturn);
3857 remove_from_dependence_list (insn, &deps->sched_before_next_call);
3860 /* Init deps data vector. */
3862 init_deps_data_vector (void)
3864 int reserve = (sched_max_luid + 1
3865 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
3866 if (reserve > 0
3867 && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
3868 VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
3869 3 * sched_max_luid / 2);
3872 /* If it is profitable to use them, initialize or extend (depending on
3873 GLOBAL_P) dependency data. */
3875 sched_deps_init (bool global_p)
3877 /* Average number of insns in the basic block.
3878 '+ 1' is used to make it nonzero. */
3879 int insns_in_block = sched_max_luid / n_basic_blocks + 1;
3881 init_deps_data_vector ();
3883 /* We use another caching mechanism for selective scheduling, so
3884 we don't use this one. */
3885 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
3887 /* ?!? We could save some memory by computing a per-region luid mapping
3888 which could reduce both the number of vectors in the cache and the
3889 size of each vector. Instead we just avoid the cache entirely unless
3890 the average number of instructions in a basic block is very high. See
3891 the comment before the declaration of true_dependency_cache for
3892 what we consider "very high". */
3893 cache_size = 0;
3894 extend_dependency_caches (sched_max_luid, true);
3897 if (global_p)
3899 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
3900 /* Allocate lists for one block at a time. */
3901 insns_in_block);
3902 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
3903 /* Allocate nodes for one block at a time.
3904 We assume that average insn has
3905 5 deps. */
3906 5 * insns_in_block);
3911 /* Create or extend (depending on CREATE_P) dependency caches to
3912 size N. */
3914 extend_dependency_caches (int n, bool create_p)
3916 if (create_p || true_dependency_cache)
3918 int i, luid = cache_size + n;
3920 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
3921 luid);
3922 output_dependency_cache = XRESIZEVEC (bitmap_head,
3923 output_dependency_cache, luid);
3924 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
3925 luid);
3926 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
3927 luid);
3929 if (current_sched_info->flags & DO_SPECULATION)
3930 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
3931 luid);
3933 for (i = cache_size; i < luid; i++)
3935 bitmap_initialize (&true_dependency_cache[i], 0);
3936 bitmap_initialize (&output_dependency_cache[i], 0);
3937 bitmap_initialize (&anti_dependency_cache[i], 0);
3938 bitmap_initialize (&control_dependency_cache[i], 0);
3940 if (current_sched_info->flags & DO_SPECULATION)
3941 bitmap_initialize (&spec_dependency_cache[i], 0);
3944 cache_size = luid;
3947 /* Finalize dependency information for the whole function. */
3949 sched_deps_finish (void)
3951 gcc_assert (deps_pools_are_empty_p ());
3952 free_alloc_pool_if_empty (&dn_pool);
3953 free_alloc_pool_if_empty (&dl_pool);
3954 gcc_assert (dn_pool == NULL && dl_pool == NULL);
3956 VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
3959 if (true_dependency_cache)
3961 int i;
3963 for (i = 0; i < cache_size; i++)
3965 bitmap_clear (&true_dependency_cache[i]);
3966 bitmap_clear (&output_dependency_cache[i]);
3967 bitmap_clear (&anti_dependency_cache[i]);
3968 bitmap_clear (&control_dependency_cache[i]);
3970 if (sched_deps_info->generate_spec_deps)
3971 bitmap_clear (&spec_dependency_cache[i]);
3973 free (true_dependency_cache);
3974 true_dependency_cache = NULL;
3975 free (output_dependency_cache);
3976 output_dependency_cache = NULL;
3977 free (anti_dependency_cache);
3978 anti_dependency_cache = NULL;
3979 free (control_dependency_cache);
3980 control_dependency_cache = NULL;
3982 if (sched_deps_info->generate_spec_deps)
3984 free (spec_dependency_cache);
3985 spec_dependency_cache = NULL;
3991 /* Initialize some global variables needed by the dependency analysis
3992 code. */
3994 void
3995 init_deps_global (void)
3997 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3998 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3999 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4000 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4001 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4002 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4003 reg_pending_barrier = NOT_A_BARRIER;
4005 if (!sel_sched_p () || sched_emulate_haifa_p)
4007 sched_deps_info->start_insn = haifa_start_insn;
4008 sched_deps_info->finish_insn = haifa_finish_insn;
4010 sched_deps_info->note_reg_set = haifa_note_reg_set;
4011 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4012 sched_deps_info->note_reg_use = haifa_note_reg_use;
4014 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4015 sched_deps_info->note_dep = haifa_note_dep;
4019 /* Free everything used by the dependency analysis code. */
4022 finish_deps_global (void)
4024 FREE_REG_SET (reg_pending_sets);
4025 FREE_REG_SET (reg_pending_clobbers);
4026 FREE_REG_SET (reg_pending_uses);
4027 FREE_REG_SET (reg_pending_control_uses);
4030 /* Estimate the weakness of dependence between MEM1 and MEM2. */
4031 dw_t
4032 estimate_dep_weak (rtx mem1, rtx mem2)
4034 rtx r1, r2;
4036 if (mem1 == mem2)
4037 /* MEMs are the same - don't speculate. */
4038 return MIN_DEP_WEAK;
4040 r1 = XEXP (mem1, 0);
4041 r2 = XEXP (mem2, 0);
4043 if (r1 == r2
4044 || (REG_P (r1) && REG_P (r2)
4045 && REGNO (r1) == REGNO (r2)))
4046 /* Again, MEMs are the same. */
4047 return MIN_DEP_WEAK;
4048 else if ((REG_P (r1) && !REG_P (r2))
4049 || (!REG_P (r1) && REG_P (r2)))
4050 /* Different addressing modes - reason to be more speculative,
4051 than usual. */
4052 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4053 else
4054 /* We can't say anything about the dependence. */
4055 return UNCERTAIN_DEP_WEAK;
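/* Worked example: for mem1 == (mem:SI (reg:SI 1)) and
   mem2 == (mem:SI (plus:SI (reg:SI 2) (const_int 4))) (hypothetical
   rtl), one address is a REG and the other is not, so the result is
   NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2, i.e. halfway
   between "uncertain" and "no dependence".  */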
4058 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4059 This function can handle same INSN and ELEM (INSN == ELEM).
4060 It is a convenience wrapper. */
4061 void
4062 add_dependence_1 (rtx insn, rtx elem, enum reg_note dep_type)
4064 ds_t ds;
4065 bool internal;
4067 if (dep_type == REG_DEP_TRUE)
4068 ds = DEP_TRUE;
4069 else if (dep_type == REG_DEP_OUTPUT)
4070 ds = DEP_OUTPUT;
4071 else if (dep_type == REG_DEP_CONTROL)
4072 ds = DEP_CONTROL;
4073 else
4075 gcc_assert (dep_type == REG_DEP_ANTI);
4076 ds = DEP_ANTI;
4079 /* When add_dependence is called from inside sched-deps.c, we expect
4080 cur_insn to be non-null. */
4081 internal = cur_insn != NULL;
4082 if (internal)
4083 gcc_assert (insn == cur_insn);
4084 else
4085 cur_insn = insn;
4087 note_dep (elem, ds);
4088 if (!internal)
4089 cur_insn = NULL;
4092 /* Return weakness of speculative type TYPE in the dep_status DS. */
4093 static dw_t
4094 get_dep_weak_1 (ds_t ds, ds_t type)
4096 ds = ds & type;
4098 switch (type)
4100 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4101 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4102 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4103 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4104 default: gcc_unreachable ();
4107 return (dw_t) ds;
4111 get_dep_weak (ds_t ds, ds_t type)
4113 dw_t dw = get_dep_weak_1 (ds, type);
4115 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4116 return dw;
4119 /* Return the dep_status, which has the same parameters as DS, except for
4120 speculative type TYPE, that will have weakness DW. */
4122 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4124 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4126 ds &= ~type;
4127 switch (type)
4129 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4130 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4131 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4132 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4133 default: gcc_unreachable ();
4135 return ds;
4138 /* Return the join of two dep_statuses DS1 and DS2.
4139 If MAX_P is true then choose the greater probability,
4140 otherwise multiply probabilities.
4141 This function assumes that both DS1 and DS2 contain speculative bits. */
4142 static ds_t
4143 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4145 ds_t ds, t;
4147 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4149 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4151 t = FIRST_SPEC_TYPE;
4152 do
4154 if ((ds1 & t) && !(ds2 & t))
4155 ds |= ds1 & t;
4156 else if (!(ds1 & t) && (ds2 & t))
4157 ds |= ds2 & t;
4158 else if ((ds1 & t) && (ds2 & t))
4160 dw_t dw1 = get_dep_weak (ds1, t);
4161 dw_t dw2 = get_dep_weak (ds2, t);
4162 ds_t dw;
4164 if (!max_p)
4166 dw = ((ds_t) dw1) * ((ds_t) dw2);
4167 dw /= MAX_DEP_WEAK;
4168 if (dw < MIN_DEP_WEAK)
4169 dw = MIN_DEP_WEAK;
4171 else
4173 if (dw1 >= dw2)
4174 dw = dw1;
4175 else
4176 dw = dw2;
4179 ds = set_dep_weak (ds, t, (dw_t) dw);
4182 if (t == LAST_SPEC_TYPE)
4183 break;
4184 t <<= SPEC_TYPE_SHIFT;
4186 while (1);
4188 return ds;
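/* For example, if DS1 and DS2 both carry BEGIN_DATA with weaknesses
   dw1 and dw2, the merged BEGIN_DATA weakness is
   dw1 * dw2 / MAX_DEP_WEAK when multiplying (ds_merge), and
   MAX (dw1, dw2) when MAX_P is true (ds_max_merge).  */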
4191 /* Return the join of two dep_statuses DS1 and DS2.
4192 This function assumes that both DS1 and DS2 contain speculative bits. */
4194 ds_merge (ds_t ds1, ds_t ds2)
4196 return ds_merge_1 (ds1, ds2, false);
4199 /* Return the join of two dep_statuses DS1 and DS2. */
4201 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4203 ds_t new_status = ds | ds2;
4205 if (new_status & SPECULATIVE)
4207 if ((ds && !(ds & SPECULATIVE))
4208 || (ds2 && !(ds2 & SPECULATIVE)))
4209 /* Then this dep can't be speculative. */
4210 new_status &= ~SPECULATIVE;
4211 else
4213 /* Both are speculative. Merging probabilities. */
4214 if (mem1)
4216 dw_t dw;
4218 dw = estimate_dep_weak (mem1, mem2);
4219 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4222 if (!ds)
4223 new_status = ds2;
4224 else if (!ds2)
4225 new_status = ds;
4226 else
4227 new_status = ds_merge (ds2, ds);
4231 return new_status;
4234 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
4235 probabilities. */
4237 ds_max_merge (ds_t ds1, ds_t ds2)
4239 if (ds1 == 0 && ds2 == 0)
4240 return 0;
4242 if (ds1 == 0 && ds2 != 0)
4243 return ds2;
4245 if (ds1 != 0 && ds2 == 0)
4246 return ds1;
4248 return ds_merge_1 (ds1, ds2, true);
4251 /* Return the probability of speculation success for the speculation
4252 status DS. */
4253 dw_t
4254 ds_weak (ds_t ds)
4256 ds_t res = 1, dt;
4257 int n = 0;
4259 dt = FIRST_SPEC_TYPE;
4260 do
4262 if (ds & dt)
4264 res *= (ds_t) get_dep_weak (ds, dt);
4265 n++;
4268 if (dt == LAST_SPEC_TYPE)
4269 break;
4270 dt <<= SPEC_TYPE_SHIFT;
4272 while (1);
4274 gcc_assert (n);
4275 while (--n)
4276 res /= MAX_DEP_WEAK;
4278 if (res < MIN_DEP_WEAK)
4279 res = MIN_DEP_WEAK;
4281 gcc_assert (res <= MAX_DEP_WEAK);
4283 return (dw_t) res;
4286 /* Return a dep status that contains all speculation types of DS. */
4288 ds_get_speculation_types (ds_t ds)
4290 if (ds & BEGIN_DATA)
4291 ds |= BEGIN_DATA;
4292 if (ds & BE_IN_DATA)
4293 ds |= BE_IN_DATA;
4294 if (ds & BEGIN_CONTROL)
4295 ds |= BEGIN_CONTROL;
4296 if (ds & BE_IN_CONTROL)
4297 ds |= BE_IN_CONTROL;
4299 return ds & SPECULATIVE;
4302 /* Return a dep status that contains maximal weakness for each speculation
4303 type present in DS. */
4305 ds_get_max_dep_weak (ds_t ds)
4307 if (ds & BEGIN_DATA)
4308 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4309 if (ds & BE_IN_DATA)
4310 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4311 if (ds & BEGIN_CONTROL)
4312 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4313 if (ds & BE_IN_CONTROL)
4314 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4316 return ds;
4319 /* Dump information about the dependence status S. */
4320 static void
4321 dump_ds (FILE *f, ds_t s)
4323 fprintf (f, "{");
4325 if (s & BEGIN_DATA)
4326 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4327 if (s & BE_IN_DATA)
4328 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4329 if (s & BEGIN_CONTROL)
4330 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4331 if (s & BE_IN_CONTROL)
4332 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4334 if (s & HARD_DEP)
4335 fprintf (f, "HARD_DEP; ");
4337 if (s & DEP_TRUE)
4338 fprintf (f, "DEP_TRUE; ");
4339 if (s & DEP_OUTPUT)
4340 fprintf (f, "DEP_OUTPUT; ");
4341 if (s & DEP_ANTI)
4342 fprintf (f, "DEP_ANTI; ");
4343 if (s & DEP_CONTROL)
4344 fprintf (f, "DEP_CONTROL; ");
4346 fprintf (f, "}");
4349 DEBUG_FUNCTION void
4350 debug_ds (ds_t s)
4352 dump_ds (stderr, s);
4353 fprintf (stderr, "\n");
4356 #ifdef ENABLE_CHECKING
4357 /* Verify that dependence type and status are consistent.
4358 If RELAXED_P is true, then skip dep_weakness checks. */
4359 static void
4360 check_dep (dep_t dep, bool relaxed_p)
4362 enum reg_note dt = DEP_TYPE (dep);
4363 ds_t ds = DEP_STATUS (dep);
4365 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4367 if (!(current_sched_info->flags & USE_DEPS_LIST))
4369 gcc_assert (ds == 0);
4370 return;
4373 /* Check that dependence type contains the same bits as the status. */
4374 if (dt == REG_DEP_TRUE)
4375 gcc_assert (ds & DEP_TRUE);
4376 else if (dt == REG_DEP_OUTPUT)
4377 gcc_assert ((ds & DEP_OUTPUT)
4378 && !(ds & DEP_TRUE));
4379 else if (dt == REG_DEP_ANTI)
4380 gcc_assert ((ds & DEP_ANTI)
4381 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4383 gcc_assert (dt == REG_DEP_CONTROL
4384 && (ds & DEP_CONTROL)
4385 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4387 /* HARD_DEP can not appear in dep_status of a link. */
4388 gcc_assert (!(ds & HARD_DEP));
4390 /* Check that dependence status is set correctly when speculation is not
4391 supported. */
4392 if (!sched_deps_info->generate_spec_deps)
4393 gcc_assert (!(ds & SPECULATIVE));
4394 else if (ds & SPECULATIVE)
4396 if (!relaxed_p)
4398 ds_t type = FIRST_SPEC_TYPE;
4400 /* Check that dependence weakness is in proper range. */
4401 do
4403 if (ds & type)
4404 get_dep_weak (ds, type);
4406 if (type == LAST_SPEC_TYPE)
4407 break;
4408 type <<= SPEC_TYPE_SHIFT;
4410 while (1);
4413 if (ds & BEGIN_SPEC)
4415 /* Only true dependence can be data speculative. */
4416 if (ds & BEGIN_DATA)
4417 gcc_assert (ds & DEP_TRUE);
4419 /* Control dependencies in the insn scheduler are represented by
4420 anti-dependencies, therefore only anti dependence can be
4421 control speculative. */
4422 if (ds & BEGIN_CONTROL)
4423 gcc_assert (ds & DEP_ANTI);
4425 else
4427 /* Subsequent speculations should resolve true dependencies. */
4428 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4431 /* Check that true and anti dependencies can't have other speculative
4432 statuses. */
4433 if (ds & DEP_TRUE)
4434 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4435 /* An output dependence can't be speculative at all. */
4436 gcc_assert (!(ds & DEP_OUTPUT));
4437 if (ds & DEP_ANTI)
4438 gcc_assert (ds & BEGIN_CONTROL);
4441 #endif /* ENABLE_CHECKING */
4443 #endif /* INSN_SCHEDULING */