1 /* Instruction scheduling pass. This file computes dependencies between
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
5 Free Software Foundation, Inc.
6 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
7 and currently maintained by, Jim Wilson (wilson@cygnus.com)
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
27 #include "coretypes.h"
32 #include "hard-reg-set.h"
36 #include "insn-config.h"
37 #include "insn-attr.h"
41 #include "sched-int.h"
45 #ifdef INSN_SCHEDULING
47 #ifdef ENABLE_CHECKING
53 /* Holds current parameters for the dependency analyzer. */
54 struct sched_deps_info_def *sched_deps_info;
56 /* The data is specific to the Haifa scheduler. */
57 VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
59 /* Return the major type present in the DS. */
67 return REG_DEP_OUTPUT;
69 gcc_assert (ds & DEP_ANTI);
74 /* Return equivalent dep_status. */
76 dk_to_ds (enum reg_note dk)
87 gcc_assert (dk == REG_DEP_ANTI);
92 /* Functions to operate with dependence information container - dep_t. */
94 /* Init DEP with the arguments. */
/* Fill DEP's type and full status fields from TYPE and DS.
   NOTE(review): this listing elides lines; the DEP_PRO/DEP_CON
   assignments for PRO and CON are not visible here.  */
96 init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
100 DEP_TYPE (dep) = type;
101 DEP_STATUS (dep) = ds;
104 /* Init DEP with the arguments.
105 While most of the scheduler (including targets) only need the major type
106 of the dependency, it is convenient to hide full dep_status from them. */
/* Convenience wrapper over init_dep_1: when the scheduler keeps full
   status lists (USE_DEPS_LIST), derive the dep_status DS from the
   major kind KIND via dk_to_ds; otherwise DS keeps its (elided)
   default.  */
108 init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
112 if ((current_sched_info->flags & USE_DEPS_LIST))
113 ds = dk_to_ds (kind);
117 init_dep_1 (dep, pro, con, kind, ds);
120 /* Make a copy of FROM in TO. */
/* Shallow structure copy of the whole dependence descriptor.  */
122 copy_dep (dep_t to, dep_t from)
124 memcpy (to, from, sizeof (*to));
127 static void dump_ds (FILE *, ds_t);
129 /* Define flags for dump_dep (). */
131 /* Dump producer of the dependence. */
132 #define DUMP_DEP_PRO (2)
134 /* Dump consumer of the dependence. */
135 #define DUMP_DEP_CON (4)
137 /* Dump type of the dependence. */
138 #define DUMP_DEP_TYPE (8)
140 /* Dump status of the dependence. */
141 #define DUMP_DEP_STATUS (16)
143 /* Dump all information about the dependence. */
144 #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
148 FLAGS is a bit mask specifying what information about DEP needs
150 If FLAGS has the very first bit set, then dump all information about DEP
151 and propagate this bit into the callee dump functions. */
/* Dump DEP to DUMP according to the DUMP_DEP_* bits in FLAGS.
   Producer/consumer are printed as insn UIDs; the status is printed
   only when full status lists are in use.  */
153 dump_dep (FILE *dump, dep_t dep, int flags)
156 flags |= DUMP_DEP_ALL;
160 if (flags & DUMP_DEP_PRO)
161 fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
163 if (flags & DUMP_DEP_CON)
164 fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
166 if (flags & DUMP_DEP_TYPE)
/* NOTE(review): the switch mapping TYPE to the character T is elided
   from this listing.  */
169 enum reg_note type = DEP_TYPE (dep);
190 fprintf (dump, "%c; ", t);
193 if (flags & DUMP_DEP_STATUS)
195 if (current_sched_info->flags & USE_DEPS_LIST)
196 dump_ds (dump, DEP_STATUS (dep));
202 /* Default flags for dump_dep (). */
203 static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
205 /* Dump all fields of DEP to STDERR. */
/* Debugger entry point: dump everything about DEP to stderr
   (flag bit 1 requests full information).  */
207 sd_debug_dep (dep_t dep)
209 dump_dep (stderr, dep, 1);
210 fprintf (stderr, "\n");
213 /* Functions to operate with a single link from the dependencies lists -
216 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
/* Splice link L into a list right after the position whose
   next-pointer is *PREV_NEXTP.  L must currently be detached.  */
219 attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
221 dep_link_t next = *prev_nextp;
223 gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
224 && DEP_LINK_NEXT (l) == NULL);
226 /* Init node being inserted.  */
227 DEP_LINK_PREV_NEXTP (l) = prev_nextp;
228 DEP_LINK_NEXT (l) = next;
/* Fix up the successor's back-pointer when a successor exists
   (the NULL check is elided from this listing).  */
233 gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
235 DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
242 /* Add dep_link LINK to deps_list L. */
/* Prepend LINK to list L and bump the list's link count.  */
244 add_to_deps_list (dep_link_t link, deps_list_t l)
246 attach_dep_link (link, &DEPS_LIST_FIRST (l));
248 ++DEPS_LIST_N_LINKS (l);
251 /* Detach dep_link L from the list. */
/* Unsplice link L from whatever list it is on and mark it detached
   (both link pointers reset to NULL, cf. dep_link_is_detached_p).  */
253 detach_dep_link (dep_link_t l)
255 dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
256 dep_link_t next = DEP_LINK_NEXT (l);
/* NOTE(review): the *prev_nextp = next store and the NULL check on
   NEXT are elided from this listing.  */
261 DEP_LINK_PREV_NEXTP (next) = prev_nextp;
263 DEP_LINK_PREV_NEXTP (l) = NULL;
264 DEP_LINK_NEXT (l) = NULL;
267 /* Remove link LINK from list LIST. */
/* Detach LINK and decrement LIST's link count accordingly.  */
269 remove_from_deps_list (dep_link_t link, deps_list_t list)
271 detach_dep_link (link);
273 --DEPS_LIST_N_LINKS (list);
276 /* Move link LINK from list FROM to list TO. */
/* Transfer LINK between lists: remove from FROM, then add to TO.  */
278 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
280 remove_from_deps_list (link, from);
281 add_to_deps_list (link, to);
284 /* Return true of LINK is not attached to any list. */
/* A detached link is recognized by its NULL back-pointer
   (set by detach_dep_link).  */
286 dep_link_is_detached_p (dep_link_t link)
288 return DEP_LINK_PREV_NEXTP (link) == NULL;
291 /* Pool to hold all dependency nodes (dep_node_t). */
292 static alloc_pool dn_pool;
294 /* Number of dep_nodes out there. */
295 static int dn_pool_diff = 0;
297 /* Create a dep_node. */
/* Allocate a dep_node from DN_POOL and initialize both of its
   embedded links (back and forw) as detached links owned by the node.
   NOTE(review): the dn_pool_diff bump and the return statement are
   elided from this listing.  */
299 create_dep_node (void)
301 dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
302 dep_link_t back = DEP_NODE_BACK (n);
303 dep_link_t forw = DEP_NODE_FORW (n);
305 DEP_LINK_NODE (back) = n;
306 DEP_LINK_NEXT (back) = NULL;
307 DEP_LINK_PREV_NEXTP (back) = NULL;
309 DEP_LINK_NODE (forw) = n;
310 DEP_LINK_NEXT (forw) = NULL;
311 DEP_LINK_PREV_NEXTP (forw) = NULL;
318 /* Delete dep_node N. N must not be connected to any deps_list. */
/* Return node N to DN_POOL.  Both embedded links must already be
   detached from their deps_lists.  */
320 delete_dep_node (dep_node_t n)
322 gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
323 && dep_link_is_detached_p (DEP_NODE_FORW (n)));
327 pool_free (dn_pool, n);
330 /* Pool to hold dependencies lists (deps_list_t). */
331 static alloc_pool dl_pool;
333 /* Number of deps_lists out there. */
334 static int dl_pool_diff = 0;
336 /* Functions to operate with dependences lists - deps_list_t. */
338 /* Return true if list L is empty. */
/* Emptiness is tracked by the cached link count, not by walking.  */
340 deps_list_empty_p (deps_list_t l)
342 return DEPS_LIST_N_LINKS (l) == 0;
345 /* Create a new deps_list. */
/* Allocate an empty deps_list from DL_POOL.
   NOTE(review): the dl_pool_diff bump and the return are elided from
   this listing.  */
347 create_deps_list (void)
349 deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
351 DEPS_LIST_FIRST (l) = NULL;
352 DEPS_LIST_N_LINKS (l) = 0;
358 /* Free deps_list L. */
/* Return list L to DL_POOL; L must be empty at this point.  */
360 free_deps_list (deps_list_t l)
362 gcc_assert (deps_list_empty_p (l));
366 pool_free (dl_pool, l);
369 /* Return true if there is no dep_nodes and deps_lists out there.
370 After the region is scheduled all the dependency nodes and lists
371 should [generally] be returned to pool. */
/* Both outstanding-object counters must be zero for the pools to be
   considered fully drained.  */
373 deps_pools_are_empty_p (void)
375 return dn_pool_diff == 0 && dl_pool_diff == 0;
378 /* Remove all elements from L. */
/* Drain list L by repeatedly removing its first link.
   NOTE(review): the surrounding loop and its termination test are
   elided from this listing.  */
380 clear_deps_list (deps_list_t l)
384 dep_link_t link = DEPS_LIST_FIRST (l);
389 remove_from_deps_list (link, l);
394 static regset reg_pending_sets;
395 static regset reg_pending_clobbers;
396 static regset reg_pending_uses;
397 static enum reg_pending_barrier_mode reg_pending_barrier;
399 /* To speed up the test for duplicate dependency links we keep a
400 record of dependencies created by add_dependence when the average
401 number of instructions in a basic block is very large.
403 Studies have shown that there is typically around 5 instructions between
404 branches for typical C code. So we can make a guess that the average
405 basic block is approximately 5 instructions long; we will choose 100X
406 the average size as a very large basic block.
408 Each insn has associated bitmaps for its dependencies. Each bitmap
409 has enough entries to represent a dependency on any other insn in
410 the insn chain. All bitmap for true dependencies cache is
411 allocated then the rest two ones are also allocated. */
412 static bitmap_head *true_dependency_cache = NULL;
413 static bitmap_head *output_dependency_cache = NULL;
414 static bitmap_head *anti_dependency_cache = NULL;
415 static bitmap_head *spec_dependency_cache = NULL;
416 static int cache_size;
418 static int deps_may_trap_p (const_rtx);
419 static void add_dependence_list (rtx, rtx, int, enum reg_note);
420 static void add_dependence_list_and_free (struct deps *, rtx,
421 rtx *, int, enum reg_note);
422 static void delete_all_dependences (rtx);
423 static void fixup_sched_groups (rtx);
425 static void flush_pending_lists (struct deps *, rtx, int, int);
426 static void sched_analyze_1 (struct deps *, rtx, rtx);
427 static void sched_analyze_2 (struct deps *, rtx, rtx);
428 static void sched_analyze_insn (struct deps *, rtx, rtx);
430 static bool sched_has_condition_p (const_rtx);
431 static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
433 static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
435 static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
437 #ifdef ENABLE_CHECKING
438 static void check_dep (dep_t, bool);
441 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
/* Return nonzero if loading MEM can trap.  For a pseudo-register
   address, consult the known-value table first; otherwise fall back
   to rtx_addr_can_trap_p on the raw address.  */
444 deps_may_trap_p (const_rtx mem)
446 const_rtx addr = XEXP (mem, 0);
448 if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
450 const_rtx t = get_reg_known_value (REGNO (addr));
/* NOTE(review): the use of T (substituting the known value for ADDR)
   is elided from this listing.  */
454 return rtx_addr_can_trap_p (addr);
458 /* Find the condition under which INSN is executed. If REV is not NULL,
459 it is set to TRUE when the returned comparison should be reversed
460 to get the actual condition. */
/* Extract the condition guarding INSN: the COND_EXEC test, or the
   comparison of a conditional jump.  *REV (if nonnull) is set when
   the returned comparison must be reversed to get the real
   condition.  */
462 sched_get_condition_with_rev (const_rtx insn, bool *rev)
464 rtx pat = PATTERN (insn);
473 if (GET_CODE (pat) == COND_EXEC)
474 return COND_EXEC_TEST (pat);
476 if (!any_condjump_p (insn) || !onlyjump_p (insn))
479 src = SET_SRC (pc_set (insn));
/* Branch-taken condition is in arm 0 when the fall-through is arm 2,
   or must be reversed when the fall-through is arm 1.  */
481 if (XEXP (src, 2) == pc_rtx)
482 return XEXP (src, 0);
483 else if (XEXP (src, 1) == pc_rtx)
485 rtx cond = XEXP (src, 0);
486 enum rtx_code revcode = reversed_comparison_code (cond, insn);
488 if (revcode == UNKNOWN)
499 /* True when we can find a condition under which INSN is executed. */
/* Predicate form: true iff a guarding condition exists for INSN.  */
501 sched_has_condition_p (const_rtx insn)
503 return !! sched_get_condition_with_rev (insn, NULL);
508 /* Return nonzero if conditions COND1 and COND2 can never be both true. */
/* COND1 and COND2 are mutually exclusive when they are comparisons of
   the same operands and their codes are opposite after accounting for
   the REV1/REV2 reversal flags.  */
510 conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
512 if (COMPARISON_P (cond1)
513 && COMPARISON_P (cond2)
514 && GET_CODE (cond1) ==
516 ? reversed_comparison_code (cond2, NULL)
518 && XEXP (cond1, 0) == XEXP (cond2, 0)
519 && XEXP (cond1, 1) == XEXP (cond2, 1))
524 /* Return true if insn1 and insn2 can never depend on one another because
525 the conditions under which they are executed are mutually exclusive. */
/* True when INSN1 and INSN2 cannot both execute because their guard
   conditions are mutually exclusive and neither insn modifies the
   other's condition.  Calls are excluded because df's conditional
   lifetime tracking is unreliable across them.  */
527 sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
530 bool rev1 = false, rev2 = false;
532 /* df doesn't handle conditional lifetimes entirely correctly;
533 calls mess up the conditional lifetimes.  */
534 if (!CALL_P (insn1) && !CALL_P (insn2))
536 cond1 = sched_get_condition_with_rev (insn1, &rev1);
537 cond2 = sched_get_condition_with_rev (insn2, &rev2);
539 && conditions_mutex_p (cond1, cond2, rev1, rev2)
540 /* Make sure first instruction doesn't affect condition of second
541 instruction if switched.  */
542 && !modified_in_p (cond1, insn2)
543 /* Make sure second instruction doesn't affect condition of first
544 instruction if switched.  */
545 && !modified_in_p (cond2, insn1))
552 /* Return true if INSN can potentially be speculated with type DS. */
/* Decide whether INSN may be speculated with speculation type DS.
   Each early test below rejects a class of non-speculatable insns:
   internal deps, non-insns, sched groups, speculation checks, side
   effects, trapping patterns, and (for BE_IN_DATA) predicated
   insns.  */
554 sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
556 if (HAS_INTERNAL_DEP (insn))
559 if (!NONJUMP_INSN_P (insn))
562 if (SCHED_GROUP_P (insn))
565 if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
568 if (side_effects_p (PATTERN (insn)))
572 /* The following instructions, which depend on a speculatively scheduled
573 instruction, cannot be speculatively scheduled along.  */
575 if (may_trap_p (PATTERN (insn)))
576 /* If instruction might trap, it cannot be speculatively scheduled.
577 For control speculation it's obvious why and for data speculation
578 it's because the insn might get wrong input if speculation
579 wasn't successful.  */
582 if ((ds & BE_IN_DATA)
583 && sched_has_condition_p (insn))
584 /* If this is a predicated instruction, then it cannot be
585 speculatively scheduled.  See PR35659.  */
592 /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
593 initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
594 and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
595 This function is used to switch sd_iterator to the next list.
596 !!! For internal use only. Might consider moving it to sched-int.h. */
/* Pick the next list to iterate from the type mask *TYPES_PTR: return
   it through LIST_PTR, flag whether it holds resolved deps through
   RESOLVED_P_PTR, and clear that type from the mask.  Used internally
   by sd_iterator to advance between lists.  */
598 sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
599 deps_list_t *list_ptr, bool *resolved_p_ptr)
601 sd_list_types_def types = *types_ptr;
603 if (types & SD_LIST_HARD_BACK)
605 *list_ptr = INSN_HARD_BACK_DEPS (insn);
606 *resolved_p_ptr = false;
607 *types_ptr = types & ~SD_LIST_HARD_BACK;
609 else if (types & SD_LIST_SPEC_BACK)
611 *list_ptr = INSN_SPEC_BACK_DEPS (insn);
612 *resolved_p_ptr = false;
613 *types_ptr = types & ~SD_LIST_SPEC_BACK;
615 else if (types & SD_LIST_FORW)
617 *list_ptr = INSN_FORW_DEPS (insn);
618 *resolved_p_ptr = false;
619 *types_ptr = types & ~SD_LIST_FORW;
621 else if (types & SD_LIST_RES_BACK)
623 *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
624 *resolved_p_ptr = true;
625 *types_ptr = types & ~SD_LIST_RES_BACK;
627 else if (types & SD_LIST_RES_FORW)
629 *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
630 *resolved_p_ptr = true;
631 *types_ptr = types & ~SD_LIST_RES_FORW;
/* Fallback (else branch, header elided): no list type left — report
   SD_LIST_NONE.  */
636 *resolved_p_ptr = false;
637 *types_ptr = SD_LIST_NONE;
641 /* Return the summary size of INSN's lists defined by LIST_TYPES. */
/* Sum the link counts of all of INSN's lists selected by LIST_TYPES,
   consuming the type mask one list at a time via sd_next_list.  */
643 sd_lists_size (const_rtx insn, sd_list_types_def list_types)
647 while (list_types != SD_LIST_NONE)
652 sd_next_list (insn, &list_types, &list, &resolved_p);
653 size += DEPS_LIST_N_LINKS (list);
659 /* Return true if INSN's lists defined by LIST_TYPES are all empty. */
/* Emptiness of the selected lists reduces to a zero total size.  */
661 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
663 return sd_lists_size (insn, list_types) == 0;
666 /* Initialize data for INSN. */
/* Allocate all five per-insn dependency lists for INSN: hard back,
   speculative back, resolved back, forward, resolved forward.  */
668 sd_init_insn (rtx insn)
670 INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
671 INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
672 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
673 INSN_FORW_DEPS (insn) = create_deps_list ();
674 INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
676 /* ??? It would be nice to allocate dependency caches here.  */
679 /* Free data for INSN. */
/* Free all five per-insn dependency lists of INSN, clearing each
   pointer afterwards to guard against stale use.  */
681 sd_finish_insn (rtx insn)
683 /* ??? It would be nice to deallocate dependency caches here.  */
685 free_deps_list (INSN_HARD_BACK_DEPS (insn));
686 INSN_HARD_BACK_DEPS (insn) = NULL;
688 free_deps_list (INSN_SPEC_BACK_DEPS (insn));
689 INSN_SPEC_BACK_DEPS (insn) = NULL;
691 free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
692 INSN_RESOLVED_BACK_DEPS (insn) = NULL;
694 free_deps_list (INSN_FORW_DEPS (insn));
695 INSN_FORW_DEPS (insn) = NULL;
697 free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
698 INSN_RESOLVED_FORW_DEPS (insn) = NULL;
701 /* Find a dependency between producer PRO and consumer CON.
702 Search through resolved dependency lists if RESOLVED_P is true.
703 If no such dependency is found return NULL,
704 otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
705 with an iterator pointing to it. */
/* Linear search for the dependence PRO -> CON, choosing the shorter of
   the consumer's back list and the producer's forward list (resolved
   variants when RESOLVED_P).  On success the iterator is returned
   through SD_IT_PTR if nonnull.  */
707 sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
708 sd_iterator_def *sd_it_ptr)
710 sd_list_types_def pro_list_type;
711 sd_list_types_def con_list_type;
712 sd_iterator_def sd_it;
714 bool found_p = false;
/* RESOLVED_P branch (condition elided): select resolved lists.  */
718 pro_list_type = SD_LIST_RES_FORW;
719 con_list_type = SD_LIST_RES_BACK;
723 pro_list_type = SD_LIST_FORW;
724 con_list_type = SD_LIST_BACK;
727 /* Walk through either back list of INSN or forw list of ELEM
728 depending on which one is shorter.  */
729 if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
731 /* Find the dep_link with producer PRO in consumer's back_deps.  */
732 FOR_EACH_DEP (con, con_list_type, sd_it, dep)
733 if (DEP_PRO (dep) == pro)
741 /* Find the dep_link with consumer CON in producer's forw_deps.  */
742 FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
743 if (DEP_CON (dep) == con)
752 if (sd_it_ptr != NULL)
761 /* Find a dependency between producer PRO and consumer CON.
762 Use dependency [if available] to check if dependency is present at all.
763 Search through resolved dependency lists if RESOLVED_P is true.
764 If the dependency or NULL if none found. */
/* Cached front-end for sd_find_dep_between_no_cache: when the bitmap
   caches are live, a clear bit in all three (true/output/anti) proves
   no dependence exists, avoiding the list walk entirely.  */
766 sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
768 if (true_dependency_cache != NULL)
769 /* Avoiding the list walk below can cut compile times dramatically
772 int elem_luid = INSN_LUID (pro);
773 int insn_luid = INSN_LUID (con);
775 gcc_assert (output_dependency_cache != NULL
776 && anti_dependency_cache != NULL);
778 if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
779 && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
780 && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
784 return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
787 /* Add or update a dependence described by DEP.
788 MEM1 and MEM2, if non-null, correspond to memory locations in case of
791 The function returns a value indicating if an old entry has been changed
792 or a new entry has been added to insn's backward deps.
794 This function merely checks if producer and consumer is the same insn
795 and doesn't create a dep in this case. Actual manipulation of
796 dependence data structures is performed in add_or_update_dep_1. */
/* Gatekeeper before add_or_update_dep_1: refuse a self-dependence
   (producer == consumer).  Under speculative scheduling a
   self-dependence instead marks INSN as having an internal dep, which
   disqualifies it from speculation.  */
797 static enum DEPS_ADJUST_RESULT
798 maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
800 rtx elem = DEP_PRO (dep);
801 rtx insn = DEP_CON (dep);
803 gcc_assert (INSN_P (insn) && INSN_P (elem));
805 /* Don't depend an insn on itself.  */
808 if (sched_deps_info->generate_spec_deps)
809 /* INSN has an internal dependence, which we can't overcome.  */
810 HAS_INTERNAL_DEP (insn) = 1;
815 return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
818 /* Ask dependency caches what needs to be done for dependence DEP.
819 Return DEP_CREATED if new dependence should be created and there is no
820 need to try to find one searching the dependencies lists.
821 Return DEP_PRESENT if there already is a dependence described by DEP and
822 hence nothing is to be done.
823 Return DEP_CHANGED if there already is a dependence, but it should be
824 updated to incorporate additional information from DEP. */
/* Query the bitmap caches about DEP and report what the caller must
   do: DEP_CREATED (no dep yet), DEP_PRESENT (existing dep subsumes
   DEP) or DEP_CHANGED (existing dep needs updating).  Two regimes:
   single-type caches (no USE_DEPS_LIST) vs. full status bit sets.  */
825 static enum DEPS_ADJUST_RESULT
826 ask_dependency_caches (dep_t dep)
828 int elem_luid = INSN_LUID (DEP_PRO (dep));
829 int insn_luid = INSN_LUID (DEP_CON (dep));
831 gcc_assert (true_dependency_cache != NULL
832 && output_dependency_cache != NULL
833 && anti_dependency_cache != NULL);
835 if (!(current_sched_info->flags & USE_DEPS_LIST))
837 enum reg_note present_dep_type;
839 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
840 present_dep_type = REG_DEP_TRUE;
841 else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
842 present_dep_type = REG_DEP_OUTPUT;
843 else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
844 present_dep_type = REG_DEP_ANTI;
846 /* There is no existing dep so it should be created.  */
/* Type comparison exploits REG_DEP_* ordering: numerically smaller
   means more restrictive.  */
849 if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
850 /* DEP does not add anything to the existing dependence.  */
855 ds_t present_dep_types = 0;
857 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
858 present_dep_types |= DEP_TRUE;
859 if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
860 present_dep_types |= DEP_OUTPUT;
861 if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
862 present_dep_types |= DEP_ANTI;
864 if (present_dep_types == 0)
865 /* There is no existing dep so it should be created.  */
868 if (!(current_sched_info->flags & DO_SPECULATION)
869 || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
871 if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
872 == present_dep_types)
873 /* DEP does not add anything to the existing dependence.  */
878 /* Only true dependencies can be data speculative and
879 only anti dependencies can be control speculative.  */
880 gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
881 == present_dep_types)
883 /* if (DEP is SPECULATIVE) then
884 ..we should update DEP_STATUS
886 ..we should reset existing dep to non-speculative.  */
893 /* Set dependency caches according to DEP. */
/* Record DEP in the bitmap caches, indexed [consumer LUID][producer
   LUID].  Single-type mode sets exactly one bitmap by DEP_TYPE;
   status-list mode sets one bitmap per DEP_TRUE/OUTPUT/ANTI bit and
   the speculation cache when DEP is speculative.  */
895 set_dependency_caches (dep_t dep)
897 int elem_luid = INSN_LUID (DEP_PRO (dep));
898 int insn_luid = INSN_LUID (DEP_CON (dep));
900 if (!(current_sched_info->flags & USE_DEPS_LIST))
902 switch (DEP_TYPE (dep))
905 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
909 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
913 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
922 ds_t ds = DEP_STATUS (dep);
925 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
927 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
929 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
931 if (ds & SPECULATIVE)
933 gcc_assert (current_sched_info->flags & DO_SPECULATION);
934 bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
939 /* Type of dependence DEP have changed from OLD_TYPE. Update dependency
940 caches accordingly. */
/* DEP's type changed from OLD_TYPE: in single-type mode clear the
   stale cache bit for OLD_TYPE (the per-type switch is partially
   elided here), then re-record DEP via set_dependency_caches.  */
942 update_dependency_caches (dep_t dep, enum reg_note old_type)
944 int elem_luid = INSN_LUID (DEP_PRO (dep));
945 int insn_luid = INSN_LUID (DEP_CON (dep));
947 /* Clear corresponding cache entry because type of the link
948 may have changed.  Keep them if we use_deps_list.  */
949 if (!(current_sched_info->flags & USE_DEPS_LIST))
954 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
958 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
966 set_dependency_caches (dep);
969 /* Convert a dependence pointed to by SD_IT to be non-speculative. */
/* Demote the speculative dependence at SD_IT to a hard one: move its
   back link from the consumer's spec list to the hard list, strip the
   SPECULATIVE status bits, and drop the speculation cache bit.  */
971 change_spec_dep_to_hard (sd_iterator_def sd_it)
973 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
974 dep_link_t link = DEP_NODE_BACK (node);
975 dep_t dep = DEP_NODE_DEP (node);
976 rtx elem = DEP_PRO (dep);
977 rtx insn = DEP_CON (dep);
979 move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
981 DEP_STATUS (dep) &= ~SPECULATIVE;
983 if (true_dependency_cache != NULL)
984 /* Clear the cache entry.  */
985 bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
989 /* Update DEP to incorporate information from NEW_DEP.
990 SD_IT points to DEP in case it should be moved to another list.
991 MEM1 and MEM2, if nonnull, correspond to memory locations in case if
992 data-speculative dependence should be updated. */
/* Merge NEW_DEP into the existing DEP: take the more restrictive
   type, and under USE_DEPS_LIST merge the status bits, handling the
   speculative/non-speculative interactions (a non-speculative side
   forces the result hard; two speculative sides merge weakness).
   Returns DEP_PRESENT or DEP_CHANGED; refreshes caches on change.  */
993 static enum DEPS_ADJUST_RESULT
994 update_dep (dep_t dep, dep_t new_dep,
995 sd_iterator_def sd_it ATTRIBUTE_UNUSED,
996 rtx mem1 ATTRIBUTE_UNUSED,
997 rtx mem2 ATTRIBUTE_UNUSED)
999 enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1000 enum reg_note old_type = DEP_TYPE (dep);
1002 /* If this is a more restrictive type of dependence than the
1003 existing one, then change the existing dependence to this
1005 if ((int) DEP_TYPE (new_dep) < (int) old_type)
1007 DEP_TYPE (dep) = DEP_TYPE (new_dep);
1011 if (current_sched_info->flags & USE_DEPS_LIST)
1012 /* Update DEP_STATUS.  */
1014 ds_t dep_status = DEP_STATUS (dep);
1015 ds_t ds = DEP_STATUS (new_dep);
1016 ds_t new_status = ds | dep_status;
1018 if (new_status & SPECULATIVE)
1019 /* Either existing dep or a dep we're adding or both are
1022 if (!(ds & SPECULATIVE)
1023 || !(dep_status & SPECULATIVE))
1024 /* The new dep can't be speculative.  */
1026 new_status &= ~SPECULATIVE;
1028 if (dep_status & SPECULATIVE)
1029 /* The old dep was speculative, but now it
1031 change_spec_dep_to_hard (sd_it);
1035 /* Both are speculative.  Merge probabilities.  */
/* NOTE(review): the mem1 != NULL guard around the weakness estimate
   is elided from this listing.  */
1040 dw = estimate_dep_weak (mem1, mem2);
1041 ds = set_dep_weak (ds, BEGIN_DATA, dw);
1044 new_status = ds_merge (dep_status, ds);
1050 if (dep_status != ds)
1052 DEP_STATUS (dep) = ds;
1057 if (true_dependency_cache != NULL
1058 && res == DEP_CHANGED)
1059 update_dependency_caches (dep, old_type);
1064 /* Add or update a dependence described by DEP.
1065 MEM1 and MEM2, if non-null, correspond to memory locations in case of
1068 The function returns a value indicating if an old entry has been changed
1069 or a new entry has been added to insn's backward deps or nothing has
1070 been updated at all. */
/* Core add-or-update: consult the caches (when live) to decide
   whether NEW_DEP may already exist; if it might, search the lists
   and delegate to update_dep; otherwise estimate data-speculative
   weakness from MEM1/MEM2 when given and add NEW_DEP fresh via
   sd_add_dep.  */
1071 static enum DEPS_ADJUST_RESULT
1072 add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1073 rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1075 bool maybe_present_p = true;
1076 bool present_p = false;
1078 gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1079 && DEP_PRO (new_dep) != DEP_CON (new_dep));
1081 #ifdef ENABLE_CHECKING
1082 check_dep (new_dep, mem1 != NULL);
1085 if (true_dependency_cache != NULL)
1087 switch (ask_dependency_caches (new_dep))
/* Cache says a dep exists and may need updating.  */
1093 maybe_present_p = true;
/* Cache says no dep exists — skip the list search.  */
1098 maybe_present_p = false;
1108 /* Check that we don't already have this dependence.  */
1109 if (maybe_present_p)
1112 sd_iterator_def sd_it;
1114 gcc_assert (true_dependency_cache == NULL || present_p);
1116 present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1118 resolved_p, &sd_it);
1120 if (present_dep != NULL)
1121 /* We found an existing dependency between ELEM and INSN.  */
1122 return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1124 /* We didn't find a dep, it shouldn't present in the cache.  */
1125 gcc_assert (!present_p);
1128 /* Might want to check one level of transitivity to save conses.
1129 This check should be done in maybe_add_or_update_dep_1.
1130 Since we made it to add_or_update_dep_1, we must create
1131 (or update) a link.  */
1133 if (mem1 != NULL_RTX)
1135 gcc_assert (sched_deps_info->generate_spec_deps);
1136 DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1137 estimate_dep_weak (mem1, mem2));
1140 sd_add_dep (new_dep, resolved_p);
1145 /* Initialize BACK_LIST_PTR with consumer's backward list and
1146 FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true
1147 initialize with lists that hold resolved deps. */
/* Pick which pair of lists DEP belongs on: for unresolved deps, the
   consumer's spec or hard back list (speculative status decides) plus
   the producer's forward list; for resolved deps, the resolved
   variants of both.  */
1149 get_back_and_forw_lists (dep_t dep, bool resolved_p,
1150 deps_list_t *back_list_ptr,
1151 deps_list_t *forw_list_ptr)
1153 rtx con = DEP_CON (dep);
/* Unresolved branch (the !RESOLVED_P test is elided here).  */
1157 if ((current_sched_info->flags & DO_SPECULATION)
1158 && (DEP_STATUS (dep) & SPECULATIVE))
1159 *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1161 *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1163 *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1167 *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1168 *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1172 /* Add dependence described by DEP.
1173 If RESOLVED_P is true treat the dependence as a resolved one. */
/* Unconditionally add DEP: allocate a dep_node, strip SPECULATIVE
   status when the consumer is not legitimate for speculation, copy
   DEP into the node, attach the node's back/forw links to the proper
   lists, and record the dep in the bitmap caches.  */
1175 sd_add_dep (dep_t dep, bool resolved_p)
1177 dep_node_t n = create_dep_node ();
1178 deps_list_t con_back_deps;
1179 deps_list_t pro_forw_deps;
1180 rtx elem = DEP_PRO (dep);
1181 rtx insn = DEP_CON (dep);
1183 gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1185 if ((current_sched_info->flags & DO_SPECULATION)
1186 && !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1187 DEP_STATUS (dep) &= ~SPECULATIVE;
1189 copy_dep (DEP_NODE_DEP (n), dep);
1191 get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1193 add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1195 #ifdef ENABLE_CHECKING
1196 check_dep (dep, false);
1199 add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1201 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1202 in the bitmap caches of dependency information.  */
1203 if (true_dependency_cache != NULL)
1204 set_dependency_caches (dep);
1207 /* Add or update backward dependence between INSN and ELEM
1208 with given type DEP_TYPE and dep_status DS.
1209 This function is a convenience wrapper. */
/* Public wrapper: add or update DEP with no memory-location context
   (hence no data-speculation weakness estimation).  */
1210 enum DEPS_ADJUST_RESULT
1211 sd_add_or_update_dep (dep_t dep, bool resolved_p)
1213 return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1216 /* Resolved dependence pointed to by SD_IT.
1217 SD_IT will advance to the next element. */
/* Mark the dependence at SD_IT resolved: move its back link from the
   consumer's spec or hard list to the resolved-back list, and its
   forward link from the producer's forward list to the resolved-forw
   list.  */
1219 sd_resolve_dep (sd_iterator_def sd_it)
1221 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1222 dep_t dep = DEP_NODE_DEP (node);
1223 rtx pro = DEP_PRO (dep);
1224 rtx con = DEP_CON (dep);
1226 if ((current_sched_info->flags & DO_SPECULATION)
1227 && (DEP_STATUS (dep) & SPECULATIVE))
1228 move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1229 INSN_RESOLVED_BACK_DEPS (con));
1231 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1232 INSN_RESOLVED_BACK_DEPS (con));
1234 move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1235 INSN_RESOLVED_FORW_DEPS (pro));
1238 /* Make TO depend on all the FROM's producers.
1239 If RESOLVED_P is true add dependencies to the resolved lists. */
/* For every backward dependence of FROM, create an equivalent
   dependence with TO as the consumer (resolved lists when
   RESOLVED_P).  Each dep is copied into a stack temporary before its
   consumer is redirected.  */
1241 sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
1243 sd_list_types_def list_type;
1244 sd_iterator_def sd_it;
1247 list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1249 FOR_EACH_DEP (from, list_type, sd_it, dep)
1251 dep_def _new_dep, *new_dep = &_new_dep;
1253 copy_dep (new_dep, dep);
1254 DEP_CON (new_dep) = to;
1255 sd_add_dep (new_dep, resolved_p);
1259 /* Remove a dependency referred to by SD_IT.
1260 SD_IT will point to the next dependence after removal. */
/* Fully remove the dependence at SD_IT: clear all of its bitmap-cache
   bits, detach both of its links from the consumer-back and
   producer-forw lists, and return the node to the pool.  */
1262 sd_delete_dep (sd_iterator_def sd_it)
1264 dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1265 dep_t dep = DEP_NODE_DEP (n);
1266 rtx pro = DEP_PRO (dep);
1267 rtx con = DEP_CON (dep);
1268 deps_list_t con_back_deps;
1269 deps_list_t pro_forw_deps;
1271 if (true_dependency_cache != NULL)
1273 int elem_luid = INSN_LUID (pro);
1274 int insn_luid = INSN_LUID (con);
1276 bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1277 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1278 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1280 if (current_sched_info->flags & DO_SPECULATION)
1281 bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1284 get_back_and_forw_lists (dep, sd_it.resolved_p,
1285 &con_back_deps, &pro_forw_deps);
1287 remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1288 remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1290 delete_dep_node (n);
1293 /* Dump size of the lists. */
1294 #define DUMP_LISTS_SIZE (2)
1296 /* Dump dependencies of the lists. */
1297 #define DUMP_LISTS_DEPS (4)
1299 /* Dump all information about the lists. */
1300 #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1302 /* Dump deps_lists of INSN specified by TYPES to DUMP.
1303 FLAGS is a bit mask specifying what information about the lists needs
1305 If FLAGS has the very first bit set, then dump all information about
1306 the lists and propagate this bit into the callee dump functions. */
/* Dump the lists of INSN selected by TYPES to DUMP, controlled by the
   DUMP_LISTS_* bits in FLAGS (size and/or individual deps).  */
1308 dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1310 sd_iterator_def sd_it;
1317 flags |= DUMP_LISTS_ALL;
1319 fprintf (dump, "[");
1321 if (flags & DUMP_LISTS_SIZE)
1322 fprintf (dump, "%d; ", sd_lists_size (insn, types));
1324 if (flags & DUMP_LISTS_DEPS)
1326 FOR_EACH_DEP (insn, types, sd_it, dep)
1328 dump_dep (dump, dep, dump_dep_flags | all);
1329 fprintf (dump, " ");
1334 /* Dump all information about deps_lists of INSN specified by TYPES
/* Debugger entry point: dump full list information to stderr.  */
1337 sd_debug_lists (rtx insn, sd_list_types_def types)
1339 dump_lists (stderr, insn, types, 1);
1340 fprintf (stderr, "\n");
1343 /* A convenience wrapper to operate on an entire list. */
/* Add a DEP_TYPE dependence from INSN to every insn on LIST.  Each
   element is skipped when its guard condition is provably mutually
   exclusive with INSN's, unless UNCOND forces it.  */
1346 add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
1348 for (; list; list = XEXP (list, 1))
1350 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1351 add_dependence (insn, XEXP (list, 0), dep_type);
1355 /* Similar, but free *LISTP at the same time, when the context
1359 add_dependence_list_and_free (struct deps *deps, rtx insn, rtx *listp,
1360 int uncond, enum reg_note dep_type)
1366 add_dependence_list (insn, *listp, uncond, dep_type);
1370 for (list = *listp, *listp = NULL; list ; list = next)
1372 next = XEXP (list, 1);
1373 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1374 add_dependence (insn, XEXP (list, 0), dep_type);
1375 free_INSN_LIST_node (list);
1379 /* Remove all occurences of INSN from LIST. Return the number of
1380 occurences removed. */
1383 remove_from_dependence_list (rtx insn, rtx* listp)
1389 if (XEXP (*listp, 0) == insn)
1391 remove_free_INSN_LIST_node (listp);
1396 listp = &XEXP (*listp, 1);
1402 /* Same as above, but process two lists at once. */
1404 remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
1410 if (XEXP (*listp, 0) == insn)
1412 remove_free_INSN_LIST_node (listp);
1413 remove_free_EXPR_LIST_node (exprp);
1418 listp = &XEXP (*listp, 1);
1419 exprp = &XEXP (*exprp, 1);
1425 /* Clear all dependencies for an insn. */
1427 delete_all_dependences (rtx insn)
1429 sd_iterator_def sd_it;
1432 /* The below cycle can be optimized to clear the caches and back_deps
1433 in one call but that would provoke duplication of code from
1436 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1437 sd_iterator_cond (&sd_it, &dep);)
1438 sd_delete_dep (sd_it);
1441 /* All insns in a scheduling group except the first should only have
1442 dependencies on the previous insn in the group. So we find the
1443 first instruction in the scheduling group by walking the dependence
1444 chains backwards. Then we add the dependencies for the group to
1445 the previous nonnote insn. */
1448 fixup_sched_groups (rtx insn)
1450 sd_iterator_def sd_it;
1454 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1457 rtx pro = DEP_PRO (dep);
1461 i = prev_nonnote_insn (i);
1465 } while (SCHED_GROUP_P (i));
1467 if (! sched_insns_conditions_mutex_p (i, pro))
1468 add_dependence (i, pro, DEP_TYPE (dep));
1472 delete_all_dependences (insn);
1474 prev_nonnote = prev_nonnote_insn (insn);
1475 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1476 && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1477 add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1480 /* Process an insn's memory dependencies. There are four kinds of
1483 (0) read dependence: read follows read
1484 (1) true dependence: read follows write
1485 (2) output dependence: write follows write
1486 (3) anti dependence: write follows read
1488 We are careful to build only dependencies which actually exist, and
1489 use transitivity to avoid building too many links. */
1491 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1492 The MEM is a memory reference contained within INSN, which we are saving
1493 so that we can do memory aliasing on it. */
1496 add_insn_mem_dependence (struct deps *deps, bool read_p,
1503 gcc_assert (!deps->readonly);
1506 insn_list = &deps->pending_read_insns;
1507 mem_list = &deps->pending_read_mems;
1508 deps->pending_read_list_length++;
1512 insn_list = &deps->pending_write_insns;
1513 mem_list = &deps->pending_write_mems;
1514 deps->pending_write_list_length++;
1517 link = alloc_INSN_LIST (insn, *insn_list);
1520 if (sched_deps_info->use_cselib)
1522 mem = shallow_copy_rtx (mem);
1523 XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
1525 link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1529 /* Make a dependency between every memory reference on the pending lists
1530 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1531 dependencies for a read operation, similarly with FOR_WRITE. */
1534 flush_pending_lists (struct deps *deps, rtx insn, int for_read,
1539 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1541 if (!deps->readonly)
1543 free_EXPR_LIST_list (&deps->pending_read_mems);
1544 deps->pending_read_list_length = 0;
1548 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1549 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1551 add_dependence_list_and_free (deps, insn,
1552 &deps->last_pending_memory_flush, 1,
1553 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1554 if (!deps->readonly)
1556 free_EXPR_LIST_list (&deps->pending_write_mems);
1557 deps->pending_write_list_length = 0;
1559 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1560 deps->pending_flush_length = 1;
1564 /* Instruction which dependencies we are analyzing. */
1565 static rtx cur_insn = NULL_RTX;
1567 /* Implement hooks for haifa scheduler. */
1570 haifa_start_insn (rtx insn)
1572 gcc_assert (insn && !cur_insn);
1578 haifa_finish_insn (void)
1584 haifa_note_reg_set (int regno)
1586 SET_REGNO_REG_SET (reg_pending_sets, regno);
1590 haifa_note_reg_clobber (int regno)
1592 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1596 haifa_note_reg_use (int regno)
1598 SET_REGNO_REG_SET (reg_pending_uses, regno);
1602 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
1604 if (!(ds & SPECULATIVE))
1607 pending_mem = NULL_RTX;
1610 gcc_assert (ds & BEGIN_DATA);
1613 dep_def _dep, *dep = &_dep;
1615 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1616 current_sched_info->flags & USE_DEPS_LIST ? ds : -1);
1617 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1623 haifa_note_dep (rtx elem, ds_t ds)
1628 init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1629 maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1633 note_reg_use (int r)
1635 if (sched_deps_info->note_reg_use)
1636 sched_deps_info->note_reg_use (r);
1640 note_reg_set (int r)
1642 if (sched_deps_info->note_reg_set)
1643 sched_deps_info->note_reg_set (r);
1647 note_reg_clobber (int r)
1649 if (sched_deps_info->note_reg_clobber)
1650 sched_deps_info->note_reg_clobber (r);
1654 note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
1656 if (sched_deps_info->note_mem_dep)
1657 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1661 note_dep (rtx e, ds_t ds)
1663 if (sched_deps_info->note_dep)
1664 sched_deps_info->note_dep (e, ds);
1667 /* Return corresponding to DS reg_note. */
1672 return REG_DEP_TRUE;
1673 else if (ds & DEP_OUTPUT)
1674 return REG_DEP_OUTPUT;
1677 gcc_assert (ds & DEP_ANTI);
1678 return REG_DEP_ANTI;
1683 /* Internal variable for sched_analyze_[12] () functions.
1684 If it is nonzero, this means that sched_analyze_[12] looks
1685 at the most toplevel SET. */
1686 static bool can_start_lhs_rhs_p;
1688 /* Extend reg info for the deps context DEPS given that
1689 we have just generated a register numbered REGNO. */
1691 extend_deps_reg_info (struct deps *deps, int regno)
1693 int max_regno = regno + 1;
1695 gcc_assert (!reload_completed);
1697 /* In a readonly context, it would not hurt to extend info,
1698 but it should not be needed. */
1699 if (reload_completed && deps->readonly)
1701 deps->max_reg = max_regno;
1705 if (max_regno > deps->max_reg)
1707 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
1709 memset (&deps->reg_last[deps->max_reg],
1710 0, (max_regno - deps->max_reg)
1711 * sizeof (struct deps_reg));
1712 deps->max_reg = max_regno;
1716 /* Extends REG_INFO_P if needed. */
1718 maybe_extend_reg_info_p (void)
1720 /* Extend REG_INFO_P, if needed. */
1721 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
1723 size_t new_reg_info_p_size = max_regno + 128;
1725 gcc_assert (!reload_completed && sel_sched_p ());
1727 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
1728 new_reg_info_p_size,
1730 sizeof (*reg_info_p));
1731 reg_info_p_size = new_reg_info_p_size;
1735 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
1736 The type of the reference is specified by REF and can be SET,
1737 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
1740 sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
1741 enum rtx_code ref, rtx insn)
1743 /* We could emit new pseudos in renaming. Extend the reg structures. */
1744 if (!reload_completed && sel_sched_p ()
1745 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
1746 extend_deps_reg_info (deps, regno);
1748 maybe_extend_reg_info_p ();
1750 /* A hard reg in a wide mode may really be multiple registers.
1751 If so, mark all of them just like the first. */
1752 if (regno < FIRST_PSEUDO_REGISTER)
1754 int i = hard_regno_nregs[regno][mode];
/* Dispatch each constituent hard register to the matching note_*
   callback according to the reference kind.  */
1758 note_reg_set (regno + i);
1760 else if (ref == USE)
1763 note_reg_use (regno + i);
1768 note_reg_clobber (regno + i);
1772 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
1773 it does not reload. Ignore these as they have served their
1775 else if (regno >= deps->max_reg)
1777 enum rtx_code code = GET_CODE (PATTERN (insn));
1778 gcc_assert (code == USE || code == CLOBBER);
/* Otherwise this is an ordinary pseudo: note the single reference.  */
1784 note_reg_set (regno);
1785 else if (ref == USE)
1786 note_reg_use (regno);
1788 note_reg_clobber (regno);
1790 /* Pseudos that are REG_EQUIV to something may be replaced
1791 by that during reloading. We need only add dependencies for
1792 the address in the REG_EQUIV note. */
1793 if (!reload_completed && get_reg_known_equiv_p (regno))
1795 rtx t = get_reg_known_value (regno);
/* presumably only MEM-valued equivalences reach here -- TODO confirm
   the guard elided from this view.  */
1797 sched_analyze_2 (deps, XEXP (t, 0), insn);
1800 /* Don't let it cross a call after scheduling if it doesn't
1801 already cross one. */
1802 if (REG_N_CALLS_CROSSED (regno) == 0)
1806 deps->sched_before_next_call
1807 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
1809 add_dependence_list (insn, deps->last_function_call, 1,
1815 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
1816 rtx, X, creating all dependencies generated by the write to the
1817 destination of X, and reads of everything mentioned. */
1820 sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
1822 rtx dest = XEXP (x, 0);
1823 enum rtx_code code = GET_CODE (x);
1824 bool cslr_p = can_start_lhs_rhs_p;
1826 can_start_lhs_rhs_p = false;
/* The start/finish_lhs hooks only fire for the toplevel SET of the
   insn (cslr_p) and only when the hook is installed.  */
1832 if (cslr_p && sched_deps_info->start_lhs)
1833 sched_deps_info->start_lhs (dest)
1835 if (GET_CODE (dest) == PARALLEL)
/* A PARALLEL destination: recurse on each non-null piece as a
   CLOBBER of that piece.  */
1839 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1840 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1841 sched_analyze_1 (deps,
1842 gen_rtx_CLOBBER (VOIDmode,
1843 XEXP (XVECEXP (dest, 0, i), 0)),
1846 if (cslr_p && sched_deps_info->finish_lhs)
1847 sched_deps_info->finish_lhs ();
1851 can_start_lhs_rhs_p = cslr_p;
1853 sched_analyze_2 (deps, SET_SRC (x), insn);
1855 can_start_lhs_rhs_p = false;
/* Strip wrappers (STRICT_LOW_PART/SUBREG/ZERO_EXTRACT) down to the
   real destination, analyzing the parts they read on the way.  */
1861 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
1862 || GET_CODE (dest) == ZERO_EXTRACT)
1864 if (GET_CODE (dest) == STRICT_LOW_PART
1865 || GET_CODE (dest) == ZERO_EXTRACT
1866 || df_read_modify_subreg_p (dest))
1868 /* These both read and modify the result. We must handle
1869 them as writes to get proper dependencies for following
1870 instructions. We must handle them as reads to get proper
1871 dependencies from this to previous instructions.
1872 Thus we need to call sched_analyze_2. */
1874 sched_analyze_2 (deps, XEXP (dest, 0), insn);
1876 if (GET_CODE (dest) == ZERO_EXTRACT)
1878 /* The second and third arguments are values read by this insn. */
1879 sched_analyze_2 (deps, XEXP (dest, 1), insn);
1880 sched_analyze_2 (deps, XEXP (dest, 2), insn);
1882 dest = XEXP (dest, 0);
/* Register destination.  */
1887 int regno = REGNO (dest);
1888 enum machine_mode mode = GET_MODE (dest);
1890 sched_analyze_reg (deps, regno, mode, code, insn);
1893 /* Treat all writes to a stack register as modifying the TOS. */
1894 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
1896 /* Avoid analyzing the same register twice. */
1897 if (regno != FIRST_STACK_REG)
1898 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
1899 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
1903 else if (MEM_P (dest))
1905 /* Writing memory. */
1908 if (sched_deps_info->use_cselib)
1910 t = shallow_copy_rtx (dest);
1911 cselib_lookup (XEXP (t, 0), Pmode, 1);
1912 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
1916 /* Pending lists can't get larger with a readonly context. */
1918 && ((deps->pending_read_list_length + deps->pending_write_list_length)
1919 > MAX_PENDING_LIST_LENGTH))
1921 /* Flush all pending reads and writes to prevent the pending lists
1922 from getting any larger. Insn scheduling runs too slowly when
1923 these lists get long. When compiling GCC with itself,
1924 this flush occurs 8 times for sparc, and 10 times for m88k using
1925 the default value of 32. */
1926 flush_pending_lists (deps, insn, false, true);
1930 rtx pending, pending_mem;
/* Anti dependencies against every pending read that may alias.  */
1932 pending = deps->pending_read_insns;
1933 pending_mem = deps->pending_read_mems;
1936 if (anti_dependence (XEXP (pending_mem, 0), t)
1937 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
1938 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
1941 pending = XEXP (pending, 1);
1942 pending_mem = XEXP (pending_mem, 1);
/* Output dependencies against every pending write that may alias.  */
1945 pending = deps->pending_write_insns;
1946 pending_mem = deps->pending_write_mems;
1949 if (output_dependence (XEXP (pending_mem, 0), t)
1950 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
1951 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
1954 pending = XEXP (pending, 1);
1955 pending_mem = XEXP (pending_mem, 1);
1958 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
1961 if (!deps->readonly)
1962 add_insn_mem_dependence (deps, false, insn, dest);
/* The address of the store is itself read by this insn.  */
1964 sched_analyze_2 (deps, XEXP (dest, 0), insn);
1967 if (cslr_p && sched_deps_info->finish_lhs)
1968 sched_deps_info->finish_lhs ();
1970 /* Analyze reads. */
1971 if (GET_CODE (x) == SET)
1973 can_start_lhs_rhs_p = cslr_p;
1975 sched_analyze_2 (deps, SET_SRC (x), insn);
1977 can_start_lhs_rhs_p = false;
1981 /* Analyze the uses of memory and registers in rtx X in INSN. */
1983 sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
1989 bool cslr_p = can_start_lhs_rhs_p;
1991 can_start_lhs_rhs_p = false;
/* start/finish_rhs hooks fire only for the toplevel SET_SRC (cslr_p)
   and only when installed.  */
1997 if (cslr_p && sched_deps_info->start_rhs)
1998 sched_deps_info->start_rhs (x);
2000 code = GET_CODE (x);
2011 /* Ignore constants. */
2012 if (cslr_p && sched_deps_info->finish_rhs)
2013 sched_deps_info->finish_rhs ();
2019 /* User of CC0 depends on immediately preceding insn. */
2020 SCHED_GROUP_P (insn) = 1;
2021 /* Don't move CC0 setter to another block (it can set up the
2022 same flag for previous CC0 users which is safe). */
2023 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2025 if (cslr_p && sched_deps_info->finish_rhs)
2026 sched_deps_info->finish_rhs ();
/* Register use.  */
2033 int regno = REGNO (x);
2034 enum machine_mode mode = GET_MODE (x);
2036 sched_analyze_reg (deps, regno, mode, USE, insn);
2039 /* Treat all reads of a stack register as modifying the TOS. */
2040 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2042 /* Avoid analyzing the same register twice. */
2043 if (regno != FIRST_STACK_REG)
2044 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2045 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2049 if (cslr_p && sched_deps_info->finish_rhs)
2050 sched_deps_info->finish_rhs ();
2057 /* Reading memory. */
2059 rtx pending, pending_mem;
2062 if (sched_deps_info->use_cselib)
2064 t = shallow_copy_rtx (t);
2065 cselib_lookup (XEXP (t, 0), Pmode, 1);
2066 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
/* Read-after-read dependencies against pending reads.  */
2069 pending = deps->pending_read_insns;
2070 pending_mem = deps->pending_read_mems;
2073 if (read_dependence (XEXP (pending_mem, 0), t)
2074 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2075 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2078 pending = XEXP (pending, 1);
2079 pending_mem = XEXP (pending_mem, 1);
/* True dependencies against pending writes; these may be made
   data-speculative when the target supports it.  */
2082 pending = deps->pending_write_insns;
2083 pending_mem = deps->pending_write_mems;
2086 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
2088 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2089 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2090 sched_deps_info->generate_spec_deps
2091 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2093 pending = XEXP (pending, 1);
2094 pending_mem = XEXP (pending_mem, 1);
/* Depend on every pending memory flush; for a jump flush, the
   dependence may become control-speculative.  */
2097 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2099 if (! JUMP_P (XEXP (u, 0)))
2100 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2101 else if (deps_may_trap_p (x))
2103 if ((sched_deps_info->generate_spec_deps)
2104 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2106 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2109 note_dep (XEXP (u, 0), ds);
2112 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2116 /* Always add these dependencies to pending_reads, since
2117 this insn may be followed by a write. */
2118 if (!deps->readonly)
2119 add_insn_mem_dependence (deps, true, insn, x);
2121 /* Take advantage of tail recursion here. */
2122 sched_analyze_2 (deps, XEXP (x, 0), insn);
2124 if (cslr_p && sched_deps_info->finish_rhs)
2125 sched_deps_info->finish_rhs ();
2130 /* Force pending stores to memory in case a trap handler needs them. */
2132 flush_pending_lists (deps, insn, true, false);
2137 case UNSPEC_VOLATILE:
2139 /* Traditional and volatile asm instructions must be considered to use
2140 and clobber all hard registers, all pseudo-registers and all of
2141 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2143 Consider for instance a volatile asm that changes the fpu rounding
2144 mode. An insn should not be moved across this even if it only uses
2145 pseudo-regs because it might give an incorrectly rounded result. */
2146 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2147 reg_pending_barrier = TRUE_BARRIER;
2149 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2150 We can not just fall through here since then we would be confused
2151 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
2152 traditional asms unlike their normal usage. */
2154 if (code == ASM_OPERANDS)
2156 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2157 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2159 if (cslr_p && sched_deps_info->finish_rhs)
2160 sched_deps_info->finish_rhs ();
/* PRE/POST modify-style addresses.  */
2171 /* These both read and modify the result. We must handle them as writes
2172 to get proper dependencies for following instructions. We must handle
2173 them as reads to get proper dependencies from this to previous
2174 instructions. Thus we need to pass them to both sched_analyze_1
2175 and sched_analyze_2. We must call sched_analyze_2 first in order
2176 to get the proper antecedent for the read. */
2177 sched_analyze_2 (deps, XEXP (x, 0), insn);
2178 sched_analyze_1 (deps, x, insn);
2180 if (cslr_p && sched_deps_info->finish_rhs)
2181 sched_deps_info->finish_rhs ();
2187 /* op0 = op0 + op1 */
2188 sched_analyze_2 (deps, XEXP (x, 0), insn);
2189 sched_analyze_2 (deps, XEXP (x, 1), insn);
2190 sched_analyze_1 (deps, x, insn);
2192 if (cslr_p && sched_deps_info->finish_rhs)
2193 sched_deps_info->finish_rhs ();
2201 /* Other cases: walk the insn. */
2202 fmt = GET_RTX_FORMAT (code);
2203 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2206 sched_analyze_2 (deps, XEXP (x, i), insn);
2207 else if (fmt[i] == 'E')
2208 for (j = 0; j < XVECLEN (x, i); j++)
2209 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2212 if (cslr_p && sched_deps_info->finish_rhs)
2213 sched_deps_info->finish_rhs ();
2216 /* Analyze an INSN with pattern X to find all dependencies. */
2218 sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
2220 RTX_CODE code = GET_CODE (x);
2223 reg_set_iterator rsi;
2225 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2228 if (code == COND_EXEC)
2230 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2232 /* ??? Should be recording conditions so we reduce the number of
2233 false dependencies. */
2234 x = COND_EXEC_CODE (x);
2235 code = GET_CODE (x);
2237 if (code == SET || code == CLOBBER)
2239 sched_analyze_1 (deps, x, insn);
2241 /* Bare clobber insns are used for letting life analysis, reg-stack
2242 and others know that a value is dead. Depend on the last call
2243 instruction so that reg-stack won't get confused. */
2244 if (code == CLOBBER)
2245 add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_OUTPUT);
2247 else if (code == PARALLEL)
2249 for (i = XVECLEN (x, 0); i--;)
2251 rtx sub = XVECEXP (x, 0, i);
2252 code = GET_CODE (sub);
2254 if (code == COND_EXEC)
2256 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2257 sub = COND_EXEC_CODE (sub);
2258 code = GET_CODE (sub);
2260 if (code == SET || code == CLOBBER)
2261 sched_analyze_1 (deps, sub, insn);
2263 sched_analyze_2 (deps, sub, insn);
2267 sched_analyze_2 (deps, x, insn);
2269 /* Mark registers CLOBBERED or used by called function. */
2272 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2274 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2275 sched_analyze_1 (deps, XEXP (link, 0), insn);
2277 sched_analyze_2 (deps, XEXP (link, 0), insn);
2279 if (find_reg_note (insn, REG_SETJMP, NULL))
2280 reg_pending_barrier = MOVE_BARRIER;
/* Jump handling: a jump followed by a barrier is a move barrier;
   otherwise record register and memory dependencies for the jump.  */
2286 next = next_nonnote_insn (insn);
2287 if (next && BARRIER_P (next))
2288 reg_pending_barrier = MOVE_BARRIER;
2291 rtx pending, pending_mem;
2293 if (sched_deps_info->compute_jump_reg_dependencies)
2295 regset_head tmp_uses, tmp_sets;
2296 INIT_REG_SET (&tmp_uses);
2297 INIT_REG_SET (&tmp_sets);
2299 (*sched_deps_info->compute_jump_reg_dependencies)
2300 (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
2301 /* Make latency of jump equal to 0 by using anti-dependence. */
2302 EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
2304 struct deps_reg *reg_last = &deps->reg_last[i];
2305 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2306 add_dependence_list (insn, reg_last->clobbers, 0,
2309 if (!deps->readonly)
2311 reg_last->uses_length++;
2312 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2315 IOR_REG_SET (reg_pending_sets, &tmp_sets);
2317 CLEAR_REG_SET (&tmp_uses);
2318 CLEAR_REG_SET (&tmp_sets);
2321 /* All memory writes and volatile reads must happen before the
2322 jump. Non-volatile reads must happen before the jump iff
2323 the result is needed by the above register used mask. */
2325 pending = deps->pending_write_insns;
2326 pending_mem = deps->pending_write_mems;
2329 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2330 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2331 pending = XEXP (pending, 1);
2332 pending_mem = XEXP (pending_mem, 1);
2335 pending = deps->pending_read_insns;
2336 pending_mem = deps->pending_read_mems;
2339 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2340 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2341 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2342 pending = XEXP (pending, 1);
2343 pending_mem = XEXP (pending_mem, 1);
2346 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2351 /* If this instruction can throw an exception, then moving it changes
2352 where block boundaries fall. This is mighty confusing elsewhere.
2353 Therefore, prevent such an instruction from being moved. Same for
2354 non-jump instructions that define block boundaries.
2355 ??? Unclear whether this is still necessary in EBB mode. If not,
2356 add_branch_dependences should be adjusted for RGN mode instead. */
2357 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2358 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2359 reg_pending_barrier = MOVE_BARRIER;
2361 /* If the current insn is conditional, we can't free any
2363 if (sched_has_condition_p (insn))
2365 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2367 struct deps_reg *reg_last = &deps->reg_last[i];
2368 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2369 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2371 if (!deps->readonly)
2373 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2374 reg_last->uses_length++;
2377 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2379 struct deps_reg *reg_last = &deps->reg_last[i];
2380 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2381 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2383 if (!deps->readonly)
2385 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
2386 reg_last->clobbers_length++;
2389 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2391 struct deps_reg *reg_last = &deps->reg_last[i];
2392 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2393 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
2394 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2396 if (!deps->readonly)
2398 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2399 SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
/* Unconditional insn: old lists may be freed once their
   dependencies have been recorded.  */
2405 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2407 struct deps_reg *reg_last = &deps->reg_last[i];
2408 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2409 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2411 if (!deps->readonly)
2413 reg_last->uses_length++;
2414 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2417 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2419 struct deps_reg *reg_last = &deps->reg_last[i];
2420 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
2421 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
/* NOTE(review): the "®_last" tokens below are mojibake for
   "&reg_last" (an HTML "&reg;" entity artifact).  These calls must
   pass pointers, cf. add_dependence_list_and_free's rtx *listp
   parameter.  Fix the encoding.  */
2423 add_dependence_list_and_free (deps, insn, ®_last->sets, 0,
2425 add_dependence_list_and_free (deps, insn, ®_last->uses, 0,
2427 add_dependence_list_and_free (deps, insn, ®_last->clobbers, 0,
2430 if (!deps->readonly)
2432 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2433 reg_last->clobbers_length = 0;
2434 reg_last->uses_length = 0;
2439 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2440 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2443 if (!deps->readonly)
2445 reg_last->clobbers_length++;
2446 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
2449 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2451 struct deps_reg *reg_last = &deps->reg_last[i];
/* NOTE(review): "®_last" is again mojibake for "&reg_last" in the
   three calls below.  */
2452 add_dependence_list_and_free (deps, insn, ®_last->sets, 0,
2454 add_dependence_list_and_free (deps, insn, ®_last->clobbers, 0,
2456 add_dependence_list_and_free (deps, insn, ®_last->uses, 0,
2459 if (!deps->readonly)
2461 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2462 reg_last->uses_length = 0;
2463 reg_last->clobbers_length = 0;
2464 CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2469 if (!deps->readonly)
2471 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
2472 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
2473 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
2475 /* Set up the pending barrier found. */
2476 deps->last_reg_pending_barrier = reg_pending_barrier;
2479 CLEAR_REG_SET (reg_pending_uses);
2480 CLEAR_REG_SET (reg_pending_clobbers);
2481 CLEAR_REG_SET (reg_pending_sets);
2483 /* Add dependencies if a scheduling barrier was found. */
2484 if (reg_pending_barrier)
2486 /* In the case of barrier the most added dependencies are not
2487 real, so we use anti-dependence here. */
2488 if (sched_has_condition_p (insn))
2490 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2492 struct deps_reg *reg_last = &deps->reg_last[i];
2493 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2495 (insn, reg_last->sets, 0,
2496 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
2498 (insn, reg_last->clobbers, 0,
2499 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
2504 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2506 struct deps_reg *reg_last = &deps->reg_last[i];
/* NOTE(review): same "®_last" -> "&reg_last" mojibake in the three
   calls below.  */
2507 add_dependence_list_and_free (deps, insn, ®_last->uses, 0,
2509 add_dependence_list_and_free
2510 (deps, insn, ®_last->sets, 0,
2511 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
2512 add_dependence_list_and_free
2513 (deps, insn, ®_last->clobbers, 0,
2514 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
2516 if (!deps->readonly)
2518 reg_last->uses_length = 0;
2519 reg_last->clobbers_length = 0;
2524 if (!deps->readonly)
2525 for (i = 0; i < (unsigned)deps->max_reg; i++)
2527 struct deps_reg *reg_last = &deps->reg_last[i];
2528 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2529 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
2532 /* Flush pending lists on jumps, but not on speculative checks. */
2533 if (JUMP_P (insn) && !(sel_sched_p ()
2534 && sel_insn_is_speculation_check (insn)))
2535 flush_pending_lists (deps, insn, true, true);
2537 if (!deps->readonly)
2538 CLEAR_REG_SET (&deps->reg_conditional_sets);
2539 reg_pending_barrier = NOT_A_BARRIER;
2542 /* If a post-call group is still open, see if it should remain so.
2543 This insn must be a simple move of a hard reg to a pseudo or
2546 We must avoid moving these insns for correctness on
2547 SMALL_REGISTER_CLASS machines, and for special registers like
2548 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
2549 hard regs for all targets. */
2551 if (deps->in_post_call_group_p)
2553 rtx tmp, set = single_set (insn);
2554 int src_regno, dest_regno;
2557 goto end_call_group;
2559 tmp = SET_DEST (set);
2560 if (GET_CODE (tmp) == SUBREG)
2561 tmp = SUBREG_REG (tmp);
2563 dest_regno = REGNO (tmp);
2565 goto end_call_group;
2567 tmp = SET_SRC (set);
2568 if (GET_CODE (tmp) == SUBREG)
2569 tmp = SUBREG_REG (tmp);
2570 if ((GET_CODE (tmp) == PLUS
2571 || GET_CODE (tmp) == MINUS)
2572 && REG_P (XEXP (tmp, 0))
2573 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
2574 && dest_regno == STACK_POINTER_REGNUM)
2575 src_regno = STACK_POINTER_REGNUM;
2576 else if (REG_P (tmp))
2577 src_regno = REGNO (tmp);
2579 goto end_call_group;
2581 if (src_regno < FIRST_PSEUDO_REGISTER
2582 || dest_regno < FIRST_PSEUDO_REGISTER)
2585 && deps->in_post_call_group_p == post_call_initial)
2586 deps->in_post_call_group_p = post_call;
2588 if (!sel_sched_p () || sched_emulate_haifa_p)
2590 SCHED_GROUP_P (insn) = 1;
2591 CANT_MOVE (insn) = 1;
/* end_call_group: the move no longer qualifies; close the group.  */
2597 if (!deps->readonly)
2598 deps->in_post_call_group_p = not_post_call;
2602 if ((current_sched_info->flags & DO_SPECULATION)
2603 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
2604 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
2608 sel_mark_hard_insn (insn);
2611 sd_iterator_def sd_it;
/* Demote any speculative back deps of a non-speculatable insn to
   hard dependencies.  */
2614 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
2615 sd_iterator_cond (&sd_it, &dep);)
2616 change_spec_dep_to_hard (sd_it);
2621 /* Analyze INSN with DEPS as a context. */
/* Per-insn driver of backward-dependence analysis.  Dispatches on the
   insn class: ordinary insns and jumps have their PATTERN analyzed
   (jumps also act as memory-reference barriers); calls are made
   immovable and conservatively set/clobber/use hard registers before
   flushing the pending memory lists; notes are sanity-checked.  The
   sched_deps_info start/finish hooks bracket the work.  */
2623 deps_analyze_insn (struct deps *deps, rtx insn)
2625 if (sched_deps_info->start_insn)
2626 sched_deps_info->start_insn (insn);
2628 if (NONJUMP_INSN_P (insn) || JUMP_P (insn))
2630 /* Make each JUMP_INSN (but not a speculative check)
2631 a scheduling barrier for memory references. */
2635 && sel_insn_is_speculation_check (insn)))
2637 /* Keep the list a reasonable size. */
2638 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
2639 flush_pending_lists (deps, insn, true, true);
2641 deps->last_pending_memory_flush
2642 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
2645 sched_analyze_insn (deps, PATTERN (insn), insn);
2647 else if (CALL_P (insn))
/* Calls may not be moved across other insns by the scheduler.  */
2651 CANT_MOVE (insn) = 1;
2653 if (find_reg_note (insn, REG_SETJMP, NULL))
2655 /* This is setjmp. Assume that all registers, not just
2656 hard registers, may be clobbered by this call. */
2657 reg_pending_barrier = MOVE_BARRIER;
/* Classify every hard register by how this call may touch it.  */
2661 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2662 /* A call may read and modify global register variables. */
2665 SET_REGNO_REG_SET (reg_pending_sets, i);
2666 SET_REGNO_REG_SET (reg_pending_uses, i);
2668 /* Other call-clobbered hard regs may be clobbered.
2669 Since we only have a choice between 'might be clobbered'
2670 and 'definitely not clobbered', we must include all
2671 partly call-clobbered registers here. */
2672 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
2673 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
2674 SET_REGNO_REG_SET (reg_pending_clobbers, i);
2675 /* We don't know what set of fixed registers might be used
2676 by the function, but it is certain that the stack pointer
2677 is among them, but be conservative. */
2678 else if (fixed_regs[i])
2679 SET_REGNO_REG_SET (reg_pending_uses, i);
2680 /* The frame pointer is normally not used by the function
2681 itself, but by the debugger. */
2682 /* ??? MIPS o32 is an exception. It uses the frame pointer
2683 in the macro expansion of jal but does not represent this
2684 fact in the call_insn rtl. */
2685 else if (i == FRAME_POINTER_REGNUM
2686 || (i == HARD_FRAME_POINTER_REGNUM
2687 && (! reload_completed || frame_pointer_needed)))
2688 SET_REGNO_REG_SET (reg_pending_uses, i);
2691 /* For each insn which shouldn't cross a call, add a dependence
2692 between that insn and this call insn. */
2693 add_dependence_list_and_free (deps, insn,
2694 &deps->sched_before_next_call, 1,
2697 sched_analyze_insn (deps, PATTERN (insn), insn);
2699 /* If CALL would be in a sched group, then this will violate
2700 convention that sched group insns have dependencies only on the
2701 previous instruction.
2703 Of course one can say: "Hey! What about head of the sched group?"
2704 And I will answer: "Basic principles (one dep per insn) are always
2706 gcc_assert (!SCHED_GROUP_P (insn));
2708 /* In the absence of interprocedural alias analysis, we must flush
2709 all pending reads and writes, and start new dependencies starting
2710 from here. But only flush writes for constant calls (which may
2711 be passed a pointer to something we haven't written yet). */
2712 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
2714 if (!deps->readonly)
2716 /* Remember the last function call for limiting lifetimes. */
2717 free_INSN_LIST_list (&deps->last_function_call);
2718 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
2720 /* Before reload, begin a post-call group, so as to keep the
2721 lifetimes of hard registers correct. */
2722 if (! reload_completed)
2723 deps->in_post_call_group_p = post_call;
2727 if (sched_deps_info->use_cselib)
2728 cselib_process_insn (insn);
2730 /* EH_REGION insn notes can not appear until well after we complete
2733 gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
2734 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
2736 if (sched_deps_info->finish_insn)
2737 sched_deps_info->finish_insn ();
2739 /* Fixup the dependencies in the sched group. */
2740 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
2741 && SCHED_GROUP_P (insn) && !sel_sched_p ())
2742 fixup_sched_groups (insn);
2745 /* Initialize DEPS for the new block beginning with HEAD. */
/* Only a pre-reload, non-label head needs work: if the previous block
   ended in a call we must resume post-call-group tracking.  DEPS must
   be writable (asserted below).  */
2747 deps_start_bb (struct deps *deps, rtx head)
2749 gcc_assert (!deps->readonly);
2751 /* Before reload, if the previous block ended in a call, show that
2752 we are inside a post-call group, so as to keep the lifetimes of
2753 hard registers correct. */
2754 if (! reload_completed && !LABEL_P (head))
2756 rtx insn = prev_nonnote_insn (head);
2758 if (insn && CALL_P (insn))
2759 deps->in_post_call_group_p = post_call_initial;
2763 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
2764 dependencies for each insn. */
/* Region-level driver: optionally sets up cselib, initializes the
   per-block state via deps_start_bb, then walks the insn chain calling
   sd_init_insn / deps_analyze_insn for each insn.  */
2766 sched_analyze (struct deps *deps, rtx head, rtx tail)
2770 if (sched_deps_info->use_cselib)
2773 deps_start_bb (deps, head);
2775 for (insn = head;; insn = NEXT_INSN (insn))
2780 /* And initialize deps_lists. */
2781 sd_init_insn (insn);
2784 deps_analyze_insn (deps, insn);
/* Tear down cselib state when it was enabled above.  */
2788 if (sched_deps_info->use_cselib)
2796 /* Helper for sched_free_deps ().
2797 Delete INSN's (RESOLVED_P) backward dependencies. */
/* Iterates INSN's resolved or unresolved backward list (chosen by
   RESOLVED_P), unlinking each dep_link from its backward list and
   freeing the owning dep_node.  Forward links are handled by the
   caller (sched_free_deps).  */
2799 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
2801 sd_iterator_def sd_it;
2803 sd_list_types_def types;
2806 types = SD_LIST_RES_BACK;
2808 types = SD_LIST_BACK;
/* No sd_iterator_next here: removing the current link advances us.  */
2810 for (sd_it = sd_iterator_start (insn, types);
2811 sd_iterator_cond (&sd_it, &dep);)
2813 dep_link_t link = *sd_it.linkp;
2814 dep_node_t node = DEP_LINK_NODE (link);
2815 deps_list_t back_list;
2816 deps_list_t forw_list;
2818 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
2819 remove_from_deps_list (link, back_list);
2820 delete_dep_node (node);
2824 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
/* For each insn with a valid luid in [HEAD, TAIL]: free its backward
   dep_nodes, clear its forward lists (their nodes are owned by the
   consumers' back lists), then release its deps_lists via
   sd_finish_insn.  */
2827 sched_free_deps (rtx head, rtx tail, bool resolved_p)
2830 rtx next_tail = NEXT_INSN (tail);
2832 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2833 if (INSN_P (insn) && INSN_LUID (insn) > 0)
2835 /* Clear resolved back deps together with its dep_nodes. */
2836 delete_dep_nodes_in_back_deps (insn, resolved_p);
2838 /* Clear forward deps and leave the dep_nodes to the
2839 corresponding back_deps list. */
2841 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
2843 clear_deps_list (INSN_FORW_DEPS (insn));
2845 sd_finish_insn (insn);
2849 /* Initialize variables for region data dependence analysis.
2850 n_bbs is the number of region blocks. */
/* Zeroes every pending list and counter in *DEPS and allocates the
   per-register last-use/set/clobber table.  After reload only hard
   registers exist, hence the smaller table size.  */
2853 init_deps (struct deps *deps)
2855 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
2857 deps->max_reg = max_reg;
/* XCNEWVEC zero-initializes, so all reg_last entries start empty.  */
2858 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
2859 INIT_REG_SET (&deps->reg_last_in_use);
2860 INIT_REG_SET (&deps->reg_conditional_sets);
2862 deps->pending_read_insns = 0;
2863 deps->pending_read_mems = 0;
2864 deps->pending_write_insns = 0;
2865 deps->pending_write_mems = 0;
2866 deps->pending_read_list_length = 0;
2867 deps->pending_write_list_length = 0;
2868 deps->pending_flush_length = 0;
2869 deps->last_pending_memory_flush = 0;
2870 deps->last_function_call = 0;
2871 deps->sched_before_next_call = 0;
2872 deps->in_post_call_group_p = not_post_call;
2873 deps->last_reg_pending_barrier = NOT_A_BARRIER;
2877 /* Free insn lists found in DEPS. */
2880 free_deps (struct deps *deps)
2883 reg_set_iterator rsi;
2885 free_INSN_LIST_list (&deps->pending_read_insns);
2886 free_EXPR_LIST_list (&deps->pending_read_mems);
2887 free_INSN_LIST_list (&deps->pending_write_insns);
2888 free_EXPR_LIST_list (&deps->pending_write_mems);
2889 free_INSN_LIST_list (&deps->last_pending_memory_flush);
2891 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
2892 times. For a testcase with 42000 regs and 8000 small basic blocks,
2893 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
2894 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2896 struct deps_reg *reg_last = &deps->reg_last[i];
2898 free_INSN_LIST_list (®_last->uses);
2900 free_INSN_LIST_list (®_last->sets);
2901 if (reg_last->clobbers)
2902 free_INSN_LIST_list (®_last->clobbers);
2904 CLEAR_REG_SET (&deps->reg_last_in_use);
2905 CLEAR_REG_SET (&deps->reg_conditional_sets);
2907 free (deps->reg_last);
2908 deps->reg_last = NULL;
2913 /* Remove INSN from dependence contexts DEPS. Caution: reg_conditional_sets
2916 remove_from_deps (struct deps *deps, rtx insn)
2920 reg_set_iterator rsi;
2922 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
2923 &deps->pending_read_mems);
2924 deps->pending_read_list_length -= removed;
2925 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
2926 &deps->pending_write_mems);
2927 deps->pending_write_list_length -= removed;
2928 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
2929 deps->pending_flush_length -= removed;
2931 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2933 struct deps_reg *reg_last = &deps->reg_last[i];
2935 remove_from_dependence_list (insn, ®_last->uses);
2937 remove_from_dependence_list (insn, ®_last->sets);
2938 if (reg_last->clobbers)
2939 remove_from_dependence_list (insn, ®_last->clobbers);
2940 if (!reg_last->uses && !reg_last->sets && !reg_last->clobbers)
2941 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
2945 remove_from_dependence_list (insn, &deps->last_function_call);
2946 remove_from_dependence_list (insn, &deps->sched_before_next_call);
2949 /* Init deps data vector. */
/* Ensures the Haifa per-insn data vector h_d_i_d has room for
   sched_max_luid entries, growing it (zero-cleared) with 50% headroom
   when it is short on space.  */
2951 init_deps_data_vector (void)
2953 int reserve = (sched_max_luid + 1
2954 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
2956 && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
2957 VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
2958 3 * sched_max_luid / 2);
2961 /* If it is profitable to use them, initialize or extend (depending on
2962 GLOBAL_P) dependency data. */
/* Grows the per-insn data vector; optionally turns on the dependency
   bitmap caches for dense blocks (Haifa scheduling only); allocates
   the deps_list / dep_node pools sized by the average block.  */
2964 sched_deps_init (bool global_p)
2966 /* Average number of insns in the basic block.
2967 '+ 1' is used to make it nonzero. */
2968 int insns_in_block = sched_max_luid / n_basic_blocks + 1;
2970 init_deps_data_vector ();
2972 /* We use another caching mechanism for selective scheduling, so
2973 we don't use this one. */
2974 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
2976 /* ?!? We could save some memory by computing a per-region luid mapping
2977 which could reduce both the number of vectors in the cache and the
2978 size of each vector. Instead we just avoid the cache entirely unless
2979 the average number of instructions in a basic block is very high. See
2980 the comment before the declaration of true_dependency_cache for
2981 what we consider "very high". */
2983 extend_dependency_caches (sched_max_luid, true);
2988 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
2989 /* Allocate lists for one block at a time. */
2991 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
2992 /* Allocate nodes for one block at a time.
2993 We assume that average insn has
2995 5 * insns_in_block);
3000 /* Create or extend (depending on CREATE_P) dependency caches to
/* Resizes the true/output/anti (and, with DO_SPECULATION, spec)
   per-luid bitmap arrays by N entries and initializes the new
   bitmap heads.  A no-op when neither creating nor already created.  */
3003 extend_dependency_caches (int n, bool create_p)
3005 if (create_p || true_dependency_cache)
3007 int i, luid = cache_size + n;
3009 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
3011 output_dependency_cache = XRESIZEVEC (bitmap_head,
3012 output_dependency_cache, luid);
3013 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
3016 if (current_sched_info->flags & DO_SPECULATION)
3017 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
/* Only the newly added entries [old cache_size, luid) are initialized.  */
3020 for (i = cache_size; i < luid; i++)
3022 bitmap_initialize (&true_dependency_cache[i], 0);
3023 bitmap_initialize (&output_dependency_cache[i], 0);
3024 bitmap_initialize (&anti_dependency_cache[i], 0);
3026 if (current_sched_info->flags & DO_SPECULATION)
3027 bitmap_initialize (&spec_dependency_cache[i], 0);
3033 /* Finalize dependency information for the whole function. */
/* Releases the (expected-empty) node/list pools, the Haifa per-insn
   data vector, and — if they were created — all dependency bitmap
   caches.  Inverse of sched_deps_init.  */
3035 sched_deps_finish (void)
3037 gcc_assert (deps_pools_are_empty_p ());
3038 free_alloc_pool_if_empty (&dn_pool);
3039 free_alloc_pool_if_empty (&dl_pool);
3040 gcc_assert (dn_pool == NULL && dl_pool == NULL);
3042 VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
3045 if (true_dependency_cache)
/* Clear each bitmap before freeing the arrays that hold the heads.  */
3049 for (i = 0; i < cache_size; i++)
3051 bitmap_clear (&true_dependency_cache[i]);
3052 bitmap_clear (&output_dependency_cache[i]);
3053 bitmap_clear (&anti_dependency_cache[i]);
3055 if (sched_deps_info->generate_spec_deps)
3056 bitmap_clear (&spec_dependency_cache[i]);
3058 free (true_dependency_cache);
3059 true_dependency_cache = NULL;
3060 free (output_dependency_cache);
3061 output_dependency_cache = NULL;
3062 free (anti_dependency_cache);
3063 anti_dependency_cache = NULL;
3065 if (sched_deps_info->generate_spec_deps)
3067 free (spec_dependency_cache);
3068 spec_dependency_cache = NULL;
3074 /* Initialize some global variables needed by the dependency analysis
3078 init_deps_global (void)
3080 reg_pending_sets = ALLOC_REG_SET (®_obstack);
3081 reg_pending_clobbers = ALLOC_REG_SET (®_obstack);
3082 reg_pending_uses = ALLOC_REG_SET (®_obstack);
3083 reg_pending_barrier = NOT_A_BARRIER;
3085 if (!sel_sched_p () || sched_emulate_haifa_p)
3087 sched_deps_info->start_insn = haifa_start_insn;
3088 sched_deps_info->finish_insn = haifa_finish_insn;
3090 sched_deps_info->note_reg_set = haifa_note_reg_set;
3091 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
3092 sched_deps_info->note_reg_use = haifa_note_reg_use;
3094 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
3095 sched_deps_info->note_dep = haifa_note_dep;
3099 /* Free everything used by the dependency analysis code. */
/* Inverse of init_deps_global: releases the three pending register
   sets allocated there.  */
3102 finish_deps_global (void)
3104 FREE_REG_SET (reg_pending_sets);
3105 FREE_REG_SET (reg_pending_clobbers);
3106 FREE_REG_SET (reg_pending_uses);
3109 /* Estimate the weakness of dependence between MEM1 and MEM2. */
/* Returns a dep-weakness heuristic from the address RTXes: identical
   MEMs or identical base registers are certainly dependent
   (MIN_DEP_WEAK); mixed addressing modes are treated as more likely
   independent; otherwise fall back to UNCERTAIN_DEP_WEAK.  */
3111 estimate_dep_weak (rtx mem1, rtx mem2)
3116 /* MEMs are the same - don't speculate. */
3117 return MIN_DEP_WEAK;
3119 r1 = XEXP (mem1, 0);
3120 r2 = XEXP (mem2, 0);
3123 || (REG_P (r1) && REG_P (r2)
3124 && REGNO (r1) == REGNO (r2)))
3125 /* Again, MEMs are the same. */
3126 return MIN_DEP_WEAK;
3127 else if ((REG_P (r1) && !REG_P (r2))
3128 || (!REG_P (r1) && REG_P (r2)))
3129 /* Different addressing modes - reason to be more speculative,
3131 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
3133 /* We can't say anything about the dependence. */
3134 return UNCERTAIN_DEP_WEAK;
3137 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
3138 This function can handle same INSN and ELEM (INSN == ELEM).
3139 It is a convenience wrapper. */
/* Converts the reg_note DEP_TYPE into a dep_status mask, then records
   the dependence via note_dep.  The cur_insn check distinguishes
   internal (sched-deps.c) callers from external ones.  */
3141 add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
3146 if (dep_type == REG_DEP_TRUE)
3148 else if (dep_type == REG_DEP_OUTPUT)
3152 gcc_assert (dep_type == REG_DEP_ANTI);
3156 /* When add_dependence is called from inside sched-deps.c, we expect
3157 cur_insn to be non-null. */
3158 internal = cur_insn != NULL;
3160 gcc_assert (insn == cur_insn);
3164 note_dep (elem, ds);
3169 /* Return weakness of speculative type TYPE in the dep_status DS. */
/* Shifts DS right by TYPE's bit-field offset so the caller can mask
   out the weakness value; unknown TYPE values are a hard error.  */
3171 get_dep_weak_1 (ds_t ds, ds_t type)
3177 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
3178 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
3179 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
3180 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
3181 default: gcc_unreachable ();
/* Checked variant of get_dep_weak_1: asserts the extracted weakness
   lies in [MIN_DEP_WEAK, MAX_DEP_WEAK] before returning it.  */
3188 get_dep_weak (ds_t ds, ds_t type)
3190 dw_t dw = get_dep_weak_1 (ds, type);
3192 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3196 /* Return the dep_status, which has the same parameters as DS, except for
3197 speculative type TYPE, that will have weakness DW. */
/* ORs DW into TYPE's bit-field position; DW must already be in range
   (asserted).  Inverse of get_dep_weak for a single type.  */
3199 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
3201 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3206 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
3207 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
3208 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
3209 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
3210 default: gcc_unreachable ();
3215 /* Return the join of two dep_statuses DS1 and DS2.
3216 If MAX_P is true then choose the greater probability,
3217 otherwise multiply probabilities.
3218 This function assumes that both DS1 and DS2 contain speculative bits. */
/* Unions the dependence-type bits, then walks every speculative type:
   a weakness present on only one side is kept as-is; when both sides
   carry it, the weaknesses are combined per MAX_P.  */
3220 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
3224 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
3226 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
3228 t = FIRST_SPEC_TYPE;
3231 if ((ds1 & t) && !(ds2 & t))
3233 else if (!(ds1 & t) && (ds2 & t))
3235 else if ((ds1 & t) && (ds2 & t))
3237 dw_t dw1 = get_dep_weak (ds1, t);
3238 dw_t dw2 = get_dep_weak (ds2, t);
/* Multiplying in the wider ds_t type; result is clamped below.  */
3243 dw = ((ds_t) dw1) * ((ds_t) dw2);
3245 if (dw < MIN_DEP_WEAK)
3256 ds = set_dep_weak (ds, t, (dw_t) dw);
3259 if (t == LAST_SPEC_TYPE)
3261 t <<= SPEC_TYPE_SHIFT;
3268 /* Return the join of two dep_statuses DS1 and DS2.
3269 This function assumes that both DS1 and DS2 contain speculative bits. */
/* Probability-multiplying variant of ds_merge_1 (MAX_P == false).  */
3271 ds_merge (ds_t ds1, ds_t ds2)
3273 return ds_merge_1 (ds1, ds2, false);
3276 /* Return the join of two dep_statuses DS1 and DS2. */
/* Unlike ds_merge, tolerates non-speculative inputs: if either side is
   a non-speculative dependence the result is made non-speculative;
   if exactly one side is speculative, its data-speculation weakness is
   re-estimated from MEM1/MEM2; only when both are speculative does the
   full ds_merge run.  */
3278 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
3280 ds_t new_status = ds | ds2;
3282 if (new_status & SPECULATIVE)
3284 if ((ds && !(ds & SPECULATIVE))
3285 || (ds2 && !(ds2 & SPECULATIVE)))
3286 /* Then this dep can't be speculative. */
3287 new_status &= ~SPECULATIVE;
3290 /* Both are speculative. Merging probabilities. */
3295 dw = estimate_dep_weak (mem1, mem2);
3296 ds = set_dep_weak (ds, BEGIN_DATA, dw);
3304 new_status = ds_merge (ds2, ds);
3311 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
/* Degenerate cases short-circuit: if either status is zero the other
   is returned unchanged; otherwise merge with MAX_P == true.  */
3314 ds_max_merge (ds_t ds1, ds_t ds2)
3316 if (ds1 == 0 && ds2 == 0)
3319 if (ds1 == 0 && ds2 != 0)
3322 if (ds1 != 0 && ds2 == 0)
3325 return ds_merge_1 (ds1, ds2, true);
3328 /* Return the probability of speculation success for the speculation
/* NOTE(review): the signature is not visible in this view — presumably
   this is ds_weak (ds_t ds); confirm against the full file.  Multiplies
   the weaknesses of all speculative types present in DS, normalizes by
   MAX_DEP_WEAK per factor, and clamps the result into
   [MIN_DEP_WEAK, MAX_DEP_WEAK].  */
3336 dt = FIRST_SPEC_TYPE;
3341 res *= (ds_t) get_dep_weak (ds, dt);
3345 if (dt == LAST_SPEC_TYPE)
3347 dt <<= SPEC_TYPE_SHIFT;
3353 res /= MAX_DEP_WEAK;
3355 if (res < MIN_DEP_WEAK)
3358 gcc_assert (res <= MAX_DEP_WEAK);
3363 /* Return a dep status that contains all speculation types of DS. */
/* For each speculation kind whose weakness bits are set in DS, sets
   that kind's flag, then masks to the SPECULATIVE bits only — i.e.
   returns just the speculation-type flags, discarding weaknesses and
   dependence-type bits.  */
3365 ds_get_speculation_types (ds_t ds)
3367 if (ds & BEGIN_DATA)
3369 if (ds & BE_IN_DATA)
3371 if (ds & BEGIN_CONTROL)
3372 ds |= BEGIN_CONTROL;
3373 if (ds & BE_IN_CONTROL)
3374 ds |= BE_IN_CONTROL;
3376 return ds & SPECULATIVE;
3379 /* Return a dep status that contains maximal weakness for each speculation
3380 type present in DS. */
/* Overwrites each present speculation type's weakness with
   MAX_DEP_WEAK, leaving absent types and non-speculative bits alone.  */
3382 ds_get_max_dep_weak (ds_t ds)
3384 if (ds & BEGIN_DATA)
3385 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
3386 if (ds & BE_IN_DATA)
3387 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
3388 if (ds & BEGIN_CONTROL)
3389 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
3390 if (ds & BE_IN_CONTROL)
3391 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
3396 /* Dump information about the dependence status S. */
/* Prints, to stream F, each speculation type's weakness present in S
   followed by the plain dependence flags (HARD/TRUE/ANTI/OUTPUT).  */
3398 dump_ds (FILE *f, ds_t s)
3403 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
3405 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
3406 if (s & BEGIN_CONTROL)
3407 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
3408 if (s & BE_IN_CONTROL)
3409 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
3412 fprintf (f, "HARD_DEP; ");
3415 fprintf (f, "DEP_TRUE; ");
3417 fprintf (f, "DEP_ANTI; ");
3419 fprintf (f, "DEP_OUTPUT; ");
/* NOTE(review): the enclosing definition's signature is not visible
   here — presumably debug_ds (ds_t s), a debugger convenience that
   dumps S to stderr and terminates the line.  Confirm against the
   full file.  */
3427 dump_ds (stderr, s);
3428 fprintf (stderr, "\n");
3431 #ifdef ENABLE_CHECKING
3432 /* Verify that dependence type and status are consistent.
3433 If RELAXED_P is true, then skip dep_weakness checks. */
/* Consistency checker, compiled only with ENABLE_CHECKING: asserts
   that DEP's reg_note type matches its status bits, that HARD_DEP is
   absent from links, and that speculation flags obey the scheduler's
   invariants (data speculation only on true deps, control speculation
   only on anti deps, output deps never speculative).  */
3435 check_dep (dep_t dep, bool relaxed_p)
3437 enum reg_note dt = DEP_TYPE (dep);
3438 ds_t ds = DEP_STATUS (dep);
/* A dependence between an insn and itself is never legitimate.  */
3440 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
3442 if (!(current_sched_info->flags & USE_DEPS_LIST))
3444 gcc_assert (ds == -1);
3448 /* Check that dependence type contains the same bits as the status. */
3449 if (dt == REG_DEP_TRUE)
3450 gcc_assert (ds & DEP_TRUE);
3451 else if (dt == REG_DEP_OUTPUT)
3452 gcc_assert ((ds & DEP_OUTPUT)
3453 && !(ds & DEP_TRUE));
3455 gcc_assert ((dt == REG_DEP_ANTI)
3457 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
3459 /* HARD_DEP can not appear in dep_status of a link. */
3460 gcc_assert (!(ds & HARD_DEP));
3462 /* Check that dependence status is set correctly when speculation is not
3464 if (!sched_deps_info->generate_spec_deps)
3465 gcc_assert (!(ds & SPECULATIVE));
3466 else if (ds & SPECULATIVE)
3470 ds_t type = FIRST_SPEC_TYPE;
3472 /* Check that dependence weakness is in proper range. */
/* get_dep_weak itself asserts the range; the call is the check.  */
3476 get_dep_weak (ds, type);
3478 if (type == LAST_SPEC_TYPE)
3480 type <<= SPEC_TYPE_SHIFT;
3485 if (ds & BEGIN_SPEC)
3487 /* Only true dependence can be data speculative. */
3488 if (ds & BEGIN_DATA)
3489 gcc_assert (ds & DEP_TRUE);
3491 /* Control dependencies in the insn scheduler are represented by
3492 anti-dependencies, therefore only anti dependence can be
3493 control speculative. */
3494 if (ds & BEGIN_CONTROL)
3495 gcc_assert (ds & DEP_ANTI);
3499 /* Subsequent speculations should resolve true dependencies. */
3500 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
3503 /* Check that true and anti dependencies can't have other speculative
3506 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
3507 /* An output dependence can't be speculative at all. */
3508 gcc_assert (!(ds & DEP_OUTPUT));
3510 gcc_assert (ds & BEGIN_CONTROL);
3513 #endif /* ENABLE_CHECKING */
3515 #endif /* INSN_SCHEDULING */