/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"

#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
#define CHECK (false)
#endif
/* An entry in deps->last_pending_memory_flush with this note kind marks a
   JUMP_INSN that wasn't added to the list by flush_pending_lists; such an
   entry stands just for itself and not for any other pending memory
   reads/writes.  */
#define NON_FLUSH_JUMP_KIND REG_DEP_ANTI
#define NON_FLUSH_JUMP_P(x) (REG_NOTE_KIND (x) == NON_FLUSH_JUMP_KIND)
/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;
  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;
  gcc_assert (ds & DEP_ANTI);
  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  if (dk == REG_DEP_TRUE)
    return DEP_TRUE;
  if (dk == REG_DEP_OUTPUT)
    return DEP_OUTPUT;
  gcc_assert (dk == REG_DEP_ANTI);
  return DEP_ANTI;
}
/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
}
/* Init DEP with the arguments.
   While most of the scheduler (including targets) needs only the major type
   of the dependency, it is convenient to hide the full dep_status from them.  */
void
init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = -1;

  init_dep_1 (dep, pro, con, kind, ds);
}
/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}
static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
		      | DUMP_DEP_STATUS)
/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;
	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;
	case REG_DEP_ANTI:
	  t = 'a';
	  break;
	default:
	  gcc_unreachable ();
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}
/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}
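
/* Example (illustrative sketch): to dump only the endpoints and type of
   a dependence DEP to a dump file F, combine the flags defined above:

       dump_dep (f, dep, DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE);

   Passing 1, as sd_debug_dep does, sets the "dump everything" bit and
   is equivalent to passing DUMP_DEP_ALL.  */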
/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */
static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}
/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXTP.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}
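
/* Example (illustrative sketch): the PREV_NEXTP pointer-to-pointer idiom
   used above lets one routine insert either at the head of a list or
   after any existing link without special-casing the head.  Assuming
   LIST and X are valid:

       attach_dep_link (l, &DEPS_LIST_FIRST (list));
       attach_dep_link (l, &DEP_LINK_NEXT (x));

   The first call prepends L to LIST; the second inserts L right after
   link X.  */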
/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}
/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}
/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}
/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}
/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}
/* Pool to hold all dependency nodes (dep_node_t).  */
static alloc_pool dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;
/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}
/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  --dn_pool_diff;

  pool_free (dn_pool, n);
}
/* Pool to hold dependencies lists (deps_list_t).  */
static alloc_pool dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;
/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}
/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}
/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  pool_free (dl_pool, l);
}
/* Return true if there are no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}
/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  dep_link_t link;

  while ((link = DEPS_LIST_FIRST (l)) != NULL)
    remove_from_deps_list (link, l);
}
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;
/* Hard registers implicitly clobbered or used (or may be implicitly
   clobbered or used) by the currently analyzed insn.  For example, an
   insn may have a constraint that admits only one register class.
   Even if there is currently no hard register in the insn, the
   particular hard register will be in the insn after the reload pass
   because the constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has an associated bitmap for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  When the bitmap for the true-dependency cache is
   allocated, the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;
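
/* Example (illustrative sketch): with the caches above, testing whether
   a dependence of insn CON (LUID i) on insn PRO (LUID j) has already
   been recorded reduces to bitmap probes, as ask_dependency_caches and
   sd_find_dep_between below do:

       if (bitmap_bit_p (&true_dependency_cache[i], j)
	   || bitmap_bit_p (&output_dependency_cache[i], j)
	   || bitmap_bit_p (&anti_dependency_cache[i], j))
	 (...some dependence PRO -> CON is already known...)  */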
static int deps_may_trap_p (const_rtx);
static void add_dependence_list (rtx, rtx, int, enum reg_note);
static void add_dependence_list_and_free (struct deps_desc *, rtx,
					  rtx *, int, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);

static void flush_pending_lists (struct deps_desc *, rtx, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx);

static bool sched_has_condition_p (const_rtx);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
#endif
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */
static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev (const_rtx insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (pat == 0)
    return 0;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}
/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const_rtx insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}
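
/* Example (illustrative sketch): for a conditional jump of the form

       (set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
			       (label_ref 23)
			       (pc)))

   sched_get_condition_with_rev returns the (eq ...) comparison and
   leaves *REV false, since the branch is taken exactly when the
   condition holds.  Were the label_ref and (pc) arms swapped, the same
   comparison would be returned with *REV set to true, meaning it must
   be reversed to obtain the actual condition.  */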
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	 (rev1 == rev2
	  ? reversed_comparison_code (cond2, NULL)
	  : GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }

  return false;
}
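
/* Example (illustrative sketch): two predicated insns such as

       (cond_exec (eq (reg:CC 17) (const_int 0)) (set (reg 100) ...))
       (cond_exec (ne (reg:CC 17) (const_int 0)) (set (reg 101) ...))

   execute under mutually exclusive conditions, so
   sched_insns_conditions_mutex_p returns true for them and the
   analyzer need not record any dependence between the two sets.  */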
/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If instruction might fault, it cannot be speculatively scheduled.
	   For control speculation it's obvious why and for data speculation
	   it's because the insn might get wrong input if speculation
	   wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}
/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}
/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
	size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}
/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}
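
/* Example (illustrative sketch): a typical walk over all backward
   dependencies of INSN using the iterator machinery declared in
   sched-int.h:

       sd_iterator_def sd_it;
       dep_t dep;

       FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
	 if (DEP_TYPE (dep) == REG_DEP_TRUE)
	   (...process the true dependence on DEP_PRO (dep)...)  */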
/* Initialize data for INSN.  */
void
sd_init_insn (rtx insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}
/* Free data for INSN.  */
void
sd_finish_insn (rtx insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check whether a dependency
   is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none was found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      gcc_assert (output_dependency_cache != NULL
		  && anti_dependency_cache != NULL);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
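
/* Example (illustrative sketch): querying for an existing dependence
   between two insns PRO and CON and dumping it when found:

       dep_t dep = sd_find_dep_between (pro, con, false);

       if (dep != NULL)
	 sd_debug_dep (dep);  */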
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks whether producer and consumer are the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
	/* INSN has an internal dependence, which we can't overcome.  */
	HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}
/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL);
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	       ..we should update DEP_STATUS
	     else
	       ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}
/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}
/* The type of dependence DEP has changed from OLD_TYPE.  Update
   dependency caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear the corresponding cache entry because the type of the link
     may have changed.  Keep the entries if we use dependency lists.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}
/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}
/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case if
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	/* Either existing dep or a dep we're adding or both are
	   speculative.  */
	{
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    {
	      new_status &= ~SPECULATIVE;

	      if (dep_status & SPECULATIVE)
		/* The old dep was speculative, but now it
		   isn't.  */
		change_spec_dep_to_hard (sd_it);
	    }
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);
#endif

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep; it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}
/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx con = DEP_CON (dep);

  if (!resolved_p)
    {
      if ((current_sched_info->flags & DO_SPECULATION)
	  && (DEP_STATUS (dep) & SPECULATIVE))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}
/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION)
      && !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef ENABLE_CHECKING
  check_dep (dep, false);
#endif

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}
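
/* Example (illustrative sketch): building a fresh anti dependence of
   CON on PRO from scratch, in the same way haifa_note_dep below does:

       dep_def _dep, *dep = &_dep;

       init_dep (dep, pro, con, REG_DEP_ANTI);
       sd_add_dep (dep, false);

   sd_add_or_update_dep would instead first look for an existing
   dependence between the two insns and merge with it.  */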
/* Add or update a backward dependence between INSN and ELEM with the given
   type DEP_TYPE and dep_status DS, as described by DEP.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}
/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);

  if ((current_sched_info->flags & DO_SPECULATION)
      && (DEP_STATUS (dep) & SPECULATIVE))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}
/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}
/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}
/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }

  fprintf (dump, "]");
}
/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}
/* A convenience wrapper to operate on an entire list.  */
static void
add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
{
  for (; list; list = XEXP (list, 1))
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
	add_dependence (insn, XEXP (list, 0), dep_type);
    }
}
/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  */
static void
add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
			      int uncond, enum reg_note dep_type)
{
  rtx list, next;

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    {
      add_dependence_list (insn, *listp, uncond, dep_type);
      return;
    }

  for (list = *listp, *listp = NULL; list ; list = next)
    {
      next = XEXP (list, 1);
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
	add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
}
/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */
static int
remove_from_dependence_list (rtx insn, rtx* listp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  removed++;
	  continue;
	}

      listp = &XEXP (*listp, 1);
    }

  return removed;
}
/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  remove_free_EXPR_LIST_node (exprp);
	  removed++;
	  continue;
	}

      listp = &XEXP (*listp, 1);
      exprp = &XEXP (*exprp, 1);
    }

  return removed;
}
/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     sd_delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */
static void
fixup_sched_groups (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx i = insn;
      rtx pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
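
/* Example (illustrative sketch): given the sequence

       (1)  MEM[a] = r1     ; store
       (2)  r2 = MEM[a]     ; load
       (3)  MEM[a] = r3     ; store

   insn (2) gets a true dependence on (1), and insn (3) gets an anti
   dependence on (2) plus an output dependence on (1).  A second load
   from MEM[a] after (2) would add only a read dependence, which does
   not constrain the relative order of the two loads.  */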
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */
static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
			 rtx insn, rtx mem)
{
  rtx *insn_list;
  rtx *mem_list;
  rtx link;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }
  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = link;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */
static void
flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
		     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
				    1, REG_DEP_ANTI);
      if (!deps->readonly)
	{
	  free_EXPR_LIST_list (&deps->pending_read_mems);
	  deps->pending_read_list_length = 0;
	}
    }

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);

  add_dependence_list_and_free (deps, insn,
				&deps->last_pending_memory_flush, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
}
/* Instruction whose dependencies we are analyzing.  */
static rtx cur_insn = NULL_RTX;

/* Implement hooks for haifa scheduler.  */

static void
haifa_start_insn (rtx insn)
{
  gcc_assert (insn && !cur_insn);

  cur_insn = insn;
}

static void
haifa_finish_insn (void)
{
  cur_insn = NULL;
}

static void
haifa_note_reg_set (int regno)
{
  SET_REGNO_REG_SET (reg_pending_sets, regno);
}

static void
haifa_note_reg_clobber (int regno)
{
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
}

static void
haifa_note_reg_use (int regno)
{
  SET_REGNO_REG_SET (reg_pending_uses, regno);
}

static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
{
  if (!(ds & SPECULATIVE))
    {
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
		current_sched_info->flags & USE_DEPS_LIST ? ds : -1);
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }
}

static void
haifa_note_dep (rtx elem, ds_t ds)
{
  dep_def _dep, *dep = &_dep;

  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}
static void
note_reg_use (int r)
{
  if (sched_deps_info->note_reg_use)
    sched_deps_info->note_reg_use (r);
}

static void
note_reg_set (int r)
{
  if (sched_deps_info->note_reg_set)
    sched_deps_info->note_reg_set (r);
}

static void
note_reg_clobber (int r)
{
  if (sched_deps_info->note_reg_clobber)
    sched_deps_info->note_reg_clobber (r);
}

static void
note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
{
  if (sched_deps_info->note_mem_dep)
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
}

static void
note_dep (rtx e, ds_t ds)
{
  if (sched_deps_info->note_dep)
    sched_deps_info->note_dep (e, ds);
}
/* Return the reg_note corresponding to DS.  */
static enum reg_note
ds_to_dt (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;
  else if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;
  else
    {
      gcc_assert (ds & DEP_ANTI);
      return REG_DEP_ANTI;
    }
}
/* Functions for computation of info needed for register pressure
   sensitive insn scheduling.  */

/* Allocate and return reg_use_data structure for REGNO and INSN.  */
static struct reg_use_data *
create_insn_reg_use (int regno, rtx insn)
{
  struct reg_use_data *use;

  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
  use->regno = regno;
  use->insn = insn;
  use->next_insn_use = INSN_REG_USE_LIST (insn);
  INSN_REG_USE_LIST (insn) = use;
  return use;
}
/* Allocate and return reg_set_data structure for REGNO and INSN.  */
static struct reg_set_data *
create_insn_reg_set (int regno, rtx insn)
{
  struct reg_set_data *set;

  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
  set->regno = regno;
  set->insn = insn;
  set->next_insn_set = INSN_REG_SET_LIST (insn);
  INSN_REG_SET_LIST (insn) = set;
  return set;
}
/* Set up insn register uses for INSN and dependency context DEPS.  */
static void
setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
{
  unsigned i;
  reg_set_iterator rsi;
  rtx list;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      if (i < FIRST_PSEUDO_REGISTER
	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
	continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
	/* Ignore use which is not dying.  */
	continue;

      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (list = reg_last->uses; list; list = XEXP (list, 1))
	{
	  use2 = create_insn_reg_use (i, XEXP (list, 0));
	  next = use->next_regno_use;
	  use->next_regno_use = use2;
	  use2->next_regno_use = next;
	}
    }
}
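
/* Example (illustrative sketch): after the loop above, all the dying
   uses of a register form one circular list threaded through
   next_regno_use, so starting from any use U every insn using that
   register can be visited:

       struct reg_use_data *u = use;

       do
	 {
	   (...inspect u->insn...)
	   u = u->next_regno_use;
	 }
       while (u != use);  */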
/* Register pressure info for the currently processed insn.  */
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];

/* Return TRUE if INSN has the use structure for REGNO.  */
static bool
insn_use_p (rtx insn, int regno)
{
  struct reg_use_data *use;

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (use->regno == regno)
      return true;
  return false;
}
/* Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say, correspondingly, whether
   the register is in a clobber or is unused after the insn.  */
static void
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
{
  int incr, new_incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_cover_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      if (clobber_p)
	{
	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
	  reg_pressure_info[cl].clobber_increase = new_incr;
	}
      else if (unused_p)
	{
	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
	  reg_pressure_info[cl].unused_set_increase = new_incr;
	}
      else
	{
	  new_incr = reg_pressure_info[cl].set_increase + incr;
	  reg_pressure_info[cl].set_increase = new_incr;
	  if (! insn_use_p (insn, regno))
	    reg_pressure_info[cl].change += incr;
	  create_insn_reg_set (regno, insn);
	}
      gcc_assert (new_incr < (1 << INCREASE_BITS));
    }
}
/* Like mark_insn_pseudo_birth except that NREGS says how many
   hard registers are involved in the birth.  */
static void
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
			    bool clobber_p, bool unused_p)
{
  enum reg_class cl;
  int new_incr, last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_cover_class[regno];
	  if (cl != NO_REGS)
	    {
	      if (clobber_p)
		{
		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
		  reg_pressure_info[cl].clobber_increase = new_incr;
		}
	      else if (unused_p)
		{
		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
		  reg_pressure_info[cl].unused_set_increase = new_incr;
		}
	      else
		{
		  new_incr = reg_pressure_info[cl].set_increase + 1;
		  reg_pressure_info[cl].set_increase = new_incr;
		  if (! insn_use_p (insn, regno))
		    reg_pressure_info[cl].change += 1;
		  create_insn_reg_set (regno, insn);
		}
	      gcc_assert (new_incr < (1 << INCREASE_BITS));
	    }
	}
      regno++;
    }
}
/* Update the register pressure info after birth of pseudo or hard
   register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say,
   correspondingly, whether the register is in a clobber or is unused
   after the insn.  */
static void
mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_insn_hard_regno_birth (insn, regno,
				hard_regno_nregs[regno][GET_MODE (reg)],
				clobber_p, unused_p);
  else
    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
}
/* Update the register pressure info after death of pseudo register
   REGNO.  */
static void
mark_pseudo_death (int regno)
{
  int incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_cover_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      reg_pressure_info[cl].change -= incr;
    }
}
/* Like mark_pseudo_death except that NREGS says how many hard
   registers are involved in the death.  */
static void
mark_hard_regno_death (int regno, int nregs)
{
  enum reg_class cl;
  int last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_cover_class[regno];
	  if (cl != NO_REGS)
	    reg_pressure_info[cl].change -= 1;
	}
      regno++;
    }
}
/* Update the register pressure info after death of pseudo or hard
   register REG.  */
static void
mark_reg_death (rtx reg)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
  else
    mark_pseudo_death (regno);
}
/* Process SETTER of REG.  DATA is an insn containing the setter.  */
static void
mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
{
  if (setter != NULL_RTX && GET_CODE (setter) != SET)
    mark_insn_reg_birth ((rtx) data, reg, true, false);
  else
    mark_insn_reg_birth
      ((rtx) data, reg, false,
       find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
}
/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
static void
mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
{
  if (GET_CODE (setter) == CLOBBER)
    mark_insn_reg_birth ((rtx) data, reg, true, false);
}
/* Set up reg pressure info related to INSN.  */
static void
setup_insn_reg_pressure_info (rtx insn)
{
  int i, len;
  enum reg_class cl;
  static struct reg_pressure_data *pressure_info;
  rtx link;

  gcc_assert (sched_pressure_p);

  if (! INSN_P (insn))
    return;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cl = ira_reg_class_cover[i];
      reg_pressure_info[cl].clobber_increase = 0;
      reg_pressure_info[cl].set_increase = 0;
      reg_pressure_info[cl].unused_set_increase = 0;
      reg_pressure_info[cl].change = 0;
    }

  note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);

  note_stores (PATTERN (insn), mark_insn_reg_store, insn);

#ifdef AUTO_INC_DEC
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
#endif

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_DEAD)
      mark_reg_death (XEXP (link, 0));

  len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size;
  pressure_info
    = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
  INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_reg_class_cover_size
						  * sizeof (int), 1);
  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cl = ira_reg_class_cover[i];
      pressure_info[i].clobber_increase
	= reg_pressure_info[cl].clobber_increase;
      pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
      pressure_info[i].unused_set_increase
	= reg_pressure_info[cl].unused_set_increase;
      pressure_info[i].change = reg_pressure_info[cl].change;
    }
}
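
/* Example (illustrative sketch): once setup_insn_reg_pressure_info has
   run for INSN, the scheduler can read the per-cover-class records:

       struct reg_pressure_data *p = INSN_REG_PRESSURE (insn);

       (...p[i].change is the net pressure change INSN causes in class
	   ira_reg_class_cover[i]...)  */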
/* Internal variable for sched_analyze_[12] () functions.
   If it is nonzero, this means that sched_analyze_[12] looks
   at the most toplevel SET.  */
static bool can_start_lhs_rhs_p;

/* Extend reg info for the deps context DEPS given that
   we have just generated a register numbered REGNO.  */
static void
extend_deps_reg_info (struct deps_desc *deps, int regno)
{
  int max_regno = regno + 1;

  gcc_assert (!reload_completed);

  /* In a readonly context, it would not hurt to extend info,
     but it should not be needed.  */
  if (reload_completed && deps->readonly)
    {
      deps->max_reg = max_regno;
      return;
    }

  if (max_regno > deps->max_reg)
    {
      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
				   max_regno);
      memset (&deps->reg_last[deps->max_reg],
	      0, (max_regno - deps->max_reg)
	      * sizeof (struct deps_reg));
      deps->max_reg = max_regno;
    }
}
/* Extends REG_INFO_P if needed.  */
static void
maybe_extend_reg_info_p (void)
{
  /* Extend REG_INFO_P, if needed.  */
  if ((unsigned int) max_regno - 1 >= reg_info_p_size)
    {
      size_t new_reg_info_p_size = max_regno + 128;

      gcc_assert (!reload_completed && sel_sched_p ());

      reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
						    new_reg_info_p_size,
						    reg_info_p_size,
						    sizeof (*reg_info_p));
      reg_info_p_size = new_reg_info_p_size;
    }
}
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */
static void
sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
		   enum rtx_code ref, rtx insn)
{
  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
  if (!reload_completed && sel_sched_p ()
      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
    extend_deps_reg_info (deps, regno);

  maybe_extend_reg_info_p ();
  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i = hard_regno_nregs[regno][mode];
      if (ref == SET)
	{
	  while (--i >= 0)
	    note_reg_set (regno + i);
	}
      else if (ref == USE)
	{
	  while (--i >= 0)
	    note_reg_use (regno + i);
	}
      else
	{
	  while (--i >= 0)
	    note_reg_clobber (regno + i);
	}
    }
  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
    {
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);
    }

  else
    {
      if (ref == SET)
	note_reg_set (regno);
      else if (ref == USE)
	note_reg_use (regno);
      else
	note_reg_clobber (regno);
      /* Pseudos that are REG_EQUIV to something may be replaced
	 by that during reloading.  We need only add dependencies for
	 the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
	{
	  rtx t = get_reg_known_value (regno);
	  if (MEM_P (t))
	    sched_analyze_2 (deps, XEXP (t, 0), insn);
	}

      /* Don't let it cross a call after scheduling if it doesn't
	 already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
	{
	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
	    deps->sched_before_next_call
	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
	  else
	    add_dependence_list (insn, deps->last_function_call, 1,
				 REG_DEP_ANTI);
	}
    }
}
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */
static void
sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
{
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  if (cslr_p && sched_deps_info->start_lhs)
    sched_deps_info->start_lhs (dest);

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	  sched_analyze_1 (deps,
			   gen_rtx_CLOBBER (VOIDmode,
					    XEXP (XVECEXP (dest, 0, i), 0)),
			   insn);

      if (cslr_p && sched_deps_info->finish_lhs)
	sched_deps_info->finish_lhs ();

      if (code == SET)
	{
	  can_start_lhs_rhs_p = cslr_p;

	  sched_analyze_2 (deps, SET_SRC (x), insn);

	  can_start_lhs_rhs_p = false;
	}

      return;
    }
  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT)
    {
      if (GET_CODE (dest) == STRICT_LOW_PART
	  || GET_CODE (dest) == ZERO_EXTRACT
	  || df_read_modify_subreg_p (dest))
	{
	  /* These both read and modify the result.  We must handle
	     them as writes to get proper dependencies for following
	     instructions.  We must handle them as reads to get proper
	     dependencies from this to previous instructions.
	     Thus we need to call sched_analyze_2.  */
	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
	}
      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
	}
      dest = XEXP (dest, 0);
    }
  if (REG_P (dest))
    {
      int regno = REGNO (dest);
      enum machine_mode mode = GET_MODE (dest);

      sched_analyze_reg (deps, regno, mode, code, insn);

#ifdef STACK_REGS
      /* Treat all writes to a stack register as modifying the TOS.  */
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
	{
	  int nregs;

	  /* Avoid analyzing the same register twice.  */
	  if (regno != FIRST_STACK_REG)
	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);

	  nregs = hard_regno_nregs[FIRST_STACK_REG][mode];
	  while (--nregs >= 0)
	    SET_HARD_REG_BIT (implicit_reg_pending_uses,
			      FIRST_STACK_REG + nregs);
	}
#endif
    }
  else if (MEM_P (dest))
    {
      /* Writing memory.  */
      rtx t = dest;

      if (sched_deps_info->use_cselib)
	{
	  enum machine_mode address_mode
	    = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));

	  t = shallow_copy_rtx (dest);
	  cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
	  XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	}
      t = canon_rtx (t);

      /* Pending lists can't get larger with a readonly context.  */
      if (!deps->readonly
	  && ((deps->pending_read_list_length + deps->pending_write_list_length)
	      > MAX_PENDING_LIST_LENGTH))
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the default value of 32.  */
	  flush_pending_lists (deps, insn, false, true);
	}
      else
	{
	  rtx pending, pending_mem;

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (anti_dependence (XEXP (pending_mem, 0), t)
		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
			      DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (output_dependence (XEXP (pending_mem, 0), t)
		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
			      DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
			       REG_DEP_ANTI);

	  if (!deps->readonly)
	    add_insn_mem_dependence (deps, false, insn, dest);
	}

      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }
  if (cslr_p && sched_deps_info->finish_lhs)
    sched_deps_info->finish_lhs ();

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    {
      can_start_lhs_rhs_p = cslr_p;

      sched_analyze_2 (deps, SET_SRC (x), insn);

      can_start_lhs_rhs_p = false;
    }
}
/* Analyze the uses of memory and registers in rtx X in INSN.  */
static void
sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  if (cslr_p && sched_deps_info->start_rhs)
    sched_deps_info->start_rhs (x);

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  */
      if (cslr_p && sched_deps_info->finish_rhs)
	sched_deps_info->finish_rhs ();

      return;
#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      SCHED_GROUP_P (insn) = 1;
      /* Don't move CC0 setter to another block (it can set up the
	 same flag for previous CC0 users which is safe).  */
      CANT_MOVE (prev_nonnote_insn (insn)) = 1;

      if (cslr_p && sched_deps_info->finish_rhs)
	sched_deps_info->finish_rhs ();

      return;
#endif
    case REG:
      {
	int regno = REGNO (x);
	enum machine_mode mode = GET_MODE (x);

	sched_analyze_reg (deps, regno, mode, USE, insn);

#ifdef STACK_REGS
	/* Treat all reads of a stack register as modifying the TOS.  */
	if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
	  {
	    /* Avoid analyzing the same register twice.  */
	    if (regno != FIRST_STACK_REG)
	      sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
	  }
#endif

	if (cslr_p && sched_deps_info->finish_rhs)
	  sched_deps_info->finish_rhs ();

	return;
      }
    case MEM:
      {
	/* Reading memory.  */
	rtx u;
	rtx pending, pending_mem;
	rtx t = x;

	if (sched_deps_info->use_cselib)
	  {
	    enum machine_mode address_mode
	      = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));

	    t = shallow_copy_rtx (t);
	    cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
	    XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	  }

	if (!DEBUG_INSN_P (insn))
	  {
	    t = canon_rtx (t);
	    pending = deps->pending_read_insns;
	    pending_mem = deps->pending_read_mems;
	    while (pending)
	      {
		if (read_dependence (XEXP (pending_mem, 0), t)
		    && ! sched_insns_conditions_mutex_p (insn,
							 XEXP (pending, 0)))
		  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
				DEP_ANTI);

		pending = XEXP (pending, 1);
		pending_mem = XEXP (pending_mem, 1);
	      }

	    pending = deps->pending_write_insns;
	    pending_mem = deps->pending_write_mems;
	    while (pending)
	      {
		if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
				     t, rtx_varies_p)
		    && ! sched_insns_conditions_mutex_p (insn,
							 XEXP (pending, 0)))
		  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
				sched_deps_info->generate_spec_deps
				? BEGIN_DATA | DEP_TRUE : DEP_TRUE);

		pending = XEXP (pending, 1);
		pending_mem = XEXP (pending_mem, 1);
	      }

	    for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
	      {
		if (! NON_FLUSH_JUMP_P (u))
		  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
		else if (deps_may_trap_p (x))
		  {
		    if ((sched_deps_info->generate_spec_deps)
			&& sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
		      {
			ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
						MAX_DEP_WEAK);

			note_dep (XEXP (u, 0), ds);
		      }
		    else
		      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
		  }
	      }
	  }

	/* Always add these dependencies to pending_reads, since
	   this insn may be followed by a write.  */
	if (!deps->readonly)
	  add_insn_mem_dependence (deps, true, insn, x);

	sched_analyze_2 (deps, XEXP (x, 0), insn);

	if (cslr_p && sched_deps_info->finish_rhs)
	  sched_deps_info->finish_rhs ();

	return;
      }
2513 /* Force pending stores to memory in case a trap handler needs them. */
2515 flush_pending_lists (deps, insn, true, false);
2519 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2520 reg_pending_barrier = TRUE_BARRIER;
2523 case UNSPEC_VOLATILE:
2524 flush_pending_lists (deps, insn, true, true);
2530 /* Traditional and volatile asm instructions must be considered to use
2531 and clobber all hard registers, all pseudo-registers and all of
2532 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2534 Consider for instance a volatile asm that changes the fpu rounding
2535 mode. An insn should not be moved across this even if it only uses
2536 pseudo-regs because it might give an incorrectly rounded result. */
2537 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2538 reg_pending_barrier = TRUE_BARRIER;
2540 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2541 We cannot simply fall through here, since we would then be confused
2542 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2543 a traditional asm the way it does in its normal usage. */
2545 if (code == ASM_OPERANDS)
2547 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2548 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2550 if (cslr_p && sched_deps_info->finish_rhs)
2551 sched_deps_info->finish_rhs ();
2562 /* These both read and modify the result. We must handle them as writes
2563 to get proper dependencies for following instructions. We must handle
2564 them as reads to get proper dependencies from this to previous
2565 instructions. Thus we need to pass them to both sched_analyze_1
2566 and sched_analyze_2. We must call sched_analyze_2 first in order
2567 to get the proper antecedent for the read. */
2568 sched_analyze_2 (deps, XEXP (x, 0), insn);
2569 sched_analyze_1 (deps, x, insn);
2571 if (cslr_p && sched_deps_info->finish_rhs)
2572 sched_deps_info->finish_rhs ();
2578 /* op0 = op0 + op1 */
2579 sched_analyze_2 (deps, XEXP (x, 0), insn);
2580 sched_analyze_2 (deps, XEXP (x, 1), insn);
2581 sched_analyze_1 (deps, x, insn);
2583 if (cslr_p && sched_deps_info->finish_rhs)
2584 sched_deps_info->finish_rhs ();
2592 /* Other cases: walk the insn. */
2593 fmt = GET_RTX_FORMAT (code);
2594 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2597 sched_analyze_2 (deps, XEXP (x, i), insn);
2598 else if (fmt[i] == 'E')
2599 for (j = 0; j < XVECLEN (x, i); j++)
2600 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2603 if (cslr_p && sched_deps_info->finish_rhs)
2604 sched_deps_info->finish_rhs ();
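/* The 'e'/'E' walk in the default case above is the standard way to
   visit every sub-rtx.  The same pattern as a generic recursive
   visitor, with a hypothetical callback:  */
#if 0
static void
example_walk_rtx (rtx x, void (*visit) (rtx))
{
  const char *fmt;
  int i, j;

  if (x == NULL_RTX)
    return;

  visit (x);

  /* GET_RTX_FORMAT describes the operands: 'e' is a single
     sub-expression, 'E' a vector of sub-expressions.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      example_walk_rtx (XEXP (x, i), visit);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	example_walk_rtx (XVECEXP (x, i, j), visit);
}
#endif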
2607 /* Analyze an INSN with pattern X to find all dependencies. */
2609 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2611 RTX_CODE code = GET_CODE (x);
2614 reg_set_iterator rsi;
2616 if (! reload_completed)
2620 extract_insn (insn);
2621 preprocess_constraints ();
2622 ira_implicitly_set_insn_hard_regs (&temp);
2623 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2624 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2627 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2631 /* Avoid moving trapping instructions across function calls that might
2632 not always return. */
2633 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2636 if (code == COND_EXEC)
2638 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2640 /* ??? Should be recording conditions so we reduce the number of
2641 false dependencies. */
2642 x = COND_EXEC_CODE (x);
2643 code = GET_CODE (x);
2645 if (code == SET || code == CLOBBER)
2647 sched_analyze_1 (deps, x, insn);
2649 /* Bare clobber insns are used for letting life analysis, reg-stack
2650 and others know that a value is dead. Depend on the last call
2651 instruction so that reg-stack won't get confused. */
2652 if (code == CLOBBER)
2653 add_dependence_list (insn, deps->last_function_call, 1,
2656 else if (code == PARALLEL)
2658 for (i = XVECLEN (x, 0); i--;)
2660 rtx sub = XVECEXP (x, 0, i);
2661 code = GET_CODE (sub);
2663 if (code == COND_EXEC)
2665 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2666 sub = COND_EXEC_CODE (sub);
2667 code = GET_CODE (sub);
2669 if (code == SET || code == CLOBBER)
2670 sched_analyze_1 (deps, sub, insn);
2672 sched_analyze_2 (deps, sub, insn);
2676 sched_analyze_2 (deps, x, insn);
2678 /* Mark registers CLOBBERED or used by the called function. */
2681 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2683 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2684 sched_analyze_1 (deps, XEXP (link, 0), insn);
2686 sched_analyze_2 (deps, XEXP (link, 0), insn);
2688 if (find_reg_note (insn, REG_SETJMP, NULL))
2689 reg_pending_barrier = MOVE_BARRIER;
2695 next = next_nonnote_nondebug_insn (insn);
2696 if (next && BARRIER_P (next))
2697 reg_pending_barrier = MOVE_BARRIER;
2700 rtx pending, pending_mem;
2702 if (sched_deps_info->compute_jump_reg_dependencies)
2704 regset_head tmp_uses, tmp_sets;
2705 INIT_REG_SET (&tmp_uses);
2706 INIT_REG_SET (&tmp_sets);
2708 (*sched_deps_info->compute_jump_reg_dependencies)
2709 (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
2710 /* Make latency of jump equal to 0 by using anti-dependence. */
2711 EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
2713 struct deps_reg *reg_last = &deps->reg_last[i];
2714 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2715 add_dependence_list (insn, reg_last->implicit_sets,
2717 add_dependence_list (insn, reg_last->clobbers, 0,
2720 if (!deps->readonly)
2722 reg_last->uses_length++;
2723 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2726 IOR_REG_SET (reg_pending_sets, &tmp_sets);
2728 CLEAR_REG_SET (&tmp_uses);
2729 CLEAR_REG_SET (&tmp_sets);
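/* For instance, given "r1 = ...; if (r1) goto L", the jump gains an
   anti-dependence on the setter of r1 instead of a true dependence:
   ordering is preserved, but the anti-dependence contributes no
   latency, so the jump need not wait out the setter's result
   latency.  */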
2732 /* All memory writes and volatile reads must happen before the
2733 jump. Non-volatile reads must happen before the jump iff
2734 the result is needed by the register-use mask computed above. */
2736 pending = deps->pending_write_insns;
2737 pending_mem = deps->pending_write_mems;
2740 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2741 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2742 pending = XEXP (pending, 1);
2743 pending_mem = XEXP (pending_mem, 1);
2746 pending = deps->pending_read_insns;
2747 pending_mem = deps->pending_read_mems;
2750 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2751 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2752 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2753 pending = XEXP (pending, 1);
2754 pending_mem = XEXP (pending_mem, 1);
2757 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2762 /* If this instruction can throw an exception, then moving it changes
2763 where block boundaries fall. This is mighty confusing elsewhere.
2764 Therefore, prevent such an instruction from being moved. Same for
2765 non-jump instructions that define block boundaries.
2766 ??? Unclear whether this is still necessary in EBB mode. If not,
2767 add_branch_dependences should be adjusted for RGN mode instead. */
2768 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2769 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2770 reg_pending_barrier = MOVE_BARRIER;
2772 if (sched_pressure_p)
2774 setup_insn_reg_uses (deps, insn);
2775 setup_insn_reg_pressure_info (insn);
2778 /* Add register dependencies for insn. */
2779 if (DEBUG_INSN_P (insn))
2781 rtx prev = deps->last_debug_insn;
2784 if (!deps->readonly)
2785 deps->last_debug_insn = insn;
2788 add_dependence (insn, prev, REG_DEP_ANTI);
2790 add_dependence_list (insn, deps->last_function_call, 1,
2793 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2794 if (! NON_FLUSH_JUMP_P (u) || !sel_sched_p ())
2795 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2797 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2799 struct deps_reg *reg_last = &deps->reg_last[i];
2800 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
2801 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
2803 if (!deps->readonly)
2804 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2806 CLEAR_REG_SET (reg_pending_uses);
2808 /* Quite often, a debug insn will refer to stuff in the
2809 previous instruction, but the reason we want this
2810 dependency here is to make sure the scheduler doesn't
2811 gratuitously move a debug insn ahead. This could dirty
2812 DF flags and cause additional analysis that wouldn't have
2813 occurred in compilation without debug insns, and such
2814 additional analysis can modify the generated code. */
2815 prev = PREV_INSN (insn);
2817 if (prev && NONDEBUG_INSN_P (prev))
2818 add_dependence (insn, prev, REG_DEP_ANTI);
2822 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2824 struct deps_reg *reg_last = &deps->reg_last[i];
2825 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2826 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
2827 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2829 if (!deps->readonly)
2831 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2832 reg_last->uses_length++;
2836 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2837 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
2839 struct deps_reg *reg_last = &deps->reg_last[i];
2840 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2841 add_dependence_list (insn, reg_last->implicit_sets, 0,
2843 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2845 if (!deps->readonly)
2847 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2848 reg_last->uses_length++;
2852 /* If the current insn is conditional, we can't free any of the lists. */
2854 if (sched_has_condition_p (insn))
2856 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2858 struct deps_reg *reg_last = &deps->reg_last[i];
2859 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2860 add_dependence_list (insn, reg_last->implicit_sets, 0,
2862 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2864 if (!deps->readonly)
2867 = alloc_INSN_LIST (insn, reg_last->clobbers);
2868 reg_last->clobbers_length++;
2871 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2873 struct deps_reg *reg_last = &deps->reg_last[i];
2874 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2875 add_dependence_list (insn, reg_last->implicit_sets, 0,
2877 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
2878 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2880 if (!deps->readonly)
2882 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2883 SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2889 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2891 struct deps_reg *reg_last = &deps->reg_last[i];
2892 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
2893 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
2895 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2897 add_dependence_list_and_free (deps, insn,
2898 &reg_last->implicit_sets, 0,
2900 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2902 add_dependence_list_and_free
2903 (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
2905 if (!deps->readonly)
2907 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2908 reg_last->clobbers_length = 0;
2909 reg_last->uses_length = 0;
2914 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2915 add_dependence_list (insn, reg_last->implicit_sets, 0,
2917 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2920 if (!deps->readonly)
2922 reg_last->clobbers_length++;
2924 = alloc_INSN_LIST (insn, reg_last->clobbers);
2927 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2929 struct deps_reg *reg_last = &deps->reg_last[i];
2931 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2933 add_dependence_list_and_free (deps, insn,
2934 &reg_last->implicit_sets,
2936 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
2938 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2941 if (!deps->readonly)
2943 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2944 reg_last->uses_length = 0;
2945 reg_last->clobbers_length = 0;
2946 CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2952 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2953 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2955 struct deps_reg *reg_last = &deps->reg_last[i];
2956 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2957 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
2958 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2960 if (!deps->readonly)
2961 reg_last->implicit_sets
2962 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
2965 if (!deps->readonly)
2967 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
2968 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
2969 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
2970 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2971 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
2972 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2973 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
2975 /* Set up the pending barrier found. */
2976 deps->last_reg_pending_barrier = reg_pending_barrier;
2979 CLEAR_REG_SET (reg_pending_uses);
2980 CLEAR_REG_SET (reg_pending_clobbers);
2981 CLEAR_REG_SET (reg_pending_sets);
2982 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
2983 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
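/* To summarize the pairings used above:

     use after set/clobber   -> REG_DEP_TRUE   (read-after-write)
     set after use           -> REG_DEP_ANTI   (write-after-read)
     set after set/clobber   -> REG_DEP_OUTPUT (write-after-write)
     clobber after set       -> REG_DEP_OUTPUT, clobber after use
                                -> REG_DEP_ANTI

   with the uses_length/clobbers_length counters bounding how long
   each per-register list may grow before it is collapsed into a
   single summary dependence.  */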
2985 /* Add dependencies if a scheduling barrier was found. */
2986 if (reg_pending_barrier)
2988 /* In the case of a barrier, most of the added dependencies are not
2989 real, so we use anti-dependences here. */
2990 if (sched_has_condition_p (insn))
2992 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2994 struct deps_reg *reg_last = &deps->reg_last[i];
2995 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2996 add_dependence_list (insn, reg_last->sets, 0,
2997 reg_pending_barrier == TRUE_BARRIER
2998 ? REG_DEP_TRUE : REG_DEP_ANTI);
2999 add_dependence_list (insn, reg_last->implicit_sets, 0,
3001 add_dependence_list (insn, reg_last->clobbers, 0,
3002 reg_pending_barrier == TRUE_BARRIER
3003 ? REG_DEP_TRUE : REG_DEP_ANTI);
3008 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3010 struct deps_reg *reg_last = &deps->reg_last[i];
3011 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3013 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3014 reg_pending_barrier == TRUE_BARRIER
3015 ? REG_DEP_TRUE : REG_DEP_ANTI);
3016 add_dependence_list_and_free (deps, insn,
3017 &reg_last->implicit_sets, 0,
3019 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3020 reg_pending_barrier == TRUE_BARRIER
3021 ? REG_DEP_TRUE : REG_DEP_ANTI);
3023 if (!deps->readonly)
3025 reg_last->uses_length = 0;
3026 reg_last->clobbers_length = 0;
3031 if (!deps->readonly)
3032 for (i = 0; i < (unsigned)deps->max_reg; i++)
3034 struct deps_reg *reg_last = &deps->reg_last[i];
3035 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3036 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3039 /* Flush pending lists on jumps, but not on speculative checks. */
3040 if (JUMP_P (insn) && !(sel_sched_p ()
3041 && sel_insn_is_speculation_check (insn)))
3042 flush_pending_lists (deps, insn, true, true);
3044 if (!deps->readonly)
3045 CLEAR_REG_SET (&deps->reg_conditional_sets);
3046 reg_pending_barrier = NOT_A_BARRIER;
3049 /* If a post-call group is still open, see if it should remain so.
3050 This insn must be a simple move of a hard reg to a pseudo, or vice versa.
3053 We must avoid moving these insns for correctness on targets
3054 with small register classes, and for special registers like
3055 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3056 hard regs for all targets. */
3058 if (deps->in_post_call_group_p)
3060 rtx tmp, set = single_set (insn);
3061 int src_regno, dest_regno;
3065 if (DEBUG_INSN_P (insn))
3066 /* We don't want to mark debug insns as part of the same
3067 sched group. We know they really aren't, but if we use
3068 debug insns to tell that a call group is over, we'll
3069 get different code if debug insns are not there and
3070 instructions that follow seem like they should be part of the call group.
3073 Also, if we did, fixup_sched_groups() would move the
3074 deps of the debug insn to the call insn, modifying
3075 non-debug post-dependency counts of the debug insn
3076 dependencies and otherwise messing with the scheduling order.
3079 Instead, let such debug insns be scheduled freely, but
3080 keep the call group open in case there are insns that
3081 should be part of it afterwards. Since we grant debug
3082 insns higher priority than even sched group insns, it
3083 will all turn out all right. */
3084 goto debug_dont_end_call_group;
3086 goto end_call_group;
3089 tmp = SET_DEST (set);
3090 if (GET_CODE (tmp) == SUBREG)
3091 tmp = SUBREG_REG (tmp);
3093 dest_regno = REGNO (tmp);
3095 goto end_call_group;
3097 tmp = SET_SRC (set);
3098 if (GET_CODE (tmp) == SUBREG)
3099 tmp = SUBREG_REG (tmp);
3100 if ((GET_CODE (tmp) == PLUS
3101 || GET_CODE (tmp) == MINUS)
3102 && REG_P (XEXP (tmp, 0))
3103 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3104 && dest_regno == STACK_POINTER_REGNUM)
3105 src_regno = STACK_POINTER_REGNUM;
3106 else if (REG_P (tmp))
3107 src_regno = REGNO (tmp);
3109 goto end_call_group;
3111 if (src_regno < FIRST_PSEUDO_REGISTER
3112 || dest_regno < FIRST_PSEUDO_REGISTER)
3115 && deps->in_post_call_group_p == post_call_initial)
3116 deps->in_post_call_group_p = post_call;
3118 if (!sel_sched_p () || sched_emulate_haifa_p)
3120 SCHED_GROUP_P (insn) = 1;
3121 CANT_MOVE (insn) = 1;
3127 if (!deps->readonly)
3128 deps->in_post_call_group_p = not_post_call;
3132 debug_dont_end_call_group:
3133 if ((current_sched_info->flags & DO_SPECULATION)
3134 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3135 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot be speculated. */
3139 sel_mark_hard_insn (insn);
3142 sd_iterator_def sd_it;
3145 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3146 sd_iterator_cond (&sd_it, &dep);)
3147 change_spec_dep_to_hard (sd_it);
3152 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3153 longjmp, loop forever, ...). */
3155 call_may_noreturn_p (rtx insn)
3159 /* const or pure calls that aren't looping will always return. */
3160 if (RTL_CONST_OR_PURE_CALL_P (insn)
3161 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3164 call = PATTERN (insn);
3165 if (GET_CODE (call) == PARALLEL)
3166 call = XVECEXP (call, 0, 0);
3167 if (GET_CODE (call) == SET)
3168 call = SET_SRC (call);
3169 if (GET_CODE (call) == CALL
3170 && MEM_P (XEXP (call, 0))
3171 && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3173 rtx symbol = XEXP (XEXP (call, 0), 0);
3174 if (SYMBOL_REF_DECL (symbol)
3175 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3177 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3179 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3182 case BUILT_IN_BCOPY:
3183 case BUILT_IN_BZERO:
3184 case BUILT_IN_INDEX:
3185 case BUILT_IN_MEMCHR:
3186 case BUILT_IN_MEMCMP:
3187 case BUILT_IN_MEMCPY:
3188 case BUILT_IN_MEMMOVE:
3189 case BUILT_IN_MEMPCPY:
3190 case BUILT_IN_MEMSET:
3191 case BUILT_IN_RINDEX:
3192 case BUILT_IN_STPCPY:
3193 case BUILT_IN_STPNCPY:
3194 case BUILT_IN_STRCAT:
3195 case BUILT_IN_STRCHR:
3196 case BUILT_IN_STRCMP:
3197 case BUILT_IN_STRCPY:
3198 case BUILT_IN_STRCSPN:
3199 case BUILT_IN_STRLEN:
3200 case BUILT_IN_STRNCAT:
3201 case BUILT_IN_STRNCMP:
3202 case BUILT_IN_STRNCPY:
3203 case BUILT_IN_STRPBRK:
3204 case BUILT_IN_STRRCHR:
3205 case BUILT_IN_STRSPN:
3206 case BUILT_IN_STRSTR:
3207 /* Assume certain string/memory builtins always return. */
3215 /* For all other calls assume that they might not always return. */
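/* Example of the shape the unwrapping above handles -- a
   value-returning call inside a PARALLEL:

       (parallel [(set (reg:SI 0)
                       (call (mem:QI (symbol_ref "memcpy")) ...))
                  ...])

   PATTERN yields the PARALLEL, its first element is the SET, SET_SRC
   is the CALL, XEXP (call, 0) is the MEM holding the address, and
   that address is the SYMBOL_REF naming the callee.  */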
3219 /* Analyze INSN with DEPS as a context. */
3221 deps_analyze_insn (struct deps_desc *deps, rtx insn)
3223 if (sched_deps_info->start_insn)
3224 sched_deps_info->start_insn (insn);
3226 if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
3228 /* Make each JUMP_INSN (but not a speculative check)
3229 a scheduling barrier for memory references. */
3233 && sel_insn_is_speculation_check (insn)))
3235 /* Keep the list a reasonable size. */
3236 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3237 flush_pending_lists (deps, insn, true, true);
3240 deps->last_pending_memory_flush
3241 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
3242 /* Signal to sched_analyze_insn that this jump stands
3243 just for itself, not for any other pending memory
3244 reads/writes that flush_pending_lists had to flush. */
3245 PUT_REG_NOTE_KIND (deps->last_pending_memory_flush,
3246 NON_FLUSH_JUMP_KIND);
3250 sched_analyze_insn (deps, PATTERN (insn), insn);
3252 else if (CALL_P (insn))
3256 CANT_MOVE (insn) = 1;
3258 if (find_reg_note (insn, REG_SETJMP, NULL))
3260 /* This is setjmp. Assume that all registers, not just
3261 hard registers, may be clobbered by this call. */
3262 reg_pending_barrier = MOVE_BARRIER;
3266 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3267 /* A call may read and modify global register variables. */
3270 SET_REGNO_REG_SET (reg_pending_sets, i);
3271 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3273 /* Other call-clobbered hard regs may be clobbered.
3274 Since we only have a choice between 'might be clobbered'
3275 and 'definitely not clobbered', we must include all
3276 partly call-clobbered registers here. */
3277 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3278 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3279 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3280 /* We don't know what set of fixed registers might be used
3281 by the function, but the stack pointer is certainly among
3282 them, so be conservative and mark each one as implicitly used. */
3283 else if (fixed_regs[i])
3284 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3285 /* The frame pointer is normally not used by the function
3286 itself, but by the debugger. */
3287 /* ??? MIPS o32 is an exception. It uses the frame pointer
3288 in the macro expansion of jal but does not represent this
3289 fact in the call_insn rtl. */
3290 else if (i == FRAME_POINTER_REGNUM
3291 || (i == HARD_FRAME_POINTER_REGNUM
3292 && (! reload_completed || frame_pointer_needed)))
3293 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3296 /* For each insn which shouldn't cross a call, add a dependence
3297 between that insn and this call insn. */
3298 add_dependence_list_and_free (deps, insn,
3299 &deps->sched_before_next_call, 1,
3302 sched_analyze_insn (deps, PATTERN (insn), insn);
3304 /* If CALL were in a sched group, this would violate the
3305 convention that sched group insns have dependencies only on the
3306 previous instruction.
3308 Of course one can say: "Hey! What about the head of the sched group?"
3309 And I will answer: "Basic principles (one dep per insn) are always worth fighting for." */
3311 gcc_assert (!SCHED_GROUP_P (insn));
3313 /* In the absence of interprocedural alias analysis, we must flush
3314 all pending reads and writes, and start new dependencies
3315 from here. But only flush writes for constant calls (which may
3316 be passed a pointer to something we haven't written yet). */
3317 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3319 if (!deps->readonly)
3321 /* Remember the last function call for limiting lifetimes. */
3322 free_INSN_LIST_list (&deps->last_function_call);
3323 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3325 if (call_may_noreturn_p (insn))
3327 /* Remember the last function call that might not always return
3328 normally for limiting moves of trapping insns. */
3329 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3330 deps->last_function_call_may_noreturn
3331 = alloc_INSN_LIST (insn, NULL_RTX);
3334 /* Before reload, begin a post-call group, so as to keep the
3335 lifetimes of hard registers correct. */
3336 if (! reload_completed)
3337 deps->in_post_call_group_p = post_call;
3341 if (sched_deps_info->use_cselib)
3342 cselib_process_insn (insn);
3344 /* EH_REGION insn notes cannot appear until well after we complete scheduling. */
3347 gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
3348 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
3350 if (sched_deps_info->finish_insn)
3351 sched_deps_info->finish_insn ();
3353 /* Fix up the dependencies in the sched group. */
3354 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3355 && SCHED_GROUP_P (insn) && !sel_sched_p ())
3356 fixup_sched_groups (insn);
3359 /* Initialize DEPS for the new block beginning with HEAD. */
3361 deps_start_bb (struct deps_desc *deps, rtx head)
3363 gcc_assert (!deps->readonly);
3365 /* Before reload, if the previous block ended in a call, show that
3366 we are inside a post-call group, so as to keep the lifetimes of
3367 hard registers correct. */
3368 if (! reload_completed && !LABEL_P (head))
3370 rtx insn = prev_nonnote_nondebug_insn (head);
3372 if (insn && CALL_P (insn))
3373 deps->in_post_call_group_p = post_call_initial;
3377 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3378 dependencies for each insn. */
3380 sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
3384 if (sched_deps_info->use_cselib)
3385 cselib_init (CSELIB_RECORD_MEMORY);
3387 deps_start_bb (deps, head);
3389 for (insn = head;; insn = NEXT_INSN (insn))
3394 /* And initialize deps_lists. */
3395 sd_init_insn (insn);
3398 deps_analyze_insn (deps, insn);
3402 if (sched_deps_info->use_cselib)
3410 /* Helper for sched_free_deps ().
3411 Delete INSN's (RESOLVED_P) backward dependencies. */
3413 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3415 sd_iterator_def sd_it;
3417 sd_list_types_def types;
3420 types = SD_LIST_RES_BACK;
3422 types = SD_LIST_BACK;
3424 for (sd_it = sd_iterator_start (insn, types);
3425 sd_iterator_cond (&sd_it, &dep);)
3427 dep_link_t link = *sd_it.linkp;
3428 dep_node_t node = DEP_LINK_NODE (link);
3429 deps_list_t back_list;
3430 deps_list_t forw_list;
3432 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3433 remove_from_deps_list (link, back_list);
3434 delete_dep_node (node);
3438 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with their deps_lists. */
3441 sched_free_deps (rtx head, rtx tail, bool resolved_p)
3444 rtx next_tail = NEXT_INSN (tail);
3446 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3447 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3449 /* Clear resolved back deps together with their dep_nodes. */
3450 delete_dep_nodes_in_back_deps (insn, resolved_p);
3452 /* Clear forward deps and leave the dep_nodes to the
3453 corresponding back_deps list. */
3455 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3457 clear_deps_list (INSN_FORW_DEPS (insn));
3459 sd_finish_insn (insn);
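/* A minimal sketch of how these entry points compose, assuming HEAD
   and TAIL delimit the insns of interest (hypothetical driver; the
   real schedulers wire this up through their own passes):  */
#if 0
{
  struct deps_desc deps;

  init_deps (&deps, false);		/* fresh context, eager reg_last */
  sched_analyze (&deps, head, tail);	/* build backward dependencies */
  /* ... scheduling consumes the forward/backward dep lists ... */
  sched_free_deps (head, tail, true);	/* drop resolved deps and lists */
  free_deps (&deps);			/* release the context itself */
}
#endif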
3463 /* Initialize variables for region data dependence analysis.
3464 When LAZY_REG_LAST is true, do not allocate reg_last array
3465 of struct deps_desc immediately. */
3468 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3470 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3472 deps->max_reg = max_reg;
3474 deps->reg_last = NULL;
3476 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3477 INIT_REG_SET (&deps->reg_last_in_use);
3478 INIT_REG_SET (&deps->reg_conditional_sets);
3480 deps->pending_read_insns = 0;
3481 deps->pending_read_mems = 0;
3482 deps->pending_write_insns = 0;
3483 deps->pending_write_mems = 0;
3484 deps->pending_read_list_length = 0;
3485 deps->pending_write_list_length = 0;
3486 deps->pending_flush_length = 0;
3487 deps->last_pending_memory_flush = 0;
3488 deps->last_function_call = 0;
3489 deps->last_function_call_may_noreturn = 0;
3490 deps->sched_before_next_call = 0;
3491 deps->in_post_call_group_p = not_post_call;
3492 deps->last_debug_insn = 0;
3493 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3497 /* Initialize only the reg_last field of DEPS, which was not allocated
3498 earlier because DEPS was initialized lazily. */
3500 init_deps_reg_last (struct deps_desc *deps)
3502 gcc_assert (deps && deps->max_reg > 0);
3503 gcc_assert (deps->reg_last == NULL);
3505 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3509 /* Free insn lists found in DEPS. */
3512 free_deps (struct deps_desc *deps)
3515 reg_set_iterator rsi;
3517 /* We set max_reg to 0 when this context has already been freed. */
3518 if (deps->max_reg == 0)
3520 gcc_assert (deps->reg_last == NULL);
3525 free_INSN_LIST_list (&deps->pending_read_insns);
3526 free_EXPR_LIST_list (&deps->pending_read_mems);
3527 free_INSN_LIST_list (&deps->pending_write_insns);
3528 free_EXPR_LIST_list (&deps->pending_write_mems);
3529 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3531 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3532 times. For a testcase with 42000 regs and 8000 small basic blocks,
3533 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3534 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3536 struct deps_reg *reg_last = &deps->reg_last[i];
3538 free_INSN_LIST_list (&reg_last->uses);
3540 free_INSN_LIST_list (&reg_last->sets);
3541 if (reg_last->implicit_sets)
3542 free_INSN_LIST_list (&reg_last->implicit_sets);
3543 if (reg_last->clobbers)
3544 free_INSN_LIST_list (&reg_last->clobbers);
3546 CLEAR_REG_SET (&deps->reg_last_in_use);
3547 CLEAR_REG_SET (&deps->reg_conditional_sets);
3549 /* As we initialize reg_last lazily, it is possible that we didn't allocate it. */
3552 free (deps->reg_last);
3553 deps->reg_last = NULL;
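/* The EXECUTE_IF_SET_IN_REG_SET walk above is what keeps free_deps
   proportional to the number of registers actually touched.  The
   same "visit only set bits" idea over a flat word array (GCC's
   regsets are sparse bitmaps, so this is only a sketch):  */
#if 0
static void
example_for_each_set_bit (const unsigned long *words, int nwords,
			  void (*fn) (int regno))
{
  int w;

  for (w = 0; w < nwords; w++)
    {
      unsigned long b = words[w];

      while (b)
	{
	  int bit = __builtin_ctzl (b);	/* index of lowest set bit */

	  fn (w * (int) (8 * sizeof (unsigned long)) + bit);
	  b &= b - 1;			/* clear that bit */
	}
    }
}
#endif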
3558 /* Remove INSN from dependence contexts DEPS. Caution: reg_conditional_sets is not updated. */
3561 remove_from_deps (struct deps_desc *deps, rtx insn)
3565 reg_set_iterator rsi;
3567 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3568 &deps->pending_read_mems);
3569 if (!DEBUG_INSN_P (insn))
3570 deps->pending_read_list_length -= removed;
3571 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3572 &deps->pending_write_mems);
3573 deps->pending_write_list_length -= removed;
3574 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3575 deps->pending_flush_length -= removed;
3577 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3579 struct deps_reg *reg_last = &deps->reg_last[i];
3581 remove_from_dependence_list (insn, ®_last->uses);
3583 remove_from_dependence_list (insn, ®_last->sets);
3584 if (reg_last->implicit_sets)
3585 remove_from_dependence_list (insn, ®_last->implicit_sets);
3586 if (reg_last->clobbers)
3587 remove_from_dependence_list (insn, ®_last->clobbers);
3588 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3589 && !reg_last->clobbers)
3590 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3595 remove_from_dependence_list (insn, &deps->last_function_call);
3596 remove_from_dependence_list (insn,
3597 &deps->last_function_call_may_noreturn);
3599 remove_from_dependence_list (insn, &deps->sched_before_next_call);
3602 /* Init deps data vector. */
3604 init_deps_data_vector (void)
3606 int reserve = (sched_max_luid + 1
3607 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
3609 && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
3610 VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
3611 3 * sched_max_luid / 2);
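/* Growing to 3/2 of the required length amortizes reallocation cost
   over many extensions.  The same policy over a plain heap buffer (a
   sketch only; the real code uses the VEC API above):  */
#if 0
#include <stdlib.h>
#include <string.h>

static void *
example_grow_cleared (void *buf, size_t *cap, size_t need, size_t elt)
{
  if (need > *cap)
    {
      size_t new_cap = 3 * need / 2;	/* same 1.5x overshoot */
      void *p = realloc (buf, new_cap * elt);

      if (p == NULL)
	abort ();			/* sketch: no real error path */
      memset ((char *) p + *cap * elt, 0, (new_cap - *cap) * elt);
      *cap = new_cap;
      buf = p;
    }
  return buf;
}
#endif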
3614 /* If it is profitable to use them, initialize or extend (depending on
3615 GLOBAL_P) dependency data. */
3617 sched_deps_init (bool global_p)
3619 /* Average number of insns in the basic block.
3620 '+ 1' is used to make it nonzero. */
3621 int insns_in_block = sched_max_luid / n_basic_blocks + 1;
3623 init_deps_data_vector ();
3625 /* We use another caching mechanism for selective scheduling, so
3626 we don't use this one. */
3627 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
3629 /* ?!? We could save some memory by computing a per-region luid mapping
3630 which could reduce both the number of vectors in the cache and the
3631 size of each vector. Instead we just avoid the cache entirely unless
3632 the average number of instructions in a basic block is very high. See
3633 the comment before the declaration of true_dependency_cache for
3634 what we consider "very high". */
3636 extend_dependency_caches (sched_max_luid, true);
3641 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
3642 /* Allocate lists for one block at a time. */
3644 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
3645 /* Allocate nodes for one block at a time.
3646 We assume that the average insn has 5 producers. */
3648 5 * insns_in_block);
3653 /* Create or extend (depending on CREATE_P) dependency caches to size N. */
3656 extend_dependency_caches (int n, bool create_p)
3658 if (create_p || true_dependency_cache)
3660 int i, luid = cache_size + n;
3662 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
3664 output_dependency_cache = XRESIZEVEC (bitmap_head,
3665 output_dependency_cache, luid);
3666 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
3669 if (current_sched_info->flags & DO_SPECULATION)
3670 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
3673 for (i = cache_size; i < luid; i++)
3675 bitmap_initialize (&true_dependency_cache[i], 0);
3676 bitmap_initialize (&output_dependency_cache[i], 0);
3677 bitmap_initialize (&anti_dependency_cache[i], 0);
3679 if (current_sched_info->flags & DO_SPECULATION)
3680 bitmap_initialize (&spec_dependency_cache[i], 0);
3686 /* Finalize dependency information for the whole function. */
3688 sched_deps_finish (void)
3690 gcc_assert (deps_pools_are_empty_p ());
3691 free_alloc_pool_if_empty (&dn_pool);
3692 free_alloc_pool_if_empty (&dl_pool);
3693 gcc_assert (dn_pool == NULL && dl_pool == NULL);
3695 VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
3698 if (true_dependency_cache)
3702 for (i = 0; i < cache_size; i++)
3704 bitmap_clear (&true_dependency_cache[i]);
3705 bitmap_clear (&output_dependency_cache[i]);
3706 bitmap_clear (&anti_dependency_cache[i]);
3708 if (sched_deps_info->generate_spec_deps)
3709 bitmap_clear (&spec_dependency_cache[i]);
3711 free (true_dependency_cache);
3712 true_dependency_cache = NULL;
3713 free (output_dependency_cache);
3714 output_dependency_cache = NULL;
3715 free (anti_dependency_cache);
3716 anti_dependency_cache = NULL;
3718 if (sched_deps_info->generate_spec_deps)
3720 free (spec_dependency_cache);
3721 spec_dependency_cache = NULL;
3727 /* Initialize some global variables needed by the dependency analysis code. */
3731 init_deps_global (void)
3733 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3734 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3735 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
3736 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
3737 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
3738 reg_pending_barrier = NOT_A_BARRIER;
3740 if (!sel_sched_p () || sched_emulate_haifa_p)
3742 sched_deps_info->start_insn = haifa_start_insn;
3743 sched_deps_info->finish_insn = haifa_finish_insn;
3745 sched_deps_info->note_reg_set = haifa_note_reg_set;
3746 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
3747 sched_deps_info->note_reg_use = haifa_note_reg_use;
3749 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
3750 sched_deps_info->note_dep = haifa_note_dep;
3754 /* Free everything used by the dependency analysis code. */
3757 finish_deps_global (void)
3759 FREE_REG_SET (reg_pending_sets);
3760 FREE_REG_SET (reg_pending_clobbers);
3761 FREE_REG_SET (reg_pending_uses);
3764 /* Estimate the weakness of dependence between MEM1 and MEM2. */
3766 estimate_dep_weak (rtx mem1, rtx mem2)
3771 /* MEMs are the same - don't speculate. */
3772 return MIN_DEP_WEAK;
3774 r1 = XEXP (mem1, 0);
3775 r2 = XEXP (mem2, 0);
3778 || (REG_P (r1) && REG_P (r2)
3779 && REGNO (r1) == REGNO (r2)))
3780 /* Again, MEMs are the same. */
3781 return MIN_DEP_WEAK;
3782 else if ((REG_P (r1) && !REG_P (r2))
3783 || (!REG_P (r1) && REG_P (r2)))
3784 /* Different addressing modes - a reason to be more speculative than usual. */
3786 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
3788 /* We can't say anything about the dependence. */
3789 return UNCERTAIN_DEP_WEAK;
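/* Numeric illustration with made-up constants (the real MIN/MAX/
   NO/UNCERTAIN_DEP_WEAK values live in sched-int.h): if NO_DEP_WEAK
   were 100 and UNCERTAIN_DEP_WEAK 50, mixed addressing modes would
   return 100 - (100 - 50) / 2 == 75, halfway between "uncertain" and
   "surely independent".  */
#if 0
#include <assert.h>

static void
example_weak_midpoint (void)
{
  const int no_dep_weak = 100, uncertain_dep_weak = 50;	/* made up */
  int w = no_dep_weak - (no_dep_weak - uncertain_dep_weak) / 2;

  assert (w == 75);
}
#endif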
3792 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
3793 This function can handle same INSN and ELEM (INSN == ELEM).
3794 It is a convenience wrapper. */
3796 add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
3801 if (dep_type == REG_DEP_TRUE)
3803 else if (dep_type == REG_DEP_OUTPUT)
3807 gcc_assert (dep_type == REG_DEP_ANTI);
3811 /* When add_dependence is called from inside sched-deps.c, we expect
3812 cur_insn to be non-null. */
3813 internal = cur_insn != NULL;
3815 gcc_assert (insn == cur_insn);
3819 note_dep (elem, ds);
3824 /* Return weakness of speculative type TYPE in the dep_status DS. */
3826 get_dep_weak_1 (ds_t ds, ds_t type)
3832 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
3833 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
3834 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
3835 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
3836 default: gcc_unreachable ();
3843 get_dep_weak (ds_t ds, ds_t type)
3845 dw_t dw = get_dep_weak_1 (ds, type);
3847 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3851 /* Return a dep_status identical to DS, except that the weakness of
3852 speculative type TYPE is set to DW. */
3854 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
3856 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3861 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
3862 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
3863 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
3864 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
3865 default: gcc_unreachable ();
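/* The accessors above treat ds_t as a set of fixed-width weakness
   fields, one per speculative type, each at its own bit offset.  A
   stand-alone model with hypothetical 8-bit fields:  */
#if 0
#include <assert.h>

typedef unsigned int example_ds_t;

#define EX_WEAK_BITS	8
#define EX_WEAK_MASK	((1u << EX_WEAK_BITS) - 1)
#define EX_DATA_OFFSET	0
#define EX_CTRL_OFFSET	EX_WEAK_BITS

static unsigned
example_get_weak (example_ds_t ds, int offset)
{
  return (ds >> offset) & EX_WEAK_MASK;
}

static example_ds_t
example_set_weak (example_ds_t ds, int offset, unsigned dw)
{
  ds &= ~(EX_WEAK_MASK << offset);	/* clear the old field */
  return ds | ((example_ds_t) dw << offset);
}

static void
example_ds_demo (void)
{
  example_ds_t ds = 0;

  ds = example_set_weak (ds, EX_DATA_OFFSET, 200);
  ds = example_set_weak (ds, EX_CTRL_OFFSET, 17);
  assert (example_get_weak (ds, EX_DATA_OFFSET) == 200);
  assert (example_get_weak (ds, EX_CTRL_OFFSET) == 17);
}
#endif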
3870 /* Return the join of two dep_statuses DS1 and DS2.
3871 If MAX_P is true then choose the greater probability,
3872 otherwise multiply probabilities.
3873 This function assumes that both DS1 and DS2 contain speculative bits. */
3875 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
3879 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
3881 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
3883 t = FIRST_SPEC_TYPE;
3886 if ((ds1 & t) && !(ds2 & t))
3888 else if (!(ds1 & t) && (ds2 & t))
3890 else if ((ds1 & t) && (ds2 & t))
3892 dw_t dw1 = get_dep_weak (ds1, t);
3893 dw_t dw2 = get_dep_weak (ds2, t);
3898 dw = ((ds_t) dw1) * ((ds_t) dw2);
3900 if (dw < MIN_DEP_WEAK)
3911 ds = set_dep_weak (ds, t, (dw_t) dw);
3914 if (t == LAST_SPEC_TYPE)
3916 t <<= SPEC_TYPE_SHIFT;
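/* Worked example of the "multiply probabilities" branch, with a
   made-up MAX_DEP_WEAK of 255: two speculative weaknesses of 128
   (roughly a coin flip each) merge to 128 * 128 / 255 == 64, the
   joint probability of both speculations succeeding, clamped at
   MIN_DEP_WEAK from below.  */
#if 0
#include <assert.h>

static void
example_merge_weak (void)
{
  const int max_weak = 255, min_weak = 1;	/* made-up bounds */
  int dw = 128 * 128 / max_weak;

  if (dw < min_weak)
    dw = min_weak;
  assert (dw == 64);
}
#endif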
3923 /* Return the join of two dep_statuses DS1 and DS2.
3924 This function assumes that both DS1 and DS2 contain speculative bits. */
3926 ds_merge (ds_t ds1, ds_t ds2)
3928 return ds_merge_1 (ds1, ds2, false);
3931 /* Return the join of two dep_statuses DS1 and DS2. */
3933 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
3935 ds_t new_status = ds | ds2;
3937 if (new_status & SPECULATIVE)
3939 if ((ds && !(ds & SPECULATIVE))
3940 || (ds2 && !(ds2 & SPECULATIVE)))
3941 /* Then this dep can't be speculative. */
3942 new_status &= ~SPECULATIVE;
3945 /* Both are speculative. Merge the probabilities. */
3950 dw = estimate_dep_weak (mem1, mem2);
3951 ds = set_dep_weak (ds, BEGIN_DATA, dw);
3959 new_status = ds_merge (ds2, ds);
3966 /* Return the join of DS1 and DS2. Use maximum instead of multiplying probabilities. */
3969 ds_max_merge (ds_t ds1, ds_t ds2)
3971 if (ds1 == 0 && ds2 == 0)
3974 if (ds1 == 0 && ds2 != 0)
3977 if (ds1 != 0 && ds2 == 0)
3980 return ds_merge_1 (ds1, ds2, true);
3983 /* Return the probability of speculation success for the speculation in DS. */
3991 dt = FIRST_SPEC_TYPE;
3996 res *= (ds_t) get_dep_weak (ds, dt);
4000 if (dt == LAST_SPEC_TYPE)
4002 dt <<= SPEC_TYPE_SHIFT;
4008 res /= MAX_DEP_WEAK;
4010 if (res < MIN_DEP_WEAK)
4013 gcc_assert (res <= MAX_DEP_WEAK);
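/* E.g. with a made-up MAX_DEP_WEAK of 255: a status that is both
   data- and control-speculative with weaknesses 200 and 100 yields
   200 * 100 / 255 == 78 -- one division per extra factor keeps the
   running product scaled back into the [MIN_DEP_WEAK, MAX_DEP_WEAK]
   range.  */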
4018 /* Return a dep status that contains all speculation types of DS. */
4020 ds_get_speculation_types (ds_t ds)
4022 if (ds & BEGIN_DATA)
4023 ds |= BEGIN_DATA;
4024 if (ds & BE_IN_DATA)
4025 ds |= BE_IN_DATA;
4026 if (ds & BEGIN_CONTROL)
4027 ds |= BEGIN_CONTROL;
4028 if (ds & BE_IN_CONTROL)
4029 ds |= BE_IN_CONTROL;
4031 return ds & SPECULATIVE;
4034 /* Return a dep status that contains maximal weakness for each speculation
4035 type present in DS. */
4037 ds_get_max_dep_weak (ds_t ds)
4039 if (ds & BEGIN_DATA)
4040 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4041 if (ds & BE_IN_DATA)
4042 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4043 if (ds & BEGIN_CONTROL)
4044 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4045 if (ds & BE_IN_CONTROL)
4046 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4051 /* Dump information about the dependence status S. */
4053 dump_ds (FILE *f, ds_t s)
4058 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4060 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4061 if (s & BEGIN_CONTROL)
4062 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4063 if (s & BE_IN_CONTROL)
4064 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4067 fprintf (f, "HARD_DEP; ");
4070 fprintf (f, "DEP_TRUE; ");
4072 fprintf (f, "DEP_ANTI; ");
4074 fprintf (f, "DEP_OUTPUT; ");
4082 dump_ds (stderr, s);
4083 fprintf (stderr, "\n");
4086 #ifdef ENABLE_CHECKING
4087 /* Verify that dependence type and status are consistent.
4088 If RELAXED_P is true, then skip dep_weakness checks. */
4090 check_dep (dep_t dep, bool relaxed_p)
4092 enum reg_note dt = DEP_TYPE (dep);
4093 ds_t ds = DEP_STATUS (dep);
4095 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4097 if (!(current_sched_info->flags & USE_DEPS_LIST))
4099 gcc_assert (ds == -1);
4103 /* Check that dependence type contains the same bits as the status. */
4104 if (dt == REG_DEP_TRUE)
4105 gcc_assert (ds & DEP_TRUE);
4106 else if (dt == REG_DEP_OUTPUT)
4107 gcc_assert ((ds & DEP_OUTPUT)
4108 && !(ds & DEP_TRUE));
4110 gcc_assert ((dt == REG_DEP_ANTI)
4112 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4114 /* HARD_DEP cannot appear in the dep_status of a link. */
4115 gcc_assert (!(ds & HARD_DEP));
4117 /* Check that the dependence status is set correctly when speculation is not supported. */
4119 if (!sched_deps_info->generate_spec_deps)
4120 gcc_assert (!(ds & SPECULATIVE));
4121 else if (ds & SPECULATIVE)
4125 ds_t type = FIRST_SPEC_TYPE;
4127 /* Check that dependence weakness is in proper range. */
4131 get_dep_weak (ds, type);
4133 if (type == LAST_SPEC_TYPE)
4135 type <<= SPEC_TYPE_SHIFT;
4140 if (ds & BEGIN_SPEC)
4142 /* Only true dependence can be data speculative. */
4143 if (ds & BEGIN_DATA)
4144 gcc_assert (ds & DEP_TRUE);
4146 /* Control dependencies in the insn scheduler are represented by
4147 anti-dependencies, therefore only anti dependence can be
4148 control speculative. */
4149 if (ds & BEGIN_CONTROL)
4150 gcc_assert (ds & DEP_ANTI);
4154 /* Subsequent speculations should resolve true dependencies. */
4155 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4158 /* Check that true and anti dependencies can't have other speculative statuses. */
4161 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4162 /* An output dependence can't be speculative at all. */
4163 gcc_assert (!(ds & DEP_OUTPUT));
4165 gcc_assert (ds & BEGIN_CONTROL);
4168 #endif /* ENABLE_CHECKING */
4170 #endif /* INSN_SCHEDULING */