/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
#include "ira.h"
#include "target.h"
#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
#define CHECK (false)
#endif
/* A marker in deps->last_pending_memory_flush for JUMP_INSNs that were
   not added to the list by flush_pending_lists; such a jump stands just
   for itself and not for any other pending memory reads/writes.  */
#define NON_FLUSH_JUMP_KIND REG_DEP_ANTI
#define NON_FLUSH_JUMP_P(x) (REG_NOTE_KIND (x) == NON_FLUSH_JUMP_KIND)
/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}
/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  if (dk == REG_DEP_TRUE)
    return DEP_TRUE;

  if (dk == REG_DEP_OUTPUT)
    return DEP_OUTPUT;

  gcc_assert (dk == REG_DEP_ANTI);

  return DEP_ANTI;
}
/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
}
/* Init DEP with the arguments.
   While most of the scheduler (including targets) only need the major type
   of the dependency, it is convenient to hide full dep_status from them.  */
void
init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}
/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}
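
/* Illustrative sketch (not part of the pass): the typical life cycle of a
   dep_t built on the stack and handed to the dependency lists.  PRODUCER
   and CONSUMER are hypothetical insns; a real caller would obtain them
   from the insn stream.

       dep_def _d, *d = &_d;

       init_dep (d, producer, consumer, REG_DEP_TRUE);
       sd_add_or_update_dep (d, false);

   sd_add_or_update_dep copies the descriptor into a pooled dep_node, so
   the stack copy may be discarded afterwards.  */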
static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
		      | DUMP_DEP_STATUS)
/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;

	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;

	case REG_DEP_ANTI:
	  t = 'a';
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}
/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}
/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}
/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}
/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}
/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}
/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}
/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}
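
/* Illustrative sketch (an assumption, not part of the pass): the
   PREV_NEXTP representation makes attach/detach O(1) without knowing the
   containing list.  For a list holding A -> B the invariants are:

       DEPS_LIST_FIRST (l) == A
       DEP_LINK_PREV_NEXTP (A) == &DEPS_LIST_FIRST (l)
       DEP_LINK_NEXT (A) == B
       DEP_LINK_PREV_NEXTP (B) == &DEP_LINK_NEXT (A)

   so detach_dep_link (B) unlinks B by assigning through
   *DEP_LINK_PREV_NEXTP (B), whatever list B happens to be on.  */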
/* Pool to hold all dependency nodes (dep_node_t).  */
static alloc_pool dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;
/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}
/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  --dn_pool_diff;

  pool_free (dn_pool, n);
}
/* Pool to hold dependencies lists (deps_list_t).  */
static alloc_pool dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;
/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}
/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}
/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  pool_free (dl_pool, l);
}
/* Return true if there are no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}
/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
	break;

      remove_from_deps_list (link, l);
    }
  while (1);
}
/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    return (DEP_STATUS (dep) & SPECULATIVE) != 0;
  return false;
}
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or that may be implicitly
   clobbered or used) by the currently analyzed insn.  For example, an
   insn may have a constraint that admits only a single register class.
   Even if no hard register is present in the insn at the moment, the
   required hard register will appear in the insn after the reload pass
   because the constraint demands it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  The bitmaps are allocated as a group: whenever the
   cache for true dependencies is allocated, the remaining ones are
   allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;
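
/* Illustrative sketch (not part of the pass): how the caches answer the
   duplicate-link test.  Each bitmap is indexed by the consumer's luid and
   the bit index is the producer's luid, so a hit in any of the three maps
   means some dependence between the two insns already exists:

       static bool
       dep_cached_p (int insn_luid, int elem_luid)
       {
	 return (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
		 || bitmap_bit_p (&output_dependency_cache[insn_luid],
				  elem_luid)
		 || bitmap_bit_p (&anti_dependency_cache[insn_luid],
				  elem_luid));
       }

   dep_cached_p is hypothetical; sd_find_dep_between below open-codes the
   same test.  */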
static int deps_may_trap_p (const_rtx);
static void add_dependence_list (rtx, rtx, int, enum reg_note);
static void add_dependence_list_and_free (struct deps_desc *, rtx,
					  rtx *, int, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);

static void flush_pending_lists (struct deps_desc *, rtx, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx);

static bool sched_has_condition_p (const_rtx);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
#endif
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const_rtx insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (pat == 0)
    return 0;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}
/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const_rtx insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}
/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const_rtx insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	 (rev1 == rev2
	  ? reversed_comparison_code (cond2, NULL)
	  : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}
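
/* Illustrative example (an assumption, not from the sources): on a
   predicated target two COND_EXEC insns such as

       (cond_exec (eq (reg:CC cc) (const_int 0)) (set (reg A) ...))
       (cond_exec (ne (reg:CC cc) (const_int 0)) (set (reg A) ...))

   yield conditions EQ and NE over the same operands, conditions_mutex_p
   returns nonzero, and no dependence is created between them even though
   both write register A -- at run time only one of the two executes.  */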
/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along with it.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If instruction might fault, it cannot be speculatively scheduled.
	   For control speculation it's obvious why and for data speculation
	   it's because the insn might get wrong input if speculation
	   wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}
/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}
/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}
/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}
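
/* Illustrative sketch (not part of the pass): walking dependencies with
   the iterator interface.  This is how clients typically consume the
   lists selected by sd_next_list:

       sd_iterator_def sd_it;
       dep_t dep;

       FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
	 fprintf (stderr, "depends on insn %d\n", INSN_UID (DEP_PRO (dep)));

   SD_LIST_BACK is SD_LIST_HARD_BACK | SD_LIST_SPEC_BACK, so the loop
   visits both hard and speculative backward dependencies.  */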
/* Initialize data for INSN.  */
void
sd_init_insn (rtx insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}
/* Free data for INSN.  */
void
sd_finish_insn (rtx insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check whether a dependency
   is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none is found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      gcc_assert (output_dependency_cache != NULL
		  && anti_dependency_cache != NULL);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks whether producer and consumer are the same
   insn and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
	/* INSN has an internal dependence, which we can't overcome.  */
	HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}
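
/* Illustrative sketch (an assumption): the result codes let a caller tell
   apart the outcomes when the same dependence is offered twice.  PRODUCER
   and CONSUMER are hypothetical distinct insns:

       enum DEPS_ADJUST_RESULT r1, r2;
       dep_def _d, *d = &_d;

       init_dep (d, producer, consumer, REG_DEP_ANTI);
       r1 = maybe_add_or_update_dep_1 (d, false, NULL_RTX, NULL_RTX);
       r2 = maybe_add_or_update_dep_1 (d, false, NULL_RTX, NULL_RTX);

   Here r1 is DEP_CREATED and r2 is DEP_PRESENT (or DEP_CHANGED if the
   second descriptor carried a stronger type).  */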
/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	       ..we should update DEP_STATUS
	     else
	       ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}
/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}
/* The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear the corresponding cache entry because the type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}
/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}
/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either existing dep or a dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}
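
/* Illustrative sketch (an assumption about ds_merge semantics): when two
   data-speculative statuses for the same dependence are merged, the type
   bits are united and the weakness probabilities are combined, e.g.:

       ds_t a = set_dep_weak (DEP_TRUE, BEGIN_DATA, MAX_DEP_WEAK / 2);
       ds_t b = set_dep_weak (DEP_TRUE, BEGIN_DATA, MAX_DEP_WEAK / 2);
       ds_t m = ds_merge (a, b);

   m keeps DEP_TRUE | BEGIN_DATA with a weakness no larger than either
   input, reflecting that both speculations must succeed.  */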
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);
#endif

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep, so it should not be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}
/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}
/* Add the dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef ENABLE_CHECKING
  check_dep (dep, false);
#endif

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}
/* Add or update the backward dependence described by DEP.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}
/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}
/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);

  if ((current_sched_info->flags & DO_SPECULATION)
      && (DEP_STATUS (dep) & SPECULATIVE))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}
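
/* Illustrative sketch (not part of the pass): sd_resolve_dep and
   sd_unresolve_dep are inverses of each other, so resolving the first
   backward dep of INSN and then unresolving it restores the original
   lists:

       sd_iterator_def it = sd_iterator_start (insn, SD_LIST_BACK);
       dep_t dep;

       if (sd_iterator_cond (&it, &dep))
	 sd_resolve_dep (it);

       it = sd_iterator_start (insn, SD_LIST_RES_BACK);
       if (sd_iterator_cond (&it, &dep))
	 sd_unresolve_dep (it);
   */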
/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}
/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}
/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }
}
/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}
/* A convenience wrapper to operate on an entire list.  */

static void
add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
{
  for (; list; list = XEXP (list, 1))
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
	add_dependence (insn, XEXP (list, 0), dep_type);
    }
}
/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  */

static void
add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
			      int uncond, enum reg_note dep_type)
{
  rtx list, next;

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    {
      add_dependence_list (insn, *listp, uncond, dep_type);
      return;
    }

  for (list = *listp, *listp = NULL; list ; list = next)
    {
      next = XEXP (list, 1);
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
	add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
}
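
/* Illustrative sketch (an assumption): the lists handled above are
   ordinary INSN_LIST chains.  A caller that wants INSN to depend on every
   insn recorded in some context list would write:

       add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_ANTI);

   With a zero UNCOND argument, pairs whose execution conditions are
   provably mutually exclusive (sched_insns_conditions_mutex_p) are
   skipped.  */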
/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx insn, rtx* listp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  removed++;
	  continue;
	}

      listp = &XEXP (*listp, 1);
    }

  return removed;
}
/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  remove_free_EXPR_LIST_node (exprp);
	  removed++;
	  continue;
	}

      listp = &XEXP (*listp, 1);
      exprp = &XEXP (*exprp, 1);
    }

  return removed;
}
/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     sd_delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
fixup_sched_groups (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx i = insn;
      rtx pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
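
/* Illustrative example (an assumption, not from the sources): for the
   straight-line sequence

       insn 1:  (set (mem:SI (reg A)) (reg B))    ; store
       insn 2:  (set (reg C) (mem:SI (reg A)))    ; load
       insn 3:  (set (mem:SI (reg A)) (reg D))    ; store

   insn 2 gets a true dependence on insn 1 (read after write), insn 3 gets
   an anti dependence on insn 2 (write after read) and an output dependence
   on insn 1 (write after write).  Read-after-read pairs matter only in
   special cases such as volatile accesses.  */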
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
			 rtx insn, rtx mem)
{
  rtx *insn_list;
  rtx *mem_list;
  rtx link;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0), GET_MODE (mem));
    }
  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = link;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
		     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
				    1, REG_DEP_ANTI);
      if (!deps->readonly)
	{
	  free_EXPR_LIST_list (&deps->pending_read_mems);
	  deps->pending_read_list_length = 0;
	}
    }

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);

  add_dependence_list_and_free (deps, insn,
				&deps->last_pending_memory_flush, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
}
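
/* Illustrative sketch (not part of the pass): flush_pending_lists is the
   "give up" path for memory dependence tracking.  sched_analyze_1 below
   invokes it when the pending lists grow past MAX_PENDING_LIST_LENGTH
   (ignoring the readonly check here for brevity):

       if ((deps->pending_read_list_length
	    + deps->pending_write_list_length) > MAX_PENDING_LIST_LENGTH)
	 flush_pending_lists (deps, insn, false, true);

   After the flush, INSN acts as a barrier: it depends on everything that
   was pending, and later memory references need only depend on INSN via
   last_pending_memory_flush instead of on each individual access.  */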
/* The insn whose dependencies we are currently analyzing.  */
static rtx cur_insn = NULL_RTX;

/* Implement hooks for haifa scheduler.  */

static void
haifa_start_insn (rtx insn)
{
  gcc_assert (insn && !cur_insn);

  cur_insn = insn;
}

static void
haifa_finish_insn (void)
{
  cur_insn = NULL;
}
void
haifa_note_reg_set (int regno)
{
  SET_REGNO_REG_SET (reg_pending_sets, regno);
}

void
haifa_note_reg_clobber (int regno)
{
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
}

void
haifa_note_reg_use (int regno)
{
  SET_REGNO_REG_SET (reg_pending_uses, regno);
}
static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
{
  if (!(ds & SPECULATIVE))
    {
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
		current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }
}
static void
haifa_note_dep (rtx elem, ds_t ds)
{
  dep_def _dep, *dep = &_dep;

  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}
static void
note_reg_use (int r)
{
  if (sched_deps_info->note_reg_use)
    sched_deps_info->note_reg_use (r);
}

static void
note_reg_set (int r)
{
  if (sched_deps_info->note_reg_set)
    sched_deps_info->note_reg_set (r);
}

static void
note_reg_clobber (int r)
{
  if (sched_deps_info->note_reg_clobber)
    sched_deps_info->note_reg_clobber (r);
}

static void
note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
{
  if (sched_deps_info->note_mem_dep)
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
}

static void
note_dep (rtx e, ds_t ds)
{
  if (sched_deps_info->note_dep)
    sched_deps_info->note_dep (e, ds);
}
/* Return the reg_note corresponding to DS.  */
static enum reg_note
ds_to_dt (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;
  else if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;
  else
    {
      gcc_assert (ds & DEP_ANTI);
      return REG_DEP_ANTI;
    }
}
/* Functions for computation of info needed for register pressure
   sensitive insn scheduling.  */


/* Allocate and return reg_use_data structure for REGNO and INSN.  */
static struct reg_use_data *
create_insn_reg_use (int regno, rtx insn)
{
  struct reg_use_data *use;

  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
  use->regno = regno;
  use->insn = insn;
  use->next_insn_use = INSN_REG_USE_LIST (insn);
  INSN_REG_USE_LIST (insn) = use;

  return use;
}
/* Allocate and return reg_set_data structure for REGNO and INSN.  */
static struct reg_set_data *
create_insn_reg_set (int regno, rtx insn)
{
  struct reg_set_data *set;

  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
  set->regno = regno;
  set->insn = insn;
  set->next_insn_set = INSN_REG_SET_LIST (insn);
  INSN_REG_SET_LIST (insn) = set;

  return set;
}
/* Set up insn register uses for INSN and dependency context DEPS.  */
static void
setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
{
  unsigned i;
  reg_set_iterator rsi;
  rtx list;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      if (i < FIRST_PSEUDO_REGISTER
	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
	continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
	/* Ignore use which is not dying.  */
	continue;

      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (list = reg_last->uses; list; list = XEXP (list, 1))
	{
	  use2 = create_insn_reg_use (i, XEXP (list, 0));
	  next = use->next_regno_use;
	  use->next_regno_use = use2;
	  use2->next_regno_use = next;
	}
    }
}
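
/* Illustrative sketch (an assumption): next_regno_use links form a
   circular list through all uses of the same regno, and the loop above
   splices each new use right after the head.  Starting from USE, every
   use of the regno can be visited with:

       struct reg_use_data *p = use;
       do
	 {
	   p = p->next_regno_use;
	 }
       while (p != use);
   */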
/* Register pressure info for the currently processed insn.  */
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];

/* Return TRUE if INSN has the use structure for REGNO.  */
static bool
insn_use_p (rtx insn, int regno)
{
  struct reg_use_data *use;

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (use->regno == regno)
      return true;

  return false;
}
/* Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say correspondingly that
   the register is in clobber or unused after the insn.  */
static void
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
{
  int incr, new_incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      if (clobber_p)
	{
	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
	  reg_pressure_info[cl].clobber_increase = new_incr;
	}
      else if (unused_p)
	{
	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
	  reg_pressure_info[cl].unused_set_increase = new_incr;
	}
      else
	{
	  new_incr = reg_pressure_info[cl].set_increase + incr;
	  reg_pressure_info[cl].set_increase = new_incr;
	  if (! insn_use_p (insn, regno))
	    reg_pressure_info[cl].change += incr;
	  create_insn_reg_set (regno, insn);
	}
      gcc_assert (new_incr < (1 << INCREASE_BITS));
    }
}
/* Like mark_insn_pseudo_birth except that NREGS says how many
   hard registers are involved in the birth.  */
static void
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
			    bool clobber_p, bool unused_p)
{
  enum reg_class cl;
  int new_incr, last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_pressure_class[regno];
	  if (cl != NO_REGS)
	    {
	      if (clobber_p)
		{
		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
		  reg_pressure_info[cl].clobber_increase = new_incr;
		}
	      else if (unused_p)
		{
		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
		  reg_pressure_info[cl].unused_set_increase = new_incr;
		}
	      else
		{
		  new_incr = reg_pressure_info[cl].set_increase + 1;
		  reg_pressure_info[cl].set_increase = new_incr;
		  if (! insn_use_p (insn, regno))
		    reg_pressure_info[cl].change += 1;
		  create_insn_reg_set (regno, insn);
		}
	      gcc_assert (new_incr < (1 << INCREASE_BITS));
	    }
	}
      regno++;
    }
}
/* Update the register pressure info after birth of pseudo or hard
   register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say
   correspondingly that the register is in clobber or unused after the
   insn.  */
static void
mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_insn_hard_regno_birth (insn, regno,
				hard_regno_nregs[regno][GET_MODE (reg)],
				clobber_p, unused_p);
  else
    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
}
/* Update the register pressure info after death of pseudo register
   REGNO.  */
static void
mark_pseudo_death (int regno)
{
  int incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      reg_pressure_info[cl].change -= incr;
    }
}
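
/* Worked example (an assumption, not from the sources): suppose a pseudo
   of class GENERAL_REGS needs two hard registers
   (ira_reg_class_max_nregs == 2).  Its birth in an insn that really uses
   the result adds 2 to set_increase and, if the pseudo is not also used
   by that insn, 2 to change; its death in a later insn subtracts 2 from
   that insn's change.  Summing 'change' over a scheduled sequence thus
   tracks the net GENERAL_REGS pressure.  */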
/* Like mark_pseudo_death except that NREGS says how many hard
   registers are involved in the death.  */
static void
mark_hard_regno_death (int regno, int nregs)
{
  enum reg_class cl;
  int last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_pressure_class[regno];
	  if (cl != NO_REGS)
	    reg_pressure_info[cl].change -= 1;
	}
      regno++;
    }
}
/* Update the register pressure info after death of pseudo or hard
   register REG.  */
static void
mark_reg_death (rtx reg)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
  else
    mark_pseudo_death (regno);
}
/* Process SETTER of REG.  DATA is an insn containing the setter.  */
static void
mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
{
  if (setter != NULL_RTX && GET_CODE (setter) != SET)
    return;
  mark_insn_reg_birth
    ((rtx) data, reg, false,
     find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
}
/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
static void
mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
{
  if (GET_CODE (setter) == CLOBBER)
    mark_insn_reg_birth ((rtx) data, reg, true, false);
}
/* Set up reg pressure info related to INSN.  */
void
init_insn_reg_pressure_info (rtx insn)
{
  int i, len;
  enum reg_class cl;
  static struct reg_pressure_data *pressure_info;
  rtx link;

  gcc_assert (sched_pressure_p);

  if (! INSN_P (insn))
    return;

  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      reg_pressure_info[cl].clobber_increase = 0;
      reg_pressure_info[cl].set_increase = 0;
      reg_pressure_info[cl].unused_set_increase = 0;
      reg_pressure_info[cl].change = 0;
    }

  note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);

  note_stores (PATTERN (insn), mark_insn_reg_store, insn);

#ifdef AUTO_INC_DEC
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
#endif

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_DEAD)
      mark_reg_death (XEXP (link, 0));

  len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
  pressure_info
    = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
  INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
						  * sizeof (int), 1);
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      pressure_info[i].clobber_increase
	= reg_pressure_info[cl].clobber_increase;
      pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
      pressure_info[i].unused_set_increase
	= reg_pressure_info[cl].unused_set_increase;
      pressure_info[i].change = reg_pressure_info[cl].change;
    }
}
/* Internal variable for sched_analyze_[12] () functions.
   If it is nonzero, this means that sched_analyze_[12] is looking
   at the outermost SET of the insn.  */
static bool can_start_lhs_rhs_p;
/* Extend reg info for the deps context DEPS given that
   we have just generated a register numbered REGNO.  */
static void
extend_deps_reg_info (struct deps_desc *deps, int regno)
{
  int max_regno = regno + 1;

  gcc_assert (!reload_completed);

  /* In a readonly context, it would not hurt to extend info,
     but it should not be needed.  */
  if (reload_completed && deps->readonly)
    {
      deps->max_reg = max_regno;
      return;
    }

  if (max_regno > deps->max_reg)
    {
      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
				   max_regno);
      memset (&deps->reg_last[deps->max_reg],
	      0, (max_regno - deps->max_reg)
	      * sizeof (struct deps_reg));
      deps->max_reg = max_regno;
    }
}
/* Extends REG_INFO_P if needed.  */
void
maybe_extend_reg_info_p (void)
{
  /* Extend REG_INFO_P, if needed.  */
  if ((unsigned int) max_regno - 1 >= reg_info_p_size)
    {
      size_t new_reg_info_p_size = max_regno + 128;

      gcc_assert (!reload_completed && sel_sched_p ());

      reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
						    new_reg_info_p_size,
						    reg_info_p_size,
						    sizeof (*reg_info_p));
      reg_info_p_size = new_reg_info_p_size;
    }
}
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */

static void
sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
		   enum rtx_code ref, rtx insn)
{
  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
  if (!reload_completed && sel_sched_p ()
      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
    extend_deps_reg_info (deps, regno);

  maybe_extend_reg_info_p ();

  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i = hard_regno_nregs[regno][mode];
      if (ref == SET)
	{
	  while (--i >= 0)
	    note_reg_set (regno + i);
	}
      else if (ref == USE)
	{
	  while (--i >= 0)
	    note_reg_use (regno + i);
	}
      else
	{
	  while (--i >= 0)
	    note_reg_clobber (regno + i);
	}
    }

  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
    {
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);
    }

  else
    {
      if (ref == SET)
	note_reg_set (regno);
      else if (ref == USE)
	note_reg_use (regno);
      else
	note_reg_clobber (regno);

      /* Pseudos that are REG_EQUIV to something may be replaced
	 by that during reloading.  We need only add dependencies for
	 the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
	{
	  rtx t = get_reg_known_value (regno);
	  if (MEM_P (t))
	    sched_analyze_2 (deps, XEXP (t, 0), insn);
	}

      /* Don't let it cross a call after scheduling if it doesn't
	 already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
	{
	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
	    deps->sched_before_next_call
	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
	  else
	    add_dependence_list (insn, deps->last_function_call, 1,
				 REG_DEP_ANTI);
	}
    }
}
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
{
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  if (dest == 0)
    return;

  if (cslr_p && sched_deps_info->start_lhs)
    sched_deps_info->start_lhs (dest);

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	  sched_analyze_1 (deps,
			   gen_rtx_CLOBBER (VOIDmode,
					    XEXP (XVECEXP (dest, 0, i), 0)),
			   insn);

      if (cslr_p && sched_deps_info->finish_lhs)
	sched_deps_info->finish_lhs ();

      if (code == SET)
	{
	  can_start_lhs_rhs_p = cslr_p;

	  sched_analyze_2 (deps, SET_SRC (x), insn);

	  can_start_lhs_rhs_p = false;
	}

      return;
    }
  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT)
    {
      if (GET_CODE (dest) == STRICT_LOW_PART
	  || GET_CODE (dest) == ZERO_EXTRACT
	  || df_read_modify_subreg_p (dest))
	{
	  /* These both read and modify the result.  We must handle
	     them as writes to get proper dependencies for following
	     instructions.  We must handle them as reads to get proper
	     dependencies from this to previous instructions.
	     Thus we need to call sched_analyze_2.  */

	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
	}
      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
	}
      dest = XEXP (dest, 0);
    }
  if (REG_P (dest))
    {
      int regno = REGNO (dest);
      enum machine_mode mode = GET_MODE (dest);

      sched_analyze_reg (deps, regno, mode, code, insn);

#ifdef STACK_REGS
      /* Treat all writes to a stack register as modifying the TOS.  */
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
	{
	  /* Avoid analyzing the same register twice.  */
	  if (regno != FIRST_STACK_REG)
	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);

	  add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
			       FIRST_STACK_REG);
	}
#endif
    }
  else if (MEM_P (dest))
    {
      /* Writing memory.  */
      rtx t = dest;

      if (sched_deps_info->use_cselib)
	{
	  enum machine_mode address_mode
	    = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));

	  t = shallow_copy_rtx (dest);
	  cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
				   GET_MODE (t), insn);
	  XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0), GET_MODE (t));
	}
      t = canon_rtx (t);

      /* Pending lists can't get larger with a readonly context.  */
      if (!deps->readonly
	  && ((deps->pending_read_list_length + deps->pending_write_list_length)
	      > MAX_PENDING_LIST_LENGTH))
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the default value of 32.  */
	  flush_pending_lists (deps, insn, false, true);
	}
      else
	{
	  rtx pending, pending_mem;

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (anti_dependence (XEXP (pending_mem, 0), t)
		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
			      DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (output_dependence (XEXP (pending_mem, 0), t)
		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
			      DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
			       REG_DEP_ANTI);

	  if (!deps->readonly)
	    add_insn_mem_dependence (deps, false, insn, dest);
	}
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  if (cslr_p && sched_deps_info->finish_lhs)
    sched_deps_info->finish_lhs ();
2407 /* Analyze reads. */
2408 if (GET_CODE (x) == SET)
2410 can_start_lhs_rhs_p = cslr_p;
2412 sched_analyze_2 (deps, SET_SRC (x), insn);
2414 can_start_lhs_rhs_p = false;
2418 /* Analyze the uses of memory and registers in rtx X in INSN. */
2420 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2426 bool cslr_p = can_start_lhs_rhs_p;
2428 can_start_lhs_rhs_p = false;
2434 if (cslr_p && sched_deps_info->start_rhs)
2435 sched_deps_info->start_rhs (x);
2437 code = GET_CODE (x);
2448 /* Ignore constants. */
2449 if (cslr_p && sched_deps_info->finish_rhs)
2450 sched_deps_info->finish_rhs ();
2456 /* User of CC0 depends on immediately preceding insn. */
2457 SCHED_GROUP_P (insn) = 1;
2458 /* Don't move CC0 setter to another block (it can set up the
2459 same flag for previous CC0 users which is safe). */
2460 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2462 if (cslr_p && sched_deps_info->finish_rhs)
2463 sched_deps_info->finish_rhs ();
2470 int regno = REGNO (x);
2471 enum machine_mode mode = GET_MODE (x);
2473 sched_analyze_reg (deps, regno, mode, USE, insn);
2476 /* Treat all reads of a stack register as modifying the TOS. */
2477 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2479 /* Avoid analyzing the same register twice. */
2480 if (regno != FIRST_STACK_REG)
2481 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2482 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2486 if (cslr_p && sched_deps_info->finish_rhs)
2487 sched_deps_info->finish_rhs ();
2494 /* Reading memory. */
2496 rtx pending, pending_mem;
2499 if (sched_deps_info->use_cselib)
2501 enum machine_mode address_mode
2502 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
2504 t = shallow_copy_rtx (t);
2505 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2506 GET_MODE (t), insn);
2507 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0), GET_MODE (t));
2510 if (!DEBUG_INSN_P (insn))
2513 pending = deps->pending_read_insns;
2514 pending_mem = deps->pending_read_mems;
2517 if (read_dependence (XEXP (pending_mem, 0), t)
2518 && ! sched_insns_conditions_mutex_p (insn,
2520 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2523 pending = XEXP (pending, 1);
2524 pending_mem = XEXP (pending_mem, 1);
2527 pending = deps->pending_write_insns;
2528 pending_mem = deps->pending_write_mems;
2531 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
2533 && ! sched_insns_conditions_mutex_p (insn,
2535 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2536 sched_deps_info->generate_spec_deps
2537 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2539 pending = XEXP (pending, 1);
2540 pending_mem = XEXP (pending_mem, 1);
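/* A hedged example of the status chosen above: when this load may alias
   a pending store and data speculation is enabled, the dependence is
   tagged BEGIN_DATA | DEP_TRUE, so the scheduler may hoist the load
   speculatively and verify it later; otherwise it is a plain DEP_TRUE
   that keeps the load after the store. */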
2543 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2545 if (! NON_FLUSH_JUMP_P (u))
2546 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2547 else if (deps_may_trap_p (x))
2549 if ((sched_deps_info->generate_spec_deps)
2550 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2552 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2555 note_dep (XEXP (u, 0), ds);
2558 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
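/* Sketch of the distinction above (illustrative): a load that may trap,
   say through a possibly invalid pointer, gets a control-speculative
   anti-dependence (DEP_ANTI weakened by BEGIN_CONTROL) on the jump when
   the selective scheduler allows control speculation, and a hard
   REG_DEP_ANTI otherwise. */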
2563 /* Always add these dependencies to pending_reads, since
2564 this insn may be followed by a write. */
2565 if (!deps->readonly)
2566 add_insn_mem_dependence (deps, true, insn, x);
2568 sched_analyze_2 (deps, XEXP (x, 0), insn);
2570 if (cslr_p && sched_deps_info->finish_rhs)
2571 sched_deps_info->finish_rhs ();
2576 /* Force pending stores to memory in case a trap handler needs them. */
2578 flush_pending_lists (deps, insn, true, false);
2582 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2583 reg_pending_barrier = TRUE_BARRIER;
2586 case UNSPEC_VOLATILE:
2587 flush_pending_lists (deps, insn, true, true);
2593 /* Traditional and volatile asm instructions must be considered to use
2594 and clobber all hard registers, all pseudo-registers and all of
2595 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2597 Consider for instance a volatile asm that changes the fpu rounding
2598 mode. An insn should not be moved across this even if it only uses
2599 pseudo-regs because it might give an incorrectly rounded result. */
2600 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2601 reg_pending_barrier = TRUE_BARRIER;
2603 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2604 We cannot just fall through here since then we would be confused
2605 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2606 a traditional asm unlike its normal usage. */
2608 if (code == ASM_OPERANDS)
2610 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2611 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2613 if (cslr_p && sched_deps_info->finish_rhs)
2614 sched_deps_info->finish_rhs ();
2625 /* These both read and modify the result. We must handle them as writes
2626 to get proper dependencies for following instructions. We must handle
2627 them as reads to get proper dependencies from this to previous
2628 instructions. Thus we need to pass them to both sched_analyze_1
2629 and sched_analyze_2. We must call sched_analyze_2 first in order
2630 to get the proper antecedent for the read. */
2631 sched_analyze_2 (deps, XEXP (x, 0), insn);
2632 sched_analyze_1 (deps, x, insn);
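/* For instance (illustrative, not from the source): in

       (set (reg:SI 101) (mem:SI (post_inc:SI (reg:SI 100))))

   the POST_INC both reads and writes (reg:SI 100): its address register
   is passed through sched_analyze_2 first for the read, then the whole
   auto-increment goes to sched_analyze_1 for the write. */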
2634 if (cslr_p && sched_deps_info->finish_rhs)
2635 sched_deps_info->finish_rhs ();
2641 /* op0 = op0 + op1 */
2642 sched_analyze_2 (deps, XEXP (x, 0), insn);
2643 sched_analyze_2 (deps, XEXP (x, 1), insn);
2644 sched_analyze_1 (deps, x, insn);
2646 if (cslr_p && sched_deps_info->finish_rhs)
2647 sched_deps_info->finish_rhs ();
2655 /* Other cases: walk the insn. */
2656 fmt = GET_RTX_FORMAT (code);
2657 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2660 sched_analyze_2 (deps, XEXP (x, i), insn);
2661 else if (fmt[i] == 'E')
2662 for (j = 0; j < XVECLEN (x, i); j++)
2663 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2666 if (cslr_p && sched_deps_info->finish_rhs)
2667 sched_deps_info->finish_rhs ();
2670 /* Analyze an INSN with pattern X to find all dependencies. */
2672 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2674 RTX_CODE code = GET_CODE (x);
2677 reg_set_iterator rsi;
2679 if (! reload_completed)
2683 extract_insn (insn);
2684 preprocess_constraints ();
2685 ira_implicitly_set_insn_hard_regs (&temp);
2686 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2687 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2690 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2694 /* Avoid moving trapping instructions across function calls that might
2695 not always return. */
2696 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2699 if (code == COND_EXEC)
2701 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2703 /* ??? Should be recording conditions so we reduce the number of
2704 false dependencies. */
2705 x = COND_EXEC_CODE (x);
2706 code = GET_CODE (x);
2708 if (code == SET || code == CLOBBER)
2710 sched_analyze_1 (deps, x, insn);
2712 /* Bare clobber insns are used for letting life analysis, reg-stack
2713 and others know that a value is dead. Depend on the last call
2714 instruction so that reg-stack won't get confused. */
2715 if (code == CLOBBER)
2716 add_dependence_list (insn, deps->last_function_call, 1,
2719 else if (code == PARALLEL)
2721 for (i = XVECLEN (x, 0); i--;)
2723 rtx sub = XVECEXP (x, 0, i);
2724 code = GET_CODE (sub);
2726 if (code == COND_EXEC)
2728 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2729 sub = COND_EXEC_CODE (sub);
2730 code = GET_CODE (sub);
2732 if (code == SET || code == CLOBBER)
2733 sched_analyze_1 (deps, sub, insn);
2735 sched_analyze_2 (deps, sub, insn);
2739 sched_analyze_2 (deps, x, insn);
2741 /* Mark registers CLOBBERED or used by called function. */
2744 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2746 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2747 sched_analyze_1 (deps, XEXP (link, 0), insn);
2749 sched_analyze_2 (deps, XEXP (link, 0), insn);
2751 if (find_reg_note (insn, REG_SETJMP, NULL))
2752 reg_pending_barrier = MOVE_BARRIER;
2758 next = next_nonnote_nondebug_insn (insn);
2759 if (next && BARRIER_P (next))
2760 reg_pending_barrier = MOVE_BARRIER;
2763 rtx pending, pending_mem;
2765 if (sched_deps_info->compute_jump_reg_dependencies)
2768 INIT_REG_SET (&tmp);
2770 (*sched_deps_info->compute_jump_reg_dependencies) (insn, &tmp);
2772 /* Make latency of jump equal to 0 by using anti-dependence. */
2773 EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i, rsi)
2775 struct deps_reg *reg_last = &deps->reg_last[i];
2776 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2777 add_dependence_list (insn, reg_last->implicit_sets,
2779 add_dependence_list (insn, reg_last->clobbers, 0,
2782 if (!deps->readonly)
2784 reg_last->uses_length++;
2785 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2789 CLEAR_REG_SET (&tmp);
2792 /* All memory writes and volatile reads must happen before the
2793 jump. Non-volatile reads must happen before the jump iff
2794 the result is needed by the above register used mask. */
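/* Illustrative consequence (a sketch): the loops below give the jump a
   REG_DEP_OUTPUT dependence on every pending store and on every
   volatile pending load, so neither may drift below the branch, while
   ordinary loads are constrained only through the register mask
   handled above. */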
2796 pending = deps->pending_write_insns;
2797 pending_mem = deps->pending_write_mems;
2800 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2801 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2802 pending = XEXP (pending, 1);
2803 pending_mem = XEXP (pending_mem, 1);
2806 pending = deps->pending_read_insns;
2807 pending_mem = deps->pending_read_mems;
2810 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2811 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2812 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2813 pending = XEXP (pending, 1);
2814 pending_mem = XEXP (pending_mem, 1);
2817 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2822 /* If this instruction can throw an exception, then moving it changes
2823 where block boundaries fall. This is mighty confusing elsewhere.
2824 Therefore, prevent such an instruction from being moved. Same for
2825 non-jump instructions that define block boundaries.
2826 ??? Unclear whether this is still necessary in EBB mode. If not,
2827 add_branch_dependences should be adjusted for RGN mode instead. */
2828 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2829 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2830 reg_pending_barrier = MOVE_BARRIER;
2832 if (sched_pressure_p)
2834 setup_insn_reg_uses (deps, insn);
2835 init_insn_reg_pressure_info (insn);
2838 /* Add register dependencies for insn. */
2839 if (DEBUG_INSN_P (insn))
2841 rtx prev = deps->last_debug_insn;
2844 if (!deps->readonly)
2845 deps->last_debug_insn = insn;
2848 add_dependence (insn, prev, REG_DEP_ANTI);
2850 add_dependence_list (insn, deps->last_function_call, 1,
2853 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2854 if (! NON_FLUSH_JUMP_P (u) || !sel_sched_p ())
2855 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2857 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2859 struct deps_reg *reg_last = &deps->reg_last[i];
2860 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
2861 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
2863 if (!deps->readonly)
2864 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2866 CLEAR_REG_SET (reg_pending_uses);
2868 /* Quite often, a debug insn will refer to stuff in the
2869 previous instruction, but the reason we want this
2870 dependency here is to make sure the scheduler doesn't
2871 gratuitously move a debug insn ahead. This could dirty
2872 DF flags and cause additional analysis that wouldn't have
2873 occurred in compilation without debug insns, and such
2874 additional analysis can modify the generated code. */
2875 prev = PREV_INSN (insn);
2877 if (prev && NONDEBUG_INSN_P (prev))
2878 add_dependence (insn, prev, REG_DEP_ANTI);
2882 regset_head set_or_clobbered;
2884 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2886 struct deps_reg *reg_last = &deps->reg_last[i];
2887 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2888 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
2889 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2891 if (!deps->readonly)
2893 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2894 reg_last->uses_length++;
2898 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2899 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
2901 struct deps_reg *reg_last = &deps->reg_last[i];
2902 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2903 add_dependence_list (insn, reg_last->implicit_sets, 0,
2905 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2907 if (!deps->readonly)
2909 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2910 reg_last->uses_length++;
2914 if (targetm.sched.exposed_pipeline)
2916 INIT_REG_SET (&set_or_clobbered);
2917 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
2919 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
2921 struct deps_reg *reg_last = &deps->reg_last[i];
2923 for (list = reg_last->uses; list; list = XEXP (list, 1))
2925 rtx other = XEXP (list, 0);
2926 if (INSN_CACHED_COND (other) != const_true_rtx
2927 && refers_to_regno_p (i, i + 1, INSN_CACHED_COND (other), NULL))
2928 INSN_CACHED_COND (other) = const_true_rtx;
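/* Sketch of the invalidation above (illustrative): if the cached
   condition of an earlier user OTHER mentions hard register i, e.g.
   (ne (reg:CC i) (const_int 0)), and the current insn sets or clobbers
   i, the cache entry is reset to const_true_rtx, i.e. "condition
   unknown". */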
2933 /* If the current insn is conditional, we can't free any of the lists. */
2935 if (sched_has_condition_p (insn))
2937 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2939 struct deps_reg *reg_last = &deps->reg_last[i];
2940 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2941 add_dependence_list (insn, reg_last->implicit_sets, 0,
2943 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2945 if (!deps->readonly)
2948 = alloc_INSN_LIST (insn, reg_last->clobbers);
2949 reg_last->clobbers_length++;
2952 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2954 struct deps_reg *reg_last = &deps->reg_last[i];
2955 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2956 add_dependence_list (insn, reg_last->implicit_sets, 0,
2958 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
2959 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2961 if (!deps->readonly)
2962 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2967 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2969 struct deps_reg *reg_last = &deps->reg_last[i];
2970 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
2971 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
2973 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2975 add_dependence_list_and_free (deps, insn,
2976 &reg_last->implicit_sets, 0,
2978 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2980 add_dependence_list_and_free
2981 (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
2983 if (!deps->readonly)
2985 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2986 reg_last->clobbers_length = 0;
2987 reg_last->uses_length = 0;
2992 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2993 add_dependence_list (insn, reg_last->implicit_sets, 0,
2995 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2998 if (!deps->readonly)
3000 reg_last->clobbers_length++;
3002 = alloc_INSN_LIST (insn, reg_last->clobbers);
3005 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3007 struct deps_reg *reg_last = &deps->reg_last[i];
3009 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3011 add_dependence_list_and_free (deps, insn,
3012 &reg_last->implicit_sets,
3014 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3016 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3019 if (!deps->readonly)
3021 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3022 reg_last->uses_length = 0;
3023 reg_last->clobbers_length = 0;
3029 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3030 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3032 struct deps_reg *reg_last = &deps->reg_last[i];
3033 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
3034 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
3035 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3037 if (!deps->readonly)
3038 reg_last->implicit_sets
3039 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3042 if (!deps->readonly)
3044 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3045 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3046 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3047 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3048 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3049 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3050 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3052 /* Set up the pending barrier found. */
3053 deps->last_reg_pending_barrier = reg_pending_barrier;
3056 CLEAR_REG_SET (reg_pending_uses);
3057 CLEAR_REG_SET (reg_pending_clobbers);
3058 CLEAR_REG_SET (reg_pending_sets);
3059 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3060 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3062 /* Add dependencies if a scheduling barrier was found. */
3063 if (reg_pending_barrier)
3065 /* In the case of a barrier, most of the added dependencies are not
3066 real, so we use anti-dependence here. */
3067 if (sched_has_condition_p (insn))
3069 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3071 struct deps_reg *reg_last = &deps->reg_last[i];
3072 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3073 add_dependence_list (insn, reg_last->sets, 0,
3074 reg_pending_barrier == TRUE_BARRIER
3075 ? REG_DEP_TRUE : REG_DEP_ANTI);
3076 add_dependence_list (insn, reg_last->implicit_sets, 0,
3078 add_dependence_list (insn, reg_last->clobbers, 0,
3079 reg_pending_barrier == TRUE_BARRIER
3080 ? REG_DEP_TRUE : REG_DEP_ANTI);
3085 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3087 struct deps_reg *reg_last = &deps->reg_last[i];
3088 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3090 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3091 reg_pending_barrier == TRUE_BARRIER
3092 ? REG_DEP_TRUE : REG_DEP_ANTI);
3093 add_dependence_list_and_free (deps, insn,
3094 &reg_last->implicit_sets, 0,
3096 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3097 reg_pending_barrier == TRUE_BARRIER
3098 ? REG_DEP_TRUE : REG_DEP_ANTI);
3100 if (!deps->readonly)
3102 reg_last->uses_length = 0;
3103 reg_last->clobbers_length = 0;
3108 if (!deps->readonly)
3109 for (i = 0; i < (unsigned)deps->max_reg; i++)
3111 struct deps_reg *reg_last = &deps->reg_last[i];
3112 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3113 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3116 /* Flush pending lists on jumps, but not on speculative checks. */
3117 if (JUMP_P (insn) && !(sel_sched_p ()
3118 && sel_insn_is_speculation_check (insn)))
3119 flush_pending_lists (deps, insn, true, true);
3121 reg_pending_barrier = NOT_A_BARRIER;
3124 /* If a post-call group is still open, see if it should remain so.
3125 This insn must be a simple move of a hard reg to a pseudo or vice-versa.
3128 We must avoid moving these insns for correctness on targets
3129 with small register classes, and for special registers like
3130 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3131 hard regs for all targets. */
3133 if (deps->in_post_call_group_p)
3135 rtx tmp, set = single_set (insn);
3136 int src_regno, dest_regno;
3140 if (DEBUG_INSN_P (insn))
3141 /* We don't want to mark debug insns as part of the same
3142 sched group. We know they really aren't, but if we use
3143 debug insns to tell that a call group is over, we'll
3144 get different code if debug insns are not there and
3145 instructions that follow seem like they should be part of the call group.
3148 Also, if we did, fixup_sched_groups() would move the
3149 deps of the debug insn to the call insn, modifying
3150 non-debug post-dependency counts of the debug insn
3151 dependencies and otherwise messing with the scheduling order.
3154 Instead, let such debug insns be scheduled freely, but
3155 keep the call group open in case there are insns that
3156 should be part of it afterwards. Since we grant debug
3157 insns higher priority than even sched group insns, it
3158 will all turn out all right. */
3159 goto debug_dont_end_call_group;
3161 goto end_call_group;
3164 tmp = SET_DEST (set);
3165 if (GET_CODE (tmp) == SUBREG)
3166 tmp = SUBREG_REG (tmp);
3168 dest_regno = REGNO (tmp);
3170 goto end_call_group;
3172 tmp = SET_SRC (set);
3173 if (GET_CODE (tmp) == SUBREG)
3174 tmp = SUBREG_REG (tmp);
3175 if ((GET_CODE (tmp) == PLUS
3176 || GET_CODE (tmp) == MINUS)
3177 && REG_P (XEXP (tmp, 0))
3178 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3179 && dest_regno == STACK_POINTER_REGNUM)
3180 src_regno = STACK_POINTER_REGNUM;
3181 else if (REG_P (tmp))
3182 src_regno = REGNO (tmp);
3184 goto end_call_group;
3186 if (src_regno < FIRST_PSEUDO_REGISTER
3187 || dest_regno < FIRST_PSEUDO_REGISTER)
3190 && deps->in_post_call_group_p == post_call_initial)
3191 deps->in_post_call_group_p = post_call;
3193 if (!sel_sched_p () || sched_emulate_haifa_p)
3195 SCHED_GROUP_P (insn) = 1;
3196 CANT_MOVE (insn) = 1;
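/* Worked example (hedged): right after a call, a copy such as

       (set (reg:SI 200) (reg:SI <return-value hard reg>))

   keeps the post-call group open; the copy is glued to the call with
   SCHED_GROUP_P and pinned in its block with CANT_MOVE, so the hard
   register's lifetime stays short. Which hard register holds the
   return value is target-specific. */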
3202 if (!deps->readonly)
3203 deps->in_post_call_group_p = not_post_call;
3207 debug_dont_end_call_group:
3208 if ((current_sched_info->flags & DO_SPECULATION)
3209 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3210 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot be speculated. */
3214 sel_mark_hard_insn (insn);
3217 sd_iterator_def sd_it;
3220 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3221 sd_iterator_cond (&sd_it, &dep);)
3222 change_spec_dep_to_hard (sd_it);
3227 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3228 longjmp, loop forever, ...). */
3230 call_may_noreturn_p (rtx insn)
3234 /* const or pure calls that aren't looping will always return. */
3235 if (RTL_CONST_OR_PURE_CALL_P (insn)
3236 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3239 call = PATTERN (insn);
3240 if (GET_CODE (call) == PARALLEL)
3241 call = XVECEXP (call, 0, 0);
3242 if (GET_CODE (call) == SET)
3243 call = SET_SRC (call);
3244 if (GET_CODE (call) == CALL
3245 && MEM_P (XEXP (call, 0))
3246 && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3248 rtx symbol = XEXP (XEXP (call, 0), 0);
3249 if (SYMBOL_REF_DECL (symbol)
3250 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3252 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3254 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3257 case BUILT_IN_BCOPY:
3258 case BUILT_IN_BZERO:
3259 case BUILT_IN_INDEX:
3260 case BUILT_IN_MEMCHR:
3261 case BUILT_IN_MEMCMP:
3262 case BUILT_IN_MEMCPY:
3263 case BUILT_IN_MEMMOVE:
3264 case BUILT_IN_MEMPCPY:
3265 case BUILT_IN_MEMSET:
3266 case BUILT_IN_RINDEX:
3267 case BUILT_IN_STPCPY:
3268 case BUILT_IN_STPNCPY:
3269 case BUILT_IN_STRCAT:
3270 case BUILT_IN_STRCHR:
3271 case BUILT_IN_STRCMP:
3272 case BUILT_IN_STRCPY:
3273 case BUILT_IN_STRCSPN:
3274 case BUILT_IN_STRLEN:
3275 case BUILT_IN_STRNCAT:
3276 case BUILT_IN_STRNCMP:
3277 case BUILT_IN_STRNCPY:
3278 case BUILT_IN_STRPBRK:
3279 case BUILT_IN_STRRCHR:
3280 case BUILT_IN_STRSPN:
3281 case BUILT_IN_STRSTR:
3282 /* Assume certain string/memory builtins always return. */
3290 /* For all other calls assume that they might not always return. */
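/* Usage sketch (illustrative): a direct call to a known builtin like
   memcpy is reported as always returning, so trapping insns may still
   move across it; a call to an unknown external function returns true
   here and is recorded in last_function_call_may_noreturn, which
   sched_analyze_insn uses to keep trapping insns from crossing it. */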
3294 /* Analyze INSN with DEPS as a context. */
3296 deps_analyze_insn (struct deps_desc *deps, rtx insn)
3298 if (sched_deps_info->start_insn)
3299 sched_deps_info->start_insn (insn);
3301 /* Record the condition for this insn. */
3302 if (NONDEBUG_INSN_P (insn))
3303 sched_get_condition_with_rev (insn, NULL);
3305 if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
3307 /* Make each JUMP_INSN (but not a speculative check)
3308 a scheduling barrier for memory references. */
3312 && sel_insn_is_speculation_check (insn)))
3314 /* Keep the list a reasonable size. */
3315 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3316 flush_pending_lists (deps, insn, true, true);
3319 deps->last_pending_memory_flush
3320 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
3321 /* Signal to sched_analyze_insn that this jump stands
3322 just for itself, not for any other pending memory
3323 reads/writes that flush_pending_lists had to flush. */
3324 PUT_REG_NOTE_KIND (deps->last_pending_memory_flush,
3325 NON_FLUSH_JUMP_KIND);
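/* Illustrative effect (a sketch): consumers of this list, such as the
   memory-read analysis in sched_analyze_2, can test NON_FLUSH_JUMP_P on
   the entry and then treat the jump as an ordinary control hazard, or a
   control-speculative one, instead of as a full memory flush. */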
3329 sched_analyze_insn (deps, PATTERN (insn), insn);
3331 else if (CALL_P (insn))
3335 CANT_MOVE (insn) = 1;
3337 if (find_reg_note (insn, REG_SETJMP, NULL))
3339 /* This is setjmp. Assume that all registers, not just
3340 hard registers, may be clobbered by this call. */
3341 reg_pending_barrier = MOVE_BARRIER;
3345 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3346 /* A call may read and modify global register variables. */
3349 SET_REGNO_REG_SET (reg_pending_sets, i);
3350 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3352 /* Other call-clobbered hard regs may be clobbered.
3353 Since we only have a choice between 'might be clobbered'
3354 and 'definitely not clobbered', we must include all
3355 partly call-clobbered registers here. */
3356 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3357 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3358 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3359 /* We don't know what set of fixed registers might be used
3360 by the function, but it is certain that the stack pointer
3361 is among them, so be conservative. */
3362 else if (fixed_regs[i])
3363 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3364 /* The frame pointer is normally not used by the function
3365 itself, but by the debugger. */
3366 /* ??? MIPS o32 is an exception. It uses the frame pointer
3367 in the macro expansion of jal but does not represent this
3368 fact in the call_insn rtl. */
3369 else if (i == FRAME_POINTER_REGNUM
3370 || (i == HARD_FRAME_POINTER_REGNUM
3371 && (! reload_completed || frame_pointer_needed)))
3372 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3375 /* For each insn which shouldn't cross a call, add a dependence
3376 between that insn and this call insn. */
3377 add_dependence_list_and_free (deps, insn,
3378 &deps->sched_before_next_call, 1,
3381 sched_analyze_insn (deps, PATTERN (insn), insn);
3383 /* If CALL would be in a sched group, then this will violate
3384 the convention that sched group insns have dependencies only on the
3385 previous instruction.
3387 Of course one can say: "Hey! What about head of the sched group?"
3388 And I will answer: "Basic principles (one dep per insn) are always the same." */
3390 gcc_assert (!SCHED_GROUP_P (insn));
3392 /* In the absence of interprocedural alias analysis, we must flush
3393 all pending reads and writes, and start new dependencies starting
3394 from here. But only flush writes for constant calls (which may
3395 be passed a pointer to something we haven't written yet). */
3396 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3398 if (!deps->readonly)
3400 /* Remember the last function call for limiting lifetimes. */
3401 free_INSN_LIST_list (&deps->last_function_call);
3402 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3404 if (call_may_noreturn_p (insn))
3406 /* Remember the last function call that might not always return
3407 normally for limiting moves of trapping insns. */
3408 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3409 deps->last_function_call_may_noreturn
3410 = alloc_INSN_LIST (insn, NULL_RTX);
3413 /* Before reload, begin a post-call group, so as to keep the
3414 lifetimes of hard registers correct. */
3415 if (! reload_completed)
3416 deps->in_post_call_group_p = post_call;
3420 if (sched_deps_info->use_cselib)
3421 cselib_process_insn (insn);
3423 /* EH_REGION insn notes cannot appear until well after we complete scheduling. */
3426 gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
3427 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
3429 if (sched_deps_info->finish_insn)
3430 sched_deps_info->finish_insn ();
3432 /* Fixup the dependencies in the sched group. */
3433 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3434 && SCHED_GROUP_P (insn) && !sel_sched_p ())
3435 fixup_sched_groups (insn);
3438 /* Initialize DEPS for the new block beginning with HEAD. */
3440 deps_start_bb (struct deps_desc *deps, rtx head)
3442 gcc_assert (!deps->readonly);
3444 /* Before reload, if the previous block ended in a call, show that
3445 we are inside a post-call group, so as to keep the lifetimes of
3446 hard registers correct. */
3447 if (! reload_completed && !LABEL_P (head))
3449 rtx insn = prev_nonnote_nondebug_insn (head);
3451 if (insn && CALL_P (insn))
3452 deps->in_post_call_group_p = post_call_initial;
3456 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3457 dependencies for each insn. */
3459 sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
3463 if (sched_deps_info->use_cselib)
3464 cselib_init (CSELIB_RECORD_MEMORY);
3466 deps_start_bb (deps, head);
3468 for (insn = head;; insn = NEXT_INSN (insn))
3473 /* And initialize deps_lists. */
3474 sd_init_insn (insn);
3477 deps_analyze_insn (deps, insn);
3481 if (sched_deps_info->use_cselib)
3489 /* Helper for sched_free_deps ().
3490 Delete INSN's (RESOLVED_P) backward dependencies. */
3492 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3494 sd_iterator_def sd_it;
3496 sd_list_types_def types;
3499 types = SD_LIST_RES_BACK;
3501 types = SD_LIST_BACK;
3503 for (sd_it = sd_iterator_start (insn, types);
3504 sd_iterator_cond (&sd_it, &dep);)
3506 dep_link_t link = *sd_it.linkp;
3507 dep_node_t node = DEP_LINK_NODE (link);
3508 deps_list_t back_list;
3509 deps_list_t forw_list;
3511 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3512 remove_from_deps_list (link, back_list);
3513 delete_dep_node (node);
3517 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with deps_lists. */
3520 sched_free_deps (rtx head, rtx tail, bool resolved_p)
3523 rtx next_tail = NEXT_INSN (tail);
3525 /* We make two passes since some insns may be scheduled before their
3526 dependencies are resolved. */
3527 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3528 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3530 /* Clear forward deps and leave the dep_nodes to the
3531 corresponding back_deps list. */
3533 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3535 clear_deps_list (INSN_FORW_DEPS (insn));
3537 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3538 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3540 /* Clear resolved back deps together with their dep_nodes. */
3541 delete_dep_nodes_in_back_deps (insn, resolved_p);
3543 sd_finish_insn (insn);
3547 /* Initialize variables for region data dependence analysis.
3548 When LAZY_REG_LAST is true, do not allocate reg_last array
3549 of struct deps_desc immediately. */
3552 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3554 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3556 deps->max_reg = max_reg;
3558 deps->reg_last = NULL;
3560 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3561 INIT_REG_SET (&deps->reg_last_in_use);
3563 deps->pending_read_insns = 0;
3564 deps->pending_read_mems = 0;
3565 deps->pending_write_insns = 0;
3566 deps->pending_write_mems = 0;
3567 deps->pending_read_list_length = 0;
3568 deps->pending_write_list_length = 0;
3569 deps->pending_flush_length = 0;
3570 deps->last_pending_memory_flush = 0;
3571 deps->last_function_call = 0;
3572 deps->last_function_call_may_noreturn = 0;
3573 deps->sched_before_next_call = 0;
3574 deps->in_post_call_group_p = not_post_call;
3575 deps->last_debug_insn = 0;
3576 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3580 /* Init only the reg_last field of DEPS, which was not allocated before
3581 because we initialized DEPS lazily. */
3583 init_deps_reg_last (struct deps_desc *deps)
3585 gcc_assert (deps && deps->max_reg > 0);
3586 gcc_assert (deps->reg_last == NULL);
3588 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3592 /* Free insn lists found in DEPS. */
3595 free_deps (struct deps_desc *deps)
3598 reg_set_iterator rsi;
3600 /* We set max_reg to 0 when this context was already freed. */
3601 if (deps->max_reg == 0)
3603 gcc_assert (deps->reg_last == NULL);
3608 free_INSN_LIST_list (&deps->pending_read_insns);
3609 free_EXPR_LIST_list (&deps->pending_read_mems);
3610 free_INSN_LIST_list (&deps->pending_write_insns);
3611 free_EXPR_LIST_list (&deps->pending_write_mems);
3612 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3614 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3615 times. For a testcase with 42000 regs and 8000 small basic blocks,
3616 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3617 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3619 struct deps_reg *reg_last = &deps->reg_last[i];
3621 free_INSN_LIST_list (&reg_last->uses);
3623 free_INSN_LIST_list (&reg_last->sets);
3624 if (reg_last->implicit_sets)
3625 free_INSN_LIST_list (&reg_last->implicit_sets);
3626 if (reg_last->clobbers)
3627 free_INSN_LIST_list (&reg_last->clobbers);
3629 CLEAR_REG_SET (&deps->reg_last_in_use);
3631 /* As we initialize reg_last lazily, it is possible that we didn't allocate it at all. */
3633 free (deps->reg_last);
3634 deps->reg_last = NULL;
3639 /* Remove INSN from dependence contexts DEPS. */
3641 remove_from_deps (struct deps_desc *deps, rtx insn)
3645 reg_set_iterator rsi;
3647 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3648 &deps->pending_read_mems);
3649 if (!DEBUG_INSN_P (insn))
3650 deps->pending_read_list_length -= removed;
3651 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3652 &deps->pending_write_mems);
3653 deps->pending_write_list_length -= removed;
3654 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3655 deps->pending_flush_length -= removed;
3657 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3659 struct deps_reg *reg_last = &deps->reg_last[i];
3661 remove_from_dependence_list (insn, &reg_last->uses);
3663 remove_from_dependence_list (insn, &reg_last->sets);
3664 if (reg_last->implicit_sets)
3665 remove_from_dependence_list (insn, &reg_last->implicit_sets);
3666 if (reg_last->clobbers)
3667 remove_from_dependence_list (insn, &reg_last->clobbers);
3668 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3669 && !reg_last->clobbers)
3670 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3675 remove_from_dependence_list (insn, &deps->last_function_call);
3676 remove_from_dependence_list (insn,
3677 &deps->last_function_call_may_noreturn);
3679 remove_from_dependence_list (insn, &deps->sched_before_next_call);
3682 /* Init deps data vector. */
3684 init_deps_data_vector (void)
3686 int reserve = (sched_max_luid + 1
3687 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
3689 && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
3690 VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
3691 3 * sched_max_luid / 2);
3694 /* If it is profitable to use them, initialize or extend (depending on
3695 GLOBAL_P) dependency data. */
3697 sched_deps_init (bool global_p)
3699 /* Average number of insns in the basic block.
3700 '+ 1' is used to make it nonzero. */
3701 int insns_in_block = sched_max_luid / n_basic_blocks + 1;
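/* Worked example (illustrative): with sched_max_luid == 5000 and
   n_basic_blocks == 8, insns_in_block is 626, which is above the
   100 * 5 threshold tested below, so the dependency caches are worth
   their memory cost and get enabled. */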
3703 init_deps_data_vector ();
3705 /* We use another caching mechanism for selective scheduling, so
3706 we don't use this one. */
3707 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
3709 /* ?!? We could save some memory by computing a per-region luid mapping
3710 which could reduce both the number of vectors in the cache and the
3711 size of each vector. Instead we just avoid the cache entirely unless
3712 the average number of instructions in a basic block is very high. See
3713 the comment before the declaration of true_dependency_cache for
3714 what we consider "very high". */
3716 extend_dependency_caches (sched_max_luid, true);
3721 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
3722 /* Allocate lists for one block at a time. */
3724 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
3725 /* Allocate nodes for one block at a time.
3726 We assume that average insn has 5 dependencies. */
3728 5 * insns_in_block);
3733 /* Create or extend (depending on CREATE_P) dependency caches to size N. */
3736 extend_dependency_caches (int n, bool create_p)
3738 if (create_p || true_dependency_cache)
3740 int i, luid = cache_size + n;
3742 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
3744 output_dependency_cache = XRESIZEVEC (bitmap_head,
3745 output_dependency_cache, luid);
3746 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
3749 if (current_sched_info->flags & DO_SPECULATION)
3750 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
3753 for (i = cache_size; i < luid; i++)
3755 bitmap_initialize (&true_dependency_cache[i], 0);
3756 bitmap_initialize (&output_dependency_cache[i], 0);
3757 bitmap_initialize (&anti_dependency_cache[i], 0);
3759 if (current_sched_info->flags & DO_SPECULATION)
3760 bitmap_initialize (&spec_dependency_cache[i], 0);
3766 /* Finalize dependency information for the whole function. */
3768 sched_deps_finish (void)
3770 gcc_assert (deps_pools_are_empty_p ());
3771 free_alloc_pool_if_empty (&dn_pool);
3772 free_alloc_pool_if_empty (&dl_pool);
3773 gcc_assert (dn_pool == NULL && dl_pool == NULL);
3775 VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
3778 if (true_dependency_cache)
3782 for (i = 0; i < cache_size; i++)
3784 bitmap_clear (&true_dependency_cache[i]);
3785 bitmap_clear (&output_dependency_cache[i]);
3786 bitmap_clear (&anti_dependency_cache[i]);
3788 if (sched_deps_info->generate_spec_deps)
3789 bitmap_clear (&spec_dependency_cache[i]);
3791 free (true_dependency_cache);
3792 true_dependency_cache = NULL;
3793 free (output_dependency_cache);
3794 output_dependency_cache = NULL;
3795 free (anti_dependency_cache);
3796 anti_dependency_cache = NULL;
3798 if (sched_deps_info->generate_spec_deps)
3800 free (spec_dependency_cache);
3801 spec_dependency_cache = NULL;
3807 /* Initialize some global variables needed by the dependency analysis code. */
3811 init_deps_global (void)
3813 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3814 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3815 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
3816 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
3817 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
3818 reg_pending_barrier = NOT_A_BARRIER;
3820 if (!sel_sched_p () || sched_emulate_haifa_p)
3822 sched_deps_info->start_insn = haifa_start_insn;
3823 sched_deps_info->finish_insn = haifa_finish_insn;
3825 sched_deps_info->note_reg_set = haifa_note_reg_set;
3826 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
3827 sched_deps_info->note_reg_use = haifa_note_reg_use;
3829 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
3830 sched_deps_info->note_dep = haifa_note_dep;
3834 /* Free everything used by the dependency analysis code. */
3837 finish_deps_global (void)
3839 FREE_REG_SET (reg_pending_sets);
3840 FREE_REG_SET (reg_pending_clobbers);
3841 FREE_REG_SET (reg_pending_uses);
3844 /* Estimate the weakness of dependence between MEM1 and MEM2. */
3846 estimate_dep_weak (rtx mem1, rtx mem2)
3851 /* MEMs are the same - don't speculate. */
3852 return MIN_DEP_WEAK;
3854 r1 = XEXP (mem1, 0);
3855 r2 = XEXP (mem2, 0);
3858 || (REG_P (r1) && REG_P (r2)
3859 && REGNO (r1) == REGNO (r2)))
3860 /* Again, MEMs are the same. */
3861 return MIN_DEP_WEAK;
3862 else if ((REG_P (r1) && !REG_P (r2))
3863 || (!REG_P (r1) && REG_P (r2)))
3864 /* Different addressing modes - reason to be more speculative than usual. */
3866 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
3868 /* We can't say anything about the dependence. */
3869 return UNCERTAIN_DEP_WEAK;
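/* Worked example (illustrative): for mem1 == (mem (reg 100)) and
   mem2 == (mem (symbol_ref "x")), the addressing modes differ, so the
   result is halfway between UNCERTAIN_DEP_WEAK and NO_DEP_WEAK, a hint
   that speculating the pair apart is comparatively safe. */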
3872 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
3873 This function can handle same INSN and ELEM (INSN == ELEM).
3874 It is a convenience wrapper. */
3876 add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
3881 if (dep_type == REG_DEP_TRUE)
3883 else if (dep_type == REG_DEP_OUTPUT)
3887 gcc_assert (dep_type == REG_DEP_ANTI);
3891 /* When add_dependence is called from inside sched-deps.c, we expect
3892 cur_insn to be non-null. */
3893 internal = cur_insn != NULL;
3895 gcc_assert (insn == cur_insn);
3899 note_dep (elem, ds);
3904 /* Return weakness of speculative type TYPE in the dep_status DS. */
3906 get_dep_weak_1 (ds_t ds, ds_t type)
3912 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
3913 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
3914 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
3915 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
3916 default: gcc_unreachable ();
3923 get_dep_weak (ds_t ds, ds_t type)
3925 dw_t dw = get_dep_weak_1 (ds, type);
3927 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3931 /* Return the dep_status, which has the same parameters as DS, except for
3932 speculative type TYPE, which will have weakness DW. */
3934 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
3936 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3941 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
3942 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
3943 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
3944 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
3945 default: gcc_unreachable ();
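/* Illustrative round trip (a sketch, assuming the BEGIN_DATA field of
   DS was previously clear): after

       ds = set_dep_weak (DEP_TRUE, BEGIN_DATA, dw);

   get_dep_weak (ds, BEGIN_DATA) yields dw again; each speculative type
   stores its weakness in its own bit-field of the ds_t. */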
3950 /* Return the join of two dep_statuses DS1 and DS2.
3951 If MAX_P is true then choose the greater probability,
3952 otherwise multiply probabilities.
3953 This function assumes that both DS1 and DS2 contain speculative bits. */
3955 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
3959 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
3961 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
3963 t = FIRST_SPEC_TYPE;
3966 if ((ds1 & t) && !(ds2 & t))
3968 else if (!(ds1 & t) && (ds2 & t))
3970 else if ((ds1 & t) && (ds2 & t))
3972 dw_t dw1 = get_dep_weak (ds1, t);
3973 dw_t dw2 = get_dep_weak (ds2, t);
3978 dw = ((ds_t) dw1) * ((ds_t) dw2);
3980 if (dw < MIN_DEP_WEAK)
3991 ds = set_dep_weak (ds, t, (dw_t) dw);
3994 if (t == LAST_SPEC_TYPE)
3996 t <<= SPEC_TYPE_SHIFT;
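/* Worked example (illustrative): merging two BEGIN_DATA statuses with
   weaknesses dw1 == dw2 == MAX_DEP_WEAK / 2 and MAX_P false gives
   roughly (dw1 * dw2) / MAX_DEP_WEAK == MAX_DEP_WEAK / 4: stacking two
   independent speculations multiplies their success probabilities. */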
4003 /* Return the join of two dep_statuses DS1 and DS2.
4004 This function assumes that both DS1 and DS2 contain speculative bits. */
4006 ds_merge (ds_t ds1, ds_t ds2)
4008 return ds_merge_1 (ds1, ds2, false);
4011 /* Return the join of two dep_statuses DS1 and DS2. */
4013 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4015 ds_t new_status = ds | ds2;
4017 if (new_status & SPECULATIVE)
4019 if ((ds && !(ds & SPECULATIVE))
4020 || (ds2 && !(ds2 & SPECULATIVE)))
4021 /* Then this dep can't be speculative. */
4022 new_status &= ~SPECULATIVE;
4025 /* Both are speculative. Merging probabilities. */
4030 dw = estimate_dep_weak (mem1, mem2);
4031 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4039 new_status = ds_merge (ds2, ds);
4046 /* Return the join of DS1 and DS2. Use maximum instead of multiplying probabilities. */
4049 ds_max_merge (ds_t ds1, ds_t ds2)
4051 if (ds1 == 0 && ds2 == 0)
4054 if (ds1 == 0 && ds2 != 0)
4057 if (ds1 != 0 && ds2 == 0)
4060 return ds_merge_1 (ds1, ds2, true);
4063 /* Return the probability of speculation success for the speculation status DS. */
4071 dt = FIRST_SPEC_TYPE;
4076 res *= (ds_t) get_dep_weak (ds, dt);
4080 if (dt == LAST_SPEC_TYPE)
4082 dt <<= SPEC_TYPE_SHIFT;
4088 res /= MAX_DEP_WEAK;
4090 if (res < MIN_DEP_WEAK)
4093 gcc_assert (res <= MAX_DEP_WEAK);
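/* Worked example (illustrative): a status with BEGIN_DATA weakness
   3 * MAX_DEP_WEAK / 4 and BEGIN_CONTROL weakness MAX_DEP_WEAK / 2
   yields res == 3 * MAX_DEP_WEAK / 8, the combined chance that both
   speculations succeed. */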
4098 /* Return a dep status that contains all speculation types of DS. */
4100 ds_get_speculation_types (ds_t ds)
4102 if (ds & BEGIN_DATA)
4104 if (ds & BE_IN_DATA)
4106 if (ds & BEGIN_CONTROL)
4107 ds |= BEGIN_CONTROL;
4108 if (ds & BE_IN_CONTROL)
4109 ds |= BE_IN_CONTROL;
4111 return ds & SPECULATIVE;
4114 /* Return a dep status that contains maximal weakness for each speculation
4115 type present in DS. */
4117 ds_get_max_dep_weak (ds_t ds)
4119 if (ds & BEGIN_DATA)
4120 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4121 if (ds & BE_IN_DATA)
4122 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4123 if (ds & BEGIN_CONTROL)
4124 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4125 if (ds & BE_IN_CONTROL)
4126 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4131 /* Dump information about the dependence status S. */
4133 dump_ds (FILE *f, ds_t s)
4138 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4140 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4141 if (s & BEGIN_CONTROL)
4142 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4143 if (s & BE_IN_CONTROL)
4144 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4147 fprintf (f, "HARD_DEP; ");
4150 fprintf (f, "DEP_TRUE; ");
4152 fprintf (f, "DEP_ANTI; ");
4154 fprintf (f, "DEP_OUTPUT; ");
4162 dump_ds (stderr, s);
4163 fprintf (stderr, "\n");
4166 #ifdef ENABLE_CHECKING
4167 /* Verify that dependence type and status are consistent.
4168 If RELAXED_P is true, then skip dep_weakness checks. */
4170 check_dep (dep_t dep, bool relaxed_p)
4172 enum reg_note dt = DEP_TYPE (dep);
4173 ds_t ds = DEP_STATUS (dep);
4175 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4177 if (!(current_sched_info->flags & USE_DEPS_LIST))
4179 gcc_assert (ds == 0);
4183 /* Check that dependence type contains the same bits as the status. */
4184 if (dt == REG_DEP_TRUE)
4185 gcc_assert (ds & DEP_TRUE);
4186 else if (dt == REG_DEP_OUTPUT)
4187 gcc_assert ((ds & DEP_OUTPUT)
4188 && !(ds & DEP_TRUE));
4190 gcc_assert ((dt == REG_DEP_ANTI)
4192 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4194 /* HARD_DEP can not appear in dep_status of a link. */
4195 gcc_assert (!(ds & HARD_DEP));
4197 /* Check that dependence status is set correctly when speculation is not supported by the target. */
4199 if (!sched_deps_info->generate_spec_deps)
4200 gcc_assert (!(ds & SPECULATIVE));
4201 else if (ds & SPECULATIVE)
4205 ds_t type = FIRST_SPEC_TYPE;
4207 /* Check that dependence weakness is in proper range. */
4211 get_dep_weak (ds, type);
4213 if (type == LAST_SPEC_TYPE)
4215 type <<= SPEC_TYPE_SHIFT;
4220 if (ds & BEGIN_SPEC)
4222 /* Only true dependence can be data speculative. */
4223 if (ds & BEGIN_DATA)
4224 gcc_assert (ds & DEP_TRUE);
4226 /* Control dependencies in the insn scheduler are represented by
4227 anti-dependencies, therefore only anti dependence can be
4228 control speculative. */
4229 if (ds & BEGIN_CONTROL)
4230 gcc_assert (ds & DEP_ANTI);
4234 /* Subsequent speculations should resolve true dependencies. */
4235 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4238 /* Check that true and anti dependencies can't have other speculative statuses. */
4241 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4242 /* An output dependence can't be speculative at all. */
4243 gcc_assert (!(ds & DEP_OUTPUT));
4245 gcc_assert (ds & BEGIN_CONTROL);
4248 #endif /* ENABLE_CHECKING */
4250 #endif /* INSN_SCHEDULING */