/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "emit-rtl.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
#include "ira.h"
#include "target.h"

#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
#define CHECK (false)
#endif
/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}
/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
}

/* Init DEP with the arguments.
   While most of the scheduler (including targets) only needs the major type
   of the dependency, it is convenient to hide the full dep_status from it.  */
void
init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}
/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}
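
/* Example (for illustration only, compiled out): how a client of this
   interface typically builds a dependence on the stack and records it.
   sd_add_dep () copies the dep into a pooled dep_node, so a stack
   allocated dep_def is sufficient.  Assumes PRODUCER and CONSUMER are
   valid insns that have already been through sd_init_insn ().  */
#if 0
static void
example_record_true_dep (rtx producer, rtx consumer)
{
  dep_def _dep, *dep = &_dep;

  /* Fill in all four fields: producer, consumer, type and status.  */
  init_dep (dep, producer, consumer, REG_DEP_TRUE);

  /* Attach the dependence to both insns' unresolved lists.  */
  sd_add_dep (dep, false);
}
#endif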
static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
		      | DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;

	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;

	case REG_DEP_CONTROL:
	  t = 'c';
	  break;

	case REG_DEP_ANTI:
	  t = 'a';
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}
/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}
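
/* Usage sketch (for illustration only, compiled out): the FLAGS
   argument of dump_dep () selects which fields get printed; passing 1
   requests everything, as sd_debug_dep () does above.  */
#if 0
static void
example_dump_dep_type_only (FILE *f, dep_t dep)
{
  /* Print just the producer uid and the dependence type.  */
  dump_dep (f, dep, DUMP_DEP_PRO | DUMP_DEP_TYPE);
}
#endif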
/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}
/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}
/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}
/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}
/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}
/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}
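
/* Sketch (for illustration only, compiled out): a manual walk of a
   deps_list using the link accessors above.  Every link belongs to a
   dep_node, from which the dep_t payload is reachable.  */
#if 0
static int
example_count_links (deps_list_t l)
{
  dep_link_t link;
  int n = 0;

  for (link = DEPS_LIST_FIRST (l); link != NULL; link = DEP_LINK_NEXT (link))
    {
      dep_t dep = DEP_NODE_DEP (DEP_LINK_NODE (link));

      /* DEP_PRO (dep) and DEP_CON (dep) are available here.  */
      (void) dep;
      n++;
    }

  /* Note: DEPS_LIST_N_LINKS (l) may be smaller than N, because
     dependencies involving debug insns are deliberately not counted
     there.  */
  return n;
}
#endif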
/* Pool to hold all dependency nodes (dep_node_t).  */
static alloc_pool dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}
/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  --dn_pool_diff;

  pool_free (dn_pool, n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static alloc_pool dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;
/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  pool_free (dl_pool, l);
}
/* Return true if there are no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
	break;

      remove_from_deps_list (link, l);
    }
  while (1);
}
/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    {
      if (DEP_STATUS (dep) & SPECULATIVE)
	return true;
    }
  if (current_sched_info->flags & DO_PREDICATION)
    {
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	return true;
    }
  return false;
}

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;
/* Hard registers implicitly clobbered or used (or that may be implicitly
   clobbered or used) by the currently analyzed insn.  For example, an
   insn may have a constraint requiring a particular register class:
   even if no hard register appears in the insn yet, the required hard
   register will be in the insn after the reload pass, because the
   constraint demands it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  Whenever the bitmaps for the true dependency cache
   are allocated, the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *control_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;
static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx, rtx, enum reg_note);
static void add_dependence_list (rtx, rtx, int, enum reg_note);
static void add_dependence_list_and_free (struct deps_desc *, rtx,
					  rtx *, int, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);

static void flush_pending_lists (struct deps_desc *, rtx, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx);

static bool sched_has_condition_p (const_rtx);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
#endif
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */
static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const_rtx insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}
/* Return the condition under which INSN does not execute (i.e. the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const_rtx insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
			     XEXP (cond, 0),
			     XEXP (cond, 1));
    }
  return cond;
}
/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const_rtx insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}
/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const_rtx insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	 (rev1 == rev2
	  ? reversed_comparison_code (cond2, NULL)
	  : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}
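
/* Worked example (for illustration only, compiled out): for a pair of
   COND_EXEC insns guarded by (eq (reg cc) (const_int 0)) and
   (ne (reg cc) (const_int 0)), conditions_mutex_p () recognizes the
   reversed comparison codes over identical operands, so the predicate
   below returns true and no dependence needs to be created between
   the two insns.  */
#if 0
static bool
example_never_conflict (rtx cond_exec_insn1, rtx cond_exec_insn2)
{
  /* True when the guarding conditions are provably exclusive and
     neither insn modifies the other's condition.  */
  return sched_insns_conditions_mutex_p (cond_exec_insn1, cond_exec_insn2);
}
#endif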
/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along with it.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If instruction might fault, it cannot be speculatively scheduled.
	   For control speculation it's obvious why and for data speculation
	   it's because the insn might get wrong input if speculation
	   wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}
/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}
/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
	size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}
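
/* Sketch (for illustration only, compiled out): the same count can be
   obtained through the iterator or through sd_lists_size ();
   SD_LIST_BACK combines the hard and speculative backward lists.  */
#if 0
static void
example_back_deps_counts (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int n = 0;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    n++;

  /* N counts every link, while sd_lists_size () relies on
     DEPS_LIST_N_LINKS, which excludes dependencies of debug insns;
     the two therefore agree only in the absence of debug insns.  */
  fprintf (stderr, "iterated: %d, recorded: %d\n",
	   n, sd_lists_size (insn, SD_LIST_BACK));
}
#endif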
/* Initialize data for INSN.  */
void
sd_init_insn (rtx insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches, if available, to check whether any dependency
   is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none was found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
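
/* Usage sketch (for illustration only, compiled out): querying whether
   CON already depends on PRO before creating a new dependence by hand.
   The cache test above makes the negative answer cheap.  */
#if 0
static bool
example_already_depends_p (rtx pro, rtx con)
{
  return sd_find_dep_between (pro, con, false) != NULL;
}
#endif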
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks if producer and consumer are the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
	/* INSN has an internal dependence, which we can't overcome.  */
	HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}
/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	       ..we should update DEP_STATUS
	     else
	       ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}
/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}
/* The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear corresponding cache entry because type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}
/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}
/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either existing dep or a dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);
#endif

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep, so it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}
/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}
/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef ENABLE_CHECKING
  check_dep (dep, false);
#endif

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}
/* Add or update a backward dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}
/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}
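
/* Sketch (for illustration only, compiled out), modeled on what the
   Haifa scheduler does when INSN is issued: every forward dependence
   of INSN is moved to the resolved lists, which unblocks the
   consumers.  Resolving removes the current link from the list being
   iterated, so the iterator is deliberately not advanced.  */
#if 0
static void
example_resolve_forw_deps (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
       sd_iterator_cond (&sd_it, &dep);)
    sd_resolve_dep (sd_it);
}
#endif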
/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}
/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}
/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}
/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}
/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx con, rtx pro, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx real_pro = pro;
      rtx other = real_insn_for_shadow (real_pro);
      rtx cond;

      if (other != NULL_RTX)
	real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	{
	  HARD_REG_SET uses;
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;
	}
      if (dep_type == REG_DEP_CONTROL)
	{
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		     INSN_UID (real_pro));
	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			       REG_DEP_TRUE);
	}
    }

  add_dependence_1 (con, pro, dep_type);
}
/* A convenience wrapper to operate on an entire list.  */

static void
add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
{
  for (; list; list = XEXP (list, 1))
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
	add_dependence (insn, XEXP (list, 0), dep_type);
    }
}

/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  */

static void
add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
			      int uncond, enum reg_note dep_type)
{
  rtx list, next;

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    {
      add_dependence_list (insn, *listp, uncond, dep_type);
      return;
    }

  for (list = *listp, *listp = NULL; list ; list = next)
    {
      next = XEXP (list, 1);
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
	add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
}
/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx insn, rtx *listp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  removed++;
	  continue;
	}

      listp = &XEXP (*listp, 1);
    }

  return removed;
}

/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  remove_free_EXPR_LIST_node (exprp);
	  removed++;
	  continue;
	}

      listp = &XEXP (*listp, 1);
      exprp = &XEXP (*exprp, 1);
    }

  return removed;
}
/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
fixup_sched_groups (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx i = insn;
      rtx pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
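
/* Worked example (for illustration only, compiled out) of the
   classification above, using the alias-analysis predicates this file
   already relies on: a write following an earlier read is an anti
   dependence, and a write following an earlier write is an output
   dependence.  */
#if 0
static void
example_classify_store (rtx earlier_read_mem, rtx earlier_write_mem,
			rtx store_mem)
{
  if (anti_dependence (earlier_read_mem, store_mem))
    fprintf (stderr, "write after read: anti dependence\n");

  if (output_dependence (earlier_write_mem, store_mem))
    fprintf (stderr, "write after write: output dependence\n");
}
#endif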
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
			 rtx insn, rtx mem)
{
  rtx *insn_list;
  rtx *mem_list;
  rtx link;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0), GET_MODE (mem));
    }
  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = link;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
		     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
				    1, REG_DEP_ANTI);
      if (!deps->readonly)
	{
	  free_EXPR_LIST_list (&deps->pending_read_mems);
	  deps->pending_read_list_length = 0;
	}
    }

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);

  add_dependence_list_and_free (deps, insn,
				&deps->last_pending_memory_flush, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);

  add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
				REG_DEP_ANTI);

  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
}
/* Instruction whose dependencies we are analyzing.  */
static rtx cur_insn = NULL_RTX;

/* Implement hooks for haifa scheduler.  */

static void
haifa_start_insn (rtx insn)
{
  gcc_assert (insn && !cur_insn);

  cur_insn = insn;
}

static void
haifa_finish_insn (void)
{
  cur_insn = NULL;
}

void
haifa_note_reg_set (int regno)
{
  SET_REGNO_REG_SET (reg_pending_sets, regno);
}

void
haifa_note_reg_clobber (int regno)
{
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
}

void
haifa_note_reg_use (int regno)
{
  SET_REGNO_REG_SET (reg_pending_uses, regno);
}

static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
{
  if (!(ds & SPECULATIVE))
    {
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
		current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }
}

static void
haifa_note_dep (rtx elem, ds_t ds)
{
  dep_def _dep;
  dep_t dep = &_dep;

  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}
static void
note_reg_use (int r)
{
  if (sched_deps_info->note_reg_use)
    sched_deps_info->note_reg_use (r);
}

static void
note_reg_set (int r)
{
  if (sched_deps_info->note_reg_set)
    sched_deps_info->note_reg_set (r);
}

static void
note_reg_clobber (int r)
{
  if (sched_deps_info->note_reg_clobber)
    sched_deps_info->note_reg_clobber (r);
}

static void
note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
{
  if (sched_deps_info->note_mem_dep)
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
}

static void
note_dep (rtx e, ds_t ds)
{
  if (sched_deps_info->note_dep)
    sched_deps_info->note_dep (e, ds);
}
/* Return the reg_note corresponding to DS.  */
enum reg_note
ds_to_dt (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;
  else if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;
  else if (ds & DEP_ANTI)
    return REG_DEP_ANTI;
  else
    {
      gcc_assert (ds & DEP_CONTROL);
      return REG_DEP_CONTROL;
    }
}
/* Functions for computation of info needed for register pressure
   sensitive insn scheduling.  */

/* Allocate and return reg_use_data structure for REGNO and INSN.  */
static struct reg_use_data *
create_insn_reg_use (int regno, rtx insn)
{
  struct reg_use_data *use;

  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
  use->regno = regno;
  use->insn = insn;
  use->next_insn_use = INSN_REG_USE_LIST (insn);
  INSN_REG_USE_LIST (insn) = use;
  return use;
}

/* Allocate and return reg_set_data structure for REGNO and INSN.  */
static struct reg_set_data *
create_insn_reg_set (int regno, rtx insn)
{
  struct reg_set_data *set;

  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
  set->regno = regno;
  set->insn = insn;
  set->next_insn_set = INSN_REG_SET_LIST (insn);
  INSN_REG_SET_LIST (insn) = set;
  return set;
}
/* Set up insn register uses for INSN and dependency context DEPS.  */
static void
setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
{
  unsigned i;
  reg_set_iterator rsi;
  rtx list;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      if (i < FIRST_PSEUDO_REGISTER
	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
	continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
	/* Ignore a use that is not dying.  */
	continue;

      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (list = reg_last->uses; list; list = XEXP (list, 1))
	{
	  use2 = create_insn_reg_use (i, XEXP (list, 0));
	  next = use->next_regno_use;
	  use->next_regno_use = use2;
	  use2->next_regno_use = next;
	}
    }
}
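
/* Sketch (for illustration only, compiled out): the uses of a dying
   register built above form a circular list through next_regno_use,
   so all insns sharing the last use of a regno can be visited
   starting from any one of them.  */
#if 0
static void
example_walk_regno_uses (struct reg_use_data *start)
{
  struct reg_use_data *use = start;

  do
    {
      fprintf (stderr, "regno %d used in insn %d\n",
	       use->regno, INSN_UID (use->insn));
      use = use->next_regno_use;
    }
  while (use != start);
}
#endif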
/* Register pressure info for the currently processed insn.  */
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];

/* Return TRUE if INSN has the use structure for REGNO.  */
static bool
insn_use_p (rtx insn, int regno)
{
  struct reg_use_data *use;

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (use->regno == regno)
      return true;
  return false;
}
/* Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say correspondingly that
   the register is in clobber or unused after the insn.  */
static void
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
{
  int incr, new_incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      if (clobber_p)
	{
	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
	  reg_pressure_info[cl].clobber_increase = new_incr;
	}
      else if (unused_p)
	{
	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
	  reg_pressure_info[cl].unused_set_increase = new_incr;
	}
      else
	{
	  new_incr = reg_pressure_info[cl].set_increase + incr;
	  reg_pressure_info[cl].set_increase = new_incr;
	  if (! insn_use_p (insn, regno))
	    reg_pressure_info[cl].change += incr;
	  create_insn_reg_set (regno, insn);
	}
      gcc_assert (new_incr < (1 << INCREASE_BITS));
    }
}
/* Like mark_insn_pseudo_birth except that NREGS says how many
   hard registers are involved in the birth.  */
static void
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
			    bool clobber_p, bool unused_p)
{
  enum reg_class cl;
  int new_incr, last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_pressure_class[regno];
	  if (cl != NO_REGS)
	    {
	      if (clobber_p)
		{
		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
		  reg_pressure_info[cl].clobber_increase = new_incr;
		}
	      else if (unused_p)
		{
		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
		  reg_pressure_info[cl].unused_set_increase = new_incr;
		}
	      else
		{
		  new_incr = reg_pressure_info[cl].set_increase + 1;
		  reg_pressure_info[cl].set_increase = new_incr;
		  if (! insn_use_p (insn, regno))
		    reg_pressure_info[cl].change += 1;
		  create_insn_reg_set (regno, insn);
		}
	      gcc_assert (new_incr < (1 << INCREASE_BITS));
	    }
	}
      regno++;
    }
}
/* Update the register pressure info after birth of pseudo or hard
   register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say
   correspondingly that the register is in clobber or unused after the
   insn.  */
static void
mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_insn_hard_regno_birth (insn, regno,
				hard_regno_nregs[regno][GET_MODE (reg)],
				clobber_p, unused_p);
  else
    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
}
/* Update the register pressure info after death of pseudo register
   REGNO.  */
static void
mark_pseudo_death (int regno)
{
  int incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      reg_pressure_info[cl].change -= incr;
    }
}

/* Like mark_pseudo_death except that NREGS says how many hard
   registers are involved in the death.  */
static void
mark_hard_regno_death (int regno, int nregs)
{
  enum reg_class cl;
  int last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_pressure_class[regno];
	  if (cl != NO_REGS)
	    reg_pressure_info[cl].change -= 1;
	}
      regno++;
    }
}
/* Update the register pressure info after death of pseudo or hard
   register REG.  */
static void
mark_reg_death (rtx reg)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
  else
    mark_pseudo_death (regno);
}
/* Process SETTER of REG.  DATA is an insn containing the setter.  */
static void
mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
{
  if (setter != NULL_RTX && GET_CODE (setter) != SET)
    return;
  mark_insn_reg_birth
    ((rtx) data, reg, false,
     find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
}

/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
static void
mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
{
  if (GET_CODE (setter) == CLOBBER)
    mark_insn_reg_birth ((rtx) data, reg, true, false);
}
/* Set up reg pressure info related to INSN.  */
void
init_insn_reg_pressure_info (rtx insn)
{
  int i, len;
  enum reg_class cl;
  static struct reg_pressure_data *pressure_info;
  rtx link;

  gcc_assert (sched_pressure_p);

  if (! INSN_P (insn))
    return;

  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      reg_pressure_info[cl].clobber_increase = 0;
      reg_pressure_info[cl].set_increase = 0;
      reg_pressure_info[cl].unused_set_increase = 0;
      reg_pressure_info[cl].change = 0;
    }

  note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);

  note_stores (PATTERN (insn), mark_insn_reg_store, insn);

#ifdef AUTO_INC_DEC
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
#endif

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_DEAD)
      mark_reg_death (XEXP (link, 0));

  len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
  pressure_info
    = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
  INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
						  * sizeof (int), 1);
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      pressure_info[i].clobber_increase
	= reg_pressure_info[cl].clobber_increase;
      pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
      pressure_info[i].unused_set_increase
	= reg_pressure_info[cl].unused_set_increase;
      pressure_info[i].change = reg_pressure_info[cl].change;
    }
}
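
/* Usage sketch (for illustration only, compiled out): after
   init_insn_reg_pressure_info () the per-pressure-class increments
   are attached to the insn and can be read back; the array is indexed
   in the order of ira_pressure_classes.  */
#if 0
static void
example_dump_pressure (rtx insn)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    fprintf (stderr, "pressure class %d: change %d\n",
	     (int) ira_pressure_classes[i],
	     INSN_REG_PRESSURE (insn)[i].change);
}
#endif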
/* Internal variable for sched_analyze_[12] () functions.
   If it is nonzero, this means that sched_analyze_[12] is looking
   at the outermost (toplevel) SET.  */
static bool can_start_lhs_rhs_p;
/* Extend reg info for the deps context DEPS given that
   we have just generated a register numbered REGNO.  */
static void
extend_deps_reg_info (struct deps_desc *deps, int regno)
{
  int max_regno = regno + 1;

  gcc_assert (!reload_completed);

  /* In a readonly context, it would not hurt to extend info,
     but it should not be needed.  */
  if (reload_completed && deps->readonly)
    {
      deps->max_reg = max_regno;
      return;
    }

  if (max_regno > deps->max_reg)
    {
      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
				   max_regno);
      memset (&deps->reg_last[deps->max_reg],
	      0, (max_regno - deps->max_reg)
	      * sizeof (struct deps_reg));
      deps->max_reg = max_regno;
    }
}
/* Extends REG_INFO_P if needed.  */
void
maybe_extend_reg_info_p (void)
{
  /* Extend REG_INFO_P, if needed.  */
  if ((unsigned int) max_regno - 1 >= reg_info_p_size)
    {
      size_t new_reg_info_p_size = max_regno + 128;

      gcc_assert (!reload_completed && sel_sched_p ());

      reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
						    new_reg_info_p_size,
						    reg_info_p_size,
						    sizeof (*reg_info_p));
      reg_info_p_size = new_reg_info_p_size;
    }
}
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */

static void
sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
		   enum rtx_code ref, rtx insn)
{
  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
  if (!reload_completed && sel_sched_p ()
      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
    extend_deps_reg_info (deps, regno);

  maybe_extend_reg_info_p ();

  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i = hard_regno_nregs[regno][mode];
      if (ref == SET)
	{
	  while (--i >= 0)
	    note_reg_set (regno + i);
	}
      else if (ref == USE)
	{
	  while (--i >= 0)
	    note_reg_use (regno + i);
	}
      else
	{
	  while (--i >= 0)
	    note_reg_clobber (regno + i);
	}
    }

  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
    {
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);
    }

  else
    {
      if (ref == SET)
	note_reg_set (regno);
      else if (ref == USE)
	note_reg_use (regno);
      else
	note_reg_clobber (regno);

      /* Pseudos that are REG_EQUIV to something may be replaced
	 by that during reloading.  We need only add dependencies for
	 the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
	{
	  rtx t = get_reg_known_value (regno);
	  if (MEM_P (t))
	    sched_analyze_2 (deps, XEXP (t, 0), insn);
	}

      /* Don't let it cross a call after scheduling if it doesn't
	 already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
	{
	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
	    deps->sched_before_next_call
	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
	  else
	    add_dependence_list (insn, deps->last_function_call, 1,
				 REG_DEP_ANTI);
	}
    }
}
2352 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2353 rtx, X, creating all dependencies generated by the write to the
2354 destination of X, and reads of everything mentioned. */
2357 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
2359 rtx dest = XEXP (x, 0);
2360 enum rtx_code code = GET_CODE (x);
2361 bool cslr_p = can_start_lhs_rhs_p;
2363 can_start_lhs_rhs_p = false;
2369 if (cslr_p && sched_deps_info->start_lhs)
2370 sched_deps_info->start_lhs (dest);
2372 if (GET_CODE (dest) == PARALLEL)
2376 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2377 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2378 sched_analyze_1 (deps,
2379 gen_rtx_CLOBBER (VOIDmode,
2380 XEXP (XVECEXP (dest, 0, i), 0)),
2383 if (cslr_p && sched_deps_info->finish_lhs)
2384 sched_deps_info->finish_lhs ();
2388 can_start_lhs_rhs_p = cslr_p;
2390 sched_analyze_2 (deps, SET_SRC (x), insn);
2392 can_start_lhs_rhs_p = false;
2398 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2399 || GET_CODE (dest) == ZERO_EXTRACT)
2400 {
2401 if (GET_CODE (dest) == STRICT_LOW_PART
2402 || GET_CODE (dest) == ZERO_EXTRACT
2403 || df_read_modify_subreg_p (dest))
2404 {
2405 /* These both read and modify the result. We must handle
2406 them as writes to get proper dependencies for following
2407 instructions. We must handle them as reads to get proper
2408 dependencies from this to previous instructions.
2409 Thus we need to call sched_analyze_2. */
2411 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2412 }
2413 if (GET_CODE (dest) == ZERO_EXTRACT)
2414 {
2415 /* The second and third arguments are values read by this insn. */
2416 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2417 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2418 }
2419 dest = XEXP (dest, 0);
2420 }
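/* Illustrative note (added commentary): for a destination such as
   (zero_extract (reg:SI 1) (const_int 8) (const_int 0)), the loop above
   records reads of the position and width operands and a read-modify of
   register 1, then strips the wrapper so the write is analyzed against
   register 1 itself.  */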
2422 if (REG_P (dest))
2423 {
2424 int regno = REGNO (dest);
2425 enum machine_mode mode = GET_MODE (dest);
2427 sched_analyze_reg (deps, regno, mode, code, insn);
2429 #ifdef STACK_REGS
2430 /* Treat all writes to a stack register as modifying the TOS. */
2431 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2432 {
2433 /* Avoid analyzing the same register twice. */
2434 if (regno != FIRST_STACK_REG)
2435 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2437 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2438 FIRST_STACK_REG);
2439 }
2440 #endif
2441 }
2442 else if (MEM_P (dest))
2443 {
2444 /* Writing memory. */
2445 rtx t = dest;
2447 if (sched_deps_info->use_cselib)
2448 {
2449 enum machine_mode address_mode
2450 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
2452 t = shallow_copy_rtx (dest);
2453 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2454 GET_MODE (t), insn);
2455 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0), GET_MODE (t));
2456 }
2457 t = canon_rtx (t);
2459 /* Pending lists can't get larger with a readonly context. */
2460 if (!deps->readonly
2461 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2462 > MAX_PENDING_LIST_LENGTH))
2463 {
2464 /* Flush all pending reads and writes to prevent the pending lists
2465 from getting any larger. Insn scheduling runs too slowly when
2466 these lists get long. When compiling GCC with itself,
2467 this flush occurs 8 times for sparc, and 10 times for m88k using
2468 the default value of 32. */
2469 flush_pending_lists (deps, insn, false, true);
2470 }
2471 else
2472 {
2473 rtx pending, pending_mem;
2475 pending = deps->pending_read_insns;
2476 pending_mem = deps->pending_read_mems;
2477 while (pending)
2478 {
2479 if (anti_dependence (XEXP (pending_mem, 0), t)
2480 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2481 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2482 DEP_ANTI);
2484 pending = XEXP (pending, 1);
2485 pending_mem = XEXP (pending_mem, 1);
2486 }
2488 pending = deps->pending_write_insns;
2489 pending_mem = deps->pending_write_mems;
2490 while (pending)
2491 {
2492 if (output_dependence (XEXP (pending_mem, 0), t)
2493 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2494 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2495 DEP_OUTPUT);
2497 pending = XEXP (pending, 1);
2498 pending_mem = XEXP (pending_mem, 1);
2499 }
2501 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2502 REG_DEP_ANTI);
2503 add_dependence_list (insn, deps->pending_jump_insns, 1,
2504 REG_DEP_CONTROL);
2506 if (!deps->readonly)
2507 add_insn_mem_dependence (deps, false, insn, dest);
2508 }
2509 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2510 }
2512 if (cslr_p && sched_deps_info->finish_lhs)
2513 sched_deps_info->finish_lhs ();
2515 /* Analyze reads. */
2516 if (GET_CODE (x) == SET)
2517 {
2518 can_start_lhs_rhs_p = cslr_p;
2520 sched_analyze_2 (deps, SET_SRC (x), insn);
2522 can_start_lhs_rhs_p = false;
2523 }
2524 }
2526 /* Analyze the uses of memory and registers in rtx X in INSN. */
2527 static void
2528 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2529 {
2530 int i;
2531 int j;
2532 enum rtx_code code;
2533 const char *fmt;
2534 bool cslr_p = can_start_lhs_rhs_p;
2536 can_start_lhs_rhs_p = false;
2542 if (cslr_p && sched_deps_info->start_rhs)
2543 sched_deps_info->start_rhs (x);
2545 code = GET_CODE (x);
2556 /* Ignore constants. */
2557 if (cslr_p && sched_deps_info->finish_rhs)
2558 sched_deps_info->finish_rhs ();
2560 return;
2562 #ifdef HAVE_cc0
2563 case CC0:
2564 /* User of CC0 depends on immediately preceding insn. */
2565 SCHED_GROUP_P (insn) = 1;
2566 /* Don't move CC0 setter to another block (it can set up the
2567 same flag for previous CC0 users which is safe). */
2568 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2570 if (cslr_p && sched_deps_info->finish_rhs)
2571 sched_deps_info->finish_rhs ();
2573 return;
2574 #endif
2576 case REG:
2577 {
2578 int regno = REGNO (x);
2579 enum machine_mode mode = GET_MODE (x);
2581 sched_analyze_reg (deps, regno, mode, USE, insn);
2583 #ifdef STACK_REGS
2584 /* Treat all reads of a stack register as modifying the TOS. */
2585 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2586 {
2587 /* Avoid analyzing the same register twice. */
2588 if (regno != FIRST_STACK_REG)
2589 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2590 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2591 }
2592 #endif
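/* Illustrative note (added commentary): modeling an x87 stack-register
   read as both a USE and a SET of FIRST_STACK_REG serializes it against
   every other stack-register access, since pushes and pops renumber the
   whole register stack.  */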
2594 if (cslr_p && sched_deps_info->finish_rhs)
2595 sched_deps_info->finish_rhs ();
2597 return;
2598 }
2600 case MEM:
2601 {
2602 /* Reading memory. */
2603 rtx u;
2604 rtx pending, pending_mem;
2605 rtx t = x;
2607 if (sched_deps_info->use_cselib)
2608 {
2609 enum machine_mode address_mode
2610 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
2612 t = shallow_copy_rtx (t);
2613 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2614 GET_MODE (t), insn);
2615 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0), GET_MODE (t));
2616 }
2618 if (!DEBUG_INSN_P (insn))
2619 {
2620 t = canon_rtx (t);
2621 pending = deps->pending_read_insns;
2622 pending_mem = deps->pending_read_mems;
2623 while (pending)
2624 {
2625 if (read_dependence (XEXP (pending_mem, 0), t)
2626 && ! sched_insns_conditions_mutex_p (insn,
2627 XEXP (pending, 0)))
2628 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2629 DEP_ANTI);
2631 pending = XEXP (pending, 1);
2632 pending_mem = XEXP (pending_mem, 1);
2633 }
2635 pending = deps->pending_write_insns;
2636 pending_mem = deps->pending_write_mems;
2637 while (pending)
2638 {
2639 if (true_dependence (XEXP (pending_mem, 0), VOIDmode, t)
2640 && ! sched_insns_conditions_mutex_p (insn,
2641 XEXP (pending, 0)))
2642 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2643 sched_deps_info->generate_spec_deps
2644 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2646 pending = XEXP (pending, 1);
2647 pending_mem = XEXP (pending_mem, 1);
2648 }
2650 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2651 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2653 for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
2654 if (deps_may_trap_p (x))
2655 {
2656 if ((sched_deps_info->generate_spec_deps)
2657 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2658 {
2659 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2660 MAX_DEP_WEAK);
2662 note_dep (XEXP (u, 0), ds);
2663 }
2664 else
2665 add_dependence (insn, XEXP (u, 0), REG_DEP_CONTROL);
2666 }
2667 }
2669 /* Always add these dependencies to pending_reads, since
2670 this insn may be followed by a write. */
2671 if (!deps->readonly)
2672 add_insn_mem_dependence (deps, true, insn, x);
2674 sched_analyze_2 (deps, XEXP (x, 0), insn);
2676 if (cslr_p && sched_deps_info->finish_rhs)
2677 sched_deps_info->finish_rhs ();
2679 return;
2680 }
2682 /* Force pending stores to memory in case a trap handler needs them. */
2683 case TRAP_IF:
2684 flush_pending_lists (deps, insn, true, false);
2685 break;
2687 case PREFETCH:
2688 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2689 reg_pending_barrier = TRUE_BARRIER;
2690 break;
2692 case UNSPEC_VOLATILE:
2693 flush_pending_lists (deps, insn, true, true);
2694 /* FALLTHRU */
2696 case ASM_OPERANDS:
2697 case ASM_INPUT:
2698 {
2699 /* Traditional and volatile asm instructions must be considered to use
2700 and clobber all hard registers, all pseudo-registers and all of
2701 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2703 Consider for instance a volatile asm that changes the fpu rounding
2704 mode. An insn should not be moved across this even if it only uses
2705 pseudo-regs because it might give an incorrectly rounded result. */
2706 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2707 reg_pending_barrier = TRUE_BARRIER;
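/* Illustrative example (added commentary): a volatile asm such as
   asm volatile ("fldcw %0" : : "m" (new_cw)) alters the FPU control
   word, so TRUE_BARRIER keeps floating-point insns from being reordered
   around it even though they mention only pseudo registers.  */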
2709 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2710 We can not just fall through here since then we would be confused
2711 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
2712 traditional asms unlike their normal usage. */
2714 if (code == ASM_OPERANDS)
2715 {
2716 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2717 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2718 }
2719 if (cslr_p && sched_deps_info->finish_rhs)
2720 sched_deps_info->finish_rhs ();
2722 return;
2723 }
2725 case PRE_DEC:
2726 case POST_DEC:
2727 case PRE_INC:
2728 case POST_INC:
2731 /* These both read and modify the result. We must handle them as writes
2732 to get proper dependencies for following instructions. We must handle
2733 them as reads to get proper dependencies from this to previous
2734 instructions. Thus we need to pass them to both sched_analyze_1
2735 and sched_analyze_2. We must call sched_analyze_2 first in order
2736 to get the proper antecedent for the read. */
2737 sched_analyze_2 (deps, XEXP (x, 0), insn);
2738 sched_analyze_1 (deps, x, insn);
2740 if (cslr_p && sched_deps_info->finish_rhs)
2741 sched_deps_info->finish_rhs ();
2743 return;
2745 case POST_MODIFY:
2746 case PRE_MODIFY:
2747 /* op0 = op0 + op1 */
2748 sched_analyze_2 (deps, XEXP (x, 0), insn);
2749 sched_analyze_2 (deps, XEXP (x, 1), insn);
2750 sched_analyze_1 (deps, x, insn);
2752 if (cslr_p && sched_deps_info->finish_rhs)
2753 sched_deps_info->finish_rhs ();
2755 return;
2757 default:
2758 break;
2759 }
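/* Illustrative note for the auto-increment/modify cases above (added
   commentary): an address like (mem:SI (pre_dec:SI (reg:SI sp))) both
   uses and sets sp, so the old value is recorded as a read via
   sched_analyze_2 and the new value as a write via sched_analyze_1.  */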
2761 /* Other cases: walk the insn. */
2762 fmt = GET_RTX_FORMAT (code);
2763 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2764 {
2765 if (fmt[i] == 'e')
2766 sched_analyze_2 (deps, XEXP (x, i), insn);
2767 else if (fmt[i] == 'E')
2768 for (j = 0; j < XVECLEN (x, i); j++)
2769 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2770 }
2772 if (cslr_p && sched_deps_info->finish_rhs)
2773 sched_deps_info->finish_rhs ();
2774 }
2776 /* Analyze an INSN with pattern X to find all dependencies. */
2777 static void
2778 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2779 {
2780 RTX_CODE code = GET_CODE (x);
2781 rtx link;
2782 unsigned i;
2783 reg_set_iterator rsi;
2785 if (! reload_completed)
2786 {
2787 HARD_REG_SET temp;
2789 extract_insn (insn);
2790 preprocess_constraints ();
2791 ira_implicitly_set_insn_hard_regs (&temp);
2792 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2793 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2794 }
2796 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2797 && code == SET);
2799 if (may_trap_p (x))
2800 /* Avoid moving trapping instructions across function calls that might
2801 not always return. */
2802 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2803 1, REG_DEP_ANTI);
2805 /* We must avoid creating a situation in which two successors of the
2806 current block have different unwind info after scheduling. If at any
2807 point the two paths re-join this leads to incorrect unwind info. */
2808 /* ??? There are certain situations involving a forced frame pointer in
2809 which, with extra effort, we could fix up the unwind info at a later
2810 CFG join. However, it seems better to notice these cases earlier
2811 during prologue generation and avoid marking the frame pointer setup
2812 as frame-related at all. */
2813 if (RTX_FRAME_RELATED_P (insn))
2814 {
2815 /* Make sure prologue insn is scheduled before next jump. */
2816 deps->sched_before_next_jump
2817 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2819 /* Make sure epilogue insn is scheduled after preceding jumps. */
2820 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI);
2821 }
2823 if (code == COND_EXEC)
2824 {
2825 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2827 /* ??? Should be recording conditions so we reduce the number of
2828 false dependencies. */
2829 x = COND_EXEC_CODE (x);
2830 code = GET_CODE (x);
2831 }
2832 if (code == SET || code == CLOBBER)
2833 {
2834 sched_analyze_1 (deps, x, insn);
2836 /* Bare clobber insns are used for letting life analysis, reg-stack
2837 and others know that a value is dead. Depend on the last call
2838 instruction so that reg-stack won't get confused. */
2839 if (code == CLOBBER)
2840 add_dependence_list (insn, deps->last_function_call, 1,
2841 REG_DEP_ANTI);
2842 }
2843 else if (code == PARALLEL)
2844 {
2845 for (i = XVECLEN (x, 0); i--;)
2846 {
2847 rtx sub = XVECEXP (x, 0, i);
2848 code = GET_CODE (sub);
2850 if (code == COND_EXEC)
2851 {
2852 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2853 sub = COND_EXEC_CODE (sub);
2854 code = GET_CODE (sub);
2855 }
2856 if (code == SET || code == CLOBBER)
2857 sched_analyze_1 (deps, sub, insn);
2858 else
2859 sched_analyze_2 (deps, sub, insn);
2860 }
2861 }
2862 else
2863 sched_analyze_2 (deps, x, insn);
2865 /* Mark registers CLOBBERED or used by called function. */
2866 if (CALL_P (insn))
2867 {
2868 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2869 {
2870 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2871 sched_analyze_1 (deps, XEXP (link, 0), insn);
2872 else
2873 sched_analyze_2 (deps, XEXP (link, 0), insn);
2874 }
2875 /* Don't schedule anything after a tail call, tail call needs
2876 to use at least all call-saved registers. */
2877 if (SIBLING_CALL_P (insn))
2878 reg_pending_barrier = TRUE_BARRIER;
2879 else if (find_reg_note (insn, REG_SETJMP, NULL))
2880 reg_pending_barrier = MOVE_BARRIER;
2881 }
2883 if (JUMP_P (insn))
2884 {
2885 rtx next;
2886 next = next_nonnote_nondebug_insn (insn);
2887 if (next && BARRIER_P (next))
2888 reg_pending_barrier = MOVE_BARRIER;
2889 else
2890 {
2891 rtx pending, pending_mem;
2893 if (sched_deps_info->compute_jump_reg_dependencies)
2894 {
2895 (*sched_deps_info->compute_jump_reg_dependencies)
2896 (insn, reg_pending_control_uses);
2898 /* Make latency of jump equal to 0 by using anti-dependence. */
2899 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
2900 {
2901 struct deps_reg *reg_last = &deps->reg_last[i];
2902 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2903 add_dependence_list (insn, reg_last->implicit_sets,
2904 0, REG_DEP_ANTI);
2905 add_dependence_list (insn, reg_last->clobbers, 0,
2906 REG_DEP_ANTI);
2907 }
2908 }
2910 /* All memory writes and volatile reads must happen before the
2911 jump. Non-volatile reads must happen before the jump iff
2912 the result is needed by the above register used mask. */
2914 pending = deps->pending_write_insns;
2915 pending_mem = deps->pending_write_mems;
2916 while (pending)
2917 {
2918 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2919 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2920 pending = XEXP (pending, 1);
2921 pending_mem = XEXP (pending_mem, 1);
2922 }
2924 pending = deps->pending_read_insns;
2925 pending_mem = deps->pending_read_mems;
2926 while (pending)
2927 {
2928 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2929 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2930 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2931 pending = XEXP (pending, 1);
2932 pending_mem = XEXP (pending_mem, 1);
2933 }
2935 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2936 REG_DEP_ANTI);
2937 add_dependence_list (insn, deps->pending_jump_insns, 1,
2938 REG_DEP_ANTI);
2939 }
2940 }
2942 /* If this instruction can throw an exception, then moving it changes
2943 where block boundaries fall. This is mighty confusing elsewhere.
2944 Therefore, prevent such an instruction from being moved. Same for
2945 non-jump instructions that define block boundaries.
2946 ??? Unclear whether this is still necessary in EBB mode. If not,
2947 add_branch_dependences should be adjusted for RGN mode instead. */
2948 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2949 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2950 reg_pending_barrier = MOVE_BARRIER;
2952 if (sched_pressure_p)
2953 {
2954 setup_insn_reg_uses (deps, insn);
2955 init_insn_reg_pressure_info (insn);
2956 }
2958 /* Add register dependencies for insn. */
2959 if (DEBUG_INSN_P (insn))
2960 {
2961 rtx prev = deps->last_debug_insn;
2962 rtx u;
2964 if (!deps->readonly)
2965 deps->last_debug_insn = insn;
2967 if (prev)
2968 add_dependence (insn, prev, REG_DEP_ANTI);
2970 add_dependence_list (insn, deps->last_function_call, 1,
2971 REG_DEP_ANTI);
2973 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2974 if (!sel_sched_p ())
2975 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2977 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2978 {
2979 struct deps_reg *reg_last = &deps->reg_last[i];
2980 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
2981 /* There's no point in making REG_DEP_CONTROL dependencies for
2982 debug insns. */
2983 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
2985 if (!deps->readonly)
2986 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2987 }
2988 CLEAR_REG_SET (reg_pending_uses);
2990 /* Quite often, a debug insn will refer to stuff in the
2991 previous instruction, but the reason we want this
2992 dependency here is to make sure the scheduler doesn't
2993 gratuitously move a debug insn ahead. This could dirty
2994 DF flags and cause additional analysis that wouldn't have
2995 occurred in compilation without debug insns, and such
2996 additional analysis can modify the generated code. */
2997 prev = PREV_INSN (insn);
2999 if (prev && NONDEBUG_INSN_P (prev))
3000 add_dependence (insn, prev, REG_DEP_ANTI);
3001 }
3002 else
3003 {
3004 regset_head set_or_clobbered;
3006 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3007 {
3008 struct deps_reg *reg_last = &deps->reg_last[i];
3009 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
3010 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
3011 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
3013 if (!deps->readonly)
3014 {
3015 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3016 reg_last->uses_length++;
3017 }
3018 }
3020 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3021 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3022 {
3023 struct deps_reg *reg_last = &deps->reg_last[i];
3024 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
3025 add_dependence_list (insn, reg_last->implicit_sets, 0,
3026 REG_DEP_ANTI);
3027 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
3029 if (!deps->readonly)
3030 {
3031 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3032 reg_last->uses_length++;
3033 }
3034 }
3036 if (targetm.sched.exposed_pipeline)
3037 {
3038 INIT_REG_SET (&set_or_clobbered);
3039 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3040 reg_pending_sets);
3041 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3042 {
3043 struct deps_reg *reg_last = &deps->reg_last[i];
3044 rtx list;
3045 for (list = reg_last->uses; list; list = XEXP (list, 1))
3046 {
3047 rtx other = XEXP (list, 0);
3048 if (INSN_CACHED_COND (other) != const_true_rtx
3049 && refers_to_regno_p (i, i + 1, INSN_CACHED_COND (other), NULL))
3050 INSN_CACHED_COND (other) = const_true_rtx;
3051 }
3052 }
3053 }
3055 /* If the current insn is conditional, we can't free any
3056 of the lists. */
3057 if (sched_has_condition_p (insn))
3058 {
3059 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3060 {
3061 struct deps_reg *reg_last = &deps->reg_last[i];
3062 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
3063 add_dependence_list (insn, reg_last->implicit_sets, 0,
3064 REG_DEP_ANTI);
3065 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3066 add_dependence_list (insn, reg_last->control_uses, 0,
3067 REG_DEP_CONTROL);
3069 if (!deps->readonly)
3070 {
3071 reg_last->clobbers
3072 = alloc_INSN_LIST (insn, reg_last->clobbers);
3073 reg_last->clobbers_length++;
3074 }
3075 }
3076 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3077 {
3078 struct deps_reg *reg_last = &deps->reg_last[i];
3079 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
3080 add_dependence_list (insn, reg_last->implicit_sets, 0,
3081 REG_DEP_ANTI);
3082 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
3083 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3084 add_dependence_list (insn, reg_last->control_uses, 0,
3085 REG_DEP_CONTROL);
3087 if (!deps->readonly)
3088 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3089 }
3090 }
3091 else
3092 {
3093 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3094 {
3095 struct deps_reg *reg_last = &deps->reg_last[i];
3096 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
3097 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
3098 {
3099 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3100 REG_DEP_OUTPUT);
3101 add_dependence_list_and_free (deps, insn,
3102 &reg_last->implicit_sets, 0,
3103 REG_DEP_ANTI);
3104 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3105 REG_DEP_ANTI);
3106 add_dependence_list_and_free (deps, insn,
3107 &reg_last->control_uses, 0,
3108 REG_DEP_ANTI);
3109 add_dependence_list_and_free
3110 (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
3112 if (!deps->readonly)
3113 {
3114 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3115 reg_last->clobbers_length = 0;
3116 reg_last->uses_length = 0;
3117 }
3118 }
3119 else
3120 {
3121 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
3122 add_dependence_list (insn, reg_last->implicit_sets, 0,
3123 REG_DEP_ANTI);
3124 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3125 add_dependence_list (insn, reg_last->control_uses, 0,
3126 REG_DEP_CONTROL);
3129 if (!deps->readonly)
3130 {
3131 reg_last->clobbers_length++;
3132 reg_last->clobbers
3133 = alloc_INSN_LIST (insn, reg_last->clobbers);
3134 }
3135 }
3136 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3137 {
3138 struct deps_reg *reg_last = &deps->reg_last[i];
3140 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3141 REG_DEP_OUTPUT);
3142 add_dependence_list_and_free (deps, insn,
3143 &reg_last->implicit_sets,
3144 0, REG_DEP_ANTI);
3145 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3146 REG_DEP_OUTPUT);
3147 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3148 REG_DEP_ANTI);
3149 add_dependence_list (insn, reg_last->control_uses, 0,
3150 REG_DEP_CONTROL);
3152 if (!deps->readonly)
3153 {
3154 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3155 reg_last->uses_length = 0;
3156 reg_last->clobbers_length = 0;
3157 }
3158 }
3159 }
3160 if (!deps->readonly)
3161 {
3162 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3163 {
3164 struct deps_reg *reg_last = &deps->reg_last[i];
3165 reg_last->control_uses
3166 = alloc_INSN_LIST (insn, reg_last->control_uses);
3167 }
3168 }
3169 }
3171 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3172 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3173 {
3174 struct deps_reg *reg_last = &deps->reg_last[i];
3175 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
3176 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
3177 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3178 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI);
3180 if (!deps->readonly)
3181 reg_last->implicit_sets
3182 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3183 }
3185 if (!deps->readonly)
3186 {
3187 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3188 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3189 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3190 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3191 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3192 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3193 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3195 /* Set up the pending barrier found. */
3196 deps->last_reg_pending_barrier = reg_pending_barrier;
3197 }
3199 CLEAR_REG_SET (reg_pending_uses);
3200 CLEAR_REG_SET (reg_pending_clobbers);
3201 CLEAR_REG_SET (reg_pending_sets);
3202 CLEAR_REG_SET (reg_pending_control_uses);
3203 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3204 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3206 /* Add dependencies if a scheduling barrier was found. */
3207 if (reg_pending_barrier)
3208 {
3209 /* In the case of barrier the most added dependencies are not
3210 real, so we use anti-dependence here. */
3211 if (sched_has_condition_p (insn))
3212 {
3213 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3214 {
3215 struct deps_reg *reg_last = &deps->reg_last[i];
3216 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3217 add_dependence_list (insn, reg_last->sets, 0,
3218 reg_pending_barrier == TRUE_BARRIER
3219 ? REG_DEP_TRUE : REG_DEP_ANTI);
3220 add_dependence_list (insn, reg_last->implicit_sets, 0,
3221 REG_DEP_ANTI);
3222 add_dependence_list (insn, reg_last->clobbers, 0,
3223 reg_pending_barrier == TRUE_BARRIER
3224 ? REG_DEP_TRUE : REG_DEP_ANTI);
3225 }
3226 }
3227 else
3228 {
3229 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3230 {
3231 struct deps_reg *reg_last = &deps->reg_last[i];
3232 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3233 REG_DEP_ANTI);
3234 add_dependence_list_and_free (deps, insn,
3235 &reg_last->control_uses, 0,
3236 REG_DEP_CONTROL);
3237 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3238 reg_pending_barrier == TRUE_BARRIER
3239 ? REG_DEP_TRUE : REG_DEP_ANTI);
3240 add_dependence_list_and_free (deps, insn,
3241 &reg_last->implicit_sets, 0,
3242 REG_DEP_ANTI);
3243 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3244 reg_pending_barrier == TRUE_BARRIER
3245 ? REG_DEP_TRUE : REG_DEP_ANTI);
3247 if (!deps->readonly)
3248 {
3249 reg_last->uses_length = 0;
3250 reg_last->clobbers_length = 0;
3251 }
3252 }
3253 }
3255 if (!deps->readonly)
3256 for (i = 0; i < (unsigned)deps->max_reg; i++)
3257 {
3258 struct deps_reg *reg_last = &deps->reg_last[i];
3259 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3260 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3261 }
3263 /* Flush pending lists on jumps, but not on speculative checks. */
3264 if (JUMP_P (insn) && !(sel_sched_p ()
3265 && sel_insn_is_speculation_check (insn)))
3266 flush_pending_lists (deps, insn, true, true);
3268 reg_pending_barrier = NOT_A_BARRIER;
3269 }
3271 /* If a post-call group is still open, see if it should remain so.
3272 This insn must be a simple move of a hard reg to a pseudo or
3273 vice versa.
3275 We must avoid moving these insns for correctness on targets
3276 with small register classes, and for special registers like
3277 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3278 hard regs for all targets. */
3280 if (deps->in_post_call_group_p)
3281 {
3282 rtx tmp, set = single_set (insn);
3283 int src_regno, dest_regno;
3285 if (set == NULL)
3286 {
3287 if (DEBUG_INSN_P (insn))
3288 /* We don't want to mark debug insns as part of the same
3289 sched group. We know they really aren't, but if we use
3290 debug insns to tell that a call group is over, we'll
3291 get different code if debug insns are not there and
3292 instructions that follow seem like they should be part
3293 of the call group.
3295 Also, if we did, fixup_sched_groups() would move the
3296 deps of the debug insn to the call insn, modifying
3297 non-debug post-dependency counts of the debug insn
3298 dependencies and otherwise messing with the scheduling
3301 Instead, let such debug insns be scheduled freely, but
3302 keep the call group open in case there are insns that
3303 should be part of it afterwards. Since we grant debug
3304 insns higher priority than even sched group insns, it
3305 will all turn out all right. */
3306 goto debug_dont_end_call_group;
3307 else
3308 goto end_call_group;
3309 }
3311 tmp = SET_DEST (set);
3312 if (GET_CODE (tmp) == SUBREG)
3313 tmp = SUBREG_REG (tmp);
3314 if (REG_P (tmp))
3315 dest_regno = REGNO (tmp);
3316 else
3317 goto end_call_group;
3319 tmp = SET_SRC (set);
3320 if (GET_CODE (tmp) == SUBREG)
3321 tmp = SUBREG_REG (tmp);
3322 if ((GET_CODE (tmp) == PLUS
3323 || GET_CODE (tmp) == MINUS)
3324 && REG_P (XEXP (tmp, 0))
3325 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3326 && dest_regno == STACK_POINTER_REGNUM)
3327 src_regno = STACK_POINTER_REGNUM;
3328 else if (REG_P (tmp))
3329 src_regno = REGNO (tmp);
3330 else
3331 goto end_call_group;
3333 if (src_regno < FIRST_PSEUDO_REGISTER
3334 || dest_regno < FIRST_PSEUDO_REGISTER)
3335 {
3336 if (!deps->readonly
3337 && deps->in_post_call_group_p == post_call_initial)
3338 deps->in_post_call_group_p = post_call;
3340 if (!sel_sched_p () || sched_emulate_haifa_p)
3341 {
3342 SCHED_GROUP_P (insn) = 1;
3343 CANT_MOVE (insn) = 1;
3344 }
3345 }
3346 else
3347 {
3348 end_call_group:
3349 if (!deps->readonly)
3350 deps->in_post_call_group_p = not_post_call;
3351 }
3352 }
3354 debug_dont_end_call_group:
3355 if ((current_sched_info->flags & DO_SPECULATION)
3356 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3357 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3358 be speculated. */
3359 {
3360 if (sel_sched_p ())
3361 sel_mark_hard_insn (insn);
3362 else
3363 {
3364 sd_iterator_def sd_it;
3365 dep_t dep;
3367 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3368 sd_iterator_cond (&sd_it, &dep);)
3369 change_spec_dep_to_hard (sd_it);
3370 }
3371 }
3372 }
3374 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3375 longjmp, loop forever, ...). */
3376 static bool
3377 call_may_noreturn_p (rtx insn)
3378 {
3379 rtx call;
3381 /* const or pure calls that aren't looping will always return. */
3382 if (RTL_CONST_OR_PURE_CALL_P (insn)
3383 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3384 return false;
3386 call = PATTERN (insn);
3387 if (GET_CODE (call) == PARALLEL)
3388 call = XVECEXP (call, 0, 0);
3389 if (GET_CODE (call) == SET)
3390 call = SET_SRC (call);
3391 if (GET_CODE (call) == CALL
3392 && MEM_P (XEXP (call, 0))
3393 && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3395 rtx symbol = XEXP (XEXP (call, 0), 0);
3396 if (SYMBOL_REF_DECL (symbol)
3397 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3398 {
3399 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3400 == BUILT_IN_NORMAL)
3401 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3402 {
3403 case BUILT_IN_BCMP:
3404 case BUILT_IN_BCOPY:
3405 case BUILT_IN_BZERO:
3406 case BUILT_IN_INDEX:
3407 case BUILT_IN_MEMCHR:
3408 case BUILT_IN_MEMCMP:
3409 case BUILT_IN_MEMCPY:
3410 case BUILT_IN_MEMMOVE:
3411 case BUILT_IN_MEMPCPY:
3412 case BUILT_IN_MEMSET:
3413 case BUILT_IN_RINDEX:
3414 case BUILT_IN_STPCPY:
3415 case BUILT_IN_STPNCPY:
3416 case BUILT_IN_STRCAT:
3417 case BUILT_IN_STRCHR:
3418 case BUILT_IN_STRCMP:
3419 case BUILT_IN_STRCPY:
3420 case BUILT_IN_STRCSPN:
3421 case BUILT_IN_STRLEN:
3422 case BUILT_IN_STRNCAT:
3423 case BUILT_IN_STRNCMP:
3424 case BUILT_IN_STRNCPY:
3425 case BUILT_IN_STRPBRK:
3426 case BUILT_IN_STRRCHR:
3427 case BUILT_IN_STRSPN:
3428 case BUILT_IN_STRSTR:
3429 /* Assume certain string/memory builtins always return. */
3430 return false;
3431 default:
3432 break;
3433 }
3434 }
3435 }
3437 /* For all other calls assume that they might not always return. */
3438 return true;
3439 }
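/* Illustrative note (added commentary): calls such as exit () or
   longjmp () make call_may_noreturn_p return true; deps_analyze_insn then
   records them in deps->last_function_call_may_noreturn, which
   sched_analyze_insn uses to keep trapping insns from being hoisted
   across them.  */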
3441 /* Analyze INSN with DEPS as a context. */
3442 static void
3443 deps_analyze_insn (struct deps_desc *deps, rtx insn)
3444 {
3445 if (sched_deps_info->start_insn)
3446 sched_deps_info->start_insn (insn);
3448 /* Record the condition for this insn. */
3449 if (NONDEBUG_INSN_P (insn))
3450 {
3451 rtx t;
3452 sched_get_condition_with_rev (insn, NULL);
3453 t = INSN_CACHED_COND (insn);
3454 INSN_COND_DEPS (insn) = NULL_RTX;
3455 if (reload_completed
3456 && (current_sched_info->flags & DO_PREDICATION)
3457 && COMPARISON_P (t)
3458 && REG_P (XEXP (t, 0))
3459 && CONSTANT_P (XEXP (t, 1)))
3460 {
3461 unsigned int regno;
3462 int nregs;
3463 regno = REGNO (XEXP (t, 0));
3465 nregs = hard_regno_nregs[regno][GET_MODE (t)];
3466 t = NULL_RTX;
3467 while (nregs-- > 0)
3468 {
3469 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3470 t = concat_INSN_LIST (reg_last->sets, t);
3471 t = concat_INSN_LIST (reg_last->clobbers, t);
3472 t = concat_INSN_LIST (reg_last->implicit_sets, t);
3473 }
3474 INSN_COND_DEPS (insn) = t;
3475 }
3476 }
3480 /* Make each JUMP_INSN (but not a speculative check)
3481 a scheduling barrier for memory references. */
3482 if (JUMP_P (insn)
3483 && !(sel_sched_p ()
3484 && sel_insn_is_speculation_check (insn)))
3485 {
3486 /* Keep the list a reasonable size. */
3487 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3488 flush_pending_lists (deps, insn, true, true);
3490 deps->pending_jump_insns
3491 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3492 }
3494 /* For each insn which shouldn't cross a jump, add a dependence. */
3495 add_dependence_list_and_free (deps, insn,
3496 &deps->sched_before_next_jump, 1,
3497 REG_DEP_ANTI);
3499 sched_analyze_insn (deps, PATTERN (insn), insn);
3500 }
3501 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3503 sched_analyze_insn (deps, PATTERN (insn), insn);
3504 }
3505 else if (CALL_P (insn))
3506 {
3507 int i;
3509 CANT_MOVE (insn) = 1;
3511 if (find_reg_note (insn, REG_SETJMP, NULL))
3512 {
3513 /* This is setjmp. Assume that all registers, not just
3514 hard registers, may be clobbered by this call. */
3515 reg_pending_barrier = MOVE_BARRIER;
3516 }
3517 else
3518 {
3519 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3520 /* A call may read and modify global register variables. */
3521 if (global_regs[i])
3522 {
3523 SET_REGNO_REG_SET (reg_pending_sets, i);
3524 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3525 }
3526 /* Other call-clobbered hard regs may be clobbered.
3527 Since we only have a choice between 'might be clobbered'
3528 and 'definitely not clobbered', we must include all
3529 partly call-clobbered registers here. */
3530 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3531 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3532 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3533 /* We don't know what set of fixed registers might be used
3534 by the function, but it is certain that the stack pointer
3535 is among them, but be conservative. */
3536 else if (fixed_regs[i])
3537 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3538 /* The frame pointer is normally not used by the function
3539 itself, but by the debugger. */
3540 /* ??? MIPS o32 is an exception. It uses the frame pointer
3541 in the macro expansion of jal but does not represent this
3542 fact in the call_insn rtl. */
3543 else if (i == FRAME_POINTER_REGNUM
3544 || (i == HARD_FRAME_POINTER_REGNUM
3545 && (! reload_completed || frame_pointer_needed)))
3546 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3547 }
3549 /* For each insn which shouldn't cross a call, add a dependence
3550 between that insn and this call insn. */
3551 add_dependence_list_and_free (deps, insn,
3552 &deps->sched_before_next_call, 1,
3553 REG_DEP_ANTI);
3555 sched_analyze_insn (deps, PATTERN (insn), insn);
3557 /* If CALL would be in a sched group, then this will violate
3558 convention that sched group insns have dependencies only on the
3559 previous instruction.
3561 Of course one can say: "Hey! What about head of the sched group?"
3562 And I will answer: "Basic principles (one dep per insn) are always
3563 the same." */
3564 gcc_assert (!SCHED_GROUP_P (insn));
3566 /* In the absence of interprocedural alias analysis, we must flush
3567 all pending reads and writes, and start new dependencies starting
3568 from here. But only flush writes for constant calls (which may
3569 be passed a pointer to something we haven't written yet). */
3570 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3572 if (!deps->readonly)
3573 {
3574 /* Remember the last function call for limiting lifetimes. */
3575 free_INSN_LIST_list (&deps->last_function_call);
3576 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3578 if (call_may_noreturn_p (insn))
3579 {
3580 /* Remember the last function call that might not always return
3581 normally for limiting moves of trapping insns. */
3582 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3583 deps->last_function_call_may_noreturn
3584 = alloc_INSN_LIST (insn, NULL_RTX);
3585 }
3587 /* Before reload, begin a post-call group, so as to keep the
3588 lifetimes of hard registers correct. */
3589 if (! reload_completed)
3590 deps->in_post_call_group_p = post_call;
3591 }
3592 }
3594 if (sched_deps_info->use_cselib)
3595 cselib_process_insn (insn);
3597 /* EH_REGION insn notes can not appear until well after we complete
3598 scheduling. */
3599 if (NOTE_P (insn))
3600 gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
3601 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
3603 if (sched_deps_info->finish_insn)
3604 sched_deps_info->finish_insn ();
3606 /* Fixup the dependencies in the sched group. */
3607 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3608 && SCHED_GROUP_P (insn) && !sel_sched_p ())
3609 fixup_sched_groups (insn);
3610 }
3612 /* Initialize DEPS for the new block beginning with HEAD. */
3613 void
3614 deps_start_bb (struct deps_desc *deps, rtx head)
3615 {
3616 gcc_assert (!deps->readonly);
3618 /* Before reload, if the previous block ended in a call, show that
3619 we are inside a post-call group, so as to keep the lifetimes of
3620 hard registers correct. */
3621 if (! reload_completed && !LABEL_P (head))
3622 {
3623 rtx insn = prev_nonnote_nondebug_insn (head);
3625 if (insn && CALL_P (insn))
3626 deps->in_post_call_group_p = post_call_initial;
3627 }
3628 }
3630 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3631 dependencies for each insn. */
3632 void
3633 sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
3634 {
3635 rtx insn;
3637 if (sched_deps_info->use_cselib)
3638 cselib_init (CSELIB_RECORD_MEMORY);
3640 deps_start_bb (deps, head);
3642 for (insn = head;; insn = NEXT_INSN (insn))
3643 {
3645 if (INSN_P (insn))
3646 {
3647 /* And initialize deps_lists. */
3648 sd_init_insn (insn);
3649 }
3651 deps_analyze_insn (deps, insn);
3653 if (insn == tail)
3654 {
3655 if (sched_deps_info->use_cselib)
3656 cselib_finish ();
3657 return;
3658 }
3659 }
3660 gcc_unreachable ();
3661 }
3663 /* Helper for sched_free_deps ().
3664 Delete INSN's (RESOLVED_P) backward dependencies. */
3665 static void
3666 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3667 {
3668 sd_iterator_def sd_it;
3669 dep_t dep;
3670 sd_list_types_def types;
3672 if (resolved_p)
3673 types = SD_LIST_RES_BACK;
3674 else
3675 types = SD_LIST_BACK;
3677 for (sd_it = sd_iterator_start (insn, types);
3678 sd_iterator_cond (&sd_it, &dep);)
3679 {
3680 dep_link_t link = *sd_it.linkp;
3681 dep_node_t node = DEP_LINK_NODE (link);
3682 deps_list_t back_list;
3683 deps_list_t forw_list;
3685 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3686 remove_from_deps_list (link, back_list);
3687 delete_dep_node (node);
3688 }
3689 }
3691 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3692 deps_lists. */
3693 void
3694 sched_free_deps (rtx head, rtx tail, bool resolved_p)
3695 {
3696 rtx insn;
3697 rtx next_tail = NEXT_INSN (tail);
3699 /* We make two passes since some insns may be scheduled before their
3700 dependencies are resolved. */
3701 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3702 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3704 /* Clear forward deps and leave the dep_nodes to the
3705 corresponding back_deps list. */
3706 if (resolved_p)
3707 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3708 else
3709 clear_deps_list (INSN_FORW_DEPS (insn));
3710 }
3711 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3712 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3714 /* Clear resolved back deps together with its dep_nodes. */
3715 delete_dep_nodes_in_back_deps (insn, resolved_p);
3717 sd_finish_insn (insn);
3718 }
3719 }
3721 /* Initialize variables for region data dependence analysis.
3722 When LAZY_REG_LAST is true, do not allocate reg_last array
3723 of struct deps_desc immediately. */
3725 void
3726 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3727 {
3728 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3730 deps->max_reg = max_reg;
3731 if (lazy_reg_last)
3732 deps->reg_last = NULL;
3733 else
3734 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3735 INIT_REG_SET (&deps->reg_last_in_use);
3737 deps->pending_read_insns = 0;
3738 deps->pending_read_mems = 0;
3739 deps->pending_write_insns = 0;
3740 deps->pending_write_mems = 0;
3741 deps->pending_jump_insns = 0;
3742 deps->pending_read_list_length = 0;
3743 deps->pending_write_list_length = 0;
3744 deps->pending_flush_length = 0;
3745 deps->last_pending_memory_flush = 0;
3746 deps->last_function_call = 0;
3747 deps->last_function_call_may_noreturn = 0;
3748 deps->sched_before_next_call = 0;
3749 deps->sched_before_next_jump = 0;
3750 deps->in_post_call_group_p = not_post_call;
3751 deps->last_debug_insn = 0;
3752 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3753 }
3756 /* Init only reg_last field of DEPS, which was not allocated before as
3757 we inited DEPS lazily. */
3758 void
3759 init_deps_reg_last (struct deps_desc *deps)
3760 {
3761 gcc_assert (deps && deps->max_reg > 0);
3762 gcc_assert (deps->reg_last == NULL);
3764 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3765 }
3768 /* Free insn lists found in DEPS. */
3770 void
3771 free_deps (struct deps_desc *deps)
3772 {
3773 unsigned i;
3774 reg_set_iterator rsi;
3776 /* We set max_reg to 0 when this context was already freed. */
3777 if (deps->max_reg == 0)
3778 {
3779 gcc_assert (deps->reg_last == NULL);
3780 return;
3781 }
3782 deps->max_reg = 0;
3784 free_INSN_LIST_list (&deps->pending_read_insns);
3785 free_EXPR_LIST_list (&deps->pending_read_mems);
3786 free_INSN_LIST_list (&deps->pending_write_insns);
3787 free_EXPR_LIST_list (&deps->pending_write_mems);
3788 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3790 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3791 times. For a testcase with 42000 regs and 8000 small basic blocks,
3792 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3793 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3794 {
3795 struct deps_reg *reg_last = &deps->reg_last[i];
3796 if (reg_last->uses)
3797 free_INSN_LIST_list (&reg_last->uses);
3798 if (reg_last->sets)
3799 free_INSN_LIST_list (&reg_last->sets);
3800 if (reg_last->implicit_sets)
3801 free_INSN_LIST_list (&reg_last->implicit_sets);
3802 if (reg_last->control_uses)
3803 free_INSN_LIST_list (&reg_last->control_uses);
3804 if (reg_last->clobbers)
3805 free_INSN_LIST_list (&reg_last->clobbers);
3806 }
3807 CLEAR_REG_SET (&deps->reg_last_in_use);
3809 /* As we initialize reg_last lazily, it is possible that we didn't allocate
3810 it at all. */
3811 free (deps->reg_last);
3812 deps->reg_last = NULL;
3813 }
3817 /* Remove INSN from dependence contexts DEPS. */
3818 void
3819 remove_from_deps (struct deps_desc *deps, rtx insn)
3820 {
3821 int removed;
3822 unsigned i;
3823 reg_set_iterator rsi;
3825 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3826 &deps->pending_read_mems);
3827 if (!DEBUG_INSN_P (insn))
3828 deps->pending_read_list_length -= removed;
3829 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3830 &deps->pending_write_mems);
3831 deps->pending_write_list_length -= removed;
3833 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
3834 deps->pending_flush_length -= removed;
3835 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3836 deps->pending_flush_length -= removed;
3838 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3839 {
3840 struct deps_reg *reg_last = &deps->reg_last[i];
3841 if (reg_last->uses)
3842 remove_from_dependence_list (insn, &reg_last->uses);
3843 if (reg_last->sets)
3844 remove_from_dependence_list (insn, &reg_last->sets);
3845 if (reg_last->implicit_sets)
3846 remove_from_dependence_list (insn, &reg_last->implicit_sets);
3847 if (reg_last->clobbers)
3848 remove_from_dependence_list (insn, &reg_last->clobbers);
3849 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3850 && !reg_last->clobbers)
3851 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3852 }
3854 if (CALL_P (insn))
3855 {
3856 remove_from_dependence_list (insn, &deps->last_function_call);
3857 remove_from_dependence_list (insn,
3858 &deps->last_function_call_may_noreturn);
3860 remove_from_dependence_list (insn, &deps->sched_before_next_call);
3861 }
3862 }
3863 /* Init deps data vector. */
3864 static void
3865 init_deps_data_vector (void)
3866 {
3867 int reserve = (sched_max_luid + 1
3868 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
3869 if (reserve > 0
3870 && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
3871 VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
3872 3 * sched_max_luid / 2);
3873 }
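/* Illustrative note (added commentary): growing to 3 * sched_max_luid / 2
   rather than to the exact need amortizes repeated extensions as new
   luids are created during scheduling.  */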
3875 /* If it is profitable to use them, initialize or extend (depending on
3876 GLOBAL_P) dependency data. */
3877 void
3878 sched_deps_init (bool global_p)
3879 {
3880 /* Average number of insns in the basic block.
3881 '+ 1' is used to make it nonzero. */
3882 int insns_in_block = sched_max_luid / n_basic_blocks + 1;
3884 init_deps_data_vector ();
3886 /* We use another caching mechanism for selective scheduling, so
3887 we don't use this one. */
3888 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
3890 /* ?!? We could save some memory by computing a per-region luid mapping
3891 which could reduce both the number of vectors in the cache and the
3892 size of each vector. Instead we just avoid the cache entirely unless
3893 the average number of instructions in a basic block is very high. See
3894 the comment before the declaration of true_dependency_cache for
3895 what we consider "very high". */
3897 extend_dependency_caches (sched_max_luid, true);
3898 }
3900 if (global_p)
3901 {
3902 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
3903 /* Allocate lists for one block at a time. */
3904 insns_in_block);
3905 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
3906 /* Allocate nodes for one block at a time.
3907 We assume that average insn has
3908 5 deps. */
3909 5 * insns_in_block);
3910 }
3911 }
3914 /* Create or extend (depending on CREATE_P) dependency caches to
3915 size N. */
3916 void
3917 extend_dependency_caches (int n, bool create_p)
3918 {
3919 if (create_p || true_dependency_cache)
3920 {
3921 int i, luid = cache_size + n;
3923 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
3924 luid);
3925 output_dependency_cache = XRESIZEVEC (bitmap_head,
3926 output_dependency_cache, luid);
3927 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
3928 luid);
3929 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
3930 luid);
3932 if (current_sched_info->flags & DO_SPECULATION)
3933 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
3934 luid);
3936 for (i = cache_size; i < luid; i++)
3937 {
3938 bitmap_initialize (&true_dependency_cache[i], 0);
3939 bitmap_initialize (&output_dependency_cache[i], 0);
3940 bitmap_initialize (&anti_dependency_cache[i], 0);
3941 bitmap_initialize (&control_dependency_cache[i], 0);
3943 if (current_sched_info->flags & DO_SPECULATION)
3944 bitmap_initialize (&spec_dependency_cache[i], 0);
3945 }
3947 cache_size = luid;
3948 }
3949 }
3950 /* Finalize dependency information for the whole function. */
3951 void
3952 sched_deps_finish (void)
3953 {
3954 gcc_assert (deps_pools_are_empty_p ());
3955 free_alloc_pool_if_empty (&dn_pool);
3956 free_alloc_pool_if_empty (&dl_pool);
3957 gcc_assert (dn_pool == NULL && dl_pool == NULL);
3959 VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
3962 if (true_dependency_cache)
3963 {
3964 int i;
3966 for (i = 0; i < cache_size; i++)
3967 {
3968 bitmap_clear (&true_dependency_cache[i]);
3969 bitmap_clear (&output_dependency_cache[i]);
3970 bitmap_clear (&anti_dependency_cache[i]);
3971 bitmap_clear (&control_dependency_cache[i]);
3973 if (sched_deps_info->generate_spec_deps)
3974 bitmap_clear (&spec_dependency_cache[i]);
3975 }
3976 free (true_dependency_cache);
3977 true_dependency_cache = NULL;
3978 free (output_dependency_cache);
3979 output_dependency_cache = NULL;
3980 free (anti_dependency_cache);
3981 anti_dependency_cache = NULL;
3982 free (control_dependency_cache);
3983 control_dependency_cache = NULL;
3985 if (sched_deps_info->generate_spec_deps)
3986 {
3987 free (spec_dependency_cache);
3988 spec_dependency_cache = NULL;
3989 }
3990 }
3991 }
3994 /* Initialize some global variables needed by the dependency analysis
3995 code. */
3997 void
3998 init_deps_global (void)
3999 {
4000 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4001 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4002 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4003 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4004 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4005 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4006 reg_pending_barrier = NOT_A_BARRIER;
4008 if (!sel_sched_p () || sched_emulate_haifa_p)
4009 {
4010 sched_deps_info->start_insn = haifa_start_insn;
4011 sched_deps_info->finish_insn = haifa_finish_insn;
4013 sched_deps_info->note_reg_set = haifa_note_reg_set;
4014 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4015 sched_deps_info->note_reg_use = haifa_note_reg_use;
4017 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4018 sched_deps_info->note_dep = haifa_note_dep;
4019 }
4020 }
4022 /* Free everything used by the dependency analysis code. */
4024 void
4025 finish_deps_global (void)
4026 {
4027 FREE_REG_SET (reg_pending_sets);
4028 FREE_REG_SET (reg_pending_clobbers);
4029 FREE_REG_SET (reg_pending_uses);
4030 FREE_REG_SET (reg_pending_control_uses);
4031 }
4033 /* Estimate the weakness of dependence between MEM1 and MEM2. */
4034 dw_t
4035 estimate_dep_weak (rtx mem1, rtx mem2)
4036 {
4037 rtx r1, r2;
4039 if (mem1 == mem2)
4040 /* MEMs are the same - don't speculate. */
4041 return MIN_DEP_WEAK;
4043 r1 = XEXP (mem1, 0);
4044 r2 = XEXP (mem2, 0);
4046 if (r1 == r2
4047 || (REG_P (r1) && REG_P (r2)
4048 && REGNO (r1) == REGNO (r2)))
4049 /* Again, MEMs are the same. */
4050 return MIN_DEP_WEAK;
4051 else if ((REG_P (r1) && !REG_P (r2))
4052 || (!REG_P (r1) && REG_P (r2)))
4053 /* Different addressing modes - reason to be more speculative,
4054 than usual. */
4055 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4056 else
4057 /* We can't say anything about the dependence. */
4058 return UNCERTAIN_DEP_WEAK;
4059 }
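/* Worked example (added commentary): NO_DEP_WEAK -
   (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2 equals
   (NO_DEP_WEAK + UNCERTAIN_DEP_WEAK) / 2, so mixed addressing modes get a
   weakness exactly halfway between "no dependence" and "uncertain".  */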
4061 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4062 This function can handle same INSN and ELEM (INSN == ELEM).
4063 It is a convenience wrapper. */
4064 void
4065 add_dependence_1 (rtx insn, rtx elem, enum reg_note dep_type)
4066 {
4067 ds_t ds;
4068 bool internal;
4070 if (dep_type == REG_DEP_TRUE)
4071 ds = DEP_TRUE;
4072 else if (dep_type == REG_DEP_OUTPUT)
4073 ds = DEP_OUTPUT;
4074 else if (dep_type == REG_DEP_CONTROL)
4075 ds = DEP_CONTROL;
4076 else
4077 {
4078 gcc_assert (dep_type == REG_DEP_ANTI);
4079 ds = DEP_ANTI;
4080 }
4082 /* When add_dependence is called from inside sched-deps.c, we expect
4083 cur_insn to be non-null. */
4084 internal = cur_insn != NULL;
4085 if (internal)
4086 gcc_assert (insn == cur_insn);
4087 else
4088 cur_insn = insn;
4090 note_dep (elem, ds);
4091 if (!internal)
4092 cur_insn = NULL;
4093 }
4095 /* Return weakness of speculative type TYPE in the dep_status DS. */
4096 static dw_t
4097 get_dep_weak_1 (ds_t ds, ds_t type)
4098 {
4099 ds = ds & type;
4101 switch (type)
4102 {
4103 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4104 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4105 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4106 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4107 default: gcc_unreachable ();
4108 }
4110 return (dw_t) ds;
4111 }
4113 dw_t
4114 get_dep_weak (ds_t ds, ds_t type)
4115 {
4116 dw_t dw = get_dep_weak_1 (ds, type);
4118 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4119 return dw;
4120 }
4122 /* Return the dep_status, which has the same parameters as DS, except for
4123 speculative type TYPE, that will have weakness DW. */
4124 ds_t
4125 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4126 {
4127 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4129 ds &= ~type;
4130 switch (type)
4131 {
4132 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4133 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4134 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4135 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4136 default: gcc_unreachable ();
4137 }
4139 return ds;
4140 }
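/* Illustrative note (added commentary): each speculative type occupies
   its own bit-field inside a ds_t, so e.g. set_dep_weak (0, BEGIN_DATA, w)
   stores w at BEGIN_DATA_BITS_OFFSET and get_dep_weak_1 (ds, BEGIN_DATA)
   masks and shifts it back down.  */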
4141 /* Return the join of two dep_statuses DS1 and DS2.
4142 If MAX_P is true then choose the greater probability,
4143 otherwise multiply probabilities.
4144 This function assumes that both DS1 and DS2 contain speculative bits. */
4145 static ds_t
4146 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4147 {
4148 ds_t ds, t;
4150 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4152 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4154 t = FIRST_SPEC_TYPE;
4155 do
4156 {
4157 if ((ds1 & t) && !(ds2 & t))
4158 ds |= ds1 & t;
4159 else if (!(ds1 & t) && (ds2 & t))
4160 ds |= ds2 & t;
4161 else if ((ds1 & t) && (ds2 & t))
4162 {
4163 dw_t dw1 = get_dep_weak (ds1, t);
4164 dw_t dw2 = get_dep_weak (ds2, t);
4165 ds_t dw;
4167 if (!max_p)
4168 {
4169 dw = ((ds_t) dw1) * ((ds_t) dw2);
4170 dw /= MAX_DEP_WEAK;
4171 if (dw < MIN_DEP_WEAK)
4172 dw = MIN_DEP_WEAK;
4173 }
4174 else
4175 {
4176 if (dw1 >= dw2)
4177 dw = dw1;
4178 else
4179 dw = dw2;
4180 }
4182 ds = set_dep_weak (ds, t, (dw_t) dw);
4183 }
4185 if (t == LAST_SPEC_TYPE)
4186 break;
4187 t <<= SPEC_TYPE_SHIFT;
4188 }
4189 while (1);
4191 return ds;
4192 }
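/* Worked example (added commentary, assuming weaknesses scale by
   MAX_DEP_WEAK): merging two BEGIN_DATA statuses of weakness dw1 and dw2
   yields dw1 * dw2 / MAX_DEP_WEAK, i.e. success probabilities multiply;
   with MAX_P the larger of the two weaknesses is kept instead.  */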
4194 /* Return the join of two dep_statuses DS1 and DS2.
4195 This function assumes that both DS1 and DS2 contain speculative bits. */
4196 ds_t
4197 ds_merge (ds_t ds1, ds_t ds2)
4198 {
4199 return ds_merge_1 (ds1, ds2, false);
4200 }
4202 /* Return the join of two dep_statuses DS1 and DS2. */
4203 ds_t
4204 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4205 {
4206 ds_t new_status = ds | ds2;
4208 if (new_status & SPECULATIVE)
4209 {
4210 if ((ds && !(ds & SPECULATIVE))
4211 || (ds2 && !(ds2 & SPECULATIVE)))
4212 /* Then this dep can't be speculative. */
4213 new_status &= ~SPECULATIVE;
4214 else
4215 {
4216 /* Both are speculative. Merging probabilities. */
4217 if (mem1)
4218 {
4219 dw_t dw;
4221 dw = estimate_dep_weak (mem1, mem2);
4222 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4223 }
4225 if (!ds)
4226 new_status = ds2;
4227 else if (!ds2)
4228 new_status = ds;
4229 else
4230 new_status = ds_merge (ds2, ds);
4231 }
4232 }
4234 return new_status;
4235 }
4237 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
4238 probabilities. */
4239 ds_t
4240 ds_max_merge (ds_t ds1, ds_t ds2)
4241 {
4242 if (ds1 == 0 && ds2 == 0)
4243 return 0;
4245 if (ds1 == 0 && ds2 != 0)
4246 return ds2;
4248 if (ds1 != 0 && ds2 == 0)
4249 return ds1;
4251 return ds_merge_1 (ds1, ds2, true);
4252 }
4254 /* Return the probability of speculation success for the speculation
4255 status DS. */
4256 dw_t
4257 ds_weak (ds_t ds)
4258 {
4259 ds_t res = 1, dt;
4260 int n = 0;
4262 dt = FIRST_SPEC_TYPE;
4263 do
4264 {
4265 if (ds & dt)
4266 {
4267 res *= (ds_t) get_dep_weak (ds, dt);
4268 n++;
4269 }
4271 if (dt == LAST_SPEC_TYPE)
4272 break;
4273 dt <<= SPEC_TYPE_SHIFT;
4274 }
4275 while (1);
4277 gcc_assert (n);
4278 while (--n)
4279 res /= MAX_DEP_WEAK;
4281 if (res < MIN_DEP_WEAK)
4282 res = MIN_DEP_WEAK;
4284 gcc_assert (res <= MAX_DEP_WEAK);
4286 return (dw_t) res;
4287 }
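/* Illustrative note (added commentary): with two speculative types of
   weakness w1 and w2 the loop computes w1 * w2, and the single division
   by MAX_DEP_WEAK rescales the product back into the
   [MIN_DEP_WEAK, MAX_DEP_WEAK] range.  */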
4289 /* Return a dep status that contains all speculation types of DS. */
4290 ds_t
4291 ds_get_speculation_types (ds_t ds)
4292 {
4293 if (ds & BEGIN_DATA)
4294 ds |= BEGIN_DATA;
4295 if (ds & BE_IN_DATA)
4296 ds |= BE_IN_DATA;
4297 if (ds & BEGIN_CONTROL)
4298 ds |= BEGIN_CONTROL;
4299 if (ds & BE_IN_CONTROL)
4300 ds |= BE_IN_CONTROL;
4302 return ds & SPECULATIVE;
4303 }
4305 /* Return a dep status that contains maximal weakness for each speculation
4306 type present in DS. */
4307 ds_t
4308 ds_get_max_dep_weak (ds_t ds)
4309 {
4310 if (ds & BEGIN_DATA)
4311 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4312 if (ds & BE_IN_DATA)
4313 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4314 if (ds & BEGIN_CONTROL)
4315 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4316 if (ds & BE_IN_CONTROL)
4317 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4319 return ds;
4320 }
4322 /* Dump information about the dependence status S. */
4323 static void
4324 dump_ds (FILE *f, ds_t s)
4325 {
4326 fprintf (f, "{");
4328 if (s & BEGIN_DATA)
4329 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4330 if (s & BE_IN_DATA)
4331 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4332 if (s & BEGIN_CONTROL)
4333 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4334 if (s & BE_IN_CONTROL)
4335 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4337 if (s & HARD_DEP)
4338 fprintf (f, "HARD_DEP; ");
4340 if (s & DEP_TRUE)
4341 fprintf (f, "DEP_TRUE; ");
4342 if (s & DEP_OUTPUT)
4343 fprintf (f, "DEP_OUTPUT; ");
4344 if (s & DEP_ANTI)
4345 fprintf (f, "DEP_ANTI; ");
4346 if (s & DEP_CONTROL)
4347 fprintf (f, "DEP_CONTROL; ");
4349 fprintf (f, "}");
4350 }
4352 DEBUG_FUNCTION void
4353 debug_ds (ds_t s)
4354 {
4355 dump_ds (stderr, s);
4356 fprintf (stderr, "\n");
4357 }
4359 #ifdef ENABLE_CHECKING
4360 /* Verify that dependence type and status are consistent.
4361 If RELAXED_P is true, then skip dep_weakness checks. */
4362 static void
4363 check_dep (dep_t dep, bool relaxed_p)
4364 {
4365 enum reg_note dt = DEP_TYPE (dep);
4366 ds_t ds = DEP_STATUS (dep);
4368 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4370 if (!(current_sched_info->flags & USE_DEPS_LIST))
4371 {
4372 gcc_assert (ds == 0);
4373 return;
4374 }
4376 /* Check that dependence type contains the same bits as the status. */
4377 if (dt == REG_DEP_TRUE)
4378 gcc_assert (ds & DEP_TRUE);
4379 else if (dt == REG_DEP_OUTPUT)
4380 gcc_assert ((ds & DEP_OUTPUT)
4381 && !(ds & DEP_TRUE));
4382 else if (dt == REG_DEP_ANTI)
4383 gcc_assert ((ds & DEP_ANTI)
4384 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4386 gcc_assert (dt == REG_DEP_CONTROL
4387 && (ds & DEP_CONTROL)
4388 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4390 /* HARD_DEP can not appear in dep_status of a link. */
4391 gcc_assert (!(ds & HARD_DEP));
4393 /* Check that dependence status is set correctly when speculation is not
4394 supported by the scheduler. */
4395 if (!sched_deps_info->generate_spec_deps)
4396 gcc_assert (!(ds & SPECULATIVE));
4397 else if (ds & SPECULATIVE)
4398 {
4399 if (!relaxed_p)
4400 {
4401 ds_t type = FIRST_SPEC_TYPE;
4403 /* Check that dependence weakness is in proper range. */
4404 do
4405 {
4407 get_dep_weak (ds, type);
4409 if (type == LAST_SPEC_TYPE)
4410 break;
4411 type <<= SPEC_TYPE_SHIFT;
4412 }
4413 while (1);
4414 }
4416 if (ds & BEGIN_SPEC)
4417 {
4418 /* Only true dependence can be data speculative. */
4419 if (ds & BEGIN_DATA)
4420 gcc_assert (ds & DEP_TRUE);
4422 /* Control dependencies in the insn scheduler are represented by
4423 anti-dependencies, therefore only anti dependence can be
4424 control speculative. */
4425 if (ds & BEGIN_CONTROL)
4426 gcc_assert (ds & DEP_ANTI);
4427 }
4428 else
4429 {
4430 /* Subsequent speculations should resolve true dependencies. */
4431 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4432 }
4434 /* Check that true and anti dependencies can't have other speculative
4435 statuses. */
4436 if (ds & DEP_TRUE)
4437 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4438 /* An output dependence can't be speculative at all. */
4439 gcc_assert (!(ds & DEP_OUTPUT));
4440 if (ds & DEP_ANTI)
4441 gcc_assert (ds & BEGIN_CONTROL);
4442 }
4443 }
4444 #endif /* ENABLE_CHECKING */
4446 #endif /* INSN_SCHEDULING */