1 /* Instruction scheduling pass. This file computes dependencies between
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
6 Free Software Foundation, Inc.
7 Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
8 and currently maintained by, Jim Wilson (wilson@cygnus.com)
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify it under
13 the terms of the GNU General Public License as published by the Free
14 Software Foundation; either version 3, or (at your option) any later
17 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
18 WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3.  If not, see
24 <http://www.gnu.org/licenses/>. */
28 #include "coretypes.h"
30 #include "diagnostic-core.h"
33 #include "hard-reg-set.h"
37 #include "insn-config.h"
38 #include "insn-attr.h"
41 #include "sched-int.h"
47 #ifdef INSN_SCHEDULING
49 #ifdef ENABLE_CHECKING
55 /* In deps->last_pending_memory_flush, this marks JUMP_INSNs that weren't
56 added to the list because of flush_pending_lists; such a jump stands
57 just for itself and not for any other pending memory reads/writes. */
58 #define NON_FLUSH_JUMP_KIND REG_DEP_ANTI
59 #define NON_FLUSH_JUMP_P(x) (REG_NOTE_KIND (x) == NON_FLUSH_JUMP_KIND)
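/* Illustrative sketch (added commentary, not in the original source):
   when walking deps->last_pending_memory_flush, an entry that satisfies
   NON_FLUSH_JUMP_P stands only for the jump itself, so it needs a
   dependence only when the analyzed access might trap:

     for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
       if (! NON_FLUSH_JUMP_P (u))
         add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
       else if (deps_may_trap_p (x))
         ... note a (possibly control-speculative) dependence ...

   Compare the loop in sched_analyze_2 below.  */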
61 /* Holds current parameters for the dependency analyzer. */
62 struct sched_deps_info_def *sched_deps_info;
64 /* The data is specific to the Haifa scheduler. */
65 VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
67 /* Return the major type present in the DS. */
75 return REG_DEP_OUTPUT;
77 gcc_assert (ds & DEP_ANTI);
82 /* Return equivalent dep_status. */
84 dk_to_ds (enum reg_note dk)
95 gcc_assert (dk == REG_DEP_ANTI);
100 /* Functions to operate with dependence information container - dep_t. */
102 /* Init DEP with the arguments. */
104 init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
108 DEP_TYPE (dep) = type;
109 DEP_STATUS (dep) = ds;
110 DEP_COST (dep) = UNKNOWN_DEP_COST;
113 /* Init DEP with the arguments.
114 While most of the scheduler (including targets) only needs the major type
115 of the dependency, it is convenient to hide full dep_status from them. */
117 init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
121 if ((current_sched_info->flags & USE_DEPS_LIST))
122 ds = dk_to_ds (kind);
126 init_dep_1 (dep, pro, con, kind, ds);
129 /* Make a copy of FROM in TO. */
131 copy_dep (dep_t to, dep_t from)
133 memcpy (to, from, sizeof (*to));
136 static void dump_ds (FILE *, ds_t);
138 /* Define flags for dump_dep (). */
140 /* Dump producer of the dependence. */
141 #define DUMP_DEP_PRO (2)
143 /* Dump consumer of the dependence. */
144 #define DUMP_DEP_CON (4)
146 /* Dump type of the dependence. */
147 #define DUMP_DEP_TYPE (8)
149 /* Dump status of the dependence. */
150 #define DUMP_DEP_STATUS (16)
152 /* Dump all information about the dependence. */
153 #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
157 FLAGS is a bit mask specifying what information about DEP needs to be printed.
159 If FLAGS has the very first bit set, then dump all information about DEP
160 and propagate this bit into the callee dump functions. */
162 dump_dep (FILE *dump, dep_t dep, int flags)
165 flags |= DUMP_DEP_ALL;
169 if (flags & DUMP_DEP_PRO)
170 fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
172 if (flags & DUMP_DEP_CON)
173 fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
175 if (flags & DUMP_DEP_TYPE)
178 enum reg_note type = DEP_TYPE (dep);
199 fprintf (dump, "%c; ", t);
202 if (flags & DUMP_DEP_STATUS)
204 if (current_sched_info->flags & USE_DEPS_LIST)
205 dump_ds (dump, DEP_STATUS (dep));
211 /* Default flags for dump_dep (). */
212 static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
214 /* Dump all fields of DEP to STDERR. */
216 sd_debug_dep (dep_t dep)
218 dump_dep (stderr, dep, 1);
219 fprintf (stderr, "\n");
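/* Example (illustrative only): dump just the producer and consumer
   of a dependence, using the flag bits defined above:

     dump_dep (file, dep, DUMP_DEP_PRO | DUMP_DEP_CON);

   where FILE is some dump file supplied by the caller.  Passing 1,
   as sd_debug_dep does, dumps everything.  */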
222 /* Determine whether DEP is a dependency link of a non-debug insn on a debug insn. */
226 depl_on_debug_p (dep_link_t dep)
228 return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
229 && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
232 /* Functions to operate with a single link from the dependencies lists -
235 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by PREV_NEXTP. */
238 attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
240 dep_link_t next = *prev_nextp;
242 gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
243 && DEP_LINK_NEXT (l) == NULL);
245 /* Init node being inserted. */
246 DEP_LINK_PREV_NEXTP (l) = prev_nextp;
247 DEP_LINK_NEXT (l) = next;
252 gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
254 DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
261 /* Add dep_link LINK to deps_list L. */
263 add_to_deps_list (dep_link_t link, deps_list_t l)
265 attach_dep_link (link, &DEPS_LIST_FIRST (l));
267 /* Don't count debug deps. */
268 if (!depl_on_debug_p (link))
269 ++DEPS_LIST_N_LINKS (l);
272 /* Detach dep_link L from the list. */
274 detach_dep_link (dep_link_t l)
276 dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
277 dep_link_t next = DEP_LINK_NEXT (l);
282 DEP_LINK_PREV_NEXTP (next) = prev_nextp;
284 DEP_LINK_PREV_NEXTP (l) = NULL;
285 DEP_LINK_NEXT (l) = NULL;
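/* Note on the representation (added commentary): DEP_LINK_PREV_NEXTP
   holds the address of the "next" field that points at this link, so
   a link can be detached in O(1) without knowing its list head:

     *prev_nextp = next;
     if (next != NULL)
       DEP_LINK_PREV_NEXTP (next) = prev_nextp;

   which is what detach_dep_link above amounts to.  */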
288 /* Remove link LINK from list LIST. */
290 remove_from_deps_list (dep_link_t link, deps_list_t list)
292 detach_dep_link (link);
294 /* Don't count debug deps. */
295 if (!depl_on_debug_p (link))
296 --DEPS_LIST_N_LINKS (list);
299 /* Move link LINK from list FROM to list TO. */
301 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
303 remove_from_deps_list (link, from);
304 add_to_deps_list (link, to);
307 /* Return true if LINK is not attached to any list. */
309 dep_link_is_detached_p (dep_link_t link)
311 return DEP_LINK_PREV_NEXTP (link) == NULL;
314 /* Pool to hold all dependency nodes (dep_node_t). */
315 static alloc_pool dn_pool;
317 /* Number of dep_nodes out there. */
318 static int dn_pool_diff = 0;
320 /* Create a dep_node. */
322 create_dep_node (void)
324 dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
325 dep_link_t back = DEP_NODE_BACK (n);
326 dep_link_t forw = DEP_NODE_FORW (n);
328 DEP_LINK_NODE (back) = n;
329 DEP_LINK_NEXT (back) = NULL;
330 DEP_LINK_PREV_NEXTP (back) = NULL;
332 DEP_LINK_NODE (forw) = n;
333 DEP_LINK_NEXT (forw) = NULL;
334 DEP_LINK_PREV_NEXTP (forw) = NULL;
341 /* Delete dep_node N. N must not be connected to any deps_list. */
343 delete_dep_node (dep_node_t n)
345 gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
346 && dep_link_is_detached_p (DEP_NODE_FORW (n)));
350 pool_free (dn_pool, n);
353 /* Pool to hold dependencies lists (deps_list_t). */
354 static alloc_pool dl_pool;
356 /* Number of deps_lists out there. */
357 static int dl_pool_diff = 0;
359 /* Functions to operate with dependences lists - deps_list_t. */
361 /* Return true if list L is empty. */
363 deps_list_empty_p (deps_list_t l)
365 return DEPS_LIST_N_LINKS (l) == 0;
368 /* Create a new deps_list. */
370 create_deps_list (void)
372 deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
374 DEPS_LIST_FIRST (l) = NULL;
375 DEPS_LIST_N_LINKS (l) = 0;
381 /* Free deps_list L. */
383 free_deps_list (deps_list_t l)
385 gcc_assert (deps_list_empty_p (l));
389 pool_free (dl_pool, l);
392 /* Return true if there are no dep_nodes and deps_lists out there.
393 After the region is scheduled all the dependency nodes and lists
394 should [generally] be returned to the pools. */
396 deps_pools_are_empty_p (void)
398 return dn_pool_diff == 0 && dl_pool_diff == 0;
401 /* Remove all elements from L. */
403 clear_deps_list (deps_list_t l)
407 dep_link_t link = DEPS_LIST_FIRST (l);
412 remove_from_deps_list (link, l);
417 /* Decide whether a dependency should be treated as a hard or a speculative dependency. */
420 dep_spec_p (dep_t dep)
422 if (current_sched_info->flags & DO_SPECULATION)
423 return (DEP_STATUS (dep) & SPECULATIVE) != 0;
427 static regset reg_pending_sets;
428 static regset reg_pending_clobbers;
429 static regset reg_pending_uses;
430 static enum reg_pending_barrier_mode reg_pending_barrier;
432 /* Hard registers implicitly clobbered or used (or may be implicitly
433 clobbered or used) by the currently analyzed insn.  For example, an
434 insn may have a constraint requiring a particular register class.
435 Even if there is currently no hard register in the insn, that hard
436 register will appear in the insn after the reload pass because the
437 constraint requires it. */
438 static HARD_REG_SET implicit_reg_pending_clobbers;
439 static HARD_REG_SET implicit_reg_pending_uses;
441 /* To speed up the test for duplicate dependency links we keep a
442 record of dependencies created by add_dependence when the average
443 number of instructions in a basic block is very large.
445 Studies have shown that there are typically around 5 instructions between
446 branches for typical C code. So we can make a guess that the average
447 basic block is approximately 5 instructions long; we will choose 100X
448 the average size as a very large basic block.
450 Each insn has associated bitmaps for its dependencies. Each bitmap
451 has enough entries to represent a dependency on any other insn in
452 the insn chain.  If the bitmap for the true dependency cache is
453 allocated, then the other ones are allocated as well. */
454 static bitmap_head *true_dependency_cache = NULL;
455 static bitmap_head *output_dependency_cache = NULL;
456 static bitmap_head *anti_dependency_cache = NULL;
457 static bitmap_head *spec_dependency_cache = NULL;
458 static int cache_size;
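/* Example (illustrative only): a duplicate-dependence test against the
   caches above.  Each cache is an array of bitmaps indexed by the
   consumer's luid, with one bit per producer luid:

     if (true_dependency_cache != NULL
         && bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
                          INSN_LUID (pro)))
       ... a true dependence PRO -> CON is already recorded ...

   See ask_dependency_caches below for the real logic.  */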
460 static int deps_may_trap_p (const_rtx);
461 static void add_dependence_list (rtx, rtx, int, enum reg_note);
462 static void add_dependence_list_and_free (struct deps_desc *, rtx,
463 rtx *, int, enum reg_note);
464 static void delete_all_dependences (rtx);
465 static void fixup_sched_groups (rtx);
467 static void flush_pending_lists (struct deps_desc *, rtx, int, int);
468 static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
469 static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
470 static void sched_analyze_insn (struct deps_desc *, rtx, rtx);
472 static bool sched_has_condition_p (const_rtx);
473 static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
475 static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
477 static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
479 #ifdef ENABLE_CHECKING
480 static void check_dep (dep_t, bool);
483 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
486 deps_may_trap_p (const_rtx mem)
488 const_rtx addr = XEXP (mem, 0);
490 if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
492 const_rtx t = get_reg_known_value (REGNO (addr));
496 return rtx_addr_can_trap_p (addr);
500 /* Find the condition under which INSN is executed. If REV is not NULL,
501 it is set to TRUE when the returned comparison should be reversed
502 to get the actual condition.
503 We only do actual work the first time we come here for an insn; the
504 results are cached in INSN_COND and INSN_REVERSE_COND. */
506 sched_get_condition_with_rev (const_rtx insn, bool *rev)
508 rtx pat = PATTERN (insn);
511 if (INSN_COND (insn) == const_true_rtx)
514 if (INSN_COND (insn) != NULL_RTX)
517 *rev = INSN_REVERSE_COND (insn);
518 return INSN_COND (insn);
521 INSN_COND (insn) = const_true_rtx;
522 INSN_REVERSE_COND (insn) = false;
529 if (GET_CODE (pat) == COND_EXEC)
531 INSN_COND (insn) = COND_EXEC_TEST (pat);
532 return COND_EXEC_TEST (pat);
535 if (!any_condjump_p (insn) || !onlyjump_p (insn))
538 src = SET_SRC (pc_set (insn));
540 if (XEXP (src, 2) == pc_rtx)
542 INSN_COND (insn) = XEXP (src, 0);
543 return XEXP (src, 0);
545 else if (XEXP (src, 1) == pc_rtx)
547 rtx cond = XEXP (src, 0);
548 enum rtx_code revcode = reversed_comparison_code (cond, insn);
550 if (revcode == UNKNOWN)
555 INSN_COND (insn) = cond;
556 INSN_REVERSE_COND (insn) = true;
563 /* True when we can find a condition under which INSN is executed. */
565 sched_has_condition_p (const_rtx insn)
567 return !! sched_get_condition_with_rev (insn, NULL);
572 /* Return nonzero if conditions COND1 and COND2 can never both be true. */
574 conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
576 if (COMPARISON_P (cond1)
577 && COMPARISON_P (cond2)
578 && GET_CODE (cond1) ==
579 (rev1 == rev2
580 ? reversed_comparison_code (cond2, NULL)
581 : GET_CODE (cond2))
582 && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
583 && XEXP (cond1, 1) == XEXP (cond2, 1))
588 /* Return true if insn1 and insn2 can never depend on one another because
589 the conditions under which they are executed are mutually exclusive. */
591 sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
594 bool rev1 = false, rev2 = false;
596 /* df doesn't handle conditional lifetimes entirely correctly;
597 calls mess up the conditional lifetimes. */
598 if (!CALL_P (insn1) && !CALL_P (insn2))
600 cond1 = sched_get_condition_with_rev (insn1, &rev1);
601 cond2 = sched_get_condition_with_rev (insn2, &rev2);
603 && conditions_mutex_p (cond1, cond2, rev1, rev2)
604 /* Make sure first instruction doesn't affect condition of second
605 instruction if switched. */
606 && !modified_in_p (cond1, insn2)
607 /* Make sure second instruction doesn't affect condition of first
608 instruction if switched. */
609 && !modified_in_p (cond2, insn1))
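/* Example (illustrative only): two COND_EXEC insns guarded by a
   condition and its reverse, e.g.

     (cond_exec (eq (reg:CC cc) (const_int 0)) (set (reg:SI r0) ...))
     (cond_exec (ne (reg:CC cc) (const_int 0)) (set (reg:SI r0) ...))

   can never both execute, so no dependence is needed between them,
   provided neither insn modifies the condition register itself.  */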
616 /* Return true if INSN can potentially be speculated with type DS. */
618 sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
620 if (HAS_INTERNAL_DEP (insn))
623 if (!NONJUMP_INSN_P (insn))
626 if (SCHED_GROUP_P (insn))
629 if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
632 if (side_effects_p (PATTERN (insn)))
636 /* The following instructions, which depend on a speculatively scheduled
637 instruction, cannot be speculatively scheduled along with it. */
639 if (may_trap_or_fault_p (PATTERN (insn)))
640 /* If instruction might fault, it cannot be speculatively scheduled.
641 For control speculation it's obvious why and for data speculation
642 it's because the insn might get wrong input if speculation
643 wasn't successful. */
646 if ((ds & BE_IN_DATA)
647 && sched_has_condition_p (insn))
648 /* If this is a predicated instruction, then it cannot be
649 speculatively scheduled. See PR35659. */
656 /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
657 initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
658 and remove the type of the returned list [through LIST_PTR] from TYPES_PTR.
659 This function is used to switch sd_iterator to the next list.
660 !!! For internal use only. Might consider moving it to sched-int.h. */
662 sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
663 deps_list_t *list_ptr, bool *resolved_p_ptr)
665 sd_list_types_def types = *types_ptr;
667 if (types & SD_LIST_HARD_BACK)
669 *list_ptr = INSN_HARD_BACK_DEPS (insn);
670 *resolved_p_ptr = false;
671 *types_ptr = types & ~SD_LIST_HARD_BACK;
673 else if (types & SD_LIST_SPEC_BACK)
675 *list_ptr = INSN_SPEC_BACK_DEPS (insn);
676 *resolved_p_ptr = false;
677 *types_ptr = types & ~SD_LIST_SPEC_BACK;
679 else if (types & SD_LIST_FORW)
681 *list_ptr = INSN_FORW_DEPS (insn);
682 *resolved_p_ptr = false;
683 *types_ptr = types & ~SD_LIST_FORW;
685 else if (types & SD_LIST_RES_BACK)
687 *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
688 *resolved_p_ptr = true;
689 *types_ptr = types & ~SD_LIST_RES_BACK;
691 else if (types & SD_LIST_RES_FORW)
693 *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
694 *resolved_p_ptr = true;
695 *types_ptr = types & ~SD_LIST_RES_FORW;
700 *resolved_p_ptr = false;
701 *types_ptr = SD_LIST_NONE;
705 /* Return the total size of INSN's lists defined by LIST_TYPES. */
707 sd_lists_size (const_rtx insn, sd_list_types_def list_types)
711 while (list_types != SD_LIST_NONE)
716 sd_next_list (insn, &list_types, &list, &resolved_p);
718 size += DEPS_LIST_N_LINKS (list);
724 /* Return true if INSN's lists defined by LIST_TYPES are all empty. */
727 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
729 while (list_types != SD_LIST_NONE)
734 sd_next_list (insn, &list_types, &list, &resolved_p);
735 if (!deps_list_empty_p (list))
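/* Example (illustrative only): walking dependence lists with the
   iterator interface used throughout this file:

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       fprintf (stderr, "depends on %d\n", INSN_UID (DEP_PRO (dep)));

   SD_LIST_BACK combines the hard and speculative backward lists.  */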
742 /* Initialize data for INSN. */
744 sd_init_insn (rtx insn)
746 INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
747 INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
748 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
749 INSN_FORW_DEPS (insn) = create_deps_list ();
750 INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
752 /* ??? It would be nice to allocate dependency caches here. */
755 /* Free data for INSN. */
757 sd_finish_insn (rtx insn)
759 /* ??? It would be nice to deallocate dependency caches here. */
761 free_deps_list (INSN_HARD_BACK_DEPS (insn));
762 INSN_HARD_BACK_DEPS (insn) = NULL;
764 free_deps_list (INSN_SPEC_BACK_DEPS (insn));
765 INSN_SPEC_BACK_DEPS (insn) = NULL;
767 free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
768 INSN_RESOLVED_BACK_DEPS (insn) = NULL;
770 free_deps_list (INSN_FORW_DEPS (insn));
771 INSN_FORW_DEPS (insn) = NULL;
773 free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
774 INSN_RESOLVED_FORW_DEPS (insn) = NULL;
777 /* Find a dependency between producer PRO and consumer CON.
778 Search through resolved dependency lists if RESOLVED_P is true.
779 If no such dependency is found return NULL,
780 otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
781 with an iterator pointing to it. */
783 sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
784 sd_iterator_def *sd_it_ptr)
786 sd_list_types_def pro_list_type;
787 sd_list_types_def con_list_type;
788 sd_iterator_def sd_it;
790 bool found_p = false;
794 pro_list_type = SD_LIST_RES_FORW;
795 con_list_type = SD_LIST_RES_BACK;
799 pro_list_type = SD_LIST_FORW;
800 con_list_type = SD_LIST_BACK;
803 /* Walk through either the back list of CON or the forw list of PRO,
804 depending on which one is shorter. */
805 if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
807 /* Find the dep_link with producer PRO in consumer's back_deps. */
808 FOR_EACH_DEP (con, con_list_type, sd_it, dep)
809 if (DEP_PRO (dep) == pro)
817 /* Find the dep_link with consumer CON in producer's forw_deps. */
818 FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
819 if (DEP_CON (dep) == con)
828 if (sd_it_ptr != NULL)
837 /* Find a dependency between producer PRO and consumer CON.
838 Use the dependency caches [if available] to check if the dependency is present at all.
839 Search through resolved dependency lists if RESOLVED_P is true.
840 Return the dependency, or NULL if none was found. */
842 sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
844 if (true_dependency_cache != NULL)
845 /* Avoiding the list walk below can cut compile times dramatically for some code. */
848 int elem_luid = INSN_LUID (pro);
849 int insn_luid = INSN_LUID (con);
851 gcc_assert (output_dependency_cache != NULL
852 && anti_dependency_cache != NULL);
854 if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
855 && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
856 && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
860 return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
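/* Example (illustrative only): querying for an existing dependence
   before acting on it:

     dep_t d = sd_find_dep_between (pro, con, false);

     if (d != NULL)
       ... CON already depends on PRO; DEP_TYPE (d) gives its kind ...  */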
863 /* Add or update a dependence described by DEP.
864 MEM1 and MEM2, if non-null, correspond to memory locations in case of data speculation.
867 The function returns a value indicating if an old entry has been changed
868 or a new entry has been added to insn's backward deps.
870 This function merely checks if producer and consumer are the same insn
871 and doesn't create a dep in this case. Actual manipulation of
872 dependence data structures is performed in add_or_update_dep_1. */
873 static enum DEPS_ADJUST_RESULT
874 maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
876 rtx elem = DEP_PRO (dep);
877 rtx insn = DEP_CON (dep);
879 gcc_assert (INSN_P (insn) && INSN_P (elem));
881 /* Don't depend an insn on itself. */
884 if (sched_deps_info->generate_spec_deps)
885 /* INSN has an internal dependence, which we can't overcome. */
886 HAS_INTERNAL_DEP (insn) = 1;
891 return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
894 /* Ask dependency caches what needs to be done for dependence DEP.
895 Return DEP_CREATED if new dependence should be created and there is no
896 need to try to find one searching the dependencies lists.
897 Return DEP_PRESENT if there already is a dependence described by DEP and
898 hence nothing is to be done.
899 Return DEP_CHANGED if there already is a dependence, but it should be
900 updated to incorporate additional information from DEP. */
901 static enum DEPS_ADJUST_RESULT
902 ask_dependency_caches (dep_t dep)
904 int elem_luid = INSN_LUID (DEP_PRO (dep));
905 int insn_luid = INSN_LUID (DEP_CON (dep));
907 gcc_assert (true_dependency_cache != NULL
908 && output_dependency_cache != NULL
909 && anti_dependency_cache != NULL);
911 if (!(current_sched_info->flags & USE_DEPS_LIST))
913 enum reg_note present_dep_type;
915 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
916 present_dep_type = REG_DEP_TRUE;
917 else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
918 present_dep_type = REG_DEP_OUTPUT;
919 else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
920 present_dep_type = REG_DEP_ANTI;
922 /* There is no existing dep so it should be created. */
925 if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
926 /* DEP does not add anything to the existing dependence. */
931 ds_t present_dep_types = 0;
933 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
934 present_dep_types |= DEP_TRUE;
935 if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
936 present_dep_types |= DEP_OUTPUT;
937 if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
938 present_dep_types |= DEP_ANTI;
940 if (present_dep_types == 0)
941 /* There is no existing dep so it should be created. */
944 if (!(current_sched_info->flags & DO_SPECULATION)
945 || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
947 if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
948 == present_dep_types)
949 /* DEP does not add anything to the existing dependence. */
954 /* Only true dependencies can be data speculative and
955 only anti dependencies can be control speculative. */
956 gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
957 == present_dep_types);
959 /* if (DEP is SPECULATIVE) then
960 ..we should update DEP_STATUS
962 ..we should reset existing dep to non-speculative. */
969 /* Set dependency caches according to DEP. */
971 set_dependency_caches (dep_t dep)
973 int elem_luid = INSN_LUID (DEP_PRO (dep));
974 int insn_luid = INSN_LUID (DEP_CON (dep));
976 if (!(current_sched_info->flags & USE_DEPS_LIST))
978 switch (DEP_TYPE (dep))
981 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
985 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
989 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
998 ds_t ds = DEP_STATUS (dep);
1001 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1002 if (ds & DEP_OUTPUT)
1003 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1005 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1007 if (ds & SPECULATIVE)
1009 gcc_assert (current_sched_info->flags & DO_SPECULATION);
1010 bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
1015 /* Type of dependence DEP has changed from OLD_TYPE.  Update dependency
1016 caches accordingly. */
1018 update_dependency_caches (dep_t dep, enum reg_note old_type)
1020 int elem_luid = INSN_LUID (DEP_PRO (dep));
1021 int insn_luid = INSN_LUID (DEP_CON (dep));
1023 /* Clear the corresponding cache entry because the type of the link
1024 may have changed.  Keep the entries if USE_DEPS_LIST is set. */
1025 if (!(current_sched_info->flags & USE_DEPS_LIST))
1029 case REG_DEP_OUTPUT:
1030 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1034 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1042 set_dependency_caches (dep);
1045 /* Convert a dependence pointed to by SD_IT to be non-speculative. */
1047 change_spec_dep_to_hard (sd_iterator_def sd_it)
1049 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1050 dep_link_t link = DEP_NODE_BACK (node);
1051 dep_t dep = DEP_NODE_DEP (node);
1052 rtx elem = DEP_PRO (dep);
1053 rtx insn = DEP_CON (dep);
1055 move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1057 DEP_STATUS (dep) &= ~SPECULATIVE;
1059 if (true_dependency_cache != NULL)
1060 /* Clear the cache entry. */
1061 bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1065 /* Update DEP to incorporate information from NEW_DEP.
1066 SD_IT points to DEP in case it should be moved to another list.
1067 MEM1 and MEM2, if nonnull, correspond to memory locations in case a
1068 data-speculative dependence should be updated. */
1069 static enum DEPS_ADJUST_RESULT
1070 update_dep (dep_t dep, dep_t new_dep,
1071 sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1072 rtx mem1 ATTRIBUTE_UNUSED,
1073 rtx mem2 ATTRIBUTE_UNUSED)
1075 enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1076 enum reg_note old_type = DEP_TYPE (dep);
1077 bool was_spec = dep_spec_p (dep);
1079 /* If this is a more restrictive type of dependence than the
1080 existing one, then change the existing dependence to this type. */
1082 if ((int) DEP_TYPE (new_dep) < (int) old_type)
1084 DEP_TYPE (dep) = DEP_TYPE (new_dep);
1088 if (current_sched_info->flags & USE_DEPS_LIST)
1089 /* Update DEP_STATUS. */
1091 ds_t dep_status = DEP_STATUS (dep);
1092 ds_t ds = DEP_STATUS (new_dep);
1093 ds_t new_status = ds | dep_status;
1095 if (new_status & SPECULATIVE)
1097 /* Either the existing dep or the dep we're adding, or both, are speculative.
1099 if (!(ds & SPECULATIVE)
1100 || !(dep_status & SPECULATIVE))
1101 /* The new dep can't be speculative. */
1102 new_status &= ~SPECULATIVE;
1105 /* Both are speculative. Merge probabilities. */
1110 dw = estimate_dep_weak (mem1, mem2);
1111 ds = set_dep_weak (ds, BEGIN_DATA, dw);
1114 new_status = ds_merge (dep_status, ds);
1120 if (dep_status != ds)
1122 DEP_STATUS (dep) = ds;
1127 if (was_spec && !dep_spec_p (dep))
1128 /* The old dep was speculative, but now it isn't. */
1129 change_spec_dep_to_hard (sd_it);
1131 if (true_dependency_cache != NULL
1132 && res == DEP_CHANGED)
1133 update_dependency_caches (dep, old_type);
1138 /* Add or update a dependence described by DEP.
1139 MEM1 and MEM2, if non-null, correspond to memory locations in case of data speculation.
1142 The function returns a value indicating if an old entry has been changed
1143 or a new entry has been added to insn's backward deps or nothing has
1144 been updated at all. */
1145 static enum DEPS_ADJUST_RESULT
1146 add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1147 rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1149 bool maybe_present_p = true;
1150 bool present_p = false;
1152 gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1153 && DEP_PRO (new_dep) != DEP_CON (new_dep));
1155 #ifdef ENABLE_CHECKING
1156 check_dep (new_dep, mem1 != NULL);
1159 if (true_dependency_cache != NULL)
1161 switch (ask_dependency_caches (new_dep))
1167 maybe_present_p = true;
1172 maybe_present_p = false;
1182 /* Check that we don't already have this dependence. */
1183 if (maybe_present_p)
1186 sd_iterator_def sd_it;
1188 gcc_assert (true_dependency_cache == NULL || present_p);
1190 present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1192 resolved_p, &sd_it);
1194 if (present_dep != NULL)
1195 /* We found an existing dependency between ELEM and INSN. */
1196 return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1198 /* We didn't find a dep, so it shouldn't be present in the cache. */
1199 gcc_assert (!present_p);
1202 /* Might want to check one level of transitivity to save conses.
1203 This check should be done in maybe_add_or_update_dep_1.
1204 Since we made it to add_or_update_dep_1, we must create
1205 (or update) a link. */
1207 if (mem1 != NULL_RTX)
1209 gcc_assert (sched_deps_info->generate_spec_deps);
1210 DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1211 estimate_dep_weak (mem1, mem2));
1214 sd_add_dep (new_dep, resolved_p);
1219 /* Initialize BACK_LIST_PTR with consumer's backward list and
1220 FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true
1221 initialize with lists that hold resolved deps. */
1223 get_back_and_forw_lists (dep_t dep, bool resolved_p,
1224 deps_list_t *back_list_ptr,
1225 deps_list_t *forw_list_ptr)
1227 rtx con = DEP_CON (dep);
1231 if (dep_spec_p (dep))
1232 *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1234 *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1236 *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1240 *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1241 *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1245 /* Add dependence described by DEP.
1246 If RESOLVED_P is true treat the dependence as a resolved one. */
1248 sd_add_dep (dep_t dep, bool resolved_p)
1250 dep_node_t n = create_dep_node ();
1251 deps_list_t con_back_deps;
1252 deps_list_t pro_forw_deps;
1253 rtx elem = DEP_PRO (dep);
1254 rtx insn = DEP_CON (dep);
1256 gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1258 if ((current_sched_info->flags & DO_SPECULATION) == 0
1259 || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1260 DEP_STATUS (dep) &= ~SPECULATIVE;
1262 copy_dep (DEP_NODE_DEP (n), dep);
1264 get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1266 add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1268 #ifdef ENABLE_CHECKING
1269 check_dep (dep, false);
1272 add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1274 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1275 in the bitmap caches of dependency information. */
1276 if (true_dependency_cache != NULL)
1277 set_dependency_caches (dep);
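/* Example (illustrative only): building a dependence on the stack and
   recording it, the same pattern haifa_note_dep uses below:

     dep_def _d, *d = &_d;

     init_dep (d, pro, con, REG_DEP_ANTI);
     sd_add_dep (d, false);  */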
1280 /* Add or update a backward dependence described by DEP;
1281 treat the dependence as resolved if RESOLVED_P is true.
1282 This function is a convenience wrapper. */
1283 enum DEPS_ADJUST_RESULT
1284 sd_add_or_update_dep (dep_t dep, bool resolved_p)
1286 return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1289 /* Resolve the dependence pointed to by SD_IT.
1290 SD_IT will advance to the next element. */
1292 sd_resolve_dep (sd_iterator_def sd_it)
1294 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1295 dep_t dep = DEP_NODE_DEP (node);
1296 rtx pro = DEP_PRO (dep);
1297 rtx con = DEP_CON (dep);
1299 if (dep_spec_p (dep))
1300 move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1301 INSN_RESOLVED_BACK_DEPS (con));
1303 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1304 INSN_RESOLVED_BACK_DEPS (con));
1306 move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1307 INSN_RESOLVED_FORW_DEPS (pro));
1310 /* Perform the inverse operation of sd_resolve_dep. Restore the dependence
1311 pointed to by SD_IT to unresolved state. */
1313 sd_unresolve_dep (sd_iterator_def sd_it)
1315 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1316 dep_t dep = DEP_NODE_DEP (node);
1317 rtx pro = DEP_PRO (dep);
1318 rtx con = DEP_CON (dep);
1320 if ((current_sched_info->flags & DO_SPECULATION)
1321 && (DEP_STATUS (dep) & SPECULATIVE))
1322 move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1323 INSN_SPEC_BACK_DEPS (con));
1325 move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1326 INSN_HARD_BACK_DEPS (con));
1328 move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
1329 INSN_FORW_DEPS (pro));
1332 /* Make TO depend on all of FROM's producers.
1333 If RESOLVED_P is true add dependencies to the resolved lists. */
1335 sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
1337 sd_list_types_def list_type;
1338 sd_iterator_def sd_it;
1341 list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1343 FOR_EACH_DEP (from, list_type, sd_it, dep)
1345 dep_def _new_dep, *new_dep = &_new_dep;
1347 copy_dep (new_dep, dep);
1348 DEP_CON (new_dep) = to;
1349 sd_add_dep (new_dep, resolved_p);
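/* Example (illustrative only): give a newly emitted insn the same
   backward dependencies as the insn it was derived from:

     sd_copy_back_deps (new_insn, orig_insn, false);

   where new_insn and orig_insn are hypothetical insns of the caller.  */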
1353 /* Remove a dependency referred to by SD_IT.
1354 SD_IT will point to the next dependence after removal. */
1356 sd_delete_dep (sd_iterator_def sd_it)
1358 dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1359 dep_t dep = DEP_NODE_DEP (n);
1360 rtx pro = DEP_PRO (dep);
1361 rtx con = DEP_CON (dep);
1362 deps_list_t con_back_deps;
1363 deps_list_t pro_forw_deps;
1365 if (true_dependency_cache != NULL)
1367 int elem_luid = INSN_LUID (pro);
1368 int insn_luid = INSN_LUID (con);
1370 bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1371 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1372 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1374 if (current_sched_info->flags & DO_SPECULATION)
1375 bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1378 get_back_and_forw_lists (dep, sd_it.resolved_p,
1379 &con_back_deps, &pro_forw_deps);
1381 remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1382 remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1384 delete_dep_node (n);
1387 /* Dump size of the lists. */
1388 #define DUMP_LISTS_SIZE (2)
1390 /* Dump dependencies of the lists. */
1391 #define DUMP_LISTS_DEPS (4)
1393 /* Dump all information about the lists. */
1394 #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1396 /* Dump deps_lists of INSN specified by TYPES to DUMP.
1397 FLAGS is a bit mask specifying what information about the lists needs to be printed.
1399 If FLAGS has the very first bit set, then dump all information about
1400 the lists and propagate this bit into the callee dump functions. */
1402 dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1404 sd_iterator_def sd_it;
1411 flags |= DUMP_LISTS_ALL;
1413 fprintf (dump, "[");
1415 if (flags & DUMP_LISTS_SIZE)
1416 fprintf (dump, "%d; ", sd_lists_size (insn, types));
1418 if (flags & DUMP_LISTS_DEPS)
1420 FOR_EACH_DEP (insn, types, sd_it, dep)
1422 dump_dep (dump, dep, dump_dep_flags | all);
1423 fprintf (dump, " ");
1428 /* Dump all information about deps_lists of INSN specified by TYPES to STDERR. */
1431 sd_debug_lists (rtx insn, sd_list_types_def types)
1433 dump_lists (stderr, insn, types, 1);
1434 fprintf (stderr, "\n");
1437 /* A convenience wrapper to operate on an entire list. */
1440 add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
1442 for (; list; list = XEXP (list, 1))
1444 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1445 add_dependence (insn, XEXP (list, 0), dep_type);
1449 /* Similar, but free *LISTP at the same time, when the context is not readonly. */
1453 add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
1454 int uncond, enum reg_note dep_type)
1458 /* We don't want to short-circuit dependencies involving debug
1459 insns, because they may cause actual dependencies to be disregarded. */
1461 if (deps->readonly || DEBUG_INSN_P (insn))
1463 add_dependence_list (insn, *listp, uncond, dep_type);
1467 for (list = *listp, *listp = NULL; list ; list = next)
1469 next = XEXP (list, 1);
1470 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1471 add_dependence (insn, XEXP (list, 0), dep_type);
1472 free_INSN_LIST_node (list);
1476 /* Remove all occurrences of INSN from LIST.  Return the number of
1477 occurrences removed. */
1480 remove_from_dependence_list (rtx insn, rtx* listp)
1486 if (XEXP (*listp, 0) == insn)
1488 remove_free_INSN_LIST_node (listp);
1493 listp = &XEXP (*listp, 1);
1499 /* Same as above, but process two lists at once. */
1501 remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
1507 if (XEXP (*listp, 0) == insn)
1509 remove_free_INSN_LIST_node (listp);
1510 remove_free_EXPR_LIST_node (exprp);
1515 listp = &XEXP (*listp, 1);
1516 exprp = &XEXP (*exprp, 1);
1522 /* Clear all dependencies for an insn. */
1524 delete_all_dependences (rtx insn)
1526 sd_iterator_def sd_it;
1529 /* The loop below could be optimized to clear the caches and back_deps
1530 in one call but that would provoke duplication of code from
1533 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1534 sd_iterator_cond (&sd_it, &dep);)
1535 sd_delete_dep (sd_it);
1538 /* All insns in a scheduling group except the first should only have
1539 dependencies on the previous insn in the group. So we find the
1540 first instruction in the scheduling group by walking the dependence
1541 chains backwards. Then we add the dependencies for the group to
1542 the previous nonnote insn. */
1545 fixup_sched_groups (rtx insn)
1547 sd_iterator_def sd_it;
1551 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1554 rtx pro = DEP_PRO (dep);
1558 i = prev_nonnote_insn (i);
1562 } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1564 if (! sched_insns_conditions_mutex_p (i, pro))
1565 add_dependence (i, pro, DEP_TYPE (dep));
1569 delete_all_dependences (insn);
1571 prev_nonnote = prev_nonnote_nondebug_insn (insn);
1572 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1573 && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1574 add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1577 /* Process an insn's memory dependencies.  There are four kinds of dependencies:
1580 (0) read dependence: read follows read
1581 (1) true dependence: read follows write
1582 (2) output dependence: write follows write
1583 (3) anti dependence: write follows read
1585 We are careful to build only dependencies which actually exist, and
1586 use transitivity to avoid building too many links. */
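/* Example (illustrative only) of the four kinds, all on one location A:

     (set (reg:SI r1) (mem:SI A))  ;; load 1
     (set (reg:SI r2) (mem:SI A))  ;; load 2: read dependence on load 1
     (set (mem:SI A) (reg:SI r3))  ;; store 1: anti dependence on the loads
     (set (reg:SI r4) (mem:SI A))  ;; load 3: true dependence on store 1
     (set (mem:SI A) (reg:SI r5))  ;; store 2: output dependence on store 1,
                                   ;; anti dependence on load 3  */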
1588 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1589 The MEM is a memory reference contained within INSN, which we are saving
1590 so that we can do memory aliasing on it. */
1593 add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1600 gcc_assert (!deps->readonly);
1603 insn_list = &deps->pending_read_insns;
1604 mem_list = &deps->pending_read_mems;
1605 if (!DEBUG_INSN_P (insn))
1606 deps->pending_read_list_length++;
1610 insn_list = &deps->pending_write_insns;
1611 mem_list = &deps->pending_write_mems;
1612 deps->pending_write_list_length++;
1615 link = alloc_INSN_LIST (insn, *insn_list);
1618 if (sched_deps_info->use_cselib)
1620 mem = shallow_copy_rtx (mem);
1621 XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0), GET_MODE (mem));
1623 link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1627 /* Make a dependency between every memory reference on the pending lists
1628 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1629 dependencies for a read operation, similarly with FOR_WRITE. */
1632 flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
1637 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1639 if (!deps->readonly)
1641 free_EXPR_LIST_list (&deps->pending_read_mems);
1642 deps->pending_read_list_length = 0;
1646 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1647 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1649 add_dependence_list_and_free (deps, insn,
1650 &deps->last_pending_memory_flush, 1,
1651 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1652 if (!deps->readonly)
1654 free_EXPR_LIST_list (&deps->pending_write_mems);
1655 deps->pending_write_list_length = 0;
1657 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1658 deps->pending_flush_length = 1;
1662 /* Instruction whose dependencies we are analyzing. */
1663 static rtx cur_insn = NULL_RTX;
1665 /* Implement hooks for haifa scheduler. */
1668 haifa_start_insn (rtx insn)
1670 gcc_assert (insn && !cur_insn);
1676 haifa_finish_insn (void)
1682 haifa_note_reg_set (int regno)
1684 SET_REGNO_REG_SET (reg_pending_sets, regno);
1688 haifa_note_reg_clobber (int regno)
1690 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1694 haifa_note_reg_use (int regno)
1696 SET_REGNO_REG_SET (reg_pending_uses, regno);
1700 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
1702 if (!(ds & SPECULATIVE))
1705 pending_mem = NULL_RTX;
1708 gcc_assert (ds & BEGIN_DATA);
1711 dep_def _dep, *dep = &_dep;
1713 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1714 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1715 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1721 haifa_note_dep (rtx elem, ds_t ds)
1726 init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1727 maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1731 note_reg_use (int r)
1733 if (sched_deps_info->note_reg_use)
1734 sched_deps_info->note_reg_use (r);
1738 note_reg_set (int r)
1740 if (sched_deps_info->note_reg_set)
1741 sched_deps_info->note_reg_set (r);
1745 note_reg_clobber (int r)
1747 if (sched_deps_info->note_reg_clobber)
1748 sched_deps_info->note_reg_clobber (r);
1752 note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
1754 if (sched_deps_info->note_mem_dep)
1755 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1759 note_dep (rtx e, ds_t ds)
1761 if (sched_deps_info->note_dep)
1762 sched_deps_info->note_dep (e, ds);
1765 /* Return the reg_note corresponding to DS. */
1770 return REG_DEP_TRUE;
1771 else if (ds & DEP_OUTPUT)
1772 return REG_DEP_OUTPUT;
1775 gcc_assert (ds & DEP_ANTI);
1776 return REG_DEP_ANTI;
1782 /* Functions for computation of info needed for register pressure
1783 sensitive insn scheduling. */
1786 /* Allocate and return reg_use_data structure for REGNO and INSN. */
1787 static struct reg_use_data *
1788 create_insn_reg_use (int regno, rtx insn)
1790 struct reg_use_data *use;
1792 use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1795 use->next_insn_use = INSN_REG_USE_LIST (insn);
1796 INSN_REG_USE_LIST (insn) = use;
1800 /* Allocate and return reg_set_data structure for REGNO and INSN. */
1801 static struct reg_set_data *
1802 create_insn_reg_set (int regno, rtx insn)
1804 struct reg_set_data *set;
1806 set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1809 set->next_insn_set = INSN_REG_SET_LIST (insn);
1810 INSN_REG_SET_LIST (insn) = set;
1814 /* Set up insn register uses for INSN and dependency context DEPS. */
1816 setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
1819 reg_set_iterator rsi;
1821 struct reg_use_data *use, *use2, *next;
1822 struct deps_reg *reg_last;
1824 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1826 if (i < FIRST_PSEUDO_REGISTER
1827 && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1830 if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1831 && ! REGNO_REG_SET_P (reg_pending_sets, i)
1832 && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1833 /* Ignore a use which is not dying. */
1836 use = create_insn_reg_use (i, insn);
1837 use->next_regno_use = use;
1838 reg_last = &deps->reg_last[i];
1840 /* Create the cyclic list of uses. */
1841 for (list = reg_last->uses; list; list = XEXP (list, 1))
1843 use2 = create_insn_reg_use (i, XEXP (list, 0));
1844 next = use->next_regno_use;
1845 use->next_regno_use = use2;
1846 use2->next_regno_use = next;
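/* Added commentary: the next_regno_use links form a circular list
   through all uses of the same regno, seeded by the self-link above.
   A full cycle can be visited as:

     struct reg_use_data *p = use;

     do
       {
         ... visit p ...
         p = p->next_regno_use;
       }
     while (p != use);  */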
1851 /* Register pressure info for the currently processed insn. */
1852 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1854 /* Return TRUE if INSN has a use structure for REGNO. */
1856 insn_use_p (rtx insn, int regno)
1858 struct reg_use_data *use;
1860 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1861 if (use->regno == regno)
1866 /* Update the register pressure info after birth of pseudo register REGNO
1867 in INSN.  Arguments CLOBBER_P and UNUSED_P say, respectively, that
1868 the register is clobbered or unused after the insn. */
1870 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
1875 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1876 cl = sched_regno_pressure_class[regno];
1879 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
1882 new_incr = reg_pressure_info[cl].clobber_increase + incr;
1883 reg_pressure_info[cl].clobber_increase = new_incr;
1887 new_incr = reg_pressure_info[cl].unused_set_increase + incr;
1888 reg_pressure_info[cl].unused_set_increase = new_incr;
1892 new_incr = reg_pressure_info[cl].set_increase + incr;
1893 reg_pressure_info[cl].set_increase = new_incr;
1894 if (! insn_use_p (insn, regno))
1895 reg_pressure_info[cl].change += incr;
1896 create_insn_reg_set (regno, insn);
1898 gcc_assert (new_incr < (1 << INCREASE_BITS));
1902 /* Like mark_insn_pseudo_birth except that NREGS says how many
1903 hard registers are involved in the birth. */
1905 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
1906 bool clobber_p, bool unused_p)
1909 int new_incr, last = regno + nregs;
1911 while (regno < last)
1913 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1914 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
1916 cl = sched_regno_pressure_class[regno];
1921 new_incr = reg_pressure_info[cl].clobber_increase + 1;
1922 reg_pressure_info[cl].clobber_increase = new_incr;
1926 new_incr = reg_pressure_info[cl].unused_set_increase + 1;
1927 reg_pressure_info[cl].unused_set_increase = new_incr;
1931 new_incr = reg_pressure_info[cl].set_increase + 1;
1932 reg_pressure_info[cl].set_increase = new_incr;
1933 if (! insn_use_p (insn, regno))
1934 reg_pressure_info[cl].change += 1;
1935 create_insn_reg_set (regno, insn);
1937 gcc_assert (new_incr < (1 << INCREASE_BITS));
1944 /* Update the register pressure info after birth of pseudo or hard
1945 register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say,
1946 respectively, that the register is clobbered or unused after the insn. */
1949 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
1953 if (GET_CODE (reg) == SUBREG)
1954 reg = SUBREG_REG (reg);
1959 regno = REGNO (reg);
1960 if (regno < FIRST_PSEUDO_REGISTER)
1961 mark_insn_hard_regno_birth (insn, regno,
1962 hard_regno_nregs[regno][GET_MODE (reg)],
1963 clobber_p, unused_p);
1965 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
1968 /* Update the register pressure info after death of pseudo register REGNO. */
1971 mark_pseudo_death (int regno)
1976 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1977 cl = sched_regno_pressure_class[regno];
1980 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
1981 reg_pressure_info[cl].change -= incr;
1985 /* Like mark_pseudo_death except that NREGS says how many hard
1986 registers are involved in the death. */
1988 mark_hard_regno_death (int regno, int nregs)
1991 int last = regno + nregs;
1993 while (regno < last)
1995 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1996 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
1998 cl = sched_regno_pressure_class[regno];
2000 reg_pressure_info[cl].change -= 1;
2006 /* Update the register pressure info after death of pseudo or hard register REG. */
2009 mark_reg_death (rtx reg)
2013 if (GET_CODE (reg) == SUBREG)
2014 reg = SUBREG_REG (reg);
2019 regno = REGNO (reg);
2020 if (regno < FIRST_PSEUDO_REGISTER)
2021 mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
2023 mark_pseudo_death (regno);
2026 /* Process SETTER of REG. DATA is an insn containing the setter. */
2028 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2030 if (setter != NULL_RTX && GET_CODE (setter) != SET)
2033 ((rtx) data, reg, false,
2034 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2037 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2039 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2041 if (GET_CODE (setter) == CLOBBER)
2042 mark_insn_reg_birth ((rtx) data, reg, true, false);
2045 /* Set up reg pressure info related to INSN. */
2047 init_insn_reg_pressure_info (rtx insn)
2051 static struct reg_pressure_data *pressure_info;
2054 gcc_assert (sched_pressure_p);
2056 if (! INSN_P (insn))
2059 for (i = 0; i < ira_pressure_classes_num; i++)
2061 cl = ira_pressure_classes[i];
2062 reg_pressure_info[cl].clobber_increase = 0;
2063 reg_pressure_info[cl].set_increase = 0;
2064 reg_pressure_info[cl].unused_set_increase = 0;
2065 reg_pressure_info[cl].change = 0;
2068 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2070 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2073 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2074 if (REG_NOTE_KIND (link) == REG_INC)
2075 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2078 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2079 if (REG_NOTE_KIND (link) == REG_DEAD)
2080 mark_reg_death (XEXP (link, 0));
2082 len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2084 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2085 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2087 for (i = 0; i < ira_pressure_classes_num; i++)
2089 cl = ira_pressure_classes[i];
2090 pressure_info[i].clobber_increase
2091 = reg_pressure_info[cl].clobber_increase;
2092 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2093 pressure_info[i].unused_set_increase
2094 = reg_pressure_info[cl].unused_set_increase;
2095 pressure_info[i].change = reg_pressure_info[cl].change;
2102 /* Internal variable for the sched_analyze_[12] () functions.
2103 If it is nonzero, this means that sched_analyze_[12] is looking
2104 at the outermost SET. */
2105 static bool can_start_lhs_rhs_p;
2107 /* Extend reg info for the deps context DEPS given that
2108 we have just generated a register numbered REGNO. */
2110 extend_deps_reg_info (struct deps_desc *deps, int regno)
2112 int max_regno = regno + 1;
2114 gcc_assert (!reload_completed);
2116 /* In a readonly context, it would not hurt to extend info,
2117 but it should not be needed. */
2118 if (reload_completed && deps->readonly)
2120 deps->max_reg = max_regno;
2124 if (max_regno > deps->max_reg)
2126 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2128 memset (&deps->reg_last[deps->max_reg],
2129 0, (max_regno - deps->max_reg)
2130 * sizeof (struct deps_reg));
2131 deps->max_reg = max_regno;
2135 /* Extend REG_INFO_P, if needed. */
2137 maybe_extend_reg_info_p (void)
2139 /* Extend REG_INFO_P, if needed. */
2140 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2142 size_t new_reg_info_p_size = max_regno + 128;
2144 gcc_assert (!reload_completed && sel_sched_p ());
2146 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2147 new_reg_info_p_size,
2149 sizeof (*reg_info_p));
2150 reg_info_p_size = new_reg_info_p_size;
2154 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2155 The type of the reference is specified by REF and can be SET,
2156 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2159 sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
2160 enum rtx_code ref, rtx insn)
2162 /* We could emit new pseudos in renaming. Extend the reg structures. */
2163 if (!reload_completed && sel_sched_p ()
2164 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2165 extend_deps_reg_info (deps, regno);
2167 maybe_extend_reg_info_p ();
2169 /* A hard reg in a wide mode may really be multiple registers.
2170 If so, mark all of them just like the first. */
2171 if (regno < FIRST_PSEUDO_REGISTER)
2173 int i = hard_regno_nregs[regno][mode];
2177 note_reg_set (regno + i);
2179 else if (ref == USE)
2182 note_reg_use (regno + i);
2187 note_reg_clobber (regno + i);
2191 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2192 it does not reload.  Ignore these as they have served their purpose already. */
2194 else if (regno >= deps->max_reg)
2196 enum rtx_code code = GET_CODE (PATTERN (insn));
2197 gcc_assert (code == USE || code == CLOBBER);
2203 note_reg_set (regno);
2204 else if (ref == USE)
2205 note_reg_use (regno);
2207 note_reg_clobber (regno);
2209 /* Pseudos that are REG_EQUIV to something may be replaced
2210 by that during reloading. We need only add dependencies for
2211 the address in the REG_EQUIV note. */
2212 if (!reload_completed && get_reg_known_equiv_p (regno))
2214 rtx t = get_reg_known_value (regno);
2216 sched_analyze_2 (deps, XEXP (t, 0), insn);
2219 /* Don't let it cross a call after scheduling if it doesn't
2220 already cross one. */
2221 if (REG_N_CALLS_CROSSED (regno) == 0)
2223 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2224 deps->sched_before_next_call
2225 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2227 add_dependence_list (insn, deps->last_function_call, 1,
2233 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2234 rtx, X, creating all dependencies generated by the write to the
2235 destination of X, and reads of everything mentioned. */
2238 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
2240 rtx dest = XEXP (x, 0);
2241 enum rtx_code code = GET_CODE (x);
2242 bool cslr_p = can_start_lhs_rhs_p;
2244 can_start_lhs_rhs_p = false;
2250 if (cslr_p && sched_deps_info->start_lhs)
2251 sched_deps_info->start_lhs (dest);
2253 if (GET_CODE (dest) == PARALLEL)
2257 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2258 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2259 sched_analyze_1 (deps,
2260 gen_rtx_CLOBBER (VOIDmode,
2261 XEXP (XVECEXP (dest, 0, i), 0)),
2264 if (cslr_p && sched_deps_info->finish_lhs)
2265 sched_deps_info->finish_lhs ();
2269 can_start_lhs_rhs_p = cslr_p;
2271 sched_analyze_2 (deps, SET_SRC (x), insn);
2273 can_start_lhs_rhs_p = false;
2279 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2280 || GET_CODE (dest) == ZERO_EXTRACT)
2282 if (GET_CODE (dest) == STRICT_LOW_PART
2283 || GET_CODE (dest) == ZERO_EXTRACT
2284 || df_read_modify_subreg_p (dest))
2286 /* These both read and modify the result. We must handle
2287 them as writes to get proper dependencies for following
2288 instructions. We must handle them as reads to get proper
2289 dependencies from this to previous instructions.
2290 Thus we need to call sched_analyze_2. */
2292 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2294 if (GET_CODE (dest) == ZERO_EXTRACT)
2296 /* The second and third arguments are values read by this insn. */
2297 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2298 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2300 dest = XEXP (dest, 0);
2305 int regno = REGNO (dest);
2306 enum machine_mode mode = GET_MODE (dest);
2308 sched_analyze_reg (deps, regno, mode, code, insn);
2311 /* Treat all writes to a stack register as modifying the TOS. */
2312 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2314 /* Avoid analyzing the same register twice. */
2315 if (regno != FIRST_STACK_REG)
2316 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2318 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2323 else if (MEM_P (dest))
2325 /* Writing memory. */
2328 if (sched_deps_info->use_cselib)
2330 enum machine_mode address_mode
2331 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
2333 t = shallow_copy_rtx (dest);
2334 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2335 GET_MODE (t), insn);
2336 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0), GET_MODE (t));
2340 /* Pending lists can't get larger with a readonly context. */
2342 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2343 > MAX_PENDING_LIST_LENGTH))
2345 /* Flush all pending reads and writes to prevent the pending lists
2346 from getting any larger. Insn scheduling runs too slowly when
2347 these lists get long. When compiling GCC with itself,
2348 this flush occurs 8 times for sparc, and 10 times for m88k using
2349 the default value of 32. */
2350 flush_pending_lists (deps, insn, false, true);
2354 rtx pending, pending_mem;
2356 pending = deps->pending_read_insns;
2357 pending_mem = deps->pending_read_mems;
2360 if (anti_dependence (XEXP (pending_mem, 0), t)
2361 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2362 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2365 pending = XEXP (pending, 1);
2366 pending_mem = XEXP (pending_mem, 1);
2369 pending = deps->pending_write_insns;
2370 pending_mem = deps->pending_write_mems;
2373 if (output_dependence (XEXP (pending_mem, 0), t)
2374 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2375 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2378 pending = XEXP (pending, 1);
2379 pending_mem = XEXP (pending_mem, 1);
2382 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2385 if (!deps->readonly)
2386 add_insn_mem_dependence (deps, false, insn, dest);
2388 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2391 if (cslr_p && sched_deps_info->finish_lhs)
2392 sched_deps_info->finish_lhs ();
2394 /* Analyze reads. */
2395 if (GET_CODE (x) == SET)
2397 can_start_lhs_rhs_p = cslr_p;
2399 sched_analyze_2 (deps, SET_SRC (x), insn);
2401 can_start_lhs_rhs_p = false;
2405 /* Analyze the uses of memory and registers in rtx X in INSN. */
2407 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2413 bool cslr_p = can_start_lhs_rhs_p;
2415 can_start_lhs_rhs_p = false;
2421 if (cslr_p && sched_deps_info->start_rhs)
2422 sched_deps_info->start_rhs (x);
2424 code = GET_CODE (x);
2435 /* Ignore constants. */
2436 if (cslr_p && sched_deps_info->finish_rhs)
2437 sched_deps_info->finish_rhs ();
2443 /* User of CC0 depends on immediately preceding insn. */
2444 SCHED_GROUP_P (insn) = 1;
2445 /* Don't move CC0 setter to another block (it can set up the
2446 same flag for previous CC0 users which is safe). */
2447 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2449 if (cslr_p && sched_deps_info->finish_rhs)
2450 sched_deps_info->finish_rhs ();
2457 int regno = REGNO (x);
2458 enum machine_mode mode = GET_MODE (x);
2460 sched_analyze_reg (deps, regno, mode, USE, insn);
2463 /* Treat all reads of a stack register as modifying the TOS. */
2464 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2466 /* Avoid analyzing the same register twice. */
2467 if (regno != FIRST_STACK_REG)
2468 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2469 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2473 if (cslr_p && sched_deps_info->finish_rhs)
2474 sched_deps_info->finish_rhs ();
2481 /* Reading memory. */
2483 rtx pending, pending_mem;
rtx t = x;
2486 if (sched_deps_info->use_cselib)
2488 enum machine_mode address_mode
2489 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
2491 t = shallow_copy_rtx (t);
2492 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2493 GET_MODE (t), insn);
2494 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0), GET_MODE (t));
2497 if (!DEBUG_INSN_P (insn))
2500 pending = deps->pending_read_insns;
2501 pending_mem = deps->pending_read_mems;
while (pending)
2504 if (read_dependence (XEXP (pending_mem, 0), t)
2505 && ! sched_insns_conditions_mutex_p (insn,
2506 XEXP (pending, 0)))
2507 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0), DEP_ANTI);
2510 pending = XEXP (pending, 1);
2511 pending_mem = XEXP (pending_mem, 1);
2514 pending = deps->pending_write_insns;
2515 pending_mem = deps->pending_write_mems;
while (pending)
2518 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
2519 t, rtx_varies_p)
2520 && ! sched_insns_conditions_mutex_p (insn,
2521 XEXP (pending, 0)))
2522 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2523 sched_deps_info->generate_spec_deps
2524 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2526 pending = XEXP (pending, 1);
2527 pending_mem = XEXP (pending_mem, 1);
2530 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2532 if (! NON_FLUSH_JUMP_P (u))
2533 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2534 else if (deps_may_trap_p (x))
2536 if ((sched_deps_info->generate_spec_deps)
2537 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2539 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL, MAX_DEP_WEAK);
2542 note_dep (XEXP (u, 0), ds);
else
2545 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2550 /* Always add these dependencies to pending_reads, since
2551 this insn may be followed by a write. */
2552 if (!deps->readonly)
2553 add_insn_mem_dependence (deps, true, insn, x);
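/* E.g. for the hypothetical sequence
     insn 1: r8 = [sp+4]
     insn 2: [sp+4] = r9
   insn 1 must sit on pending_read_insns so that the write analysis of
   insn 2 finds it and records the anti-dependence of insn 2 on
   insn 1. */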
2555 sched_analyze_2 (deps, XEXP (x, 0), insn);
2557 if (cslr_p && sched_deps_info->finish_rhs)
2558 sched_deps_info->finish_rhs ();
2563 /* Force pending stores to memory in case a trap handler needs them. */
2565 flush_pending_lists (deps, insn, true, false);
2569 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2570 reg_pending_barrier = TRUE_BARRIER;
2573 case UNSPEC_VOLATILE:
2574 flush_pending_lists (deps, insn, true, true);
2580 /* Traditional and volatile asm instructions must be considered to use
2581 and clobber all hard registers, all pseudo-registers and all of
2582 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2584 Consider for instance a volatile asm that changes the fpu rounding
2585 mode. An insn should not be moved across this even if it only uses
2586 pseudo-regs because it might give an incorrectly rounded result. */
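/* A concrete instance of the rounding-mode example (hypothetical x87
   asm): asm volatile ("fldcw %0" : : "m" (cw)) shares no register or
   memory reference with a nearby addition of FP pseudo-regs, yet
   reordering the two would change the computed result. */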
2587 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2588 reg_pending_barrier = TRUE_BARRIER;
2590 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2591 We cannot just fall through here since then we would be confused
2592 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2593 a traditional asm the way its normal usage would. */
2595 if (code == ASM_OPERANDS)
2597 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2598 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2600 if (cslr_p && sched_deps_info->finish_rhs)
2601 sched_deps_info->finish_rhs ();
2612 /* These both read and modify the result. We must handle them as writes
2613 to get proper dependencies for following instructions. We must handle
2614 them as reads to get proper dependencies from this to previous
2615 instructions. Thus we need to pass them to both sched_analyze_1
2616 and sched_analyze_2. We must call sched_analyze_2 first in order
2617 to get the proper antecedent for the read. */
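/* E.g. the hypothetical address (mem:SI (post_inc:SI (reg:SI 4)))
   uses (reg:SI 4) to form the address and then sets it to the
   incremented value, hence the pair of calls below. */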
2618 sched_analyze_2 (deps, XEXP (x, 0), insn);
2619 sched_analyze_1 (deps, x, insn);
2621 if (cslr_p && sched_deps_info->finish_rhs)
2622 sched_deps_info->finish_rhs ();
2628 /* op0 = op0 + op1 */
2629 sched_analyze_2 (deps, XEXP (x, 0), insn);
2630 sched_analyze_2 (deps, XEXP (x, 1), insn);
2631 sched_analyze_1 (deps, x, insn);
2633 if (cslr_p && sched_deps_info->finish_rhs)
2634 sched_deps_info->finish_rhs ();
2642 /* Other cases: walk the insn. */
2643 fmt = GET_RTX_FORMAT (code);
2644 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
if (fmt[i] == 'e')
2647 sched_analyze_2 (deps, XEXP (x, i), insn);
2648 else if (fmt[i] == 'E')
2649 for (j = 0; j < XVECLEN (x, i); j++)
2650 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
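/* E.g. (plus:SI (reg:SI 60) (mem:SI (reg:SI 61))) has format "ee"
   (hypothetical operands), so both operands are walked here and the
   inner MEM is picked up by the recursive call. */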
2653 if (cslr_p && sched_deps_info->finish_rhs)
2654 sched_deps_info->finish_rhs ();
2657 /* Analyze an INSN with pattern X to find all dependencies. */
2659 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2661 RTX_CODE code = GET_CODE (x);
2664 reg_set_iterator rsi;
2666 if (! reload_completed)
HARD_REG_SET temp;
2670 extract_insn (insn);
2671 preprocess_constraints ();
2672 ira_implicitly_set_insn_hard_regs (&temp);
2673 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2674 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2677 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn) && code == SET);
2681 /* Avoid moving trapping instructions across function calls that might
2682 not always return. */
2683 add_dependence_list (insn, deps->last_function_call_may_noreturn, 1, REG_DEP_ANTI);
2686 if (code == COND_EXEC)
2688 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2690 /* ??? Should be recording conditions so we reduce the number of
2691 false dependencies. */
2692 x = COND_EXEC_CODE (x);
2693 code = GET_CODE (x);
2695 if (code == SET || code == CLOBBER)
2697 sched_analyze_1 (deps, x, insn);
2699 /* Bare clobber insns are used for letting life analysis, reg-stack
2700 and others know that a value is dead. Depend on the last call
2701 instruction so that reg-stack won't get confused. */
2702 if (code == CLOBBER)
2703 add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_ANTI);
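/* E.g. a bare (clobber (reg:DF 40)) (hypothetical) only marks the
   value as dead; the dependence on the last call keeps that death
   note ordered with respect to the call for reg-stack's benefit. */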
2706 else if (code == PARALLEL)
2708 for (i = XVECLEN (x, 0); i--;)
2710 rtx sub = XVECEXP (x, 0, i);
2711 code = GET_CODE (sub);
2713 if (code == COND_EXEC)
2715 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2716 sub = COND_EXEC_CODE (sub);
2717 code = GET_CODE (sub);
2719 if (code == SET || code == CLOBBER)
2720 sched_analyze_1 (deps, sub, insn);
2721 else
2722 sched_analyze_2 (deps, sub, insn);
else
2726 sched_analyze_2 (deps, x, insn);
2728 /* Mark registers CLOBBERED or used by called function. */
if (CALL_P (insn))
2731 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2733 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2734 sched_analyze_1 (deps, XEXP (link, 0), insn);
2735 else
2736 sched_analyze_2 (deps, XEXP (link, 0), insn);
2738 if (find_reg_note (insn, REG_SETJMP, NULL))
2739 reg_pending_barrier = MOVE_BARRIER;
2745 next = next_nonnote_nondebug_insn (insn);
2746 if (next && BARRIER_P (next))
2747 reg_pending_barrier = MOVE_BARRIER;
2750 rtx pending, pending_mem;
2752 if (sched_deps_info->compute_jump_reg_dependencies)
regset_head tmp;
2755 INIT_REG_SET (&tmp);
2757 (*sched_deps_info->compute_jump_reg_dependencies) (insn, &tmp);
2759 /* Make latency of jump equal to 0 by using anti-dependence. */
2760 EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i, rsi)
2762 struct deps_reg *reg_last = &deps->reg_last[i];
2763 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2764 add_dependence_list (insn, reg_last->implicit_sets,
2765 0, REG_DEP_ANTI);
2766 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
2769 if (!deps->readonly)
2771 reg_last->uses_length++;
2772 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2776 CLEAR_REG_SET (&tmp);
2779 /* All memory writes and volatile reads must happen before the
2780 jump. Non-volatile reads must happen before the jump iff
2781 the result is needed by the register-use mask computed above. */
2783 pending = deps->pending_write_insns;
2784 pending_mem = deps->pending_write_mems;
while (pending)
2787 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2788 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2789 pending = XEXP (pending, 1);
2790 pending_mem = XEXP (pending_mem, 1);
2793 pending = deps->pending_read_insns;
2794 pending_mem = deps->pending_read_mems;
while (pending)
2797 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2798 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2799 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2800 pending = XEXP (pending, 1);
2801 pending_mem = XEXP (pending_mem, 1);
2804 add_dependence_list (insn, deps->last_pending_memory_flush, 1, REG_DEP_ANTI);
2809 /* If this instruction can throw an exception, then moving it changes
2810 where block boundaries fall. This is mighty confusing elsewhere.
2811 Therefore, prevent such an instruction from being moved. Same for
2812 non-jump instructions that define block boundaries.
2813 ??? Unclear whether this is still necessary in EBB mode. If not,
2814 add_branch_dependences should be adjusted for RGN mode instead. */
2815 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2816 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2817 reg_pending_barrier = MOVE_BARRIER;
2819 if (sched_pressure_p)
2821 setup_insn_reg_uses (deps, insn);
2822 init_insn_reg_pressure_info (insn);
2825 /* Add register dependencies for insn. */
2826 if (DEBUG_INSN_P (insn))
2828 rtx prev = deps->last_debug_insn;
2831 if (!deps->readonly)
2832 deps->last_debug_insn = insn;
2835 if (prev) add_dependence (insn, prev, REG_DEP_ANTI);
2837 add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_ANTI);
2840 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2841 if (! NON_FLUSH_JUMP_P (u) || !sel_sched_p ())
2842 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2844 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2846 struct deps_reg *reg_last = &deps->reg_last[i];
2847 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
2848 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
2850 if (!deps->readonly)
2851 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2853 CLEAR_REG_SET (reg_pending_uses);
2855 /* Quite often, a debug insn will refer to stuff in the
2856 previous instruction, but the reason we want this
2857 dependency here is to make sure the scheduler doesn't
2858 gratuitously move a debug insn ahead. This could dirty
2859 DF flags and cause additional analysis that wouldn't have
2860 occurred in compilation without debug insns, and such
2861 additional analysis can modify the generated code. */
2862 prev = PREV_INSN (insn);
2864 if (prev && NONDEBUG_INSN_P (prev))
2865 add_dependence (insn, prev, REG_DEP_ANTI);
2869 regset_head set_or_clobbered;
2871 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2873 struct deps_reg *reg_last = &deps->reg_last[i];
2874 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2875 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
2876 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2878 if (!deps->readonly)
2880 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2881 reg_last->uses_length++;
2885 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2886 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
2888 struct deps_reg *reg_last = &deps->reg_last[i];
2889 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2890 add_dependence_list (insn, reg_last->implicit_sets, 0,
2891 REG_DEP_ANTI);
2892 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2894 if (!deps->readonly)
2896 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2897 reg_last->uses_length++;
2901 if (targetm.sched.exposed_pipeline)
2903 INIT_REG_SET (&set_or_clobbered);
2904 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
2906 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
2908 struct deps_reg *reg_last = &deps->reg_last[i];
2909 rtx list;
2910 for (list = reg_last->uses; list; list = XEXP (list, 1))
2912 rtx other = XEXP (list, 0);
2913 if (INSN_COND (other) != const_true_rtx
2914 && refers_to_regno_p (i, i + 1, INSN_COND (other), NULL))
2915 INSN_COND (other) = const_true_rtx;
2920 /* If the current insn is conditional, we can't free any
2921 of the lists. */
2922 if (sched_has_condition_p (insn))
2924 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2926 struct deps_reg *reg_last = &deps->reg_last[i];
2927 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2928 add_dependence_list (insn, reg_last->implicit_sets, 0,
2929 REG_DEP_ANTI);
2930 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2932 if (!deps->readonly)
2935 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
2936 reg_last->clobbers_length++;
2939 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2941 struct deps_reg *reg_last = &deps->reg_last[i];
2942 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2943 add_dependence_list (insn, reg_last->implicit_sets, 0,
2944 REG_DEP_ANTI);
2945 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
2946 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2948 if (!deps->readonly)
2949 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2954 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2956 struct deps_reg *reg_last = &deps->reg_last[i];
2957 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
2958 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
2960 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2961 REG_DEP_OUTPUT);
2962 add_dependence_list_and_free (deps, insn,
2963 &reg_last->implicit_sets, 0,
2964 REG_DEP_ANTI);
2965 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2966 REG_DEP_ANTI);
2967 add_dependence_list_and_free
2968 (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
2970 if (!deps->readonly)
2972 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2973 reg_last->clobbers_length = 0;
2974 reg_last->uses_length = 0;
2979 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2980 add_dependence_list (insn, reg_last->implicit_sets, 0,
2981 REG_DEP_ANTI);
2982 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2985 if (!deps->readonly)
2987 reg_last->clobbers_length++;
2989 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
2992 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2994 struct deps_reg *reg_last = &deps->reg_last[i];
2996 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2997 REG_DEP_OUTPUT);
2998 add_dependence_list_and_free (deps, insn,
2999 &reg_last->implicit_sets, 0,
3000 REG_DEP_ANTI);
3001 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3002 REG_DEP_OUTPUT);
3003 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0, REG_DEP_ANTI);
3006 if (!deps->readonly)
3008 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3009 reg_last->uses_length = 0;
3010 reg_last->clobbers_length = 0;
3016 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3017 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3019 struct deps_reg *reg_last = &deps->reg_last[i];
3020 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
3021 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
3022 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3024 if (!deps->readonly)
3025 reg_last->implicit_sets
3026 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3029 if (!deps->readonly)
3031 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3032 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3033 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3034 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3035 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3036 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3037 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3039 /* Set up the pending barrier found. */
3040 deps->last_reg_pending_barrier = reg_pending_barrier;
3043 CLEAR_REG_SET (reg_pending_uses);
3044 CLEAR_REG_SET (reg_pending_clobbers);
3045 CLEAR_REG_SET (reg_pending_sets);
3046 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3047 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3049 /* Add dependencies if a scheduling barrier was found. */
3050 if (reg_pending_barrier)
3052 /* In the case of a barrier, most of the added dependencies are not
3053 real, so we use anti-dependence here. */
3054 if (sched_has_condition_p (insn))
3056 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3058 struct deps_reg *reg_last = &deps->reg_last[i];
3059 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3060 add_dependence_list (insn, reg_last->sets, 0,
3061 reg_pending_barrier == TRUE_BARRIER
3062 ? REG_DEP_TRUE : REG_DEP_ANTI);
3063 add_dependence_list (insn, reg_last->implicit_sets, 0,
3064 REG_DEP_ANTI);
3065 add_dependence_list (insn, reg_last->clobbers, 0,
3066 reg_pending_barrier == TRUE_BARRIER
3067 ? REG_DEP_TRUE : REG_DEP_ANTI);
3072 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3074 struct deps_reg *reg_last = &deps->reg_last[i];
3075 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3076 REG_DEP_ANTI);
3077 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3078 reg_pending_barrier == TRUE_BARRIER
3079 ? REG_DEP_TRUE : REG_DEP_ANTI);
3080 add_dependence_list_and_free (deps, insn,
3081 &reg_last->implicit_sets, 0,
3082 REG_DEP_ANTI);
3083 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3084 reg_pending_barrier == TRUE_BARRIER
3085 ? REG_DEP_TRUE : REG_DEP_ANTI);
3087 if (!deps->readonly)
3089 reg_last->uses_length = 0;
3090 reg_last->clobbers_length = 0;
3095 if (!deps->readonly)
3096 for (i = 0; i < (unsigned)deps->max_reg; i++)
3098 struct deps_reg *reg_last = &deps->reg_last[i];
3099 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3100 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3103 /* Flush pending lists on jumps, but not on speculative checks. */
3104 if (JUMP_P (insn) && !(sel_sched_p ()
3105 && sel_insn_is_speculation_check (insn)))
3106 flush_pending_lists (deps, insn, true, true);
3108 reg_pending_barrier = NOT_A_BARRIER;
3111 /* If a post-call group is still open, see if it should remain so.
3112 This insn must be a simple move of a hard reg to a pseudo or vice versa.
3115 We must avoid moving these insns for correctness on targets
3116 with small register classes, and for special registers like
3117 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3118 hard regs for all targets. */
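/* E.g. immediately after a call (hypothetical RTL),
   (set (reg:SI 59) (reg:SI 0)) moves the hard return register into a
   pseudo and therefore stays in the post-call group, while an add of
   two pseudo-regs would end the group. */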
3120 if (deps->in_post_call_group_p)
3122 rtx tmp, set = single_set (insn);
3123 int src_regno, dest_regno;
3127 if (DEBUG_INSN_P (insn))
3128 /* We don't want to mark debug insns as part of the same
3129 sched group. We know they really aren't, but if we use
3130 debug insns to tell that a call group is over, we'll
3131 get different code if debug insns are not there and
3132 instructions that follow seem like they should be part of the call group.
3135 Also, if we did, fixup_sched_groups() would move the
3136 deps of the debug insn to the call insn, modifying
3137 non-debug post-dependency counts of the debug insn
3138 dependencies and otherwise messing with the scheduling order.
3141 Instead, let such debug insns be scheduled freely, but
3142 keep the call group open in case there are insns that
3143 should be part of it afterwards. Since we grant debug
3144 insns higher priority than even sched group insns, it
3145 will all turn out all right. */
3146 goto debug_dont_end_call_group;
3147 else
3148 goto end_call_group;
3151 tmp = SET_DEST (set);
3152 if (GET_CODE (tmp) == SUBREG)
3153 tmp = SUBREG_REG (tmp);
3154 if (REG_P (tmp))
3155 dest_regno = REGNO (tmp);
3156 else
3157 goto end_call_group;
3159 tmp = SET_SRC (set);
3160 if (GET_CODE (tmp) == SUBREG)
3161 tmp = SUBREG_REG (tmp);
3162 if ((GET_CODE (tmp) == PLUS
3163 || GET_CODE (tmp) == MINUS)
3164 && REG_P (XEXP (tmp, 0))
3165 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3166 && dest_regno == STACK_POINTER_REGNUM)
3167 src_regno = STACK_POINTER_REGNUM;
3168 else if (REG_P (tmp))
3169 src_regno = REGNO (tmp);
3170 else
3171 goto end_call_group;
3173 if (src_regno < FIRST_PSEUDO_REGISTER
3174 || dest_regno < FIRST_PSEUDO_REGISTER)
if (!deps->readonly
3177 && deps->in_post_call_group_p == post_call_initial)
3178 deps->in_post_call_group_p = post_call;
3180 if (!sel_sched_p () || sched_emulate_haifa_p)
3182 SCHED_GROUP_P (insn) = 1;
3183 CANT_MOVE (insn) = 1;
end_call_group:
3189 if (!deps->readonly)
3190 deps->in_post_call_group_p = not_post_call;
3194 debug_dont_end_call_group:
3195 if ((current_sched_info->flags & DO_SPECULATION)
3196 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3197 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot be speculated. */
3201 sel_mark_hard_insn (insn);
3204 sd_iterator_def sd_it;
3207 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3208 sd_iterator_cond (&sd_it, &dep);)
3209 change_spec_dep_to_hard (sd_it);
3214 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3215 longjmp, loop forever, ...). */
3217 call_may_noreturn_p (rtx insn)
3221 /* const or pure calls that aren't looping will always return. */
3222 if (RTL_CONST_OR_PURE_CALL_P (insn)
3223 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
return false;
3226 call = PATTERN (insn);
3227 if (GET_CODE (call) == PARALLEL)
3228 call = XVECEXP (call, 0, 0);
3229 if (GET_CODE (call) == SET)
3230 call = SET_SRC (call);
3231 if (GET_CODE (call) == CALL
3232 && MEM_P (XEXP (call, 0))
3233 && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3235 rtx symbol = XEXP (XEXP (call, 0), 0);
3236 if (SYMBOL_REF_DECL (symbol)
3237 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3239 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3240 == BUILT_IN_NORMAL)
3241 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3244 case BUILT_IN_BCOPY:
3245 case BUILT_IN_BZERO:
3246 case BUILT_IN_INDEX:
3247 case BUILT_IN_MEMCHR:
3248 case BUILT_IN_MEMCMP:
3249 case BUILT_IN_MEMCPY:
3250 case BUILT_IN_MEMMOVE:
3251 case BUILT_IN_MEMPCPY:
3252 case BUILT_IN_MEMSET:
3253 case BUILT_IN_RINDEX:
3254 case BUILT_IN_STPCPY:
3255 case BUILT_IN_STPNCPY:
3256 case BUILT_IN_STRCAT:
3257 case BUILT_IN_STRCHR:
3258 case BUILT_IN_STRCMP:
3259 case BUILT_IN_STRCPY:
3260 case BUILT_IN_STRCSPN:
3261 case BUILT_IN_STRLEN:
3262 case BUILT_IN_STRNCAT:
3263 case BUILT_IN_STRNCMP:
3264 case BUILT_IN_STRNCPY:
3265 case BUILT_IN_STRPBRK:
3266 case BUILT_IN_STRRCHR:
3267 case BUILT_IN_STRSPN:
3268 case BUILT_IN_STRSTR:
3269 /* Assume certain string/memory builtins always return. */
return false;
3277 /* For all other calls assume that they might not always return. */
return true;
3281 /* Analyze INSN with DEPS as a context. */
3283 deps_analyze_insn (struct deps_desc *deps, rtx insn)
3285 if (sched_deps_info->start_insn)
3286 sched_deps_info->start_insn (insn);
3288 /* Record the condition for this insn. */
3289 if (NONDEBUG_INSN_P (insn))
3290 sched_get_condition_with_rev (insn, NULL);
3292 if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
3294 /* Make each JUMP_INSN (but not a speculative check)
3295 a scheduling barrier for memory references. */
3299 && sel_insn_is_speculation_check (insn)))
3301 /* Keep the list a reasonable size. */
3302 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3303 flush_pending_lists (deps, insn, true, true);
else
3306 deps->last_pending_memory_flush
3307 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
3308 /* Signal to sched_analyze_insn that this jump stands
3309 just for itself, not for any other pending memory
3310 reads/writes that flush_pending_lists had to flush. */
3311 PUT_REG_NOTE_KIND (deps->last_pending_memory_flush,
3312 NON_FLUSH_JUMP_KIND);
3316 sched_analyze_insn (deps, PATTERN (insn), insn);
3318 else if (CALL_P (insn))
3322 CANT_MOVE (insn) = 1;
3324 if (find_reg_note (insn, REG_SETJMP, NULL))
3326 /* This is setjmp. Assume that all registers, not just
3327 hard registers, may be clobbered by this call. */
3328 reg_pending_barrier = MOVE_BARRIER;
3332 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3333 /* A call may read and modify global register variables. */
if (global_regs[i])
3336 SET_REGNO_REG_SET (reg_pending_sets, i);
3337 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3339 /* Other call-clobbered hard regs may be clobbered.
3340 Since we only have a choice between 'might be clobbered'
3341 and 'definitely not clobbered', we must include all
3342 partly call-clobbered registers here. */
3343 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3344 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3345 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3346 /* We don't know what set of fixed registers might be used
3347 by the function, but it is certain that the stack pointer
3348 is among them, so be conservative and treat them all as used. */
3349 else if (fixed_regs[i])
3350 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3351 /* The frame pointer is normally not used by the function
3352 itself, but by the debugger. */
3353 /* ??? MIPS o32 is an exception. It uses the frame pointer
3354 in the macro expansion of jal but does not represent this
3355 fact in the call_insn rtl. */
3356 else if (i == FRAME_POINTER_REGNUM
3357 || (i == HARD_FRAME_POINTER_REGNUM
3358 && (! reload_completed || frame_pointer_needed)))
3359 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3362 /* For each insn which shouldn't cross a call, add a dependence
3363 between that insn and this call insn. */
3364 add_dependence_list_and_free (deps, insn,
3365 &deps->sched_before_next_call, 1, REG_DEP_ANTI);
3368 sched_analyze_insn (deps, PATTERN (insn), insn);
3370 /* If CALL would be in a sched group, then this will violate
3371 convention that sched group insns have dependencies only on the
3372 previous instruction.
3374 Of course one can say: "Hey! What about head of the sched group?"
3375 And I will answer: "Basic principles (one dep per insn) are always
3376 the same." */
3377 gcc_assert (!SCHED_GROUP_P (insn));
3379 /* In the absence of interprocedural alias analysis, we must flush
3380 all pending reads and writes, and start new dependencies starting
3381 from here. But only flush writes for constant calls (which may
3382 be passed a pointer to something we haven't written yet). */
3383 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3385 if (!deps->readonly)
3387 /* Remember the last function call for limiting lifetimes. */
3388 free_INSN_LIST_list (&deps->last_function_call);
3389 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3391 if (call_may_noreturn_p (insn))
3393 /* Remember the last function call that might not always return
3394 normally for limiting moves of trapping insns. */
3395 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3396 deps->last_function_call_may_noreturn
3397 = alloc_INSN_LIST (insn, NULL_RTX);
3400 /* Before reload, begin a post-call group, so as to keep the
3401 lifetimes of hard registers correct. */
3402 if (! reload_completed)
3403 deps->in_post_call_group_p = post_call;
3407 if (sched_deps_info->use_cselib)
3408 cselib_process_insn (insn);
3410 /* EH_REGION insn notes cannot appear until well after we complete
3411 scheduling. */
3412 if (NOTE_P (insn))
3413 gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
3414 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
3416 if (sched_deps_info->finish_insn)
3417 sched_deps_info->finish_insn ();
3419 /* Fixup the dependencies in the sched group. */
3420 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3421 && SCHED_GROUP_P (insn) && !sel_sched_p ())
3422 fixup_sched_groups (insn);
3425 /* Initialize DEPS for the new block beginning with HEAD. */
3427 deps_start_bb (struct deps_desc *deps, rtx head)
3429 gcc_assert (!deps->readonly);
3431 /* Before reload, if the previous block ended in a call, show that
3432 we are inside a post-call group, so as to keep the lifetimes of
3433 hard registers correct. */
3434 if (! reload_completed && !LABEL_P (head))
3436 rtx insn = prev_nonnote_nondebug_insn (head);
3438 if (insn && CALL_P (insn))
3439 deps->in_post_call_group_p = post_call_initial;
3443 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3444 dependencies for each insn. */
3446 sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
3450 if (sched_deps_info->use_cselib)
3451 cselib_init (CSELIB_RECORD_MEMORY);
3453 deps_start_bb (deps, head);
3455 for (insn = head;; insn = NEXT_INSN (insn))
3460 /* And initialize deps_lists. */
3461 sd_init_insn (insn);
3464 deps_analyze_insn (deps, insn);
3468 if (sched_deps_info->use_cselib)
cselib_finish ();
3476 /* Helper for sched_free_deps ().
3477 Delete INSN's (RESOLVED_P) backward dependencies. */
3479 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3481 sd_iterator_def sd_it;
3483 sd_list_types_def types;
if (resolved_p)
3486 types = SD_LIST_RES_BACK;
3487 else
3488 types = SD_LIST_BACK;
3490 for (sd_it = sd_iterator_start (insn, types);
3491 sd_iterator_cond (&sd_it, &dep);)
3493 dep_link_t link = *sd_it.linkp;
3494 dep_node_t node = DEP_LINK_NODE (link);
3495 deps_list_t back_list;
3496 deps_list_t forw_list;
3498 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3499 remove_from_deps_list (link, back_list);
3500 delete_dep_node (node);
3504 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with deps_lists. */
3507 sched_free_deps (rtx head, rtx tail, bool resolved_p)
3510 rtx next_tail = NEXT_INSN (tail);
3512 /* We make two passes since some insns may be scheduled before their
3513 dependencies are resolved. */
3514 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3515 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3517 /* Clear forward deps and leave the dep_nodes to the
3518 corresponding back_deps list. */
3519 if (resolved_p)
3520 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3521 else
3522 clear_deps_list (INSN_FORW_DEPS (insn));
3524 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3525 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3527 /* Clear resolved back deps together with their dep_nodes. */
3528 delete_dep_nodes_in_back_deps (insn, resolved_p);
3530 sd_finish_insn (insn);
3534 /* Initialize variables for region data dependence analysis.
3535 When LAZY_REG_LAST is true, do not allocate reg_last array
3536 of struct deps_desc immediately. */
3539 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3541 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3543 deps->max_reg = max_reg;
3544 if (lazy_reg_last)
3545 deps->reg_last = NULL;
3546 else
3547 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3548 INIT_REG_SET (&deps->reg_last_in_use);
3550 deps->pending_read_insns = 0;
3551 deps->pending_read_mems = 0;
3552 deps->pending_write_insns = 0;
3553 deps->pending_write_mems = 0;
3554 deps->pending_read_list_length = 0;
3555 deps->pending_write_list_length = 0;
3556 deps->pending_flush_length = 0;
3557 deps->last_pending_memory_flush = 0;
3558 deps->last_function_call = 0;
3559 deps->last_function_call_may_noreturn = 0;
3560 deps->sched_before_next_call = 0;
3561 deps->in_post_call_group_p = not_post_call;
3562 deps->last_debug_insn = 0;
3563 deps->last_reg_pending_barrier = NOT_A_BARRIER;
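/* A minimal usage sketch for this context (hypothetical caller, using
   only functions defined in this file):

     struct deps_desc deps;
     init_deps (&deps, false);
     sched_analyze (&deps, head, tail);
     free_deps (&deps);

   With LAZY_REG_LAST true, init_deps_reg_last must run before the
   first access to deps.reg_last. */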
3567 /* Init only the reg_last field of DEPS, which was not allocated before
3568 because we inited DEPS lazily. */
3570 init_deps_reg_last (struct deps_desc *deps)
3572 gcc_assert (deps && deps->max_reg > 0);
3573 gcc_assert (deps->reg_last == NULL);
3575 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3579 /* Free insn lists found in DEPS. */
3582 free_deps (struct deps_desc *deps)
3585 reg_set_iterator rsi;
3587 /* We set max_reg to 0 when this context was already freed. */
3588 if (deps->max_reg == 0)
3590 gcc_assert (deps->reg_last == NULL);
return;
3595 free_INSN_LIST_list (&deps->pending_read_insns);
3596 free_EXPR_LIST_list (&deps->pending_read_mems);
3597 free_INSN_LIST_list (&deps->pending_write_insns);
3598 free_EXPR_LIST_list (&deps->pending_write_mems);
3599 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3601 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3602 times. For a testcase with 42000 regs and 8000 small basic blocks,
3603 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3604 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3606 struct deps_reg *reg_last = &deps->reg_last[i];
3607 if (reg_last->uses)
3608 free_INSN_LIST_list (&reg_last->uses);
3609 if (reg_last->sets)
3610 free_INSN_LIST_list (&reg_last->sets);
3611 if (reg_last->implicit_sets)
3612 free_INSN_LIST_list (&reg_last->implicit_sets);
3613 if (reg_last->clobbers)
3614 free_INSN_LIST_list (&reg_last->clobbers);
3616 CLEAR_REG_SET (&deps->reg_last_in_use);
3618 /* As we initialize reg_last lazily, it is possible that we didn't allocate it at all. */
3620 free (deps->reg_last);
3621 deps->reg_last = NULL;
3626 /* Remove INSN from the dependence context DEPS. */
3628 remove_from_deps (struct deps_desc *deps, rtx insn)
3632 reg_set_iterator rsi;
3634 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3635 &deps->pending_read_mems);
3636 if (!DEBUG_INSN_P (insn))
3637 deps->pending_read_list_length -= removed;
3638 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3639 &deps->pending_write_mems);
3640 deps->pending_write_list_length -= removed;
3641 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3642 deps->pending_flush_length -= removed;
3644 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3646 struct deps_reg *reg_last = &deps->reg_last[i];
3647 if (reg_last->uses)
3648 remove_from_dependence_list (insn, &reg_last->uses);
3649 if (reg_last->sets)
3650 remove_from_dependence_list (insn, &reg_last->sets);
3651 if (reg_last->implicit_sets)
3652 remove_from_dependence_list (insn, &reg_last->implicit_sets);
3653 if (reg_last->clobbers)
3654 remove_from_dependence_list (insn, &reg_last->clobbers);
3655 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3656 && !reg_last->clobbers)
3657 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
if (CALL_P (insn))
3662 remove_from_dependence_list (insn, &deps->last_function_call);
3663 remove_from_dependence_list (insn,
3664 &deps->last_function_call_may_noreturn);
3666 remove_from_dependence_list (insn, &deps->sched_before_next_call);
3669 /* Init deps data vector. */
3671 init_deps_data_vector (void)
3673 int reserve = (sched_max_luid + 1
3674 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
3675 if (reserve > 0
3676 && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
3677 VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
3678 3 * sched_max_luid / 2);
3681 /* If it is profitable to use them, initialize or extend (depending on
3682 GLOBAL_P) dependency data. */
3684 sched_deps_init (bool global_p)
3686 /* Average number of insns in the basic block.
3687 '+ 1' is used to make it nonzero. */
3688 int insns_in_block = sched_max_luid / n_basic_blocks + 1;
3690 init_deps_data_vector ();
3692 /* We use another caching mechanism for selective scheduling, so
3693 we don't use this one. */
3694 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
3696 /* ?!? We could save some memory by computing a per-region luid mapping
3697 which could reduce both the number of vectors in the cache and the
3698 size of each vector. Instead we just avoid the cache entirely unless
3699 the average number of instructions in a basic block is very high. See
3700 the comment before the declaration of true_dependency_cache for
3701 what we consider "very high". */
3703 extend_dependency_caches (sched_max_luid, true);
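/* A worked instance of the threshold above (hypothetical numbers):
   sched_max_luid == 10000 over n_basic_blocks == 15 gives
   insns_in_block == 10000/15 + 1 == 667 > 500, so the caches are
   created; 1000 luids over 200 blocks gives 6, and they are not. */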
3708 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
3709 /* Allocate lists for one block at a time. */
3710 insns_in_block);
3711 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
3712 /* Allocate nodes for one block at a time.
3713 We assume that an average insn has
3714 about five back dependencies. */
3715 5 * insns_in_block);
3720 /* Create or extend (depending on CREATE_P) dependency caches to size N. */
3723 extend_dependency_caches (int n, bool create_p)
3725 if (create_p || true_dependency_cache)
3727 int i, luid = cache_size + n;
3729 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
3730 luid);
3731 output_dependency_cache = XRESIZEVEC (bitmap_head,
3732 output_dependency_cache, luid);
3733 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache, luid);
3736 if (current_sched_info->flags & DO_SPECULATION)
3737 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache, luid);
3740 for (i = cache_size; i < luid; i++)
3742 bitmap_initialize (&true_dependency_cache[i], 0);
3743 bitmap_initialize (&output_dependency_cache[i], 0);
3744 bitmap_initialize (&anti_dependency_cache[i], 0);
3746 if (current_sched_info->flags & DO_SPECULATION)
3747 bitmap_initialize (&spec_dependency_cache[i], 0);
3753 /* Finalize dependency information for the whole function. */
3755 sched_deps_finish (void)
3757 gcc_assert (deps_pools_are_empty_p ());
3758 free_alloc_pool_if_empty (&dn_pool);
3759 free_alloc_pool_if_empty (&dl_pool);
3760 gcc_assert (dn_pool == NULL && dl_pool == NULL);
3762 VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
3765 if (true_dependency_cache)
3769 for (i = 0; i < cache_size; i++)
3771 bitmap_clear (&true_dependency_cache[i]);
3772 bitmap_clear (&output_dependency_cache[i]);
3773 bitmap_clear (&anti_dependency_cache[i]);
3775 if (sched_deps_info->generate_spec_deps)
3776 bitmap_clear (&spec_dependency_cache[i]);
3778 free (true_dependency_cache);
3779 true_dependency_cache = NULL;
3780 free (output_dependency_cache);
3781 output_dependency_cache = NULL;
3782 free (anti_dependency_cache);
3783 anti_dependency_cache = NULL;
3785 if (sched_deps_info->generate_spec_deps)
3787 free (spec_dependency_cache);
3788 spec_dependency_cache = NULL;
3794 /* Initialize some global variables needed by the dependency analysis code. */
3798 init_deps_global (void)
3800 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3801 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3802 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
3803 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
3804 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
3805 reg_pending_barrier = NOT_A_BARRIER;
3807 if (!sel_sched_p () || sched_emulate_haifa_p)
3809 sched_deps_info->start_insn = haifa_start_insn;
3810 sched_deps_info->finish_insn = haifa_finish_insn;
3812 sched_deps_info->note_reg_set = haifa_note_reg_set;
3813 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
3814 sched_deps_info->note_reg_use = haifa_note_reg_use;
3816 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
3817 sched_deps_info->note_dep = haifa_note_dep;
3821 /* Free everything used by the dependency analysis code. */
3824 finish_deps_global (void)
3826 FREE_REG_SET (reg_pending_sets);
3827 FREE_REG_SET (reg_pending_clobbers);
3828 FREE_REG_SET (reg_pending_uses);
3831 /* Estimate the weakness of dependence between MEM1 and MEM2. */
3833 estimate_dep_weak (rtx mem1, rtx mem2)
if (mem1 == mem2)
3838 /* MEMs are the same - don't speculate. */
3839 return MIN_DEP_WEAK;
3841 r1 = XEXP (mem1, 0);
3842 r2 = XEXP (mem2, 0);
if (r1 == r2
3845 || (REG_P (r1) && REG_P (r2)
3846 && REGNO (r1) == REGNO (r2)))
3847 /* Again, MEMs are the same. */
3848 return MIN_DEP_WEAK;
3849 else if ((REG_P (r1) && !REG_P (r2))
3850 || (!REG_P (r1) && REG_P (r2)))
3851 /* Different addressing modes - reason to be more speculative
3852 than usual. */
3853 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
3855 /* We can't say anything about the dependence. */
3856 return UNCERTAIN_DEP_WEAK;
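/* Worked examples with hypothetical operands: [r1] vs. [r1] yields
   MIN_DEP_WEAK (same address - don't speculate); [r1] vs. [sp + 8]
   mixes addressing modes and yields the value halfway between
   UNCERTAIN_DEP_WEAK and NO_DEP_WEAK; [r1] vs. [r2] yields
   UNCERTAIN_DEP_WEAK. */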
3859 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
3860 This function can handle same INSN and ELEM (INSN == ELEM).
3861 It is a convenience wrapper. */
3863 add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
3868 if (dep_type == REG_DEP_TRUE)
3870 else if (dep_type == REG_DEP_OUTPUT)
3874 gcc_assert (dep_type == REG_DEP_ANTI);
3878 /* When add_dependence is called from inside sched-deps.c, we expect
3879 cur_insn to be non-null. */
3880 internal = cur_insn != NULL;
3881 if (internal)
3882 gcc_assert (insn == cur_insn);
else
  cur_insn = insn;
3886 note_dep (elem, ds);
3891 /* Return weakness of speculative type TYPE in the dep_status DS. */
3893 get_dep_weak_1 (ds_t ds, ds_t type)
3899 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
3900 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
3901 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
3902 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
3903 default: gcc_unreachable ();
3910 get_dep_weak (ds_t ds, ds_t type)
3912 dw_t dw = get_dep_weak_1 (ds, type);
3914 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3918 /* Return the dep_status, which has the same parameters as DS, except
3919 that speculative type TYPE will have weakness DW. */
3921 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
3923 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3928 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
3929 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
3930 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
3931 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
3932 default: gcc_unreachable ();
3937 /* Return the join of two dep_statuses DS1 and DS2.
3938 If MAX_P is true then choose the greater probability,
3939 otherwise multiply probabilities.
3940 This function assumes that both DS1 and DS2 contain speculative bits. */
3942 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
3946 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
3948 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
3950 t = FIRST_SPEC_TYPE;
3953 if ((ds1 & t) && !(ds2 & t))
3954 ds |= ds1 & t;
3955 else if (!(ds1 & t) && (ds2 & t))
3956 ds |= ds2 & t;
3957 else if ((ds1 & t) && (ds2 & t))
3959 dw_t dw1 = get_dep_weak (ds1, t);
3960 dw_t dw2 = get_dep_weak (ds2, t);
3965 dw = ((ds_t) dw1) * ((ds_t) dw2);
3966 dw /= MAX_DEP_WEAK;
3967 if (dw < MIN_DEP_WEAK)
dw = MIN_DEP_WEAK;
3978 ds = set_dep_weak (ds, t, (dw_t) dw);
3981 if (t == LAST_SPEC_TYPE)
3982 break;
3983 t <<= SPEC_TYPE_SHIFT;
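/* A worked example of the merge: treating each weakness as a success
   probability scaled by MAX_DEP_WEAK, two BEGIN_DATA weaknesses of
   MAX_DEP_WEAK/2 combine to MAX_DEP_WEAK/4 when MAX_P is false, but
   to MAX_DEP_WEAK/2 when MAX_P is true. */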
3990 /* Return the join of two dep_statuses DS1 and DS2.
3991 This function assumes that both DS1 and DS2 contain speculative bits. */
3993 ds_merge (ds_t ds1, ds_t ds2)
3995 return ds_merge_1 (ds1, ds2, false);
3998 /* Return the join of two dep_statuses DS1 and DS2. */
4000 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4002 ds_t new_status = ds | ds2;
4004 if (new_status & SPECULATIVE)
4006 if ((ds && !(ds & SPECULATIVE))
4007 || (ds2 && !(ds2 & SPECULATIVE)))
4008 /* Then this dep can't be speculative. */
4009 new_status &= ~SPECULATIVE;
4012 /* Both are speculative. Merging probabilities. */
4017 dw = estimate_dep_weak (mem1, mem2);
4018 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4026 new_status = ds_merge (ds2, ds);
4033 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
probabilities. */
4036 ds_max_merge (ds_t ds1, ds_t ds2)
4038 if (ds1 == 0 && ds2 == 0)
return 0;
4041 if (ds1 == 0 && ds2 != 0)
return ds2;
4044 if (ds1 != 0 && ds2 == 0)
return ds1;
4047 return ds_merge_1 (ds1, ds2, true);
4050 /* Return the probability of speculation success for the speculation status DS. */
4058 dt = FIRST_SPEC_TYPE;
4063 res *= (ds_t) get_dep_weak (ds, dt);
4067 if (dt == LAST_SPEC_TYPE)
4069 dt <<= SPEC_TYPE_SHIFT;
4075 res /= MAX_DEP_WEAK;
4077 if (res < MIN_DEP_WEAK)
res = MIN_DEP_WEAK;
4080 gcc_assert (res <= MAX_DEP_WEAK);
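/* E.g. a status speculative in both BEGIN_DATA and BEGIN_CONTROL with
   weaknesses w1 and w2 yields w1*w2/MAX_DEP_WEAK, treating the two
   types as independent success probabilities. */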
4085 /* Return a dep status that contains all speculation types of DS. */
4087 ds_get_speculation_types (ds_t ds)
4089 if (ds & BEGIN_DATA)
4090 ds |= BEGIN_DATA;
4091 if (ds & BE_IN_DATA)
4092 ds |= BE_IN_DATA;
4093 if (ds & BEGIN_CONTROL)
4094 ds |= BEGIN_CONTROL;
4095 if (ds & BE_IN_CONTROL)
4096 ds |= BE_IN_CONTROL;
4098 return ds & SPECULATIVE;
4101 /* Return a dep status that contains maximal weakness for each speculation
4102 type present in DS. */
4104 ds_get_max_dep_weak (ds_t ds)
4106 if (ds & BEGIN_DATA)
4107 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4108 if (ds & BE_IN_DATA)
4109 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4110 if (ds & BEGIN_CONTROL)
4111 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4112 if (ds & BE_IN_CONTROL)
4113 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
return ds;
4118 /* Dump information about the dependence status S. */
4120 dump_ds (FILE *f, ds_t s)
4125 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4127 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4128 if (s & BEGIN_CONTROL)
4129 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4130 if (s & BE_IN_CONTROL)
4131 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4134 fprintf (f, "HARD_DEP; ");
4137 fprintf (f, "DEP_TRUE; ");
4139 fprintf (f, "DEP_ANTI; ");
4141 fprintf (f, "DEP_OUTPUT; ");
void
debug_ds (ds_t s)
{
4149 dump_ds (stderr, s);
4150 fprintf (stderr, "\n");
4153 #ifdef ENABLE_CHECKING
4154 /* Verify that dependence type and status are consistent.
4155 If RELAXED_P is true, then skip dep_weakness checks. */
4157 check_dep (dep_t dep, bool relaxed_p)
4159 enum reg_note dt = DEP_TYPE (dep);
4160 ds_t ds = DEP_STATUS (dep);
4162 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4164 if (!(current_sched_info->flags & USE_DEPS_LIST))
4166 gcc_assert (ds == 0);
4170 /* Check that dependence type contains the same bits as the status. */
4171 if (dt == REG_DEP_TRUE)
4172 gcc_assert (ds & DEP_TRUE);
4173 else if (dt == REG_DEP_OUTPUT)
4174 gcc_assert ((ds & DEP_OUTPUT)
4175 && !(ds & DEP_TRUE));
4177 gcc_assert ((dt == REG_DEP_ANTI)
4178 && (ds & DEP_ANTI)
4179 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4181 /* HARD_DEP cannot appear in the dep_status of a link. */
4182 gcc_assert (!(ds & HARD_DEP));
4184 /* Check that dependence status is set correctly when speculation is not
4185 supported. */
4186 if (!sched_deps_info->generate_spec_deps)
4187 gcc_assert (!(ds & SPECULATIVE));
4188 else if (ds & SPECULATIVE)
4192 ds_t type = FIRST_SPEC_TYPE;
4194 /* Check that dependence weakness is in proper range. */
4198 get_dep_weak (ds, type);
4200 if (type == LAST_SPEC_TYPE)
4202 type <<= SPEC_TYPE_SHIFT;
4207 if (ds & BEGIN_SPEC)
4209 /* Only true dependence can be data speculative. */
4210 if (ds & BEGIN_DATA)
4211 gcc_assert (ds & DEP_TRUE);
4213 /* Control dependencies in the insn scheduler are represented by
4214 anti-dependencies, therefore only anti dependence can be
4215 control speculative. */
4216 if (ds & BEGIN_CONTROL)
4217 gcc_assert (ds & DEP_ANTI);
4221 /* Subsequent speculations should resolve true dependencies. */
4222 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4225 /* Check that true and anti dependencies can't have other speculative
statuses. */
if (ds & DEP_TRUE)
4228 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4229 /* An output dependence can't be speculative at all. */
4230 gcc_assert (!(ds & DEP_OUTPUT));
4231 if (ds & DEP_ANTI)
4232 gcc_assert (ds & BEGIN_CONTROL);
4235 #endif /* ENABLE_CHECKING */
4237 #endif /* INSN_SCHEDULING */