/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011, 2012
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tree.h"		/* FIXME: Used by call_may_noreturn_p.  */
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "emit-rtl.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
#include "ira.h"
#include "target.h"
#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
#define CHECK (false)
#endif
/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}
/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}
/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
  DEP_NONREG (dep) = 0;
  DEP_MULTIPLE (dep) = 0;
  DEP_REPLACE (dep) = NULL;
}
/* Init DEP with the arguments.
   While most of the scheduler (including targets) only need the major type
   of the dependency, it is convenient to hide full dep_status from them.  */
void
init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}
/* Make a copy of FROM in TO.  */
void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}
static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
		      | DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;

	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;

	case REG_DEP_CONTROL:
	  t = 'c';
	  break;

	case REG_DEP_ANTI:
	  t = 'a';
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}
/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}
/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}
/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}
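/* Illustration (added comment, not in the upstream sources): a deps_list
   is a NULL-terminated doubly-linked list threaded through dep_links:

     DEPS_LIST_FIRST (l) --> [link1] --> [link2] --> NULL

   Each link's DEP_LINK_PREV_NEXTP points to the memory location that
   holds the pointer to the link itself (either the list head or the
   previous link's DEP_LINK_NEXT field), so a link can be detached
   without knowing which list it is on.  */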
/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}
/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}
/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}
/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}
/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}
/* Pool to hold all dependency nodes (dep_node_t).  */
static alloc_pool dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  --dn_pool_diff;

  pool_free (dn_pool, n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static alloc_pool dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;
/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  pool_free (dl_pool, l);
}

/* Return true if there are no dep_nodes or deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
	break;

      remove_from_deps_list (link, l);
    }
  while (1);
}
/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    {
      if (DEP_STATUS (dep) & SPECULATIVE)
	return true;
    }
  if (current_sched_info->flags & DO_PREDICATION)
    {
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	return true;
    }
  if (DEP_REPLACE (dep) != NULL)
    return true;
  return false;
}
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or may be implicitly
   clobbered or used) by the currently analyzed insn.  For example, an
   insn may have a constraint admitting only one register class; even if
   no hard register appears in the insn yet, a register of that class
   will be in the insn after the reload pass because the constraint
   requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has an associated bitmap for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap for the true-dependency cache is
   allocated, then the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *control_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;

/* True if we should mark added dependencies as non-register deps.  */
static bool mark_as_hard;
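/* Illustration (added comment): the caches are indexed by the luid of the
   consumer, and each bitmap holds the luids of its producers.  The idiom
   used throughout this file to test for a recorded true dependence of CON
   on PRO is

     bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
		   INSN_LUID (pro))  */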
static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx, rtx, enum reg_note);
static void add_dependence_list (rtx, rtx, int, enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx,
					  rtx *, int, enum reg_note, bool);
static void delete_all_dependences (rtx);
static void chain_to_prev_insn (rtx);

static void flush_pending_lists (struct deps_desc *, rtx, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx);

static bool sched_has_condition_p (const_rtx);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
#endif
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const_rtx insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}
/* Return the condition under which INSN does not execute (i.e. the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const_rtx insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
			     XEXP (cond, 0),
			     XEXP (cond, 1));
    }
  return cond;
}
/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const_rtx insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}
/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const_rtx insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	 (rev1 == rev2
	  ? reversed_comparison_code (cond2, NULL)
	  : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }

  return false;
}
/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If instruction might fault, it cannot be speculatively scheduled.
	   For control speculation it's obvious why and for data speculation
	   it's because the insn might get wrong input if speculation
	   wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}
/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}
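/* Usage sketch (added comment): the usual way to walk dependencies is the
   FOR_EACH_DEP iterator from sched-int.h, which uses sd_next_list
   internally to advance from one list to the next:

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       if (DEP_TYPE (dep) == REG_DEP_TRUE)
	 ...;  */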
/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
	size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}
/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */

bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}
/* Initialize data for INSN.  */
void
sd_init_insn (rtx insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check if the dependency is
   present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none was found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks if producer and consumer are the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
	/* INSN has an internal dependence, which we can't overcome.  */
	HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}
/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	       ..we should update DEP_STATUS
	     else
	       ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}
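/* Note (added comment): the "more restrictive" comparison above relies on
   the declaration order in reg-notes.def -- REG_DEP_TRUE < REG_DEP_OUTPUT
   < REG_DEP_ANTI / REG_DEP_CONTROL -- i.e. a numerically smaller reg_note
   value denotes a stronger dependence.  */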
/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}
/* The type of dependence DEP has changed from OLD_TYPE.  Update dependency
   caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear corresponding cache entry because type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}
/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}
/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case if
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either existing dep or a dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}
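/* Note (added comment): ds_merge () combines two speculative statuses by
   OR-ing their dependence types and merging the per-kind "weakness"
   (probability) fields, while estimate_dep_weak () supplies the initial
   weakness for a pair of possibly-aliasing memory references.  */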
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);
#endif

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep, so it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}
/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}
/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef ENABLE_CHECKING
  check_dep (dep, false);
#endif

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}
/* Add or update a backward dependence between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}
/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}
/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}
/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}
/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}
/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}
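/* Debugging sketch (added comment): from gdb one can print an insn's
   backward dependencies with

     (gdb) call sd_debug_lists (insn, SD_LIST_BACK)

   which emits the summary size of the lists followed by one
   producer/consumer uid pair per dependence.  */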
/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx con, rtx pro, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx real_pro = pro;
      rtx other = real_insn_for_shadow (real_pro);
      rtx cond;

      if (other != NULL_RTX)
	real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	{
	  HARD_REG_SET uses;
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;
	}
      if (dep_type == REG_DEP_CONTROL)
	{
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		     INSN_UID (real_pro));
	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			       REG_DEP_TRUE, false);
	}
    }

  add_dependence_1 (con, pro, dep_type);
}
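/* Example (added comment): when PRO is a conditional jump that may later
   be turned into a predicated instruction, CON keeps its REG_DEP_CONTROL
   link on PRO and additionally gains REG_DEP_TRUE links on the producers
   of the branch condition (the INSN_COND_DEPS list); when predication is
   provably impossible, the dependence is demoted to REG_DEP_ANTI.  */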
/* A convenience wrapper to operate on an entire list.  HARD should be
   true if DEP_NONREG should be set on newly created dependencies.  */

static void
add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type,
		     bool hard)
{
  mark_as_hard = hard;
  for (; list; list = XEXP (list, 1))
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
	add_dependence (insn, XEXP (list, 0), dep_type);
    }
  mark_as_hard = false;
}
/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  HARD should be true if DEP_NONREG should be set on
   newly created dependencies.  */

static void
add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
			      int uncond, enum reg_note dep_type, bool hard)
{
  add_dependence_list (insn, *listp, uncond, dep_type, hard);

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    return;

  free_INSN_LIST_list (listp);
}
/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx insn, rtx* listp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  removed++;
	  continue;
	}

      listp = &XEXP (*listp, 1);
    }

  return removed;
}

/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  remove_free_EXPR_LIST_node (exprp);
	  removed++;
	  continue;
	}

      listp = &XEXP (*listp, 1);
      exprp = &XEXP (*exprp, 1);
    }

  return removed;
}
/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     delete_dep.  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx i = insn;
      rtx pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
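/* Example (added comment): for a scheduling group I1, I2, I3 (with
   SCHED_GROUP_P set on I2 and I3), a dependence of I3 on an outside
   producer P is redirected to I1, and I3 itself becomes anti-dependent
   on I2, so the group stays contiguous.  */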
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
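/* Example (added comment) of the three non-read kinds:

     (1)  [mem] = r1	;; write
     (2)  r2 = [mem]	;; (2) has a true dependence on (1)
     (3)  [mem] = r3	;; (3) has an anti dependence on (2)
			;; and an output dependence on (1)  */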
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
			 rtx insn, rtx mem)
{
  rtx *insn_list;
  rtx *mem_list;
  rtx link;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
							GET_MODE (mem), insn);
    }
  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = link;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
		     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
				    1, REG_DEP_ANTI, true);
      if (!deps->readonly)
	{
	  free_EXPR_LIST_list (&deps->pending_read_mems);
	  deps->pending_read_list_length = 0;
	}
    }

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn,
				&deps->last_pending_memory_flush, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
				REG_DEP_ANTI, true);

  if (DEBUG_INSN_P (insn))
    {
      if (for_write)
	free_INSN_LIST_list (&deps->pending_read_insns);
      free_INSN_LIST_list (&deps->pending_write_insns);
      free_INSN_LIST_list (&deps->last_pending_memory_flush);
      free_INSN_LIST_list (&deps->pending_jump_insns);
    }

  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
  mark_as_hard = false;
}
/* The instruction whose dependencies we are currently analyzing.  */
static rtx cur_insn = NULL_RTX;

/* Implement hooks for haifa scheduler.  */

static void
haifa_start_insn (rtx insn)
{
  gcc_assert (insn && !cur_insn);

  cur_insn = insn;
}

static void
haifa_finish_insn (void)
{
  cur_insn = NULL;
}

void
haifa_note_reg_set (int regno)
{
  SET_REGNO_REG_SET (reg_pending_sets, regno);
}

void
haifa_note_reg_clobber (int regno)
{
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
}

void
haifa_note_reg_use (int regno)
{
  SET_REGNO_REG_SET (reg_pending_uses, regno);
}

static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
{
  if (!(ds & SPECULATIVE))
    {
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
		current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
    DEP_NONREG (dep) = 1;
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }
}

static void
haifa_note_dep (rtx elem, ds_t ds)
{
  dep_def _dep;
  dep_t dep = &_dep;

  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
  if (mark_as_hard)
    DEP_NONREG (dep) = 1;
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}

static void
note_reg_use (int r)
{
  if (sched_deps_info->note_reg_use)
    sched_deps_info->note_reg_use (r);
}

static void
note_reg_set (int r)
{
  if (sched_deps_info->note_reg_set)
    sched_deps_info->note_reg_set (r);
}

static void
note_reg_clobber (int r)
{
  if (sched_deps_info->note_reg_clobber)
    sched_deps_info->note_reg_clobber (r);
}

static void
note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
{
  if (sched_deps_info->note_mem_dep)
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
}

static void
note_dep (rtx e, ds_t ds)
{
  if (sched_deps_info->note_dep)
    sched_deps_info->note_dep (e, ds);
}

/* Return the reg_note corresponding to DS.  */
enum reg_note
ds_to_dt (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;
  else if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;
  else if (ds & DEP_ANTI)
    return REG_DEP_ANTI;
  else
    {
      gcc_assert (ds & DEP_CONTROL);
      return REG_DEP_CONTROL;
    }
}
/* Functions for computation of info needed for register pressure
   sensitive insn scheduling.  */


/* Allocate and return reg_use_data structure for REGNO and INSN.  */
static struct reg_use_data *
create_insn_reg_use (int regno, rtx insn)
{
  struct reg_use_data *use;

  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
  use->regno = regno;
  use->insn = insn;
  use->next_insn_use = INSN_REG_USE_LIST (insn);
  INSN_REG_USE_LIST (insn) = use;
  return use;
}

/* Allocate and return reg_set_data structure for REGNO and INSN.  */
static struct reg_set_data *
create_insn_reg_set (int regno, rtx insn)
{
  struct reg_set_data *set;

  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
  set->regno = regno;
  set->insn = insn;
  set->next_insn_set = INSN_REG_SET_LIST (insn);
  INSN_REG_SET_LIST (insn) = set;
  return set;
}
/* Set up insn register uses for INSN and dependency context DEPS.  */
static void
setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
{
  unsigned i;
  reg_set_iterator rsi;
  rtx list;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      if (i < FIRST_PSEUDO_REGISTER
	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
	continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
	/* Ignore a use that is not dying.  */
	continue;

      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (list = reg_last->uses; list; list = XEXP (list, 1))
	{
	  use2 = create_insn_reg_use (i, XEXP (list, 0));
	  next = use->next_regno_use;
	  use->next_regno_use = use2;
	  use2->next_regno_use = next;
	}
    }
}
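/* Note (added comment): all uses of a given regno are kept on a circular
   list through next_regno_use.  The use created for INSN above is the
   entry point, and one node per insn from reg_last->uses is spliced in
   right after it, so the cycle always comes back to INSN's own use.  */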
/* Register pressure info for the currently processed insn.  */
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];

/* Return TRUE if INSN has the use structure for REGNO.  */
static bool
insn_use_p (rtx insn, int regno)
{
  struct reg_use_data *use;

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (use->regno == regno)
      return true;
  return false;
}
/* Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say correspondingly that
   the register is in clobber or unused after the insn.  */
static void
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
{
  int incr, new_incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      if (clobber_p)
	{
	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
	  reg_pressure_info[cl].clobber_increase = new_incr;
	}
      else if (unused_p)
	{
	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
	  reg_pressure_info[cl].unused_set_increase = new_incr;
	}
      else
	{
	  new_incr = reg_pressure_info[cl].set_increase + incr;
	  reg_pressure_info[cl].set_increase = new_incr;
	  if (! insn_use_p (insn, regno))
	    reg_pressure_info[cl].change += incr;
	  create_insn_reg_set (regno, insn);
	}
      gcc_assert (new_incr < (1 << INCREASE_BITS));
    }
}
/* Like mark_insn_pseudo_birth except that NREGS says how many
   hard registers are involved in the birth.  */
static void
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
			    bool clobber_p, bool unused_p)
{
  enum reg_class cl;
  int new_incr, last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_pressure_class[regno];
	  if (cl != NO_REGS)
	    {
	      if (clobber_p)
		{
		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
		  reg_pressure_info[cl].clobber_increase = new_incr;
		}
	      else if (unused_p)
		{
		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
		  reg_pressure_info[cl].unused_set_increase = new_incr;
		}
	      else
		{
		  new_incr = reg_pressure_info[cl].set_increase + 1;
		  reg_pressure_info[cl].set_increase = new_incr;
		  if (! insn_use_p (insn, regno))
		    reg_pressure_info[cl].change += 1;
		  create_insn_reg_set (regno, insn);
		}
	      gcc_assert (new_incr < (1 << INCREASE_BITS));
	    }
	}
      regno++;
    }
}
/* Update the register pressure info after birth of pseudo or hard
   register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say
   correspondingly that the register is in clobber or unused after the
   insn.  */
static void
mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_insn_hard_regno_birth (insn, regno,
				hard_regno_nregs[regno][GET_MODE (reg)],
				clobber_p, unused_p);
  else
    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
}
/* Update the register pressure info after death of pseudo register
   REGNO.  */
static void
mark_pseudo_death (int regno)
{
  int incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      reg_pressure_info[cl].change -= incr;
    }
}
/* Like mark_pseudo_death except that NREGS says how many hard
   registers are involved in the death.  */
static void
mark_hard_regno_death (int regno, int nregs)
{
  enum reg_class cl;
  int last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_pressure_class[regno];
	  if (cl != NO_REGS)
	    reg_pressure_info[cl].change -= 1;
	}
      regno++;
    }
}
/* Update the register pressure info after death of pseudo or hard
   register REG.  */
static void
mark_reg_death (rtx reg)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
  else
    mark_pseudo_death (regno);
}
/* Process SETTER of REG.  DATA is an insn containing the setter.  */
static void
mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
{
  if (setter != NULL_RTX && GET_CODE (setter) != SET)
    return;
  mark_insn_reg_birth
    ((rtx) data, reg, false,
     find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
}

/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
static void
mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
{
  if (GET_CODE (setter) == CLOBBER)
    mark_insn_reg_birth ((rtx) data, reg, true, false);
}
/* Set up reg pressure info related to INSN.  */
void
init_insn_reg_pressure_info (rtx insn)
{
  int i, len;
  enum reg_class cl;
  static struct reg_pressure_data *pressure_info;
  rtx link;

  gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);

  if (! INSN_P (insn))
    return;

  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      reg_pressure_info[cl].clobber_increase = 0;
      reg_pressure_info[cl].set_increase = 0;
      reg_pressure_info[cl].unused_set_increase = 0;
      reg_pressure_info[cl].change = 0;
    }

  note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);

  note_stores (PATTERN (insn), mark_insn_reg_store, insn);

#ifdef AUTO_INC_DEC
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
#endif

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_DEAD)
      mark_reg_death (XEXP (link, 0));

  len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
  pressure_info
    = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
  if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
    INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
						    * sizeof (int), 1);
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      pressure_info[i].clobber_increase
	= reg_pressure_info[cl].clobber_increase;
      pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
      pressure_info[i].unused_set_increase
	= reg_pressure_info[cl].unused_set_increase;
      pressure_info[i].change = reg_pressure_info[cl].change;
    }
}
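/* Worked example (added comment): for an insn "r100 = r101 + r102" where
   pseudo r100 is of pressure class CL, lives through the insn, and needs
   N hard registers, set_increase[CL] grows by N; `change' also grows by N
   unless r100 is also used (as a dying use) in this insn.  A REG_UNUSED
   r100 would instead bump unused_set_increase, and a clobber bumps
   clobber_increase.  */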
/* Internal variable for sched_analyze_[12] () functions.
   If it is nonzero, this means that sched_analyze_[12] is looking
   at the outermost SET.  */
static bool can_start_lhs_rhs_p;
/* Extend reg info for the deps context DEPS given that
   we have just generated a register numbered REGNO.  */
static void
extend_deps_reg_info (struct deps_desc *deps, int regno)
{
  int max_regno = regno + 1;

  gcc_assert (!reload_completed);

  /* In a readonly context, it would not hurt to extend info,
     but it should not be needed.  */
  if (reload_completed && deps->readonly)
    {
      deps->max_reg = max_regno;
      return;
    }

  if (max_regno > deps->max_reg)
    {
      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
				   max_regno);
      memset (&deps->reg_last[deps->max_reg],
	      0, (max_regno - deps->max_reg)
	      * sizeof (struct deps_reg));
      deps->max_reg = max_regno;
    }
}
/* Extends REG_INFO_P if needed.  */
void
maybe_extend_reg_info_p (void)
{
  /* Extend REG_INFO_P, if needed.  */
  if ((unsigned int)max_regno - 1 >= reg_info_p_size)
    {
      size_t new_reg_info_p_size = max_regno + 128;

      gcc_assert (!reload_completed && sel_sched_p ());

      reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
						    new_reg_info_p_size,
						    reg_info_p_size,
						    sizeof (*reg_info_p));
      reg_info_p_size = new_reg_info_p_size;
    }
}
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */

static void
sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
		   enum rtx_code ref, rtx insn)
{
  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
  if (!reload_completed && sel_sched_p ()
      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
    extend_deps_reg_info (deps, regno);

  maybe_extend_reg_info_p ();

  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i = hard_regno_nregs[regno][mode];
      if (ref == SET)
	{
	  while (--i >= 0)
	    note_reg_set (regno + i);
	}
      else if (ref == USE)
	{
	  while (--i >= 0)
	    note_reg_use (regno + i);
	}
      else
	{
	  while (--i >= 0)
	    note_reg_clobber (regno + i);
	}
    }

  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
    {
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);
    }

  else
    {
      if (ref == SET)
	note_reg_set (regno);
      else if (ref == USE)
	note_reg_use (regno);
      else
	note_reg_clobber (regno);

      /* Pseudos that are REG_EQUIV to something may be replaced
	 by that during reloading.  We need only add dependencies for
	 the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
	{
	  rtx t = get_reg_known_value (regno);
	  if (MEM_P (t))
	    sched_analyze_2 (deps, XEXP (t, 0), insn);
	}

      /* Don't let it cross a call after scheduling if it doesn't
	 already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
	{
	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
	    deps->sched_before_next_call
	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
	  else
	    add_dependence_list (insn, deps->last_function_call, 1,
				 REG_DEP_ANTI, false);
	}
    }
}
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
{
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  gcc_assert (dest);
  if (dest == 0)
    return;

  if (cslr_p && sched_deps_info->start_lhs)
    sched_deps_info->start_lhs (dest);

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	  sched_analyze_1 (deps,
			   gen_rtx_CLOBBER (VOIDmode,
					    XEXP (XVECEXP (dest, 0, i), 0)),
			   insn);

      if (cslr_p && sched_deps_info->finish_lhs)
	sched_deps_info->finish_lhs ();

      if (code == SET)
	{
	  can_start_lhs_rhs_p = cslr_p;

	  sched_analyze_2 (deps, SET_SRC (x), insn);

	  can_start_lhs_rhs_p = false;
	}

      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT)
    {
      if (GET_CODE (dest) == STRICT_LOW_PART
	  || GET_CODE (dest) == ZERO_EXTRACT
	  || df_read_modify_subreg_p (dest))
	{
	  /* These both read and modify the result.  We must handle
	     them as writes to get proper dependencies for following
	     instructions.  We must handle them as reads to get proper
	     dependencies from this to previous instructions.
	     Thus we need to call sched_analyze_2.  */

	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
	}
      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
	}
      dest = XEXP (dest, 0);
    }

  if (REG_P (dest))
    {
      int regno = REGNO (dest);
      enum machine_mode mode = GET_MODE (dest);

      sched_analyze_reg (deps, regno, mode, code, insn);

#ifdef STACK_REGS
      /* Treat all writes to a stack register as modifying the TOS.  */
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
	{
	  /* Avoid analyzing the same register twice.  */
	  if (regno != FIRST_STACK_REG)
	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);

	  add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
			       FIRST_STACK_REG);
	}
#endif
    }
  else if (MEM_P (dest))
    {
      /* Writing memory.  */
      rtx t = dest;

      if (sched_deps_info->use_cselib)
	{
	  enum machine_mode address_mode = get_address_mode (dest);

	  t = shallow_copy_rtx (dest);
	  cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
				   GET_MODE (t), insn);
	  XEXP (t, 0)
	    = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
						insn);
	}
      t = canon_rtx (t);

      /* Pending lists can't get larger with a readonly context.  */
      if (!deps->readonly
	  && ((deps->pending_read_list_length + deps->pending_write_list_length)
	      > MAX_PENDING_LIST_LENGTH))
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the default value of 32.  */
	  flush_pending_lists (deps, insn, false, true);
	}
      else
	{
	  rtx pending, pending_mem;

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (anti_dependence (XEXP (pending_mem, 0), t)
		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
			      DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (output_dependence (XEXP (pending_mem, 0), t)
		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
			      DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
2525 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2526 REG_DEP_ANTI, true);
2527 add_dependence_list (insn, deps->pending_jump_insns, 1,
2528 REG_DEP_CONTROL, true);
2530 if (!deps->readonly)
2531 add_insn_mem_dependence (deps, false, insn, dest);
2533 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2536 if (cslr_p && sched_deps_info->finish_lhs)
2537 sched_deps_info->finish_lhs ();
2539 /* Analyze reads. */
2540 if (GET_CODE (x) == SET)
2542 can_start_lhs_rhs_p = cslr_p;
2544 sched_analyze_2 (deps, SET_SRC (x), insn);
2546 can_start_lhs_rhs_p = false;
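/* Why a ZERO_EXTRACT or STRICT_LOW_PART destination is analyzed as
   both a read and a write (see the loop above): storing into a
   bit-field preserves the bits outside the field, so the insn is a
   read-modify-write.  A minimal C sketch for an 8-bit field at offset
   zero (R and S are hypothetical):  */
#if 0
r = (r & ~0xffU) | (s & 0xffU);   /* The old value of R is read here.  */
#endif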
2550 /* Analyze the uses of memory and registers in rtx X in INSN. */
2552 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2558 bool cslr_p = can_start_lhs_rhs_p;
2560 can_start_lhs_rhs_p = false;
2566 if (cslr_p && sched_deps_info->start_rhs)
2567 sched_deps_info->start_rhs (x);
2569 code = GET_CODE (x);
2577 /* Ignore constants. */
2578 if (cslr_p && sched_deps_info->finish_rhs)
2579 sched_deps_info->finish_rhs ();
2585 /* User of CC0 depends on immediately preceding insn. */
2586 SCHED_GROUP_P (insn) = 1;
2587 /* Don't move CC0 setter to another block (it can set up the
2588 same flag for previous CC0 users which is safe). */
2589 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2591 if (cslr_p && sched_deps_info->finish_rhs)
2592 sched_deps_info->finish_rhs ();
2599 int regno = REGNO (x);
2600 enum machine_mode mode = GET_MODE (x);
2602 sched_analyze_reg (deps, regno, mode, USE, insn);
2605 /* Treat all reads of a stack register as modifying the TOS. */
2606 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2608 /* Avoid analyzing the same register twice. */
2609 if (regno != FIRST_STACK_REG)
2610 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2611 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2615 if (cslr_p && sched_deps_info->finish_rhs)
2616 sched_deps_info->finish_rhs ();
2623 /* Reading memory. */
2625 rtx pending, pending_mem;
2628 if (sched_deps_info->use_cselib)
2630 enum machine_mode address_mode = get_address_mode (t);
2632 t = shallow_copy_rtx (t);
2633 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2634 GET_MODE (t), insn);
2636 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2640 if (!DEBUG_INSN_P (insn))
2643 pending = deps->pending_read_insns;
2644 pending_mem = deps->pending_read_mems;
2647 if (read_dependence (XEXP (pending_mem, 0), t)
2648 && ! sched_insns_conditions_mutex_p (insn,
2650 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2653 pending = XEXP (pending, 1);
2654 pending_mem = XEXP (pending_mem, 1);
2657 pending = deps->pending_write_insns;
2658 pending_mem = deps->pending_write_mems;
2661 if (true_dependence (XEXP (pending_mem, 0), VOIDmode, t)
2662 && ! sched_insns_conditions_mutex_p (insn,
2664 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2665 sched_deps_info->generate_spec_deps
2666 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2668 pending = XEXP (pending, 1);
2669 pending_mem = XEXP (pending_mem, 1);
2672 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2673 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2675 for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
2676 if (deps_may_trap_p (x))
2678 if ((sched_deps_info->generate_spec_deps)
2679 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2681 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2684 note_dep (XEXP (u, 0), ds);
2687 add_dependence (insn, XEXP (u, 0), REG_DEP_CONTROL);
2691 /* Always add these dependencies to pending_reads, since
2692 this insn may be followed by a write. */
2693 if (!deps->readonly)
2694 add_insn_mem_dependence (deps, true, insn, x);
2696 sched_analyze_2 (deps, XEXP (x, 0), insn);
2698 if (cslr_p && sched_deps_info->finish_rhs)
2699 sched_deps_info->finish_rhs ();
2704 /* Force pending stores to memory in case a trap handler needs them. */
2706 flush_pending_lists (deps, insn, true, false);
2710 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2711 reg_pending_barrier = TRUE_BARRIER;
2714 case UNSPEC_VOLATILE:
2715 flush_pending_lists (deps, insn, true, true);
2721 /* Traditional and volatile asm instructions must be considered to use
2722 and clobber all hard registers, all pseudo-registers and all of
2723 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2725 Consider for instance a volatile asm that changes the fpu rounding
2726 mode. An insn should not be moved across this even if it only uses
2727 pseudo-regs because it might give an incorrectly rounded result. */
2728 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2729 reg_pending_barrier = TRUE_BARRIER;
2731 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2732 We cannot just fall through here, since then we would be confused
2733 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2734 a traditional asm, unlike its normal usage. */
2736 if (code == ASM_OPERANDS)
2738 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2739 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2741 if (cslr_p && sched_deps_info->finish_rhs)
2742 sched_deps_info->finish_rhs ();
2753 /* These both read and modify the result. We must handle them as writes
2754 to get proper dependencies for following instructions. We must handle
2755 them as reads to get proper dependencies from this to previous
2756 instructions. Thus we need to pass them to both sched_analyze_1
2757 and sched_analyze_2. We must call sched_analyze_2 first in order
2758 to get the proper antecedent for the read. */
2759 sched_analyze_2 (deps, XEXP (x, 0), insn);
2760 sched_analyze_1 (deps, x, insn);
2762 if (cslr_p && sched_deps_info->finish_rhs)
2763 sched_deps_info->finish_rhs ();
2769 /* op0 = op0 + op1 */
2770 sched_analyze_2 (deps, XEXP (x, 0), insn);
2771 sched_analyze_2 (deps, XEXP (x, 1), insn);
2772 sched_analyze_1 (deps, x, insn);
2774 if (cslr_p && sched_deps_info->finish_rhs)
2775 sched_deps_info->finish_rhs ();
2783 /* Other cases: walk the insn. */
2784 fmt = GET_RTX_FORMAT (code);
2785 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2788 sched_analyze_2 (deps, XEXP (x, i), insn);
2789 else if (fmt[i] == 'E')
2790 for (j = 0; j < XVECLEN (x, i); j++)
2791 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2794 if (cslr_p && sched_deps_info->finish_rhs)
2795 sched_deps_info->finish_rhs ();
2798 /* Analyze an INSN with pattern X to find all dependencies. */
2800 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2802 RTX_CODE code = GET_CODE (x);
2805 reg_set_iterator rsi;
2807 if (! reload_completed)
2811 extract_insn (insn);
2812 preprocess_constraints ();
2813 ira_implicitly_set_insn_hard_regs (&temp);
2814 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2815 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2818 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2822 /* Avoid moving trapping instructions across function calls that might
2823 not always return. */
2824 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2825 1, REG_DEP_ANTI, true);
2827 /* We must avoid creating a situation in which two successors of the
2828 current block have different unwind info after scheduling. If at any
2829 point the two paths re-join this leads to incorrect unwind info. */
2830 /* ??? There are certain situations involving a forced frame pointer in
2831 which, with extra effort, we could fix up the unwind info at a later
2832 CFG join. However, it seems better to notice these cases earlier
2833 during prologue generation and avoid marking the frame pointer setup
2834 as frame-related at all. */
2835 if (RTX_FRAME_RELATED_P (insn))
2837 /* Make sure prologue insn is scheduled before next jump. */
2838 deps->sched_before_next_jump
2839 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2841 /* Make sure epilogue insn is scheduled after preceding jumps. */
2842 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2846 if (code == COND_EXEC)
2848 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2850 /* ??? Should be recording conditions so we reduce the number of
2851 false dependencies. */
2852 x = COND_EXEC_CODE (x);
2853 code = GET_CODE (x);
2855 if (code == SET || code == CLOBBER)
2857 sched_analyze_1 (deps, x, insn);
2859 /* Bare clobber insns are used for letting life analysis, reg-stack
2860 and others know that a value is dead. Depend on the last call
2861 instruction so that reg-stack won't get confused. */
2862 if (code == CLOBBER)
2863 add_dependence_list (insn, deps->last_function_call, 1,
2864 REG_DEP_OUTPUT, true);
2866 else if (code == PARALLEL)
2868 for (i = XVECLEN (x, 0); i--;)
2870 rtx sub = XVECEXP (x, 0, i);
2871 code = GET_CODE (sub);
2873 if (code == COND_EXEC)
2875 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2876 sub = COND_EXEC_CODE (sub);
2877 code = GET_CODE (sub);
2879 if (code == SET || code == CLOBBER)
2880 sched_analyze_1 (deps, sub, insn);
2882 sched_analyze_2 (deps, sub, insn);
2886 sched_analyze_2 (deps, x, insn);
2888 /* Mark registers CLOBBERED or used by called function. */
2891 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2893 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2894 sched_analyze_1 (deps, XEXP (link, 0), insn);
2895 else if (GET_CODE (XEXP (link, 0)) != SET)
2896 sched_analyze_2 (deps, XEXP (link, 0), insn);
2898 /* Don't schedule anything after a tail call; the tail call needs
2899 to use at least all call-saved registers. */
2900 if (SIBLING_CALL_P (insn))
2901 reg_pending_barrier = TRUE_BARRIER;
2902 else if (find_reg_note (insn, REG_SETJMP, NULL))
2903 reg_pending_barrier = MOVE_BARRIER;
2909 next = next_nonnote_nondebug_insn (insn);
2910 if (next && BARRIER_P (next))
2911 reg_pending_barrier = MOVE_BARRIER;
2914 rtx pending, pending_mem;
2916 if (sched_deps_info->compute_jump_reg_dependencies)
2918 (*sched_deps_info->compute_jump_reg_dependencies)
2919 (insn, reg_pending_control_uses);
2921 /* Make latency of jump equal to 0 by using anti-dependence. */
2922 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
2924 struct deps_reg *reg_last = &deps->reg_last[i];
2925 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
2927 add_dependence_list (insn, reg_last->implicit_sets,
2928 0, REG_DEP_ANTI, false);
2929 add_dependence_list (insn, reg_last->clobbers, 0,
2930 REG_DEP_ANTI, false);
2934 /* All memory writes and volatile reads must happen before the
2935 jump. Non-volatile reads must happen before the jump iff
2936 the result is needed by the above register used mask. */
2938 pending = deps->pending_write_insns;
2939 pending_mem = deps->pending_write_mems;
2942 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2943 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2944 pending = XEXP (pending, 1);
2945 pending_mem = XEXP (pending_mem, 1);
2948 pending = deps->pending_read_insns;
2949 pending_mem = deps->pending_read_mems;
2952 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2953 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2954 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2955 pending = XEXP (pending, 1);
2956 pending_mem = XEXP (pending_mem, 1);
2959 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2960 REG_DEP_ANTI, true);
2961 add_dependence_list (insn, deps->pending_jump_insns, 1,
2962 REG_DEP_ANTI, true);
2966 /* If this instruction can throw an exception, then moving it changes
2967 where block boundaries fall. This is mighty confusing elsewhere.
2968 Therefore, prevent such an instruction from being moved. Same for
2969 non-jump instructions that define block boundaries.
2970 ??? Unclear whether this is still necessary in EBB mode. If not,
2971 add_branch_dependences should be adjusted for RGN mode instead. */
2972 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2973 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2974 reg_pending_barrier = MOVE_BARRIER;
2976 if (sched_pressure != SCHED_PRESSURE_NONE)
2978 setup_insn_reg_uses (deps, insn);
2979 init_insn_reg_pressure_info (insn);
2982 /* Add register dependencies for insn. */
2983 if (DEBUG_INSN_P (insn))
2985 rtx prev = deps->last_debug_insn;
2988 if (!deps->readonly)
2989 deps->last_debug_insn = insn;
2992 add_dependence (insn, prev, REG_DEP_ANTI);
2994 add_dependence_list (insn, deps->last_function_call, 1,
2995 REG_DEP_ANTI, false);
2997 if (!sel_sched_p ())
2998 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2999 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3001 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3003 struct deps_reg *reg_last = &deps->reg_last[i];
3004 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3005 /* There's no point in making REG_DEP_CONTROL dependencies for
3007 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3010 if (!deps->readonly)
3011 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3013 CLEAR_REG_SET (reg_pending_uses);
3015 /* Quite often, a debug insn will refer to stuff in the
3016 previous instruction, but the reason we want this
3017 dependency here is to make sure the scheduler doesn't
3018 gratuitously move a debug insn ahead. This could dirty
3019 DF flags and cause additional analysis that wouldn't have
3020 occurred in compilation without debug insns, and such
3021 additional analysis can modify the generated code. */
3022 prev = PREV_INSN (insn);
3024 if (prev && NONDEBUG_INSN_P (prev))
3025 add_dependence (insn, prev, REG_DEP_ANTI);
3029 regset_head set_or_clobbered;
3031 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3033 struct deps_reg *reg_last = &deps->reg_last[i];
3034 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3035 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3037 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3040 if (!deps->readonly)
3042 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3043 reg_last->uses_length++;
3047 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3048 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3050 struct deps_reg *reg_last = &deps->reg_last[i];
3051 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3052 add_dependence_list (insn, reg_last->implicit_sets, 0,
3053 REG_DEP_ANTI, false);
3054 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3057 if (!deps->readonly)
3059 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3060 reg_last->uses_length++;
3064 if (targetm.sched.exposed_pipeline)
3066 INIT_REG_SET (&set_or_clobbered);
3067 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3069 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3071 struct deps_reg *reg_last = &deps->reg_last[i];
3073 for (list = reg_last->uses; list; list = XEXP (list, 1))
3075 rtx other = XEXP (list, 0);
3076 if (INSN_CACHED_COND (other) != const_true_rtx
3077 && refers_to_regno_p (i, i + 1, INSN_CACHED_COND (other), NULL))
3078 INSN_CACHED_COND (other) = const_true_rtx;
3083 /* If the current insn is conditional, we can't free any
3085 if (sched_has_condition_p (insn))
3087 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3089 struct deps_reg *reg_last = &deps->reg_last[i];
3090 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3092 add_dependence_list (insn, reg_last->implicit_sets, 0,
3093 REG_DEP_ANTI, false);
3094 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3096 add_dependence_list (insn, reg_last->control_uses, 0,
3097 REG_DEP_CONTROL, false);
3099 if (!deps->readonly)
3102 = alloc_INSN_LIST (insn, reg_last->clobbers);
3103 reg_last->clobbers_length++;
3106 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3108 struct deps_reg *reg_last = &deps->reg_last[i];
3109 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3111 add_dependence_list (insn, reg_last->implicit_sets, 0,
3112 REG_DEP_ANTI, false);
3113 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3115 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3117 add_dependence_list (insn, reg_last->control_uses, 0,
3118 REG_DEP_CONTROL, false);
3120 if (!deps->readonly)
3121 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3126 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3128 struct deps_reg *reg_last = &deps->reg_last[i];
3129 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
3130 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
3132 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3133 REG_DEP_OUTPUT, false);
3134 add_dependence_list_and_free (deps, insn,
3135 &reg_last->implicit_sets, 0,
3136 REG_DEP_ANTI, false);
3137 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3138 REG_DEP_ANTI, false);
3139 add_dependence_list_and_free (deps, insn,
3140 &reg_last->control_uses, 0,
3141 REG_DEP_ANTI, false);
3142 add_dependence_list_and_free (deps, insn,
3143 &reg_last->clobbers, 0,
3144 REG_DEP_OUTPUT, false);
3146 if (!deps->readonly)
3148 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3149 reg_last->clobbers_length = 0;
3150 reg_last->uses_length = 0;
3155 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3157 add_dependence_list (insn, reg_last->implicit_sets, 0,
3158 REG_DEP_ANTI, false);
3159 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3161 add_dependence_list (insn, reg_last->control_uses, 0,
3162 REG_DEP_CONTROL, false);
3165 if (!deps->readonly)
3167 reg_last->clobbers_length++;
3169 = alloc_INSN_LIST (insn, reg_last->clobbers);
3172 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3174 struct deps_reg *reg_last = &deps->reg_last[i];
3176 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3177 REG_DEP_OUTPUT, false);
3178 add_dependence_list_and_free (deps, insn,
3179 &reg_last->implicit_sets,
3180 0, REG_DEP_ANTI, false);
3181 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3182 REG_DEP_OUTPUT, false);
3183 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3184 REG_DEP_ANTI, false);
3185 add_dependence_list (insn, reg_last->control_uses, 0,
3186 REG_DEP_CONTROL, false);
3188 if (!deps->readonly)
3190 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3191 reg_last->uses_length = 0;
3192 reg_last->clobbers_length = 0;
3196 if (!deps->readonly)
3198 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3200 struct deps_reg *reg_last = &deps->reg_last[i];
3201 reg_last->control_uses
3202 = alloc_INSN_LIST (insn, reg_last->control_uses);
3207 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3208 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3210 struct deps_reg *reg_last = &deps->reg_last[i];
3211 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3212 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3213 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3214 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3217 if (!deps->readonly)
3218 reg_last->implicit_sets
3219 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3222 if (!deps->readonly)
3224 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3225 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3226 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3227 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3228 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3229 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3230 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3232 /* Set up the pending barrier found. */
3233 deps->last_reg_pending_barrier = reg_pending_barrier;
3236 CLEAR_REG_SET (reg_pending_uses);
3237 CLEAR_REG_SET (reg_pending_clobbers);
3238 CLEAR_REG_SET (reg_pending_sets);
3239 CLEAR_REG_SET (reg_pending_control_uses);
3240 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3241 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3243 /* Add dependencies if a scheduling barrier was found. */
3244 if (reg_pending_barrier)
3246 /* In the case of a barrier, most of the added dependencies are not
3247 real, so we use anti-dependences here. */
3248 if (sched_has_condition_p (insn))
3250 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3252 struct deps_reg *reg_last = &deps->reg_last[i];
3253 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3255 add_dependence_list (insn, reg_last->sets, 0,
3256 reg_pending_barrier == TRUE_BARRIER
3257 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3258 add_dependence_list (insn, reg_last->implicit_sets, 0,
3259 REG_DEP_ANTI, true);
3260 add_dependence_list (insn, reg_last->clobbers, 0,
3261 reg_pending_barrier == TRUE_BARRIER
3262 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3267 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3269 struct deps_reg *reg_last = &deps->reg_last[i];
3270 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3271 REG_DEP_ANTI, true);
3272 add_dependence_list_and_free (deps, insn,
3273 &reg_last->control_uses, 0,
3274 REG_DEP_CONTROL, true);
3275 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3276 reg_pending_barrier == TRUE_BARRIER
3277 ? REG_DEP_TRUE : REG_DEP_ANTI,
3279 add_dependence_list_and_free (deps, insn,
3280 &reg_last->implicit_sets, 0,
3281 REG_DEP_ANTI, true);
3282 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3283 reg_pending_barrier == TRUE_BARRIER
3284 ? REG_DEP_TRUE : REG_DEP_ANTI,
3287 if (!deps->readonly)
3289 reg_last->uses_length = 0;
3290 reg_last->clobbers_length = 0;
3295 if (!deps->readonly)
3296 for (i = 0; i < (unsigned)deps->max_reg; i++)
3298 struct deps_reg *reg_last = &deps->reg_last[i];
3299 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3300 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3303 /* Flush pending lists on jumps, but not on speculative checks. */
3304 if (JUMP_P (insn) && !(sel_sched_p ()
3305 && sel_insn_is_speculation_check (insn)))
3306 flush_pending_lists (deps, insn, true, true);
3308 reg_pending_barrier = NOT_A_BARRIER;
3311 /* If a post-call group is still open, see if it should remain so.
3312 This insn must be a simple move of a hard reg to a pseudo or
3315 We must avoid moving these insns for correctness on targets
3316 with small register classes, and for special registers like
3317 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3318 hard regs for all targets. */
3320 if (deps->in_post_call_group_p)
3322 rtx tmp, set = single_set (insn);
3323 int src_regno, dest_regno;
3327 if (DEBUG_INSN_P (insn))
3328 /* We don't want to mark debug insns as part of the same
3329 sched group. We know they really aren't, but if we use
3330 debug insns to tell that a call group is over, we'll
3331 get different code if debug insns are not there and
3332 instructions that follow seem like they should be part
3335 Also, if we did, chain_to_prev_insn would move the
3336 deps of the debug insn to the call insn, modifying
3337 non-debug post-dependency counts of the debug insn
3338 dependencies and otherwise messing with the scheduling
3341 Instead, let such debug insns be scheduled freely, but
3342 keep the call group open in case there are insns that
3343 should be part of it afterwards. Since we grant debug
3344 insns higher priority than even sched group insns, it
3345 will all turn out all right. */
3346 goto debug_dont_end_call_group;
3348 goto end_call_group;
3351 tmp = SET_DEST (set);
3352 if (GET_CODE (tmp) == SUBREG)
3353 tmp = SUBREG_REG (tmp);
3355 dest_regno = REGNO (tmp);
3357 goto end_call_group;
3359 tmp = SET_SRC (set);
3360 if (GET_CODE (tmp) == SUBREG)
3361 tmp = SUBREG_REG (tmp);
3362 if ((GET_CODE (tmp) == PLUS
3363 || GET_CODE (tmp) == MINUS)
3364 && REG_P (XEXP (tmp, 0))
3365 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3366 && dest_regno == STACK_POINTER_REGNUM)
3367 src_regno = STACK_POINTER_REGNUM;
3368 else if (REG_P (tmp))
3369 src_regno = REGNO (tmp);
3371 goto end_call_group;
3373 if (src_regno < FIRST_PSEUDO_REGISTER
3374 || dest_regno < FIRST_PSEUDO_REGISTER)
3377 && deps->in_post_call_group_p == post_call_initial)
3378 deps->in_post_call_group_p = post_call;
3380 if (!sel_sched_p () || sched_emulate_haifa_p)
3382 SCHED_GROUP_P (insn) = 1;
3383 CANT_MOVE (insn) = 1;
3389 if (!deps->readonly)
3390 deps->in_post_call_group_p = not_post_call;
3394 debug_dont_end_call_group:
3395 if ((current_sched_info->flags & DO_SPECULATION)
3396 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3397 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3401 sel_mark_hard_insn (insn);
3404 sd_iterator_def sd_it;
3407 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3408 sd_iterator_cond (&sd_it, &dep);)
3409 change_spec_dep_to_hard (sd_it);
3414 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3415 longjmp, loop forever, ...). */
3416 /* FIXME: Why can't this function just use flags_from_decl_or_type and
3417 test for ECF_NORETURN? */
3419 call_may_noreturn_p (rtx insn)
3423 /* const or pure calls that aren't looping will always return. */
3424 if (RTL_CONST_OR_PURE_CALL_P (insn)
3425 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3428 call = PATTERN (insn);
3429 if (GET_CODE (call) == PARALLEL)
3430 call = XVECEXP (call, 0, 0);
3431 if (GET_CODE (call) == SET)
3432 call = SET_SRC (call);
3433 if (GET_CODE (call) == CALL
3434 && MEM_P (XEXP (call, 0))
3435 && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3437 rtx symbol = XEXP (XEXP (call, 0), 0);
3438 if (SYMBOL_REF_DECL (symbol)
3439 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3441 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3443 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3446 case BUILT_IN_BCOPY:
3447 case BUILT_IN_BZERO:
3448 case BUILT_IN_INDEX:
3449 case BUILT_IN_MEMCHR:
3450 case BUILT_IN_MEMCMP:
3451 case BUILT_IN_MEMCPY:
3452 case BUILT_IN_MEMMOVE:
3453 case BUILT_IN_MEMPCPY:
3454 case BUILT_IN_MEMSET:
3455 case BUILT_IN_RINDEX:
3456 case BUILT_IN_STPCPY:
3457 case BUILT_IN_STPNCPY:
3458 case BUILT_IN_STRCAT:
3459 case BUILT_IN_STRCHR:
3460 case BUILT_IN_STRCMP:
3461 case BUILT_IN_STRCPY:
3462 case BUILT_IN_STRCSPN:
3463 case BUILT_IN_STRLEN:
3464 case BUILT_IN_STRNCAT:
3465 case BUILT_IN_STRNCMP:
3466 case BUILT_IN_STRNCPY:
3467 case BUILT_IN_STRPBRK:
3468 case BUILT_IN_STRRCHR:
3469 case BUILT_IN_STRSPN:
3470 case BUILT_IN_STRSTR:
3471 /* Assume certain string/memory builtins always return. */
3479 /* For all other calls assume that they might not always return. */
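/* A sketch of the shorter predicate contemplated by the FIXME above
   (an untested assumption -- it is not obvious that ECF_NORETURN
   captures every case the code above handles, e.g. functions that
   loop forever without being marked noreturn):  */
#if 0
static bool
call_may_noreturn_p_alt (tree fndecl)
{
  return (fndecl == NULL_TREE
          || (flags_from_decl_or_type (fndecl) & ECF_NORETURN) != 0);
}
#endif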
3483 /* Return true if INSN should be made dependent on the previous instruction
3484 group, and if all INSN's dependencies should be moved to the first
3485 instruction of that group. */
3488 chain_to_prev_insn_p (rtx insn)
3492 /* INSN forms a group with the previous instruction. */
3493 if (SCHED_GROUP_P (insn))
3496 /* If the previous instruction clobbers a register R and this one sets
3497 part of R, the clobber was added specifically to help us track the
3498 liveness of R. There's no point scheduling the clobber and leaving
3499 INSN behind, especially if we move the clobber to another block. */
3500 prev = prev_nonnote_nondebug_insn (insn);
3503 && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3504 && GET_CODE (PATTERN (prev)) == CLOBBER)
3506 x = XEXP (PATTERN (prev), 0);
3507 if (set_of (x, insn))
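/* An example of the clobber/set pairing tested above (RTL sketch;
   register numbers are hypothetical):

     (clobber (reg:DI 0))
     (set (subreg:SI (reg:DI 0) 0) (reg:SI 1))

   The clobber exists only to mark the whole DImode value as set, so
   separating the two insns would defeat its purpose.  */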
3514 /* Analyze INSN with DEPS as a context. */
3516 deps_analyze_insn (struct deps_desc *deps, rtx insn)
3518 if (sched_deps_info->start_insn)
3519 sched_deps_info->start_insn (insn);
3521 /* Record the condition for this insn. */
3522 if (NONDEBUG_INSN_P (insn))
3525 sched_get_condition_with_rev (insn, NULL);
3526 t = INSN_CACHED_COND (insn);
3527 INSN_COND_DEPS (insn) = NULL_RTX;
3528 if (reload_completed
3529 && (current_sched_info->flags & DO_PREDICATION)
3531 && REG_P (XEXP (t, 0))
3532 && CONSTANT_P (XEXP (t, 1)))
3538 nregs = hard_regno_nregs[regno][GET_MODE (t)];
3542 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3543 t = concat_INSN_LIST (reg_last->sets, t);
3544 t = concat_INSN_LIST (reg_last->clobbers, t);
3545 t = concat_INSN_LIST (reg_last->implicit_sets, t);
3547 INSN_COND_DEPS (insn) = t;
3553 /* Make each JUMP_INSN (but not a speculative check)
3554 a scheduling barrier for memory references. */
3557 && sel_insn_is_speculation_check (insn)))
3559 /* Keep the list a reasonable size. */
3560 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3561 flush_pending_lists (deps, insn, true, true);
3563 deps->pending_jump_insns
3564 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3567 /* For each insn which shouldn't cross a jump, add a dependence. */
3568 add_dependence_list_and_free (deps, insn,
3569 &deps->sched_before_next_jump, 1,
3570 REG_DEP_ANTI, true);
3572 sched_analyze_insn (deps, PATTERN (insn), insn);
3574 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3576 sched_analyze_insn (deps, PATTERN (insn), insn);
3578 else if (CALL_P (insn))
3582 CANT_MOVE (insn) = 1;
3584 if (find_reg_note (insn, REG_SETJMP, NULL))
3586 /* This is setjmp. Assume that all registers, not just
3587 hard registers, may be clobbered by this call. */
3588 reg_pending_barrier = MOVE_BARRIER;
3592 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3593 /* A call may read and modify global register variables. */
3596 SET_REGNO_REG_SET (reg_pending_sets, i);
3597 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3599 /* Other call-clobbered hard regs may be clobbered.
3600 Since we only have a choice between 'might be clobbered'
3601 and 'definitely not clobbered', we must include all
3602 partly call-clobbered registers here. */
3603 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3604 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3605 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3606 /* We don't know what set of fixed registers might be used
3607 by the function, but it is certain that the stack pointer
3608 is among them; in any case, be conservative. */
3609 else if (fixed_regs[i])
3610 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3611 /* The frame pointer is normally not used by the function
3612 itself, but by the debugger. */
3613 /* ??? MIPS o32 is an exception. It uses the frame pointer
3614 in the macro expansion of jal but does not represent this
3615 fact in the call_insn rtl. */
3616 else if (i == FRAME_POINTER_REGNUM
3617 || (i == HARD_FRAME_POINTER_REGNUM
3618 && (! reload_completed || frame_pointer_needed)))
3619 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3622 /* For each insn which shouldn't cross a call, add a dependence
3623 between that insn and this call insn. */
3624 add_dependence_list_and_free (deps, insn,
3625 &deps->sched_before_next_call, 1,
3626 REG_DEP_ANTI, true);
3628 sched_analyze_insn (deps, PATTERN (insn), insn);
3630 /* If CALL would be in a sched group, then this will violate the
3631 convention that sched group insns have dependencies only on the
3632 previous instruction.
3634 Of course one can say: "Hey! What about head of the sched group?"
3635 And I will answer: "Basic principles (one dep per insn) are always
3637 gcc_assert (!SCHED_GROUP_P (insn));
3639 /* In the absence of interprocedural alias analysis, we must flush
3640 all pending reads and writes, and start new dependencies starting
3641 from here. But only flush writes for constant calls (which may
3642 be passed a pointer to something we haven't written yet). */
3643 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3645 if (!deps->readonly)
3647 /* Remember the last function call for limiting lifetimes. */
3648 free_INSN_LIST_list (&deps->last_function_call);
3649 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3651 if (call_may_noreturn_p (insn))
3653 /* Remember the last function call that might not always return
3654 normally for limiting moves of trapping insns. */
3655 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3656 deps->last_function_call_may_noreturn
3657 = alloc_INSN_LIST (insn, NULL_RTX);
3660 /* Before reload, begin a post-call group, so as to keep the
3661 lifetimes of hard registers correct. */
3662 if (! reload_completed)
3663 deps->in_post_call_group_p = post_call;
3667 if (sched_deps_info->use_cselib)
3668 cselib_process_insn (insn);
3670 /* EH_REGION insn notes cannot appear until well after we complete
3673 gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
3674 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
3676 if (sched_deps_info->finish_insn)
3677 sched_deps_info->finish_insn ();
3679 /* Fixup the dependencies in the sched group. */
3680 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3681 && chain_to_prev_insn_p (insn)
3683 chain_to_prev_insn (insn);
3686 /* Initialize DEPS for the new block beginning with HEAD. */
3688 deps_start_bb (struct deps_desc *deps, rtx head)
3690 gcc_assert (!deps->readonly);
3692 /* Before reload, if the previous block ended in a call, show that
3693 we are inside a post-call group, so as to keep the lifetimes of
3694 hard registers correct. */
3695 if (! reload_completed && !LABEL_P (head))
3697 rtx insn = prev_nonnote_nondebug_insn (head);
3699 if (insn && CALL_P (insn))
3700 deps->in_post_call_group_p = post_call_initial;
3704 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3705 dependencies for each insn. */
3707 sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
3711 if (sched_deps_info->use_cselib)
3712 cselib_init (CSELIB_RECORD_MEMORY);
3714 deps_start_bb (deps, head);
3716 for (insn = head;; insn = NEXT_INSN (insn))
3721 /* And initialize deps_lists. */
3722 sd_init_insn (insn);
3725 deps_analyze_insn (deps, insn);
3729 if (sched_deps_info->use_cselib)
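/* A minimal sketch of how a scheduler driver might drive the API in
   this file for one region (set-up of HEAD/TAIL is omitted; the
   sequence uses only functions defined here):  */
#if 0
{
  struct deps_desc deps;

  init_deps (&deps, false);             /* Fresh context, eager reg_last.  */
  sched_analyze (&deps, head, tail);    /* Build backward dependencies.  */
  free_deps (&deps);                    /* Drop the per-region insn lists.  */
  sched_free_deps (head, tail, true);   /* Free the dep nodes themselves.  */
}
#endif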
3737 /* Helper for sched_free_deps ().
3738 Delete INSN's (RESOLVED_P) backward dependencies. */
3740 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3742 sd_iterator_def sd_it;
3744 sd_list_types_def types;
3747 types = SD_LIST_RES_BACK;
3749 types = SD_LIST_BACK;
3751 for (sd_it = sd_iterator_start (insn, types);
3752 sd_iterator_cond (&sd_it, &dep);)
3754 dep_link_t link = *sd_it.linkp;
3755 dep_node_t node = DEP_LINK_NODE (link);
3756 deps_list_t back_list;
3757 deps_list_t forw_list;
3759 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3760 remove_from_deps_list (link, back_list);
3761 delete_dep_node (node);
3765 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3768 sched_free_deps (rtx head, rtx tail, bool resolved_p)
3771 rtx next_tail = NEXT_INSN (tail);
3773 /* We make two passes since some insns may be scheduled before their
3774 dependencies are resolved. */
3775 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3776 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3778 /* Clear forward deps and leave the dep_nodes to the
3779 corresponding back_deps list. */
3781 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3783 clear_deps_list (INSN_FORW_DEPS (insn));
3785 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3786 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3788 /* Clear resolved back deps together with its dep_nodes. */
3789 delete_dep_nodes_in_back_deps (insn, resolved_p);
3791 sd_finish_insn (insn);
3795 /* Initialize variables for region data dependence analysis.
3796 When LAZY_REG_LAST is true, do not allocate reg_last array
3797 of struct deps_desc immediately. */
3800 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3802 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3804 deps->max_reg = max_reg;
3806 deps->reg_last = NULL;
3808 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3809 INIT_REG_SET (&deps->reg_last_in_use);
3811 deps->pending_read_insns = 0;
3812 deps->pending_read_mems = 0;
3813 deps->pending_write_insns = 0;
3814 deps->pending_write_mems = 0;
3815 deps->pending_jump_insns = 0;
3816 deps->pending_read_list_length = 0;
3817 deps->pending_write_list_length = 0;
3818 deps->pending_flush_length = 0;
3819 deps->last_pending_memory_flush = 0;
3820 deps->last_function_call = 0;
3821 deps->last_function_call_may_noreturn = 0;
3822 deps->sched_before_next_call = 0;
3823 deps->sched_before_next_jump = 0;
3824 deps->in_post_call_group_p = not_post_call;
3825 deps->last_debug_insn = 0;
3826 deps->last_reg_pending_barrier = NOT_A_BARRIER;
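/* Sketch of the lazy-allocation protocol implied by LAZY_REG_LAST
   (illustration only):  */
#if 0
struct deps_desc deps;

init_deps (&deps, true);        /* reg_last stays NULL for now.  */
if (deps.reg_last == NULL)
  init_deps_reg_last (&deps);   /* Allocate just before first use.  */
#endif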
3830 /* Init only reg_last field of DEPS, which was not allocated before as
3831 we inited DEPS lazily. */
3833 init_deps_reg_last (struct deps_desc *deps)
3835 gcc_assert (deps && deps->max_reg > 0);
3836 gcc_assert (deps->reg_last == NULL);
3838 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3842 /* Free insn lists found in DEPS. */
3845 free_deps (struct deps_desc *deps)
3848 reg_set_iterator rsi;
3850 /* We set max_reg to 0 when this context was already freed. */
3851 if (deps->max_reg == 0)
3853 gcc_assert (deps->reg_last == NULL);
3858 free_INSN_LIST_list (&deps->pending_read_insns);
3859 free_EXPR_LIST_list (&deps->pending_read_mems);
3860 free_INSN_LIST_list (&deps->pending_write_insns);
3861 free_EXPR_LIST_list (&deps->pending_write_mems);
3862 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3864 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3865 times. For a testcase with 42000 regs and 8000 small basic blocks,
3866 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3867 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3869 struct deps_reg *reg_last = &deps->reg_last[i];
3871 free_INSN_LIST_list (&reg_last->uses);
3873 free_INSN_LIST_list (&reg_last->sets);
3874 if (reg_last->implicit_sets)
3875 free_INSN_LIST_list (&reg_last->implicit_sets);
3876 if (reg_last->control_uses)
3877 free_INSN_LIST_list (&reg_last->control_uses);
3878 if (reg_last->clobbers)
3879 free_INSN_LIST_list (&reg_last->clobbers);
3881 CLEAR_REG_SET (&deps->reg_last_in_use);
3883 /* As we initialize reg_last lazily, it is possible that we didn't allocate
3885 free (deps->reg_last);
3886 deps->reg_last = NULL;
3891 /* Remove INSN from dependence contexts DEPS. */
3893 remove_from_deps (struct deps_desc *deps, rtx insn)
3897 reg_set_iterator rsi;
3899 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3900 &deps->pending_read_mems);
3901 if (!DEBUG_INSN_P (insn))
3902 deps->pending_read_list_length -= removed;
3903 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3904 &deps->pending_write_mems);
3905 deps->pending_write_list_length -= removed;
3907 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
3908 deps->pending_flush_length -= removed;
3909 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3910 deps->pending_flush_length -= removed;
3912 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3914 struct deps_reg *reg_last = &deps->reg_last[i];
3916 remove_from_dependence_list (insn, &reg_last->uses);
3918 remove_from_dependence_list (insn, &reg_last->sets);
3919 if (reg_last->implicit_sets)
3920 remove_from_dependence_list (insn, &reg_last->implicit_sets);
3921 if (reg_last->clobbers)
3922 remove_from_dependence_list (insn, &reg_last->clobbers);
3923 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3924 && !reg_last->clobbers)
3925 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3930 remove_from_dependence_list (insn, &deps->last_function_call);
3931 remove_from_dependence_list (insn,
3932 &deps->last_function_call_may_noreturn);
3934 remove_from_dependence_list (insn, &deps->sched_before_next_call);
3937 /* Init deps data vector. */
3939 init_deps_data_vector (void)
3941 int reserve = (sched_max_luid + 1
3942 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
3944 && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
3945 VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
3946 3 * sched_max_luid / 2);
3949 /* If it is profitable to use them, initialize or extend (depending on
3950 GLOBAL_P) dependency data. */
3952 sched_deps_init (bool global_p)
3954 /* Average number of insns in the basic block.
3955 '+ 1' is used to make it nonzero. */
3956 int insns_in_block = sched_max_luid / n_basic_blocks + 1;
3958 init_deps_data_vector ();
3960 /* We use another caching mechanism for selective scheduling, so
3961 we don't use this one. */
3962 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
3964 /* ?!? We could save some memory by computing a per-region luid mapping
3965 which could reduce both the number of vectors in the cache and the
3966 size of each vector. Instead we just avoid the cache entirely unless
3967 the average number of instructions in a basic block is very high. See
3968 the comment before the declaration of true_dependency_cache for
3969 what we consider "very high". */
3971 extend_dependency_caches (sched_max_luid, true);
3976 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
3977 /* Allocate lists for one block at a time. */
3979 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
3980 /* Allocate nodes for one block at a time.
3981 We assume that average insn has
3983 5 * insns_in_block);
3988 /* Create or extend (depending on CREATE_P) dependency caches to
3991 extend_dependency_caches (int n, bool create_p)
3993 if (create_p || true_dependency_cache)
3995 int i, luid = cache_size + n;
3997 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
3999 output_dependency_cache = XRESIZEVEC (bitmap_head,
4000 output_dependency_cache, luid);
4001 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4003 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4006 if (current_sched_info->flags & DO_SPECULATION)
4007 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4010 for (i = cache_size; i < luid; i++)
4012 bitmap_initialize (&true_dependency_cache[i], 0);
4013 bitmap_initialize (&output_dependency_cache[i], 0);
4014 bitmap_initialize (&anti_dependency_cache[i], 0);
4015 bitmap_initialize (&control_dependency_cache[i], 0);
4017 if (current_sched_info->flags & DO_SPECULATION)
4018 bitmap_initialize (&spec_dependency_cache[i], 0);
4024 /* Finalize dependency information for the whole function. */
4026 sched_deps_finish (void)
4028 gcc_assert (deps_pools_are_empty_p ());
4029 free_alloc_pool_if_empty (&dn_pool);
4030 free_alloc_pool_if_empty (&dl_pool);
4031 gcc_assert (dn_pool == NULL && dl_pool == NULL);
4033 VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
4036 if (true_dependency_cache)
4040 for (i = 0; i < cache_size; i++)
4042 bitmap_clear (&true_dependency_cache[i]);
4043 bitmap_clear (&output_dependency_cache[i]);
4044 bitmap_clear (&anti_dependency_cache[i]);
4045 bitmap_clear (&control_dependency_cache[i]);
4047 if (sched_deps_info->generate_spec_deps)
4048 bitmap_clear (&spec_dependency_cache[i]);
4050 free (true_dependency_cache);
4051 true_dependency_cache = NULL;
4052 free (output_dependency_cache);
4053 output_dependency_cache = NULL;
4054 free (anti_dependency_cache);
4055 anti_dependency_cache = NULL;
4056 free (control_dependency_cache);
4057 control_dependency_cache = NULL;
4059 if (sched_deps_info->generate_spec_deps)
4061 free (spec_dependency_cache);
4062 spec_dependency_cache = NULL;
4068 /* Initialize some global variables needed by the dependency analysis
4072 init_deps_global (void)
4074 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4075 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4076 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4077 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4078 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4079 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4080 reg_pending_barrier = NOT_A_BARRIER;
4082 if (!sel_sched_p () || sched_emulate_haifa_p)
4084 sched_deps_info->start_insn = haifa_start_insn;
4085 sched_deps_info->finish_insn = haifa_finish_insn;
4087 sched_deps_info->note_reg_set = haifa_note_reg_set;
4088 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4089 sched_deps_info->note_reg_use = haifa_note_reg_use;
4091 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4092 sched_deps_info->note_dep = haifa_note_dep;
4096 /* Free everything used by the dependency analysis code. */
4099 finish_deps_global (void)
4101 FREE_REG_SET (reg_pending_sets);
4102 FREE_REG_SET (reg_pending_clobbers);
4103 FREE_REG_SET (reg_pending_uses);
4104 FREE_REG_SET (reg_pending_control_uses);
4107 /* Estimate the weakness of dependence between MEM1 and MEM2. */
4109 estimate_dep_weak (rtx mem1, rtx mem2)
4114 /* MEMs are the same - don't speculate. */
4115 return MIN_DEP_WEAK;
4117 r1 = XEXP (mem1, 0);
4118 r2 = XEXP (mem2, 0);
4121 || (REG_P (r1) && REG_P (r2)
4122 && REGNO (r1) == REGNO (r2)))
4123 /* Again, MEMs are the same. */
4124 return MIN_DEP_WEAK;
4125 else if ((REG_P (r1) && !REG_P (r2))
4126 || (!REG_P (r1) && REG_P (r2)))
4127 /* Different addressing modes - reason to be more speculative,
4129 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4131 /* We can't say anything about the dependence. */
4132 return UNCERTAIN_DEP_WEAK;
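/* Summary of the three outcomes above (symbolic, since the *_DEP_WEAK
   constants are defined in sched-int.h): identical MEMs or identical
   base registers yield MIN_DEP_WEAK (the dependence is nearly certain,
   so do not speculate); mixed register/non-register addresses yield
   NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2, i.e. roughly
   the midpoint (NO_DEP_WEAK + UNCERTAIN_DEP_WEAK) / 2, favoring
   speculation; everything else yields UNCERTAIN_DEP_WEAK.  */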
4135 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4136 This function can handle same INSN and ELEM (INSN == ELEM).
4137 It is a convenience wrapper. */
4139 add_dependence_1 (rtx insn, rtx elem, enum reg_note dep_type)
4144 if (dep_type == REG_DEP_TRUE)
4146 else if (dep_type == REG_DEP_OUTPUT)
4148 else if (dep_type == REG_DEP_CONTROL)
4152 gcc_assert (dep_type == REG_DEP_ANTI);
4156 /* When add_dependence is called from inside sched-deps.c, we expect
4157 cur_insn to be non-null. */
4158 internal = cur_insn != NULL;
4160 gcc_assert (insn == cur_insn);
4164 note_dep (elem, ds);
4169 /* Return weakness of speculative type TYPE in the dep_status DS. */
4171 get_dep_weak_1 (ds_t ds, ds_t type)
4177 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4178 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4179 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4180 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4181 default: gcc_unreachable ();
4188 get_dep_weak (ds_t ds, ds_t type)
4190 dw_t dw = get_dep_weak_1 (ds, type);
4192 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4196 /* Return the dep_status, which has the same parameters as DS, except for
4197 speculative type TYPE, that will have weakness DW. */
4199 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4201 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4206 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4207 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4208 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4209 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4210 default: gcc_unreachable ();
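/* Round-trip property of the two accessors above (sketch; DS and DW
   are hypothetical, with MIN_DEP_WEAK <= DW <= MAX_DEP_WEAK):  */
#if 0
ds_t ds2 = set_dep_weak (ds, BEGIN_DATA, dw);
gcc_assert (get_dep_weak (ds2, BEGIN_DATA) == dw);
#endif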
4215 /* Return the join of two dep_statuses DS1 and DS2.
4216 If MAX_P is true then choose the greater probability,
4217 otherwise multiply probabilities.
4218 This function assumes that both DS1 and DS2 contain speculative bits. */
4220 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4224 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4226 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4228 t = FIRST_SPEC_TYPE;
4231 if ((ds1 & t) && !(ds2 & t))
4233 else if (!(ds1 & t) && (ds2 & t))
4235 else if ((ds1 & t) && (ds2 & t))
4237 dw_t dw1 = get_dep_weak (ds1, t);
4238 dw_t dw2 = get_dep_weak (ds2, t);
4243 dw = ((ds_t) dw1) * ((ds_t) dw2);
4245 if (dw < MIN_DEP_WEAK)
4256 ds = set_dep_weak (ds, t, (dw_t) dw);
4259 if (t == LAST_SPEC_TYPE)
4261 t <<= SPEC_TYPE_SHIFT;
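/* Worked example for the multiplicative merge above.  A weakness DW
   encodes the success probability P = DW / MAX_DEP_WEAK, so combining
   two independent speculations multiplies the probabilities:

       P1 * P2 = (DW1 * DW2) / (MAX_DEP_WEAK * MAX_DEP_WEAK)

   which corresponds to a merged weakness of DW1 * DW2 / MAX_DEP_WEAK,
   clamped below at MIN_DEP_WEAK.  E.g. two half-certain speculations
   (DW1 == DW2 == MAX_DEP_WEAK / 2) merge to about MAX_DEP_WEAK / 4.  */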
4268 /* Return the join of two dep_statuses DS1 and DS2.
4269 This function assumes that both DS1 and DS2 contain speculative bits. */
4271 ds_merge (ds_t ds1, ds_t ds2)
4273 return ds_merge_1 (ds1, ds2, false);
4276 /* Return the join of two dep_statuses DS1 and DS2. */
4278 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4280 ds_t new_status = ds | ds2;
4282 if (new_status & SPECULATIVE)
4284 if ((ds && !(ds & SPECULATIVE))
4285 || (ds2 && !(ds2 & SPECULATIVE)))
4286 /* Then this dep can't be speculative. */
4287 new_status &= ~SPECULATIVE;
4290 /* Both are speculative. Merging probabilities. */
4295 dw = estimate_dep_weak (mem1, mem2);
4296 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4304 new_status = ds_merge (ds2, ds);
4311 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
4314 ds_max_merge (ds_t ds1, ds_t ds2)
4316 if (ds1 == 0 && ds2 == 0)
4319 if (ds1 == 0 && ds2 != 0)
4322 if (ds1 != 0 && ds2 == 0)
4325 return ds_merge_1 (ds1, ds2, true);
4328 /* Return the probability of speculation success for the speculation
4336 dt = FIRST_SPEC_TYPE;
4341 res *= (ds_t) get_dep_weak (ds, dt);
4345 if (dt == LAST_SPEC_TYPE)
4347 dt <<= SPEC_TYPE_SHIFT;
4353 res /= MAX_DEP_WEAK;
4355 if (res < MIN_DEP_WEAK)
4358 gcc_assert (res <= MAX_DEP_WEAK);
4363 /* Return a dep status that contains all speculation types of DS. */
4365 ds_get_speculation_types (ds_t ds)
4367 if (ds & BEGIN_DATA)
4369 if (ds & BE_IN_DATA)
4371 if (ds & BEGIN_CONTROL)
4372 ds |= BEGIN_CONTROL;
4373 if (ds & BE_IN_CONTROL)
4374 ds |= BE_IN_CONTROL;
4376 return ds & SPECULATIVE;
4379 /* Return a dep status that contains maximal weakness for each speculation
4380 type present in DS. */
4382 ds_get_max_dep_weak (ds_t ds)
4384 if (ds & BEGIN_DATA)
4385 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4386 if (ds & BE_IN_DATA)
4387 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4388 if (ds & BEGIN_CONTROL)
4389 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4390 if (ds & BE_IN_CONTROL)
4391 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4396 /* Dump information about the dependence status S. */
4398 dump_ds (FILE *f, ds_t s)
4403 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4405 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4406 if (s & BEGIN_CONTROL)
4407 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4408 if (s & BE_IN_CONTROL)
4409 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4412 fprintf (f, "HARD_DEP; ");
4415 fprintf (f, "DEP_TRUE; ");
4417 fprintf (f, "DEP_OUTPUT; ");
4419 fprintf (f, "DEP_ANTI; ");
4420 if (s & DEP_CONTROL)
4421 fprintf (f, "DEP_CONTROL; ");
4429 dump_ds (stderr, s);
4430 fprintf (stderr, "\n");
4433 #ifdef ENABLE_CHECKING
4434 /* Verify that dependence type and status are consistent.
4435 If RELAXED_P is true, then skip dep_weakness checks. */
4437 check_dep (dep_t dep, bool relaxed_p)
4439 enum reg_note dt = DEP_TYPE (dep);
4440 ds_t ds = DEP_STATUS (dep);
4442 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4444 if (!(current_sched_info->flags & USE_DEPS_LIST))
4446 gcc_assert (ds == 0);
4450 /* Check that dependence type contains the same bits as the status. */
4451 if (dt == REG_DEP_TRUE)
4452 gcc_assert (ds & DEP_TRUE);
4453 else if (dt == REG_DEP_OUTPUT)
4454 gcc_assert ((ds & DEP_OUTPUT)
4455 && !(ds & DEP_TRUE));
4456 else if (dt == REG_DEP_ANTI)
4457 gcc_assert ((ds & DEP_ANTI)
4458 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4460 gcc_assert (dt == REG_DEP_CONTROL
4461 && (ds & DEP_CONTROL)
4462 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4464 /* HARD_DEP cannot appear in the dep_status of a link. */
4465 gcc_assert (!(ds & HARD_DEP));
4467 /* Check that dependence status is set correctly when speculation is not
4469 if (!sched_deps_info->generate_spec_deps)
4470 gcc_assert (!(ds & SPECULATIVE));
4471 else if (ds & SPECULATIVE)
4475 ds_t type = FIRST_SPEC_TYPE;
4477 /* Check that dependence weakness is in proper range. */
4481 get_dep_weak (ds, type);
4483 if (type == LAST_SPEC_TYPE)
4485 type <<= SPEC_TYPE_SHIFT;
4490 if (ds & BEGIN_SPEC)
4492 /* Only true dependence can be data speculative. */
4493 if (ds & BEGIN_DATA)
4494 gcc_assert (ds & DEP_TRUE);
4496 /* Control dependencies in the insn scheduler are represented by
4497 anti-dependencies, therefore only anti dependence can be
4498 control speculative. */
4499 if (ds & BEGIN_CONTROL)
4500 gcc_assert (ds & DEP_ANTI);
4504 /* Subsequent speculations should resolve true dependencies. */
4505 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4508 /* Check that true and anti dependencies can't have other speculative
4511 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4512 /* An output dependence can't be speculative at all. */
4513 gcc_assert (!(ds & DEP_OUTPUT));
4515 gcc_assert (ds & BEGIN_CONTROL);
4518 #endif /* ENABLE_CHECKING */
4520 /* The following code discovers opportunities to switch a memory reference
4521 and an increment by modifying the address. We ensure that this is done
4522 only for dependencies that are only used to show a single register
4523 dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
4524 instruction involved is subject to only one dep that can cause a pattern
4527 When we discover a suitable dependency, we fill in the dep_replacement
4528 structure to show how to modify the memory reference. */
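/* A concrete instance of the interchange (sketch; the register and
   constants are hypothetical).  Given

     mem insn:  ... (mem (plus (reg R) (const_int 4))) ...
     inc insn:  (set (reg R) (plus (reg R) (const_int 8)))

   the increment can be moved before the memory reference if the latter
   is rewritten to use (mem (plus (reg R) (const_int -4))), because
   (R + 8) - 4 == R + 4.  */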
/* Holds information about a pair of memory reference and register increment
   insns which depend on each other, but could possibly be interchanged.  */
struct mem_inc_info
{
  rtx inc_insn;
  rtx mem_insn;

  rtx *mem_loc;
  /* A register occurring in the memory address for which we wish to break
     the dependence.  This must be identical to the destination register of
     the increment.  */
  rtx mem_reg0;
  /* Any kind of index that is added to that register.  */
  rtx mem_index;
  /* The constant offset used in the memory address.  */
  HOST_WIDE_INT mem_constant;
  /* The constant added in the increment insn.  Negated if the increment is
     after the memory address.  */
  HOST_WIDE_INT inc_constant;
  /* The source register used in the increment.  May be different from
     mem_reg0 if the increment occurs before the memory address.  */
  rtx inc_input;
};
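/* A concrete (hypothetical) instance: for the insn pair shown in the
   illustration above, mem_reg0 == inc_input == (reg 100),
   mem_index == NULL_RTX, mem_constant == 8 and inc_constant == -4
   (negated, because the increment follows the memory reference).  */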
/* Verify that the memory location described in MII can be replaced with
   one using NEW_ADDR.  Return the new memory reference or NULL_RTX.  The
   insn remains unchanged by this function.  */
static rtx
attempt_change (struct mem_inc_info *mii, rtx new_addr)
{
  rtx mem = *mii->mem_loc;
  rtx new_mem;

  /* Jump through a lot of hoops to keep the attributes up to date.  We
     do not want to call one of the change address variants that take
     an offset even though we know the offset in many cases.  These
     assume you are changing where the address is pointing by the
     offset.  */
  new_mem = replace_equiv_address_nv (mem, new_addr);
  if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
    {
      if (sched_verbose >= 5)
        fprintf (sched_dump, "validation failure\n");
      return NULL_RTX;
    }

  /* Put back the old one.  */
  validate_change (mii->mem_insn, mii->mem_loc, mem, 0);

  return new_mem;
}
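/* Note the validate-then-revert pattern above: attempt_change only tests
   whether the rewritten insn would still be recognized.  The new MEM is
   stashed in a dep_replacement by find_inc below, and the pattern is only
   really rewritten later, if and when the scheduler decides to break the
   dependence.  */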
/* Return true if INSN has the form "a = b + c", where a and b are regs
   and c is a constant (the sign of c is reversed below if the increment
   follows the memory reference).  Fill in information in MII about what
   is found.
   BEFORE_MEM indicates whether the increment is found before or after
   a corresponding memory reference.  */
static bool
parse_add_or_inc (struct mem_inc_info *mii, rtx insn, bool before_mem)
{
  rtx pat = single_set (insn);
  rtx src, cst;
  bool regs_equal;

  if (RTX_FRAME_RELATED_P (insn) || !pat)
    return false;
  /* Result must be single reg.  */
  if (!REG_P (SET_DEST (pat)))
    return false;
  if (GET_CODE (SET_SRC (pat)) != PLUS)
    return false;

  mii->inc_insn = insn;
  src = SET_SRC (pat);
  mii->inc_input = XEXP (src, 0);

  if (!REG_P (XEXP (src, 0)))
    return false;
  if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
    return false;

  cst = XEXP (src, 1);
  if (!CONST_INT_P (cst))
    return false;
  mii->inc_constant = INTVAL (cst);

  regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);

  if (!before_mem)
    {
      mii->inc_constant = -mii->inc_constant;
      if (!regs_equal)
        return false;
    }

  if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
    {
      /* Note that the sign has already been reversed for !before_mem.  */
#ifdef STACK_GROWS_DOWNWARD
      return mii->inc_constant > 0;
#else
      return mii->inc_constant < 0;
#endif
    }

  return true;
}
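/* For illustration, with BEFORE_MEM false the parser accepts, e.g.,

       (set (reg 100) (plus (reg 100) (const_int 4)))

   recording inc_constant as -4, while something like

       (set (reg 100) (plus (reg 101) (const_int 4)))

   is rejected in that direction because the input register differs from
   the one used in the address.  (Register numbers are hypothetical.)  */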
/* Once a suitable mem reference has been found and the corresponding data
   in MII has been filled in, this function is called to find a suitable
   add or inc insn involving the register we found in the memory
   reference.  */
static bool
find_inc (struct mem_inc_info *mii, bool backwards)
{
  sd_iterator_def sd_it;
  dep_t dep;

  sd_it = sd_iterator_start (mii->mem_insn,
                             backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
      rtx pro = DEP_PRO (dep);
      rtx con = DEP_CON (dep);
      rtx inc_cand = backwards ? pro : con;
      if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
        goto next;
      if (parse_add_or_inc (mii, inc_cand, backwards))
        {
          struct dep_replacement *desc;
          df_ref *def_rec;
          rtx newaddr, newmem;

          if (sched_verbose >= 5)
            fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
                     INSN_UID (mii->mem_insn), INSN_UID (inc_cand));

          /* Need to ensure that none of the operands of the inc
             instruction are assigned to by the mem insn.  */
          for (def_rec = DF_INSN_DEFS (mii->mem_insn); *def_rec; def_rec++)
            {
              df_ref def = *def_rec;
              if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
                  || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
                {
                  if (sched_verbose >= 5)
                    fprintf (sched_dump,
                             "inc conflicts with store failure.\n");
                  goto next;
                }
            }

          newaddr = mii->inc_input;
          if (mii->mem_index != NULL_RTX)
            newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
                                    mii->mem_index);
          newaddr = plus_constant (GET_MODE (newaddr), newaddr,
                                   mii->mem_constant + mii->inc_constant);
          newmem = attempt_change (mii, newaddr);
          if (newmem == NULL_RTX)
            goto next;
          if (sched_verbose >= 5)
            fprintf (sched_dump, "successful address replacement\n");
          desc = XCNEW (struct dep_replacement);
          DEP_REPLACE (dep) = desc;
          desc->loc = mii->mem_loc;
          desc->newval = newmem;
          desc->orig = *desc->loc;
          desc->insn = mii->mem_insn;
          /* The dependence is now breakable: move its link from the hard
             back-deps list of the consumer to the speculative list.  */
          move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
                         INSN_SPEC_BACK_DEPS (con));
          if (backwards)
            {
              FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
                if (modified_in_p (mii->inc_input, DEP_PRO (dep)))
                  add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
                                    REG_DEP_TRUE);
            }
          else
            {
              FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
                if (modified_in_p (mii->inc_input, DEP_CON (dep)))
                  add_dependence_1 (DEP_CON (dep), mii->mem_insn,
                                    REG_DEP_ANTI);
            }
          return true;
        }
    next:
      sd_iterator_next (&sd_it);
    }
  return false;
}
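/* Continuing the running (hypothetical) example: with inc_input ==
   (reg 100), mem_index == NULL_RTX, mem_constant == 8 and
   inc_constant == -4, the code above builds newaddr as
   (plus (reg 100) (const_int 4)), i.e. the old offset adjusted for the
   increment having already happened.  */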
/* A recursive function that walks ADDRESS_OF_X to find memory references
   which could be modified during scheduling.  We call find_inc for each
   one we find that has a recognizable form.  MII holds information about
   the pair of memory/increment instructions.
   We ensure that every instruction with a memory reference (which will be
   the location of the replacement) is assigned at most one breakable
   dependency.  */
static bool
find_mem (struct mem_inc_info *mii, rtx *address_of_x)
{
  rtx x = *address_of_x;
  enum rtx_code code = GET_CODE (x);
  const char *const fmt = GET_RTX_FORMAT (code);
  int i;

  if (code == MEM)
    {
      rtx reg0 = XEXP (x, 0);

      mii->mem_loc = address_of_x;
      mii->mem_index = NULL_RTX;
      mii->mem_constant = 0;
      if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
        {
          mii->mem_constant = INTVAL (XEXP (reg0, 1));
          reg0 = XEXP (reg0, 0);
        }
      if (GET_CODE (reg0) == PLUS)
        {
          mii->mem_index = XEXP (reg0, 1);
          reg0 = XEXP (reg0, 0);
        }
      if (REG_P (reg0))
        {
          df_ref *def_rec;
          int occurrences = 0;

          /* Make sure this reg appears only once in this insn.  Can't use
             count_occurrences since that only works for pseudos.  */
          for (def_rec = DF_INSN_USES (mii->mem_insn); *def_rec; def_rec++)
            {
              df_ref def = *def_rec;
              if (reg_overlap_mentioned_p (reg0, DF_REF_REG (def))
                  && ++occurrences > 1)
                {
                  if (sched_verbose >= 5)
                    fprintf (sched_dump, "mem count failure\n");
                  return false;
                }
            }

          mii->mem_reg0 = reg0;
          return find_inc (mii, true) || find_inc (mii, false);
        }
      return false;
    }

  if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
    {
      /* If REG occurs inside a MEM used in a bit-field reference,
         that is unacceptable.  */
      return false;
    }

  /* Time for some deep diving.  */
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (find_mem (mii, &XEXP (x, i)))
            return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (find_mem (mii, &XVECEXP (x, i, j)))
              return true;
        }
    }
  return false;
}
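/* For illustration: an address such as

       (plus (plus (reg 100) (reg 101)) (const_int 8))

   decomposes above into mem_reg0 == (reg 100), mem_index == (reg 101)
   and mem_constant == 8, while a bare (reg 100) leaves mem_index as
   NULL_RTX and mem_constant as zero.  (Register numbers are
   hypothetical.)  */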
/* Examine the instructions between HEAD and TAIL and try to find
   dependencies that can be broken by modifying one of the patterns.  */
void
find_modifiable_mems (rtx head, rtx tail)
{
  rtx insn, next_tail = NEXT_INSN (tail);
  int success_in_block = 0;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      struct mem_inc_info mii;

      if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
        continue;

      mii.mem_insn = insn;
      if (find_mem (&mii, &PATTERN (insn)))
        success_in_block++;
    }
  if (success_in_block && sched_verbose >= 5)
    fprintf (sched_dump, "%d candidates for address modification found.\n",
             success_in_block);
}

#endif /* INSN_SCHEDULING */