1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com).
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.
20 You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not, see
22 <http://www.gnu.org/licenses/>. */
/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe.  Register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in the
   remaining slots.
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block over interblock motion, ties broken by
   4.  prefer useful over speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, then
   7.  choose the insn which has the most insns dependent on it, and
   finally
   8.  choose insn with lowest UID.
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
   memory references.  To a first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using insn backward dependences
86 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
   INSN_FORW_DEPS for the purpose of forward list scheduling.
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live register constraints are not
115 represented by data dependence edges).
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
129 #include "coretypes.h"
131 #include "diagnostic-core.h"
132 #include "hard-reg-set.h"
136 #include "function.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
142 #include "sched-int.h"
144 #include "common/common-target.h"
151 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
154 #ifdef INSN_SCHEDULING
156 /* issue_rate is the number of insns that can be scheduled in the same
157 machine cycle. It can be defined in the config/mach/mach.h file,
158 otherwise we set it to 1. */
162 /* This can be set to true by a backend if the scheduler should not
163 enable a DCE pass. */
166 /* The current initiation interval used when modulo scheduling. */
167 static int modulo_ii;
169 /* The maximum number of stages we are prepared to handle. */
170 static int modulo_max_stages;
172 /* The number of insns that exist in each iteration of the loop. We use this
173 to detect when we've scheduled all insns from the first iteration. */
174 static int modulo_n_insns;
176 /* The current count of insns in the first iteration of the loop that have
177 already been scheduled. */
178 static int modulo_insns_scheduled;
180 /* The maximum uid of insns from the first iteration of the loop. */
181 static int modulo_iter0_max_uid;
183 /* The number of times we should attempt to backtrack when modulo scheduling.
184 Decreased each time we have to backtrack. */
185 static int modulo_backtracks_left;
/* The stage in which the last insn from the original loop was
   scheduled.  */
189 static int modulo_last_stage;
191 /* sched-verbose controls the amount of debugging output the
192 scheduler prints. It is controlled by -fsched-verbose=N:
   N>0 and no -dSR: the output is directed to stderr.
194 N>=10 will direct the printouts to stderr (regardless of -dSR).
196 N=2: bb's probabilities, detailed ready list info, unit/insn info.
197 N=3: rtl at abort point, control-flow, regions info.
198 N=5: dependences info. */
200 int sched_verbose = 0;
202 /* Debugging file. All printouts are sent to dump, which is always set,
203 either to stderr, or to the dump listing file (-dRS). */
204 FILE *sched_dump = 0;
206 /* This is a placeholder for the scheduler parameters common
207 to all schedulers. */
208 struct common_sched_info_def *common_sched_info;
210 #define INSN_TICK(INSN) (HID (INSN)->tick)
211 #define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
212 #define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
213 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
214 #define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
215 #define SHADOW_P(INSN) (HID (INSN)->shadow_p)
216 #define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
218 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
219 then it should be recalculated from scratch. */
220 #define INVALID_TICK (-(max_insn_queue_index + 1))
221 /* The minimal value of the INSN_TICK of an instruction. */
222 #define MIN_TICK (-max_insn_queue_index)
224 /* List of important notes we must keep around. This is a pointer to the
225 last element in the list. */
228 static struct spec_info_def spec_info_var;
229 /* Description of the speculative part of the scheduling.
230 If NULL - no speculation. */
231 spec_info_t spec_info = NULL;
/* True if a recovery block was added during scheduling of the current
   block.  Used to determine if we need to fix INSN_TICKs.  */
235 static bool haifa_recovery_bb_recently_added_p;
/* True if a recovery block was added during this scheduling pass.
   Used to determine if we should have empty memory pools of dependencies
   after finishing the current region.  */
240 bool haifa_recovery_bb_ever_added_p;
242 /* Counters of different types of speculative instructions. */
243 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
245 /* Array used in {unlink, restore}_bb_notes. */
246 static rtx *bb_header = 0;
248 /* Basic block after which recovery blocks will be created. */
249 static basic_block before_recovery;
/* Basic block just before the EXIT_BLOCK and after recovery, if we have
   created it.  */
253 basic_block after_recovery;
255 /* FALSE if we add bb to another region, so we don't need to initialize it. */
256 bool adding_bb_to_current_region_p = true;
260 /* An instruction is ready to be scheduled when all insns preceding it
261 have already been scheduled. It is important to ensure that all
262 insns which use its result will not be executed until its result
263 has been computed. An insn is maintained in one of four structures:
265 (P) the "Pending" set of insns which cannot be scheduled until
266 their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has elapsed.
269 (R) the "Ready" list of unscheduled, uncommitted insns.
270 (S) the "Scheduled" list of insns.
272 Initially, all insns are either "Pending" or "Ready" depending on
273 whether their dependencies are satisfied.
275 Insns move from the "Ready" list to the "Scheduled" list as they
276 are committed to the schedule. As this occurs, the insns in the
277 "Pending" list have their dependencies satisfied and move to either
278 the "Ready" list or the "Queued" set depending on whether
279 sufficient time has passed to make them ready. As time passes,
280 insns move from the "Queued" set to the "Ready" list.
   The "Pending" list (P) consists of the insns in the INSN_FORW_DEPS of the
   unscheduled insns, i.e., those that are ready, queued, and pending.
284 The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
287 The "Scheduled" list (S) is the new insn chain built by this pass.
289 The transition (R->S) is implemented in the scheduling loop in
290 `schedule_block' when the best insn to schedule is chosen.
291 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
292 insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in 'queue_to_ready' as time
294 passes or stalls are introduced. */
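/* A self-contained sketch of the bookkeeping described above.  The
   toy_* names are illustrative inventions, not GCC's; the real
   transitions are implemented by schedule_block, schedule_insn and
   queue_to_ready.  */

enum toy_state { TOY_PENDING, TOY_QUEUED, TOY_READY, TOY_SCHEDULED };

struct toy_node
{
  enum toy_state state;
  int n_preds_left;	/* Unresolved backward dependencies (P).  */
  int ready_tick;	/* Earliest cycle at which this insn may issue.  */
};

/* P->R and P->Q: resolve one dependency of NODE at cycle CLOCK.  */
static void
toy_resolve_dep (struct toy_node *node, int clock)
{
  if (node->state == TOY_PENDING && --node->n_preds_left == 0)
    node->state = node->ready_tick <= clock ? TOY_READY : TOY_QUEUED;
}

/* Q->R: at the start of each cycle, release queued insns whose time
   has come.  */
static void
toy_queue_to_ready (struct toy_node *nodes, int n, int clock)
{
  int i;

  for (i = 0; i < n; i++)
    if (nodes[i].state == TOY_QUEUED && nodes[i].ready_tick <= clock)
      nodes[i].state = TOY_READY;
}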
296 /* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c,
   based on the maximal time of functional unit reservations and of
   getting a result.  This is the longest time an insn may be queued.  */
303 static rtx *insn_queue;
304 static int q_ptr = 0;
305 static int q_size = 0;
306 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+(C)) & max_insn_queue_index)
309 #define QUEUE_SCHEDULED (-3)
310 #define QUEUE_NOWHERE (-2)
311 #define QUEUE_READY (-1)
312 /* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
   queue, nor ready list.
315 QUEUE_READY - INSN is in ready list.
316 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
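/* Standalone illustration of the wraparound arithmetic above, which
   relies on the index mask being a power of two minus one.  Toy names
   and values, not GCC's.  */

#define TOY_QUEUE_MASK 7	/* 2**3 - 1: delays of up to 7 cycles.  */
#define TOY_NEXT_Q_AFTER(X, C) (((X) + (C)) & TOY_QUEUE_MASK)

/* Queue slot for an insn that must wait N_CYCLES, given the slot QP of
   the current cycle.  With QP == 6 and N_CYCLES == 3 this yields
   (6 + 3) & 7 == 1: the buffer wraps around instead of growing.  */
static int
toy_queue_slot (int qp, int n_cycles)
{
  return TOY_NEXT_Q_AFTER (qp, n_cycles);
}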
318 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
/* The following variable refers to all current and future
   reservations of the processor units.  */
/* The following variable value is the size of the memory representing
   all current and future reservations of the processor units.  */
326 size_t dfa_state_size;
328 /* The following array is used to find the best insn from ready when
329 the automaton pipeline interface is used. */
330 char *ready_try = NULL;
332 /* The ready list. */
333 struct ready_list ready = {NULL, 0, 0, 0, 0};
335 /* The pointer to the ready list (to be removed). */
336 static struct ready_list *readyp = &ready;
338 /* Scheduling clock. */
339 static int clock_var;
341 /* Clock at which the previous instruction was issued. */
342 static int last_clock_var;
344 /* Set to true if, when queuing a shadow insn, we discover that it would be
345 scheduled too late. */
346 static bool must_backtrack;
/* The following variable value is the number of essential insns issued on
   the current cycle.  An insn is essential if it changes the processor's
   state.  */
351 int cycle_issued_insns;
353 /* This records the actual schedule. It is built up during the main phase
354 of schedule_block, and afterwards used to reorder the insns in the RTL. */
355 static VEC(rtx, heap) *scheduled_insns;
357 static int may_trap_exp (const_rtx, int);
/* Nonzero iff the address is composed of at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x) \
  (REG_P (x) \
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
363 || (GET_CODE (x) == LO_SUM)) \
364 && (CONSTANT_P (XEXP (x, 0)) \
365 || CONSTANT_P (XEXP (x, 1)))))
367 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
368 as found by analyzing insn's expression. */
371 static int haifa_luid_for_non_insn (rtx x);
373 /* Haifa version of sched_info hooks common to all headers. */
374 const struct common_sched_info_def haifa_common_sched_info =
376 NULL, /* fix_recovery_cfg */
377 NULL, /* add_block */
378 NULL, /* estimate_number_of_insns */
379 haifa_luid_for_non_insn, /* luid_for_non_insn */
380 SCHED_PASS_UNKNOWN /* sched_pass_id */
383 /* Mapping from instruction UID to its Logical UID. */
384 VEC (int, heap) *sched_luids = NULL;
386 /* Next LUID to assign to an instruction. */
387 int sched_max_luid = 1;
389 /* Haifa Instruction Data. */
390 VEC (haifa_insn_data_def, heap) *h_i_d = NULL;
392 void (* sched_init_only_bb) (basic_block, basic_block);
/* Split block function.  Different schedulers might use different functions
   to keep their internal data consistent.  */
396 basic_block (* sched_split_block) (basic_block, rtx);
398 /* Create empty basic block after the specified block. */
399 basic_block (* sched_create_empty_bb) (basic_block);
402 may_trap_exp (const_rtx x, int is_store)
411 if (code == MEM && may_trap_p (x))
418 /* The insn uses memory: a volatile load. */
419 if (MEM_VOLATILE_P (x))
421 /* An exception-free load. */
424 /* A load with 1 base register, to be further checked. */
425 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
426 return PFREE_CANDIDATE;
427 /* No info on the load, to be further checked. */
428 return PRISKY_CANDIDATE;
433 int i, insn_class = TRAP_FREE;
435 /* Neither store nor load, check if it may cause a trap. */
438 /* Recursive step: walk the insn... */
439 fmt = GET_RTX_FORMAT (code);
440 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
444 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
445 insn_class = WORST_CLASS (insn_class, tmp_class);
447 else if (fmt[i] == 'E')
450 for (j = 0; j < XVECLEN (x, i); j++)
452 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
453 insn_class = WORST_CLASS (insn_class, tmp_class);
454 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
458 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
465 /* Classifies rtx X of an insn for the purpose of verifying that X can be
466 executed speculatively (and consequently the insn can be moved
467 speculatively), by examining X, returning:
468 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
469 TRAP_FREE: non-load insn.
470 IFREE: load from a globally safe location.
471 IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
473 being either PFREE or PRISKY. */
476 haifa_classify_rtx (const_rtx x)
478 int tmp_class = TRAP_FREE;
479 int insn_class = TRAP_FREE;
482 if (GET_CODE (x) == PARALLEL)
484 int i, len = XVECLEN (x, 0);
486 for (i = len - 1; i >= 0; i--)
488 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
489 insn_class = WORST_CLASS (insn_class, tmp_class);
490 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
500 /* Test if it is a 'store'. */
501 tmp_class = may_trap_exp (XEXP (x, 0), 1);
504 /* Test if it is a store. */
505 tmp_class = may_trap_exp (SET_DEST (x), 1);
506 if (tmp_class == TRAP_RISKY)
508 /* Test if it is a load. */
510 WORST_CLASS (tmp_class,
511 may_trap_exp (SET_SRC (x), 0));
514 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
515 if (tmp_class == TRAP_RISKY)
517 tmp_class = WORST_CLASS (tmp_class,
518 may_trap_exp (COND_EXEC_TEST (x), 0));
521 tmp_class = TRAP_RISKY;
525 insn_class = tmp_class;
532 haifa_classify_insn (const_rtx insn)
  return haifa_classify_rtx (PATTERN (insn));
}
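/* haifa_classify_rtx folds the classes of sub-rtxes together with
   WORST_CLASS, which keeps the riskier of its two arguments.  A
   standalone model follows; the toy_* enum mirrors the ordering of the
   real classes from least (TRAP_FREE) to most (TRAP_RISKY) risky, but
   it is an illustrative invention, not GCC's definition.  */

enum toy_class
{
  TOY_TRAP_FREE, TOY_IFREE, TOY_PFREE_CANDIDATE,
  TOY_PRISKY_CANDIDATE, TOY_IRISKY, TOY_TRAP_RISKY
};

#define TOY_WORST_CLASS(C1, C2) ((C1) > (C2) ? (C1) : (C2))

/* Folding a PARALLEL of one exception-free load (IFREE) and one store
   (TRAP_RISKY): the store dominates, so the whole insn must not be
   moved speculatively.  */
static enum toy_class
toy_classify_parallel (void)
{
  enum toy_class insn_class = TOY_TRAP_FREE;

  insn_class = TOY_WORST_CLASS (insn_class, TOY_IFREE);	      /* the load  */
  insn_class = TOY_WORST_CLASS (insn_class, TOY_TRAP_RISKY);  /* the store */
  return insn_class;
}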
537 /* After the scheduler initialization function has been called, this function
538 can be called to enable modulo scheduling. II is the initiation interval
539 we should use, it affects the delays for delay_pairs that were recorded as
540 separated by a given number of stages.
542 MAX_STAGES provides us with a limit
543 after which we give up scheduling; the caller must have unrolled at least
544 as many copies of the loop body and recorded delay_pairs for them.
546 INSNS is the number of real (non-debug) insns in one iteration of
547 the loop. MAX_UID can be used to test whether an insn belongs to
   the first iteration of the loop; all of them have a uid lower than
   MAX_UID.  */
set_modulo_params (int ii, int max_stages, int insns, int max_uid)
{
  modulo_ii = ii;
  modulo_max_stages = max_stages;
555 modulo_n_insns = insns;
556 modulo_iter0_max_uid = max_uid;
  modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
}
560 /* A structure to record a pair of insns where the first one is a real
561 insn that has delay slots, and the second is its delayed shadow.
562 I1 is scheduled normally and will emit an assembly instruction,
563 while I2 describes the side effect that takes place at the
564 transition between cycles CYCLES and (CYCLES + 1) after I1. */
567 struct delay_pair *next_same_i1;
  /* When doing modulo scheduling, a delay_pair can also be used to
     show that I1 and I2 are the same insn in a different stage.  If that
     is the case, STAGES will be nonzero.  */
/* Two hash tables to record delay_pairs, one indexed by I1 and the other
   indexed by I2.  */
578 static htab_t delay_htab;
579 static htab_t delay_htab_i2;
581 /* Called through htab_traverse. Walk the hashtable using I2 as
582 index, and delete all elements involving an UID higher than
583 that pointed to by *DATA. */
585 htab_i2_traverse (void **slot, void *data)
587 int maxuid = *(int *)data;
588 struct delay_pair *p = *(struct delay_pair **)slot;
589 if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
591 htab_clear_slot (delay_htab_i2, slot);
/* Called through htab_traverse.  Walk the hashtable using I1 as
   index, and delete all elements involving an UID higher than
   that pointed to by *DATA.  */
600 htab_i1_traverse (void **slot, void *data)
602 int maxuid = *(int *)data;
603 struct delay_pair **pslot = (struct delay_pair **)slot;
604 struct delay_pair *p, *first, **pprev;
606 if (INSN_UID ((*pslot)->i1) >= maxuid)
608 htab_clear_slot (delay_htab, slot);
612 for (p = *pslot; p; p = p->next_same_i1)
614 if (INSN_UID (p->i2) < maxuid)
617 pprev = &p->next_same_i1;
622 htab_clear_slot (delay_htab, slot);
/* Discard all delay pairs which involve an insn with an UID higher
   than MAX_UID.  */
631 discard_delay_pairs_above (int max_uid)
633 htab_traverse (delay_htab, htab_i1_traverse, &max_uid);
634 htab_traverse (delay_htab_i2, htab_i2_traverse, &max_uid);
/* Returns a hash value for X (which really is a delay_pair), based on
   hashing just I1.  */
640 delay_hash_i1 (const void *x)
642 return htab_hash_pointer (((const struct delay_pair *) x)->i1);
/* Returns a hash value for X (which really is a delay_pair), based on
   hashing just I2.  */
648 delay_hash_i2 (const void *x)
650 return htab_hash_pointer (((const struct delay_pair *) x)->i2);
/* Return nonzero if I1 of pair X is the same as insn Y.  */
655 delay_i1_eq (const void *x, const void *y)
657 return ((const struct delay_pair *) x)->i1 == y;
/* Return nonzero if I2 of pair X is the same as insn Y.  */
662 delay_i2_eq (const void *x, const void *y)
664 return ((const struct delay_pair *) x)->i2 == y;
667 /* This function can be called by a port just before it starts the final
668 scheduling pass. It records the fact that an instruction with delay
669 slots has been split into two insns, I1 and I2. The first one will be
670 scheduled normally and initiates the operation. The second one is a
671 shadow which must follow a specific number of cycles after I1; its only
672 purpose is to show the side effect that occurs at that cycle in the RTL.
673 If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
674 while I2 retains the original insn type.
676 There are two ways in which the number of cycles can be specified,
677 involving the CYCLES and STAGES arguments to this function. If STAGES
678 is zero, we just use the value of CYCLES. Otherwise, STAGES is a factor
679 which is multiplied by MODULO_II to give the number of cycles. This is
   only useful if the caller also calls set_modulo_params to enable modulo
   scheduling.  */
record_delay_slot_pair (rtx i1, rtx i2, int cycles, int stages)
{
  struct delay_pair *p = XNEW (struct delay_pair);
  struct delay_pair **slot;

  p->i1 = i1;
  p->i2 = i2;
  p->cycles = cycles;
  p->stages = stages;

  if (!delay_htab)
    {
      delay_htab = htab_create (10, delay_hash_i1, delay_i1_eq, NULL);
      delay_htab_i2 = htab_create (10, delay_hash_i2, delay_i2_eq, free);
    }
  slot = ((struct delay_pair **)
	  htab_find_slot_with_hash (delay_htab, i1, htab_hash_pointer (i1),
				    INSERT));
  p->next_same_i1 = *slot;
  *slot = p;
  slot = ((struct delay_pair **)
	  htab_find_slot_with_hash (delay_htab_i2, i2, htab_hash_pointer (i2),
				    INSERT));
  *slot = p;
}
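/* The dual-index arrangement above, reduced to a standalone model in
   which the same pair can be found from either of its insns.  Toy
   fixed-size arrays indexed by uid stand in for the two hash tables
   (and, unlike delay_htab with its next_same_i1 chain, this model
   allows only one pair per I1).  */

#define TOY_MAX_UID 64

struct toy_delay_pair { int i1, i2, cycles; };

static struct toy_delay_pair *toy_by_i1[TOY_MAX_UID];
static struct toy_delay_pair *toy_by_i2[TOY_MAX_UID];

static void
toy_record_pair (struct toy_delay_pair *p)
{
  toy_by_i1[p->i1] = p;		/* lookups by the issuing insn */
  toy_by_i2[p->i2] = p;		/* lookups by its shadow       */
}

/* Counterpart of real_insn_for_shadow below: map a shadow's uid back
   to the insn that issues.  */
static int
toy_real_for_shadow (int i2_uid)
{
  struct toy_delay_pair *p = toy_by_i2[i2_uid];
  return p ? p->i1 : -1;
}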
710 /* Examine the delay pair hashtable to see if INSN is a shadow for another,
711 and return the other insn if so. Return NULL otherwise. */
static rtx
real_insn_for_shadow (rtx insn)
{
  struct delay_pair *pair;

  if (delay_htab == NULL)
    return NULL_RTX;
  pair
    = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, insn,
						htab_hash_pointer (insn));
  if (!pair || pair->stages > 0)
    return NULL_RTX;
  return pair->i1;
}
728 /* For a pair P of insns, return the fixed distance in cycles from the first
729 insn after which the second must be scheduled. */
static int
pair_delay (struct delay_pair *p)
{
  if (p->stages == 0)
    return p->cycles;
  else
    return p->stages * modulo_ii;
}
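/* The two forms of delay above, on concrete made-up numbers: a plain
   delay-slot pair with CYCLES == 2 must be separated by 2 cycles,
   while a modulo-scheduling pair recorded STAGES == 2 iterations apart
   under an initiation interval II == 4 must be separated by
   2 * 4 == 8 cycles.  */
static int
toy_pair_delay (int cycles, int stages, int ii)
{
  return stages == 0 ? cycles : stages * ii;
}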
739 /* Given an insn INSN, add a dependence on its delayed shadow if it
740 has one. Also try to find situations where shadows depend on each other
   and add dependencies to the real insns to limit the amount of backtracking
   needed.  */
744 add_delay_dependencies (rtx insn)
746 struct delay_pair *pair;
747 sd_iterator_def sd_it;
754 = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, insn,
755 htab_hash_pointer (insn));
758 add_dependence (insn, pair->i1, REG_DEP_ANTI);
762 FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
764 rtx pro = DEP_PRO (dep);
765 struct delay_pair *other_pair
766 = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, pro,
767 htab_hash_pointer (pro));
768 if (!other_pair || other_pair->stages)
770 if (pair_delay (other_pair) >= pair_delay (pair))
772 if (sched_verbose >= 4)
774 fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
775 INSN_UID (other_pair->i1),
776 INSN_UID (pair->i1));
777 fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
781 fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
782 INSN_UID (other_pair->i1),
783 INSN_UID (other_pair->i2),
784 pair_delay (other_pair));
786 add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
791 /* Forward declarations. */
793 static int priority (rtx);
794 static int rank_for_schedule (const void *, const void *);
795 static void swap_sort (rtx *, int);
796 static void queue_insn (rtx, int, const char *);
797 static int schedule_insn (rtx);
798 static void adjust_priority (rtx);
799 static void advance_one_cycle (void);
800 static void extend_h_i_d (void);
803 /* Notes handling mechanism:
804 =========================
805 Generally, NOTES are saved before scheduling and restored after scheduling.
806 The scheduler distinguishes between two types of notes:
808 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
809 Before scheduling a region, a pointer to the note is added to the insn
810 that follows or precedes it. (This happens as part of the data dependence
811 computation). After scheduling an insn, the pointer contained in it is
812 used for regenerating the corresponding note (in reemit_notes).
814 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
815 these notes are put in a list (in rm_other_notes() and
816 unlink_other_notes ()). After scheduling the block, these notes are
817 inserted at the beginning of the block (in schedule_block()). */
819 static void ready_add (struct ready_list *, rtx, bool);
820 static rtx ready_remove_first (struct ready_list *);
821 static rtx ready_remove_first_dispatch (struct ready_list *ready);
823 static void queue_to_ready (struct ready_list *);
824 static int early_queue_to_ready (state_t, struct ready_list *);
826 static void debug_ready_list (struct ready_list *);
828 /* The following functions are used to implement multi-pass scheduling
829 on the first cycle. */
830 static rtx ready_remove (struct ready_list *, int);
831 static void ready_remove_insn (rtx);
833 static void fix_inter_tick (rtx, rtx);
834 static int fix_tick_ready (rtx);
835 static void change_queue_index (rtx, int);
837 /* The following functions are used to implement scheduling of data/control
838 speculative instructions. */
840 static void extend_h_i_d (void);
841 static void init_h_i_d (rtx);
842 static int haifa_speculate_insn (rtx, ds_t, rtx *);
843 static void generate_recovery_code (rtx);
844 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
845 static void begin_speculative_block (rtx);
846 static void add_to_speculative_block (rtx);
847 static void init_before_recovery (basic_block *);
848 static void create_check_block_twin (rtx, bool);
849 static void fix_recovery_deps (basic_block);
850 static bool haifa_change_pattern (rtx, rtx);
851 static void dump_new_block_header (int, basic_block, rtx, rtx);
852 static void restore_bb_notes (basic_block);
853 static void fix_jump_move (rtx);
854 static void move_block_after_check (rtx);
855 static void move_succs (VEC(edge,gc) **, basic_block);
856 static void sched_remove_insn (rtx);
857 static void clear_priorities (rtx, rtx_vec_t *);
858 static void calc_priorities (rtx_vec_t);
859 static void add_jump_dependencies (rtx, rtx);
861 #endif /* INSN_SCHEDULING */
863 /* Point to state used for the current scheduling pass. */
864 struct haifa_sched_info *current_sched_info;
866 #ifndef INSN_SCHEDULING
868 schedule_insns (void)
873 /* Do register pressure sensitive insn scheduling if the flag is set
875 bool sched_pressure_p;
/* Map regno -> its pressure class.  The map is defined only when
878 SCHED_PRESSURE_P is true. */
879 enum reg_class *sched_regno_pressure_class;
/* The current register pressure.  Only elements corresponding to pressure
882 classes are defined. */
883 static int curr_reg_pressure[N_REG_CLASSES];
885 /* Saved value of the previous array. */
886 static int saved_reg_pressure[N_REG_CLASSES];
/* Registers live at the given scheduling point.  */
889 static bitmap curr_reg_live;
891 /* Saved value of the previous array. */
892 static bitmap saved_reg_live;
894 /* Registers mentioned in the current region. */
895 static bitmap region_ref_regs;
897 /* Initiate register pressure relative info for scheduling the current
   region.  Currently it only clears the registers mentioned in the
   current region.  */
901 sched_init_region_reg_pressure_info (void)
903 bitmap_clear (region_ref_regs);
906 /* Update current register pressure related info after birth (if
907 BIRTH_P) or death of register REGNO. */
909 mark_regno_birth_or_death (int regno, bool birth_p)
911 enum reg_class pressure_class;
913 pressure_class = sched_regno_pressure_class[regno];
914 if (regno >= FIRST_PSEUDO_REGISTER)
916 if (pressure_class != NO_REGS)
920 bitmap_set_bit (curr_reg_live, regno);
921 curr_reg_pressure[pressure_class]
922 += (ira_reg_class_max_nregs
923 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
927 bitmap_clear_bit (curr_reg_live, regno);
928 curr_reg_pressure[pressure_class]
929 -= (ira_reg_class_max_nregs
930 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
934 else if (pressure_class != NO_REGS
935 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
939 bitmap_set_bit (curr_reg_live, regno);
940 curr_reg_pressure[pressure_class]++;
944 bitmap_clear_bit (curr_reg_live, regno);
945 curr_reg_pressure[pressure_class]--;
950 /* Initiate current register pressure related info from living
951 registers given by LIVE. */
953 initiate_reg_pressure_info (bitmap live)
959 for (i = 0; i < ira_pressure_classes_num; i++)
960 curr_reg_pressure[ira_pressure_classes[i]] = 0;
961 bitmap_clear (curr_reg_live);
962 EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
963 if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
964 mark_regno_birth_or_death (j, true);
967 /* Mark registers in X as mentioned in the current region. */
969 setup_ref_regs (rtx x)
972 const RTX_CODE code = GET_CODE (x);
978 if (HARD_REGISTER_NUM_P (regno))
979 bitmap_set_range (region_ref_regs, regno,
980 hard_regno_nregs[regno][GET_MODE (x)]);
982 bitmap_set_bit (region_ref_regs, REGNO (x));
985 fmt = GET_RTX_FORMAT (code);
986 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
988 setup_ref_regs (XEXP (x, i));
989 else if (fmt[i] == 'E')
991 for (j = 0; j < XVECLEN (x, i); j++)
992 setup_ref_regs (XVECEXP (x, i, j));
/* Initiate current register pressure related info at the start of
   basic block BB.  */
999 initiate_bb_reg_pressure_info (basic_block bb)
1001 unsigned int i ATTRIBUTE_UNUSED;
1004 if (current_nr_blocks > 1)
1005 FOR_BB_INSNS (bb, insn)
1006 if (NONDEBUG_INSN_P (insn))
1007 setup_ref_regs (PATTERN (insn));
1008 initiate_reg_pressure_info (df_get_live_in (bb));
1009 #ifdef EH_RETURN_DATA_REGNO
1010 if (bb_has_eh_pred (bb))
1013 unsigned int regno = EH_RETURN_DATA_REGNO (i);
1015 if (regno == INVALID_REGNUM)
1017 if (! bitmap_bit_p (df_get_live_in (bb), regno))
1018 mark_regno_birth_or_death (regno, true);
1023 /* Save current register pressure related info. */
1025 save_reg_pressure (void)
1029 for (i = 0; i < ira_pressure_classes_num; i++)
1030 saved_reg_pressure[ira_pressure_classes[i]]
1031 = curr_reg_pressure[ira_pressure_classes[i]];
1032 bitmap_copy (saved_reg_live, curr_reg_live);
1035 /* Restore saved register pressure related info. */
1037 restore_reg_pressure (void)
1041 for (i = 0; i < ira_pressure_classes_num; i++)
1042 curr_reg_pressure[ira_pressure_classes[i]]
1043 = saved_reg_pressure[ira_pressure_classes[i]];
1044 bitmap_copy (curr_reg_live, saved_reg_live);
1047 /* Return TRUE if the register is dying after its USE. */
1049 dying_use_p (struct reg_use_data *use)
1051 struct reg_use_data *next;
1053 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
1054 if (NONDEBUG_INSN_P (next->insn)
	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      return false;

  return true;
}
1060 /* Print info about the current register pressure and its excess for
1061 each pressure class. */
1063 print_curr_reg_pressure (void)
1068 fprintf (sched_dump, ";;\t");
1069 for (i = 0; i < ira_pressure_classes_num; i++)
1071 cl = ira_pressure_classes[i];
1072 gcc_assert (curr_reg_pressure[cl] >= 0);
1073 fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
1074 curr_reg_pressure[cl],
1075 curr_reg_pressure[cl] - ira_available_class_regs[cl]);
1077 fprintf (sched_dump, "\n");
1080 /* Determine if INSN has a condition that is clobbered if a register
1081 in SET_REGS is modified. */
1083 cond_clobbered_p (rtx insn, HARD_REG_SET set_regs)
1085 rtx pat = PATTERN (insn);
1086 gcc_assert (GET_CODE (pat) == COND_EXEC);
1087 if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
1089 sd_iterator_def sd_it;
1091 haifa_change_pattern (insn, ORIG_PAT (insn));
1092 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1093 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1094 TODO_SPEC (insn) = HARD_DEP;
1095 if (sched_verbose >= 2)
1096 fprintf (sched_dump,
1097 ";;\t\tdequeue insn %s because of clobbered condition\n",
1098 (*current_sched_info->print_insn) (insn, 0));
1105 /* Look at the remaining dependencies for insn NEXT, and compute and return
1106 the TODO_SPEC value we should use for it. This is called after one of
1107 NEXT's dependencies has been resolved. */
1110 recompute_todo_spec (rtx next)
1113 sd_iterator_def sd_it;
1114 dep_t dep, control_dep = NULL;
1117 bool first_p = true;
1119 if (sd_lists_empty_p (next, SD_LIST_BACK))
1120 /* NEXT has all its dependencies resolved. */
1123 if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
1126 /* Now we've got NEXT with speculative deps only.
1127 1. Look at the deps to see what we have to do.
1128 2. Check if we can do 'todo'. */
1131 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1133 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
1135 if (DEBUG_INSN_P (DEP_PRO (dep)) && !DEBUG_INSN_P (next))
1148 new_ds = ds_merge (new_ds, ds);
1150 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
1154 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1158 if (n_control == 1 && n_spec == 0)
1160 rtx pro, other, new_pat;
1161 rtx cond = NULL_RTX;
1163 rtx prev = NULL_RTX;
1167 if ((current_sched_info->flags & DO_PREDICATION) == 0
1168 || (ORIG_PAT (next) != NULL_RTX
1169 && PREDICATED_PAT (next) == NULL_RTX))
1172 pro = DEP_PRO (control_dep);
1173 other = real_insn_for_shadow (pro);
1174 if (other != NULL_RTX)
1177 cond = sched_get_reverse_condition_uncached (pro);
1178 regno = REGNO (XEXP (cond, 0));
1180 /* Find the last scheduled insn that modifies the condition register.
1181 If we have a true dependency on it, it sets it to the correct value,
	 otherwise it must be a later insn scheduled in-between that clobbers
	 the condition.  */
1184 FOR_EACH_VEC_ELT_REVERSE (rtx, scheduled_insns, i, prev)
1186 sd_iterator_def sd_it;
1191 find_all_hard_reg_sets (prev, &t);
1192 if (!TEST_HARD_REG_BIT (t, regno))
1196 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
1198 if (DEP_PRO (dep) == prev && DEP_TYPE (dep) == REG_DEP_TRUE)
1208 if (ORIG_PAT (next) == NULL_RTX)
1210 ORIG_PAT (next) = PATTERN (next);
1212 new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
1213 success = haifa_change_pattern (next, new_pat);
1216 PREDICATED_PAT (next) = new_pat;
1218 else if (PATTERN (next) != PREDICATED_PAT (next))
1220 bool success = haifa_change_pattern (next,
1221 PREDICATED_PAT (next));
1222 gcc_assert (success);
1224 DEP_STATUS (control_dep) |= DEP_CANCELLED;
1228 if (PREDICATED_PAT (next) != NULL_RTX)
1230 int tick = INSN_TICK (next);
1231 bool success = haifa_change_pattern (next,
1233 INSN_TICK (next) = tick;
1234 gcc_assert (success);
1237 /* We can't handle the case where there are both speculative and control
1238 dependencies, so we return HARD_DEP in such a case. Also fail if
1239 we have speculative dependencies with not enough points, or more than
1240 one control dependency. */
1241 if ((n_spec > 0 && n_control > 0)
1243 /* Too few points? */
1244 && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
1251 /* Pointer to the last instruction scheduled. */
1252 static rtx last_scheduled_insn;
1254 /* Pointer to the last nondebug instruction scheduled within the
1255 block, or the prev_head of the scheduling block. Used by
1256 rank_for_schedule, so that insns independent of the last scheduled
1257 insn will be preferred over dependent instructions. */
1258 static rtx last_nondebug_scheduled_insn;
1260 /* Pointer that iterates through the list of unscheduled insns if we
1261 have a dbg_cnt enabled. It always points at an insn prior to the
1262 first unscheduled one. */
1263 static rtx nonscheduled_insns_begin;
/* Cached cost of the instruction.  Use the function below to get the cost
   of the insn.  -1 here means that the field is not initialized.  */
1267 #define INSN_COST(INSN) (HID (INSN)->cost)
1269 /* Compute cost of executing INSN.
1270 This is the number of cycles between instruction issue and
1271 instruction results. */
1273 insn_cost (rtx insn)
1279 if (recog_memoized (insn) < 0)
1282 cost = insn_default_latency (insn);
1289 cost = INSN_COST (insn);
1293 /* A USE insn, or something else we don't need to
1294 understand. We can't pass these directly to
1295 result_ready_cost or insn_default_latency because it will
1296 trigger a fatal error for unrecognizable insns. */
1297 if (recog_memoized (insn) < 0)
1299 INSN_COST (insn) = 0;
1304 cost = insn_default_latency (insn);
1308 INSN_COST (insn) = cost;
1315 /* Compute cost of dependence LINK.
1316 This is the number of cycles between instruction issue and
1317 instruction results.
1318 ??? We also use this function to call recog_memoized on all insns. */
1320 dep_cost_1 (dep_t link, dw_t dw)
1322 rtx insn = DEP_PRO (link);
1323 rtx used = DEP_CON (link);
1326 if (DEP_COST (link) != UNKNOWN_DEP_COST)
1327 return DEP_COST (link);
1331 struct delay_pair *delay_entry;
1333 = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, used,
1334 htab_hash_pointer (used));
1337 if (delay_entry->i1 == insn)
1339 DEP_COST (link) = pair_delay (delay_entry);
1340 return DEP_COST (link);
1345 /* A USE insn should never require the value used to be computed.
1346 This allows the computation of a function's result and parameter
1347 values to overlap the return and call. We don't care about the
1348 dependence cost when only decreasing register pressure. */
1349 if (recog_memoized (used) < 0)
1352 recog_memoized (insn);
1356 enum reg_note dep_type = DEP_TYPE (link);
1358 cost = insn_cost (insn);
1360 if (INSN_CODE (insn) >= 0)
1362 if (dep_type == REG_DEP_ANTI)
1364 else if (dep_type == REG_DEP_OUTPUT)
1366 cost = (insn_default_latency (insn)
1367 - insn_default_latency (used));
1371 else if (bypass_p (insn))
1372 cost = insn_latency (insn, used);
1376 if (targetm.sched.adjust_cost_2)
1377 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
1379 else if (targetm.sched.adjust_cost != NULL)
	  /* This variable is used for backward compatibility with the
	     targets.  */
1383 rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
	  /* Make it self-cycled, so that if someone tries to walk over this
	     incomplete list he/she will be caught in an endless loop.  */
1387 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
1389 /* Targets use only REG_NOTE_KIND of the link. */
1390 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
1392 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
1395 free_INSN_LIST_node (dep_cost_rtx_link);
1402 DEP_COST (link) = cost;
1406 /* Compute cost of dependence LINK.
1407 This is the number of cycles between instruction issue and
1408 instruction results. */
1410 dep_cost (dep_t link)
  return dep_cost_1 (link, 0);
}
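/* The default cost rules above, modeled standalone with made-up
   latencies (the toy_* names are illustrative): a true dependence
   costs the producer's full latency, an anti-dependence costs nothing,
   and an output dependence costs the difference of the two default
   latencies, clamped to a minimum of one cycle.  */

enum toy_dep_type { TOY_DEP_TRUE, TOY_DEP_OUTPUT, TOY_DEP_ANTI };

static int
toy_dep_cost (enum toy_dep_type type, int pro_latency, int con_latency)
{
  int cost;

  switch (type)
    {
    case TOY_DEP_ANTI:
      return 0;
    case TOY_DEP_OUTPUT:
      cost = pro_latency - con_latency;
      return cost > 0 ? cost : 1;
    default:			/* TOY_DEP_TRUE */
      return pro_latency;
    }
}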
1415 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
1416 INSN_PRIORITY explicitly. */
1418 increase_insn_priority (rtx insn, int amount)
1420 if (!sel_sched_p ())
1422 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
1423 if (INSN_PRIORITY_KNOWN (insn))
1424 INSN_PRIORITY (insn) += amount;
1428 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
1429 Use EXPR_PRIORITY instead. */
1430 sel_add_to_insn_priority (insn, amount);
1434 /* Return 'true' if DEP should be included in priority calculations. */
1436 contributes_to_priority_p (dep_t dep)
1438 if (DEBUG_INSN_P (DEP_CON (dep))
1439 || DEBUG_INSN_P (DEP_PRO (dep)))
1442 /* Critical path is meaningful in block boundaries only. */
1443 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
1447 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
     then speculative instructions will be less likely to be
     scheduled.  That is because the priority of
     their producers will increase, and, thus, the
     producers will be more likely to be scheduled, thus
1452 resolving the dependence. */
1453 if (sched_deps_info->generate_spec_deps
1454 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
1455 && (DEP_STATUS (dep) & SPECULATIVE))
1461 /* Compute the number of nondebug forward deps of an insn. */
1464 dep_list_size (rtx insn)
1466 sd_iterator_def sd_it;
1468 int dbgcount = 0, nodbgcount = 0;
1470 if (!MAY_HAVE_DEBUG_INSNS)
1471 return sd_lists_size (insn, SD_LIST_FORW);
1473 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
1475 if (DEBUG_INSN_P (DEP_CON (dep)))
1477 else if (!DEBUG_INSN_P (DEP_PRO (dep)))
1481 gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));
1486 /* Compute the priority number for INSN. */
1490 if (! INSN_P (insn))
1493 /* We should not be interested in priority of an already scheduled insn. */
1494 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1496 if (!INSN_PRIORITY_KNOWN (insn))
1498 int this_priority = -1;
1500 if (dep_list_size (insn) == 0)
	/* ??? We should set INSN_PRIORITY to insn_cost when an insn has
	   some forward deps but all of them are ignored by
	   contributes_to_priority hook.  At the moment we set priority of
	   such insn to 0.  */
1505 this_priority = insn_cost (insn);
1508 rtx prev_first, twin;
	  /* For recovery check instructions we calculate priority slightly
	     differently from that of normal instructions.  Instead of walking
	     through INSN_FORW_DEPS (check) list, we walk through
	     INSN_FORW_DEPS list of each instruction in the corresponding
	     recovery block.  */
1517 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1518 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1519 if (!rec || rec == EXIT_BLOCK_PTR)
1521 prev_first = PREV_INSN (insn);
1526 prev_first = NEXT_INSN (BB_HEAD (rec));
1527 twin = PREV_INSN (BB_END (rec));
1532 sd_iterator_def sd_it;
1535 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1540 next = DEP_CON (dep);
1542 if (BLOCK_FOR_INSN (next) != rec)
1546 if (!contributes_to_priority_p (dep))
1550 cost = dep_cost (dep);
1553 struct _dep _dep1, *dep1 = &_dep1;
1555 init_dep (dep1, insn, next, REG_DEP_ANTI);
1557 cost = dep_cost (dep1);
1560 next_priority = cost + priority (next);
1562 if (next_priority > this_priority)
1563 this_priority = next_priority;
1567 twin = PREV_INSN (twin);
1569 while (twin != prev_first);
1572 if (this_priority < 0)
1574 gcc_assert (this_priority == -1);
1576 this_priority = insn_cost (insn);
1579 INSN_PRIORITY (insn) = this_priority;
1580 INSN_PRIORITY_STATUS (insn) = 1;
  return INSN_PRIORITY (insn);
}
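/* The recursion above computes a classic critical-path priority: the
   maximum over forward dependencies of dependence cost plus the
   consumer's priority, falling back to the insn's own cost when no
   dependence contributes.  A standalone sketch over an explicit DAG;
   the toy_* types are illustrative, and the `computed' flag plays the
   memoizing role of INSN_PRIORITY_KNOWN.  */

#define TOY_MAX_SUCCS 4

struct toy_dag_insn
{
  int cost;				/* insn_cost analogue.  */
  int n_succs;
  struct toy_dag_insn *succ[TOY_MAX_SUCCS];
  int succ_cost[TOY_MAX_SUCCS];		/* dep_cost analogue.  */
  int priority;
  int computed;
};

static int
toy_priority (struct toy_dag_insn *insn)
{
  int i;

  if (insn->computed)
    return insn->priority;

  insn->priority = -1;
  for (i = 0; i < insn->n_succs; i++)
    {
      int p = insn->succ_cost[i] + toy_priority (insn->succ[i]);

      if (p > insn->priority)
	insn->priority = p;
    }
  if (insn->priority < 0)	/* No forward deps: use own cost.  */
    insn->priority = insn->cost;
  insn->computed = 1;
  return insn->priority;
}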
1586 /* Macros and functions for keeping the priority queue sorted, and
1587 dealing with queuing and dequeuing of instructions. */
1589 #define SCHED_SORT(READY, N_READY) \
1590 do { if ((N_READY) == 2) \
1591 swap_sort (READY, N_READY); \
1592 else if ((N_READY) > 2) \
         qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }  \
while (0)
1596 /* Setup info about the current register pressure impact of scheduling
1597 INSN at the current scheduling point. */
1599 setup_insn_reg_pressure_info (rtx insn)
1601 int i, change, before, after, hard_regno;
1602 int excess_cost_change;
1603 enum machine_mode mode;
1605 struct reg_pressure_data *pressure_info;
1606 int *max_reg_pressure;
1607 struct reg_use_data *use;
1608 static int death[N_REG_CLASSES];
1610 gcc_checking_assert (!DEBUG_INSN_P (insn));
1612 excess_cost_change = 0;
1613 for (i = 0; i < ira_pressure_classes_num; i++)
1614 death[ira_pressure_classes[i]] = 0;
1615 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1616 if (dying_use_p (use))
1618 cl = sched_regno_pressure_class[use->regno];
1619 if (use->regno < FIRST_PSEUDO_REGISTER)
1623 += ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
1625 pressure_info = INSN_REG_PRESSURE (insn);
1626 max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1627 gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1628 for (i = 0; i < ira_pressure_classes_num; i++)
1630 cl = ira_pressure_classes[i];
1631 gcc_assert (curr_reg_pressure[cl] >= 0);
1632 change = (int) pressure_info[i].set_increase - death[cl];
1633 before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
1634 after = MAX (0, max_reg_pressure[i] + change
1635 - ira_available_class_regs[cl]);
1636 hard_regno = ira_class_hard_regs[cl][0];
1637 gcc_assert (hard_regno >= 0);
1638 mode = reg_raw_mode[hard_regno];
1639 excess_cost_change += ((after - before)
1640 * (ira_memory_move_cost[mode][cl][0]
1641 + ira_memory_move_cost[mode][cl][1]));
  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
}
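/* The arithmetic above on concrete made-up numbers: suppose a class
   has 4 allocatable registers, the insn's recorded max pressure for it
   is 5, and the insn sets one new register while two of its uses die.
   Then change = 1 - 2 = -1, before = MAX (0, 5 - 4) = 1,
   after = MAX (0, 5 - 1 - 4) = 0, so scheduling the insn now lowers
   the expected spill cost by one load cost plus one store cost for
   that class.  */
static int
toy_excess_cost_change (int max_pressure, int available,
			int set_increase, int deaths,
			int load_cost, int store_cost)
{
  int change = set_increase - deaths;
  int before = max_pressure - available;
  int after = max_pressure + change - available;

  if (before < 0)
    before = 0;
  if (after < 0)
    after = 0;
  return (after - before) * (load_cost + store_cost);
}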
1646 /* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */
1651 rank_for_schedule (const void *x, const void *y)
1653 rtx tmp = *(const rtx *) y;
1654 rtx tmp2 = *(const rtx *) x;
1655 int tmp_class, tmp2_class;
1656 int val, priority_val, info_val;
1658 if (MAY_HAVE_DEBUG_INSNS)
1660 /* Schedule debug insns as early as possible. */
1661 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
1663 else if (DEBUG_INSN_P (tmp2))
  /* The insn in a schedule group should be issued first.  */
1668 if (flag_sched_group_heuristic &&
1669 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
1670 return SCHED_GROUP_P (tmp2) ? 1 : -1;
1672 /* Make sure that priority of TMP and TMP2 are initialized. */
1673 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
1675 if (sched_pressure_p)
      /* Prefer insn whose scheduling results in the smallest register
	 pressure excess cost.  */
1681 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
1682 + (INSN_TICK (tmp) > clock_var
1683 ? INSN_TICK (tmp) - clock_var : 0)
1684 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
1685 - (INSN_TICK (tmp2) > clock_var
1686 ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
1691 if (sched_pressure_p
1692 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
1694 if (INSN_TICK (tmp) <= clock_var)
1696 else if (INSN_TICK (tmp2) <= clock_var)
1699 return INSN_TICK (tmp) - INSN_TICK (tmp2);
1702 /* If we are doing backtracking in this schedule, prefer insns that
1703 have forward dependencies with negative cost against an insn that
1704 was already scheduled. */
1705 if (current_sched_info->flags & DO_BACKTRACKING)
1707 priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
1709 return priority_val;
1712 /* Prefer insn with higher priority. */
1713 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
1715 if (flag_sched_critical_path_heuristic && priority_val)
1716 return priority_val;
  /* Prefer speculative insn with greater dependency weakness.  */
1719 if (flag_sched_spec_insn_heuristic && spec_info)
1725 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
1727 dw1 = ds_weak (ds1);
1731 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
1733 dw2 = ds_weak (ds2);
1738 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
1742 info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (flag_sched_rank_heuristic && info_val)
    return info_val;
  /* Compare insns based on their relation to the last scheduled
     non-debug insn.  */
1748 if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
1752 rtx last = last_nondebug_scheduled_insn;
1754 /* Classify the instructions into three classes:
         1) Data dependent on last scheduled insn.
1756 2) Anti/Output dependent on last scheduled insn.
1757 3) Independent of last scheduled insn, or has latency of one.
1758 Choose the insn from the highest numbered class if different. */
1759 dep1 = sd_find_dep_between (last, tmp, true);
1761 if (dep1 == NULL || dep_cost (dep1) == 1)
1763 else if (/* Data dependence. */
1764 DEP_TYPE (dep1) == REG_DEP_TRUE)
1769 dep2 = sd_find_dep_between (last, tmp2, true);
1771 if (dep2 == NULL || dep_cost (dep2) == 1)
1773 else if (/* Data dependence. */
1774 DEP_TYPE (dep2) == REG_DEP_TRUE)
1779 if ((val = tmp2_class - tmp_class))
1783 /* Prefer the insn which has more later insns that depend on it.
1784 This gives the scheduler more freedom when scheduling later
1785 instructions at the expense of added register pressure. */
1787 val = (dep_list_size (tmp2) - dep_list_size (tmp));
1789 if (flag_sched_dep_count_heuristic && val != 0)
1792 /* If insns are equally good, sort by INSN_LUID (original insn order),
1793 so that we make the sort stable. This minimizes instruction movement,
1794 thus minimizing sched's effect on debugging and cross-jumping. */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
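/* The comparator pattern above in miniature: a cascade of tie-breakers
   with a unique id as the final tier, so that the ordering is total
   and the sort is effectively stable.  Toy types and fields, not
   GCC's; usable as
   qsort (vec, n, sizeof (struct toy_ready_insn), toy_rank);  */

struct toy_ready_insn
{
  int priority;		/* Critical-path priority; higher is better.  */
  int dep_count;	/* Number of dependent insns; more is better.  */
  int uid;		/* Unique, so the result is never 0.  */
};

static int
toy_rank (const void *xp, const void *yp)
{
  const struct toy_ready_insn *x = (const struct toy_ready_insn *) xp;
  const struct toy_ready_insn *y = (const struct toy_ready_insn *) yp;
  int val;

  if ((val = y->priority - x->priority) != 0)
    return val;
  if ((val = y->dep_count - x->dep_count) != 0)
    return val;
  return x->uid - y->uid;
}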
/* Resort the array A in which only the element at index N may be out of
   order.  */
1800 HAIFA_INLINE static void
1801 swap_sort (rtx *a, int n)
1803 rtx insn = a[n - 1];
1806 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
1814 /* Add INSN to the insn queue so that it can be executed at least
1815 N_CYCLES after the currently executing insn. Preserve insns
   chain for debugging purposes.  REASON will be printed in debugging
   output.  */
1819 HAIFA_INLINE static void
1820 queue_insn (rtx insn, int n_cycles, const char *reason)
1822 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1823 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
1826 gcc_assert (n_cycles <= max_insn_queue_index);
1827 gcc_assert (!DEBUG_INSN_P (insn));
1829 insn_queue[next_q] = link;
1832 if (sched_verbose >= 2)
1834 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
1835 (*current_sched_info->print_insn) (insn, 0));
1837 fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
1840 QUEUE_INDEX (insn) = next_q;
1842 if (current_sched_info->flags & DO_BACKTRACKING)
1844 new_tick = clock_var + n_cycles;
1845 if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
1846 INSN_TICK (insn) = new_tick;
1848 if (INSN_EXACT_TICK (insn) != INVALID_TICK
1849 && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
1851 must_backtrack = true;
1852 if (sched_verbose >= 2)
1853 fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
1858 /* Remove INSN from queue. */
1860 queue_remove (rtx insn)
1862 gcc_assert (QUEUE_INDEX (insn) >= 0);
1863 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
1865 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
1868 /* Return a pointer to the bottom of the ready list, i.e. the insn
1869 with the lowest priority. */
1872 ready_lastpos (struct ready_list *ready)
1874 gcc_assert (ready->n_ready >= 1);
1875 return ready->vec + ready->first - ready->n_ready + 1;
1878 /* Add an element INSN to the ready list so that it ends up with the
1879 lowest/highest priority depending on FIRST_P. */
1881 HAIFA_INLINE static void
1882 ready_add (struct ready_list *ready, rtx insn, bool first_p)
1886 if (ready->first == ready->n_ready)
1888 memmove (ready->vec + ready->veclen - ready->n_ready,
1889 ready_lastpos (ready),
1890 ready->n_ready * sizeof (rtx));
1891 ready->first = ready->veclen - 1;
1893 ready->vec[ready->first - ready->n_ready] = insn;
1897 if (ready->first == ready->veclen - 1)
1900 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
1901 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1902 ready_lastpos (ready),
1903 ready->n_ready * sizeof (rtx));
1904 ready->first = ready->veclen - 2;
1906 ready->vec[++(ready->first)] = insn;
1910 if (DEBUG_INSN_P (insn))
1913 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1914 QUEUE_INDEX (insn) = QUEUE_READY;
1916 if (INSN_EXACT_TICK (insn) != INVALID_TICK
1917 && INSN_EXACT_TICK (insn) < clock_var)
1919 must_backtrack = true;
/* Remove the element with the highest priority from the ready list and
   return it.  */
1926 HAIFA_INLINE static rtx
1927 ready_remove_first (struct ready_list *ready)
1931 gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  if (DEBUG_INSN_P (t))
    ready->n_debug--;
1936 /* If the queue becomes empty, reset it. */
1937 if (ready->n_ready == 0)
1938 ready->first = ready->veclen - 1;
1940 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}
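/* The layout assumed by the functions above, in miniature: the N_READY
   elements occupy VEC[FIRST - N_READY + 1 .. FIRST], with the highest
   priority at VEC[FIRST], so removing the best insn is a single
   decrement of FIRST.  Toy types, not GCC's struct ready_list.  */

struct toy_ready
{
  int *vec;
  int veclen;
  int first;		/* Index of the highest-priority element.  */
  int n_ready;
};

static int
toy_ready_remove_first (struct toy_ready *r)
{
  int t = r->vec[r->first--];

  r->n_ready--;
  if (r->n_ready == 0)		/* Reset an emptied list.  */
    r->first = r->veclen - 1;
  return t;
}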
1946 /* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we will try to choose a ready insn which
   permits starting the maximum number of insns on the same cycle.  */
1950 /* Return a pointer to the element INDEX from the ready. INDEX for
   insn with the highest priority is 0, and the lowest priority has
   N_READY - 1.  */
1955 ready_element (struct ready_list *ready, int index)
1957 gcc_assert (ready->n_ready && index < ready->n_ready);
1959 return ready->vec[ready->first - index];
1962 /* Remove the element INDEX from the ready list and return it. INDEX
   for insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */
1966 HAIFA_INLINE static rtx
1967 ready_remove (struct ready_list *ready, int index)
1973 return ready_remove_first (ready);
1974 gcc_assert (ready->n_ready && index < ready->n_ready);
1975 t = ready->vec[ready->first - index];
1977 if (DEBUG_INSN_P (t))
1979 for (i = index; i < ready->n_ready; i++)
1980 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1981 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1985 /* Remove INSN from the ready list. */
1987 ready_remove_insn (rtx insn)
1991 for (i = 0; i < readyp->n_ready; i++)
1992 if (ready_element (readyp, i) == insn)
1994 ready_remove (readyp, i);
/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */
2004 ready_sort (struct ready_list *ready)
2007 rtx *first = ready_lastpos (ready);
2009 if (sched_pressure_p)
2011 for (i = 0; i < ready->n_ready; i++)
2012 if (!DEBUG_INSN_P (first[i]))
2013 setup_insn_reg_pressure_info (first[i]);
2015 SCHED_SORT (first, ready->n_ready);
2018 /* PREV is an insn that is ready to execute. Adjust its priority if that
2019 will help shorten or lengthen register lifetimes as appropriate. Also
2020 provide a hook for the target to tweak itself. */
2022 HAIFA_INLINE static void
2023 adjust_priority (rtx prev)
2025 /* ??? There used to be code here to try and estimate how an insn
2026 affected register lifetimes, but it did it by looking at REG_DEAD
2027 notes, which we removed in schedule_region. Nor did it try to
2028 take into account register pressure or anything useful like that.
2030 Revisit when we have a machine model to work with and not before. */
2032 if (targetm.sched.adjust_priority)
2033 INSN_PRIORITY (prev) =
2034 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
2037 /* Advance DFA state STATE on one cycle. */
2039 advance_state (state_t state)
2041 if (targetm.sched.dfa_pre_advance_cycle)
2042 targetm.sched.dfa_pre_advance_cycle ();
2044 if (targetm.sched.dfa_pre_cycle_insn)
2045 state_transition (state,
2046 targetm.sched.dfa_pre_cycle_insn ());
2048 state_transition (state, NULL);
2050 if (targetm.sched.dfa_post_cycle_insn)
2051 state_transition (state,
2052 targetm.sched.dfa_post_cycle_insn ());
2054 if (targetm.sched.dfa_post_advance_cycle)
2055 targetm.sched.dfa_post_advance_cycle ();
2058 /* Advance time on one cycle. */
2059 HAIFA_INLINE static void
2060 advance_one_cycle (void)
2062 advance_state (curr_state);
2063 if (sched_verbose >= 6)
2064 fprintf (sched_dump, ";;\tAdvanced a state.\n");
2067 /* Update register pressure after scheduling INSN. */
2069 update_register_pressure (rtx insn)
2071 struct reg_use_data *use;
2072 struct reg_set_data *set;
2074 gcc_checking_assert (!DEBUG_INSN_P (insn));
2076 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2077 if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
2078 mark_regno_birth_or_death (use->regno, false);
2079 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
2080 mark_regno_birth_or_death (set->regno, true);
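/* For illustration: if INSN contains the last use of one register and
   sets another, the loops above first record the death (decreasing the
   current pressure of the dying register's class) and then the birth
   (increasing the pressure of the set register's class).  */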
2083 /* Set up or update (if UPDATE_P) max register pressure (see its
2084 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
2085 after insn AFTER. */
2087 setup_insn_max_reg_pressure (rtx after, bool update_p)
2092 static int max_reg_pressure[N_REG_CLASSES];
2094 save_reg_pressure ();
2095 for (i = 0; i < ira_pressure_classes_num; i++)
2096 max_reg_pressure[ira_pressure_classes[i]]
2097 = curr_reg_pressure[ira_pressure_classes[i]];
2098 for (insn = NEXT_INSN (after);
2099 insn != NULL_RTX && ! BARRIER_P (insn)
2100 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
2101 insn = NEXT_INSN (insn))
2102 if (NONDEBUG_INSN_P (insn))
2105 for (i = 0; i < ira_pressure_classes_num; i++)
2107 p = max_reg_pressure[ira_pressure_classes[i]];
2108 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
2111 INSN_MAX_REG_PRESSURE (insn)[i]
2112 = max_reg_pressure[ira_pressure_classes[i]];
2115 if (update_p && eq_p)
2117 update_register_pressure (insn);
2118 for (i = 0; i < ira_pressure_classes_num; i++)
2119 if (max_reg_pressure[ira_pressure_classes[i]]
2120 < curr_reg_pressure[ira_pressure_classes[i]])
2121 max_reg_pressure[ira_pressure_classes[i]]
2122 = curr_reg_pressure[ira_pressure_classes[i]];
2124 restore_reg_pressure ();
2127 /* Update the current register pressure after scheduling INSN. Update
2128 also max register pressure for unscheduled insns of the current
2131 update_reg_and_insn_max_reg_pressure (rtx insn)
2134 int before[N_REG_CLASSES];
2136 for (i = 0; i < ira_pressure_classes_num; i++)
2137 before[i] = curr_reg_pressure[ira_pressure_classes[i]];
2138 update_register_pressure (insn);
2139 for (i = 0; i < ira_pressure_classes_num; i++)
2140 if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
2142 if (i < ira_pressure_classes_num)
2143 setup_insn_max_reg_pressure (insn, true);
/* Set up register pressure at the beginning of basic block BB, whose
   insns start after insn AFTER.  Also set up max register pressure
   for all insns of the basic block.  */
2150 sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
2152 gcc_assert (sched_pressure_p);
2153 initiate_bb_reg_pressure_info (bb);
2154 setup_insn_max_reg_pressure (after, false);
2157 /* If doing predication while scheduling, verify whether INSN, which
2158 has just been scheduled, clobbers the conditions of any
2159 instructions that must be predicated in order to break their
2160 dependencies. If so, remove them from the queues so that they will
2161 only be scheduled once their control dependency is resolved. */
2164 check_clobbered_conditions (rtx insn)
2169 if ((current_sched_info->flags & DO_PREDICATION) == 0)
2172 find_all_hard_reg_sets (insn, &t);
2175 for (i = 0; i < ready.n_ready; i++)
2177 rtx x = ready_element (&ready, i);
2178 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
2180 ready_remove_insn (x);
2184 for (i = 0; i <= max_insn_queue_index; i++)
2187 int q = NEXT_Q_AFTER (q_ptr, i);
2190 for (link = insn_queue[q]; link; link = XEXP (link, 1))
2192 rtx x = XEXP (link, 0);
2193 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
2202 /* A structure that holds local state for the loop in schedule_block. */
2203 struct sched_block_state
2205 /* True if no real insns have been scheduled in the current cycle. */
2206 bool first_cycle_insn_p;
2207 /* True if a shadow insn has been scheduled in the current cycle, which
2208 means that no more normal insns can be issued. */
2209 bool shadows_only_p;
2210 /* True if we're winding down a modulo schedule, which means that we only
2211 issue insns with INSN_EXACT_TICK set. */
2212 bool modulo_epilogue;
2213 /* Initialized with the machine's issue rate every cycle, and updated
2214 by calls to the variable_issue hook. */
2218 /* INSN is the "currently executing insn". Launch each insn which was
2219 waiting on INSN. READY is the ready list which contains the insns
2220 that are ready to fire. CLOCK is the current cycle. The function
2221 returns necessary cycle advance after issuing the insn (it is not
2222 zero for insns in a schedule group). */
2225 schedule_insn (rtx insn)
2227 sd_iterator_def sd_it;
2232 if (sched_verbose >= 1)
2234 struct reg_pressure_data *pressure_info;
2237 print_insn (buf, insn, 0);
2239 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
2241 if (recog_memoized (insn) < 0)
2242 fprintf (sched_dump, "nothing");
2244 print_reservation (sched_dump, insn);
2245 pressure_info = INSN_REG_PRESSURE (insn);
2246 if (pressure_info != NULL)
2248 fputc (':', sched_dump);
2249 for (i = 0; i < ira_pressure_classes_num; i++)
2250 fprintf (sched_dump, "%s%+d(%d)",
2251 reg_class_names[ira_pressure_classes[i]],
2252 pressure_info[i].set_increase, pressure_info[i].change);
2254 fputc ('\n', sched_dump);
2257 if (sched_pressure_p && !DEBUG_INSN_P (insn))
2258 update_reg_and_insn_max_reg_pressure (insn);
2260 /* Scheduling instruction should have all its dependencies resolved and
2261 should have been removed from the ready list. */
2262 gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
2264 /* Reset debug insns invalidated by moving this insn. */
2265 if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
2266 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
2267 sd_iterator_cond (&sd_it, &dep);)
2269 rtx dbg = DEP_PRO (dep);
2270 struct reg_use_data *use, *next;
2272 if (DEP_STATUS (dep) & DEP_CANCELLED)
2274 sd_iterator_next (&sd_it);
2278 gcc_assert (DEBUG_INSN_P (dbg));
2280 if (sched_verbose >= 6)
2281 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
2284 /* ??? Rather than resetting the debug insn, we might be able
2285 to emit a debug temp before the just-scheduled insn, but
2286 this would involve checking that the expression at the
2287 point of the debug insn is equivalent to the expression
2288 before the just-scheduled insn. They might not be: the
2289 expression in the debug insn may depend on other insns not
2290 yet scheduled that set MEMs, REGs or even other debug
2291 insns. It's not clear that attempting to preserve debug
2292 information in these cases is worth the effort, given how
2293 uncommon these resets are and the likelihood that the debug
2294 temps introduced won't survive the schedule change. */
2295 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
2296 df_insn_rescan (dbg);
2298 /* Unknown location doesn't use any registers. */
2299 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
2301 struct reg_use_data *prev = use;
2303 /* Remove use from the cyclic next_regno_use chain first. */
2304 while (prev->next_regno_use != use)
2305 prev = prev->next_regno_use;
2306 prev->next_regno_use = use->next_regno_use;
2307 next = use->next_insn_use;
2310 INSN_REG_USE_LIST (dbg) = NULL;
2312 /* We delete rather than resolve these deps, otherwise we
2313 crash in sched_free_deps(), because forward deps are
2314 expected to be released before backward deps. */
2315 sd_delete_dep (sd_it);
2318 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
2319 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
2321 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
2322 if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if the following flag is set.  */
2325 gcc_assert (flag_sched_stalled_insns);
2327 /* ??? Probably, if INSN is scheduled prematurely, we should leave
2328 INSN_TICK untouched. This is a machine-dependent issue, actually. */
2329 INSN_TICK (insn) = clock_var;
2331 check_clobbered_conditions (insn);
2333 /* Update dependent instructions. */
2334 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
2335 sd_iterator_cond (&sd_it, &dep);)
2337 rtx next = DEP_CON (dep);
2338 bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
2340 /* Resolve the dependence between INSN and NEXT.
2341 sd_resolve_dep () moves current dep to another list thus
2342 advancing the iterator. */
2343 sd_resolve_dep (sd_it);
2347 if (QUEUE_INDEX (next) != QUEUE_SCHEDULED)
2349 int tick = INSN_TICK (next);
2350 gcc_assert (ORIG_PAT (next) != NULL_RTX);
2351 haifa_change_pattern (next, ORIG_PAT (next));
2352 INSN_TICK (next) = tick;
2353 if (sd_lists_empty_p (next, SD_LIST_BACK))
2354 TODO_SPEC (next) = 0;
2355 else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
2356 TODO_SPEC (next) = HARD_DEP;
2361 /* Don't bother trying to mark next as ready if insn is a debug
2362 insn. If insn is the last hard dependency, it will have
2363 already been discounted. */
2364 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
2367 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
2371 effective_cost = try_ready (next);
2373 if (effective_cost >= 0
2374 && SCHED_GROUP_P (next)
2375 && advance < effective_cost)
2376 advance = effective_cost;
      /* A check insn always has only one forward dependence (to the first
	 insn in the recovery block); therefore, this will be executed only
	 once.  */
2382 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
2383 fix_recovery_deps (RECOVERY_BLOCK (insn));
  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
2393 && GET_CODE (PATTERN (insn)) != USE
2394 && GET_CODE (PATTERN (insn)) != CLOBBER
2395 && !DEBUG_INSN_P (insn))
2397 if (reload_completed)
2398 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
2399 last_clock_var = clock_var;
2405 /* Functions for handling of notes. */
2407 /* Add note list that ends on FROM_END to the end of TO_ENDP. */
2409 concat_note_lists (rtx from_end, rtx *to_endp)
  /* It's easy when we have nothing to concat.  */
2414 if (from_end == NULL)
2417 /* It's also easy when destination is empty. */
2418 if (*to_endp == NULL)
2420 *to_endp = from_end;
2424 from_start = from_end;
2425 while (PREV_INSN (from_start) != NULL)
2426 from_start = PREV_INSN (from_start);
2428 PREV_INSN (from_start) = *to_endp;
2429 NEXT_INSN (*to_endp) = from_start;
2430 *to_endp = from_end;
2433 /* Delete notes between HEAD and TAIL and put them in the chain
2434 of notes ended by NOTE_LIST. */
2436 remove_notes (rtx head, rtx tail)
2438 rtx next_tail, insn, next;
2441 if (head == tail && !INSN_P (head))
2444 next_tail = NEXT_INSN (tail);
2445 for (insn = head; insn != next_tail; insn = next)
2447 next = NEXT_INSN (insn);
2451 switch (NOTE_KIND (insn))
2453 case NOTE_INSN_BASIC_BLOCK:
2456 case NOTE_INSN_EPILOGUE_BEG:
2460 add_reg_note (next, REG_SAVE_NOTE,
2461 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
2469 /* Add the note to list that ends at NOTE_LIST. */
2470 PREV_INSN (insn) = note_list;
2471 NEXT_INSN (insn) = NULL_RTX;
2473 NEXT_INSN (note_list) = insn;
2478 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
2482 /* A structure to record enough data to allow us to backtrack the scheduler to
2483 a previous state. */
2484 struct haifa_saved_data
2486 /* Next entry on the list. */
2487 struct haifa_saved_data *next;
2489 /* Backtracking is associated with scheduling insns that have delay slots.
2490 DELAY_PAIR points to the structure that contains the insns involved, and
2491 the number of cycles between them. */
2492 struct delay_pair *delay_pair;
2494 /* Data used by the frontend (e.g. sched-ebb or sched-rgn). */
2495 void *fe_saved_data;
2496 /* Data used by the backend. */
2497 void *be_saved_data;
2499 /* Copies of global state. */
2500 int clock_var, last_clock_var;
2501 struct ready_list ready;
2504 rtx last_scheduled_insn;
2505 rtx last_nondebug_scheduled_insn;
2506 int cycle_issued_insns;
2508 /* Copies of state used in the inner loop of schedule_block. */
2509 struct sched_block_state sched_block;
2511 /* We don't need to save q_ptr, as its value is arbitrary and we can set it
2512 to 0 when restoring. */
2517 /* A record, in reverse order, of all scheduled insns which have delay slots
2518 and may require backtracking. */
2519 static struct haifa_saved_data *backtrack_queue;
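/* A sketch of the protocol used below: save_backtrack_point pushes an
   entry onto this list when the first insn of a delay pair is
   scheduled; if verify_shadows later finds a pair whose delay
   requirement can no longer be met, schedule_block unwinds by calling
   unschedule_insns_until, free_topmost_backtrack_point and
   restore_last_backtrack_point.  */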
/* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit
   according to SET_P.  */
2524 mark_backtrack_feeds (rtx insn, int set_p)
2526 sd_iterator_def sd_it;
2528 FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
2530 FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
2534 /* Save the current scheduler state so that we can backtrack to it
2535 later if necessary. PAIR gives the insns that make it necessary to
2536 save this point. SCHED_BLOCK is the local state of schedule_block
   that needs to be saved.  */
2539 save_backtrack_point (struct delay_pair *pair,
2540 struct sched_block_state sched_block)
2543 struct haifa_saved_data *save = XNEW (struct haifa_saved_data);
2545 save->curr_state = xmalloc (dfa_state_size);
2546 memcpy (save->curr_state, curr_state, dfa_state_size);
2548 save->ready.first = ready.first;
2549 save->ready.n_ready = ready.n_ready;
2550 save->ready.n_debug = ready.n_debug;
2551 save->ready.veclen = ready.veclen;
2552 save->ready.vec = XNEWVEC (rtx, ready.veclen);
2553 memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
2555 save->insn_queue = XNEWVEC (rtx, max_insn_queue_index + 1);
2556 save->q_size = q_size;
2557 for (i = 0; i <= max_insn_queue_index; i++)
2559 int q = NEXT_Q_AFTER (q_ptr, i);
2560 save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
2563 save->clock_var = clock_var;
2564 save->last_clock_var = last_clock_var;
2565 save->cycle_issued_insns = cycle_issued_insns;
2566 save->last_scheduled_insn = last_scheduled_insn;
2567 save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
2569 save->sched_block = sched_block;
2571 if (current_sched_info->save_state)
2572 save->fe_saved_data = (*current_sched_info->save_state) ();
2574 if (targetm.sched.alloc_sched_context)
2576 save->be_saved_data = targetm.sched.alloc_sched_context ();
2577 targetm.sched.init_sched_context (save->be_saved_data, false);
2580 save->be_saved_data = NULL;
2582 save->delay_pair = pair;
2584 save->next = backtrack_queue;
2585 backtrack_queue = save;
2589 mark_backtrack_feeds (pair->i2, 1);
2590 INSN_TICK (pair->i2) = INVALID_TICK;
2591 INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
2592 SHADOW_P (pair->i2) = pair->stages == 0;
2593 pair = pair->next_same_i1;
2597 /* Walk the ready list and all queues. If any insns have unresolved backwards
2598 dependencies, these must be cancelled deps, broken by predication. Set or
2599 clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS. */
2602 toggle_cancelled_flags (bool set)
2605 sd_iterator_def sd_it;
2608 if (ready.n_ready > 0)
2610 rtx *first = ready_lastpos (&ready);
2611 for (i = 0; i < ready.n_ready; i++)
2612 FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
2613 if (!DEBUG_INSN_P (DEP_PRO (dep)))
2616 DEP_STATUS (dep) |= DEP_CANCELLED;
2618 DEP_STATUS (dep) &= ~DEP_CANCELLED;
2621 for (i = 0; i <= max_insn_queue_index; i++)
2623 int q = NEXT_Q_AFTER (q_ptr, i);
2625 for (link = insn_queue[q]; link; link = XEXP (link, 1))
2627 rtx insn = XEXP (link, 0);
2628 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
2629 if (!DEBUG_INSN_P (DEP_PRO (dep)))
2632 DEP_STATUS (dep) |= DEP_CANCELLED;
2634 DEP_STATUS (dep) &= ~DEP_CANCELLED;
2640 /* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
   Restore their dependencies to an unresolved state, and mark them as
   queued nowhere.  */
2645 unschedule_insns_until (rtx insn)
2647 VEC (rtx, heap) *recompute_vec;
2649 recompute_vec = VEC_alloc (rtx, heap, 0);
2651 /* Make two passes over the insns to be unscheduled. First, we clear out
2652 dependencies and other trivial bookkeeping. */
2656 sd_iterator_def sd_it;
2659 last = VEC_pop (rtx, scheduled_insns);
      /* This will be changed by restore_backtrack_point if the insn is
	 in any queue.  */
2663 QUEUE_INDEX (last) = QUEUE_NOWHERE;
2665 INSN_TICK (last) = INVALID_TICK;
2667 if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
2668 modulo_insns_scheduled--;
2670 for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
2671 sd_iterator_cond (&sd_it, &dep);)
2673 rtx con = DEP_CON (dep);
2674 sd_unresolve_dep (sd_it);
2675 if (!MUST_RECOMPUTE_SPEC_P (con))
2677 MUST_RECOMPUTE_SPEC_P (con) = 1;
2678 VEC_safe_push (rtx, heap, recompute_vec, con);
  /* A second pass, to update ready and speculation status for insns
     depending on the unscheduled ones.  The first pass must have
     popped the scheduled_insns vector up to the point where we
     restart scheduling, as recompute_todo_spec requires it to be
     up-to-date.  */
2691 while (!VEC_empty (rtx, recompute_vec))
2695 con = VEC_pop (rtx, recompute_vec);
2696 MUST_RECOMPUTE_SPEC_P (con) = 0;
2697 if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
2699 TODO_SPEC (con) = HARD_DEP;
2700 INSN_TICK (con) = INVALID_TICK;
2701 if (PREDICATED_PAT (con) != NULL_RTX)
2702 haifa_change_pattern (con, ORIG_PAT (con));
2704 else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
2705 TODO_SPEC (con) = recompute_todo_spec (con);
2707 VEC_free (rtx, heap, recompute_vec);
2710 /* Restore scheduler state from the topmost entry on the backtracking queue.
2711 PSCHED_BLOCK_P points to the local data of schedule_block that we must
2712 overwrite with the saved data.
2713 The caller must already have called unschedule_insns_until. */
2716 restore_last_backtrack_point (struct sched_block_state *psched_block)
2720 struct haifa_saved_data *save = backtrack_queue;
2722 backtrack_queue = save->next;
2724 if (current_sched_info->restore_state)
2725 (*current_sched_info->restore_state) (save->fe_saved_data);
2727 if (targetm.sched.alloc_sched_context)
2729 targetm.sched.set_sched_context (save->be_saved_data);
2730 targetm.sched.free_sched_context (save->be_saved_data);
  /* Clear the QUEUE_INDEX of everything in the ready list or one
     of the queues.  */
2735 if (ready.n_ready > 0)
2737 rtx *first = ready_lastpos (&ready);
2738 for (i = 0; i < ready.n_ready; i++)
2740 rtx insn = first[i];
2741 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
2742 INSN_TICK (insn) = INVALID_TICK;
2745 for (i = 0; i <= max_insn_queue_index; i++)
2747 int q = NEXT_Q_AFTER (q_ptr, i);
2749 for (link = insn_queue[q]; link; link = XEXP (link, 1))
2751 rtx x = XEXP (link, 0);
2752 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2753 INSN_TICK (x) = INVALID_TICK;
2755 free_INSN_LIST_list (&insn_queue[q]);
2759 ready = save->ready;
2761 if (ready.n_ready > 0)
2763 rtx *first = ready_lastpos (&ready);
2764 for (i = 0; i < ready.n_ready; i++)
2766 rtx insn = first[i];
2767 QUEUE_INDEX (insn) = QUEUE_READY;
2768 TODO_SPEC (insn) = recompute_todo_spec (insn);
2769 INSN_TICK (insn) = save->clock_var;
2774 q_size = save->q_size;
2775 for (i = 0; i <= max_insn_queue_index; i++)
2777 int q = NEXT_Q_AFTER (q_ptr, i);
2779 insn_queue[q] = save->insn_queue[q];
2781 for (link = insn_queue[q]; link; link = XEXP (link, 1))
2783 rtx x = XEXP (link, 0);
2784 QUEUE_INDEX (x) = i;
2785 TODO_SPEC (x) = recompute_todo_spec (x);
2786 INSN_TICK (x) = save->clock_var + i;
2789 free (save->insn_queue);
2791 toggle_cancelled_flags (true);
2793 clock_var = save->clock_var;
2794 last_clock_var = save->last_clock_var;
2795 cycle_issued_insns = save->cycle_issued_insns;
2796 last_scheduled_insn = save->last_scheduled_insn;
2797 last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
2799 *psched_block = save->sched_block;
2801 memcpy (curr_state, save->curr_state, dfa_state_size);
2802 free (save->curr_state);
2804 mark_backtrack_feeds (save->delay_pair->i2, 0);
2808 for (save = backtrack_queue; save; save = save->next)
2810 mark_backtrack_feeds (save->delay_pair->i2, 1);
2814 /* Discard all data associated with the topmost entry in the backtrack
2815 queue. If RESET_TICK is false, we just want to free the data. If true,
2816 we are doing this because we discovered a reason to backtrack. In the
2817 latter case, also reset the INSN_TICK for the shadow insn. */
2819 free_topmost_backtrack_point (bool reset_tick)
2821 struct haifa_saved_data *save = backtrack_queue;
2824 backtrack_queue = save->next;
2828 struct delay_pair *pair = save->delay_pair;
2831 INSN_TICK (pair->i2) = INVALID_TICK;
2832 INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
2833 pair = pair->next_same_i1;
2836 if (targetm.sched.free_sched_context)
2837 targetm.sched.free_sched_context (save->be_saved_data);
2838 if (current_sched_info->restore_state)
2839 free (save->fe_saved_data);
2840 for (i = 0; i <= max_insn_queue_index; i++)
2841 free_INSN_LIST_list (&save->insn_queue[i]);
2842 free (save->insn_queue);
2843 free (save->curr_state);
2844 free (save->ready.vec);
2848 /* Free the entire backtrack queue. */
2850 free_backtrack_queue (void)
2852 while (backtrack_queue)
2853 free_topmost_backtrack_point (false);
/* Compute INSN_TICK_ESTIMATE for INSN.  PROCESSED is a bitmap of
   instructions we've previously encountered; a set bit prevents
   recursion.  BUDGET is a limit on how far ahead we look; it is
   reduced on recursive calls.  Return true if we produced a good
   estimate, or false if we exceeded the budget.  */
2862 estimate_insn_tick (bitmap processed, rtx insn, int budget)
2864 sd_iterator_def sd_it;
2866 int earliest = INSN_TICK (insn);
2868 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
2870 rtx pro = DEP_PRO (dep);
2873 if (DEP_STATUS (dep) & DEP_CANCELLED)
2876 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
2877 gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
2880 int cost = dep_cost (dep);
2883 if (!bitmap_bit_p (processed, INSN_LUID (pro)))
2885 if (!estimate_insn_tick (processed, pro, budget - cost))
2888 gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
2889 t = INSN_TICK_ESTIMATE (pro) + cost;
2890 if (earliest == INVALID_TICK || t > earliest)
2894 bitmap_set_bit (processed, INSN_LUID (insn));
2895 INSN_TICK_ESTIMATE (insn) = earliest;
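/* A worked example: suppose INSN has a single unscheduled predecessor
   PRO with dep_cost 2 and INSN_TICK_ESTIMATE (PRO) == 5; the loop
   above then raises EARLIEST to at least 7.  If recursing into a
   predecessor with BUDGET - cost runs out of budget, the whole
   estimate is abandoned.  */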
2899 /* Examine the pair of insns in P, and estimate (optimistically, assuming
2900 infinite resources) the cycle in which the delayed shadow can be issued.
2901 Return the number of cycles that must pass before the real insn can be
2902 issued in order to meet this constraint. */
2904 estimate_shadow_tick (struct delay_pair *p)
2906 bitmap_head processed;
2909 bitmap_initialize (&processed, 0);
2911 cutoff = !estimate_insn_tick (&processed, p->i2,
2912 max_insn_queue_index + pair_delay (p));
2913 bitmap_clear (&processed);
2915 return max_insn_queue_index;
2916 t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
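  /* E.g., if INSN_TICK_ESTIMATE (p->i2) is 12 while clock_var is 5 and
     pair_delay (p) is 4, then t = 12 - (5 + 4 + 1) = 2: the real insn
     must wait two more cycles for the shadow's constraint to be
     satisfiable.  */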
2922 /* If INSN has no unresolved backwards dependencies, add it to the schedule and
2923 recursively resolve all its forward dependencies. */
2925 resolve_dependencies (rtx insn)
2927 sd_iterator_def sd_it;
2930 /* Don't use sd_lists_empty_p; it ignores debug insns. */
2931 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
2932 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
2935 if (sched_verbose >= 4)
2936 fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));
2938 if (QUEUE_INDEX (insn) >= 0)
2939 queue_remove (insn);
2941 VEC_safe_push (rtx, heap, scheduled_insns, insn);
2943 /* Update dependent instructions. */
2944 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
2945 sd_iterator_cond (&sd_it, &dep);)
2947 rtx next = DEP_CON (dep);
2949 if (sched_verbose >= 4)
2950 fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
2953 /* Resolve the dependence between INSN and NEXT.
2954 sd_resolve_dep () moves current dep to another list thus
2955 advancing the iterator. */
2956 sd_resolve_dep (sd_it);
2958 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
2960 resolve_dependencies (next);
	/* A check insn always has only one forward dependence (to the first
	   insn in the recovery block); therefore, this will be executed only
	   once.  */
2966 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
/* Return the head and tail pointers of the ebb starting at BEG and
   ending at END.  */
2975 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
2977 rtx beg_head = BB_HEAD (beg);
2978 rtx beg_tail = BB_END (beg);
2979 rtx end_head = BB_HEAD (end);
2980 rtx end_tail = BB_END (end);
2982 /* Don't include any notes or labels at the beginning of the BEG
   basic block, or notes at the end of the END basic block.  */
2985 if (LABEL_P (beg_head))
2986 beg_head = NEXT_INSN (beg_head);
2988 while (beg_head != beg_tail)
2989 if (NOTE_P (beg_head))
2990 beg_head = NEXT_INSN (beg_head);
2991 else if (DEBUG_INSN_P (beg_head))
2995 for (note = NEXT_INSN (beg_head);
2999 next = NEXT_INSN (note);
3002 if (sched_verbose >= 9)
3003 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
3005 reorder_insns_nobb (note, note, PREV_INSN (beg_head));
3007 if (BLOCK_FOR_INSN (note) != beg)
3008 df_insn_change_bb (note, beg);
3010 else if (!DEBUG_INSN_P (note))
3022 end_head = beg_head;
3023 else if (LABEL_P (end_head))
3024 end_head = NEXT_INSN (end_head);
3026 while (end_head != end_tail)
3027 if (NOTE_P (end_tail))
3028 end_tail = PREV_INSN (end_tail);
3029 else if (DEBUG_INSN_P (end_tail))
3033 for (note = PREV_INSN (end_tail);
3037 prev = PREV_INSN (note);
3040 if (sched_verbose >= 9)
3041 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
3043 reorder_insns_nobb (note, note, end_tail);
3045 if (end_tail == BB_END (end))
3046 BB_END (end) = note;
3048 if (BLOCK_FOR_INSN (note) != end)
3049 df_insn_change_bb (note, end);
3051 else if (!DEBUG_INSN_P (note))
3063 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
3066 no_real_insns_p (const_rtx head, const_rtx tail)
3068 while (head != NEXT_INSN (tail))
3070 if (!NOTE_P (head) && !LABEL_P (head))
3072 head = NEXT_INSN (head);
3077 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
3078 previously found among the insns. Insert them just before HEAD. */
3080 restore_other_notes (rtx head, basic_block head_bb)
3084 rtx note_head = note_list;
3087 head_bb = BLOCK_FOR_INSN (head);
3089 head = NEXT_INSN (bb_note (head_bb));
3091 while (PREV_INSN (note_head))
3093 set_block_for_insn (note_head, head_bb);
3094 note_head = PREV_INSN (note_head);
  /* The loop above misses this note; handle it here.  */
3097 set_block_for_insn (note_head, head_bb);
3099 PREV_INSN (note_head) = PREV_INSN (head);
3100 NEXT_INSN (PREV_INSN (head)) = note_head;
3101 PREV_INSN (head) = note_list;
3102 NEXT_INSN (note_list) = head;
3104 if (BLOCK_FOR_INSN (head) != head_bb)
3105 BB_END (head_bb) = note_list;
/* Move insns that became ready to fire from the queue to the ready list.  */
3116 queue_to_ready (struct ready_list *ready)
3122 q_ptr = NEXT_Q (q_ptr);
3124 if (dbg_cnt (sched_insn) == false)
      /* If the debug counter is activated, do not requeue the first
	 nonscheduled insn.  */
3128 skip_insn = nonscheduled_insns_begin;
3131 skip_insn = next_nonnote_nondebug_insn (skip_insn);
3133 while (QUEUE_INDEX (skip_insn) == QUEUE_SCHEDULED);
3136 skip_insn = NULL_RTX;
3138 /* Add all pending insns that can be scheduled without stalls to the
3140 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
3142 insn = XEXP (link, 0);
3145 if (sched_verbose >= 2)
3146 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
3147 (*current_sched_info->print_insn) (insn, 0));
3149 /* If the ready list is full, delay the insn for 1 cycle.
3150 See the comment in schedule_block for the rationale. */
3151 if (!reload_completed
3152 && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
3153 && !SCHED_GROUP_P (insn)
3154 && insn != skip_insn)
3155 queue_insn (insn, 1, "ready full");
3158 ready_add (ready, insn, false);
3159 if (sched_verbose >= 2)
3160 fprintf (sched_dump, "moving to ready without stalls\n");
3163 free_INSN_LIST_list (&insn_queue[q_ptr]);
3165 /* If there are no ready insns, stall until one is ready and add all
3166 of the pending insns at that point to the ready list. */
3167 if (ready->n_ready == 0)
3171 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
3173 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
3175 for (; link; link = XEXP (link, 1))
3177 insn = XEXP (link, 0);
3180 if (sched_verbose >= 2)
3181 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
3182 (*current_sched_info->print_insn) (insn, 0));
3184 ready_add (ready, insn, false);
3185 if (sched_verbose >= 2)
3186 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
3188 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
3190 advance_one_cycle ();
3195 advance_one_cycle ();
3198 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
3199 clock_var += stalls;
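  /* For example, if the ready list came up empty and the nearest queued
     insns sat two buckets ahead, the loop above called
     advance_one_cycle () twice before moving those insns to the ready
     list, and clock_var was just advanced by the same two stalls.  */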
3203 /* Used by early_queue_to_ready. Determines whether it is "ok" to
3204 prematurely move INSN from the queue to the ready list. Currently,
3205 if a target defines the hook 'is_costly_dependence', this function
3206 uses the hook to check whether there exist any dependences which are
3207 considered costly by the target, between INSN and other insns that
3208 have already been scheduled. Dependences are checked up to Y cycles
   back, with a default of Y=1; the flag -fsched-stalled-insns-dep=Y
   allows controlling this value.
   (Other considerations could be taken into account instead, or in
   addition, depending on user flags and target hooks.)  */
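/* E.g., with -fsched-stalled-insns-dep=2 the function walks back
   through the two most recently completed dispatch groups of already
   scheduled insns (a TImode insn marks a group boundary) and answers
   "no" if the target reports a costly dependence against any of
   them.  */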
3215 ok_for_early_queue_removal (rtx insn)
3217 if (targetm.sched.is_costly_dependence)
3221 int i = VEC_length (rtx, scheduled_insns);
3222 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
3228 prev_insn = VEC_index (rtx, scheduled_insns, i);
3230 if (!NOTE_P (prev_insn))
3234 dep = sd_find_dep_between (prev_insn, insn, true);
3238 cost = dep_cost (dep);
3240 if (targetm.sched.is_costly_dependence (dep, cost,
3241 flag_sched_stalled_insns_dep - n_cycles))
3246 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
3259 /* Remove insns from the queue, before they become "ready" with respect
3260 to FU latency considerations. */
3263 early_queue_to_ready (state_t state, struct ready_list *ready)
3271 state_t temp_state = alloca (dfa_state_size);
3273 int insns_removed = 0;
3276 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
3279 X == 0: There is no limit on how many queued insns can be removed
3280 prematurely. (flag_sched_stalled_insns = -1).
3282 X >= 1: Only X queued insns can be removed prematurely in each
3283 invocation. (flag_sched_stalled_insns = X).
3285 Otherwise: Early queue removal is disabled.
3286 (flag_sched_stalled_insns = 0)
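   For example, under the mapping above, -fsched-stalled-insns=0 (flag
   value -1) removes queued insns without limit, while
   -fsched-stalled-insns=3 stops after three premature removals per
   invocation, as enforced by the insns_removed check below.  */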
3289 if (! flag_sched_stalled_insns)
3292 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
3294 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
3296 if (sched_verbose > 6)
3297 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
3302 next_link = XEXP (link, 1);
3303 insn = XEXP (link, 0);
3304 if (insn && sched_verbose > 6)
3305 print_rtl_single (sched_dump, insn);
3307 memcpy (temp_state, state, dfa_state_size);
3308 if (recog_memoized (insn) < 0)
	    /* Use a non-negative cost to indicate that the insn isn't
	       ready yet, avoiding an infinite Q->R->Q->R... loop.  */
3313 cost = state_transition (temp_state, insn);
3315 if (sched_verbose >= 6)
3316 fprintf (sched_dump, "transition cost = %d\n", cost);
3318 move_to_ready = false;
3321 move_to_ready = ok_for_early_queue_removal (insn);
3322 if (move_to_ready == true)
3324 /* move from Q to R */
3326 ready_add (ready, insn, false);
3329 XEXP (prev_link, 1) = next_link;
3331 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
3333 free_INSN_LIST_node (link);
3335 if (sched_verbose >= 2)
3336 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
3337 (*current_sched_info->print_insn) (insn, 0));
3340 if (insns_removed == flag_sched_stalled_insns)
3341 /* Remove no more than flag_sched_stalled_insns insns
3342 from Q at a time. */
3343 return insns_removed;
3347 if (move_to_ready == false)
3354 } /* for stalls.. */
3356 return insns_removed;
3360 /* Print the ready list for debugging purposes. Callable from debugger. */
3363 debug_ready_list (struct ready_list *ready)
3368 if (ready->n_ready == 0)
3370 fprintf (sched_dump, "\n");
3374 p = ready_lastpos (ready);
3375 for (i = 0; i < ready->n_ready; i++)
3377 fprintf (sched_dump, " %s:%d",
3378 (*current_sched_info->print_insn) (p[i], 0),
3380 if (sched_pressure_p)
3381 fprintf (sched_dump, "(cost=%d",
3382 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
3383 if (INSN_TICK (p[i]) > clock_var)
3384 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
3385 if (sched_pressure_p)
3386 fprintf (sched_dump, ")");
3388 fprintf (sched_dump, "\n");
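/* With pressure scheduling enabled, an entry prints roughly as
   " foo:42(cost=3:delay=1)" -- the insn name, a numeric id, the
   excess-pressure cost change, and the remaining delay when the insn's
   tick is still in the future.  */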
3391 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
3392 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
3393 replaces the epilogue note in the correct basic block. */
3395 reemit_notes (rtx insn)
3397 rtx note, last = insn;
3399 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
3401 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
3403 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
3405 last = emit_note_before (note_type, last);
3406 remove_note (insn, note);
3411 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
3413 move_insn (rtx insn, rtx last, rtx nt)
3415 if (PREV_INSN (insn) != last)
3421 bb = BLOCK_FOR_INSN (insn);
3423 /* BB_HEAD is either LABEL or NOTE. */
3424 gcc_assert (BB_HEAD (bb) != insn);
3426 if (BB_END (bb) == insn)
	/* If this is the last instruction in BB, move the end marker one
	   instruction up.  */

	/* Jumps are always placed at the end of a basic block.  */
3431 jump_p = control_flow_insn_p (insn);
3434 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
3435 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
3436 || (common_sched_info->sched_pass_id
3437 == SCHED_EBB_PASS));
3439 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
3441 BB_END (bb) = PREV_INSN (insn);
3444 gcc_assert (BB_END (bb) != last);
3447 /* We move the block note along with jump. */
3451 note = NEXT_INSN (insn);
3452 while (NOTE_NOT_BB_P (note) && note != nt)
3453 note = NEXT_INSN (note);
3457 || BARRIER_P (note)))
3458 note = NEXT_INSN (note);
3460 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
3465 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
3466 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
3468 NEXT_INSN (note) = NEXT_INSN (last);
3469 PREV_INSN (NEXT_INSN (last)) = note;
3471 NEXT_INSN (last) = insn;
3472 PREV_INSN (insn) = last;
3474 bb = BLOCK_FOR_INSN (last);
3478 fix_jump_move (insn);
3480 if (BLOCK_FOR_INSN (insn) != bb)
3481 move_block_after_check (insn);
3483 gcc_assert (BB_END (bb) == last);
3486 df_insn_change_bb (insn, bb);
3488 /* Update BB_END, if needed. */
3489 if (BB_END (bb) == last)
3493 SCHED_GROUP_P (insn) = 0;
3496 /* Return true if scheduling INSN will finish current clock cycle. */
3498 insn_finishes_cycle_p (rtx insn)
3500 if (SCHED_GROUP_P (insn))
    /* After issuing INSN, the rest of the sched_group will be forced to
       issue in order.  Don't make any plans for the rest of the cycle.  */
3505 /* Finishing the block will, apparently, finish the cycle. */
3506 if (current_sched_info->insn_finishes_block_p
3507 && current_sched_info->insn_finishes_block_p (insn))
3513 /* Define type for target data used in multipass scheduling. */
3514 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
3515 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
3517 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
/* The following structure describes an entry of the stack of choices.  */
3522 /* Ordinal number of the issued insn in the ready queue. */
  /* The number of remaining insns whose issue we should still try.  */
3526 /* The number of issued essential insns. */
3528 /* State after issuing the insn. */
3530 /* Target-specific data. */
3531 first_cycle_multipass_data_t target_data;
3534 /* The following array is used to implement a stack of choices used in
3535 function max_issue. */
3536 static struct choice_entry *choice_stack;
3538 /* This holds the value of the target dfa_lookahead hook. */
/* The following variable holds the maximal number of tries of issuing
   insns for the first cycle multipass insn scheduling.  We define
   this value as constant * (DFA_LOOKAHEAD ** ISSUE_RATE).  We would
   not need this constraint if all real insns (with non-negative
   codes) had reservations, because in that case the algorithm
   complexity is O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa
   descriptions might be incomplete and such insns might occur.  For
   such descriptions, the complexity of the algorithm (without the
   constraint) could reach DFA_LOOKAHEAD ** N, where N is the queue
   length.  */
3550 static int max_lookahead_tries;
/* The following value is the value of the hook
   `first_cycle_multipass_dfa_lookahead' at the last call of
   max_issue.  */
3555 static int cached_first_cycle_multipass_dfa_lookahead = 0;
/* The following value is the value of `issue_rate' at the last call of
   sched_init.  */
3559 static int cached_issue_rate = 0;
/* The following function returns the maximal (or close to maximal)
   number of insns which can be issued on the same cycle, one of which
   is the insn with the best rank (the first insn in READY).  To
   achieve this, the function tries different samples of ready insns.
   READY is the current queue `ready'.  The global array READY_TRY
   reflects which insns are already issued in this try.  The function
   stops immediately if it reaches a solution where all insns can be
   issued.  INDEX will contain the index of the best insn in READY.
   This function is used only for first cycle multipass scheduling.

   This function expects recognized insns only.  All USEs, CLOBBERs,
   etc. must be filtered elsewhere.  */
3576 max_issue (struct ready_list *ready, int privileged_n, state_t state,
3577 bool first_cycle_insn_p, int *index)
3579 int n, i, all, n_ready, best, delay, tries_num;
3581 struct choice_entry *top;
3584 n_ready = ready->n_ready;
3585 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
3586 && privileged_n <= n_ready);
3588 /* Init MAX_LOOKAHEAD_TRIES. */
3589 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
3591 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
3592 max_lookahead_tries = 100;
3593 for (i = 0; i < issue_rate; i++)
3594 max_lookahead_tries *= dfa_lookahead;
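      /* E.g., with dfa_lookahead == 4 and issue_rate == 2, this caps
	 the search at 100 * 4 * 4 = 1600 tries.  */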
  /* Init MORE_ISSUE.  */
3598 more_issue = issue_rate - cycle_issued_insns;
3599 gcc_assert (more_issue >= 0);
3601 /* The number of the issued insns in the best solution. */
3606 /* Set initial state of the search. */
3607 memcpy (top->state, state, dfa_state_size);
3608 top->rest = dfa_lookahead;
3610 if (targetm.sched.first_cycle_multipass_begin)
3611 targetm.sched.first_cycle_multipass_begin (&top->target_data,
3613 first_cycle_insn_p);
3615 /* Count the number of the insns to search among. */
3616 for (all = i = 0; i < n_ready; i++)
3620 /* I is the index of the insn to try next. */
3625 if (/* If we've reached a dead end or searched enough of what we have
3628 /* or have nothing else to try... */
3630 /* or should not issue more. */
3631 || top->n >= more_issue)
3633 /* ??? (... || i == n_ready). */
3634 gcc_assert (i <= n_ready);
3636 /* We should not issue more than issue_rate instructions. */
3637 gcc_assert (top->n <= more_issue);
3639 if (top == choice_stack)
3642 if (best < top - choice_stack)
3647 /* Try to find issued privileged insn. */
3648 while (n && !ready_try[--n])
3652 if (/* If all insns are equally good... */
3654 /* Or a privileged insn will be issued. */
3656 /* Then we have a solution. */
3658 best = top - choice_stack;
	  /* This is the index of the insn issued first in this
	     solution.  */
3661 *index = choice_stack [1].index;
3662 if (top->n == more_issue || best == all)
3667 /* Set ready-list index to point to the last insn
3668 ('i++' below will advance it to the next insn). */
3674 if (targetm.sched.first_cycle_multipass_backtrack)
3675 targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
3676 ready_try, n_ready);
3679 memcpy (state, top->state, dfa_state_size);
3681 else if (!ready_try [i])
3684 if (tries_num > max_lookahead_tries)
3686 insn = ready_element (ready, i);
3687 delay = state_transition (state, insn);
3690 if (state_dead_lock_p (state)
3691 || insn_finishes_cycle_p (insn))
	    /* We won't issue any more instructions in the next
	       choice_entry.  */
3699 if (memcmp (top->state, state, dfa_state_size) != 0)
3702 /* Advance to the next choice_entry. */
3704 /* Initialize it. */
3705 top->rest = dfa_lookahead;
3708 memcpy (top->state, state, dfa_state_size);
3711 if (targetm.sched.first_cycle_multipass_issue)
3712 targetm.sched.first_cycle_multipass_issue (&top->target_data,
3722 /* Increase ready-list index. */
3726 if (targetm.sched.first_cycle_multipass_end)
3727 targetm.sched.first_cycle_multipass_end (best != 0
3728 ? &choice_stack[1].target_data
3731 /* Restore the original state of the DFA. */
3732 memcpy (state, choice_stack->state, dfa_state_size);
/* The following function chooses an insn from READY and modifies
   READY.  It is used only for first cycle multipass scheduling.
   It returns:

   -1 if the cycle should be advanced,
   0 if INSN_PTR is set to point to the desirable insn,
   1 if choose_ready () should be restarted without advancing the cycle.  */
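/* A sketch of the caller's side: schedule_block advances the cycle on
   a negative result, issues *INSN_PTR on zero, and on a positive
   result jumps back to selection (restart_choose_ready).  */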
3745 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
3750 if (dbg_cnt (sched_insn) == false)
3752 rtx insn = nonscheduled_insns_begin;
3755 insn = next_nonnote_insn (insn);
3757 while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
3759 if (QUEUE_INDEX (insn) == QUEUE_READY)
3760 /* INSN is in the ready_list. */
3762 nonscheduled_insns_begin = insn;
3763 ready_remove_insn (insn);
3768 /* INSN is in the queue. Advance cycle to move it to the ready list. */
3774 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
3775 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
3776 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
3777 || DEBUG_INSN_P (ready_element (ready, 0)))
3779 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
3780 *insn_ptr = ready_remove_first_dispatch (ready);
3782 *insn_ptr = ready_remove_first (ready);
3788 /* Try to choose the better insn. */
3789 int index = 0, i, n;
3791 int try_data = 1, try_control = 1;
3794 insn = ready_element (ready, 0);
3795 if (INSN_CODE (insn) < 0)
3797 *insn_ptr = ready_remove_first (ready);
3802 && spec_info->flags & (PREFER_NON_DATA_SPEC
3803 | PREFER_NON_CONTROL_SPEC))
3805 for (i = 0, n = ready->n_ready; i < n; i++)
3810 x = ready_element (ready, i);
3813 if (spec_info->flags & PREFER_NON_DATA_SPEC
3814 && !(s & DATA_SPEC))
3817 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
3822 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
3823 && !(s & CONTROL_SPEC))
3826 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
3832 ts = TODO_SPEC (insn);
3833 if ((ts & SPECULATIVE)
3834 && (((!try_data && (ts & DATA_SPEC))
3835 || (!try_control && (ts & CONTROL_SPEC)))
3836 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
3838 .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
	/* Discard the speculative insn that stands first in the ready
	   list.  */
3842 change_queue_index (insn, 1);
3848 for (i = 1; i < ready->n_ready; i++)
3850 insn = ready_element (ready, i);
3853 = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
3854 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
3857 /* Let the target filter the search space. */
3858 for (i = 1; i < ready->n_ready; i++)
3861 insn = ready_element (ready, i);
3863 /* If this insn is recognizable we should have already
3864 recognized it earlier.
3865 ??? Not very clear where this is supposed to be done.
3867 gcc_checking_assert (INSN_CODE (insn) >= 0
3868 || recog_memoized (insn) < 0);
3871 = (/* INSN_CODE check can be omitted here as it is also done later
3873 INSN_CODE (insn) < 0
3874 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
3875 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
3879 if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
3881 *insn_ptr = ready_remove_first (ready);
3882 if (sched_verbose >= 4)
3883 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
3884 (*current_sched_info->print_insn) (*insn_ptr, 0));
3889 if (sched_verbose >= 4)
3890 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
3891 (*current_sched_info->print_insn)
3892 (ready_element (ready, index), 0));
3894 *insn_ptr = ready_remove (ready, index);
3900 /* This function is called when we have successfully scheduled a
3901 block. It uses the schedule stored in the scheduled_insns vector
3902 to rearrange the RTL. PREV_HEAD is used as the anchor to which we
3903 append the scheduled insns; TAIL is the insn after the scheduled
3904 block. TARGET_BB is the argument passed to schedule_block. */
3907 commit_schedule (rtx prev_head, rtx tail, basic_block *target_bb)
3912 last_scheduled_insn = prev_head;
3914 VEC_iterate (rtx, scheduled_insns, i, insn);
3917 if (control_flow_insn_p (last_scheduled_insn)
3918 || current_sched_info->advance_target_bb (*target_bb, insn))
3920 *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
3926 x = next_real_insn (last_scheduled_insn);
3928 dump_new_block_header (1, *target_bb, x, tail);
3931 last_scheduled_insn = bb_note (*target_bb);
3934 if (current_sched_info->begin_move_insn)
3935 (*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
3936 move_insn (insn, last_scheduled_insn,
3937 current_sched_info->next_tail);
3938 if (!DEBUG_INSN_P (insn))
3939 reemit_notes (insn);
3940 last_scheduled_insn = insn;
3943 VEC_truncate (rtx, scheduled_insns, 0);
3946 /* Examine all insns on the ready list and queue those which can't be
3947 issued in this cycle. TEMP_STATE is temporary scheduler state we
3948 can use as scratch space. If FIRST_CYCLE_INSN_P is true, no insns
3949 have been issued for the current cycle, which means it is valid to
3950 issue an asm statement.
3952 If SHADOWS_ONLY_P is true, we eliminate all real insns and only
3953 leave those for which SHADOW_P is true. If MODULO_EPILOGUE is true,
3954 we only leave insns which have an INSN_EXACT_TICK. */
3957 prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
3958 bool shadows_only_p, bool modulo_epilogue_p)
3963 for (i = 0; i < ready.n_ready; i++)
3965 rtx insn = ready_element (&ready, i);
3967 const char *reason = "resource conflict";
3969 if (modulo_epilogue_p && !DEBUG_INSN_P (insn)
3970 && INSN_EXACT_TICK (insn) == INVALID_TICK)
3972 cost = max_insn_queue_index;
3973 reason = "not an epilogue insn";
3975 if (shadows_only_p && !DEBUG_INSN_P (insn) && !SHADOW_P (insn))
3978 reason = "not a shadow";
3980 else if (recog_memoized (insn) < 0)
3982 if (!first_cycle_insn_p
3983 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
3984 || asm_noperands (PATTERN (insn)) >= 0))
3988 else if (sched_pressure_p)
3996 struct delay_pair *delay_entry;
3998 = (struct delay_pair *)htab_find_with_hash (delay_htab, insn,
3999 htab_hash_pointer (insn));
4000 while (delay_entry && delay_cost == 0)
4002 delay_cost = estimate_shadow_tick (delay_entry);
4003 if (delay_cost > max_insn_queue_index)
4004 delay_cost = max_insn_queue_index;
4005 delay_entry = delay_entry->next_same_i1;
4009 memcpy (temp_state, curr_state, dfa_state_size);
4010 cost = state_transition (temp_state, insn);
4015 if (cost < delay_cost)
4018 reason = "shadow tick";
4023 ready_remove (&ready, i);
4024 queue_insn (insn, cost, reason);
4030 /* Called when we detect that the schedule is impossible. We examine the
4031 backtrack queue to find the earliest insn that caused this condition. */
4033 static struct haifa_saved_data *
4034 verify_shadows (void)
4036 struct haifa_saved_data *save, *earliest_fail = NULL;
4037 for (save = backtrack_queue; save; save = save->next)
4040 struct delay_pair *pair = save->delay_pair;
4043 for (; pair; pair = pair->next_same_i1)
4047 if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
4050 t = INSN_TICK (i1) + pair_delay (pair);
4053 if (sched_verbose >= 2)
4054 fprintf (sched_dump,
4055 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
4057 INSN_UID (pair->i1), INSN_UID (pair->i2),
4058 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
4059 earliest_fail = save;
4062 if (QUEUE_INDEX (i2) >= 0)
4064 int queued_for = INSN_TICK (i2);
4068 if (sched_verbose >= 2)
4069 fprintf (sched_dump,
4070 ";;\t\tfailed delay requirements for %d/%d"
4071 " (%d->%d), queued too late\n",
4072 INSN_UID (pair->i1), INSN_UID (pair->i2),
4073 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
4074 earliest_fail = save;
4081 return earliest_fail;
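/* For example: if i1 of a pair was issued at tick 10 with a pair delay
   of 3, its shadow i2 must go out at tick 13; finding i2 still queued
   for tick 15 marks the corresponding save point as failed ("queued
   too late") and makes it a candidate for EARLIEST_FAIL.  */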
4084 /* Use forward list scheduling to rearrange insns of block pointed to by
   TARGET_BB, possibly bringing insns from subsequent blocks in the
   same region.  */
4089 schedule_block (basic_block *target_bb)
4092 bool success = modulo_ii == 0;
4093 struct sched_block_state ls;
4094 state_t temp_state = NULL; /* It is used for multipass scheduling. */
4095 int sort_p, advance, start_clock_var;
4097 /* Head/tail info for this block. */
4098 rtx prev_head = current_sched_info->prev_head;
4099 rtx next_tail = current_sched_info->next_tail;
4100 rtx head = NEXT_INSN (prev_head);
4101 rtx tail = PREV_INSN (next_tail);
4103 /* We used to have code to avoid getting parameters moved from hard
4104 argument registers into pseudos.
4106 However, it was removed when it proved to be of marginal benefit
4107 and caused problems because schedule_block and compute_forward_dependences
4108 had different notions of what the "head" insn was. */
4110 gcc_assert (head != tail || INSN_P (head));
4112 haifa_recovery_bb_recently_added_p = false;
4114 backtrack_queue = NULL;
4118 dump_new_block_header (0, *target_bb, head, tail);
4120 state_reset (curr_state);
4122 /* Clear the ready list. */
4123 ready.first = ready.veclen - 1;
4127 /* It is used for first cycle multipass scheduling. */
4128 temp_state = alloca (dfa_state_size);
4130 if (targetm.sched.init)
4131 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
4133 /* We start inserting insns after PREV_HEAD. */
4134 last_scheduled_insn = nonscheduled_insns_begin = prev_head;
4135 last_nondebug_scheduled_insn = NULL_RTX;
4137 gcc_assert ((NOTE_P (last_scheduled_insn)
4138 || DEBUG_INSN_P (last_scheduled_insn))
4139 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
4141 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
4146 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
4147 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
4149 /* Start just before the beginning of time. */
  /* We need the queue and ready lists and clock_var to be initialized
     in try_ready () (which is called through init_ready_list ()).  */
4154 (*current_sched_info->init_ready_list) ();
4156 /* The algorithm is O(n^2) in the number of ready insns at any given
4157 time in the worst case. Before reload we are more likely to have
4158 big lists so truncate them to a reasonable size. */
4159 if (!reload_completed
4160 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
4162 ready_sort (&ready);
4164 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
4165 If there are debug insns, we know they're first. */
4166 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
4167 if (!SCHED_GROUP_P (ready_element (&ready, i)))
4170 if (sched_verbose >= 2)
4172 fprintf (sched_dump,
4173 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
4174 fprintf (sched_dump,
4175 ";;\t\t before reload => truncated to %d insns\n", i);
      /* Delay all insns past it for 1 cycle.  If the debug counter is
	 activated, make an exception for the insn right after
	 nonscheduled_insns_begin.  */
4184 if (dbg_cnt (sched_insn) == false)
4185 skip_insn = next_nonnote_insn (nonscheduled_insns_begin);
4187 skip_insn = NULL_RTX;
4189 while (i < ready.n_ready)
4193 insn = ready_remove (&ready, i);
4195 if (insn != skip_insn)
4196 queue_insn (insn, 1, "list truncated");
4199 ready_add (&ready, skip_insn, true);
4203 /* Now we can restore basic block notes and maintain precise cfg. */
4204 restore_bb_notes (*target_bb);
4206 last_clock_var = -1;
4210 gcc_assert (VEC_length (rtx, scheduled_insns) == 0);
4212 must_backtrack = false;
4213 modulo_insns_scheduled = 0;
4215 ls.modulo_epilogue = false;
4217 /* Loop until all the insns in BB are scheduled. */
4218 while ((*current_sched_info->schedule_more_p) ())
4222 start_clock_var = clock_var;
4226 advance_one_cycle ();
4228 /* Add to the ready list all pending insns that can be issued now.
4229 If there are no ready insns, increment clock until one
4230 is ready and add all pending insns at that point to the ready
4232 queue_to_ready (&ready);
4234 gcc_assert (ready.n_ready);
4236 if (sched_verbose >= 2)
4238 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
4239 debug_ready_list (&ready);
4241 advance -= clock_var - start_clock_var;
4243 while (advance > 0);
4245 if (ls.modulo_epilogue)
4247 int stage = clock_var / modulo_ii;
4248 if (stage > modulo_last_stage * 2 + 2)
4250 if (sched_verbose >= 2)
4251 fprintf (sched_dump,
4252 ";;\t\tmodulo scheduled succeeded at II %d\n",
4258 else if (modulo_ii > 0)
4260 int stage = clock_var / modulo_ii;
4261 if (stage > modulo_max_stages)
4263 if (sched_verbose >= 2)
4264 fprintf (sched_dump,
4265 ";;\t\tfailing schedule due to excessive stages\n");
4268 if (modulo_n_insns == modulo_insns_scheduled
4269 && stage > modulo_last_stage)
4271 if (sched_verbose >= 2)
4272 fprintf (sched_dump,
4273 ";;\t\tfound kernel after %d stages, II %d\n",
4275 ls.modulo_epilogue = true;
4279 prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
4280 if (ready.n_ready == 0)
4285 ls.first_cycle_insn_p = true;
4286 ls.shadows_only_p = false;
4287 cycle_issued_insns = 0;
4288 ls.can_issue_more = issue_rate;
4295 if (sort_p && ready.n_ready > 0)
4297 /* Sort the ready list based on priority. This must be
4298 done every iteration through the loop, as schedule_insn
4299 may have readied additional insns that will not be
4300 sorted correctly. */
4301 ready_sort (&ready);
4303 if (sched_verbose >= 2)
4305 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
4306 debug_ready_list (&ready);
	  /* We don't want md sched reorder to even see debug insns, so put
	     them out right away.  */
4312 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
4313 && (*current_sched_info->schedule_more_p) ())
4315 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
4317 rtx insn = ready_remove_first (&ready);
4318 gcc_assert (DEBUG_INSN_P (insn));
4319 (*current_sched_info->begin_schedule_ready) (insn);
4320 VEC_safe_push (rtx, heap, scheduled_insns, insn);
4321 last_scheduled_insn = insn;
4322 advance = schedule_insn (insn);
4323 gcc_assert (advance == 0);
4324 if (ready.n_ready > 0)
4325 ready_sort (&ready);
4329 if (ls.first_cycle_insn_p && !ready.n_ready)
4332 resume_after_backtrack:
4333 /* Allow the target to reorder the list, typically for
4334 better instruction bundling. */
4336 && (ready.n_ready == 0
4337 || !SCHED_GROUP_P (ready_element (&ready, 0))))
4339 if (ls.first_cycle_insn_p && targetm.sched.reorder)
4341 = targetm.sched.reorder (sched_dump, sched_verbose,
4342 ready_lastpos (&ready),
4343 &ready.n_ready, clock_var);
4344 else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
4346 = targetm.sched.reorder2 (sched_dump, sched_verbose,
4348 ? ready_lastpos (&ready) : NULL,
4349 &ready.n_ready, clock_var);
4352 restart_choose_ready:
4353 if (sched_verbose >= 2)
4355 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
4357 debug_ready_list (&ready);
4358 if (sched_pressure_p)
4359 print_curr_reg_pressure ();
4362 if (ready.n_ready == 0
4363 && ls.can_issue_more
4364 && reload_completed)
4366 /* Allow scheduling insns directly from the queue in case
4367 there's nothing better to do (ready list is empty) but
4368 there are still vacant dispatch slots in the current cycle. */
4369 if (sched_verbose >= 6)
4370 fprintf (sched_dump,";;\t\tSecond chance\n");
4371 memcpy (temp_state, curr_state, dfa_state_size);
4372 if (early_queue_to_ready (temp_state, &ready))
4373 ready_sort (&ready);
4376 if (ready.n_ready == 0
4377 || !ls.can_issue_more
4378 || state_dead_lock_p (curr_state)
4379 || !(*current_sched_info->schedule_more_p) ())
4382 /* Select and remove the insn from the ready list. */
4388 res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
4394 goto restart_choose_ready;
4396 gcc_assert (insn != NULL_RTX);
4399 insn = ready_remove_first (&ready);
4401 if (sched_pressure_p && INSN_TICK (insn) > clock_var)
4403 ready_add (&ready, insn, true);
4408 if (targetm.sched.dfa_new_cycle
4409 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
4410 insn, last_clock_var,
4411 clock_var, &sort_p))
4412 /* SORT_P is used by the target to override sorting
4413 of the ready list. This is needed when the target
4414 has modified its internal structures expecting that
4415 the insn will be issued next. As we need the insn
4416 to have the highest priority (so it will be returned by
4417 the ready_remove_first call above), we invoke
4418 ready_add (&ready, insn, true).
4419 But, still, there is one issue: INSN can be later
4420 discarded by scheduler's front end through
4421 current_sched_info->can_schedule_ready_p, hence, won't
4424 ready_add (&ready, insn, true);
4430 if (current_sched_info->can_schedule_ready_p
4431 && ! (*current_sched_info->can_schedule_ready_p) (insn))
4432 /* We normally get here only if we don't want to move
4433 insn from the split block. */
4435 TODO_SPEC (insn) = HARD_DEP;
4436 goto restart_choose_ready;
4441 /* If this insn is the first part of a delay-slot pair, record a
4443 struct delay_pair *delay_entry;
4445 = (struct delay_pair *)htab_find_with_hash (delay_htab, insn,
4446 htab_hash_pointer (insn));
4449 save_backtrack_point (delay_entry, ls);
4450 if (sched_verbose >= 2)
4451 fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
4455 /* DECISION is made. */
4457 if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
4459 modulo_insns_scheduled++;
4460 modulo_last_stage = clock_var / modulo_ii;
4462 if (TODO_SPEC (insn) & SPECULATIVE)
4463 generate_recovery_code (insn);
4465 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
4466 targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
4468 /* Update counters, etc. in the scheduler's front end. */
4469 (*current_sched_info->begin_schedule_ready) (insn);
4470 VEC_safe_push (rtx, heap, scheduled_insns, insn);
4471 gcc_assert (NONDEBUG_INSN_P (insn));
4472 last_nondebug_scheduled_insn = last_scheduled_insn = insn;
4474 if (recog_memoized (insn) >= 0)
4476 memcpy (temp_state, curr_state, dfa_state_size);
4477 cost = state_transition (curr_state, insn);
4478 if (!sched_pressure_p)
4479 gcc_assert (cost < 0);
4480 if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
4481 cycle_issued_insns++;
4485 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
4486 || asm_noperands (PATTERN (insn)) >= 0);
4488 if (targetm.sched.variable_issue)
4490 targetm.sched.variable_issue (sched_dump, sched_verbose,
4491 insn, ls.can_issue_more);
4492 /* A naked CLOBBER or USE generates no instruction, so do
4493 not count them against the issue rate. */
4494 else if (GET_CODE (PATTERN (insn)) != USE
4495 && GET_CODE (PATTERN (insn)) != CLOBBER)
4496 ls.can_issue_more--;
4497 advance = schedule_insn (insn);
4499 if (SHADOW_P (insn))
4500 ls.shadows_only_p = true;
4502 /* After issuing an asm insn we should start a new cycle. */
4503 if (advance == 0 && asm_p)
4512 ls.first_cycle_insn_p = false;
4513 if (ready.n_ready > 0)
4514 prune_ready_list (temp_state, false, ls.shadows_only_p,
4515 ls.modulo_epilogue);
4519 if (!must_backtrack)
4520 for (i = 0; i < ready.n_ready; i++)
4522 rtx insn = ready_element (&ready, i);
4523 if (INSN_EXACT_TICK (insn) == clock_var)
4525 must_backtrack = true;
4530 if (must_backtrack && modulo_ii > 0)
4532 if (modulo_backtracks_left == 0)
4534 modulo_backtracks_left--;
4536 while (must_backtrack)
4538 struct haifa_saved_data *failed;
4541 must_backtrack = false;
4542 failed = verify_shadows ();
4543 gcc_assert (failed);
4545 failed_insn = failed->delay_pair->i1;
4546 toggle_cancelled_flags (false);
4547 unschedule_insns_until (failed_insn);
4548 while (failed != backtrack_queue)
4549 free_topmost_backtrack_point (true);
4550 restore_last_backtrack_point (&ls);
4551 if (sched_verbose >= 2)
4552 fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
4553 /* Delay by at least a cycle. This could cause additional
4555 queue_insn (failed_insn, 1, "backtracked");
4559 if (ready.n_ready > 0)
4560 goto resume_after_backtrack;
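/* A minimal sketch of the backtracking discipline used above, with
   hypothetical toy_* names: a snapshot is pushed when a delay pair is
   scheduled, and on a shadow failure the most recent snapshot is
   popped, the clock rewound, and the failed insn retried later.  */
#if 0
#define TOY_MAX_SAVES 64

struct toy_save { int clock; /* plus a snapshot of scheduler state */ };

static struct toy_save toy_saves[TOY_MAX_SAVES];
static int toy_n_saves;

static void
toy_save_point (int clock)
{
  if (toy_n_saves < TOY_MAX_SAVES)
    toy_saves[toy_n_saves++].clock = clock;
}

/* Return the cycle to rewind to, in the spirit of
   restore_last_backtrack_point above.  */
static int
toy_backtrack (void)
{
  return toy_saves[--toy_n_saves].clock;
}
#endif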
4563 if (clock_var == 0 && ls.first_cycle_insn_p)
4570 if (ls.modulo_epilogue)
4575 /* Once again, debug insn suckiness: they can be on the ready list
4576 even if they have unresolved dependencies. To make our view
4577 of the world consistent, remove such "ready" insns. */
4578 restart_debug_insn_loop:
4579 for (i = ready.n_ready - 1; i >= 0; i--)
4583 x = ready_element (&ready, i);
4584 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
4585 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
4587 ready_remove (&ready, i);
4588 goto restart_debug_insn_loop;
4591 for (i = ready.n_ready - 1; i >= 0; i--)
4595 x = ready_element (&ready, i);
4596 resolve_dependencies (x);
4598 for (i = 0; i <= max_insn_queue_index; i++)
4601 while ((link = insn_queue[i]) != NULL)
4603 rtx x = XEXP (link, 0);
4604 insn_queue[i] = XEXP (link, 1);
4605 QUEUE_INDEX (x) = QUEUE_NOWHERE;
4606 free_INSN_LIST_node (link);
4607 resolve_dependencies (x);
4615 fprintf (sched_dump, ";;\tReady list (final): ");
4616 debug_ready_list (&ready);
4619 if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
4620 /* Sanity check -- queue must be empty now. Meaningless if region has
4622 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
4623 else if (modulo_ii == 0)
4625 /* We must maintain QUEUE_INDEX between blocks in region. */
4626 for (i = ready.n_ready - 1; i >= 0; i--)
4630 x = ready_element (&ready, i);
4631 QUEUE_INDEX (x) = QUEUE_NOWHERE;
4632 TODO_SPEC (x) = HARD_DEP;
4636 for (i = 0; i <= max_insn_queue_index; i++)
4639 for (link = insn_queue[i]; link; link = XEXP (link, 1))
4644 QUEUE_INDEX (x) = QUEUE_NOWHERE;
4645 TODO_SPEC (x) = HARD_DEP;
4647 free_INSN_LIST_list (&insn_queue[i]);
4653 commit_schedule (prev_head, tail, target_bb);
4655 fprintf (sched_dump, ";; total time = %d\n", clock_var);
4658 last_scheduled_insn = tail;
4660 VEC_truncate (rtx, scheduled_insns, 0);
4662 if (!current_sched_info->queue_must_finish_empty
4663 || haifa_recovery_bb_recently_added_p)
4665 /* INSN_TICK (minimum clock tick at which the insn becomes
4666 ready) may not be correct for insns in the subsequent
4667 blocks of the region. We should use a correct value of
4668 `clock_var' or modify INSN_TICK. It is better to keep
4669 clock_var value equal to 0 at the start of a basic block.
4670 Therefore we modify INSN_TICK here. */
4671 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
4674 if (targetm.sched.finish)
4676 targetm.sched.finish (sched_dump, sched_verbose);
4677 /* Target might have added some instructions to the scheduled block
4678 in its md_finish () hook. These new insns don't have any data
4679 initialized, so to identify them we extend h_i_d so that they'll
4681 sched_extend_luids ();
4685 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n",
4686 INSN_UID (head), INSN_UID (tail));
4688 /* Update head/tail boundaries. */
4689 head = NEXT_INSN (prev_head);
4690 tail = last_scheduled_insn;
4692 head = restore_other_notes (head, NULL);
4694 current_sched_info->head = head;
4695 current_sched_info->tail = tail;
4697 free_backtrack_queue ();
4702 /* Set_priorities: compute priority of each insn in the block. */
4705 set_priorities (rtx head, rtx tail)
4709 int sched_max_insns_priority =
4710 current_sched_info->sched_max_insns_priority;
4713 if (head == tail && ! INSN_P (head))
4718 prev_head = PREV_INSN (head);
4719 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
4725 (void) priority (insn);
4727 gcc_assert (INSN_PRIORITY_KNOWN (insn));
4729 sched_max_insns_priority = MAX (sched_max_insns_priority,
4730 INSN_PRIORITY (insn));
4733 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
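/* set_priorities folds each insn's priority into a running maximum for
   the region.  A toy version of that accumulation (hypothetical
   names), assuming the priorities are already computed:  */
#if 0
static int
toy_max_priority (const int *prio, int n, int cur_max)
{
  int i;
  for (i = 0; i < n; i++)
    if (prio[i] > cur_max)
      cur_max = prio[i];
  return cur_max;
}
#endif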
4738 /* Set dump and sched_verbose for the desired debugging output. If no
4739 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
4740 For -fsched-verbose=N, N>=10, print everything to stderr. */
4742 setup_sched_dump (void)
4744 sched_verbose = sched_verbose_param;
4745 if (sched_verbose_param == 0 && dump_file)
4747 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
4748 ? stderr : dump_file);
4751 /* Initialize some global state for the scheduler. This function works
4752 with the common data shared between all the schedulers. It is called
4753 from the scheduler specific initialization routine. */
4758 /* Disable speculative loads if condition codes (cc0) are in use. */
4760 flag_schedule_speculative_load = 0;
4763 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
4764 targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);
4766 sched_pressure_p = (flag_sched_pressure && ! reload_completed
4767 && common_sched_info->sched_pass_id == SCHED_RGN_PASS);
4769 if (sched_pressure_p)
4770 ira_setup_eliminable_regset ();
4772 /* Initialize SPEC_INFO. */
4773 if (targetm.sched.set_sched_flags)
4775 spec_info = &spec_info_var;
4776 targetm.sched.set_sched_flags (spec_info);
4778 if (spec_info->mask != 0)
4780 spec_info->data_weakness_cutoff =
4781 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
4782 spec_info->control_weakness_cutoff =
4783 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
4784 * REG_BR_PROB_BASE) / 100;
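/* The two cutoffs above are plain percentage scaling of a maximum
   weakness value.  A toy version with hypothetical names; e.g. a
   parameter of 40 keeps speculations at least 40% as strong as the
   maximum:  */
#if 0
static int
toy_cutoff (int percent, int max_weak)
{
  return percent * max_weak / 100;
}
#endif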
4787 /* So we won't read anything accidentally. */
4792 /* So we won't read anything accidentally. */
4795 /* Initialize issue_rate. */
4796 if (targetm.sched.issue_rate)
4797 issue_rate = targetm.sched.issue_rate ();
4801 if (cached_issue_rate != issue_rate)
4803 cached_issue_rate = issue_rate;
4804 /* To invalidate max_lookahead_tries: */
4805 cached_first_cycle_multipass_dfa_lookahead = 0;
4808 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
4809 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
4813 if (targetm.sched.init_dfa_pre_cycle_insn)
4814 targetm.sched.init_dfa_pre_cycle_insn ();
4816 if (targetm.sched.init_dfa_post_cycle_insn)
4817 targetm.sched.init_dfa_post_cycle_insn ();
4820 dfa_state_size = state_size ();
4822 init_alias_analysis ();
4825 df_set_flags (DF_LR_RUN_DCE);
4826 df_note_add_problem ();
4828 /* More problems needed for interloop dep calculation in SMS. */
4829 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
4831 df_rd_add_problem ();
4832 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
4837 /* Do not run DCE after reload, as this can kill nops inserted
4839 if (reload_completed)
4840 df_clear_flags (DF_LR_RUN_DCE);
4842 regstat_compute_calls_crossed ();
4844 if (targetm.sched.init_global)
4845 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
4847 if (sched_pressure_p)
4849 int i, max_regno = max_reg_num ();
4851 ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
4852 sched_regno_pressure_class
4853 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
4854 for (i = 0; i < max_regno; i++)
4855 sched_regno_pressure_class[i]
4856 = (i < FIRST_PSEUDO_REGISTER
4857 ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
4858 : ira_pressure_class_translate[reg_allocno_class (i)]);
4859 curr_reg_live = BITMAP_ALLOC (NULL);
4860 saved_reg_live = BITMAP_ALLOC (NULL);
4861 region_ref_regs = BITMAP_ALLOC (NULL);
4864 curr_state = xmalloc (dfa_state_size);
4867 static void haifa_init_only_bb (basic_block, basic_block);
4869 /* Initialize data structures specific to the Haifa scheduler. */
4871 haifa_sched_init (void)
4873 setup_sched_dump ();
4876 scheduled_insns = VEC_alloc (rtx, heap, 0);
4878 if (spec_info != NULL)
4880 sched_deps_info->use_deps_list = 1;
4881 sched_deps_info->generate_spec_deps = 1;
4884 /* Initialize luids, dependency caches, target and h_i_d for the
4887 bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
4893 VEC_quick_push (basic_block, bbs, bb);
4894 sched_init_luids (bbs);
4895 sched_deps_init (true);
4896 sched_extend_target ();
4897 haifa_init_h_i_d (bbs);
4899 VEC_free (basic_block, heap, bbs);
4902 sched_init_only_bb = haifa_init_only_bb;
4903 sched_split_block = sched_split_block_1;
4904 sched_create_empty_bb = sched_create_empty_bb_1;
4905 haifa_recovery_bb_ever_added_p = false;
4907 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
4908 before_recovery = 0;
4914 /* Finish work with the data specific to the Haifa scheduler. */
4916 haifa_sched_finish (void)
4918 sched_create_empty_bb = NULL;
4919 sched_split_block = NULL;
4920 sched_init_only_bb = NULL;
4922 if (spec_info && spec_info->dump)
4924 char c = reload_completed ? 'a' : 'b';
4926 fprintf (spec_info->dump,
4927 ";; %s:\n", current_function_name ());
4929 fprintf (spec_info->dump,
4930 ";; Procedure %cr-begin-data-spec motions == %d\n",
4932 fprintf (spec_info->dump,
4933 ";; Procedure %cr-be-in-data-spec motions == %d\n",
4935 fprintf (spec_info->dump,
4936 ";; Procedure %cr-begin-control-spec motions == %d\n",
4937 c, nr_begin_control);
4938 fprintf (spec_info->dump,
4939 ";; Procedure %cr-be-in-control-spec motions == %d\n",
4940 c, nr_be_in_control);
4943 VEC_free (rtx, heap, scheduled_insns);
4945 /* Finalize h_i_d, dependency caches, and luids for the whole
4946 function. Target will be finalized in md_global_finish (). */
4947 sched_deps_finish ();
4948 sched_finish_luids ();
4949 current_sched_info = NULL;
4953 /* Free global data used during insn scheduling. This function works with
4954 the common data shared between the schedulers. */
4959 haifa_finish_h_i_d ();
4960 if (sched_pressure_p)
4962 free (sched_regno_pressure_class);
4963 BITMAP_FREE (region_ref_regs);
4964 BITMAP_FREE (saved_reg_live);
4965 BITMAP_FREE (curr_reg_live);
4969 if (targetm.sched.finish_global)
4970 targetm.sched.finish_global (sched_dump, sched_verbose);
4972 end_alias_analysis ();
4974 regstat_free_calls_crossed ();
4979 /* Free all delay_pair structures that were recorded. */
4981 free_delay_pairs (void)
4985 htab_empty (delay_htab);
4986 htab_empty (delay_htab_i2);
4990 /* Fix INSN_TICKs of the instructions in the current block as well as
4991 INSN_TICKs of their dependents.
4992 HEAD and TAIL are the beginning and the end of the current scheduled block. */
4994 fix_inter_tick (rtx head, rtx tail)
4996 /* Set of instructions with corrected INSN_TICK. */
4997 bitmap_head processed;
4998 /* ??? It is doubtful if we should assume that cycle advance happens on
4999 basic block boundaries. Basically, insns that are unconditionally ready
5000 at the start of the block are preferable to those which have
5001 a one-cycle dependency on an insn from the previous block. */
5002 int next_clock = clock_var + 1;
5004 bitmap_initialize (&processed, 0);
5006 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
5007 the INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
5008 across different blocks. */
5009 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
5014 sd_iterator_def sd_it;
5017 tick = INSN_TICK (head);
5018 gcc_assert (tick >= MIN_TICK);
5020 /* Fix INSN_TICK of instruction from just scheduled block. */
5021 if (bitmap_set_bit (&processed, INSN_LUID (head)))
5025 if (tick < MIN_TICK)
5028 INSN_TICK (head) = tick;
5031 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
5035 next = DEP_CON (dep);
5036 tick = INSN_TICK (next);
5038 if (tick != INVALID_TICK
5039 /* If NEXT has its INSN_TICK calculated, fix it.
5040 If not - it will be properly calculated from
5041 scratch later in fix_tick_ready. */
5042 && bitmap_set_bit (&processed, INSN_LUID (next)))
5046 if (tick < MIN_TICK)
5049 if (tick > INTER_TICK (next))
5050 INTER_TICK (next) = tick;
5052 tick = INTER_TICK (next);
5054 INSN_TICK (next) = tick;
5059 bitmap_clear (&processed);
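/* A toy version (hypothetical names) of the per-insn adjustment
   performed above: ticks are rebased so that the next block starts
   counting from cycle 0, clamped at a minimum value.  */
#if 0
static int
toy_rebase_tick (int tick, int next_clock, int min_tick)
{
  tick -= next_clock;
  return tick < min_tick ? min_tick : tick;
}
#endif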
5062 /* Check if NEXT is ready to be added to the ready or queue list.
5063 If "yes", add it to the proper list.
5065 -1 - is not ready yet,
5066 0 - added to the ready list,
5067 0 < N - queued for N cycles. */
5069 try_ready (rtx next)
5071 ds_t old_ts, new_ts;
5073 old_ts = TODO_SPEC (next);
5075 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL))
5076 && ((old_ts & HARD_DEP)
5077 || (old_ts & SPECULATIVE)
5078 || (old_ts & DEP_CONTROL)));
5080 new_ts = recompute_todo_spec (next);
5082 if (new_ts & HARD_DEP)
5083 gcc_assert (new_ts == old_ts
5084 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
5085 else if (current_sched_info->new_ready)
5086 new_ts = current_sched_info->new_ready (next, new_ts);
5088 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
5089 have its original pattern or a changed (speculative) one. This is due
5090 to changing ebb in region scheduling.
5091 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
5092 has speculative pattern.
5094 We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
5095 control-speculative NEXT could have been discarded by sched-rgn.c
5096 (the same case as when discarded by can_schedule_ready_p ()). */
5098 if ((new_ts & SPECULATIVE)
5099 /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
5100 need to change anything. */
5101 && new_ts != old_ts)
5106 gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));
5108 res = haifa_speculate_insn (next, new_ts, &new_pat);
5113 /* It would be nice to change the DEP_STATUS of all dependences
5114 which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
5115 so we won't reanalyze anything. */
5120 /* We follow the rule that every speculative insn
5121 has a non-null ORIG_PAT. */
5122 if (!ORIG_PAT (next))
5123 ORIG_PAT (next) = PATTERN (next);
5127 if (!ORIG_PAT (next))
5128 /* If we are going to overwrite the original pattern of insn,
5130 ORIG_PAT (next) = PATTERN (next);
5132 res = haifa_change_pattern (next, new_pat);
5141 /* We need to restore pattern only if (new_ts == 0), because otherwise it is
5142 either correct (new_ts & SPECULATIVE),
5143 or we simply don't care (new_ts & HARD_DEP). */
5145 gcc_assert (!ORIG_PAT (next)
5146 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
5148 TODO_SPEC (next) = new_ts;
5150 if (new_ts & HARD_DEP)
5152 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
5153 control-speculative NEXT could have been discarded by sched-rgn.c
5154 (the same case as when discarded by can_schedule_ready_p ()). */
5155 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
5157 change_queue_index (next, QUEUE_NOWHERE);
5161 else if (!(new_ts & BEGIN_SPEC)
5162 && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
5163 && !IS_SPECULATION_CHECK_P (next))
5164 /* We should change the pattern of every previously speculative
5165 instruction, and we determine whether NEXT was speculative by using
5166 the ORIG_PAT field. There is one exception: speculation checks also
5167 have ORIG_PAT set, so skip them. */
5169 bool success = haifa_change_pattern (next, ORIG_PAT (next));
5170 gcc_assert (success);
5171 ORIG_PAT (next) = 0;
5174 if (sched_verbose >= 2)
5176 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
5177 (*current_sched_info->print_insn) (next, 0));
5179 if (spec_info && spec_info->dump)
5181 if (new_ts & BEGIN_DATA)
5182 fprintf (spec_info->dump, "; data-spec;");
5183 if (new_ts & BEGIN_CONTROL)
5184 fprintf (spec_info->dump, "; control-spec;");
5185 if (new_ts & BE_IN_CONTROL)
5186 fprintf (spec_info->dump, "; in-control-spec;");
5188 if (TODO_SPEC (next) & DEP_CONTROL)
5189 fprintf (sched_dump, " predicated");
5190 fprintf (sched_dump, "\n");
5193 adjust_priority (next);
5195 return fix_tick_ready (next);
5198 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
5200 fix_tick_ready (rtx next)
5204 if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
5207 sd_iterator_def sd_it;
5210 tick = INSN_TICK (next);
5211 /* If TICK is not equal to INVALID_TICK, then update
5212 INSN_TICK of NEXT with the most recent resolved dependence
5213 cost. Otherwise, recalculate from scratch. */
5214 full_p = (tick == INVALID_TICK);
5216 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
5218 rtx pro = DEP_PRO (dep);
5221 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
5223 tick1 = INSN_TICK (pro) + dep_cost (dep);
5234 INSN_TICK (next) = tick;
5236 delay = tick - clock_var;
5237 if (delay <= 0 || sched_pressure_p)
5238 delay = QUEUE_READY;
5240 change_queue_index (next, delay);
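/* Sketch of the computation above, with hypothetical toy_* names: an
   insn's tick is the maximum, over its resolved back dependencies, of
   the producer's tick plus the dependence cost.  */
#if 0
static int
toy_earliest_tick (const int *pro_tick, const int *cost, int n)
{
  int i, tick = 0;
  for (i = 0; i < n; i++)
    if (pro_tick[i] + cost[i] > tick)
      tick = pro_tick[i] + cost[i];
  return tick;
}
#endif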
5245 /* Move NEXT to the proper queue list with (DELAY >= 1),
5246 or add it to the ready list (DELAY == QUEUE_READY),
5247 or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE). */
5249 change_queue_index (rtx next, int delay)
5251 int i = QUEUE_INDEX (next);
5253 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
5255 gcc_assert (i != QUEUE_SCHEDULED);
5257 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
5258 || (delay < 0 && delay == i))
5259 /* We have nothing to do. */
5262 /* Remove NEXT from wherever it is now. */
5263 if (i == QUEUE_READY)
5264 ready_remove_insn (next);
5266 queue_remove (next);
5268 /* Add it to the proper place. */
5269 if (delay == QUEUE_READY)
5270 ready_add (readyp, next, false);
5271 else if (delay >= 1)
5272 queue_insn (next, delay, "change queue index");
5274 if (sched_verbose >= 2)
5276 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
5277 (*current_sched_info->print_insn) (next, 0));
5279 if (delay == QUEUE_READY)
5280 fprintf (sched_dump, " into ready\n");
5281 else if (delay >= 1)
5282 fprintf (sched_dump, " into queue with cost=%d\n", delay);
5284 fprintf (sched_dump, " removed from ready or queue lists\n");
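/* The insn queue that change_queue_index manipulates is a circular
   bucket array: an insn delayed by N cycles lives N slots ahead of the
   current queue pointer, and advancing a cycle only moves the pointer.
   A toy sketch, assuming a power-of-two queue size (hypothetical
   names):  */
#if 0
#define TOY_QSIZE 8  /* assumed to be a power of two */

static void *toy_queue[TOY_QSIZE];
static int toy_q_ptr;

static int
toy_slot_for_delay (int delay)
{
  return (toy_q_ptr + delay) & (TOY_QSIZE - 1);
}

static void
toy_advance_cycle (void)
{
  toy_q_ptr = (toy_q_ptr + 1) & (TOY_QSIZE - 1);
}
#endif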
5288 static int sched_ready_n_insns = -1;
5290 /* Initialize per region data structures. */
5292 sched_extend_ready_list (int new_sched_ready_n_insns)
5296 if (sched_ready_n_insns == -1)
5297 /* At the first call we need to initialize one more choice_stack
5301 sched_ready_n_insns = 0;
5302 VEC_reserve (rtx, heap, scheduled_insns, new_sched_ready_n_insns);
5305 i = sched_ready_n_insns + 1;
5307 ready.veclen = new_sched_ready_n_insns + issue_rate;
5308 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
5310 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
5312 ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
5313 sched_ready_n_insns, sizeof (*ready_try));
5315 /* We allocate +1 element to save initial state in the choice_stack[0]
5317 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
5318 new_sched_ready_n_insns + 1);
5320 for (; i <= new_sched_ready_n_insns; i++)
5322 choice_stack[i].state = xmalloc (dfa_state_size);
5324 if (targetm.sched.first_cycle_multipass_init)
5325 targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
5329 sched_ready_n_insns = new_sched_ready_n_insns;
5332 /* Free per region data structures. */
5334 sched_finish_ready_list (void)
5345 for (i = 0; i <= sched_ready_n_insns; i++)
5347 if (targetm.sched.first_cycle_multipass_fini)
5348 targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
5351 free (choice_stack [i].state);
5353 free (choice_stack);
5354 choice_stack = NULL;
5356 sched_ready_n_insns = -1;
5360 haifa_luid_for_non_insn (rtx x)
5362 gcc_assert (NOTE_P (x) || LABEL_P (x));
5367 /* Generates recovery code for INSN. */
5369 generate_recovery_code (rtx insn)
5371 if (TODO_SPEC (insn) & BEGIN_SPEC)
5372 begin_speculative_block (insn);
5374 /* Here we have an insn with no dependencies on
5375 instructions other than CHECK_SPEC ones. */
5377 if (TODO_SPEC (insn) & BE_IN_SPEC)
5378 add_to_speculative_block (insn);
5382 Tries to add speculative dependencies of type FS between TWIN and
5383 the consumers of INSN's forward dependencies. */
5385 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
5387 sd_iterator_def sd_it;
5390 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
5395 consumer = DEP_CON (dep);
5397 ds = DEP_STATUS (dep);
5399 if (/* If we want to create speculative dep. */
5401 /* And we can do that because this is a true dep. */
5402 && (ds & DEP_TYPES) == DEP_TRUE)
5404 gcc_assert (!(ds & BE_IN_SPEC));
5406 if (/* If this dep can be overcome with 'begin speculation'. */
5408 /* Then we have a choice: keep the dep 'begin speculative'
5409 or transform it into 'be in speculative'. */
5411 if (/* In try_ready we assert that if insn once became ready
5412 it can be removed from the ready (or queue) list only
5413 due to backend decision. Hence we can't let the
5414 probability of the speculative dep decrease. */
5415 ds_weak (ds) <= ds_weak (fs))
5419 new_ds = (ds & ~BEGIN_SPEC) | fs;
5421 if (/* consumer can 'be in speculative'. */
5422 sched_insn_is_legitimate_for_speculation_p (consumer,
5424 /* Transform it to be in speculative. */
5429 /* Mark the dep as 'be in speculative'. */
5434 dep_def _new_dep, *new_dep = &_new_dep;
5436 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
5437 sd_add_dep (new_dep, false);
5442 /* Generates recovery code for BEGIN speculative INSN. */
5444 begin_speculative_block (rtx insn)
5446 if (TODO_SPEC (insn) & BEGIN_DATA)
5448 if (TODO_SPEC (insn) & BEGIN_CONTROL)
5451 create_check_block_twin (insn, false);
5453 TODO_SPEC (insn) &= ~BEGIN_SPEC;
5456 static void haifa_init_insn (rtx);
5458 /* Generates recovery code for BE_IN speculative INSN. */
5460 add_to_speculative_block (rtx insn)
5463 sd_iterator_def sd_it;
5466 rtx_vec_t priorities_roots;
5468 ts = TODO_SPEC (insn);
5469 gcc_assert (!(ts & ~BE_IN_SPEC));
5471 if (ts & BE_IN_DATA)
5473 if (ts & BE_IN_CONTROL)
5476 TODO_SPEC (insn) &= ~BE_IN_SPEC;
5477 gcc_assert (!TODO_SPEC (insn));
5479 DONE_SPEC (insn) |= ts;
5481 /* First we convert all simple checks to branchy ones. */
5482 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
5483 sd_iterator_cond (&sd_it, &dep);)
5485 rtx check = DEP_PRO (dep);
5487 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
5489 create_check_block_twin (check, true);
5491 /* Restart search. */
5492 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
5495 /* Continue search. */
5496 sd_iterator_next (&sd_it);
5499 priorities_roots = NULL;
5500 clear_priorities (insn, &priorities_roots);
5507 /* Get the first backward dependency of INSN. */
5508 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
5509 if (!sd_iterator_cond (&sd_it, &dep))
5510 /* INSN has no backward dependencies left. */
5513 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
5514 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
5515 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
5517 check = DEP_PRO (dep);
5519 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
5520 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
5522 rec = BLOCK_FOR_INSN (check);
5524 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
5525 haifa_init_insn (twin);
5527 sd_copy_back_deps (twin, insn, true);
5529 if (sched_verbose && spec_info->dump)
5530 /* INSN_BB (insn) isn't determined for twin insns yet.
5531 So we can't use current_sched_info->print_insn. */
5532 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
5533 INSN_UID (twin), rec->index);
5535 twins = alloc_INSN_LIST (twin, twins);
5537 /* Add dependences between TWIN and all appropriate
5538 instructions from REC. */
5539 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
5541 rtx pro = DEP_PRO (dep);
5543 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
5545 /* INSN might have dependencies on instructions from
5546 several recovery blocks. At this iteration we process those
5547 producers that reside in REC. */
5548 if (BLOCK_FOR_INSN (pro) == rec)
5550 dep_def _new_dep, *new_dep = &_new_dep;
5552 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
5553 sd_add_dep (new_dep, false);
5557 process_insn_forw_deps_be_in_spec (insn, twin, ts);
5559 /* Remove all dependencies between INSN and insns in REC. */
5560 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
5561 sd_iterator_cond (&sd_it, &dep);)
5563 rtx pro = DEP_PRO (dep);
5565 if (BLOCK_FOR_INSN (pro) == rec)
5566 sd_delete_dep (sd_it);
5568 sd_iterator_next (&sd_it);
5572 /* We couldn't have added the dependencies between INSN and TWINS earlier
5573 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
5578 twin = XEXP (twins, 0);
5581 dep_def _new_dep, *new_dep = &_new_dep;
5583 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
5584 sd_add_dep (new_dep, false);
5587 twin = XEXP (twins, 1);
5588 free_INSN_LIST_node (twins);
5592 calc_priorities (priorities_roots);
5593 VEC_free (rtx, heap, priorities_roots);
5596 /* Extends the array pointed to by P, zero-filling only the new part. */
5598 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
5600 gcc_assert (new_nmemb >= old_nmemb);
5601 p = XRESIZEVAR (void, p, new_nmemb * size);
5602 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
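/* Usage sketch for xrecalloc: grow an array while relying on the new
   tail being zeroed, much as sched_extend_ready_list does for
   ready_try above.  The toy_* names are hypothetical.  */
#if 0
static char *toy_flags;
static size_t toy_n;

static void
toy_grow (size_t new_n)
{
  toy_flags = (char *) xrecalloc (toy_flags, new_n, toy_n,
                                  sizeof (*toy_flags));
  toy_n = new_n;
}
#endif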
5607 Find fallthru edge from PRED. */
5609 find_fallthru_edge_from (basic_block pred)
5614 succ = pred->next_bb;
5615 gcc_assert (succ->prev_bb == pred);
5617 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
5619 e = find_fallthru_edge (pred->succs);
5623 gcc_assert (e->dest == succ);
5629 e = find_fallthru_edge (succ->preds);
5633 gcc_assert (e->src == pred);
5641 /* Extend per basic block data structures. */
5643 sched_extend_bb (void)
5647 /* The following is done to keep current_sched_info->next_tail non-null. */
5648 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
5649 if (NEXT_INSN (insn) == 0
5652 /* Don't emit a NOTE if it would end up before a BARRIER. */
5653 && !BARRIER_P (NEXT_INSN (insn))))
5655 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
5656 /* Make insn appear outside BB. */
5657 set_block_for_insn (note, NULL);
5658 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
5662 /* Init per basic block data structures. */
5664 sched_init_bbs (void)
5669 /* Initialize BEFORE_RECOVERY variable. */
5671 init_before_recovery (basic_block *before_recovery_ptr)
5676 last = EXIT_BLOCK_PTR->prev_bb;
5677 e = find_fallthru_edge_from (last);
5681 /* We create two basic blocks:
5682 1. Single instruction block is inserted right after E->SRC
5684 2. Empty block right before EXIT_BLOCK.
5685 Between these two blocks recovery blocks will be emitted. */
5687 basic_block single, empty;
5690 /* If the fallthrough edge to exit we've found is from the block we've
5691 created before, don't do anything more. */
5692 if (last == after_recovery)
5695 adding_bb_to_current_region_p = false;
5697 single = sched_create_empty_bb (last);
5698 empty = sched_create_empty_bb (single);
5700 /* Add new blocks to the root loop. */
5701 if (current_loops != NULL)
5703 add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
5704 add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
5707 single->count = last->count;
5708 empty->count = last->count;
5709 single->frequency = last->frequency;
5710 empty->frequency = last->frequency;
5711 BB_COPY_PARTITION (single, last);
5712 BB_COPY_PARTITION (empty, last);
5714 redirect_edge_succ (e, single);
5715 make_single_succ_edge (single, empty, 0);
5716 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
5717 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
5719 label = block_label (empty);
5720 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
5721 JUMP_LABEL (x) = label;
5722 LABEL_NUSES (label)++;
5723 haifa_init_insn (x);
5725 emit_barrier_after (x);
5727 sched_init_only_bb (empty, NULL);
5728 sched_init_only_bb (single, NULL);
5731 adding_bb_to_current_region_p = true;
5732 before_recovery = single;
5733 after_recovery = empty;
5735 if (before_recovery_ptr)
5736 *before_recovery_ptr = before_recovery;
5738 if (sched_verbose >= 2 && spec_info->dump)
5739 fprintf (spec_info->dump,
5740 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
5741 last->index, single->index, empty->index);
5744 before_recovery = last;
5747 /* Returns new recovery block. */
5749 sched_create_recovery_block (basic_block *before_recovery_ptr)
5755 haifa_recovery_bb_recently_added_p = true;
5756 haifa_recovery_bb_ever_added_p = true;
5758 init_before_recovery (before_recovery_ptr);
5760 barrier = get_last_bb_insn (before_recovery);
5761 gcc_assert (BARRIER_P (barrier));
5763 label = emit_label_after (gen_label_rtx (), barrier);
5765 rec = create_basic_block (label, label, before_recovery);
5767 /* A recovery block always ends with an unconditional jump. */
5768 emit_barrier_after (BB_END (rec));
5770 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
5771 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
5773 if (sched_verbose && spec_info->dump)
5774 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
5780 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
5781 and emit necessary jumps. */
5783 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
5784 basic_block second_bb)
5790 /* This is fixing of incoming edge. */
5791 /* ??? Which other flags should be specified? */
5792 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
5793 /* Partition type is the same if it is "unpartitioned". */
5794 edge_flags = EDGE_CROSSING;
5798 make_edge (first_bb, rec, edge_flags);
5799 label = block_label (second_bb);
5800 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
5801 JUMP_LABEL (jump) = label;
5802 LABEL_NUSES (label)++;
5804 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
5805 /* Partition type is the same if it is "unpartitioned". */
5807 /* Rewritten from cfgrtl.c. */
5808 if (flag_reorder_blocks_and_partition
5809 && targetm_common.have_named_sections)
5811 /* We don't need the same note for the check because
5812 any_condjump_p (check) == true. */
5813 add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
5815 edge_flags = EDGE_CROSSING;
5820 make_single_succ_edge (rec, second_bb, edge_flags);
5821 if (dom_info_available_p (CDI_DOMINATORS))
5822 set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
5825 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
5826 INSN is a simple check that should be converted to a branchy one. */
5828 create_check_block_twin (rtx insn, bool mutate_p)
5831 rtx label, check, twin;
5833 sd_iterator_def sd_it;
5835 dep_def _new_dep, *new_dep = &_new_dep;
5838 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
5841 todo_spec = TODO_SPEC (insn);
5844 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
5845 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
5847 todo_spec = CHECK_SPEC (insn);
5850 todo_spec &= SPECULATIVE;
5852 /* Create recovery block. */
5853 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
5855 rec = sched_create_recovery_block (NULL);
5856 label = BB_HEAD (rec);
5860 rec = EXIT_BLOCK_PTR;
5865 check = targetm.sched.gen_spec_check (insn, label, todo_spec);
5867 if (rec != EXIT_BLOCK_PTR)
5869 /* To have mem_reg alive at the beginning of second_bb,
5870 we emit the check BEFORE insn, so that after splitting,
5871 insn will be at the beginning of second_bb, which will
5872 provide us with the correct life information. */
5873 check = emit_jump_insn_before (check, insn);
5874 JUMP_LABEL (check) = label;
5875 LABEL_NUSES (label)++;
5878 check = emit_insn_before (check, insn);
5880 /* Extend data structures. */
5881 haifa_init_insn (check);
5883 /* CHECK is being added to current region. Extend ready list. */
5884 gcc_assert (sched_ready_n_insns != -1);
5885 sched_extend_ready_list (sched_ready_n_insns + 1);
5887 if (current_sched_info->add_remove_insn)
5888 current_sched_info->add_remove_insn (insn, 0);
5890 RECOVERY_BLOCK (check) = rec;
5892 if (sched_verbose && spec_info->dump)
5893 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
5894 (*current_sched_info->print_insn) (check, 0));
5896 gcc_assert (ORIG_PAT (insn));
5898 /* Initialize TWIN (twin is a duplicate of original instruction
5899 in the recovery block). */
5900 if (rec != EXIT_BLOCK_PTR)
5902 sd_iterator_def sd_it;
5905 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
5906 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
5908 struct _dep _dep2, *dep2 = &_dep2;
5910 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
5912 sd_add_dep (dep2, true);
5915 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
5916 haifa_init_insn (twin);
5918 if (sched_verbose && spec_info->dump)
5919 /* INSN_BB (insn) isn't determined for twin insns yet.
5920 So we can't use current_sched_info->print_insn. */
5921 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
5922 INSN_UID (twin), rec->index);
5926 ORIG_PAT (check) = ORIG_PAT (insn);
5927 HAS_INTERNAL_DEP (check) = 1;
5929 /* ??? We probably should change all OUTPUT dependencies to
5933 /* Copy all resolved back dependencies of INSN to TWIN. This will
5934 provide correct value for INSN_TICK (TWIN). */
5935 sd_copy_back_deps (twin, insn, true);
5937 if (rec != EXIT_BLOCK_PTR)
5938 /* In case of branchy check, fix CFG. */
5940 basic_block first_bb, second_bb;
5943 first_bb = BLOCK_FOR_INSN (check);
5944 second_bb = sched_split_block (first_bb, check);
5946 sched_create_recovery_edges (first_bb, rec, second_bb);
5948 sched_init_only_bb (second_bb, first_bb);
5949 sched_init_only_bb (rec, EXIT_BLOCK_PTR);
5951 jump = BB_END (rec);
5952 haifa_init_insn (jump);
5955 /* Move backward dependences from INSN to CHECK and
5956 move forward dependences from INSN to TWIN. */
5958 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
5959 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
5961 rtx pro = DEP_PRO (dep);
5964 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
5965 check --TRUE--> producer ??? or ANTI ???
5966 twin --TRUE--> producer
5967 twin --ANTI--> check
5969 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
5970 check --ANTI--> producer
5971 twin --ANTI--> producer
5972 twin --ANTI--> check
5974 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
5975 check ~~TRUE~~> producer
5976 twin ~~TRUE~~> producer
5977 twin --ANTI--> check */
5979 ds = DEP_STATUS (dep);
5981 if (ds & BEGIN_SPEC)
5983 gcc_assert (!mutate_p);
5987 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
5988 sd_add_dep (new_dep, false);
5990 if (rec != EXIT_BLOCK_PTR)
5992 DEP_CON (new_dep) = twin;
5993 sd_add_dep (new_dep, false);
5997 /* Second, remove backward dependencies of INSN. */
5998 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
5999 sd_iterator_cond (&sd_it, &dep);)
6001 if ((DEP_STATUS (dep) & BEGIN_SPEC)
6003 /* We can delete this dep because we overcome it with
6004 BEGIN_SPECULATION. */
6005 sd_delete_dep (sd_it);
6007 sd_iterator_next (&sd_it);
6010 /* Future Speculations. Determine what BE_IN speculations will be like. */
6013 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
6016 gcc_assert (!DONE_SPEC (insn));
6020 ds_t ts = TODO_SPEC (insn);
6022 DONE_SPEC (insn) = ts & BEGIN_SPEC;
6023 CHECK_SPEC (check) = ts & BEGIN_SPEC;
6025 /* Luckiness of future speculations solely depends upon initial
6026 BEGIN speculation. */
6027 if (ts & BEGIN_DATA)
6028 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
6029 if (ts & BEGIN_CONTROL)
6030 fs = set_dep_weak (fs, BE_IN_CONTROL,
6031 get_dep_weak (ts, BEGIN_CONTROL));
6034 CHECK_SPEC (check) = CHECK_SPEC (insn);
6036 /* Future speculations: call the helper. */
6037 process_insn_forw_deps_be_in_spec (insn, twin, fs);
6039 if (rec != EXIT_BLOCK_PTR)
6041 /* Which types of dependencies should we use here is,
6042 generally, a machine-dependent question... But, for now,
6047 init_dep (new_dep, insn, check, REG_DEP_TRUE);
6048 sd_add_dep (new_dep, false);
6050 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
6051 sd_add_dep (new_dep, false);
6055 if (spec_info->dump)
6056 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
6057 (*current_sched_info->print_insn) (insn, 0));
6059 /* Remove all dependencies of the INSN. */
6061 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
6063 | SD_LIST_RES_BACK));
6064 while (sd_iterator_cond (&sd_it, &dep))
6065 sd_delete_dep (sd_it);
6068 /* If the former check (INSN) was already moved to the ready (or queue)
6069 list, add the new check (CHECK) there too. */
6070 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
6073 /* Remove old check from instruction stream and free its
6075 sched_remove_insn (insn);
6078 init_dep (new_dep, check, twin, REG_DEP_ANTI);
6079 sd_add_dep (new_dep, false);
6083 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
6084 sd_add_dep (new_dep, false);
6088 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
6089 because it'll be done later in add_to_speculative_block. */
6091 rtx_vec_t priorities_roots = NULL;
6093 clear_priorities (twin, &priorities_roots);
6094 calc_priorities (priorities_roots);
6095 VEC_free (rtx, heap, priorities_roots);
6099 /* Removes dependencies between instructions in the recovery block REC
6100 and the usual region instructions. It keeps inner dependences so it
6101 won't be necessary to recompute them. */
6103 fix_recovery_deps (basic_block rec)
6105 rtx note, insn, jump, ready_list = 0;
6106 bitmap_head in_ready;
6109 bitmap_initialize (&in_ready, 0);
6111 /* NOTE - a basic block note. */
6112 note = NEXT_INSN (BB_HEAD (rec));
6113 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
6114 insn = BB_END (rec);
6115 gcc_assert (JUMP_P (insn));
6116 insn = PREV_INSN (insn);
6120 sd_iterator_def sd_it;
6123 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
6124 sd_iterator_cond (&sd_it, &dep);)
6126 rtx consumer = DEP_CON (dep);
6128 if (BLOCK_FOR_INSN (consumer) != rec)
6130 sd_delete_dep (sd_it);
6132 if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
6133 ready_list = alloc_INSN_LIST (consumer, ready_list);
6137 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
6139 sd_iterator_next (&sd_it);
6143 insn = PREV_INSN (insn);
6145 while (insn != note);
6147 bitmap_clear (&in_ready);
6149 /* Try to add instructions to the ready or queue list. */
6150 for (link = ready_list; link; link = XEXP (link, 1))
6151 try_ready (XEXP (link, 0));
6152 free_INSN_LIST_list (&ready_list);
6154 /* Fixing jump's dependences. */
6155 insn = BB_HEAD (rec);
6156 jump = BB_END (rec);
6158 gcc_assert (LABEL_P (insn));
6159 insn = NEXT_INSN (insn);
6161 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
6162 add_jump_dependencies (insn, jump);
6165 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
6166 instruction data. */
6168 haifa_change_pattern (rtx insn, rtx new_pat)
6170 sd_iterator_def sd_it;
6174 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
6177 dfa_clear_single_insn_cache (insn);
6179 sd_it = sd_iterator_start (insn,
6180 SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
6181 while (sd_iterator_cond (&sd_it, &dep))
6183 DEP_COST (dep) = UNKNOWN_DEP_COST;
6184 sd_iterator_next (&sd_it);
6187 /* Invalidate INSN_COST, so it'll be recalculated. */
6188 INSN_COST (insn) = -1;
6189 /* Invalidate INSN_TICK, so it'll be recalculated. */
6190 INSN_TICK (insn) = INVALID_TICK;
6194 /* -1 - can't speculate,
6195 0 - for speculation with REQUEST mode it is OK to use
6196 current instruction pattern,
6197 1 - need to change pattern for *NEW_PAT to be speculative. */
6199 sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
6201 gcc_assert (current_sched_info->flags & DO_SPECULATION
6202 && (request & SPECULATIVE)
6203 && sched_insn_is_legitimate_for_speculation_p (insn, request));
6205 if ((request & spec_info->mask) != request)
6208 if (request & BE_IN_SPEC
6209 && !(request & BEGIN_SPEC))
6212 return targetm.sched.speculate_insn (insn, request, new_pat);
6216 haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
6218 gcc_assert (sched_deps_info->generate_spec_deps
6219 && !IS_SPECULATION_CHECK_P (insn));
6221 if (HAS_INTERNAL_DEP (insn)
6222 || SCHED_GROUP_P (insn))
6225 return sched_speculate_insn (insn, request, new_pat);
6228 /* Print some information about block BB, which starts with HEAD and
6229 ends with TAIL, before scheduling it.
6230 I is zero if the scheduler is about to start with a fresh ebb. */
6232 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
6235 fprintf (sched_dump,
6236 ";; ======================================================\n");
6238 fprintf (sched_dump,
6239 ";; =====================ADVANCING TO=====================\n");
6240 fprintf (sched_dump,
6241 ";; -- basic block %d from %d to %d -- %s reload\n",
6242 bb->index, INSN_UID (head), INSN_UID (tail),
6243 (reload_completed ? "after" : "before"));
6244 fprintf (sched_dump,
6245 ";; ======================================================\n");
6246 fprintf (sched_dump, "\n");
6249 /* Unlink basic block notes and labels and save them, so they
6250 can be easily restored. We unlink basic block notes in EBB to
6251 provide back-compatibility with the previous code, as target backends
6252 assume that there'll be only instructions between
6253 current_sched_info->{head and tail}. We restore these notes as soon
6255 FIRST (LAST) is the first (last) basic block in the ebb.
6256 NB: In the usual case (FIRST == LAST) nothing is really done. */
6258 unlink_bb_notes (basic_block first, basic_block last)
6260 /* We DON'T unlink basic block notes of the first block in the ebb. */
6264 bb_header = XNEWVEC (rtx, last_basic_block);
6266 /* Make a sentinel. */
6267 if (last->next_bb != EXIT_BLOCK_PTR)
6268 bb_header[last->next_bb->index] = 0;
6270 first = first->next_bb;
6273 rtx prev, label, note, next;
6275 label = BB_HEAD (last);
6276 if (LABEL_P (label))
6277 note = NEXT_INSN (label);
6280 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
6282 prev = PREV_INSN (label);
6283 next = NEXT_INSN (note);
6284 gcc_assert (prev && next);
6286 NEXT_INSN (prev) = next;
6287 PREV_INSN (next) = prev;
6289 bb_header[last->index] = label;
6294 last = last->prev_bb;
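/* The unlinking above is a plain splice out of the doubly-linked insn
   chain; restore_bb_notes below performs the inverse splice.  A toy
   version with hypothetical names:  */
#if 0
struct toy_node { struct toy_node *prev, *next; };

/* Detach the inclusive span FIRST..LAST from the chain.  The caller
   must keep a pointer to the span in order to splice it back.  */
static void
toy_unlink_span (struct toy_node *first, struct toy_node *last)
{
  first->prev->next = last->next;
  last->next->prev = first->prev;
}
#endif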
6299 /* Restore basic block notes.
6300 FIRST is the first basic block in the ebb. */
6302 restore_bb_notes (basic_block first)
6307 /* We DON'T unlink basic block notes of the first block in the ebb. */
6308 first = first->next_bb;
6309 /* Remember: FIRST is actually the second basic block in the ebb. */
6311 while (first != EXIT_BLOCK_PTR
6312 && bb_header[first->index])
6314 rtx prev, label, note, next;
6316 label = bb_header[first->index];
6317 prev = PREV_INSN (label);
6318 next = NEXT_INSN (prev);
6320 if (LABEL_P (label))
6321 note = NEXT_INSN (label);
6324 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
6326 bb_header[first->index] = 0;
6328 NEXT_INSN (prev) = label;
6329 NEXT_INSN (note) = next;
6330 PREV_INSN (next) = note;
6332 first = first->next_bb;
6340 Fix CFG after both in- and inter-block movement of
6341 control_flow_insn_p JUMP. */
6343 fix_jump_move (rtx jump)
6345 basic_block bb, jump_bb, jump_bb_next;
6347 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
6348 jump_bb = BLOCK_FOR_INSN (jump);
6349 jump_bb_next = jump_bb->next_bb;
6351 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
6352 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
6354 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
6355 /* if jump_bb_next is not empty. */
6356 BB_END (jump_bb) = BB_END (jump_bb_next);
6358 if (BB_END (bb) != PREV_INSN (jump))
6359 /* Then there are instructions after jump that should be placed
6361 BB_END (jump_bb_next) = BB_END (bb);
6363 /* Otherwise jump_bb_next is empty. */
6364 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
6366 /* To make assertion in move_insn happy. */
6367 BB_END (bb) = PREV_INSN (jump);
6369 update_bb_for_insn (jump_bb_next);
6372 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
6374 move_block_after_check (rtx jump)
6376 basic_block bb, jump_bb, jump_bb_next;
6379 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
6380 jump_bb = BLOCK_FOR_INSN (jump);
6381 jump_bb_next = jump_bb->next_bb;
6383 update_bb_for_insn (jump_bb);
6385 gcc_assert (IS_SPECULATION_CHECK_P (jump)
6386 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
6388 unlink_block (jump_bb_next);
6389 link_block (jump_bb_next, bb);
6393 move_succs (&(jump_bb->succs), bb);
6394 move_succs (&(jump_bb_next->succs), jump_bb);
6395 move_succs (&t, jump_bb_next);
6397 df_mark_solutions_dirty ();
6399 common_sched_info->fix_recovery_cfg
6400 (bb->index, jump_bb->index, jump_bb_next->index);
6403 /* Helper function for move_block_after_check.
6404 This function attaches the edge vector pointed to by SUCCSP to
6407 move_succs (VEC(edge,gc) **succsp, basic_block to)
6412 gcc_assert (to->succs == 0);
6414 to->succs = *succsp;
6416 FOR_EACH_EDGE (e, ei, to->succs)
6422 /* Remove INSN from the instruction stream.
6423 INSN should not have any dependencies left. */
6425 sched_remove_insn (rtx insn)
6427 sd_finish_insn (insn);
6429 change_queue_index (insn, QUEUE_NOWHERE);
6430 current_sched_info->add_remove_insn (insn, 1);
6434 /* Clear the priorities of all instructions that are forward dependent on INSN.
6435 Store in vector pointed to by ROOTS_PTR insns on which priority () should
6436 be invoked to initialize all cleared priorities. */
6438 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
6440 sd_iterator_def sd_it;
6442 bool insn_is_root_p = true;
6444 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
6446 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
6448 rtx pro = DEP_PRO (dep);
6450 if (INSN_PRIORITY_STATUS (pro) >= 0
6451 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
6453 /* If DEP doesn't contribute to priority then INSN itself should
6454 be added to priority roots. */
6455 if (contributes_to_priority_p (dep))
6456 insn_is_root_p = false;
6458 INSN_PRIORITY_STATUS (pro) = -1;
6459 clear_priorities (pro, roots_ptr);
6464 VEC_safe_push (rtx, heap, *roots_ptr, insn);
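/* Sketch of the invalidate-then-recompute idiom implemented by
   clear_priorities and calc_priorities, with hypothetical toy_*
   names: walking backward dependencies marks priorities unknown and
   collects roots; recomputing the roots rebuilds the rest lazily.  */
#if 0
struct toy_insn
{
  int prio_known;
  int n_preds;
  struct toy_insn **preds;  /* producers, i.e. backward dependencies */
};

static void
toy_clear_priorities (struct toy_insn *insn)
{
  int i;
  for (i = 0; i < insn->n_preds; i++)
    if (insn->preds[i]->prio_known)
      {
        insn->preds[i]->prio_known = 0;
        toy_clear_priorities (insn->preds[i]);
      }
}
#endif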
6467 /* Recompute the priorities of instructions whose priorities might have been
6468 changed. ROOTS is a vector of instructions whose priority computation will
6469 trigger initialization of all cleared priorities. */
6471 calc_priorities (rtx_vec_t roots)
6476 FOR_EACH_VEC_ELT (rtx, roots, i, insn)
6481 /* Add dependences between JUMP and other instructions in the recovery
6482 block. INSN is the first insn in the recovery block. */
6484 add_jump_dependencies (rtx insn, rtx jump)
6488 insn = NEXT_INSN (insn);
6492 if (dep_list_size (insn) == 0)
6494 dep_def _new_dep, *new_dep = &_new_dep;
6496 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
6497 sd_add_dep (new_dep, false);
6502 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
6505 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
6507 bb_note (basic_block bb)
6511 note = BB_HEAD (bb);
6513 note = NEXT_INSN (note);
6515 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
6519 /* Extend data structures for logical insn UID. */
6521 sched_extend_luids (void)
6523 int new_luids_max_uid = get_max_uid () + 1;
6525 VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
6528 /* Initialize LUID for INSN. */
6530 sched_init_insn_luid (rtx insn)
6532 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
6537 luid = sched_max_luid;
6538 sched_max_luid += i;
6543 SET_INSN_LUID (insn, luid);
6546 /* Initialize luids for BBS.
6547 The hook common_sched_info->luid_for_non_insn () is used to determine
6548 if notes, labels, etc. need luids. */
6550 sched_init_luids (bb_vec_t bbs)
6555 sched_extend_luids ();
6556 FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
6560 FOR_BB_INSNS (bb, insn)
6561 sched_init_insn_luid (insn);
6567 sched_finish_luids (void)
6569 VEC_free (int, heap, sched_luids);
6573 /* Return logical uid of INSN. Helpful while debugging. */
6575 insn_luid (rtx insn)
6577 return INSN_LUID (insn);
6580 /* Extend per insn data in the target. */
6582 sched_extend_target (void)
6584 if (targetm.sched.h_i_d_extended)
6585 targetm.sched.h_i_d_extended ();
6588 /* Extend global scheduler structures (those that live across calls to
6589 schedule_block) to include information about just emitted INSN. */
6593 int reserve = (get_max_uid () + 1
6594 - VEC_length (haifa_insn_data_def, h_i_d));
6596 && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
6598 VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
6599 3 * get_max_uid () / 2);
6600 sched_extend_target ();
6604 /* Initialize h_i_d entry of the INSN with default values.
6605 Values that are not explicitly initialized here hold zero. */
6607 init_h_i_d (rtx insn)
6609 if (INSN_LUID (insn) > 0)
6611 INSN_COST (insn) = -1;
6612 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
6613 INSN_TICK (insn) = INVALID_TICK;
6614 INSN_EXACT_TICK (insn) = INVALID_TICK;
6615 INTER_TICK (insn) = INVALID_TICK;
6616 TODO_SPEC (insn) = HARD_DEP;
6620 /* Initialize haifa_insn_data for BBS. */
6622 haifa_init_h_i_d (bb_vec_t bbs)
6628 FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
6632 FOR_BB_INSNS (bb, insn)
6637 /* Finalize haifa_insn_data. */
6639 haifa_finish_h_i_d (void)
6642 haifa_insn_data_t data;
6643 struct reg_use_data *use, *next;
6645 FOR_EACH_VEC_ELT (haifa_insn_data_def, h_i_d, i, data)
6647 free (data->reg_pressure);
6648 for (use = data->reg_use_list; use != NULL; use = next)
6650 next = use->next_insn_use;
6654 VEC_free (haifa_insn_data_def, heap, h_i_d);
6657 /* Init data for the new insn INSN. */
6659 haifa_init_insn (rtx insn)
6661 gcc_assert (insn != NULL);
6663 sched_extend_luids ();
6664 sched_init_insn_luid (insn);
6665 sched_extend_target ();
6666 sched_deps_init (false);
6670 if (adding_bb_to_current_region_p)
6672 sd_init_insn (insn);
6674 /* Extend dependency caches by one element. */
6675 extend_dependency_caches (1, false);
6677 if (sched_pressure_p)
6678 init_insn_reg_pressure_info (insn);
6681 /* Init data for the new basic block BB which comes after AFTER. */
6683 haifa_init_only_bb (basic_block bb, basic_block after)
6685 gcc_assert (bb != NULL);
6689 if (common_sched_info->add_block)
6690 /* This changes only data structures of the front-end. */
6691 common_sched_info->add_block (bb, after);
6694 /* A generic version of sched_split_block (). */
6696 sched_split_block_1 (basic_block first_bb, rtx after)
6700 e = split_block (first_bb, after);
6701 gcc_assert (e->src == first_bb);
6703 /* sched_split_block emits note if *check == BB_END. Probably it
6704 is better to rip that note off. */
6709 /* A generic version of sched_create_empty_bb (). */
6711 sched_create_empty_bb_1 (basic_block after)
6713 return create_empty_bb (after);
6716 /* Insert PAT as an INSN into the schedule and update the necessary data
6717 structures to account for it. */
6719 sched_emit_insn (rtx pat)
6721 rtx insn = emit_insn_before (pat, nonscheduled_insns_begin);
6722 haifa_init_insn (insn);
6724 if (current_sched_info->add_remove_insn)
6725 current_sched_info->add_remove_insn (insn, 0);
6727 (*current_sched_info->begin_schedule_ready) (insn);
6728 VEC_safe_push (rtx, heap, scheduled_insns, insn);
6730 last_scheduled_insn = insn;
6734 /* This function returns a candidate satisfying dispatch constraints from
6738 ready_remove_first_dispatch (struct ready_list *ready)
6741 rtx insn = ready_element (ready, 0);
6743 if (ready->n_ready == 1
6744 || INSN_CODE (insn) < 0
6746 || !active_insn_p (insn)
6747 || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
6748 return ready_remove_first (ready);
6750 for (i = 1; i < ready->n_ready; i++)
6752 insn = ready_element (ready, i);
6754 if (INSN_CODE (insn) < 0
6756 || !active_insn_p (insn))
6759 if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
6761 /* Return the i-th element of ready. */
6762 insn = ready_remove (ready, i);
6767 if (targetm.sched.dispatch (NULL_RTX, DISPATCH_VIOLATION))
6768 return ready_remove_first (ready);
6770 for (i = 1; i < ready->n_ready; i++)
6772 insn = ready_element (ready, i);
6774 if (INSN_CODE (insn) < 0
6776 || !active_insn_p (insn))
6779 /* Return i-th element of ready. */
6780 if (targetm.sched.dispatch (insn, IS_CMP))
6781 return ready_remove (ready, i);
6784 return ready_remove_first (ready);
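/* The selection above is a guarded linear scan: take the first ready
   insn that satisfies the dispatch predicate, falling back to the
   plain head of the list.  A toy version with hypothetical names:  */
#if 0
static int
toy_pick (int n_ready, int (*fits_p) (int))
{
  int i;
  for (i = 0; i < n_ready; i++)
    if (fits_p (i))
      return i;
  return 0;  /* fall back to the first element */
}
#endif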
6787 /* Get the number of ready insns in the ready list. */
6790 number_in_ready (void)
6792 return ready.n_ready;
6795 /* Return the I-th element of the ready list. */
6798 get_ready_element (int i)
6800 return ready_element (&ready, i);
6803 #endif /* INSN_SCHEDULING */