1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
5 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 2, or (at your option) any later
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
25 contains the generic parts. The actual entry point for the
26 normal instruction scheduling pass is found in sched-rgn.c.
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
60 1. choose insn with the longest path to end of bb, ties
62 2. choose insn with least contribution to register pressure,
64 3. prefer in-block upon interblock motion, ties broken by
65 4. prefer useful upon speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
68 6. choose insn with the least dependences upon the previously
69 scheduled insn, or finally
70 7. choose the insn which has the most insns dependent on it, then
71 8. choose insn with lowest UID.
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76 memory references. To first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using LOG_LINKS backward
86 dependences. LOG_LINKS are translated into INSN_DEPEND forward
87 dependences for the purpose of forward list scheduling.
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114 exist that inhibit the motion (live registers constraints are not
115 represented by data dependence edges).
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
129 #include "coretypes.h"
134 #include "hard-reg-set.h"
136 #include "function.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
143 #include "sched-int.h"
147 #ifdef INSN_SCHEDULING
149 /* issue_rate is the number of insns that can be scheduled in the same
150 machine cycle. It can be defined in the config/mach/mach.h file,
151 otherwise we set it to 1. */
153 static int issue_rate;
155 /* sched-verbose controls the amount of debugging output the
156 scheduler prints. It is controlled by -fsched-verbose=N:
157 N>0 and no -DSR : the output is directed to stderr.
158 N>=10 will direct the printouts to stderr (regardless of -dSR).
160 N=2: bb's probabilities, detailed ready list info, unit/insn info.
161 N=3: rtl at abort point, control-flow, regions info.
162 N=5: dependences info. */
164 static int sched_verbose_param = 0;
/* Effective verbosity consulted throughout this file; presumably copied
   from sched_verbose_param when a scheduling pass starts -- the
   initialization is not visible in this listing (TODO confirm).  */
165 int sched_verbose = 0;
167 /* Debugging file. All printouts are sent to dump, which is always set,
168 either to stderr, or to the dump listing file (-dRS). */
169 FILE *sched_dump = 0;
171 /* Highest uid before scheduling. */
172 static int old_max_uid;
174 /* fix_sched_param() is called from toplev.c upon detection
175 of the -fsched-verbose=N option. */
/* Record the command-line VAL for the named scheduler PARAM.  Only
   "verbose" is recognized (stored via atoi into sched_verbose_param);
   any other name produces a warning.
   NOTE(review): the return type, braces and the `else' that should
   guard the warning call appear to be elided from this listing.  */
178 fix_sched_param (const char *param, const char *val)
180 if (!strcmp (param, "verbose"))
181 sched_verbose_param = atoi (val);
183 warning (0, "fix_sched_param: unknown param: %s", param);
/* Per-insn scheduling information, indexed by INSN_UID; the accessor
   macros below read individual fields.  */
186 struct haifa_insn_data *h_i_d;
188 #define LINE_NOTE(INSN) (h_i_d[INSN_UID (INSN)].line_note)
189 #define INSN_TICK(INSN) (h_i_d[INSN_UID (INSN)].tick)
190 #define INTER_TICK(INSN) (h_i_d[INSN_UID (INSN)].inter_tick)
192 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
193 then it should be recalculated from scratch. */
194 #define INVALID_TICK (-(max_insn_queue_index + 1))
195 /* The minimal value of the INSN_TICK of an instruction. */
196 #define MIN_TICK (-max_insn_queue_index)
198 /* Vector indexed by basic block number giving the starting line-number
199 for each basic block. */
200 static rtx *line_note_head;
202 /* List of important notes we must keep around. This is a pointer to the
203 last element in the list. */
204 static rtx note_list;
208 /* An instruction is ready to be scheduled when all insns preceding it
209 have already been scheduled. It is important to ensure that all
210 insns which use its result will not be executed until its result
211 has been computed. An insn is maintained in one of four structures:
213 (P) the "Pending" set of insns which cannot be scheduled until
214 their dependencies have been satisfied.
215 (Q) the "Queued" set of insns that can be scheduled when sufficient
217 (R) the "Ready" list of unscheduled, uncommitted insns.
218 (S) the "Scheduled" list of insns.
220 Initially, all insns are either "Pending" or "Ready" depending on
221 whether their dependencies are satisfied.
223 Insns move from the "Ready" list to the "Scheduled" list as they
224 are committed to the schedule. As this occurs, the insns in the
225 "Pending" list have their dependencies satisfied and move to either
226 the "Ready" list or the "Queued" set depending on whether
227 sufficient time has passed to make them ready. As time passes,
228 insns move from the "Queued" set to the "Ready" list.
230 The "Pending" list (P) are the insns in the INSN_DEPEND of the unscheduled
231 insns, i.e., those that are ready, queued, and pending.
232 The "Queued" set (Q) is implemented by the variable `insn_queue'.
233 The "Ready" list (R) is implemented by the variables `ready' and
235 The "Scheduled" list (S) is the new insn chain built by this pass.
237 The transition (R->S) is implemented in the scheduling loop in
238 `schedule_block' when the best insn to schedule is chosen.
239 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
240 insns move from the ready list to the scheduled list.
241 The transition (Q->R) is implemented in 'queue_to_insn' as time
242 passes or stalls are introduced. */
244 /* Implement a circular buffer to delay instructions until sufficient
245 time has passed. For the new pipeline description interface,
246 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
247 than maximal time of instruction execution computed by genattr.c on
248 the base maximal time of functional unit reservations and getting a
249 result. This is the longest time an insn may be queued. */
/* Circular buffer of INSN_LISTs: slot NEXT_Q_AFTER (q_ptr, N) chains
   the insns that become ready N cycles from now (see queue_insn).  */
251 static rtx *insn_queue;
/* Index of the current cycle's slot in insn_queue.  */
252 static int q_ptr = 0;
/* Count of insns currently sitting in the queue; its maintenance is not
   visible in this listing -- presumably updated by queue_insn /
   queue_remove (TODO confirm).  */
253 static int q_size = 0;
254 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
255 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
257 #define QUEUE_SCHEDULED (-3)
258 #define QUEUE_NOWHERE (-2)
259 #define QUEUE_READY (-1)
260 /* QUEUE_SCHEDULED - INSN is scheduled.
261 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
263 QUEUE_READY - INSN is in ready list.
264 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
266 #define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)
268 /* The following variable value refers for all current and future
269 reservations of the processor units. */
272 /* The following variable value is size of memory representing all
273 current and future reservations of the processor units. */
274 static size_t dfa_state_size;
276 /* The following array is used to find the best insn from ready when
277 the automaton pipeline interface is used. */
278 static char *ready_try;
280 /* Describe the ready list of the scheduler.
281 VEC holds space enough for all insns in the current region. VECLEN
282 says how many exactly.
283 FIRST is the index of the element with the highest priority; i.e. the
284 last one in the ready list, since elements are ordered by ascending
286 N_READY determines how many insns are on the ready list. */
296 /* The pointer to the ready list. */
297 static struct ready_list *readyp;
299 /* Scheduling clock. */
300 static int clock_var;
302 static int may_trap_exp (rtx, int);
304 /* Nonzero iff the address is comprised from at most 1 register. */
/* NOTE(review): the macro's first alternative (presumably a plain
   CONSTANT_P (x) test) appears to be elided from this listing; as
   shown, the expansion begins with a dangling `||'.  */
305 #define CONST_BASED_ADDRESS_P(x) \
307 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
308 || (GET_CODE (x) == LO_SUM)) \
309 && (CONSTANT_P (XEXP (x, 0)) \
310 || CONSTANT_P (XEXP (x, 1)))))
312 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
313 as found by analyzing insn's expression. */
/* IS_STORE is nonzero when X appears in a destination context.  The
   result classes are merged with WORST_CLASS while recursing over the
   expression tree.
   NOTE(review): several lines (the opening brace, the declarations of
   `code', `fmt' and `j', the IS_STORE early-out, and some returns)
   appear to be elided from this listing.  */
316 may_trap_exp (rtx x, int is_store)
325 if (code == MEM && may_trap_p (x))
332 /* The insn uses memory: a volatile load. */
333 if (MEM_VOLATILE_P (x))
335 /* An exception-free load. */
338 /* A load with 1 base register, to be further checked. */
339 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
340 return PFREE_CANDIDATE;
341 /* No info on the load, to be further checked. */
342 return PRISKY_CANDIDATE;
347 int i, insn_class = TRAP_FREE;
349 /* Neither store nor load, check if it may cause a trap. */
352 /* Recursive step: walk the insn... */
353 fmt = GET_RTX_FORMAT (code);
/* Visit every sub-rtx (scalar 'e' operands and 'E' vectors alike),
   folding each child's class into insn_class.  */
354 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
358 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
359 insn_class = WORST_CLASS (insn_class, tmp_class);
361 else if (fmt[i] == 'E')
364 for (j = 0; j < XVECLEN (x, i); j++)
366 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
367 insn_class = WORST_CLASS (insn_class, tmp_class);
/* The class can only get worse, so once it is maximal stop early.  */
368 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
372 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
379 /* Classifies insn for the purpose of verifying that it can be
380 moved speculatively, by examining its patterns, returning:
381 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
382 TRAP_FREE: non-load insn.
383 IFREE: load from a globally safe location.
384 IRISKY: volatile load.
385 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for
386 being either PFREE or PRISKY. */
/* NOTE(review): the switch statements dispatching on CODE (the
   CLOBBER/SET/COND_EXEC/TRAP_IF case labels) appear to be elided from
   this listing; only the per-case may_trap_exp calls remain.  */
389 haifa_classify_insn (rtx insn)
391 rtx pat = PATTERN (insn);
392 int tmp_class = TRAP_FREE;
393 int insn_class = TRAP_FREE;
/* A PARALLEL classifies as the worst class among its members.  */
396 if (GET_CODE (pat) == PARALLEL)
398 int i, len = XVECLEN (pat, 0);
400 for (i = len - 1; i >= 0; i--)
402 code = GET_CODE (XVECEXP (pat, 0, i));
406 /* Test if it is a 'store'. */
407 tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
410 /* Test if it is a store. */
411 tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
412 if (tmp_class == TRAP_RISKY)
414 /* Test if it is a load. */
416 = WORST_CLASS (tmp_class,
417 may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
422 tmp_class = TRAP_RISKY;
427 insn_class = WORST_CLASS (insn_class, tmp_class);
/* The class cannot improve; bail out once it is maximal.  */
428 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
/* Non-PARALLEL pattern: classify the single expression the same way.  */
434 code = GET_CODE (pat);
438 /* Test if it is a 'store'. */
439 tmp_class = may_trap_exp (XEXP (pat, 0), 1);
442 /* Test if it is a store. */
443 tmp_class = may_trap_exp (SET_DEST (pat), 1);
444 if (tmp_class == TRAP_RISKY)
446 /* Test if it is a load. */
448 WORST_CLASS (tmp_class,
449 may_trap_exp (SET_SRC (pat), 0));
453 tmp_class = TRAP_RISKY;
457 insn_class = tmp_class;
463 /* Forward declarations. */
/* Priority computation and ready-queue maintenance.  */
465 static int priority (rtx);
466 static int rank_for_schedule (const void *, const void *);
467 static void swap_sort (rtx *, int);
468 static void queue_insn (rtx, int);
469 static int schedule_insn (rtx);
470 static int find_set_reg_weight (rtx);
471 static void find_insn_reg_weight (int);
472 static void adjust_priority (rtx);
473 static void advance_one_cycle (void);
475 /* Notes handling mechanism:
476 =========================
477 Generally, NOTES are saved before scheduling and restored after scheduling.
478 The scheduler distinguishes between three types of notes:
480 (1) LINE_NUMBER notes, generated and used for debugging. Here,
481 before scheduling a region, a pointer to the LINE_NUMBER note is
482 added to the insn following it (in save_line_notes()), and the note
483 is removed (in rm_line_notes() and unlink_line_notes()). After
484 scheduling the region, this pointer is used for regeneration of
485 the LINE_NUMBER note (in restore_line_notes()).
487 (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
488 Before scheduling a region, a pointer to the note is added to the insn
489 that follows or precedes it. (This happens as part of the data dependence
490 computation). After scheduling an insn, the pointer contained in it is
491 used for regenerating the corresponding note (in reemit_notes).
493 (3) All other notes (e.g. INSN_DELETED): Before scheduling a block,
494 these notes are put in a list (in rm_other_notes() and
495 unlink_other_notes ()). After scheduling the block, these notes are
496 inserted at the beginning of the block (in schedule_block()). */
498 static rtx unlink_other_notes (rtx, rtx);
499 static rtx unlink_line_notes (rtx, rtx);
500 static rtx reemit_notes (rtx, rtx);
/* Ready-list primitives (definitions appear later in this file).  */
502 static rtx *ready_lastpos (struct ready_list *);
503 static void ready_add (struct ready_list *, rtx, bool);
504 static void ready_sort (struct ready_list *);
505 static rtx ready_remove_first (struct ready_list *);
507 static void queue_to_ready (struct ready_list *);
508 static int early_queue_to_ready (state_t, struct ready_list *);
510 static void debug_ready_list (struct ready_list *);
512 static rtx move_insn1 (rtx, rtx);
513 static rtx move_insn (rtx, rtx);
515 /* The following functions are used to implement multi-pass scheduling
516 on the first cycle. */
517 static rtx ready_element (struct ready_list *, int);
518 static rtx ready_remove (struct ready_list *, int);
519 static void ready_remove_insn (rtx);
520 static int max_issue (struct ready_list *, int *);
522 static rtx choose_ready (struct ready_list *);
524 static void fix_inter_tick (rtx, rtx);
525 static int fix_tick_ready (rtx);
526 static void change_queue_index (rtx, int);
527 static void resolve_dep (rtx, rtx);
529 #endif /* INSN_SCHEDULING */
531 /* Point to state used for the current scheduling pass. */
532 struct sched_info *current_sched_info;
534 #ifndef INSN_SCHEDULING
/* Stub entry point used when the target defines no scheduling support.
   NOTE(review): its return type and (presumably empty) body are elided
   from this listing.  */
536 schedule_insns (void)
541 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
542 so that insns independent of the last scheduled insn will be preferred
543 over dependent instructions. */
545 static rtx last_scheduled_insn;
547 /* Compute cost of executing INSN given the dependence LINK on the insn USED.
548 This is the number of cycles between instruction issue and
549 instruction results. */
/* INSN's intrinsic latency is computed once and cached in INSN_COST;
   the LINK/USED-specific adjustments are applied on every call.
   NOTE(review): the opening brace, several returns, and some `else'
   branches appear to be elided from this listing.  */
552 insn_cost (rtx insn, rtx link, rtx used)
554 int cost = INSN_COST (insn);
558 /* A USE insn, or something else we don't need to
559 understand. We can't pass these directly to
560 result_ready_cost or insn_default_latency because it will
561 trigger a fatal error for unrecognizable insns. */
562 if (recog_memoized (insn) < 0)
564 INSN_COST (insn) = 0;
/* Recognizable insn: query the DFA description for its latency and
   cache the result.  */
569 cost = insn_default_latency (insn);
573 INSN_COST (insn) = cost;
577 /* In this case estimate cost without caring how insn is used. */
578 if (link == 0 || used == 0)
581 /* A USE insn should never require the value used to be computed.
582 This allows the computation of a function's result and parameter
583 values to overlap the return and call. */
584 if (recog_memoized (used) < 0)
588 if (INSN_CODE (insn) >= 0)
/* Anti-dependence: the consequent is elided here; presumably the cost
   is forced to zero (TODO confirm).  */
590 if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
/* Output dependence: USED may start once INSN's write completes.  */
592 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
594 cost = (insn_default_latency (insn)
595 - insn_default_latency (used));
599 else if (bypass_p (insn))
600 cost = insn_latency (insn, used);
/* Let the target make a final adjustment to the computed cost.  */
603 if (targetm.sched.adjust_cost)
604 cost = targetm.sched.adjust_cost (used, link, insn, cost);
613 /* Compute the priority number for INSN. */
/* Priority is the accumulated insn_cost along the longest INSN_DEPEND
   chain from INSN toward the end of the region, memoized via
   INSN_PRIORITY_KNOWN and computed by recursing on each successor.
   NOTE(review): the function header line (presumably
   `static int priority (rtx insn)') and some locals are elided from
   this listing.  */
623 if (! INSN_PRIORITY_KNOWN (insn))
625 int this_priority = 0;
/* A leaf of the dependence graph: only its own cost matters.  */
627 if (INSN_DEPEND (insn) == 0)
628 this_priority = insn_cost (insn, 0, 0);
631 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
636 next = XEXP (link, 0);
638 /* Critical path is meaningful in block boundaries only. */
639 if (! (*current_sched_info->contributes_to_priority) (next, insn))
642 next_priority = insn_cost (insn, link, next) + priority (next);
643 if (next_priority > this_priority)
644 this_priority = next_priority;
647 INSN_PRIORITY (insn) = this_priority;
648 INSN_PRIORITY_KNOWN (insn) = 1;
651 return INSN_PRIORITY (insn);
654 /* Macros and functions for keeping the priority queue sorted, and
655 dealing with queuing and dequeuing of instructions. */
/* Sort READY in place by rank_for_schedule.  A two-element list is
   handled by the cheap swap_sort; longer lists fall back to qsort.  */
657 #define SCHED_SORT(READY, N_READY) \
658 do { if ((N_READY) == 2) \
659 swap_sort (READY, N_READY); \
660 else if ((N_READY) > 2) \
661 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
664 /* Returns a positive value if x is preferred; returns a negative value if
665 y is preferred. Should never return 0, since that will make the sort
/* qsort comparison callback implementing the tie-breaking order
   described at the top of this file.  X and Y are deliberately
   dereferenced swapped (tmp is *y, tmp2 is *x) so that an ascending
   qsort leaves the highest-priority insn at the end of the vector.
   NOTE(review): the early `return' statements after each comparison
   and the declaration of `link' appear to be elided from this
   listing.  */
669 rank_for_schedule (const void *x, const void *y)
671 rtx tmp = *(const rtx *) y;
672 rtx tmp2 = *(const rtx *) x;
674 int tmp_class, tmp2_class, depend_count1, depend_count2;
675 int val, priority_val, weight_val, info_val;
677 /* The insn in a schedule group should be issued the first. */
678 if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
679 return SCHED_GROUP_P (tmp2) ? 1 : -1;
681 /* Prefer insn with higher priority. */
682 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
687 /* Prefer an insn with smaller contribution to registers-pressure. */
688 if (!reload_completed &&
689 (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
/* Let the scheduling strategy (e.g. interblock) rank the pair.  */
692 info_val = (*current_sched_info->rank) (tmp, tmp2);
696 /* Compare insns based on their relation to the last-scheduled-insn. */
697 if (INSN_P (last_scheduled_insn))
699 /* Classify the instructions into three classes:
700 1) Data dependent on last schedule insn.
701 2) Anti/Output dependent on last scheduled insn.
702 3) Independent of last scheduled insn, or has latency of one.
703 Choose the insn from the highest numbered class if different. */
704 link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
705 if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
707 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
712 link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
713 if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
715 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
720 if ((val = tmp2_class - tmp_class))
724 /* Prefer the insn which has more later insns that depend on it.
725 This gives the scheduler more freedom when scheduling later
726 instructions at the expense of added register pressure. */
728 for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
732 for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
735 val = depend_count2 - depend_count1;
739 /* If insns are equally good, sort by INSN_LUID (original insn order),
740 so that we make the sort stable. This minimizes instruction movement,
741 thus minimizing sched's effect on debugging and cross-jumping. */
742 return INSN_LUID (tmp) - INSN_LUID (tmp2);
745 /* Resort the array A in which only element at index N may be out of order. */
/* Classic single-element insertion step.
   NOTE(review): the declarations of `i' and `insn' and the loop body
   that shifts elements appear to be elided from this listing.  */
747 HAIFA_INLINE static void
748 swap_sort (rtx *a, int n)
753 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
761 /* Add INSN to the insn queue so that it can be executed at least
762 N_CYCLES after the currently executing insn. Preserve insns
763 chain for debugging purposes. */
765 HAIFA_INLINE static void
766 queue_insn (rtx insn, int n_cycles)
/* Target slot in the circular queue is N_CYCLES ahead of q_ptr;
   cons INSN onto the front of that slot's INSN_LIST.  */
768 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
769 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
/* N_CYCLES must fit in the queue; it is bounded by the DFA's maximal
   insn latency.  */
771 gcc_assert (n_cycles <= max_insn_queue_index);
773 insn_queue[next_q] = link;
776 if (sched_verbose >= 2)
778 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
779 (*current_sched_info->print_insn) (insn, 0));
781 fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
/* Record INSN's new location so change_queue_index et al. can find it.  */
784 QUEUE_INDEX (insn) = next_q;
787 /* Remove INSN from queue. */
/* INSN must currently be queued (QUEUE_INDEX >= 0); afterwards it
   belongs to no structure (QUEUE_NOWHERE).  */
789 queue_remove (rtx insn)
791 gcc_assert (QUEUE_INDEX (insn) >= 0);
792 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
794 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
797 /* Return a pointer to the bottom of the ready list, i.e. the insn
798 with the lowest priority. */
/* The list occupies vec[first - n_ready + 1 .. first], ordered by
   ascending priority; this returns the address of the lowest one.  */
800 HAIFA_INLINE static rtx *
801 ready_lastpos (struct ready_list *ready)
803 gcc_assert (ready->n_ready >= 1);
804 return ready->vec + ready->first - ready->n_ready + 1;
807 /* Add an element INSN to the ready list so that it ends up with the
808 lowest/highest priority depending on FIRST_P. */
/* When inserting at the bottom (lowest priority) the existing block is
   slid to the top of the vector if no room remains below; when
   inserting at the top it is slid down by one if `first' is already at
   the vector's last slot.  NOTE(review): the braces delimiting the two
   FIRST_P branches and the n_ready increment appear to be elided from
   this listing.  */
810 HAIFA_INLINE static void
811 ready_add (struct ready_list *ready, rtx insn, bool first_p)
815 if (ready->first == ready->n_ready)
817 memmove (ready->vec + ready->veclen - ready->n_ready,
818 ready_lastpos (ready),
819 ready->n_ready * sizeof (rtx))
820 ready->first = ready->veclen - 1;
822 ready->vec[ready->first - ready->n_ready] = insn;
826 if (ready->first == ready->veclen - 1)
829 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
830 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
831 ready_lastpos (ready),
832 ready->n_ready * sizeof (rtx));
833 ready->first = ready->veclen - 2;
835 ready->vec[++(ready->first)] = insn;
/* INSN must not already be on the ready list; mark its new home.  */
840 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
841 QUEUE_INDEX (insn) = QUEUE_READY;
844 /* Remove the element with the highest priority from the ready list and
/* The highest-priority insn lives at index `first'; pop it and shrink
   the list.  NOTE(review): the n_ready decrement and the final return
   of `t' appear to be elided from this listing.  */
847 HAIFA_INLINE static rtx
848 ready_remove_first (struct ready_list *ready)
852 gcc_assert (ready->n_ready);
853 t = ready->vec[ready->first--];
855 /* If the queue becomes empty, reset it. */
856 if (ready->n_ready == 0)
857 ready->first = ready->veclen - 1;
/* T leaves the ready list and is, for now, in no structure.  */
859 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
860 QUEUE_INDEX (t) = QUEUE_NOWHERE;
865 /* The following code implements multi-pass scheduling for the first
866 cycle. In other words, we will try to choose ready insn which
867 permits to start maximum number of insns on the same cycle. */
869 /* Return a pointer to the element INDEX from the ready. INDEX for
870 insn with the highest priority is 0, and the lowest priority has
/* Pure accessor: the element is not removed from the list.  */
873 HAIFA_INLINE static rtx
874 ready_element (struct ready_list *ready, int index)
876 gcc_assert (ready->n_ready && index < ready->n_ready);
878 return ready->vec[ready->first - index];
881 /* Remove the element INDEX from the ready list and return it. INDEX
882 for insn with the highest priority is 0, and the lowest priority
/* INDEX 0 delegates to the cheaper ready_remove_first; otherwise the
   elements above INDEX are shifted down over the removed slot.
   NOTE(review): the declarations of `t' and `i', the n_ready
   decrement, and the final return appear to be elided from this
   listing.  */
885 HAIFA_INLINE static rtx
886 ready_remove (struct ready_list *ready, int index)
892 return ready_remove_first (ready);
893 gcc_assert (ready->n_ready && index < ready->n_ready);
894 t = ready->vec[ready->first - index];
896 for (i = index; i < ready->n_ready; i++)
897 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
898 QUEUE_INDEX (t) = QUEUE_NOWHERE;
902 /* Remove INSN from the ready list. */
/* Linear search through the global ready list `readyp'; INSN is
   expected to be present (the search caps at n_ready).  */
904 ready_remove_insn (rtx insn)
908 for (i = 0; i < readyp->n_ready; i++)
909 if (ready_element (readyp, i) == insn)
911 ready_remove (readyp, i);
917 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
/* Thin wrapper: hand the in-memory span of the list to SCHED_SORT.  */
920 HAIFA_INLINE static void
921 ready_sort (struct ready_list *ready)
923 rtx *first = ready_lastpos (ready);
924 SCHED_SORT (first, ready->n_ready);
927 /* PREV is an insn that is ready to execute. Adjust its priority if that
928 will help shorten or lengthen register lifetimes as appropriate. Also
929 provide a hook for the target to tweak itself. */
931 HAIFA_INLINE static void
932 adjust_priority (rtx prev)
934 /* ??? There used to be code here to try and estimate how an insn
935 affected register lifetimes, but it did it by looking at REG_DEAD
936 notes, which we removed in schedule_region. Nor did it try to
937 take into account register pressure or anything useful like that.
939 Revisit when we have a machine model to work with and not before. */
/* Only the target hook remains; it may rewrite PREV's priority.  */
941 if (targetm.sched.adjust_priority)
942 INSN_PRIORITY (prev) =
943 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
946 /* Advance time on one cycle. */
/* Feed the DFA its optional pre-cycle insn, step the automaton one
   cycle (the NULL transition), then feed the optional post-cycle
   insn.  curr_state holds the automaton state.  */
947 HAIFA_INLINE static void
948 advance_one_cycle (void)
950 if (targetm.sched.dfa_pre_cycle_insn)
951 state_transition (curr_state,
952 targetm.sched.dfa_pre_cycle_insn ());
954 state_transition (curr_state, NULL);
956 if (targetm.sched.dfa_post_cycle_insn)
957 state_transition (curr_state,
958 targetm.sched.dfa_post_cycle_insn ());
961 /* Clock at which the previous instruction was issued. */
962 static int last_clock_var;
964 /* INSN is the "currently executing insn". Launch each insn which was
965 waiting on INSN. READY is the ready list which contains the insns
966 that are ready to fire. CLOCK is the current cycle. The function
967 returns necessary cycle advance after issuing the insn (it is not
968 zero for insns in a schedule group). */
/* NOTE(review): the header comment above looks stale -- the function
   now takes only INSN (READY and CLOCK are globals here: readyp,
   clock_var).  The declarations of `link', `advance' and `buf', and
   several `else' lines, appear to be elided from this listing.  */
971 schedule_insn (rtx insn)
/* Optional trace of the issued insn and its DFA reservation.  */
976 if (sched_verbose >= 1)
980 print_insn (buf, insn, 0);
982 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
984 if (recog_memoized (insn) < 0)
985 fprintf (sched_dump, "nothing");
987 print_reservation (sched_dump, insn);
988 fputc ('\n', sched_dump);
991 /* Scheduling instruction should have all its dependencies resolved and
992 should have been removed from the ready list. */
993 gcc_assert (INSN_DEP_COUNT (insn) == 0);
994 gcc_assert (!LOG_LINKS (insn));
995 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
997 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
999 /* Now we can free RESOLVED_DEPS list. */
1000 if (current_sched_info->flags & USE_DEPS_LIST)
1001 free_DEPS_LIST_list (&RESOLVED_DEPS (insn));
1003 free_INSN_LIST_list (&RESOLVED_DEPS (insn));
1005 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
1006 if (INSN_TICK (insn) > clock_var)
1007 /* INSN has been prematurely moved from the queue to the ready list.
1008 This is possible only if following flag is set. */
1009 gcc_assert (flag_sched_stalled_insns);
1011 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1012 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1013 INSN_TICK (insn) = clock_var;
1015 /* Update dependent instructions. */
1016 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
1019 rtx next = XEXP (link, 0);
/* Mark the INSN->NEXT dependence resolved, then see whether NEXT can
   become ready; try_ready returns its effective readiness cost.  */
1021 resolve_dep (next, insn);
1023 effective_cost = try_ready (next);
/* A stalled member of INSN's schedule group forces the main loop to
   advance the clock by at least its cost.  */
1025 if (effective_cost >= 0
1026 && SCHED_GROUP_P (next)
1027 && advance < effective_cost)
1028 advance = effective_cost;
1031 /* Annotate the instruction with issue information -- TImode
1032 indicates that the instruction is expected not to be able
1033 to issue on the same cycle as the previous insn. A machine
1034 may use this information to decide how the instruction should
1037 && GET_CODE (PATTERN (insn)) != USE
1038 && GET_CODE (PATTERN (insn)) != CLOBBER)
1040 if (reload_completed)
1041 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
1042 last_clock_var = clock_var;
1048 /* Functions for handling of notes. */
1050 /* Delete notes beginning with INSN and put them in the chain
1051 of notes ended by NOTE_LIST.
1052 Returns the insn following the notes. */
/* Walks forward from INSN while notes remain (stopping at TAIL),
   splicing each note out of the insn chain; notes other than
   basic-block and EH-region markers are appended to note_list.
   NOTE(review): the NULL guards around the chain updates, the
   note_list advance, and the final return appear to be elided from
   this listing.  */
1055 unlink_other_notes (rtx insn, rtx tail)
1057 rtx prev = PREV_INSN (insn);
1059 while (insn != tail && NOTE_P (insn))
1061 rtx next = NEXT_INSN (insn);
1062 /* Delete the note from its current position. */
1064 NEXT_INSN (prev) = next;
1066 PREV_INSN (next) = prev;
1068 /* See sched_analyze to see how these are handled. */
1069 if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK
1070 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
1071 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
1073 /* Insert the note at the end of the notes list. */
1074 PREV_INSN (insn) = note_list
1076 NEXT_INSN (note_list) = insn;
1085 /* Delete line notes beginning with INSN. Record line-number notes so
1086 they can be reused. Returns the insn following the notes. */
/* Like unlink_other_notes, but only positive line-number notes are
   unlinked (and then remembered through LINE_NOTE so
   restore_line_notes can re-emit them).  NOTE(review): the NULL
   guards, loop advance and final return appear to be elided from this
   listing.  */
1089 unlink_line_notes (rtx insn, rtx tail)
1091 rtx prev = PREV_INSN (insn);
1093 while (insn != tail && NOTE_P (insn))
1095 rtx next = NEXT_INSN (insn);
/* Only true line-number notes (positive NOTE_LINE_NUMBER) and only
   when debug info is wanted.  */
1097 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
1099 /* Delete the note from its current position. */
1101 NEXT_INSN (prev) = next;
1103 PREV_INSN (next) = prev;
1105 /* Record line-number notes so they can be reused. */
1106 LINE_NOTE (insn) = insn;
1116 /* Return the head and tail pointers of BB. */
/* B is the index of the basic block; *HEADP/*TAILP receive the first and
   last "real" insns (notes and leading labels excluded).  */
1119 get_block_head_tail (int b, rtx *headp, rtx *tailp)
1121 /* HEAD and TAIL delimit the basic block being scheduled. */
1122 rtx head = BB_HEAD (BASIC_BLOCK (b));
1123 rtx tail = BB_END (BASIC_BLOCK (b));
1125 /* Don't include any notes or labels at the beginning of the
1126 basic block, or notes at the ends of basic blocks. */
/* Narrow [head, tail] from both ends until a non-note, non-label insn is
   reached on each side (or the two pointers meet).  */
1127 while (head != tail)
1130 head = NEXT_INSN (head);
1131 else if (NOTE_P (tail))
1132 tail = PREV_INSN (tail);
1133 else if (LABEL_P (head))
1134 head = NEXT_INSN (head);
1143 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1146 no_real_insns_p (rtx head, rtx tail)
/* Scan the inclusive range; any insn that is neither a note nor a label
   counts as "real" and makes the predicate false.  */
1148 while (head != NEXT_INSN (tail))
1150 if (!NOTE_P (head) && !LABEL_P (head))
1152 head = NEXT_INSN (head);
1157 /* Delete line notes from one block. Save them so they can be later restored
1158 (in restore_line_notes). HEAD and TAIL are the boundaries of the
1159 block in which notes should be processed. */
1162 rm_line_notes (rtx head, rtx tail)
1167 next_tail = NEXT_INSN (tail);
1168 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1172 /* Farm out notes, and maybe save them in NOTE_LIST.
1173 This is needed to keep the debugger from
1174 getting completely deranged. */
/* unlink_line_notes returns the insn following the removed run, so the
   loop variable is advanced past all deleted notes at once.  */
1178 insn = unlink_line_notes (insn, next_tail);
/* Sanity check: the removed notes must lie strictly inside the block.  */
1180 gcc_assert (prev != tail && prev != head && insn != next_tail);
1185 /* Save line number notes for each insn in block B. HEAD and TAIL are
1186 the boundaries of the block in which notes should be processed. */
1189 save_line_notes (int b, rtx head, rtx tail)
1193 /* We must use the true line number for the first insn in the block
1194 that was computed and saved at the start of this pass. We can't
1195 use the current line number, because scheduling of the previous
1196 block may have changed the current line number. */
/* line_note_head[] was filled in at pass initialization (sched_init).  */
1198 rtx line = line_note_head[b];
1201 next_tail = NEXT_INSN (tail);
/* Record, for every insn, the line note in effect just before it; real
   source-line notes (positive NOTE_LINE_NUMBER) update the current note.  */
1203 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1204 if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
1207 LINE_NOTE (insn) = line;
1210 /* After a block was scheduled, insert line notes into the insns list.
1211 HEAD and TAIL are the boundaries of the block in which notes should
1215 restore_line_notes (rtx head, rtx tail)
1217 rtx line, note, prev, new;
1218 int added_notes = 0;
1219 rtx next_tail, insn;
1222 next_tail = NEXT_INSN (tail);
1224 /* Determine the current line-number. We want to know the current
1225 line number of the first insn of the block here, in case it is
1226 different from the true line number that was saved earlier. If
1227 different, then we need a line number note before the first insn
1228 of this block. If it happens to be the same, then we don't want to
1229 emit another line number note here. */
/* Search backwards from HEAD for the nearest source-line note.  */
1230 for (line = head; line; line = PREV_INSN (line))
1231 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
1234 /* Walk the insns keeping track of the current line-number and inserting
1235 the line-number notes as needed. */
1236 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1237 if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
1239 /* This used to emit line number notes before every non-deleted note.
1240 However, this confuses a debugger, because line notes not separated
1241 by real instructions all end up at the same address. I can find no
1242 use for line number notes before other notes, so none are emitted. */
1243 else if (!NOTE_P (insn)
1244 && INSN_UID (insn) < old_max_uid
1245 && (note = LINE_NOTE (insn)) != 0
/* The saved note differs from the current line: a note must be (re)placed
   before INSN.  Comparison is by location or by line+file depending on
   whether mapped locations are in use.  */
1248 #ifdef USE_MAPPED_LOCATION
1249 || NOTE_SOURCE_LOCATION (note) != NOTE_SOURCE_LOCATION (line)
1251 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
1252 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)
1257 prev = PREV_INSN (insn);
1258 if (LINE_NOTE (note))
1260 /* Re-use the original line-number note. */
1261 LINE_NOTE (note) = 0;
1262 PREV_INSN (note) = prev;
1263 NEXT_INSN (prev) = note;
1264 PREV_INSN (insn) = note;
1265 NEXT_INSN (note) = insn;
/* Otherwise a fresh note is emitted (added_notes path).  */
1270 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
1271 #ifndef USE_MAPPED_LOCATION
1272 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
1276 if (sched_verbose && added_notes)
1277 fprintf (sched_dump, ";; added %d line-number notes\n", added_notes);
1280 /* After scheduling the function, delete redundant line notes from the
1284 rm_redundant_line_notes (void)
1287 rtx insn = get_insns ();
1288 int active_insn = 0;
1291 /* Walk the insns deleting redundant line-number notes. Many of these
1292 are already present. The remainder tend to occur at basic
1293 block boundaries. */
/* The walk is backwards (from last insn to first): active_insn counts
   real insns seen so far AFTER the current note in insn order.  */
1294 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
1295 if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
1297 /* If there are no active insns following, INSN is redundant. */
1298 if (active_insn == 0)
1301 SET_INSN_DELETED (insn);
1303 /* If the line number is unchanged, LINE is redundant. */
1305 #ifdef USE_MAPPED_LOCATION
1306 && NOTE_SOURCE_LOCATION (line) == NOTE_SOURCE_LOCATION (insn)
1308 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
1309 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn)
1314 SET_INSN_DELETED (line);
/* Deleted-insn notes and naked USE/CLOBBER insns do not count as
   "active" — they generate no code at their address.  */
1321 else if (!((NOTE_P (insn)
1322 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
1323 || (NONJUMP_INSN_P (insn)
1324 && (GET_CODE (PATTERN (insn)) == USE
1325 || GET_CODE (PATTERN (insn)) == CLOBBER))))
1328 if (sched_verbose && notes)
1329 fprintf (sched_dump, ";; deleted %d line-number notes\n", notes);
1332 /* Delete notes between HEAD and TAIL and put them in the chain
1333 of notes ended by NOTE_LIST. */
1336 rm_other_notes (rtx head, rtx tail)
/* Degenerate block: a single non-insn — nothing to do.  */
1342 if (head == tail && (! INSN_P (head)))
1345 next_tail = NEXT_INSN (tail);
1346 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1350 /* Farm out notes, and maybe save them in NOTE_LIST.
1351 This is needed to keep the debugger from
1352 getting completely deranged. */
/* unlink_other_notes returns the insn following the removed notes.  */
1357 insn = unlink_other_notes (insn, next_tail);
1359 gcc_assert (prev != tail && prev != head && insn != next_tail);
1364 /* Functions for computation of registers live/usage info. */
1366 /* This function looks for a new register being defined.
1367 If the destination register is already used by the source,
1368 a new register is not needed. */
/* X is a single SET/CLOBBER/etc. pattern element; the return value is the
   register-pressure weight contribution of X (accumulated by caller).  */
1371 find_set_reg_weight (rtx x)
1373 if (GET_CODE (x) == CLOBBER
1374 && register_operand (SET_DEST (x), VOIDmode))
1376 if (GET_CODE (x) == SET
1377 && register_operand (SET_DEST (x), VOIDmode))
1379 if (REG_P (SET_DEST (x)))
/* If the destination reg also appears in the source, no new register
   becomes live — see the head comment.  */
1381 if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
1391 /* Calculate INSN_REG_WEIGHT for all insns of a block. */
/* B is the basic-block index.  Weight = births (SETs of new regs) minus
   deaths (REG_DEAD/REG_UNUSED notes) per insn.  */
1394 find_insn_reg_weight (int b)
1396 rtx insn, next_tail, head, tail;
1398 get_block_head_tail (b, &head, &tail);
1399 next_tail = NEXT_INSN (tail);
1401 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1406 /* Handle register life information. */
1407 if (! INSN_P (insn))
1410 /* Increment weight for each register born here. */
1412 reg_weight += find_set_reg_weight (x);
/* A PARALLEL may contain several SET/CLOBBER elements; weigh each.  */
1413 if (GET_CODE (x) == PARALLEL)
1416 for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
1418 x = XVECEXP (PATTERN (insn), 0, j);
1419 reg_weight += find_set_reg_weight (x);
1422 /* Decrement weight for each register that dies here. */
1423 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
1425 if (REG_NOTE_KIND (x) == REG_DEAD
1426 || REG_NOTE_KIND (x) == REG_UNUSED)
1430 INSN_REG_WEIGHT (insn) = reg_weight;
1434 /* Move insns that became ready to fire from queue to ready list. */
1437 queue_to_ready (struct ready_list *ready)
/* Advance the circular queue pointer to the current cycle's bucket.  */
1442 q_ptr = NEXT_Q (q_ptr);
1444 /* Add all pending insns that can be scheduled without stalls to the
1446 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
1448 insn = XEXP (link, 0);
1451 if (sched_verbose >= 2)
1452 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1453 (*current_sched_info->print_insn) (insn, 0));
1455 ready_add (ready, insn, false);
1456 if (sched_verbose >= 2)
1457 fprintf (sched_dump, "moving to ready without stalls\n");
1459 free_INSN_LIST_list (&insn_queue[q_ptr]);
1461 /* If there are no ready insns, stall until one is ready and add all
1462 of the pending insns at that point to the ready list. */
1463 if (ready->n_ready == 0)
/* Probe successive future buckets until a non-empty one is found.  */
1467 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
1469 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1471 for (; link; link = XEXP (link, 1))
1473 insn = XEXP (link, 0);
1476 if (sched_verbose >= 2)
1477 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1478 (*current_sched_info->print_insn) (insn, 0));
1480 ready_add (ready, insn, false);
1481 if (sched_verbose >= 2)
1482 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
1484 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
1486 advance_one_cycle ();
1491 advance_one_cycle ();
/* Account for the stalled cycles in the queue pointer and the clock.  */
1494 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
1495 clock_var += stalls;
1499 /* Used by early_queue_to_ready. Determines whether it is "ok" to
1500 prematurely move INSN from the queue to the ready list. Currently,
1501 if a target defines the hook 'is_costly_dependence', this function
1502 uses the hook to check whether there exist any dependences which are
1503 considered costly by the target, between INSN and other insns that
1504 have already been scheduled. Dependences are checked up to Y cycles
1505 back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
1506 controlling this value.
1507 (Other considerations could be taken into account instead (or in
1508 addition) depending on user flags and target hooks. */
1511 ok_for_early_queue_removal (rtx insn)
1514 rtx prev_insn = last_scheduled_insn;
1516 if (targetm.sched.is_costly_dependence)
/* Scan backwards through already-scheduled insns, one dispatch group
   (TImode boundary) per n_cycles iteration.  */
1518 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
1520 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
1525 if (!NOTE_P (prev_insn))
/* Is INSN data-dependent on PREV_INSN?  If so ask the target whether
   the dependence is costly at this distance.  */
1527 dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn));
1530 dep_cost = insn_cost (prev_insn, dep_link, insn) ;
1531 if (targetm.sched.is_costly_dependence (prev_insn, insn,
1533 flag_sched_stalled_insns_dep - n_cycles))
1538 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
1544 prev_insn = PREV_INSN (prev_insn);
1552 /* Remove insns from the queue, before they become "ready" with respect
1553 to FU latency considerations. */
1556 early_queue_to_ready (state_t state, struct ready_list *ready)
1564 state_t temp_state = alloca (dfa_state_size);
1566 int insns_removed = 0;
1569 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
1572 X == 0: There is no limit on how many queued insns can be removed
1573 prematurely. (flag_sched_stalled_insns = -1).
1575 X >= 1: Only X queued insns can be removed prematurely in each
1576 invocation. (flag_sched_stalled_insns = X).
1578 Otherwise: Early queue removal is disabled.
1579 (flag_sched_stalled_insns = 0)
/* Feature disabled — bail out immediately.  */
1582 if (! flag_sched_stalled_insns)
1585 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
1587 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1589 if (sched_verbose > 6)
1590 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
1595 next_link = XEXP (link, 1);
1596 insn = XEXP (link, 0);
1597 if (insn && sched_verbose > 6)
1598 print_rtl_single (sched_dump, insn);
/* Probe the DFA on a scratch copy of STATE so the real state is
   untouched by this what-if query.  */
1600 memcpy (temp_state, state, dfa_state_size);
1601 if (recog_memoized (insn) < 0)
1602 /* non-negative to indicate that it's not ready
1603 to avoid infinite Q->R->Q->R... */
1606 cost = state_transition (temp_state, insn);
1608 if (sched_verbose >= 6)
1609 fprintf (sched_dump, "transition cost = %d\n", cost);
1611 move_to_ready = false;
1614 move_to_ready = ok_for_early_queue_removal (insn);
1615 if (move_to_ready == true)
1617 /* move from Q to R */
1619 ready_add (ready, insn, false);
/* Unlink LINK from the queue bucket — middle vs. head of list.  */
1622 XEXP (prev_link, 1) = next_link;
1624 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
1626 free_INSN_LIST_node (link);
1628 if (sched_verbose >= 2)
1629 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
1630 (*current_sched_info->print_insn) (insn, 0));
1633 if (insns_removed == flag_sched_stalled_insns)
1634 /* Remove no more than flag_sched_stalled_insns insns
1635 from Q at a time. */
1636 return insns_removed;
1640 if (move_to_ready == false)
1647 } /* for stalls.. */
1649 return insns_removed;
1653 /* Print the ready list for debugging purposes. Callable from debugger. */
1656 debug_ready_list (struct ready_list *ready)
1661 if (ready->n_ready == 0)
1663 fprintf (sched_dump, "\n");
/* ready_lastpos yields a pointer to the n_ready insns in priority order.  */
1667 p = ready_lastpos (ready);
1668 for (i = 0; i < ready->n_ready; i++)
1669 fprintf (sched_dump, " %s", (*current_sched_info->print_insn) (p[i], 0));
1670 fprintf (sched_dump, "\n");
1673 /* move_insn1: Remove INSN from insn chain, and link it after LAST insn. */
1676 move_insn1 (rtx insn, rtx last)
/* Unlink INSN from its current position...  */
1678 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
1679 PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
/* ...then splice it into the chain immediately after LAST.  */
1681 NEXT_INSN (insn) = NEXT_INSN (last);
1682 PREV_INSN (NEXT_INSN (last)) = insn;
1684 NEXT_INSN (last) = insn;
1685 PREV_INSN (insn) = last;
1690 /* Search INSN for REG_SAVE_NOTE note pairs for
1691 NOTE_INSN_EHREGION_{BEG,END}; and convert them back into
1692 NOTEs. The REG_SAVE_NOTE note following first one is contains the
1693 saved value for NOTE_BLOCK_NUMBER which is useful for
1694 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. LAST is the last instruction
1695 output by the instruction scheduler. Return the new value of LAST. */
1698 reemit_notes (rtx insn, rtx last)
1703 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1705 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
/* The note operand encodes the original NOTE_INSN_* kind as an integer.  */
1707 enum insn_note note_type = INTVAL (XEXP (note, 0));
1709 last = emit_note_before (note_type, last);
/* Consume the REG_SAVE_NOTE once re-emitted.  */
1710 remove_note (insn, note);
1716 /* Move INSN. Reemit notes if needed.
1718 Return the last insn emitted by the scheduler, which is the
1719 return value from the first call to reemit_notes. */
1722 move_insn (rtx insn, rtx last)
1726 move_insn1 (insn, last);
1728 /* If this is the first call to reemit_notes, then record
1729 its return value. */
1730 if (retval == NULL_RTX)
1731 retval = reemit_notes (insn, insn);
1733 reemit_notes (insn, insn);
/* Scheduling has resolved the group; clear the grouping flag.  */
1735 SCHED_GROUP_P (insn) = 0;
1740 /* The following structure describe an entry of the stack of choices. */
1743 /* Ordinal number of the issued insn in the ready queue. */
1745 /* The number of the rest insns whose issues we should try. */
1747 /* The number of issued essential insns. */
1749 /* State after issuing the insn. */
1753 /* The following array is used to implement a stack of choices used in
1754 function max_issue. */
1755 static struct choice_entry *choice_stack;
1757 /* The following variable value is number of essential insns issued on
1758 the current cycle. An insn is essential one if it changes the
1759 processors state. */
1760 static int cycle_issued_insns;
1762 /* The following variable value is maximal number of tries of issuing
1763 insns for the first cycle multipass insn scheduling. We define
1764 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
1765 need this constraint if all real insns (with non-negative codes)
1766 had reservations because in this case the algorithm complexity is
1767 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions
1768 might be incomplete and such insn might occur. For such
1769 descriptions, the complexity of algorithm (without the constraint)
1770 could achieve DFA_LOOKAHEAD ** N , where N is the queue length. */
1771 static int max_lookahead_tries;
1773 /* The following value is value of hook
1774 `first_cycle_multipass_dfa_lookahead' at the last call of
/* Cached to detect when max_lookahead_tries must be recomputed.  */
1776 static int cached_first_cycle_multipass_dfa_lookahead = 0;
1778 /* The following value is value of `issue_rate' at the last call of
1780 static int cached_issue_rate = 0;
1782 /* The following function returns maximal (or close to maximal) number
1783 of insns which can be issued on the same cycle and one of which
1784 insns is insns with the best rank (the first insn in READY). To
1785 make this function tries different samples of ready insns. READY
1786 is current queue `ready'. Global array READY_TRY reflects what
1787 insns are already issued in this try. INDEX will contain index
1788 of the best insn in READY. The following function is used only for
1789 first cycle multipass scheduling. */
1791 max_issue (struct ready_list *ready, int *index)
1793 int n, i, all, n_ready, best, delay, tries_num;
1794 struct choice_entry *top;
/* Seed the bottom of the choice stack with the current DFA state.  */
1798 memcpy (choice_stack->state, curr_state, dfa_state_size);
1800 top->rest = cached_first_cycle_multipass_dfa_lookahead;
1802 n_ready = ready->n_ready;
1803 for (all = i = 0; i < n_ready; i++)
/* Backtracking search over subsets of the ready list.  When the lookahead
   budget for this level is spent or the list is exhausted, pop a level.  */
1810 if (top->rest == 0 || i >= n_ready)
1812 if (top == choice_stack)
1814 if (best < top - choice_stack && ready_try [0])
/* Stack depth == number of insns issued in this sample; keep the best.  */
1816 best = top - choice_stack;
1817 *index = choice_stack [1].index;
1818 if (top->n == issue_rate - cycle_issued_insns || best == all)
1824 memcpy (curr_state, top->state, dfa_state_size);
1826 else if (!ready_try [i])
/* Global cap on search effort — see max_lookahead_tries comment above.  */
1829 if (tries_num > max_lookahead_tries)
1831 insn = ready_element (ready, i);
1832 delay = state_transition (curr_state, insn);
1835 if (state_dead_lock_p (curr_state))
/* "Essential" insn: it changed the DFA state (n counts these).  */
1840 if (memcmp (top->state, curr_state, dfa_state_size) != 0)
1843 top->rest = cached_first_cycle_multipass_dfa_lookahead;
1846 memcpy (top->state, curr_state, dfa_state_size);
/* Unwind: restore ready_try marks and the saved DFA state.  */
1853 while (top != choice_stack)
1855 ready_try [top->index] = 0;
1858 memcpy (curr_state, choice_stack->state, dfa_state_size);
1862 /* The following function chooses insn from READY and modifies
1863 *N_READY and READY. The following function is used only for first
1864 cycle multipass scheduling. */
1867 choose_ready (struct ready_list *ready)
1871 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
1872 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
/* No lookahead (or a SCHED_GROUP head that must stay first): fall back
   to simple highest-priority selection.  */
1873 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
1874 return ready_remove_first (ready);
1877 /* Try to choose the better insn. */
/* Recompute the search budget when the lookahead value changed:
   max_lookahead_tries = 100 * lookahead ** issue_rate.  */
1881 if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
1883 cached_first_cycle_multipass_dfa_lookahead = lookahead;
1884 max_lookahead_tries = 100;
1885 for (i = 0; i < issue_rate; i++)
1886 max_lookahead_tries *= lookahead;
1888 insn = ready_element (ready, 0);
1889 if (INSN_CODE (insn) < 0)
1890 return ready_remove_first (ready);
/* Mark insns the multipass search must not try: unrecognizable insns and
   those rejected by the target's lookahead guard hook.  */
1891 for (i = 1; i < ready->n_ready; i++)
1893 insn = ready_element (ready, i);
1895 = (INSN_CODE (insn) < 0
1896 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
1897 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard (insn)));
1899 if (max_issue (ready, &index) == 0)
1900 return ready_remove_first (ready);
1902 return ready_remove (ready, index);
1906 /* Use forward list scheduling to rearrange insns of block B in region RGN,
1907 possibly bringing insns from subsequent blocks in the same region. */
/* Main driver: schedules one block cycle by cycle, maintaining the ready
   list, the stall queue, and the DFA state.  RGN_N_INSNS bounds the
   per-block allocations below.  */
1910 schedule_block (int b, int rgn_n_insns)
1912 struct ready_list ready;
1913 int i, first_cycle_insn_p;
1915 state_t temp_state = NULL; /* It is used for multipass scheduling. */
1916 int sort_p, advance, start_clock_var;
1918 /* Head/tail info for this block. */
1919 rtx prev_head = current_sched_info->prev_head;
1920 rtx next_tail = current_sched_info->next_tail;
1921 rtx head = NEXT_INSN (prev_head);
1922 rtx tail = PREV_INSN (next_tail);
1924 /* We used to have code to avoid getting parameters moved from hard
1925 argument registers into pseudos.
1927 However, it was removed when it proved to be of marginal benefit
1928 and caused problems because schedule_block and compute_forward_dependences
1929 had different notions of what the "head" insn was. */
1931 gcc_assert (head != tail || INSN_P (head));
1936 fprintf (sched_dump,
1937 ";; ======================================================\n");
1938 fprintf (sched_dump,
1939 ";; -- basic block %d from %d to %d -- %s reload\n",
1940 b, INSN_UID (head), INSN_UID (tail),
1941 (reload_completed ? "after" : "before"));
1942 fprintf (sched_dump,
1943 ";; ======================================================\n");
1944 fprintf (sched_dump, "\n");
1947 state_reset (curr_state);
1949 /* Allocate the ready list. */
/* +1+issue_rate slack so targets can re-add insns without overflow.  */
1951 ready.veclen = rgn_n_insns + 1 + issue_rate;
1952 ready.first = ready.veclen - 1;
1953 ready.vec = XNEWVEC (rtx, ready.veclen);
1956 /* It is used for first cycle multipass scheduling. */
1957 temp_state = alloca (dfa_state_size);
1958 ready_try = XCNEWVEC (char, rgn_n_insns + 1);
1959 choice_stack = XNEWVEC (struct choice_entry, rgn_n_insns + 1);
1960 for (i = 0; i <= rgn_n_insns; i++)
1961 choice_stack[i].state = xmalloc (dfa_state_size);
1963 if (targetm.sched.md_init)
1964 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
1966 /* We start inserting insns after PREV_HEAD. */
1967 last_scheduled_insn = prev_head;
1969 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
1974 insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
1975 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
1977 /* Start just before the beginning of time. */
1980 /* We need queue and ready lists and clock_var be initialized
1981 in try_ready () (which is called through init_ready_list ()). */
1982 (*current_sched_info->init_ready_list) ();
1984 last_clock_var = -1;
1989 /* Loop until all the insns in BB are scheduled. */
1990 while ((*current_sched_info->schedule_more_p) ())
/* Outer per-cycle loop; ADVANCE carries pending stall cycles requested
   by a previously issued insn.  */
1994 start_clock_var = clock_var;
1998 advance_one_cycle ();
2000 /* Add to the ready list all pending insns that can be issued now.
2001 If there are no ready insns, increment clock until one
2002 is ready and add all pending insns at that point to the ready
2004 queue_to_ready (&ready);
2006 gcc_assert (ready.n_ready);
2008 if (sched_verbose >= 2)
2010 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2011 debug_ready_list (&ready);
2013 advance -= clock_var - start_clock_var;
2015 while (advance > 0);
2019 /* Sort the ready list based on priority. */
2020 ready_sort (&ready);
2022 if (sched_verbose >= 2)
2024 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2025 debug_ready_list (&ready);
2029 /* Allow the target to reorder the list, typically for
2030 better instruction bundling. */
2031 if (sort_p && targetm.sched.reorder
2032 && (ready.n_ready == 0
2033 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2035 targetm.sched.reorder (sched_dump, sched_verbose,
2036 ready_lastpos (&ready),
2037 &ready.n_ready, clock_var);
2039 can_issue_more = issue_rate;
2041 first_cycle_insn_p = 1;
2042 cycle_issued_insns = 0;
2049 if (sched_verbose >= 2)
2051 fprintf (sched_dump, ";;\tReady list (t =%3d): ",
2053 debug_ready_list (&ready);
2056 if (ready.n_ready == 0
2058 && reload_completed)
2060 /* Allow scheduling insns directly from the queue in case
2061 there's nothing better to do (ready list is empty) but
2062 there are still vacant dispatch slots in the current cycle. */
2063 if (sched_verbose >= 6)
2064 fprintf(sched_dump,";;\t\tSecond chance\n");
2065 memcpy (temp_state, curr_state, dfa_state_size);
2066 if (early_queue_to_ready (temp_state, &ready))
2067 ready_sort (&ready);
/* Inner per-slot loop exit conditions: nothing ready, issue width
   exhausted, DFA deadlocked, or the region driver says stop.  */
2070 if (ready.n_ready == 0 || !can_issue_more
2071 || state_dead_lock_p (curr_state)
2072 || !(*current_sched_info->schedule_more_p) ())
2075 /* Select and remove the insn from the ready list. */
2077 insn = choose_ready (&ready);
2079 insn = ready_remove_first (&ready);
2081 if (targetm.sched.dfa_new_cycle
2082 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2083 insn, last_clock_var,
2084 clock_var, &sort_p))
2085 /* SORT_P is used by the target to override sorting
2086 of the ready list. This is needed when the target
2087 has modified its internal structures expecting that
2088 the insn will be issued next. As we need the insn
2089 to have the highest priority (so it will be returned by
2090 the ready_remove_first call above), we invoke
2091 ready_add (&ready, insn, true).
2092 But, still, there is one issue: INSN can be later
2093 discarded by scheduler's front end through
2094 current_sched_info->can_schedule_ready_p, hence, won't
2097 ready_add (&ready, insn, true);
/* Trial DFA transition on a scratch state; committed only below once
   the insn is definitely issued this cycle.  */
2102 memcpy (temp_state, curr_state, dfa_state_size);
2103 if (recog_memoized (insn) < 0)
2105 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2106 || asm_noperands (PATTERN (insn)) >= 0);
2107 if (!first_cycle_insn_p && asm_p)
2108 /* This is asm insn which is tryed to be issued on the
2109 cycle not first. Issue it on the next cycle. */
2112 /* A USE insn, or something else we don't need to
2113 understand. We can't pass these directly to
2114 state_transition because it will trigger a
2115 fatal error for unrecognizable insns. */
2120 cost = state_transition (temp_state, insn);
/* Positive cost: the insn cannot issue now — push it onto the queue.  */
2129 queue_insn (insn, cost);
2130 if (SCHED_GROUP_P (insn))
2139 if (! (*current_sched_info->can_schedule_ready_p) (insn))
2142 last_scheduled_insn = move_insn (insn, last_scheduled_insn);
/* Commit the trial state if the insn actually changed it.  */
2144 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2146 cycle_issued_insns++;
2147 memcpy (curr_state, temp_state, dfa_state_size);
2150 if (targetm.sched.variable_issue)
2152 targetm.sched.variable_issue (sched_dump, sched_verbose,
2153 insn, can_issue_more);
2154 /* A naked CLOBBER or USE generates no instruction, so do
2155 not count them against the issue rate. */
2156 else if (GET_CODE (PATTERN (insn)) != USE
2157 && GET_CODE (PATTERN (insn)) != CLOBBER)
2160 advance = schedule_insn (insn);
2162 /* After issuing an asm insn we should start a new cycle. */
2163 if (advance == 0 && asm_p)
2169 first_cycle_insn_p = 0;
2171 /* Sort the ready list based on priority. This must be
2172 redone here, as schedule_insn may have readied additional
2173 insns that will not be sorted correctly. */
2174 if (ready.n_ready > 0)
2175 ready_sort (&ready);
2177 if (targetm.sched.reorder2
2178 && (ready.n_ready == 0
2179 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2182 targetm.sched.reorder2 (sched_dump, sched_verbose,
2184 ? ready_lastpos (&ready) : NULL,
2185 &ready.n_ready, clock_var);
2193 fprintf (sched_dump, ";;\tReady list (final): ");
2194 debug_ready_list (&ready);
2197 if (current_sched_info->queue_must_finish_empty)
2198 /* Sanity check -- queue must be empty now. Meaningless if region has
2200 gcc_assert (!q_size && !ready.n_ready);
2203 /* We must maintain QUEUE_INDEX between blocks in region. */
2204 for (i = ready.n_ready - 1; i >= 0; i--)
2205 QUEUE_INDEX (ready_element (&ready, i)) = QUEUE_NOWHERE;
2208 for (i = 0; i <= max_insn_queue_index; i++)
2211 for (link = insn_queue[i]; link; link = XEXP (link, 1))
2212 QUEUE_INDEX (XEXP (link, 0)) = QUEUE_NOWHERE;
2213 free_INSN_LIST_list (&insn_queue[i]);
2216 /* INSN_TICK (minimum clock tick at which the insn becomes
2217 ready) may be not correct for the insn in the subsequent
2218 blocks of the region. We should use a correct value of
2219 `clock_var' or modify INSN_TICK. It is better to keep
2220 clock_var value equal to 0 at the start of a basic block.
2221 Therefore we modify INSN_TICK here. */
2222 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
2225 if (targetm.sched.md_finish)
2226 targetm.sched.md_finish (sched_dump, sched_verbose);
2228 /* Update head/tail boundaries. */
2229 head = NEXT_INSN (prev_head);
2230 tail = last_scheduled_insn;
2232 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2233 previously found among the insns. Insert them at the beginning
/* note_list is the chain tail; rewind to its head, then splice the whole
   chain in front of HEAD.  */
2237 rtx note_head = note_list;
2239 while (PREV_INSN (note_head))
2241 note_head = PREV_INSN (note_head);
2244 PREV_INSN (note_head) = PREV_INSN (head);
2245 NEXT_INSN (PREV_INSN (head)) = note_head;
2246 PREV_INSN (head) = note_list;
2247 NEXT_INSN (note_list) = head;
2254 fprintf (sched_dump, ";; total time = %d\n;; new head = %d\n",
2255 clock_var, INSN_UID (head));
2256 fprintf (sched_dump, ";; new tail = %d\n\n",
2260 current_sched_info->head = head;
2261 current_sched_info->tail = tail;
2266 for (i = 0; i <= rgn_n_insns; i++)
2267 free (choice_stack [i].state);
2268 free (choice_stack);
2271 /* Set_priorities: compute priority of each insn in the block. */
2274 set_priorities (rtx head, rtx tail)
2278 int sched_max_insns_priority =
2279 current_sched_info->sched_max_insns_priority;
/* Degenerate block: single non-insn — nothing to prioritize.  */
2282 if (head == tail && (! INSN_P (head)))
2287 prev_head = PREV_INSN (head);
/* Walk backwards so each insn's priority can build on its successors'.  */
2288 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
2294 (void) priority (insn);
2296 if (INSN_PRIORITY_KNOWN (insn))
2297 sched_max_insns_priority =
2298 MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
2301 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
2306 /* Initialize some global state for the scheduler. */
/* NOTE(review): the function header line (sched_init) is not visible in
   this extract; the lines below are its body, kept byte-identical.  */
2316 /* Disable speculative loads in their presence if cc0 defined. */
2318 flag_schedule_speculative_load = 0;
2321 /* Set dump and sched_verbose for the desired debugging output. If no
2322 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2323 For -fsched-verbose=N, N>=10, print everything to stderr. */
2324 sched_verbose = sched_verbose_param;
2325 if (sched_verbose_param == 0 && dump_file)
2327 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
2328 ? stderr : dump_file);
2330 /* Initialize issue_rate. */
2331 if (targetm.sched.issue_rate)
2332 issue_rate = targetm.sched.issue_rate ();
2336 if (cached_issue_rate != issue_rate)
2338 cached_issue_rate = issue_rate;
2339 /* To invalidate max_lookahead_tries: */
2340 cached_first_cycle_multipass_dfa_lookahead = 0;
2343 /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
2344 pseudos which do not cross calls. */
2345 old_max_uid = get_max_uid () + 1;
2347 h_i_d = XCNEWVEC (struct haifa_insn_data, old_max_uid);
2349 for (i = 0; i < old_max_uid; i++)
2352 h_i_d[i].queue_index = QUEUE_NOWHERE;
2353 h_i_d[i].tick = INVALID_TICK;
2354 h_i_d[i].inter_tick = INVALID_TICK;
2357 if (targetm.sched.init_dfa_pre_cycle_insn)
2358 targetm.sched.init_dfa_pre_cycle_insn ();
2360 if (targetm.sched.init_dfa_post_cycle_insn)
2361 targetm.sched.init_dfa_post_cycle_insn ();
2364 dfa_state_size = state_size ();
2365 curr_state = xmalloc (dfa_state_size);
/* Assign logical UIDs (LUIDs) to every insn, block by block.  */
2370 for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
2372 INSN_LUID (insn) = luid;
2374 /* Increment the next luid, unless this is a note. We don't
2375 really need separate IDs for notes and we don't want to
2376 schedule differently depending on whether or not there are
2377 line-number notes, i.e., depending on whether or not we're
2378 generating debugging information. */
2382 if (insn == BB_END (b))
2386 init_dependency_caches (luid);
2388 init_alias_analysis ();
2390 if (write_symbols != NO_DEBUG)
2394 line_note_head = XCNEWVEC (rtx, last_basic_block);
2396 /* Save-line-note-head:
2397 Determine the line-number at the start of each basic block.
2398 This must be computed and saved now, because after a basic block's
2399 predecessor has been scheduled, it is impossible to accurately
2400 determine the correct line number for the first insn of the block. */
2404 for (line = BB_HEAD (b); line; line = PREV_INSN (line))
2405 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
2407 line_note_head[b->index] = line;
2410 /* Do a forward search as well, since we won't get to see the first
2411 notes in a basic block. */
2412 for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
2416 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
2417 line_note_head[b->index] = line;
2422 /* The following is done to keep current_sched_info->next_tail non null. */
2424 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
2425 if (NEXT_INSN (insn) == 0
2428 /* Don't emit a NOTE if it would end up before a BARRIER. */
2429 && !BARRIER_P (NEXT_INSN (insn))))
2431 emit_note_after (NOTE_INSN_DELETED, insn);
2432 /* Make insn to appear outside BB. */
2433 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
2436 /* Compute INSN_REG_WEIGHT for all blocks. We must do this before
2437 removing death notes. */
2438 FOR_EACH_BB_REVERSE (b)
2439 find_insn_reg_weight (b->index);
2441 if (targetm.sched.md_init_global)
2442 targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
2445 /* Free global data used during insn scheduling. */
/* NOTE(review): the function header line (sched_finish) is not visible in
   this extract; the teardown mirrors sched_init's allocations above.  */
2453 free_dependency_caches ();
2454 end_alias_analysis ();
2455 if (write_symbols != NO_DEBUG)
2456 free (line_note_head);
2458 if (targetm.sched.md_finish_global)
2459 targetm.sched.md_finish_global (sched_dump, sched_verbose);
2461 current_sched_info = NULL;
2464 /* Fix INSN_TICKs of the instructions in the current block as well as
2465 INSN_TICKs of their dependants.
2466 HEAD and TAIL are the begin and the end of the current scheduled block. */
2468 fix_inter_tick (rtx head, rtx tail)
2470 /* Set of instructions with corrected INSN_TICK. */
2471 bitmap_head processed;
/* First cycle available after the just-scheduled block; ticks are
   normalized relative to this value so they stay consistent when the
   next block is scheduled.  */
2472 int next_clock = clock_var + 1;
2474 bitmap_initialize (&processed, 0);
2476 /* Iterates over scheduled instructions and fix their INSN_TICKs and
2477 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
2478 across different blocks. */
2479 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
2486 tick = INSN_TICK (head);
2487 gcc_assert (tick >= MIN_TICK);
2489 /* Fix INSN_TICK of instruction from just scheduled block. */
2490 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
2492 bitmap_set_bit (&processed, INSN_LUID (head));
/* Clamp the shifted tick so it never drops below MIN_TICK.  */
2495 if (tick < MIN_TICK)
2498 INSN_TICK (head) = tick;
/* Propagate the corrected tick to every forward (consumer) dependence
   of HEAD.  */
2501 for (link = INSN_DEPEND (head); link; link = XEXP (link, 1))
2505 next = XEXP (link, 0);
2506 tick = INSN_TICK (next);
2508 if (tick != INVALID_TICK
2509 /* If NEXT has its INSN_TICK calculated, fix it.
2510 If not - it will be properly calculated from
2511 scratch later in fix_tick_ready. */
2512 && !bitmap_bit_p (&processed, INSN_LUID (next)))
2514 bitmap_set_bit (&processed, INSN_LUID (next));
2517 if (tick < MIN_TICK)
/* Keep the largest tick seen for NEXT across blocks in
   INTER_TICK, then use it as NEXT's effective tick.  */
2520 if (tick > INTER_TICK (next))
2521 INTER_TICK (next) = tick;
2523 tick = INTER_TICK (next);
2525 INSN_TICK (next) = tick;
/* Release the bitmap's backing storage.  */
2530 bitmap_clear (&processed);
2533 /* Check if NEXT is ready to be added to the ready or queue list.
2534 If "yes", add it to the proper list.
Return values (NOTE(review): the -1 path is elided from this excerpt —
confirm against the full function):
2536 -1 - is not ready yet,
2537 0 - added to the ready list,
2538 0 < N - queued for N cycles. */
2540 try_ready (rtx next)
/* NEXT is not ready while it still has unresolved backward
   dependencies (LOG_LINKS non-empty), or while the scheduler
   client's new_ready hook, if provided, rejects it.  */
2542 if (LOG_LINKS (next)
2543 || (current_sched_info->new_ready
2544 && !current_sched_info->new_ready (next)))
/* A newly ready insn must not already sit in the ready or queue
   lists.  */
2546 gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);
2550 if (sched_verbose >= 2)
2551 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s\n",
2552 (*current_sched_info->print_insn) (next, 0));
/* Give the target a chance to tweak NEXT's priority now that it is
   ready — presumably via the TARGET_SCHED_ADJUST_PRIORITY hook;
   confirm against adjust_priority's definition.  */
2554 adjust_priority (next);
/* Compute NEXT's tick and place it on the ready or queue list.  */
2556 return fix_tick_ready (next);
2559 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
2561 fix_tick_ready (rtx next)
/* Walk NEXT's already-resolved backward dependencies to find the
   earliest cycle at which NEXT can issue.  */
2566 link = RESOLVED_DEPS (next);
2572 tick = INSN_TICK (next);
2573 /* If TICK is not equal to INVALID_TICK, then update
2574 INSN_TICK of NEXT with the most recent resolved dependence
2575 cost. Otherwise, recalculate from scratch. */
2576 full_p = tick == INVALID_TICK;
2582 pro = XEXP (link, 0);
/* Every producer of a resolved dependence must already have a
   valid tick.  */
2583 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
2584 /* We should specify FORWARD link to insn_cost,
2585 but are giving a BACKWARD one.
2586 This is ok, because only REG_NOTE_KIND of link is used.
2587 Maybe substitute LINK with REG_NOTE_KIND? */
/* Candidate tick: producer's issue cycle plus the latency of this
   dependence.  */
2588 tick1 = INSN_TICK (pro) + insn_cost (pro, link, next);
/* When recomputing from scratch (FULL_P), visit every resolved
   dependence; otherwise only the most recent one.  */
2592 while ((link = XEXP (link, 1)) && full_p);
2597 INSN_TICK (next) = tick;
/* DELAY is how many cycles past the current clock NEXT must wait;
   non-positive means it is ready now.  */
2599 delay = tick - clock_var;
2601 delay = QUEUE_READY;
2603 change_queue_index (next, delay);
2608 /* Move NEXT to the proper queue list with (DELAY >= 1),
2609 or add it to the ready list (DELAY == QUEUE_READY),
2610 or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE). */
2612 change_queue_index (rtx next, int delay)
/* Where NEXT currently lives: a queue slot index, QUEUE_READY, or
   QUEUE_NOWHERE.  */
2614 int i = QUEUE_INDEX (next);
2616 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
/* An insn that has already been scheduled must never be moved.  */
2618 gcc_assert (i != QUEUE_SCHEDULED);
/* Early out when NEXT is already in the requested place.  */
2620 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
2621 || (delay < 0 && delay == i))
2622 /* We have nothing to do. */
2625 /* Remove NEXT from wherever it is now. */
2626 if (i == QUEUE_READY)
2627 ready_remove_insn (next);
2629 queue_remove (next);
2631 /* Add it to the proper place. */
2632 if (delay == QUEUE_READY)
2633 ready_add (readyp, next, false);
2634 else if (delay >= 1)
2635 queue_insn (next, delay);
/* Trace the movement when verbose scheduling dumps are enabled.  */
2637 if (sched_verbose >= 2)
2639 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
2640 (*current_sched_info->print_insn) (next, 0));
2642 if (delay == QUEUE_READY)
2643 fprintf (sched_dump, " into ready\n");
2644 else if (delay >= 1)
2645 fprintf (sched_dump, " into queue with cost=%d\n", delay);
2647 fprintf (sched_dump, " removed from ready or queue lists\n");
2651 /* INSN is being scheduled. Resolve the dependence between INSN and NEXT. */
2653 resolve_dep (rtx next, rtx insn)
/* One fewer unresolved backward dependence for NEXT.  */
2657 INSN_DEP_COUNT (next)--;
/* Splice the dependence link for INSN out of NEXT's pending list
   (LOG_LINKS) and push it onto NEXT's resolved list (RESOLVED_DEPS),
   reusing the same list node.  */
2659 dep = remove_list_elem (insn, &LOG_LINKS (next));
2660 XEXP (dep, 1) = RESOLVED_DEPS (next);
2661 RESOLVED_DEPS (next) = dep;
/* Consistency: the dependence counter and the pending list must agree —
   the count is zero exactly when LOG_LINKS is empty.  */
2663 gcc_assert ((INSN_DEP_COUNT (next) != 0 || !LOG_LINKS (next))
2664 && (LOG_LINKS (next) || INSN_DEP_COUNT (next) == 0));
2667 #endif /* INSN_SCHEDULING */