/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.
   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.
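
   As an illustration (made-up latencies, not drawn from any target):
   for a chain A --(latency 2)--> B --(latency 1)--> C, where C ends
   the block, the backward walk computes

     priority (C) = insn_cost (C)
     priority (B) = dep_cost (B->C) + priority (C) = 1 + priority (C)
     priority (A) = dep_cost (A->B) + priority (B) = 2 + priority (B)

   so insns on the longest-latency path to the end of the block end up
   with the highest priorities.  See priority () below.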
   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots as they become available.
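
   In outline (an illustrative sketch of the main loop in
   schedule_block () below, not a verbatim excerpt):

     while (insns remain unscheduled)
       {
         queue_to_ready (&ready);       // Q->R as latencies elapse.
         ready_sort (&ready);           // Order by rank_for_schedule.
         choose_ready (&ready, &insn);  // Pick the best ready insn.
         schedule_insn (insn);          // R->S; may move P->R or P->Q.
         advance_one_cycle ();          // When nothing more can issue.
       }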
   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block upon interblock motion, ties broken by
   4.  prefer useful upon speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, or finally
   7.  choose the insn which has the most insns dependent on it, or
   8.  choose insn with lowest UID.
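
   As an example of tie-breaking (illustrative, no particular target):
   if two ready insns have the same priority, i.e. the same longest
   path to the end of the bb, and we are scheduling before reload,
   rank_for_schedule () below prefers the one with the smaller
   INSN_REG_WEIGHT, i.e. the smaller contribution to register
   pressure (rule 2).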
   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.
   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using insn backward dependences
   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
   INSN_FORW_DEPS for the purpose of forward list scheduling.
   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.
   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live registers constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"
#include "params.h"
#include "dbgcnt.h"
#ifdef INSN_SCHEDULING

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

static int issue_rate;
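
/* For example (illustrative numbers, not from any particular target):
   on a machine that can start two insns per cycle, issue_rate is 2 and
   the inner loop of schedule_block () will issue at most two ready
   insns (tracked by can_issue_more) before advance_one_cycle () is
   called.  */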
/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -DSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

static int sched_verbose_param = 0;
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  */
FILE *sched_dump = 0;

/* Highest uid before scheduling.  */
static int old_max_uid;
/* fix_sched_param() is called from toplev.c upon detection
   of the -fsched-verbose=N option.  */

void
fix_sched_param (const char *param, const char *val)
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning (0, "fix_sched_param: unknown param: %s", param);
}
struct haifa_insn_data *h_i_d;

#define INSN_TICK(INSN) (h_i_d[INSN_UID (INSN)].tick)
#define INTER_TICK(INSN) (h_i_d[INSN_UID (INSN)].inter_tick)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)
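
/* For instance, with max_insn_queue_index == 7 (a power of two minus
   one; an example value only), INVALID_TICK is -8 and MIN_TICK is -7,
   so every valid tick compares strictly greater than INVALID_TICK.  */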
/* Issue points are used to distinguish between instructions in max_issue ().
   For now, all instructions are equally good.  */
#define ISSUE_POINTS(INSN) 1

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
static spec_info_t spec_info;

/* True if a recovery block was added during scheduling of the current
   block.  Used to determine whether we need to fix INSN_TICKs.  */
static bool added_recovery_block_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx *bb_header = 0;

/* Number of basic_blocks.  */
static int old_last_basic_block;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;
/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
   unscheduled insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */
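
/* To trace a single insn through these structures (an illustrative
   walk-through, not code from this file): an insn with unresolved
   dependencies starts in P with QUEUE_INDEX == QUEUE_NOWHERE; when its
   last producer is scheduled, schedule_insn () resolves the dependence
   and try_ready () either queues it (QUEUE_INDEX >= 0) if its result
   latency has not elapsed, or puts it on the ready list (QUEUE_INDEX
   == QUEUE_READY); queue_to_ready () later moves it Q->R; finally the
   loop in schedule_block () picks it from R and schedule_insn () marks
   it QUEUE_SCHEDULED.  */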
/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   on the basis of the maximal time of functional unit reservations and
   getting a result.  This is the longest time an insn may be queued.  */

static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
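
/* Because max_insn_queue_index is a power of two minus one, the
   masking above wraps queue indices around the circular buffer: with
   max_insn_queue_index == 7 (an example value), NEXT_Q (7) == 0 and
   NEXT_Q_AFTER (6, 3) == 1.  */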
#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   queue, nor in the ready list.
   QUEUE_READY     - INSN is in ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)
/* The following variable value refers to all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable value is the size of memory representing all
   current and future reservations of the processor units.  */
static size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
static char *ready_try;
/* Describe the ready list of the scheduler.
   VEC holds space enough for all insns in the current region.  VECLEN
   says how many exactly.
   FIRST is the index of the element with the highest priority; i.e. the
   last one in the ready list, since elements are ordered by ascending
   priority.
   N_READY determines how many insns are on the ready list.  */

struct ready_list
{
  rtx *vec;
  int veclen;
  int first;
  int n_ready;
};
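
/* The ready list occupies the top of VEC and is addressed downwards
   from FIRST: the best insn is vec[first] (index 0 for ready_element)
   and the worst is vec[first - n_ready + 1] (see ready_lastpos).
   E.g. (illustrative), with veclen == 8, first == 7 and n_ready == 3,
   the ready insns occupy vec[5..7], with the highest priority insn in
   vec[7].  */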
/* The pointer to the ready list.  */
static struct ready_list *readyp;

/* Scheduling clock.  */
static int clock_var;

/* Number of instructions in current scheduling region.  */
static int rgn_n_insns;

static int may_trap_exp (rtx, int);
/* Nonzero iff the address is comprised from at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)						\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))
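
/* Examples, assuming the usual RTL address forms: (reg 100),
   (plus (reg 100) (const_int 4)) and (lo_sum (reg 100)
   (symbol_ref "x")) all use at most one base register and satisfy the
   predicate, while (plus (reg 100) (reg 101)) does not.  */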
/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int
may_trap_exp (rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory:  a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;

	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}
/* Classifies insn for the purpose of verifying that it can be
   moved speculatively, by examining its patterns, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
   being either PFREE or PRISKY.  */
static int
haifa_classify_insn (rtx insn)
{
  rtx pat = PATTERN (insn);
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i, len = XVECLEN (pat, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  code = GET_CODE (XVECEXP (pat, 0, i));
	  switch (code)
	    {
	    case CLOBBER:
	      /* Test if it is a 'store'.  */
	      tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
	      break;
	    case SET:
	      /* Test if it is a store.  */
	      tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
	      if (tmp_class == TRAP_RISKY)
		break;
	      /* Test if it is a load.  */
	      tmp_class
		= WORST_CLASS (tmp_class,
			       may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
					     0));
	      break;
	    case COND_EXEC:
	    case TRAP_IF:
	      tmp_class = TRAP_RISKY;
	      break;
	    default:
	      ;
	    }
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (pat);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (pat, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (pat), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (pat), 0));
	  break;
	case COND_EXEC:
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:
	  ;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}
/* A typedef for rtx vector.  */
typedef VEC(rtx, heap) *rtx_vec_t;

/* Forward declarations.  */

static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
static int find_set_reg_weight (rtx);
static void find_insn_reg_weight (basic_block);
static void find_insn_reg_weight1 (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);
/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between two types of notes:

   (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */
static rtx unlink_other_notes (rtx, rtx);
static void reemit_notes (rtx);

static rtx *ready_lastpos (struct ready_list *);
static void ready_add (struct ready_list *, rtx, bool);
static void ready_sort (struct ready_list *);
static rtx ready_remove_first (struct ready_list *);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

static void debug_ready_list (struct ready_list *);

static void move_insn (rtx);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx ready_element (struct ready_list *, int);
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);
static int max_issue (struct ready_list *, int *, int);

static int choose_ready (struct ready_list *, rtx *);

static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void extend_ready (int);
static void extend_global (rtx);
static void extend_all (rtx);
static void init_h_i_d (rtx);
static void generate_recovery_code (rtx);
static void process_insn_forw_deps_be_in_spec (deps_list_t, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
static dw_t dep_weak (ds_t);
static edge find_fallthru_edge (basic_block);
static void init_before_recovery (void);
static basic_block create_recovery_block (void);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void change_pattern (rtx, rtx);
static int speculate_insn (rtx, ds_t, rtx *);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void extend_bb (void);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx, rtx);

#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
static void check_sched_flags (void);
#endif

#endif /* INSN_SCHEDULING */
/* Point to state used for the current scheduling pass.  */
struct sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Working copy of frontend's sched_info variable.  */
static struct sched_info current_sched_info_var;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Cached cost of the instruction.  Use below function to get cost of the
   insn.  -1 here means that the field is not initialized.  */
#define INSN_COST(INSN) (h_i_d[INSN_UID (INSN)].cost)
/* Compute cost of executing INSN.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
insn_cost (rtx insn)
{
  int cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  return cost;
}
/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
dep_cost (dep_t link)
{
  rtx used = DEP_CON (link);
  int cost;

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  */
  if (recog_memoized (used) < 0)
    cost = 0;
  else
    {
      rtx insn = DEP_PRO (link);
      enum reg_note dep_type = DEP_KIND (link);

      cost = insn_cost (insn);

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}

      if (targetm.sched.adjust_cost != NULL)
	{
	  /* This variable is used for backward compatibility with the
	     targets.  */
	  rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);

	  /* Make it self-cycled, so that if someone tries to walk over this
	     incomplete list he/she will be caught in an endless loop.  */
	  XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;

	  /* Targets use only REG_NOTE_KIND of the link.  */
	  PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_KIND (link));

	  cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
					    insn, cost);

	  free_INSN_LIST_node (dep_cost_rtx_link);
	}

      if (cost < 0)
	cost = 0;
    }

  return cost;
}
/* Return 'true' if DEP should be included in priority calculations.  */
static bool
contributes_to_priority_p (dep_t dep)
{
  /* Critical path is meaningful in block boundaries only.  */
  if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
						    DEP_PRO (dep)))
    return false;

  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
     then speculative instructions will less likely be
     scheduled.  That is because the priority of
     their producers will increase, and, thus, the
     producers will more likely be scheduled, thus,
     resolving the dependence.  */
  if ((current_sched_info->flags & DO_SPECULATION)
      && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
      && (DEP_STATUS (dep) & SPECULATIVE))
    return false;

  return true;
}
/* Compute the priority number for INSN.  */
static int
priority (rtx insn)
{
  if (! INSN_P (insn))
    return 0;

  /* We should not be interested in priority of an already scheduled insn.  */
  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  if (!INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = 0;

      if (deps_list_empty_p (INSN_FORW_DEPS (insn)))
	/* ??? We should set INSN_PRIORITY to insn_cost when an insn has
	   some forward deps but all of them are ignored by
	   contributes_to_priority hook.  At the moment we set priority of
	   such insn to 0.  */
	this_priority = insn_cost (insn);
      else
	{
	  rtx prev_first, twin;
	  basic_block rec;

	  /* For recovery check instructions we calculate priority slightly
	     different than that of normal instructions.  Instead of walking
	     through INSN_FORW_DEPS (check) list, we walk through
	     INSN_FORW_DEPS list of each instruction in the corresponding
	     recovery block.  */

	  rec = RECOVERY_BLOCK (insn);
	  if (!rec || rec == EXIT_BLOCK_PTR)
	    {
	      prev_first = PREV_INSN (insn);
	      twin = insn;
	    }
	  else
	    {
	      prev_first = NEXT_INSN (BB_HEAD (rec));
	      twin = PREV_INSN (BB_END (rec));
	    }

	  do
	    {
	      dep_link_t link;

	      FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (twin))
		{
		  rtx next;
		  int next_priority;
		  dep_t dep = DEP_LINK_DEP (link);

		  next = DEP_CON (dep);

		  if (BLOCK_FOR_INSN (next) != rec)
		    {
		      int cost;

		      if (!contributes_to_priority_p (dep))
			continue;

		      if (twin == insn)
			cost = dep_cost (dep);
		      else
			{
			  struct _dep _dep1, *dep1 = &_dep1;

			  init_dep (dep1, insn, next, REG_DEP_ANTI);

			  cost = dep_cost (dep1);
			}

		      next_priority = cost + priority (next);

		      if (next_priority > this_priority)
			this_priority = next_priority;
		    }
		}

	      twin = PREV_INSN (twin);
	    }
	  while (twin != prev_first);
	}
      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_STATUS (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)                                             \
       swap_sort (READY, N_READY);                                   \
     else if ((N_READY) > 2)                                         \
       qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }    \
while (0)
/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx tmp = *(const rtx *) y;
  rtx tmp2 = *(const rtx *) x;
  dep_link_t link1, link2;
  int tmp_class, tmp2_class;
  int val, priority_val, weight_val, info_val;

  /* The insn in a schedule group should be issued first.  */
  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* Make sure that priority of TMP and TMP2 are initialized.  */
  gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (priority_val)
    return priority_val;

  /* Prefer speculative insn with greater dependencies weakness.  */
  if (spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
	dw1 = dep_weak (ds1);
      else
	dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
	dw2 = dep_weak (ds2);
      else
	dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
	return dw;
    }

  /* Prefer an insn with smaller contribution to registers-pressure.  */
  if (!reload_completed &&
      (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
    return weight_val;

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (info_val)
    return info_val;

  /* Compare insns based on their relation to the last-scheduled-insn.  */
  if (INSN_P (last_scheduled_insn))
    {
      /* Classify the instructions into three classes:
         1) Data dependent on last schedule insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
      link1
	= find_link_by_con_in_deps_list (INSN_FORW_DEPS (last_scheduled_insn),
					 tmp);

      if (link1 == NULL || dep_cost (DEP_LINK_DEP (link1)) == 1)
	tmp_class = 3;
      else if (/* Data dependence.  */
	       DEP_LINK_KIND (link1) == REG_DEP_TRUE)
	tmp_class = 1;
      else
	tmp_class = 2;

      link2
	= find_link_by_con_in_deps_list (INSN_FORW_DEPS (last_scheduled_insn),
					 tmp2);

      if (link2 == NULL || dep_cost (DEP_LINK_DEP (link2)) == 1)
	tmp2_class = 3;
      else if (/* Data dependence.  */
	       DEP_LINK_KIND (link2) == REG_DEP_TRUE)
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
	return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */

  link1 = DEPS_LIST_FIRST (INSN_FORW_DEPS (tmp));
  link2 = DEPS_LIST_FIRST (INSN_FORW_DEPS (tmp2));

  while (link1 != NULL && link2 != NULL)
    {
      link1 = DEP_LINK_NEXT (link1);
      link2 = DEP_LINK_NEXT (link2);
    }

  if (link1 != NULL && link2 == NULL)
    /* TMP (Y) has more insns that depend on it.  */
    return -1;
  if (link1 == NULL && link2 != NULL)
    /* TMP2 (X) has more insns that depend on it.  */
    return 1;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
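
/* A note on the comparator above: qsort sorts ascending, and the ready
   list keeps its best insn at the highest index (see struct
   ready_list).  rank_for_schedule therefore dereferences X and Y
   swapped (TMP comes from Y, TMP2 from X), so that "X preferred"
   yields a positive value and X lands later in the vector.  */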
/* Resort the array A in which only element at index N may be out of order.  */

HAIFA_INLINE static void
swap_sort (rtx *a, int n)
{
  rtx insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}
/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve insns
   chain for debugging purposes.  */

HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);

  gcc_assert (n_cycles <= max_insn_queue_index);

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
	       (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
    }

  QUEUE_INDEX (insn) = next_q;
}
/* Remove INSN from queue.  */
static void
queue_remove (rtx insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}
/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

HAIFA_INLINE static rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}
/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
	{
	  memmove (ready->vec + ready->veclen - ready->n_ready,
		   ready_lastpos (ready),
		   ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 1;
	}
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
	{
	  if (ready->n_ready)
	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
		     ready_lastpos (ready),
		     ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 2;
	}
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;
}
/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
  rtx t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}
/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we will try to choose the ready insn which
   permits starting the maximum number of insns on the same cycle.  */

/* Return a pointer to the element INDEX from the ready.  INDEX for
   insn with the highest priority is 0, and the lowest priority has
   N_READY - 1.  */

HAIFA_INLINE static rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}
/* Remove the element INDEX from the ready list and return it.  INDEX
   for insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx
ready_remove (struct ready_list *ready, int index)
{
  rtx t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}
/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
	ready_remove (readyp, i);
	return;
      }
  gcc_unreachable ();
}
/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */

HAIFA_INLINE static void
ready_sort (struct ready_list *ready)
{
  rtx *first = ready_lastpos (ready);
  SCHED_SORT (first, ready->n_ready);
}
/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}
/* Advance time on one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (curr_state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_post_cycle_insn ());
}
/* Clock at which the previous instruction was issued.  */
static int last_clock_var;
/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  READY is the ready list which contains the insns
   that are ready to fire.  CLOCK is the current cycle.  The function
   returns necessary cycle advance after issuing the insn (it is not
   zero for insns in a schedule group).  */

static int
schedule_insn (rtx insn)
{
  dep_link_t link;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      char buf[2048];

      print_insn (buf, insn, 0);
      buf[40] = 0;
      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);

      if (recog_memoized (insn) < 0)
	fprintf (sched_dump, "nothing");
      else
	print_reservation (sched_dump, insn);
      fputc ('\n', sched_dump);
    }

  /* Scheduling instruction should have all its dependencies resolved and
     should have been removed from the ready list.  */
  gcc_assert (INSN_DEP_COUNT (insn) == 0
	      && deps_list_empty_p (INSN_BACK_DEPS (insn)));
  free_deps_list (INSN_BACK_DEPS (insn));

  /* Now we can free INSN_RESOLVED_BACK_DEPS list.  */
  delete_deps_list (INSN_RESOLVED_BACK_DEPS (insn));

  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if following flag is set.  */
    gcc_assert (flag_sched_stalled_insns);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  /* Update dependent instructions.  */
  FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (insn))
    {
      rtx next = DEP_LINK_CON (link);

      /* Resolve the dependence between INSN and NEXT.  */

      INSN_DEP_COUNT (next)--;

      move_dep_link (DEP_NODE_BACK (DEP_LINK_NODE (link)),
		     INSN_RESOLVED_BACK_DEPS (next));

      gcc_assert ((INSN_DEP_COUNT (next) == 0)
		  == deps_list_empty_p (INSN_BACK_DEPS (next)));

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
	{
	  int effective_cost;

	  effective_cost = try_ready (next);

	  if (effective_cost >= 0
	      && SCHED_GROUP_P (next)
	      && advance < effective_cost)
	    advance = effective_cost;
	}
      else
	/* Check always has only one forward dependence (to the first insn in
	   the recovery block), therefore, this will be executed only once.  */
	{
	  gcc_assert (DEP_LINK_NEXT (link) == NULL);
	  fix_recovery_deps (RECOVERY_BLOCK (insn));
	}
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    {
      if (reload_completed)
	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  return advance;
}
/* Functions for handling of notes.  */

/* Delete notes beginning with INSN and put them in the chain
   of notes ended by NOTE_LIST.
   Returns the insn following the notes.  */

static rtx
unlink_other_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_NOT_BB_P (insn))
    {
      rtx next = NEXT_INSN (insn);
      basic_block bb = BLOCK_FOR_INSN (insn);

      /* Delete the note from its current position.  */
      if (prev)
	NEXT_INSN (prev) = next;
      if (next)
	PREV_INSN (next) = prev;

      if (bb)
	{
	  /* Basic block can begin with either LABEL or
	     NOTE_INSN_BASIC_BLOCK.  */
	  gcc_assert (BB_HEAD (bb) != insn);

	  /* Check if we are removing last insn in the BB.  */
	  if (BB_END (bb) == insn)
	    BB_END (bb) = prev;
	}

      /* See sched_analyze to see how these are handled.  */
      if (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
	  && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END)
	{
	  /* Insert the note at the end of the notes list.  */
	  PREV_INSN (insn) = note_list;
	  if (note_list)
	    NEXT_INSN (note_list) = insn;
	  note_list = insn;
	}

      insn = next;
    }
  return insn;
}
/* Return the head and tail pointers of ebb starting at BEG and ending
   at END.  */

void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
  rtx beg_head = BB_HEAD (beg);
  rtx beg_tail = BB_END (beg);
  rtx end_head = BB_HEAD (end);
  rtx end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic blocks.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else
      break;

  *tailp = end_tail;
}
/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (rtx head, rtx tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
	return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}
/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */

void
rm_other_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  note_list = 0;
  if (head == tail && (! INSN_P (head)))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
	 This is needed to keep the debugger from
	 getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
	{
	  prev = insn;

	  insn = unlink_other_notes (insn, next_tail);

	  gcc_assert (prev != tail && prev != head && insn != next_tail);
	}
    }
}
/* Functions for computation of registers live/usage info.  */

/* This function looks for a new register being defined.
   If the destination register is already used by the source,
   a new register is not needed.  */

static int
find_set_reg_weight (rtx x)
{
  if (GET_CODE (x) == CLOBBER
      && register_operand (SET_DEST (x), VOIDmode))
    return 1;
  if (GET_CODE (x) == SET
      && register_operand (SET_DEST (x), VOIDmode))
    {
      if (REG_P (SET_DEST (x)))
	{
	  if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
	    return 1;
	  else
	    return 0;
	}
      return 1;
    }
  return 0;
}
/* Calculate INSN_REG_WEIGHT for all insns of a block.  */

static void
find_insn_reg_weight (basic_block bb)
{
  rtx insn, next_tail, head, tail;

  get_ebb_head_tail (bb, bb, &head, &tail);
  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    find_insn_reg_weight1 (insn);
}
/* Calculate INSN_REG_WEIGHT for single instruction.
   Separated from find_insn_reg_weight because of need
   to initialize new instruction in generate_recovery_code.  */
static void
find_insn_reg_weight1 (rtx insn)
{
  int reg_weight = 0;
  rtx x;

  /* Handle register life information.  */
  if (! INSN_P (insn))
    return;

  /* Increment weight for each register born here.  */
  x = PATTERN (insn);
  reg_weight += find_set_reg_weight (x);
  if (GET_CODE (x) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
	{
	  x = XVECEXP (PATTERN (insn), 0, j);
	  reg_weight += find_set_reg_weight (x);
	}
    }
  /* Decrement weight for each register that dies here.  */
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
    {
      if (REG_NOTE_KIND (x) == REG_DEAD
	  || REG_NOTE_KIND (x) == REG_UNUSED)
	reg_weight--;
    }

  INSN_REG_WEIGHT (insn) = reg_weight;
}
/* Move insns that became ready to fire from queue to ready list.  */

static void
queue_to_ready (struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx skip_insn;

  q_ptr = NEXT_Q (q_ptr);

  if (dbg_cnt (sched_insn) == false)
    /* If debug counter is activated do not requeue insn next after
       last_scheduled_insn.  */
    skip_insn = next_nonnote_insn (last_scheduled_insn);
  else
    skip_insn = NULL_RTX;

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {
      insn = XEXP (link, 0);
      q_size -= 1;

      if (sched_verbose >= 2)
	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
		 (*current_sched_info->print_insn) (insn, 0));

      /* If the ready list is full, delay the insn for 1 cycle.
	 See the comment in schedule_block for the rationale.  */
      if (!reload_completed
	  && ready->n_ready > MAX_SCHED_READY_INSNS
	  && !SCHED_GROUP_P (insn)
	  && insn != skip_insn)
	{
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, "requeued because ready full\n");
	  queue_insn (insn, 1);
	}
      else
	{
	  ready_add (ready, insn, false);
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, "moving to ready without stalls\n");
	}
    }
  free_INSN_LIST_list (&insn_queue[q_ptr]);

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
	{
	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	    {
	      for (; link; link = XEXP (link, 1))
		{
		  insn = XEXP (link, 0);
		  q_size -= 1;

		  if (sched_verbose >= 2)
		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
			     (*current_sched_info->print_insn) (insn, 0));

		  ready_add (ready, insn, false);
		  if (sched_verbose >= 2)
		    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
		}
	      free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);

	      advance_one_cycle ();

	      break;
	    }

	  advance_one_cycle ();
	}

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
    }
}
/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead (or in
   addition) depending on user flags and target hooks.)  */

static bool
ok_for_early_queue_removal (rtx insn)
{
  int n_cycles;
  rtx prev_insn = last_scheduled_insn;

  if (targetm.sched.is_costly_dependence)
    {
      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
	{
	  for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
	    {
	      int cost;

	      if (!NOTE_P (prev_insn))
		{
		  dep_link_t dep_link;

		  dep_link = (find_link_by_con_in_deps_list
			      (INSN_FORW_DEPS (prev_insn), insn));

		  if (dep_link)
		    {
		      dep_t dep = DEP_LINK_DEP (dep_link);

		      cost = dep_cost (dep);

		      if (targetm.sched.is_costly_dependence (dep, cost,
				flag_sched_stalled_insns_dep - n_cycles))
			return false;
		    }
		}

	      if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
		break;
	    }

	  if (!prev_insn)
	    break;
	  prev_insn = PREV_INSN (prev_insn);
	}
    }

  return true;
}
/* Remove insns from the queue, before they become "ready" with respect
   to FU latency considerations.  */

static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx next_link;
  rtx prev_link;
  bool move_to_ready;
  int cost;
  state_t temp_state = alloca (dfa_state_size);
  int stalls;
  int insns_removed = 0;

  /*
     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
     function:

     X == 0: There is no limit on how many queued insns can be removed
             prematurely.  (flag_sched_stalled_insns = -1).

     X >= 1: Only X queued insns can be removed prematurely in each
             invocation.  (flag_sched_stalled_insns = X).

     Otherwise: Early queue removal is disabled.
             (flag_sched_stalled_insns = 0)
  */

  if (! flag_sched_stalled_insns)
    return 0;

  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
    {
      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	{
	  if (sched_verbose > 6)
	    fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);

	  prev_link = 0;
	  while (link)
	    {
	      next_link = XEXP (link, 1);
	      insn = XEXP (link, 0);
	      if (insn && sched_verbose > 6)
		print_rtl_single (sched_dump, insn);

	      memcpy (temp_state, state, dfa_state_size);
	      if (recog_memoized (insn) < 0)
		/* non-negative to indicate that it's not ready
		   to avoid infinite Q->R->Q->R... */
		cost = 0;
	      else
		cost = state_transition (temp_state, insn);

	      if (sched_verbose >= 6)
		fprintf (sched_dump, "transition cost = %d\n", cost);

	      move_to_ready = false;
	      if (cost < 0)
		{
		  move_to_ready = ok_for_early_queue_removal (insn);
		  if (move_to_ready == true)
		    {
		      /* move from Q to R */
		      q_size -= 1;
		      ready_add (ready, insn, false);

		      if (prev_link)
			XEXP (prev_link, 1) = next_link;
		      else
			insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;

		      free_INSN_LIST_node (link);

		      if (sched_verbose >= 2)
			fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
				 (*current_sched_info->print_insn) (insn, 0));

		      insns_removed++;
		      if (insns_removed == flag_sched_stalled_insns)
			/* Remove no more than flag_sched_stalled_insns insns
			   from Q at a time.  */
			return insns_removed;
		    }
		}

	      if (move_to_ready == false)
		prev_link = link;

	      link = next_link;
	    } /* while link */
	} /* if link */
    } /* for stalls.. */

  return insns_removed;
}
/* Print the ready list for debugging purposes.  Callable from debugger.  */

static void
debug_ready_list (struct ready_list *ready)
{
  rtx *p;
  int i;

  if (ready->n_ready == 0)
    {
      fprintf (sched_dump, "\n");
      return;
    }

  p = ready_lastpos (ready);
  for (i = 0; i < ready->n_ready; i++)
    fprintf (sched_dump, "  %s", (*current_sched_info->print_insn) (p[i], 0));
  fprintf (sched_dump, "\n");
}
/* Search INSN for REG_SAVE_NOTE note pairs for
   NOTE_INSN_EHREGION_{BEG,END}; and convert them back into
   NOTEs.  The REG_SAVE_NOTE note following the first one contains the
   saved value for NOTE_BLOCK_NUMBER which is useful for
   NOTE_INSN_EH_REGION_{BEG,END} NOTEs.  */

static void
reemit_notes (rtx insn)
{
  rtx note, last = insn;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
	{
	  enum insn_note note_type = INTVAL (XEXP (note, 0));

	  last = emit_note_before (note_type, last);
	  remove_note (insn, note);
	}
    }
}
/* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
static void
move_insn (rtx insn)
{
  rtx last = last_scheduled_insn;

  if (PREV_INSN (insn) != last)
    {
      basic_block bb;
      rtx note;
      int jump_p = 0;

      bb = BLOCK_FOR_INSN (insn);

      /* BB_HEAD is either LABEL or NOTE.  */
      gcc_assert (BB_HEAD (bb) != insn);

      if (BB_END (bb) == insn)
	/* If this is last instruction in BB, move end marker one
	   instruction up.  */
	{
	  /* Jumps are always placed at the end of basic block.  */
	  jump_p = control_flow_insn_p (insn);

	  gcc_assert (!jump_p
		      || ((current_sched_info->flags & SCHED_RGN)
			  && IS_SPECULATION_BRANCHY_CHECK_P (insn))
		      || (current_sched_info->flags & SCHED_EBB));

	  gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);

	  BB_END (bb) = PREV_INSN (insn);
	}

      gcc_assert (BB_END (bb) != last);

      if (jump_p)
	/* We move the block note along with jump.  */
	{
	  /* NT is needed for assertion below.  */
	  rtx nt = current_sched_info->next_tail;

	  note = NEXT_INSN (insn);
	  while (NOTE_NOT_BB_P (note) && note != nt)
	    note = NEXT_INSN (note);

	  if (note != nt
	      && (LABEL_P (note)
		  || BARRIER_P (note)))
	    note = NEXT_INSN (note);

	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
	}
      else
	note = insn;

      NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
      PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);

      NEXT_INSN (note) = NEXT_INSN (last);
      PREV_INSN (NEXT_INSN (last)) = note;

      NEXT_INSN (last) = insn;
      PREV_INSN (insn) = last;

      bb = BLOCK_FOR_INSN (last);

      if (jump_p)
	{
	  fix_jump_move (insn);

	  if (BLOCK_FOR_INSN (insn) != bb)
	    move_block_after_check (insn);

	  gcc_assert (BB_END (bb) == last);
	}

      set_block_for_insn (insn, bb);
      df_insn_change_bb (insn);

      /* Update BB_END, if needed.  */
      if (BB_END (bb) == last)
	BB_END (bb) = insn;
    }

  reemit_notes (insn);

  SCHED_GROUP_P (insn) = 0;
}
/* The following structure describes an entry of the stack of choices.  */
struct choice_entry
{
  /* Ordinal number of the issued insn in the ready queue.  */
  int index;
  /* The number of the rest insns whose issues we should try.  */
  int rest;
  /* The number of issued essential insns.  */
  int n;
  /* State after issuing the insn.  */
  state_t state;
};

/* The following array is used to implement a stack of choices used in
   function max_issue.  */
static struct choice_entry *choice_stack;

/* The following variable value is number of essential insns issued on
   the current cycle.  An insn is an essential one if it changes the
   processor's state.  */
static int cycle_issued_insns;
/* The following variable value is maximal number of tries of issuing
   insns for the first cycle multipass insn scheduling.  We define
   this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
   need this constraint if all real insns (with non-negative codes)
   had reservations because in this case the algorithm complexity is
   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
   might be incomplete and such insn might occur.  For such
   descriptions, the complexity of algorithm (without the constraint)
   could achieve DFA_LOOKAHEAD ** N, where N is the queue length.  */
static int max_lookahead_tries;
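
/* E.g. (illustrative numbers): with issue_rate == 2 and a target
   lookahead of 4, choose_ready () below caps the search at
   100 * 4 * 4 == 1600 tries.  */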
/* The following value is value of hook
   `first_cycle_multipass_dfa_lookahead' at the last call of
   `max_issue'.  */
static int cached_first_cycle_multipass_dfa_lookahead = 0;

/* The following value is value of `issue_rate' at the last call of
   `schedule_block'.  */
static int cached_issue_rate = 0;
/* The following function returns maximal (or close to maximal) number
   of insns which can be issued on the same cycle and one of which
   insns is insns with the best rank (the first insn in READY).  To
   do this, the function tries different samples of ready insns.  READY
   is current queue `ready'.  Global array READY_TRY reflects what
   insns are already issued in this try.  MAX_POINTS is the sum of points
   of all instructions in READY.  The function stops immediately,
   if it has reached such a solution, that all instructions can be issued.
   INDEX will contain index of the best insn in READY.  The following
   function is used only for first cycle multipass scheduling.  */
static int
max_issue (struct ready_list *ready, int *index, int max_points)
{
  int n, i, all, n_ready, best, delay, tries_num, points = -1;
  struct choice_entry *top;
  rtx insn;

  best = 0;
  memcpy (choice_stack->state, curr_state, dfa_state_size);
  top = choice_stack;
  top->rest = cached_first_cycle_multipass_dfa_lookahead;
  top->n = 0;
  n_ready = ready->n_ready;
  for (all = i = 0; i < n_ready; i++)
    if (!ready_try [i])
      all++;
  i = 0;
  tries_num = 0;
  for (;;)
    {
      if (top->rest == 0 || i >= n_ready)
	{
	  if (top == choice_stack)
	    break;
	  if (best < top - choice_stack && ready_try [0])
	    {
	      best = top - choice_stack;
	      *index = choice_stack [1].index;
	      points = top->n;
	      if (top->n == max_points || best == all)
		break;
	    }
	  i = top->index;
	  ready_try [i] = 0;
	  top--;
	  memcpy (curr_state, top->state, dfa_state_size);
	}
      else if (!ready_try [i])
	{
	  tries_num++;
	  if (tries_num > max_lookahead_tries)
	    break;
	  insn = ready_element (ready, i);
	  delay = state_transition (curr_state, insn);
	  if (delay < 0)
	    {
	      if (state_dead_lock_p (curr_state))
		top->rest = 0;
	      else
		top->rest--;
	      n = top->n;
	      if (memcmp (top->state, curr_state, dfa_state_size) != 0)
		n += ISSUE_POINTS (insn);
	      top++;
	      top->rest = cached_first_cycle_multipass_dfa_lookahead;
	      top->index = i;
	      top->n = n;
	      memcpy (top->state, curr_state, dfa_state_size);
	      ready_try [i] = 1;
	      i = -1;
	    }
	}
      i++;
    }
  while (top != choice_stack)
    {
      ready_try [top->index] = 0;
      top--;
    }
  memcpy (curr_state, choice_stack->state, dfa_state_size);

  if (sched_verbose >= 4)
    fprintf (sched_dump, ";;\t\tChoosed insn : %s; points: %d/%d\n",
	     (*current_sched_info->print_insn) (ready_element (ready, *index),
						0),
	     points, max_points);

  return best;
}
/* The following function chooses insn from READY and modifies
   *N_READY and READY.  The following function is used only for first
   cycle multipass scheduling.  It returns:
   -1 if cycle should be advanced,
   0 if INSN_PTR is set to point to the desirable insn,
   1 if choose_ready () should be restarted without advancing the cycle.  */
static int
choose_ready (struct ready_list *ready, rtx *insn_ptr)
{
  int lookahead;

  if (dbg_cnt (sched_insn) == false)
    {
      rtx insn;

      insn = next_nonnote_insn (last_scheduled_insn);

      if (QUEUE_INDEX (insn) == QUEUE_READY)
	/* INSN is in the ready_list.  */
	{
	  ready_remove_insn (insn);
	  *insn_ptr = insn;
	  return 0;
	}

      /* INSN is in the queue.  Advance cycle to move it to the ready list.  */
      return -1;
    }

  lookahead = 0;

  if (targetm.sched.first_cycle_multipass_dfa_lookahead)
    lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
  if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
    {
      *insn_ptr = ready_remove_first (ready);
      return 0;
    }
  else
    {
      /* Try to choose the better insn.  */
      int index = 0, i, n;
      rtx insn;
      int more_issue, max_points, try_data = 1, try_control = 1;

      if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
	{
	  cached_first_cycle_multipass_dfa_lookahead = lookahead;
	  max_lookahead_tries = 100;
	  for (i = 0; i < issue_rate; i++)
	    max_lookahead_tries *= lookahead;
	}
      insn = ready_element (ready, 0);
      if (INSN_CODE (insn) < 0)
	{
	  *insn_ptr = ready_remove_first (ready);
	  return 0;
	}

      if (spec_info
	  && spec_info->flags & (PREFER_NON_DATA_SPEC
				 | PREFER_NON_CONTROL_SPEC))
	{
	  for (i = 0, n = ready->n_ready; i < n; i++)
	    {
	      rtx x;
	      ds_t s;

	      x = ready_element (ready, i);
	      s = TODO_SPEC (x);

	      if (spec_info->flags & PREFER_NON_DATA_SPEC
		  && !(s & DATA_SPEC))
		{
		  try_data = 0;
		  if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
		      || !try_control)
		    break;
		}

	      if (spec_info->flags & PREFER_NON_CONTROL_SPEC
		  && !(s & CONTROL_SPEC))
		{
		  try_control = 0;
		  if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
		    break;
		}
	    }
	}

      if ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
	  || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
	  || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
	      && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
	      (insn)))
	/* Discard speculative instruction that stands first in the ready
	   list.  */
	{
	  change_queue_index (insn, 1);
	  return 1;
	}

      max_points = ISSUE_POINTS (insn);
      more_issue = issue_rate - cycle_issued_insns - 1;

      for (i = 1; i < ready->n_ready; i++)
	{
	  insn = ready_element (ready, i);
	  ready_try [i]
	    = (INSN_CODE (insn) < 0
	       || (!try_data && (TODO_SPEC (insn) & DATA_SPEC))
	       || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
	       || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
		   && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
		   (insn)));

	  if (!ready_try [i] && more_issue-- > 0)
	    max_points += ISSUE_POINTS (insn);
	}

      if (max_issue (ready, &index, max_points) == 0)
	{
	  *insn_ptr = ready_remove_first (ready);
	  return 0;
	}
      else
	{
	  *insn_ptr = ready_remove (ready, index);
	  return 0;
	}
    }
}
2123 /* Use forward list scheduling to rearrange insns of block pointed to by
2124 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2128 schedule_block (basic_block *target_bb, int rgn_n_insns1)
2130 struct ready_list ready;
2131 int i, first_cycle_insn_p;
2133 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2134 int sort_p, advance, start_clock_var;
2136 /* Head/tail info for this block. */
2137 rtx prev_head = current_sched_info->prev_head;
2138 rtx next_tail = current_sched_info->next_tail;
2139 rtx head = NEXT_INSN (prev_head);
2140 rtx tail = PREV_INSN (next_tail);
2142 /* We used to have code to avoid getting parameters moved from hard
2143 argument registers into pseudos.
2145 However, it was removed when it proved to be of marginal benefit
2146 and caused problems because schedule_block and compute_forward_dependences
2147 had different notions of what the "head" insn was. */
2149 gcc_assert (head != tail || INSN_P (head));
2151 added_recovery_block_p = false;
2155 dump_new_block_header (0, *target_bb, head, tail);
2157 state_reset (curr_state);
2159 /* Allocate the ready list. */
2163 choice_stack = NULL;
2166 extend_ready (rgn_n_insns1 + 1);
2168 ready.first = ready.veclen - 1;
2171 /* It is used for first cycle multipass scheduling. */
2172 temp_state = alloca (dfa_state_size);
2174 if (targetm.sched.md_init)
2175 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2177 /* We start inserting insns after PREV_HEAD. */
2178 last_scheduled_insn = prev_head;
2180 gcc_assert (NOTE_P (last_scheduled_insn)
2181 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2183 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the queue. */
2188 insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
2189 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
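/* INSN_QUEUE is used as a circular buffer indexed by time; e.g.
   queue_insn (insn, 3) links INSN into slot NEXT_Q_AFTER (q_ptr, 3),
   and each new cycle advances Q_PTR by one slot, so that insn
   resurfaces in queue_to_ready three cycles from now.  */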
2191 /* Start just before the beginning of time. */
2194 /* We need the queue and ready lists and clock_var to be initialized
2195 in try_ready () (which is called through init_ready_list ()). */
2196 (*current_sched_info->init_ready_list) ();
2198 /* The algorithm is O(n^2) in the number of ready insns at any given
2199 time in the worst case. Before reload we are more likely to have
2200 big lists so truncate them to a reasonable size. */
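/* For example (illustrative sizes): if the sorted ready list holds
   500 insns and MAX_SCHED_READY_INSNS were 300, everything from the
   first free-standing (non-SCHED_GROUP_P) insn past that limit
   onwards is requeued below for one cycle, bounding the per-cycle
   sorting cost.  */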
2201 if (!reload_completed && ready.n_ready > MAX_SCHED_READY_INSNS)
2203 ready_sort (&ready);
2205 /* Find first free-standing insn past MAX_SCHED_READY_INSNS. */
2206 for (i = MAX_SCHED_READY_INSNS; i < ready.n_ready; i++)
2207 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2210 if (sched_verbose >= 2)
2212 fprintf (sched_dump,
2213 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2214 fprintf (sched_dump,
2215 ";;\t\t before reload => truncated to %d insns\n", i);
2218 /* Delay all insns past it for 1 cycle. If the debug counter is
2219 activated, make an exception for the insn right after
2220 last_scheduled_insn. */
2224 if (dbg_cnt (sched_insn) == false)
2225 skip_insn = next_nonnote_insn (last_scheduled_insn);
2227 skip_insn = NULL_RTX;
2229 while (i < ready.n_ready)
2233 insn = ready_remove (&ready, i);
2235 if (insn != skip_insn)
2236 queue_insn (insn, 1);
2241 /* Now we can restore basic block notes and maintain precise cfg. */
2242 restore_bb_notes (*target_bb);
2244 last_clock_var = -1;
2249 /* Loop until all the insns in BB are scheduled. */
2250 while ((*current_sched_info->schedule_more_p) ())
2254 start_clock_var = clock_var;
2258 advance_one_cycle ();
2260 /* Add to the ready list all pending insns that can be issued now.
2261 If there are no ready insns, increment clock until one
2262 is ready and add all pending insns at that point to the ready list. */
2264 queue_to_ready (&ready);
2266 gcc_assert (ready.n_ready);
2268 if (sched_verbose >= 2)
2270 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2271 debug_ready_list (&ready);
2273 advance -= clock_var - start_clock_var;
2275 while (advance > 0);
2279 /* Sort the ready list based on priority. */
2280 ready_sort (&ready);
2282 if (sched_verbose >= 2)
2284 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2285 debug_ready_list (&ready);
2289 /* Allow the target to reorder the list, typically for
2290 better instruction bundling. */
2291 if (sort_p && targetm.sched.reorder
2292 && (ready.n_ready == 0
2293 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2295 targetm.sched.reorder (sched_dump, sched_verbose,
2296 ready_lastpos (&ready),
2297 &ready.n_ready, clock_var);
2299 can_issue_more = issue_rate;
2301 first_cycle_insn_p = 1;
2302 cycle_issued_insns = 0;
2309 if (sched_verbose >= 2)
2311 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2313 debug_ready_list (&ready);
2316 if (ready.n_ready == 0
2318 && reload_completed)
2320 /* Allow scheduling insns directly from the queue in case
2321 there's nothing better to do (ready list is empty) but
2322 there are still vacant dispatch slots in the current cycle. */
2323 if (sched_verbose >= 6)
2324 fprintf (sched_dump,";;\t\tSecond chance\n");
2325 memcpy (temp_state, curr_state, dfa_state_size);
2326 if (early_queue_to_ready (temp_state, &ready))
2327 ready_sort (&ready);
2330 if (ready.n_ready == 0 || !can_issue_more
2331 || state_dead_lock_p (curr_state)
2332 || !(*current_sched_info->schedule_more_p) ())
2335 /* Select and remove the insn from the ready list. */
2341 res = choose_ready (&ready, &insn);
2347 /* Restart choose_ready (). */
2350 gcc_assert (insn != NULL_RTX);
2353 insn = ready_remove_first (&ready);
2355 if (targetm.sched.dfa_new_cycle
2356 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2357 insn, last_clock_var,
2358 clock_var, &sort_p))
2359 /* SORT_P is used by the target to override sorting
2360 of the ready list. This is needed when the target
2361 has modified its internal structures expecting that
2362 the insn will be issued next. As we need the insn
2363 to have the highest priority (so it will be returned by
2364 the ready_remove_first call above), we invoke
2365 ready_add (&ready, insn, true).
2366 But, still, there is one issue: INSN can be later
2367 discarded by scheduler's front end through
2368 current_sched_info->can_schedule_ready_p, hence, won't be issued next. */
2371 ready_add (&ready, insn, true);
2376 memcpy (temp_state, curr_state, dfa_state_size);
2377 if (recog_memoized (insn) < 0)
2379 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2380 || asm_noperands (PATTERN (insn)) >= 0);
2381 if (!first_cycle_insn_p && asm_p)
2382 /* This is an asm insn which we tried to issue on a cycle
2383 other than the first. Issue it on the next cycle. */
2386 /* A USE insn, or something else we don't need to
2387 understand. We can't pass these directly to
2388 state_transition because it will trigger a
2389 fatal error for unrecognizable insns. */
2394 cost = state_transition (temp_state, insn);
2403 queue_insn (insn, cost);
2404 if (SCHED_GROUP_P (insn))
2413 if (current_sched_info->can_schedule_ready_p
2414 && ! (*current_sched_info->can_schedule_ready_p) (insn))
2415 /* We normally get here only if we don't want to move
2416 insn from the split block. */
2418 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
2422 /* DECISION is made. */
2424 if (TODO_SPEC (insn) & SPECULATIVE)
2425 generate_recovery_code (insn);
2427 if (control_flow_insn_p (last_scheduled_insn)
2428 /* This is used to switch basic blocks by request
2429 from scheduler front-end (actually, sched-ebb.c only).
2430 This is used to process blocks with single fallthru
2431 edge. If the succeeding block has a jump, that jump will try to
2432 move to the end of the current bb, thus corrupting the CFG. */
2433 || current_sched_info->advance_target_bb (*target_bb, insn))
2435 *target_bb = current_sched_info->advance_target_bb
2442 x = next_real_insn (last_scheduled_insn);
2444 dump_new_block_header (1, *target_bb, x, tail);
2447 last_scheduled_insn = bb_note (*target_bb);
2450 /* Update counters, etc in the scheduler's front end. */
2451 (*current_sched_info->begin_schedule_ready) (insn,
2452 last_scheduled_insn);
2455 last_scheduled_insn = insn;
2457 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2459 cycle_issued_insns++;
2460 memcpy (curr_state, temp_state, dfa_state_size);
2463 if (targetm.sched.variable_issue)
2465 targetm.sched.variable_issue (sched_dump, sched_verbose,
2466 insn, can_issue_more);
2467 /* A naked CLOBBER or USE generates no instruction, so do
2468 not count them against the issue rate. */
2469 else if (GET_CODE (PATTERN (insn)) != USE
2470 && GET_CODE (PATTERN (insn)) != CLOBBER)
2473 advance = schedule_insn (insn);
2475 /* After issuing an asm insn we should start a new cycle. */
2476 if (advance == 0 && asm_p)
2481 first_cycle_insn_p = 0;
2483 /* Sort the ready list based on priority. This must be
2484 redone here, as schedule_insn may have readied additional
2485 insns that will not be sorted correctly. */
2486 if (ready.n_ready > 0)
2487 ready_sort (&ready);
2489 if (targetm.sched.reorder2
2490 && (ready.n_ready == 0
2491 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2494 targetm.sched.reorder2 (sched_dump, sched_verbose,
2496 ? ready_lastpos (&ready) : NULL,
2497 &ready.n_ready, clock_var);
2505 fprintf (sched_dump, ";;\tReady list (final): ");
2506 debug_ready_list (&ready);
2509 if (current_sched_info->queue_must_finish_empty)
2510 /* Sanity check -- queue must be empty now. Meaningless if region has multiple bbs. */
2512 gcc_assert (!q_size && !ready.n_ready);
2515 /* We must maintain QUEUE_INDEX between blocks in region. */
2516 for (i = ready.n_ready - 1; i >= 0; i--)
2520 x = ready_element (&ready, i);
2521 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2522 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2526 for (i = 0; i <= max_insn_queue_index; i++)
2529 for (link = insn_queue[i]; link; link = XEXP (link, 1))
2534 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2535 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2537 free_INSN_LIST_list (&insn_queue[i]);
2541 if (!current_sched_info->queue_must_finish_empty
2542 || added_recovery_block_p)
2544 /* INSN_TICK (minimum clock tick at which the insn becomes
2545 ready) may not be correct for insns in the subsequent
2546 blocks of the region. We should use a correct value of
2547 `clock_var' or modify INSN_TICK. It is better to keep
2548 clock_var value equal to 0 at the start of a basic block.
2549 Therefore we modify INSN_TICK here. */
2550 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
2553 if (targetm.sched.md_finish)
2554 targetm.sched.md_finish (sched_dump, sched_verbose);
2556 /* Update head/tail boundaries. */
2557 head = NEXT_INSN (prev_head);
2558 tail = last_scheduled_insn;
2560 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2561 previously found among the insns. Insert them at the beginning of the insns. */
2565 basic_block head_bb = BLOCK_FOR_INSN (head);
2566 rtx note_head = note_list;
2568 while (PREV_INSN (note_head))
2570 set_block_for_insn (note_head, head_bb);
2571 note_head = PREV_INSN (note_head);
2573 /* In the loop above we've missed this note: */
2574 set_block_for_insn (note_head, head_bb);
2576 PREV_INSN (note_head) = PREV_INSN (head);
2577 NEXT_INSN (PREV_INSN (head)) = note_head;
2578 PREV_INSN (head) = note_list;
2579 NEXT_INSN (note_list) = head;
2586 fprintf (sched_dump, ";; total time = %d\n;; new head = %d\n",
2587 clock_var, INSN_UID (head));
2588 fprintf (sched_dump, ";; new tail = %d\n\n",
2592 current_sched_info->head = head;
2593 current_sched_info->tail = tail;
2598 for (i = 0; i <= rgn_n_insns; i++)
2599 free (choice_stack [i].state);
2600 free (choice_stack);
2603 /* Set_priorities: compute priority of each insn in the block. */
2606 set_priorities (rtx head, rtx tail)
2610 int sched_max_insns_priority =
2611 current_sched_info->sched_max_insns_priority;
2614 if (head == tail && (! INSN_P (head)))
2619 prev_head = PREV_INSN (head);
2620 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
2626 (void) priority (insn);
2628 gcc_assert (INSN_PRIORITY_KNOWN (insn));
2630 sched_max_insns_priority = MAX (sched_max_insns_priority,
2631 INSN_PRIORITY (insn));
2634 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
2639 /* Next LUID to assign to an instruction. */
2642 /* Initialize some global state for the scheduler. */
2651 /* Switch to working copy of sched_info. */
2652 memcpy (&current_sched_info_var, current_sched_info,
2653 sizeof (current_sched_info_var));
2654 current_sched_info = &current_sched_info_var;
2656 /* If cc0 is defined, disable speculative loads in its presence. */
2658 flag_schedule_speculative_load = 0;
2661 /* Set dump and sched_verbose for the desired debugging output. If no
2662 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2663 For -fsched-verbose=N, N>=10, print everything to stderr. */
2664 sched_verbose = sched_verbose_param;
2665 if (sched_verbose_param == 0 && dump_file)
2667 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
2668 ? stderr : dump_file);
2670 /* Initialize SPEC_INFO. */
2671 if (targetm.sched.set_sched_flags)
2673 spec_info = &spec_info_var;
2674 targetm.sched.set_sched_flags (spec_info);
2675 if (current_sched_info->flags & DO_SPECULATION)
2676 spec_info->weakness_cutoff =
2677 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
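/* For example (parameter value is illustrative): with
   PARAM_SCHED_SPEC_PROB_CUTOFF == 40, weakness_cutoff becomes
   (40 * MAX_DEP_WEAK) / 100, so speculations whose estimated
   success probability falls below 40% of the maximum are rejected
   by the dep_weak test in try_ready.  */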
2679 /* So we won't read anything accidentally. */
2681 #ifdef ENABLE_CHECKING
2682 check_sched_flags ();
2686 /* So we won't read anything accidentally. */
2689 /* Initialize issue_rate. */
2690 if (targetm.sched.issue_rate)
2691 issue_rate = targetm.sched.issue_rate ();
2695 if (cached_issue_rate != issue_rate)
2697 cached_issue_rate = issue_rate;
2698 /* To invalidate max_lookahead_tries: */
2699 cached_first_cycle_multipass_dfa_lookahead = 0;
2706 for (i = 0; i < old_max_uid; i++)
2709 h_i_d[i].todo_spec = HARD_DEP;
2710 h_i_d[i].queue_index = QUEUE_NOWHERE;
2711 h_i_d[i].tick = INVALID_TICK;
2712 h_i_d[i].inter_tick = INVALID_TICK;
2715 if (targetm.sched.init_dfa_pre_cycle_insn)
2716 targetm.sched.init_dfa_pre_cycle_insn ();
2718 if (targetm.sched.init_dfa_post_cycle_insn)
2719 targetm.sched.init_dfa_post_cycle_insn ();
2722 dfa_state_size = state_size ();
2723 curr_state = xmalloc (dfa_state_size);
2728 for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
2730 INSN_LUID (insn) = luid;
2732 /* Increment the next luid, unless this is a note. We don't
2733 really need separate IDs for notes and we don't want to
2734 schedule differently depending on whether or not there are
2735 line-number notes, i.e., depending on whether or not we're
2736 generating debugging information. */
2740 if (insn == BB_END (b))
2744 init_dependency_caches (luid);
2746 init_alias_analysis ();
2748 old_last_basic_block = 0;
2751 /* Compute INSN_REG_WEIGHT for all blocks. We must do this before
2752 removing death notes. */
2753 FOR_EACH_BB_REVERSE (b)
2754 find_insn_reg_weight (b);
2756 if (targetm.sched.md_init_global)
2757 targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
2759 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
2760 before_recovery = 0;
2762 #ifdef ENABLE_CHECKING
2763 /* This is used mainly for finding bugs in check_cfg () itself. */
2768 /* Free global data used during insn scheduling. */
2776 free_dependency_caches ();
2777 end_alias_analysis ();
2779 if (targetm.sched.md_finish_global)
2780 targetm.sched.md_finish_global (sched_dump, sched_verbose);
2782 if (spec_info && spec_info->dump)
2784 char c = reload_completed ? 'a' : 'b';
2786 fprintf (spec_info->dump,
2787 ";; %s:\n", current_function_name ());
2789 fprintf (spec_info->dump,
2790 ";; Procedure %cr-begin-data-spec motions == %d\n",
2792 fprintf (spec_info->dump,
2793 ";; Procedure %cr-be-in-data-spec motions == %d\n",
2795 fprintf (spec_info->dump,
2796 ";; Procedure %cr-begin-control-spec motions == %d\n",
2797 c, nr_begin_control);
2798 fprintf (spec_info->dump,
2799 ";; Procedure %cr-be-in-control-spec motions == %d\n",
2800 c, nr_be_in_control);
2803 #ifdef ENABLE_CHECKING
2804 /* After reload ia64 backend clobbers CFG, so can't check anything. */
2805 if (!reload_completed)
2809 current_sched_info = NULL;
2812 /* Fix INSN_TICKs of the instructions in the current block as well as
2813 INSN_TICKs of their dependents.
2814 HEAD and TAIL are the beginning and the end of the current scheduled block. */
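/* A sketch of the rebasing, assuming ticks are shifted down by
   NEXT_CLOCK (== clock_var + 1) as in the body below: if the
   just-scheduled block finished with clock_var == 7, an insn whose
   INSN_TICK was computed as 9 ends up with tick 9 - 8 == 1, clamped
   to MIN_TICK from below, so the next block can again start counting
   from a small clock value.  */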
2816 fix_inter_tick (rtx head, rtx tail)
2818 /* Set of instructions with corrected INSN_TICK. */
2819 bitmap_head processed;
2820 int next_clock = clock_var + 1;
2822 bitmap_initialize (&processed, 0);
2824 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
2825 the INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
2826 across different blocks. */
2827 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
2834 tick = INSN_TICK (head);
2835 gcc_assert (tick >= MIN_TICK);
2837 /* Fix INSN_TICK of instruction from just scheduled block. */
2838 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
2840 bitmap_set_bit (&processed, INSN_LUID (head));
2843 if (tick < MIN_TICK)
2846 INSN_TICK (head) = tick;
2849 FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (head))
2853 next = DEP_LINK_CON (link);
2854 tick = INSN_TICK (next);
2856 if (tick != INVALID_TICK
2857 /* If NEXT has its INSN_TICK calculated, fix it.
2858 If not - it will be properly calculated from
2859 scratch later in fix_tick_ready. */
2860 && !bitmap_bit_p (&processed, INSN_LUID (next)))
2862 bitmap_set_bit (&processed, INSN_LUID (next));
2865 if (tick < MIN_TICK)
2868 if (tick > INTER_TICK (next))
2869 INTER_TICK (next) = tick;
2871 tick = INTER_TICK (next);
2873 INSN_TICK (next) = tick;
2878 bitmap_clear (&processed);
2881 /* Check if NEXT is ready to be added to the ready or queue list.
2882 If "yes", add it to the proper list.
2884 -1 - is not ready yet,
2885 0 - added to the ready list,
2886 0 < N - queued for N cycles. */
2888 try_ready (rtx next)
2893 ts = &TODO_SPEC (next);
2896 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
2897 && ((old_ts & HARD_DEP)
2898 || (old_ts & SPECULATIVE)));
2900 if (!(current_sched_info->flags & DO_SPECULATION))
2902 if (deps_list_empty_p (INSN_BACK_DEPS (next)))
2907 *ts &= ~SPECULATIVE & ~HARD_DEP;
2909 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (next));
2913 ds_t ds = DEP_LINK_STATUS (link) & SPECULATIVE;
2915 /* Backward dependencies of the insn are maintained sorted.
2916 So if DEP_STATUS of the first dep is SPECULATIVE,
2917 then all other deps are speculative too. */
2920 /* Now we've got NEXT with speculative deps only.
2921 1. Look at the deps to see what we have to do.
2922 2. Check if we can do 'todo'. */
2925 while ((link = DEP_LINK_NEXT (link)) != NULL)
2927 ds = DEP_LINK_STATUS (link) & SPECULATIVE;
2928 *ts = ds_merge (*ts, ds);
2931 if (dep_weak (*ts) < spec_info->weakness_cutoff)
2932 /* Too few points. */
2933 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
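/* Illustrative numbers for the cutoff test above, assuming ds_merge
   combines weaknesses of a common speculation type multiplicatively:
   a single dep of weakness 0.6 * MAX_DEP_WEAK passes a 40% cutoff,
   but two such deps merge to about 0.36 * MAX_DEP_WEAK and the insn
   is demoted to HARD_DEP here.  */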
2941 gcc_assert (*ts == old_ts
2942 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
2943 else if (current_sched_info->new_ready)
2944 *ts = current_sched_info->new_ready (next, *ts);
2946 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
2947 have its original pattern or changed (speculative) one. This is due
2948 to changing ebb in region scheduling.
2949 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
2950 has speculative pattern.
2952 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
2953 control-speculative NEXT could have been discarded by sched-rgn.c
2954 (the same case as when discarded by can_schedule_ready_p ()). */
2956 if ((*ts & SPECULATIVE)
2957 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
2958 need to change anything. */
2964 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
2966 res = speculate_insn (next, *ts, &new_pat);
2971 /* It would be nice to change DEP_STATUS of all dependences,
2972 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
2973 so we won't reanalyze anything. */
2974 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
2978 /* We follow the rule that every speculative insn
2979 has a non-null ORIG_PAT. */
2980 if (!ORIG_PAT (next))
2981 ORIG_PAT (next) = PATTERN (next);
2985 if (!ORIG_PAT (next))
2986 /* If we are going to overwrite the original pattern of insn, save it. */
2988 ORIG_PAT (next) = PATTERN (next);
2990 change_pattern (next, new_pat);
2998 /* We need to restore pattern only if (*ts == 0), because otherwise it is
2999 either correct (*ts & SPECULATIVE),
3000 or we simply don't care (*ts & HARD_DEP). */
3002 gcc_assert (!ORIG_PAT (next)
3003 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3007 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3008 control-speculative NEXT could have been discarded by sched-rgn.c
3009 (the same case as when discarded by can_schedule_ready_p ()). */
3010 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3012 change_queue_index (next, QUEUE_NOWHERE);
3015 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3016 /* We should change the pattern of every previously speculative
3017 instruction, and we determine whether NEXT was speculative by using
3018 its ORIG_PAT field. There is one exception: speculation checks also
3019 have ORIG_PAT set, so skip them. */
3021 change_pattern (next, ORIG_PAT (next));
3022 ORIG_PAT (next) = 0;
3025 if (sched_verbose >= 2)
3027 int s = TODO_SPEC (next);
3029 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3030 (*current_sched_info->print_insn) (next, 0));
3032 if (spec_info && spec_info->dump)
3035 fprintf (spec_info->dump, "; data-spec;");
3036 if (s & BEGIN_CONTROL)
3037 fprintf (spec_info->dump, "; control-spec;");
3038 if (s & BE_IN_CONTROL)
3039 fprintf (spec_info->dump, "; in-control-spec;");
3042 fprintf (sched_dump, "\n");
3045 adjust_priority (next);
3047 return fix_tick_ready (next);
3050 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3052 fix_tick_ready (rtx next)
3056 if (!deps_list_empty_p (INSN_RESOLVED_BACK_DEPS (next)))
3061 tick = INSN_TICK (next);
3062 /* If tick is not equal to INVALID_TICK, then update
3063 INSN_TICK of NEXT with the most recent resolved dependence
3064 cost. Otherwise, recalculate from scratch. */
3065 full_p = (tick == INVALID_TICK);
3067 FOR_EACH_DEP_LINK (link, INSN_RESOLVED_BACK_DEPS (next))
3069 dep_t dep = DEP_LINK_DEP (link);
3070 rtx pro = DEP_PRO (dep);
3073 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3075 tick1 = INSN_TICK (pro) + dep_cost (dep);
3086 INSN_TICK (next) = tick;
3088 delay = tick - clock_var;
3090 delay = QUEUE_READY;
3092 change_queue_index (next, delay);
3097 /* Move NEXT to the proper queue list with (DELAY >= 1),
3098 or add it to the ready list (DELAY == QUEUE_READY),
3099 or remove it from the ready and queue lists entirely (DELAY == QUEUE_NOWHERE). */
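/* Hypothetical calls illustrating the contract (the constants are
   the real ones used elsewhere in this file):
       change_queue_index (insn, 3);               queue for 3 cycles
       change_queue_index (insn, QUEUE_READY);     move to the ready list
       change_queue_index (insn, QUEUE_NOWHERE);   drop from both lists  */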
3101 change_queue_index (rtx next, int delay)
3103 int i = QUEUE_INDEX (next);
3105 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3107 gcc_assert (i != QUEUE_SCHEDULED);
3109 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3110 || (delay < 0 && delay == i))
3111 /* We have nothing to do. */
3114 /* Remove NEXT from wherever it is now. */
3115 if (i == QUEUE_READY)
3116 ready_remove_insn (next);
3118 queue_remove (next);
3120 /* Add it to the proper place. */
3121 if (delay == QUEUE_READY)
3122 ready_add (readyp, next, false);
3123 else if (delay >= 1)
3124 queue_insn (next, delay);
3126 if (sched_verbose >= 2)
3128 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3129 (*current_sched_info->print_insn) (next, 0));
3131 if (delay == QUEUE_READY)
3132 fprintf (sched_dump, " into ready\n");
3133 else if (delay >= 1)
3134 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3136 fprintf (sched_dump, " removed from ready or queue lists\n");
3140 /* Extend H_I_D data. */
3144 /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
3145 pseudos which do not cross calls. */
3146 int new_max_uid = get_max_uid () + 1;
3148 h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
3149 old_max_uid = new_max_uid;
3151 if (targetm.sched.h_i_d_extended)
3152 targetm.sched.h_i_d_extended ();
3155 /* Extend READY, READY_TRY and CHOICE_STACK arrays.
3156 N_NEW_INSNS is the number of additional elements to allocate. */
3158 extend_ready (int n_new_insns)
3162 readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
3163 readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);
3165 ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
3166 rgn_n_insns + 1, sizeof (char));
3168 rgn_n_insns += n_new_insns;
3170 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3173 for (i = rgn_n_insns; n_new_insns--; i--)
3174 choice_stack[i].state = xmalloc (dfa_state_size);
3177 /* Extend global scheduler structures (those, that live across calls to
3178 schedule_block) to include information about just emitted INSN. */
3180 extend_global (rtx insn)
3182 gcc_assert (INSN_P (insn));
3183 /* These structures have scheduler scope. */
3187 extend_dependency_caches (1, 0);
3190 /* Extends global and local scheduler structures to include information
3191 about just emitted INSN. */
3193 extend_all (rtx insn)
3195 extend_global (insn);
3197 /* These structures have block scope. */
3200 (*current_sched_info->add_remove_insn) (insn, 0);
3203 /* Initialize h_i_d entry of the new INSN with default values.
3204 Values that are not explicitly initialized here hold zero. */
3206 init_h_i_d (rtx insn)
3208 INSN_LUID (insn) = luid++;
3209 INSN_COST (insn) = -1;
3210 TODO_SPEC (insn) = HARD_DEP;
3211 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
3212 INSN_TICK (insn) = INVALID_TICK;
3213 INTER_TICK (insn) = INVALID_TICK;
3214 find_insn_reg_weight1 (insn);
3216 /* These two lists will be freed in schedule_insn (). */
3217 INSN_BACK_DEPS (insn) = create_deps_list (false);
3218 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list (false);
3220 /* This one should be allocated on the obstack because it should live until
3221 the scheduling ends. */
3222 INSN_FORW_DEPS (insn) = create_deps_list (true);
3225 /* Generates recovery code for INSN. */
3227 generate_recovery_code (rtx insn)
3229 if (TODO_SPEC (insn) & BEGIN_SPEC)
3230 begin_speculative_block (insn);
3232 /* Here we have an insn with no dependencies on
3233 instructions other than CHECK_SPEC ones. */
3235 if (TODO_SPEC (insn) & BE_IN_SPEC)
3236 add_to_speculative_block (insn);
3240 /* Helper function. Tries to add speculative dependencies of type FS between instructions
3241 in deps_list L and TWIN. */
3243 process_insn_forw_deps_be_in_spec (deps_list_t l, rtx twin, ds_t fs)
3247 FOR_EACH_DEP_LINK (link, l)
3252 consumer = DEP_LINK_CON (link);
3254 ds = DEP_LINK_STATUS (link);
3256 if (/* If we want to create speculative dep. */
3258 /* And we can do that because this is a true dep. */
3259 && (ds & DEP_TYPES) == DEP_TRUE)
3261 gcc_assert (!(ds & BE_IN_SPEC));
3263 if (/* If this dep can be overcome with 'begin speculation'. */
3265 /* Then we have a choice: keep the dep 'begin speculative'
3266 or transform it into 'be in speculative'. */
3268 if (/* In try_ready we assert that if insn once became ready
3269 it can be removed from the ready (or queue) list only
3270 due to backend decision. Hence we can't let the
3271 probability of the speculative dep to decrease. */
3272 dep_weak (ds) <= dep_weak (fs))
3273 /* Transform it to be in speculative. */
3274 ds = (ds & ~BEGIN_SPEC) | fs;
3277 /* Mark the dep as 'be in speculative'. */
3281 add_back_forw_dep (consumer, twin, DEP_LINK_KIND (link), ds);
3285 /* Generates recovery code for BEGIN speculative INSN. */
3287 begin_speculative_block (rtx insn)
3289 if (TODO_SPEC (insn) & BEGIN_DATA)
3291 if (TODO_SPEC (insn) & BEGIN_CONTROL)
3294 create_check_block_twin (insn, false);
3296 TODO_SPEC (insn) &= ~BEGIN_SPEC;
3299 /* Generates recovery code for BE_IN speculative INSN. */
3301 add_to_speculative_block (rtx insn)
3306 rtx_vec_t priorities_roots;
3308 ts = TODO_SPEC (insn);
3309 gcc_assert (!(ts & ~BE_IN_SPEC));
3311 if (ts & BE_IN_DATA)
3313 if (ts & BE_IN_CONTROL)
3316 TODO_SPEC (insn) &= ~BE_IN_SPEC;
3317 gcc_assert (!TODO_SPEC (insn));
3319 DONE_SPEC (insn) |= ts;
3321 /* First we convert all simple checks to branchy. */
3322 for (link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)); link != NULL;)
3324 rtx check = DEP_LINK_PRO (link);
3326 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
3328 create_check_block_twin (check, true);
3330 /* Restart search. */
3331 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
3334 /* Continue search. */
3335 link = DEP_LINK_NEXT (link);
3338 priorities_roots = NULL;
3339 clear_priorities (insn, &priorities_roots);
3347 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
3349 gcc_assert ((DEP_LINK_STATUS (link) & BEGIN_SPEC) == 0
3350 && (DEP_LINK_STATUS (link) & BE_IN_SPEC) != 0
3351 && (DEP_LINK_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3353 check = DEP_LINK_PRO (link);
3355 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
3356 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
3358 rec = BLOCK_FOR_INSN (check);
3360 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
3361 extend_global (twin);
3363 copy_deps_list_change_con (INSN_RESOLVED_BACK_DEPS (twin),
3364 INSN_RESOLVED_BACK_DEPS (insn),
3367 if (sched_verbose && spec_info->dump)
3368 /* INSN_BB (insn) isn't determined for twin insns yet.
3369 So we can't use current_sched_info->print_insn. */
3370 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3371 INSN_UID (twin), rec->index);
3373 twins = alloc_INSN_LIST (twin, twins);
3375 /* Add dependences between TWIN and all appropriate
3376 instructions from REC. */
3379 add_back_forw_dep (twin, check, REG_DEP_TRUE, DEP_TRUE);
3383 link = DEP_LINK_NEXT (link);
3387 check = DEP_LINK_PRO (link);
3388 if (BLOCK_FOR_INSN (check) == rec)
3396 while (link != NULL);
3398 process_insn_forw_deps_be_in_spec (INSN_FORW_DEPS (insn), twin, ts);
3400 /* Remove all dependencies between INSN and insns in REC. */
3401 for (link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)); link != NULL;)
3403 check = DEP_LINK_PRO (link);
3405 if (BLOCK_FOR_INSN (check) == rec)
3407 delete_back_forw_dep (link);
3409 /* Restart search. */
3410 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
3413 /* Continue search. */
3414 link = DEP_LINK_NEXT (link);
3417 while (!deps_list_empty_p (INSN_BACK_DEPS (insn)));
3419 /* We couldn't have added the dependencies between INSN and TWINS earlier
3420 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
3425 twin = XEXP (twins, 0);
3426 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3428 twin = XEXP (twins, 1);
3429 free_INSN_LIST_node (twins);
3433 calc_priorities (priorities_roots);
3434 VEC_free (rtx, heap, priorities_roots);
3437 /* Extends the array pointed to by P and zero-fills only the new part. */
3439 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
3441 gcc_assert (new_nmemb >= old_nmemb);
3442 p = XRESIZEVAR (void, p, new_nmemb * size);
3443 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
3447 /* Return the probability of speculation success for the speculation of type DS. */
3455 dt = FIRST_SPEC_TYPE;
3460 res *= (ds_t) get_dep_weak (ds, dt);
3464 if (dt == LAST_SPEC_TYPE)
3466 dt <<= SPEC_TYPE_SHIFT;
3472 res /= MAX_DEP_WEAK;
3474 if (res < MIN_DEP_WEAK)
3477 gcc_assert (res <= MAX_DEP_WEAK);
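/* Worked example (weights are illustrative): a status carrying two
   speculation types of weakness 3/4 * MAX_DEP_WEAK each yields
   res == (3/4) * (3/4) * MAX_DEP_WEAK, i.e. the types are treated
   as independent events whose success probabilities multiply.  */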
3483 /* Helper function. Find fallthru edge from PRED. */
3485 find_fallthru_edge (basic_block pred)
3491 succ = pred->next_bb;
3492 gcc_assert (succ->prev_bb == pred);
3494 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
3496 FOR_EACH_EDGE (e, ei, pred->succs)
3497 if (e->flags & EDGE_FALLTHRU)
3499 gcc_assert (e->dest == succ);
3505 FOR_EACH_EDGE (e, ei, succ->preds)
3506 if (e->flags & EDGE_FALLTHRU)
3508 gcc_assert (e->src == pred);
3516 /* Initialize BEFORE_RECOVERY variable. */
3518 init_before_recovery (void)
3523 last = EXIT_BLOCK_PTR->prev_bb;
3524 e = find_fallthru_edge (last);
3528 /* We create two basic blocks:
3529 1. A single-instruction block inserted right after E->SRC, which jumps to
3531 2. An empty block right before EXIT_BLOCK.
3532 Between these two blocks recovery blocks will be emitted. */
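/* The resulting shape, sketched (->> matches the notation of the
   dump printed below):

       last ->> single -> empty ->> EXIT  */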
3534 basic_block single, empty;
3537 single = create_empty_bb (last);
3538 empty = create_empty_bb (single);
3540 single->count = last->count;
3541 empty->count = last->count;
3542 single->frequency = last->frequency;
3543 empty->frequency = last->frequency;
3544 BB_COPY_PARTITION (single, last);
3545 BB_COPY_PARTITION (empty, last);
3547 redirect_edge_succ (e, single);
3548 make_single_succ_edge (single, empty, 0);
3549 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
3550 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
3552 label = block_label (empty);
3553 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
3554 JUMP_LABEL (x) = label;
3555 LABEL_NUSES (label)++;
3558 emit_barrier_after (x);
3560 add_block (empty, 0);
3561 add_block (single, 0);
3563 before_recovery = single;
3565 if (sched_verbose >= 2 && spec_info->dump)
3566 fprintf (spec_info->dump,
3567 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3568 last->index, single->index, empty->index);
3571 before_recovery = last;
3574 /* Returns new recovery block. */
3576 create_recovery_block (void)
3582 added_recovery_block_p = true;
3584 if (!before_recovery)
3585 init_before_recovery ();
3587 barrier = get_last_bb_insn (before_recovery);
3588 gcc_assert (BARRIER_P (barrier));
3590 label = emit_label_after (gen_label_rtx (), barrier);
3592 rec = create_basic_block (label, label, before_recovery);
3594 /* Recovery blocks always end with an unconditional jump. */
3595 emit_barrier_after (BB_END (rec));
3597 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
3598 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
3600 if (sched_verbose && spec_info->dump)
3601 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
3604 before_recovery = rec;
3609 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
3610 INSN is a simple check that should be converted to a branchy one. */
3612 create_check_block_twin (rtx insn, bool mutate_p)
3615 rtx label, check, twin;
3619 gcc_assert (ORIG_PAT (insn)
3621 || (IS_SPECULATION_SIMPLE_CHECK_P (insn)
3622 && !(TODO_SPEC (insn) & SPECULATIVE))));
3624 /* Create recovery block. */
3625 if (mutate_p || targetm.sched.needs_block_p (insn))
3627 rec = create_recovery_block ();
3628 label = BB_HEAD (rec);
3632 rec = EXIT_BLOCK_PTR;
3637 check = targetm.sched.gen_check (insn, label, mutate_p);
3639 if (rec != EXIT_BLOCK_PTR)
3641 /* To have mem_reg alive at the beginning of second_bb,
3642 we emit check BEFORE insn, so insn after splitting
3643 insn will be at the beginning of second_bb, which will
3644 provide us with the correct life information. */
3645 check = emit_jump_insn_before (check, insn);
3646 JUMP_LABEL (check) = label;
3647 LABEL_NUSES (label)++;
3650 check = emit_insn_before (check, insn);
3652 /* Extend data structures. */
3654 RECOVERY_BLOCK (check) = rec;
3656 if (sched_verbose && spec_info->dump)
3657 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
3658 (*current_sched_info->print_insn) (check, 0));
3660 gcc_assert (ORIG_PAT (insn));
3662 /* Initialize TWIN (twin is a duplicate of original instruction
3663 in the recovery block). */
3664 if (rec != EXIT_BLOCK_PTR)
3666 FOR_EACH_DEP_LINK (link, INSN_RESOLVED_BACK_DEPS (insn))
3667 if ((DEP_LINK_STATUS (link) & DEP_OUTPUT) != 0)
3669 struct _dep _dep, *dep = &_dep;
3671 init_dep (dep, DEP_LINK_PRO (link), check, REG_DEP_TRUE);
3673 add_back_dep_to_deps_list (INSN_RESOLVED_BACK_DEPS (check), dep);
3676 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
3677 extend_global (twin);
3679 if (sched_verbose && spec_info->dump)
3680 /* INSN_BB (insn) isn't determined for twin insns yet.
3681 So we can't use current_sched_info->print_insn. */
3682 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3683 INSN_UID (twin), rec->index);
3687 ORIG_PAT (check) = ORIG_PAT (insn);
3688 HAS_INTERNAL_DEP (check) = 1;
3690 /* ??? We probably should change all OUTPUT dependencies to (TRUE | OUTPUT). */
3694 copy_deps_list_change_con (INSN_RESOLVED_BACK_DEPS (twin),
3695 INSN_RESOLVED_BACK_DEPS (insn),
3698 if (rec != EXIT_BLOCK_PTR)
3699 /* In case of branchy check, fix CFG. */
3701 basic_block first_bb, second_bb;
3706 first_bb = BLOCK_FOR_INSN (check);
3707 e = split_block (first_bb, check);
3708 /* split_block emits note if *check == BB_END. Probably it
3709 is better to rip that note off. */
3710 gcc_assert (e->src == first_bb);
3711 second_bb = e->dest;
3713 /* This is fixing of incoming edge. */
3714 /* ??? Which other flags should be specified? */
3715 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
3716 /* Partition type is the same, if it is "unpartitioned". */
3717 edge_flags = EDGE_CROSSING;
3721 e = make_edge (first_bb, rec, edge_flags);
3723 add_block (second_bb, first_bb);
3725 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
3726 label = block_label (second_bb);
3727 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
3728 JUMP_LABEL (jump) = label;
3729 LABEL_NUSES (label)++;
3730 extend_global (jump);
3732 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
3733 /* Partition type is the same, if it is "unpartitioned". */
3735 /* Rewritten from cfgrtl.c. */
3736 if (flag_reorder_blocks_and_partition
3737 && targetm.have_named_sections
3738 /*&& !any_condjump_p (jump)*/)
3739 /* any_condjump_p (jump) == false.
3740 We don't need the same note for the check because
3741 any_condjump_p (check) == true. */
3743 REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
3747 edge_flags = EDGE_CROSSING;
3752 make_single_succ_edge (rec, second_bb, edge_flags);
3754 add_block (rec, EXIT_BLOCK_PTR);
3757 /* Move backward dependences from INSN to CHECK and
3758 move forward dependences from INSN to TWIN. */
3759 FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
3761 rtx pro = DEP_LINK_PRO (link);
3762 enum reg_note dk = DEP_LINK_KIND (link);
3765 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
3766 check --TRUE--> producer ??? or ANTI ???
3767 twin --TRUE--> producer
3768 twin --ANTI--> check
3770 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
3771 check --ANTI--> producer
3772 twin --ANTI--> producer
3773 twin --ANTI--> check
3775 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
3776 check ~~TRUE~~> producer
3777 twin ~~TRUE~~> producer
3778 twin --ANTI--> check */
3780 ds = DEP_LINK_STATUS (link);
3782 if (ds & BEGIN_SPEC)
3784 gcc_assert (!mutate_p);
3788 if (rec != EXIT_BLOCK_PTR)
3790 add_back_forw_dep (check, pro, dk, ds);
3791 add_back_forw_dep (twin, pro, dk, ds);
3794 add_back_forw_dep (check, pro, dk, ds);
3797 for (link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)); link != NULL;)
3798 if ((DEP_LINK_STATUS (link) & BEGIN_SPEC)
3800 /* We can delete this dep only if we totally overcome it with
3801 BEGIN_SPECULATION. */
3803 delete_back_forw_dep (link);
3805 /* Restart search. */
3806 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
3809 /* Continue search. */
3810 link = DEP_LINK_NEXT (link);
3814 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
3817 gcc_assert (!DONE_SPEC (insn));
3821 ds_t ts = TODO_SPEC (insn);
3823 DONE_SPEC (insn) = ts & BEGIN_SPEC;
3824 CHECK_SPEC (check) = ts & BEGIN_SPEC;
3826 if (ts & BEGIN_DATA)
3827 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
3828 if (ts & BEGIN_CONTROL)
3829 fs = set_dep_weak (fs, BE_IN_CONTROL, get_dep_weak (ts, BEGIN_CONTROL));
3832 CHECK_SPEC (check) = CHECK_SPEC (insn);
3834 /* Future speculations: call the helper. */
3835 process_insn_forw_deps_be_in_spec (INSN_FORW_DEPS (insn), twin, fs);
3837 if (rec != EXIT_BLOCK_PTR)
3839 /* Which types of dependencies we should use here is,
3840 generally, a machine-dependent question... But, for now,
3845 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE);
3846 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3852 if (spec_info->dump)
3853 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
3854 (*current_sched_info->print_insn) (insn, 0));
3856 /* Remove all forward dependencies of the INSN. */
3857 link = DEPS_LIST_FIRST (INSN_FORW_DEPS (insn));
3858 while (link != NULL)
3860 delete_back_forw_dep (link);
3861 link = DEPS_LIST_FIRST (INSN_FORW_DEPS (insn));
3864 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
3867 sched_remove_insn (insn);
3870 add_back_forw_dep (twin, check, REG_DEP_ANTI, DEP_ANTI);
3873 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
3876 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
3877 because it'll be done later in add_to_speculative_block. */
3879 rtx_vec_t priorities_roots = NULL;
3881 clear_priorities (twin, &priorities_roots);
3882 calc_priorities (priorities_roots);
3883 VEC_free (rtx, heap, priorities_roots);
3887 /* Removes dependencies between instructions in the recovery block REC
3888 and usual region instructions. It keeps inner dependences so it
3889 won't be necessary to recompute them. */
3891 fix_recovery_deps (basic_block rec)
3894 rtx note, insn, jump, ready_list = 0;
3895 bitmap_head in_ready;
3898 bitmap_initialize (&in_ready, 0);
3900 /* NOTE - a basic block note. */
3901 note = NEXT_INSN (BB_HEAD (rec));
3902 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
3903 insn = BB_END (rec);
3904 gcc_assert (JUMP_P (insn));
3905 insn = PREV_INSN (insn);
3909 for (link = DEPS_LIST_FIRST (INSN_FORW_DEPS (insn)); link != NULL;)
3913 consumer = DEP_LINK_CON (link);
3915 if (BLOCK_FOR_INSN (consumer) != rec)
3917 delete_back_forw_dep (link);
3919 if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
3921 ready_list = alloc_INSN_LIST (consumer, ready_list);
3922 bitmap_set_bit (&in_ready, INSN_LUID (consumer));
3925 /* Restart search. */
3926 link = DEPS_LIST_FIRST (INSN_FORW_DEPS (insn));
3930 gcc_assert ((DEP_LINK_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3932 /* Continue search. */
3933 link = DEP_LINK_NEXT (link);
3937 insn = PREV_INSN (insn);
3939 while (insn != note);
3941 bitmap_clear (&in_ready);
3943 /* Try to add instructions to the ready or queue list. */
3944 for (link1 = ready_list; link1; link1 = XEXP (link1, 1))
3945 try_ready (XEXP (link1, 0));
3946 free_INSN_LIST_list (&ready_list);
3948 /* Fixing jump's dependences. */
3949 insn = BB_HEAD (rec);
3950 jump = BB_END (rec);
3952 gcc_assert (LABEL_P (insn));
3953 insn = NEXT_INSN (insn);
3955 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
3956 add_jump_dependencies (insn, jump);
3959 /* Changes pattern of the INSN to NEW_PAT. */
3961 change_pattern (rtx insn, rtx new_pat)
3965 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
3967 /* Invalidate INSN_COST, so it'll be recalculated. */
3968 INSN_COST (insn) = -1;
3969 /* Invalidate INSN_TICK, so it'll be recalculated. */
3970 INSN_TICK (insn) = INVALID_TICK;
3971 dfa_clear_single_insn_cache (insn);
3975 /* -1 - can't speculate,
3976 0 - for speculation with REQUEST mode it is OK to use
3977 current instruction pattern,
3978 1 - need to change pattern for *NEW_PAT to be speculative. */
3980 speculate_insn (rtx insn, ds_t request, rtx *new_pat)
3982 gcc_assert (current_sched_info->flags & DO_SPECULATION
3983 && (request & SPECULATIVE));
3985 if (!NONJUMP_INSN_P (insn)
3986 || HAS_INTERNAL_DEP (insn)
3987 || SCHED_GROUP_P (insn)
3988 || side_effects_p (PATTERN (insn))
3989 || (request & spec_info->mask) != request)
3992 gcc_assert (!IS_SPECULATION_CHECK_P (insn));
3994 if (request & BE_IN_SPEC)
3996 if (may_trap_p (PATTERN (insn)))
3999 if (!(request & BEGIN_SPEC))
4003 return targetm.sched.speculate_insn (insn, request & BEGIN_SPEC, new_pat);
4006 /* Print some information about block BB, which starts with HEAD and
4007 ends with TAIL, before scheduling it.
4008 I is zero if the scheduler is about to start with a fresh ebb. */
4010 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4013 fprintf (sched_dump,
4014 ";; ======================================================\n");
4016 fprintf (sched_dump,
4017 ";; =====================ADVANCING TO=====================\n");
4018 fprintf (sched_dump,
4019 ";; -- basic block %d from %d to %d -- %s reload\n",
4020 bb->index, INSN_UID (head), INSN_UID (tail),
4021 (reload_completed ? "after" : "before"));
4022 fprintf (sched_dump,
4023 ";; ======================================================\n");
4024 fprintf (sched_dump, "\n");
4027 /* Unlink basic block notes and labels and save them, so they
4028 can be easily restored. We unlink basic block notes in EBB to
4029 provide backward compatibility with the previous code, as target backends
4030 assume that there'll be only instructions between
4031 current_sched_info->{head and tail}. We restore these notes as soon as possible.
4033 FIRST (LAST) is the first (last) basic block in the ebb.
4034 NB: In usual case (FIRST == LAST) nothing is really done. */
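/* A sketch of the splice performed for each unlinked block (using
   the local names of this function):

       before:  prev -> label -> bb-note -> next
       after:   prev -> next      (LABEL saved in bb_header[])

   restore_bb_notes undoes exactly this splice.  */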
4036 unlink_bb_notes (basic_block first, basic_block last)
4038 /* We DON'T unlink basic block notes of the first block in the ebb. */
4042 bb_header = xmalloc (last_basic_block * sizeof (*bb_header));
4044 /* Make a sentinel. */
4045 if (last->next_bb != EXIT_BLOCK_PTR)
4046 bb_header[last->next_bb->index] = 0;
4048 first = first->next_bb;
4051 rtx prev, label, note, next;
4053 label = BB_HEAD (last);
4054 if (LABEL_P (label))
4055 note = NEXT_INSN (label);
4058 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4060 prev = PREV_INSN (label);
4061 next = NEXT_INSN (note);
4062 gcc_assert (prev && next);
4064 NEXT_INSN (prev) = next;
4065 PREV_INSN (next) = prev;
4067 bb_header[last->index] = label;
4072 last = last->prev_bb;
4077 /* Restore basic block notes.
4078 FIRST is the first basic block in the ebb. */
4080 restore_bb_notes (basic_block first)
4085 /* We DON'T unlink basic block notes of the first block in the ebb. */
4086 first = first->next_bb;
4087 /* Remember: FIRST is actually the second basic block in the ebb. */
4089 while (first != EXIT_BLOCK_PTR
4090 && bb_header[first->index])
4092 rtx prev, label, note, next;
4094 label = bb_header[first->index];
4095 prev = PREV_INSN (label);
4096 next = NEXT_INSN (prev);
4098 if (LABEL_P (label))
4099 note = NEXT_INSN (label);
4102 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4104 bb_header[first->index] = 0;
4106 NEXT_INSN (prev) = label;
4107 NEXT_INSN (note) = next;
4108 PREV_INSN (next) = note;
4110 first = first->next_bb;
4117 /* Extend per basic block data structures of the scheduler.
4118 If BB is NULL, initialize structures for the whole CFG.
4119 Otherwise, initialize them for the just created BB. */
4125 old_last_basic_block = last_basic_block;
4127 /* The following is done to keep current_sched_info->next_tail non null. */
4129 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4130 if (NEXT_INSN (insn) == 0
4133 /* Don't emit a NOTE if it would end up before a BARRIER. */
4134 && !BARRIER_P (NEXT_INSN (insn))))
4136 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
4137 /* Make insn appear outside BB. */
4138 set_block_for_insn (note, NULL);
4139 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4143 /* Add a basic block BB to extended basic block EBB.
4144 If EBB is EXIT_BLOCK_PTR, then BB is a recovery block.
4145 If EBB is NULL, then BB should be a new region. */
4147 add_block (basic_block bb, basic_block ebb)
4149 gcc_assert (current_sched_info->flags & NEW_BBS);
4153 if (current_sched_info->add_block)
4154 /* This changes only data structures of the front-end. */
4155 current_sched_info->add_block (bb, ebb);
4159 /* Helper function. Fix CFG after both in- and inter-block movement of
4160 control_flow_insn_p JUMP. */
4162 fix_jump_move (rtx jump)
4164 basic_block bb, jump_bb, jump_bb_next;
4166 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4167 jump_bb = BLOCK_FOR_INSN (jump);
4168 jump_bb_next = jump_bb->next_bb;
4170 gcc_assert (current_sched_info->flags & SCHED_EBB
4171 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
4173 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4174 /* If jump_bb_next is not empty. */
4175 BB_END (jump_bb) = BB_END (jump_bb_next);
4177 if (BB_END (bb) != PREV_INSN (jump))
4178 /* Then there are instructions after the jump that should be placed to jump_bb_next. */
4180 BB_END (jump_bb_next) = BB_END (bb);
4182 /* Otherwise jump_bb_next is empty. */
4183 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4185 /* To make assertion in move_insn happy. */
4186 BB_END (bb) = PREV_INSN (jump);
4188 update_bb_for_insn (jump_bb_next);
4191 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
4193 move_block_after_check (rtx jump)
4195 basic_block bb, jump_bb, jump_bb_next;
4198 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4199 jump_bb = BLOCK_FOR_INSN (jump);
4200 jump_bb_next = jump_bb->next_bb;
4202 update_bb_for_insn (jump_bb);
4204 gcc_assert (IS_SPECULATION_CHECK_P (jump)
4205 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
4207 unlink_block (jump_bb_next);
4208 link_block (jump_bb_next, bb);
4212 move_succs (&(jump_bb->succs), bb);
4213 move_succs (&(jump_bb_next->succs), jump_bb);
4214 move_succs (&t, jump_bb_next);
4216 df_mark_solutions_dirty ();
4218 if (current_sched_info->fix_recovery_cfg)
4219 current_sched_info->fix_recovery_cfg
4220 (bb->index, jump_bb->index, jump_bb_next->index);
4223 /* Helper function for move_block_after_check.
4224 This function attaches the edge vector pointed to by SUCCSP to block TO. */
4227 move_succs (VEC(edge,gc) **succsp, basic_block to)
4232 gcc_assert (to->succs == 0);
4234 to->succs = *succsp;
4236 FOR_EACH_EDGE (e, ei, to->succs)
4242 /* Remove INSN from the instruction stream.
4243 INSN should not have any dependencies. */
4245 sched_remove_insn (rtx insn)
4247 change_queue_index (insn, QUEUE_NOWHERE);
4248 current_sched_info->add_remove_insn (insn, 1);
4252 /* Clear priorities of all instructions that are forward dependent on INSN.
4253 Store in vector pointed to by ROOTS_PTR insns on which priority () should
4254 be invoked to initialize all cleared priorities. */
4256 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
4259 bool insn_is_root_p = true;
4261 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
4263 FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
4265 dep_t dep = DEP_LINK_DEP (link);
4266 rtx pro = DEP_PRO (dep);
4268 if (INSN_PRIORITY_STATUS (pro) >= 0
4269 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
4271 /* If DEP doesn't contribute to priority then INSN itself should
4272 be added to priority roots. */
4273 if (contributes_to_priority_p (dep))
4274 insn_is_root_p = false;
4276 INSN_PRIORITY_STATUS (pro) = -1;
4277 clear_priorities (pro, roots_ptr);
4282 VEC_safe_push (rtx, heap, *roots_ptr, insn);
4285 /* Recompute priorities of instructions whose priorities might have been
4286 changed. ROOTS is a vector of instructions whose priority computation will
4287 trigger initialization of all cleared priorities. */
4289 calc_priorities (rtx_vec_t roots)
4294 for (i = 0; VEC_iterate (rtx, roots, i, insn); i++)
4299 /* Add dependences between JUMP and other instructions in the recovery
4300 block. INSN is the first insn in the recovery block. */
4302 add_jump_dependencies (rtx insn, rtx jump)
4306 insn = NEXT_INSN (insn);
4310 if (deps_list_empty_p (INSN_FORW_DEPS (insn)))
4311 add_back_forw_dep (jump, insn, REG_DEP_ANTI, DEP_ANTI);
4315 gcc_assert (!deps_list_empty_p (INSN_BACK_DEPS (jump)));
4318 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
4320 bb_note (basic_block bb)
4324 note = BB_HEAD (bb);
4326 note = NEXT_INSN (note);
4328 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4332 #ifdef ENABLE_CHECKING
4333 extern void debug_spec_status (ds_t);
4335 /* Dump information about the dependence status S. */
4337 debug_spec_status (ds_t s)
4342 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak (s, BEGIN_DATA));
4344 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak (s, BE_IN_DATA));
4345 if (s & BEGIN_CONTROL)
4346 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak (s, BEGIN_CONTROL));
4347 if (s & BE_IN_CONTROL)
4348 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak (s, BE_IN_CONTROL));
4351 fprintf (f, "HARD_DEP; ");
4354 fprintf (f, "DEP_TRUE; ");
4356 fprintf (f, "DEP_ANTI; ");
4358 fprintf (f, "DEP_OUTPUT; ");
4363 /* Helper function for check_cfg.
4364 Return nonzero if the edge vector pointed to by EL has an edge with TYPE in its flags. */
4367 has_edge_p (VEC(edge,gc) *el, int type)
4372 FOR_EACH_EDGE (e, ei, el)
4373 if (e->flags & type)
4378 /* Check a few properties of the CFG between HEAD and TAIL.
4379 If HEAD (TAIL) is NULL check from the beginning (till the end) of the
4380 instruction stream. */
4382 check_cfg (rtx head, rtx tail)
4386 int not_first = 0, not_last;
4389 head = get_insns ();
4391 tail = get_last_insn ();
4392 next_tail = NEXT_INSN (tail);
4396 not_last = head != tail;
4399 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
4401 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
4404 || (NOTE_INSN_BASIC_BLOCK_P (head)
4406 || (not_first && !LABEL_P (PREV_INSN (head))))))
4408 gcc_assert (bb == 0);
4409 bb = BLOCK_FOR_INSN (head);
4411 gcc_assert (BB_HEAD (bb) == head);
4413 /* This is the case of a jump table. See inside_basic_block_p (). */
4414 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
4419 gcc_assert (!inside_basic_block_p (head));
4420 head = NEXT_INSN (head);
4424 gcc_assert (inside_basic_block_p (head)
4426 gcc_assert (BLOCK_FOR_INSN (head) == bb);
4430 head = NEXT_INSN (head);
4431 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
4435 if (control_flow_insn_p (head))
4437 gcc_assert (BB_END (bb) == head);
4439 if (any_uncondjump_p (head))
4440 gcc_assert (EDGE_COUNT (bb->succs) == 1
4441 && BARRIER_P (NEXT_INSN (head)));
4442 else if (any_condjump_p (head))
4443 gcc_assert (/* Usual case. */
4444 (EDGE_COUNT (bb->succs) > 1
4445 && !BARRIER_P (NEXT_INSN (head)))
4446 /* Or jump to the next instruction. */
4447 || (EDGE_COUNT (bb->succs) == 1
4448 && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
4449 == JUMP_LABEL (head))));
4451 if (BB_END (bb) == head)
4453 if (EDGE_COUNT (bb->succs) > 1)
4454 gcc_assert (control_flow_insn_p (head)
4455 || has_edge_p (bb->succs, EDGE_COMPLEX));
4459 head = NEXT_INSN (head);
4465 while (head != next_tail);
4467 gcc_assert (bb == 0);
4470 /* Perform a few consistency checks of flags in different data structures. */
4472 check_sched_flags (void)
4474 unsigned int f = current_sched_info->flags;
4476 if (flag_sched_stalled_insns)
4477 gcc_assert (!(f & DO_SPECULATION));
4478 if (f & DO_SPECULATION)
4479 gcc_assert (!flag_sched_stalled_insns
4481 && spec_info->mask);
4483 #endif /* ENABLE_CHECKING */
4485 #endif /* INSN_SCHEDULING */