1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
5 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 2, or (at your option) any later
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
25 contains the generic parts. The actual entry point for
26 the normal instruction scheduling pass is found in sched-rgn.c.
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
60 1. choose insn with the longest path to end of bb, ties
62 2. choose insn with least contribution to register pressure,
64 3. prefer in-block upon interblock motion, ties broken by
65 4. prefer useful upon speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
68 6. choose insn with the least dependences upon the previously
69 scheduled insn, or finally
70 7. choose the insn which has the most insns dependent on it,
71 8. choose insn with lowest UID.
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76 memory references. To first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using insn backward dependences
86 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
87 INSN_FORW_DEPS for the purpose of forward list scheduling.
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114 exist that inhibit the motion (live registers constraints are not
115 represented by data dependence edges).
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
129 #include "coretypes.h"
134 #include "hard-reg-set.h"
136 #include "function.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
143 #include "sched-int.h"
148 #ifdef INSN_SCHEDULING
150 /* issue_rate is the number of insns that can be scheduled in the same
151 machine cycle. It can be defined in the config/mach/mach.h file,
152 otherwise we set it to 1. */
154 static int issue_rate;
156 /* sched-verbose controls the amount of debugging output the
157 scheduler prints. It is controlled by -fsched-verbose=N:
158 N>0 and no -DSR : the output is directed to stderr.
159 N>=10 will direct the printouts to stderr (regardless of -dSR).
161 N=2: bb's probabilities, detailed ready list info, unit/insn info.
162 N=3: rtl at abort point, control-flow, regions info.
163 N=5: dependences info. */
165 static int sched_verbose_param = 0;
166 int sched_verbose = 0;
168 /* Debugging file. All printouts are sent to dump, which is always set,
169 either to stderr, or to the dump listing file (-dRS). */
170 FILE *sched_dump = 0;
172 /* Highest uid before scheduling. */
173 static int old_max_uid;
175 /* fix_sched_param() is called from toplev.c upon detection
176 of the -fsched-verbose=N option. */
/* Only the "verbose" parameter is recognized; any other PARAM provokes a
   warning.  Note that atoi() gives no error indication for a malformed
   VAL, so a non-numeric argument silently yields 0.  */
179 fix_sched_param (const char *param, const char *val)
181 if (!strcmp (param, "verbose"))
182 sched_verbose_param = atoi (val);
184 warning (0, "fix_sched_param: unknown param: %s", param);
187 struct haifa_insn_data *h_i_d;
189 #define INSN_TICK(INSN) (h_i_d[INSN_UID (INSN)].tick)
190 #define INTER_TICK(INSN) (h_i_d[INSN_UID (INSN)].inter_tick)
192 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
193 then it should be recalculated from scratch. */
194 #define INVALID_TICK (-(max_insn_queue_index + 1))
195 /* The minimal value of the INSN_TICK of an instruction. */
196 #define MIN_TICK (-max_insn_queue_index)
198 /* Issue points are used to distinguish between instructions in max_issue ().
199 For now, all instructions are equally good. */
200 #define ISSUE_POINTS(INSN) 1
202 /* List of important notes we must keep around. This is a pointer to the
203 last element in the list. */
204 static rtx note_list;
206 static struct spec_info_def spec_info_var;
207 /* Description of the speculative part of the scheduling.
208 If NULL - no speculation. */
209 static spec_info_t spec_info;
211 /* True, if recovery block was added during scheduling of current block.
212 Used to determine, if we need to fix INSN_TICKs. */
213 static bool added_recovery_block_p;
215 /* Counters of different types of speculative instructions. */
216 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
218 /* Pointers to GLAT data. See init_glat for more information. */
219 regset *glat_start, *glat_end;
221 /* Array used in {unlink, restore}_bb_notes. */
222 static rtx *bb_header = 0;
224 /* Number of basic_blocks. */
225 static int old_last_basic_block;
227 /* Basic block after which recovery blocks will be created. */
228 static basic_block before_recovery;
232 /* An instruction is ready to be scheduled when all insns preceding it
233 have already been scheduled. It is important to ensure that all
234 insns which use its result will not be executed until its result
235 has been computed. An insn is maintained in one of four structures:
237 (P) the "Pending" set of insns which cannot be scheduled until
238 their dependencies have been satisfied.
239 (Q) the "Queued" set of insns that can be scheduled when sufficient
241 (R) the "Ready" list of unscheduled, uncommitted insns.
242 (S) the "Scheduled" list of insns.
244 Initially, all insns are either "Pending" or "Ready" depending on
245 whether their dependencies are satisfied.
247 Insns move from the "Ready" list to the "Scheduled" list as they
248 are committed to the schedule. As this occurs, the insns in the
249 "Pending" list have their dependencies satisfied and move to either
250 the "Ready" list or the "Queued" set depending on whether
251 sufficient time has passed to make them ready. As time passes,
252 insns move from the "Queued" set to the "Ready" list.
254 The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
255 unscheduled insns, i.e., those that are ready, queued, and pending.
256 The "Queued" set (Q) is implemented by the variable `insn_queue'.
257 The "Ready" list (R) is implemented by the variables `ready' and
259 The "Scheduled" list (S) is the new insn chain built by this pass.
261 The transition (R->S) is implemented in the scheduling loop in
262 `schedule_block' when the best insn to schedule is chosen.
263 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
264 insns move from the ready list to the scheduled list.
265 The transition (Q->R) is implemented in 'queue_to_ready' as time
266 passes or stalls are introduced. */
268 /* Implement a circular buffer to delay instructions until sufficient
269 time has passed. For the new pipeline description interface,
270 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
271 than maximal time of instruction execution computed by genattr.c on
272 the base maximal time of functional unit reservations and getting a
273 result. This is the longest time an insn may be queued. */
275 static rtx *insn_queue;
276 static int q_ptr = 0;
277 static int q_size = 0;
278 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
279 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
281 #define QUEUE_SCHEDULED (-3)
282 #define QUEUE_NOWHERE (-2)
283 #define QUEUE_READY (-1)
284 /* QUEUE_SCHEDULED - INSN is scheduled.
285 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
287 QUEUE_READY - INSN is in ready list.
288 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
290 #define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)
292 /* The following variable value refers for all current and future
293 reservations of the processor units. */
296 /* The following variable value is size of memory representing all
297 current and future reservations of the processor units. */
298 static size_t dfa_state_size;
300 /* The following array is used to find the best insn from ready when
301 the automaton pipeline interface is used. */
302 static char *ready_try;
304 /* Describe the ready list of the scheduler.
305 VEC holds space enough for all insns in the current region. VECLEN
306 says how many exactly.
307 FIRST is the index of the element with the highest priority; i.e. the
308 last one in the ready list, since elements are ordered by ascending
310 N_READY determines how many insns are on the ready list. */
320 /* The pointer to the ready list. */
321 static struct ready_list *readyp;
323 /* Scheduling clock. */
324 static int clock_var;
326 /* Number of instructions in current scheduling region. */
327 static int rgn_n_insns;
329 static int may_trap_exp (rtx, int);
331 /* Nonzero iff the address is comprised from at most 1 register. */
/* I.e. the address is either fully constant, or a PLUS/MINUS/LO_SUM in
   which at least one operand is constant, leaving at most one register
   operand.  (The leading constant-only arm of the || is elided in this
   excerpt.)  */
332 #define CONST_BASED_ADDRESS_P(x) \
334 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
335 || (GET_CODE (x) == LO_SUM)) \
336 && (CONSTANT_P (XEXP (x, 0)) \
337 || CONSTANT_P (XEXP (x, 1)))))
339 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
340 as found by analyzing insn's expression. */
/* NOTE(review): several interior lines of this function (the MEM
   store/load dispatch and some returns) are elided in this excerpt;
   the comments below annotate only the code that is visible.  */
343 may_trap_exp (rtx x, int is_store)
352 if (code == MEM && may_trap_p (x))
359 /* The insn uses memory: a volatile load. */
360 if (MEM_VOLATILE_P (x))
362 /* An exception-free load. */
365 /* A load with 1 base register, to be further checked. */
366 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
367 return PFREE_CANDIDATE;
368 /* No info on the load, to be further checked. */
369 return PRISKY_CANDIDATE;
374 int i, insn_class = TRAP_FREE;
376 /* Neither store nor load, check if it may cause a trap. */
379 /* Recursive step: walk the insn... */
380 fmt = GET_RTX_FORMAT (code);
381 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
/* 'e' operands: recurse on a single sub-expression.  */
385 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
386 insn_class = WORST_CLASS (insn_class, tmp_class);
388 else if (fmt[i] == 'E')
/* 'E' operands: recurse on each element of an rtx vector.  */
391 for (j = 0; j < XVECLEN (x, i); j++)
393 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
394 insn_class = WORST_CLASS (insn_class, tmp_class);
/* Once the worst possible class is reached, stop searching.  */
395 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
399 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
406 /* Classifies insn for the purpose of verifying that it can be
407 moved speculatively, by examining its pattern, returning:
408 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
409 TRAP_FREE: non-load insn.
410 IFREE: load from a globally safe location.
411 IRISKY: volatile load.
412 PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
413 being either PFREE or PRISKY. */
/* NOTE(review): the switch statements dispatching on CODE (CLOBBER/SET/
   default cases) are elided in this excerpt; comments annotate only the
   visible lines.  */
416 haifa_classify_insn (rtx insn)
418 rtx pat = PATTERN (insn);
419 int tmp_class = TRAP_FREE;
420 int insn_class = TRAP_FREE;
/* A PARALLEL is classified as the worst class among its elements.  */
423 if (GET_CODE (pat) == PARALLEL)
425 int i, len = XVECLEN (pat, 0);
427 for (i = len - 1; i >= 0; i--)
429 code = GET_CODE (XVECEXP (pat, 0, i));
433 /* Test if it is a 'store'. */
434 tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
437 /* Test if it is a store. */
438 tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
439 if (tmp_class == TRAP_RISKY)
441 /* Test if it is a load. */
443 = WORST_CLASS (tmp_class,
444 may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
449 tmp_class = TRAP_RISKY;
454 insn_class = WORST_CLASS (insn_class, tmp_class);
/* Stop early once the worst class has been reached.  */
455 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
/* Non-PARALLEL pattern: classify the single SET/CLOBBER directly.  */
461 code = GET_CODE (pat);
465 /* Test if it is a 'store'. */
466 tmp_class = may_trap_exp (XEXP (pat, 0), 1);
469 /* Test if it is a store. */
470 tmp_class = may_trap_exp (SET_DEST (pat), 1);
471 if (tmp_class == TRAP_RISKY)
473 /* Test if it is a load. */
475 WORST_CLASS (tmp_class,
476 may_trap_exp (SET_SRC (pat), 0));
480 tmp_class = TRAP_RISKY;
484 insn_class = tmp_class;
490 /* A typedef for rtx vector. */
491 typedef VEC(rtx, heap) *rtx_vec_t;
493 /* Forward declarations. */
495 static int priority (rtx);
496 static int rank_for_schedule (const void *, const void *);
497 static void swap_sort (rtx *, int);
498 static void queue_insn (rtx, int);
499 static int schedule_insn (rtx);
500 static int find_set_reg_weight (rtx);
501 static void find_insn_reg_weight (basic_block);
502 static void find_insn_reg_weight1 (rtx);
503 static void adjust_priority (rtx);
504 static void advance_one_cycle (void);
506 /* Notes handling mechanism:
507 =========================
508 Generally, NOTES are saved before scheduling and restored after scheduling.
509 The scheduler distinguishes between two types of notes:
511 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
512 Before scheduling a region, a pointer to the note is added to the insn
513 that follows or precedes it. (This happens as part of the data dependence
514 computation). After scheduling an insn, the pointer contained in it is
515 used for regenerating the corresponding note (in reemit_notes).
517 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
518 these notes are put in a list (in rm_other_notes() and
519 unlink_other_notes ()). After scheduling the block, these notes are
520 inserted at the beginning of the block (in schedule_block()). */
522 static rtx unlink_other_notes (rtx, rtx);
523 static void reemit_notes (rtx);
525 static rtx *ready_lastpos (struct ready_list *);
526 static void ready_add (struct ready_list *, rtx, bool);
527 static void ready_sort (struct ready_list *);
528 static rtx ready_remove_first (struct ready_list *);
530 static void queue_to_ready (struct ready_list *);
531 static int early_queue_to_ready (state_t, struct ready_list *);
533 static void debug_ready_list (struct ready_list *);
535 static void move_insn (rtx);
537 /* The following functions are used to implement multi-pass scheduling
538 on the first cycle. */
539 static rtx ready_element (struct ready_list *, int);
540 static rtx ready_remove (struct ready_list *, int);
541 static void ready_remove_insn (rtx);
542 static int max_issue (struct ready_list *, int *, int);
544 static rtx choose_ready (struct ready_list *);
546 static void fix_inter_tick (rtx, rtx);
547 static int fix_tick_ready (rtx);
548 static void change_queue_index (rtx, int);
550 /* The following functions are used to implement scheduling of data/control
551 speculative instructions. */
553 static void extend_h_i_d (void);
554 static void extend_ready (int);
555 static void extend_global (rtx);
556 static void extend_all (rtx);
557 static void init_h_i_d (rtx);
558 static void generate_recovery_code (rtx);
559 static void process_insn_forw_deps_be_in_spec (deps_list_t, rtx, ds_t);
560 static void begin_speculative_block (rtx);
561 static void add_to_speculative_block (rtx);
562 static dw_t dep_weak (ds_t);
563 static edge find_fallthru_edge (basic_block);
564 static void init_before_recovery (void);
565 static basic_block create_recovery_block (void);
566 static void create_check_block_twin (rtx, bool);
567 static void fix_recovery_deps (basic_block);
568 static void change_pattern (rtx, rtx);
569 static int speculate_insn (rtx, ds_t, rtx *);
570 static void dump_new_block_header (int, basic_block, rtx, rtx);
571 static void restore_bb_notes (basic_block);
572 static void extend_bb (void);
573 static void fix_jump_move (rtx);
574 static void move_block_after_check (rtx);
575 static void move_succs (VEC(edge,gc) **, basic_block);
576 static void init_glat (void);
577 static void init_glat1 (basic_block);
578 static void attach_life_info1 (basic_block);
579 static void free_glat (void);
580 static void sched_remove_insn (rtx);
581 static void clear_priorities (rtx, rtx_vec_t *);
582 static void calc_priorities (rtx_vec_t);
583 static void add_jump_dependencies (rtx, rtx);
584 #ifdef ENABLE_CHECKING
585 static int has_edge_p (VEC(edge,gc) *, int);
586 static void check_cfg (rtx, rtx);
587 static void check_sched_flags (void);
590 #endif /* INSN_SCHEDULING */
592 /* Point to state used for the current scheduling pass. */
593 struct sched_info *current_sched_info;
595 #ifndef INSN_SCHEDULING
597 schedule_insns (void)
602 /* Working copy of frontend's sched_info variable. */
603 static struct sched_info current_sched_info_var;
605 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
606 so that insns independent of the last scheduled insn will be preferred
607 over dependent instructions. */
609 static rtx last_scheduled_insn;
611 /* Cached cost of the instruction. Use below function to get cost of the
612 insn. -1 here means that the field is not initialized. */
613 #define INSN_COST(INSN) (h_i_d[INSN_UID (INSN)].cost)
615 /* Compute cost of executing INSN.
616 This is the number of cycles between instruction issue and
617 instruction results. */
/* The result is cached in INSN_COST; per the macro's comment above, -1
   means "not yet computed".  (The function header and the cache-hit
   early-return path are elided in this excerpt.)  */
621 int cost = INSN_COST (insn);
625 /* A USE insn, or something else we don't need to
626 understand. We can't pass these directly to
627 result_ready_cost or insn_default_latency because it will
628 trigger a fatal error for unrecognizable insns. */
629 if (recog_memoized (insn) < 0)
/* Unrecognizable insns are treated as free.  */
631 INSN_COST (insn) = 0;
636 cost = insn_default_latency (insn);
640 INSN_COST (insn) = cost;
647 /* Compute cost of dependence LINK.
648 This is the number of cycles between instruction issue and
649 instruction results. */
651 dep_cost (dep_t link)
653 rtx used = DEP_CON (link);
656 /* A USE insn should never require the value used to be computed.
657 This allows the computation of a function's result and parameter
658 values to overlap the return and call. */
659 if (recog_memoized (used) < 0)
663 rtx insn = DEP_PRO (link);
664 enum reg_note dep_type = DEP_KIND (link);
/* Start from the producer's latency and adjust by dependence kind.  */
666 cost = insn_cost (insn);
668 if (INSN_CODE (insn) >= 0)
/* Anti and output dependences do not wait for the result, only for
   issue; output deps subtract the consumer's own latency.  */
670 if (dep_type == REG_DEP_ANTI)
672 else if (dep_type == REG_DEP_OUTPUT)
674 cost = (insn_default_latency (insn)
675 - insn_default_latency (used));
679 else if (bypass_p (insn))
680 cost = insn_latency (insn, used);
/* Give the target a chance to adjust the cost via its hook.  */
683 if (targetm.sched.adjust_cost != NULL)
685 /* This variable is used for backward compatibility with the
687 rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
689 /* Make it self-cycled, so that if someone tries to walk over this
690 incomplete list he/she will be caught in an endless loop. */
691 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
693 /* Targets use only REG_NOTE_KIND of the link. */
694 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_KIND (link));
696 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
699 free_INSN_LIST_node (dep_cost_rtx_link);
709 /* Return 'true' if DEP should be included in priority calculations. */
711 contributes_to_priority_p (dep_t dep)
713 /* Critical path is meaningful in block boundaries only. */
714 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
718 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
719 then speculative instructions will less likely be
720 scheduled. That is because the priority of
721 their producers will increase, and, thus, the
722 producers will more likely be scheduled, thus,
723 resolving the dependence. */
/* Conversely: when the flag is NOT set (as tested here), speculative
   dependences are excluded from the critical-path calculation.  */
724 if ((current_sched_info->flags & DO_SPECULATION)
725 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
726 && (DEP_STATUS (dep) & SPECULATIVE))
732 /* Compute the priority number for INSN. */
/* Priority is the length (in cycles) of the longest dependence path from
   INSN to the end of the region, computed recursively over forward
   dependences and memoized via INSN_PRIORITY_KNOWN.  */
741 /* We should not be interested in priority of an already scheduled insn. */
742 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
744 if (!INSN_PRIORITY_KNOWN (insn))
746 int this_priority = 0;
748 if (deps_list_empty_p (INSN_FORW_DEPS (insn)))
749 /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
750 some forward deps but all of them are ignored by
751 contributes_to_priority hook. At the moment we set priority of
753 this_priority = insn_cost (insn);
756 rtx prev_first, twin;
759 /* For recovery check instructions we calculate priority slightly
760 different than that of normal instructions. Instead of walking
761 through INSN_FORW_DEPS (check) list, we walk through
762 INSN_FORW_DEPS list of each instruction in the corresponding
765 rec = RECOVERY_BLOCK (insn);
766 if (!rec || rec == EXIT_BLOCK_PTR)
/* Normal insn: walk only its own forward deps (TWIN == INSN).  */
768 prev_first = PREV_INSN (insn);
/* Recovery check: walk the insns of the recovery block instead.  */
773 prev_first = NEXT_INSN (BB_HEAD (rec));
774 twin = PREV_INSN (BB_END (rec));
779 FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (twin))
783 dep_t dep = DEP_LINK_DEP (link);
785 next = DEP_CON (dep);
786 if (BLOCK_FOR_INSN (next) != rec)
791 if (!contributes_to_priority_p (dep))
795 cost = dep_cost (dep);
/* For consumers inside the recovery block, cost the dependence as an
   anti dependence from INSN itself.  */
798 struct _dep _dep1, *dep1 = &_dep1;
800 init_dep (dep1, insn, next, REG_DEP_ANTI);
802 cost = dep_cost (dep1);
805 next_priority = cost + priority (next);
807 if (next_priority > this_priority)
808 this_priority = next_priority;
812 twin = PREV_INSN (twin);
814 while (twin != prev_first);
816 INSN_PRIORITY (insn) = this_priority;
817 INSN_PRIORITY_STATUS (insn) = 1;
820 return INSN_PRIORITY (insn);
823 /* Macros and functions for keeping the priority queue sorted, and
824 dealing with queuing and dequeuing of instructions. */
/* Sort READY (N_READY elements) by rank_for_schedule: a 2-element list
   uses the cheap swap_sort, larger lists use qsort.  qsort is not
   stable, so rank_for_schedule itself breaks final ties by LUID.  */
826 #define SCHED_SORT(READY, N_READY) \
827 do { if ((N_READY) == 2) \
828 swap_sort (READY, N_READY); \
829 else if ((N_READY) > 2) \
830 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
833 /* Returns a positive value if x is preferred; returns a negative value if
834 y is preferred. Should never return 0, since that will make the sort
/* qsort comparator implementing the tie-breaking order documented in the
   file header (schedule groups, priority, speculative weakness, register
   weight, target rank, relation to last scheduled insn, number of
   dependents, and finally LUID).  */
838 rank_for_schedule (const void *x, const void *y)
/* Note the deliberate swap: TMP is Y and TMP2 is X, because the ready
   list is sorted in ascending priority order.  */
840 rtx tmp = *(const rtx *) y;
841 rtx tmp2 = *(const rtx *) x;
842 dep_link_t link1, link2;
843 int tmp_class, tmp2_class;
844 int val, priority_val, weight_val, info_val;
846 /* The insn in a schedule group should be issued the first. */
847 if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
848 return SCHED_GROUP_P (tmp2) ? 1 : -1;
850 /* Make sure that priority of TMP and TMP2 are initialized. */
851 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
853 /* Prefer insn with higher priority. */
854 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
859 /* Prefer speculative insn with greater dependencies weakness. */
866 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
868 dw1 = dep_weak (ds1);
872 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
874 dw2 = dep_weak (ds2);
/* Only act on the weakness difference when it is significant (more
   than 1/8 of the NO_DEP_WEAK range).  */
879 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
883 /* Prefer an insn with smaller contribution to registers-pressure. */
884 if (!reload_completed &&
885 (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
/* Let the frontend (region scheduler etc.) rank the insns.  */
888 info_val = (*current_sched_info->rank) (tmp, tmp2);
892 /* Compare insns based on their relation to the last-scheduled-insn. */
893 if (INSN_P (last_scheduled_insn))
895 /* Classify the instructions into three classes:
896 1) Data dependent on last scheduled insn.
897 2) Anti/Output dependent on last scheduled insn.
898 3) Independent of last scheduled insn, or has latency of one.
899 Choose the insn from the highest numbered class if different. */
901 = find_link_by_con_in_deps_list (INSN_FORW_DEPS (last_scheduled_insn),
904 if (link1 == NULL || dep_cost (DEP_LINK_DEP (link1)) == 1)
906 else if (/* Data dependence. */
907 DEP_LINK_KIND (link1) == REG_DEP_TRUE)
913 = find_link_by_con_in_deps_list (INSN_FORW_DEPS (last_scheduled_insn),
916 if (link2 == NULL || dep_cost (DEP_LINK_DEP (link2)) == 1)
918 else if (/* Data dependence. */
919 DEP_LINK_KIND (link2) == REG_DEP_TRUE)
924 if ((val = tmp2_class - tmp_class))
928 /* Prefer the insn which has more later insns that depend on it.
929 This gives the scheduler more freedom when scheduling later
930 instructions at the expense of added register pressure. */
/* Walk both forward-dep lists in lock-step; whichever list is longer
   wins, without counting either fully.  */
932 link1 = DEPS_LIST_FIRST (INSN_FORW_DEPS (tmp));
933 link2 = DEPS_LIST_FIRST (INSN_FORW_DEPS (tmp2));
935 while (link1 != NULL && link2 != NULL)
937 link1 = DEP_LINK_NEXT (link1);
938 link2 = DEP_LINK_NEXT (link2);
941 if (link1 != NULL && link2 == NULL)
942 /* TMP (Y) has more insns that depend on it. */
944 if (link1 == NULL && link2 != NULL)
945 /* TMP2 (X) has more insns that depend on it. */
948 /* If insns are equally good, sort by INSN_LUID (original insn order),
949 so that we make the sort stable. This minimizes instruction movement,
950 thus minimizing sched's effect on debugging and cross-jumping. */
951 return INSN_LUID (tmp) - INSN_LUID (tmp2);
954 /* Resort the array A in which only element at index N may be out of order. */
/* Effectively one step of insertion sort: a[n] is sifted toward the
   front while it ranks ahead of its neighbor.  (The insertion and loop
   body lines are elided in this excerpt.)  */
956 HAIFA_INLINE static void
957 swap_sort (rtx *a, int n)
962 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
970 /* Add INSN to the insn queue so that it can be executed at least
971 N_CYCLES after the currently executing insn. Preserve insns
972 chain for debugging purposes. */
974 HAIFA_INLINE static void
975 queue_insn (rtx insn, int n_cycles)
/* Prepend INSN onto the list hanging off the circular-buffer slot that
   becomes current in N_CYCLES cycles (see NEXT_Q_AFTER above).  */
977 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
978 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
/* An insn may never be queued farther ahead than the buffer allows.  */
980 gcc_assert (n_cycles <= max_insn_queue_index);
982 insn_queue[next_q] = link;
985 if (sched_verbose >= 2)
987 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
988 (*current_sched_info->print_insn) (insn, 0));
990 fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
/* Remember which slot holds INSN so queue_remove can find it.  */
993 QUEUE_INDEX (insn) = next_q;
996 /* Remove INSN from queue. */
/* INSN must actually be queued (QUEUE_INDEX >= 0); its list node is
   unlinked from the circular-buffer slot and freed, and its index reset
   to QUEUE_NOWHERE.  */
998 queue_remove (rtx insn)
1000 gcc_assert (QUEUE_INDEX (insn) >= 0);
1001 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
1003 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
1006 /* Return a pointer to the bottom of the ready list, i.e. the insn
1007 with the lowest priority. */
/* The list occupies vec[first - n_ready + 1 .. first], highest priority
   at vec[first]; requires a non-empty list.  */
1009 HAIFA_INLINE static rtx *
1010 ready_lastpos (struct ready_list *ready)
1012 gcc_assert (ready->n_ready >= 1);
1013 return ready->vec + ready->first - ready->n_ready + 1;
1016 /* Add an element INSN to the ready list so that it ends up with the
1017 lowest/highest priority depending on FIRST_P. */
1019 HAIFA_INLINE static void
1020 ready_add (struct ready_list *ready, rtx insn, bool first_p)
/* FIRST_P false: insert at the bottom (lowest priority) of the list,
   shifting the whole list to the top of VEC first if there is no room
   below it.  */
1024 if (ready->first == ready->n_ready)
1026 memmove (ready->vec + ready->veclen - ready->n_ready,
1027 ready_lastpos (ready),
1028 ready->n_ready * sizeof (rtx));
1029 ready->first = ready->veclen - 1;
1031 ready->vec[ready->first - ready->n_ready] = insn;
/* FIRST_P true: insert at the top (highest priority), shifting the list
   down by one slot if it is flush against the end of VEC.  */
1035 if (ready->first == ready->veclen - 1)
1038 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
1039 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1040 ready_lastpos (ready),
1041 ready->n_ready * sizeof (rtx));
1042 ready->first = ready->veclen - 2;
1044 ready->vec[++(ready->first)] = insn;
/* INSN must not already be on the ready list; mark it ready.  */
1049 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1050 QUEUE_INDEX (insn) = QUEUE_READY;
1053 /* Remove the element with the highest priority from the ready list and
/* ... return it.  The highest-priority insn sits at vec[first]; popping
   it just decrements FIRST.  */
1056 HAIFA_INLINE static rtx
1057 ready_remove_first (struct ready_list *ready)
1061 gcc_assert (ready->n_ready);
1062 t = ready->vec[ready->first--];
1064 /* If the queue becomes empty, reset it. */
1065 if (ready->n_ready == 0)
1066 ready->first = ready->veclen - 1;
/* The removed insn is no longer anywhere until rescheduled/queued.  */
1068 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1069 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1074 /* The following code implements multi-pass scheduling for the first
1075 cycle. In other words, we will try to choose ready insn which
1076 permits to start maximum number of insns on the same cycle. */
1078 /* Return a pointer to the element INDEX from the ready. INDEX for
1079 insn with the highest priority is 0, and the lowest priority has
/* Purely a read access; the list is not modified.  */
1082 HAIFA_INLINE static rtx
1083 ready_element (struct ready_list *ready, int index)
1085 gcc_assert (ready->n_ready && index < ready->n_ready);
1087 return ready->vec[ready->first - index];
1090 /* Remove the element INDEX from the ready list and return it. INDEX
1091 for insn with the highest priority is 0, and the lowest priority
/* INDEX 0 delegates to the cheap ready_remove_first; otherwise the gap
   is closed by shifting the lower-priority elements up by one.  */
1094 HAIFA_INLINE static rtx
1095 ready_remove (struct ready_list *ready, int index)
1101 return ready_remove_first (ready);
1102 gcc_assert (ready->n_ready && index < ready->n_ready);
1103 t = ready->vec[ready->first - index];
1105 for (i = index; i < ready->n_ready; i++)
1106 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1107 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1111 /* Remove INSN from the ready list. */
1113 ready_remove_insn (rtx insn)
/* Linear scan of the current global ready list (readyp) for INSN;
   remove it at the position where it is found.  */
1117 for (i = 0; i < readyp->n_ready; i++)
1118 if (ready_element (readyp, i) == insn)
1120 ready_remove (readyp, i);
1126 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
/* Sorts in place: SCHED_SORT works on the vector starting from the
   lowest-priority end returned by ready_lastpos.  */
1129 HAIFA_INLINE static void
1130 ready_sort (struct ready_list *ready)
1132 rtx *first = ready_lastpos (ready);
1133 SCHED_SORT (first, ready->n_ready);
1136 /* PREV is an insn that is ready to execute. Adjust its priority if that
1137 will help shorten or lengthen register lifetimes as appropriate. Also
1138 provide a hook for the target to tweek itself. */
1140 HAIFA_INLINE static void
1141 adjust_priority (rtx prev)
1143 /* ??? There used to be code here to try and estimate how an insn
1144 affected register lifetimes, but it did it by looking at REG_DEAD
1145 notes, which we removed in schedule_region. Nor did it try to
1146 take into account register pressure or anything useful like that.
1148 Revisit when we have a machine model to work with and not before. */
/* Only the target hook remains: let the back end rewrite PREV's
   priority if it defines sched.adjust_priority.  */
1150 if (targetm.sched.adjust_priority)
1151 INSN_PRIORITY (prev) =
1152 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1155 /* Advance time on one cycle. */
1156 HAIFA_INLINE static void
1157 advance_one_cycle (void)
/* Drive the pipeline DFA across a cycle boundary: feed the optional
   target-supplied pre-cycle insn, make the cycle-advance transition
   (NULL insn), then feed the optional post-cycle insn.  */
1159 if (targetm.sched.dfa_pre_cycle_insn)
1160 state_transition (curr_state,
1161 targetm.sched.dfa_pre_cycle_insn ());
1163 state_transition (curr_state, NULL);
1165 if (targetm.sched.dfa_post_cycle_insn)
1166 state_transition (curr_state,
1167 targetm.sched.dfa_post_cycle_insn ());
1170 /* Clock at which the previous instruction was issued. */
1171 static int last_clock_var;
1173 /* INSN is the "currently executing insn". Launch each insn which was
1174 waiting on INSN. READY is the ready list which contains the insns
1175 that are ready to fire. CLOCK is the current cycle. The function
1176 returns necessary cycle advance after issuing the insn (it is not
1177 zero for insns in a schedule group). */
1180 schedule_insn (rtx insn)
/* Dump what is being issued, and its DFA reservation when the insn is
   recognized.  */
1185 if (sched_verbose >= 1)
1189 print_insn (buf, insn, 0);
1191 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
1193 if (recog_memoized (insn) < 0)
1194 fprintf (sched_dump, "nothing");
1196 print_reservation (sched_dump, insn);
1197 fputc ('\n', sched_dump);
1200 /* Scheduling instruction should have all its dependencies resolved and
1201 should have been removed from the ready list. */
1202 gcc_assert (INSN_DEP_COUNT (insn) == 0
1203 && deps_list_empty_p (INSN_BACK_DEPS (insn)));
1204 free_deps_list (INSN_BACK_DEPS (insn));
1206 /* Now we can free INSN_RESOLVED_BACK_DEPS list. */
1207 delete_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
/* Mark INSN as issued.  */
1209 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
1210 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
1212 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
1213 if (INSN_TICK (insn) > clock_var)
1214 /* INSN has been prematurely moved from the queue to the ready list.
1215 This is possible only if following flag is set. */
1216 gcc_assert (flag_sched_stalled_insns);
1218 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1219 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1220 INSN_TICK (insn) = clock_var;
1222 /* Update dependent instructions. */
1223 FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (insn))
1225 rtx next = DEP_LINK_CON (link);
1227 /* Resolve the dependence between INSN and NEXT. */
1229 INSN_DEP_COUNT (next)--;
/* Move the back-dependence of NEXT on INSN onto NEXT's resolved
   list.  */
1231 move_dep_link (DEP_NODE_BACK (DEP_LINK_NODE (link)),
1232 INSN_RESOLVED_BACK_DEPS (next));
1234 gcc_assert ((INSN_DEP_COUNT (next) == 0)
1235 == deps_list_empty_p (INSN_BACK_DEPS (next)));
1237 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
/* try_ready may move NEXT to the ready list; remember the largest
   stall it reports for insns in INSN's schedule group.  */
1241 effective_cost = try_ready (next);
1243 if (effective_cost >= 0
1244 && SCHED_GROUP_P (next)
1245 && advance < effective_cost)
1246 advance = effective_cost;
1249 /* Check always has only one forward dependence (to the first insn in
1250 the recovery block), therefore, this will be executed only once. */
1252 gcc_assert (DEP_LINK_NEXT (link) == NULL);
1253 fix_recovery_deps (RECOVERY_BLOCK (insn));
1257 /* Annotate the instruction with issue information -- TImode
1258 indicates that the instruction is expected not to be able
1259 to issue on the same cycle as the previous insn. A machine
1260 may use this information to decide how the instruction should
1263 && GET_CODE (PATTERN (insn)) != USE
1264 && GET_CODE (PATTERN (insn)) != CLOBBER)
1266 if (reload_completed)
1267 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
1268 last_clock_var = clock_var;
1274 /* Functions for handling of notes. */
1276 /* Delete notes beginning with INSN and put them in the chain
1277 of notes ended by NOTE_LIST.
1278 Returns the insn following the notes. */
1281 unlink_other_notes (rtx insn, rtx tail)
1283 rtx prev = PREV_INSN (insn);
/* Walk a run of consecutive non-basic-block notes starting at INSN,
   stopping at TAIL.  */
1285 while (insn != tail && NOTE_NOT_BB_P (insn))
1287 rtx next = NEXT_INSN (insn);
1288 basic_block bb = BLOCK_FOR_INSN (insn);
1290 /* Delete the note from its current position. */
/* Splice the note out of the insn chain.  */
1292 NEXT_INSN (prev) = next;
1294 PREV_INSN (next) = prev;
1298 /* Basic block can begin with either LABEL or
1299 NOTE_INSN_BASIC_BLOCK. */
1300 gcc_assert (BB_HEAD (bb) != insn);
1302 /* Check if we are removing last insn in the BB. */
1303 if (BB_END (bb) == insn)
1307 /* See sched_analyze to see how these are handled. */
1308 if (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
1309 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END)
1311 /* Insert the note at the end of the notes list. */
1312 PREV_INSN (insn) = note_list;
1314 NEXT_INSN (note_list) = insn;
1323 /* Return the head and tail pointers of ebb starting at BEG and ending
1327 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
1329 rtx beg_head = BB_HEAD (beg);
1330 rtx beg_tail = BB_END (beg);
1331 rtx end_head = BB_HEAD (end);
1332 rtx end_tail = BB_END (end);
1334 /* Don't include any notes or labels at the beginning of the BEG
1335 basic block, or notes at the end of the END basic blocks. */
1337 if (LABEL_P (beg_head))
1338 beg_head = NEXT_INSN (beg_head);
/* Skip forward over leading notes of BEG.  */
1340 while (beg_head != beg_tail)
1341 if (NOTE_P (beg_head))
1342 beg_head = NEXT_INSN (beg_head);
1349 end_head = beg_head;
1350 else if (LABEL_P (end_head))
1351 end_head = NEXT_INSN (end_head);
/* Skip backward over trailing notes of END.  */
1353 while (end_head != end_tail)
1354 if (NOTE_P (end_tail))
1355 end_tail = PREV_INSN (end_tail);
1362 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1365 no_real_insns_p (rtx head, rtx tail)
/* Scan the inclusive range; notes and labels do not count as real
   insns.  */
1367 while (head != NEXT_INSN (tail))
1369 if (!NOTE_P (head) && !LABEL_P (head))
1371 head = NEXT_INSN (head);
1376 /* Delete notes between HEAD and TAIL and put them in the chain
1377 of notes ended by NOTE_LIST. */
1380 rm_other_notes (rtx head, rtx tail)
/* Empty range (single non-insn) — nothing to do.  */
1386 if (head == tail && (! INSN_P (head)))
1389 next_tail = NEXT_INSN (tail);
1390 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1394 /* Farm out notes, and maybe save them in NOTE_LIST.
1395 This is needed to keep the debugger from
1396 getting completely deranged. */
1397 if (NOTE_NOT_BB_P (insn))
/* unlink_other_notes consumes the run of notes and returns the insn
   following them; the loop resumes there.  */
1401 insn = unlink_other_notes (insn, next_tail);
1403 gcc_assert (prev != tail && prev != head && insn != next_tail);
1408 /* Functions for computation of registers live/usage info. */
1410 /* This function looks for a new register being defined.
1411 If the destination register is already used by the source,
1412 a new register is not needed. */
1415 find_set_reg_weight (rtx x)
/* Weight contributions by rtx shape: CLOBBER of a register, SET of a
   register, and a SET whose destination register is not mentioned in
   the source are each considered in turn.  (Returned weight values
   are on elided lines — not visible here.)  */
1417 if (GET_CODE (x) == CLOBBER
1418 && register_operand (SET_DEST (x), VOIDmode))
1420 if (GET_CODE (x) == SET
1421 && register_operand (SET_DEST (x), VOIDmode))
1423 if (REG_P (SET_DEST (x)))
1425 if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
1435 /* Calculate INSN_REG_WEIGHT for all insns of a block. */
1438 find_insn_reg_weight (basic_block bb)
1440 rtx insn, next_tail, head, tail;
/* Treat the single block BB as a degenerate ebb to get its insn
   range, then weight each insn individually.  */
1442 get_ebb_head_tail (bb, bb, &head, &tail);
1443 next_tail = NEXT_INSN (tail);
1445 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1446 find_insn_reg_weight1 (insn);
1449 /* Calculate INSN_REG_WEIGHT for single instruction.
1450 Separated from find_insn_reg_weight because of need
1451 to initialize new instruction in generate_recovery_code. */
1453 find_insn_reg_weight1 (rtx insn)
1458 /* Handle register life information. */
1459 if (! INSN_P (insn))
1462 /* Increment weight for each register born here. */
1464 reg_weight += find_set_reg_weight (x);
/* A PARALLEL may hold several SETs/CLOBBERs; weight each element.  */
1465 if (GET_CODE (x) == PARALLEL)
1468 for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
1470 x = XVECEXP (PATTERN (insn), 0, j);
1471 reg_weight += find_set_reg_weight (x);
1474 /* Decrement weight for each register that dies here. */
1475 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
1477 if (REG_NOTE_KIND (x) == REG_DEAD
1478 || REG_NOTE_KIND (x) == REG_UNUSED)
1482 INSN_REG_WEIGHT (insn) = reg_weight;
1485 /* Move insns that became ready to fire from queue to ready list. */
1488 queue_to_ready (struct ready_list *ready)
/* Advance the circular queue pointer to the slot for the new cycle.  */
1493 q_ptr = NEXT_Q (q_ptr);
1495 /* Add all pending insns that can be scheduled without stalls to the
1497 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
1499 insn = XEXP (link, 0);
1502 if (sched_verbose >= 2)
1503 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1504 (*current_sched_info->print_insn) (insn, 0));
1506 /* If the ready list is full, delay the insn for 1 cycle.
1507 See the comment in schedule_block for the rationale. */
1508 if (!reload_completed
1509 && ready->n_ready > MAX_SCHED_READY_INSNS
1510 && !SCHED_GROUP_P (insn))
1512 if (sched_verbose >= 2)
1513 fprintf (sched_dump, "requeued because ready full\n")
1514 queue_insn (insn, 1);
1518 ready_add (ready, insn, false);
1519 if (sched_verbose >= 2)
1520 fprintf (sched_dump, "moving to ready without stalls\n");
/* The current cycle's queue slot has been drained.  */
1523 free_INSN_LIST_list (&insn_queue[q_ptr]);
1525 /* If there are no ready insns, stall until one is ready and add all
1526 of the pending insns at that point to the ready list. */
1527 if (ready->n_ready == 0)
/* Probe successive future cycles until a non-empty queue slot is
   found; each probed cycle advances the DFA once.  */
1531 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
1533 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1535 for (; link; link = XEXP (link, 1))
1537 insn = XEXP (link, 0);
1540 if (sched_verbose >= 2)
1541 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1542 (*current_sched_info->print_insn) (insn, 0));
1544 ready_add (ready, insn, false);
1545 if (sched_verbose >= 2)
1546 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
1548 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
1550 advance_one_cycle ();
1555 advance_one_cycle ();
/* Account for the stalled cycles in the queue pointer and clock.  */
1558 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
1559 clock_var += stalls;
1563 /* Used by early_queue_to_ready. Determines whether it is "ok" to
1564 prematurely move INSN from the queue to the ready list. Currently,
1565 if a target defines the hook 'is_costly_dependence', this function
1566 uses the hook to check whether there exist any dependences which are
1567 considered costly by the target, between INSN and other insns that
1568 have already been scheduled. Dependences are checked up to Y cycles
1569 back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
1570 controlling this value.
1571 (Other considerations could be taken into account instead (or in
1572 addition) depending on user flags and target hooks. */
1575 ok_for_early_queue_removal (rtx insn)
1578 rtx prev_insn = last_scheduled_insn;
1580 if (targetm.sched.is_costly_dependence)
/* Walk backward over already-scheduled insns, one dispatch-group
   cycle at a time, up to flag_sched_stalled_insns_dep cycles.  */
1582 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
1584 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
1588 if (!NOTE_P (prev_insn))
1590 dep_link_t dep_link;
/* Does PREV_INSN have a forward dependence whose consumer is INSN?  */
1592 dep_link = (find_link_by_con_in_deps_list
1593 (INSN_FORW_DEPS (prev_insn), insn));
1597 dep_t dep = DEP_LINK_DEP (dep_link);
1599 cost = dep_cost (dep);
/* Ask the target whether this dependence is too costly at this
   distance (in cycles) from INSN.  */
1601 if (targetm.sched.is_costly_dependence (dep, cost,
1602 flag_sched_stalled_insns_dep - n_cycles))
1607 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
1613 prev_insn = PREV_INSN (prev_insn);
1621 /* Remove insns from the queue, before they become "ready" with respect
1622 to FU latency considerations. */
1625 early_queue_to_ready (state_t state, struct ready_list *ready)
/* Scratch DFA state for trial transitions; STATE itself is never
   modified.  */
1633 state_t temp_state = alloca (dfa_state_size);
1635 int insns_removed = 0;
1638 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
1641 X == 0: There is no limit on how many queued insns can be removed
1642 prematurely. (flag_sched_stalled_insns = -1).
1644 X >= 1: Only X queued insns can be removed prematurely in each
1645 invocation. (flag_sched_stalled_insns = X).
1647 Otherwise: Early queue removal is disabled.
1648 (flag_sched_stalled_insns = 0)
1651 if (! flag_sched_stalled_insns)
/* Examine every future slot of the circular insn queue.  */
1654 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
1656 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1658 if (sched_verbose > 6)
1659 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
1664 next_link = XEXP (link, 1);
1665 insn = XEXP (link, 0);
1666 if (insn && sched_verbose > 6)
1667 print_rtl_single (sched_dump, insn);
/* Trial-issue INSN on a copy of STATE to get its cost.  */
1669 memcpy (temp_state, state, dfa_state_size);
1670 if (recog_memoized (insn) < 0)
1671 /* non-negative to indicate that it's not ready
1672 to avoid infinite Q->R->Q->R... */
1675 cost = state_transition (temp_state, insn);
1677 if (sched_verbose >= 6)
1678 fprintf (sched_dump, "transition cost = %d\n", cost);
1680 move_to_ready = false;
1683 move_to_ready = ok_for_early_queue_removal (insn);
1684 if (move_to_ready == true)
1686 /* move from Q to R */
1688 ready_add (ready, insn, false);
/* Unlink INSN's node from the queue's linked list; PREV_LINK is
   NULL when INSN heads the list.  */
1691 XEXP (prev_link, 1) = next_link;
1693 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
1695 free_INSN_LIST_node (link);
1697 if (sched_verbose >= 2)
1698 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
1699 (*current_sched_info->print_insn) (insn, 0));
1702 if (insns_removed == flag_sched_stalled_insns)
1703 /* Remove no more than flag_sched_stalled_insns insns
1704 from Q at a time. */
1705 return insns_removed;
1709 if (move_to_ready == false)
1716 } /* for stalls.. */
1718 return insns_removed;
1722 /* Print the ready list for debugging purposes. Callable from debugger. */
1725 debug_ready_list (struct ready_list *ready)
1730 if (ready->n_ready == 0)
1732 fprintf (sched_dump, "\n");
/* Dump each ready insn, highest priority last (ready_lastpos order).  */
1736 p = ready_lastpos (ready);
1737 for (i = 0; i < ready->n_ready; i++)
1738 fprintf (sched_dump, " %s", (*current_sched_info->print_insn) (p[i], 0));
1739 fprintf (sched_dump, "\n");
1742 /* Search INSN for REG_SAVE_NOTE note pairs for
1743 NOTE_INSN_EHREGION_{BEG,END}; and convert them back into
1744 NOTEs. The REG_SAVE_NOTE note following first one is contains the
1745 saved value for NOTE_BLOCK_NUMBER which is useful for
1746 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. */
1749 reemit_notes (rtx insn)
1751 rtx note, last = insn;
1753 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1755 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
/* The note operand encodes the kind of note to re-create; emit the
   note just before LAST and drop the reg-note from INSN.  */
1757 enum insn_note note_type = INTVAL (XEXP (note, 0));
1759 last = emit_note_before (note_type, last);
1760 remove_note (insn, note);
1765 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
1767 move_insn (rtx insn)
1769 rtx last = last_scheduled_insn;
/* Only relink when INSN is not already immediately after LAST.  */
1771 if (PREV_INSN (insn) != last)
1777 bb = BLOCK_FOR_INSN (insn);
1779 /* BB_HEAD is either LABEL or NOTE. */
1780 gcc_assert (BB_HEAD (bb) != insn);
1782 if (BB_END (bb) == insn)
1783 /* If this is last instruction in BB, move end marker one
1786 /* Jumps are always placed at the end of basic block. */
1787 jump_p = control_flow_insn_p (insn);
1790 || ((current_sched_info->flags & SCHED_RGN)
1791 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
1792 || (current_sched_info->flags & SCHED_EBB));
1794 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
1796 BB_END (bb) = PREV_INSN (insn);
1799 gcc_assert (BB_END (bb) != last);
1802 /* We move the block note along with jump. */
1804 /* NT is needed for assertion below. */
1805 rtx nt = current_sched_info->next_tail;
/* Find the basic-block note that trails INSN, skipping non-BB notes
   (and, per the elided condition, apparently barriers).  */
1807 note = NEXT_INSN (insn);
1808 while (NOTE_NOT_BB_P (note) && note != nt)
1809 note = NEXT_INSN (note);
1813 || BARRIER_P (note)))
1814 note = NEXT_INSN (note);
1816 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
/* Splice the range [INSN .. NOTE] out of its old position...  */
1821 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
1822 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
/* ... and reinsert it immediately after LAST.  */
1824 NEXT_INSN (note) = NEXT_INSN (last);
1825 PREV_INSN (NEXT_INSN (last)) = note;
1827 NEXT_INSN (last) = insn;
1828 PREV_INSN (insn) = last;
1830 bb = BLOCK_FOR_INSN (last);
1834 fix_jump_move (insn);
1836 if (BLOCK_FOR_INSN (insn) != bb)
1837 move_block_after_check (insn);
1839 gcc_assert (BB_END (bb) == last);
1842 set_block_for_insn (insn, bb);
1844 /* Update BB_END, if needed. */
1845 if (BB_END (bb) == last)
1849 reemit_notes (insn);
/* INSN is placed; it no longer heads a pending schedule group.  */
1851 SCHED_GROUP_P (insn) = 0;
1854 /* The following structure describe an entry of the stack of choices. */
1857 /* Ordinal number of the issued insn in the ready queue. */
1859 /* The number of the rest insns whose issues we should try. */
1861 /* The number of issued essential insns. */
1863 /* State after issuing the insn. */
1867 /* The following array is used to implement a stack of choices used in
1868 function max_issue. */
1869 static struct choice_entry *choice_stack;
1871 /* The following variable value is number of essential insns issued on
1872 the current cycle. An insn is essential one if it changes the
1873 processors state. */
1874 static int cycle_issued_insns;
1876 /* The following variable value is maximal number of tries of issuing
1877 insns for the first cycle multipass insn scheduling. We define
1878 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
1879 need this constraint if all real insns (with non-negative codes)
1880 had reservations because in this case the algorithm complexity is
1881 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions
1882 might be incomplete and such insn might occur. For such
1883 descriptions, the complexity of algorithm (without the constraint)
1884 could achieve DFA_LOOKAHEAD ** N , where N is the queue length. */
1885 static int max_lookahead_tries;
1887 /* The following value is value of hook
1888 `first_cycle_multipass_dfa_lookahead' at the last call of
/* Cached so choose_ready can detect when the hook value changes and
   recompute max_lookahead_tries.  */
1890 static int cached_first_cycle_multipass_dfa_lookahead = 0;
1892 /* The following value is value of `issue_rate' at the last call of
1894 static int cached_issue_rate = 0;
1896 /* The following function returns maximal (or close to maximal) number
1897 of insns which can be issued on the same cycle and one of which
1898 insns is insns with the best rank (the first insn in READY). To
1899 make this function tries different samples of ready insns. READY
1900 is current queue `ready'. Global array READY_TRY reflects what
1901 insns are already issued in this try. MAX_POINTS is the sum of points
1902 of all instructions in READY. The function stops immediately,
1903 if it reached the such a solution, that all instruction can be issued.
1904 INDEX will contain index of the best insn in READY. The following
1905 function is used only for first cycle multipass scheduling. */
1907 max_issue (struct ready_list *ready, int *index, int max_points)
1909 int n, i, all, n_ready, best, delay, tries_num, points = -1;
1910 struct choice_entry *top;
/* The bottom of the choice stack snapshots the current DFA state so
   it can be restored when the search finishes.  */
1914 memcpy (choice_stack->state, curr_state, dfa_state_size);
1916 top->rest = cached_first_cycle_multipass_dfa_lookahead;
1918 n_ready = ready->n_ready;
1919 for (all = i = 0; i < n_ready; i++)
/* Backtracking search: when the lookahead budget for this level is
   exhausted or all candidates were tried, pop a level.  */
1926 if (top->rest == 0 || i >= n_ready)
1928 if (top == choice_stack)
/* Record the best solution found so far; the chosen first insn is
   the one pushed at stack depth 1.  */
1930 if (best < top - choice_stack && ready_try [0])
1932 best = top - choice_stack;
1933 *index = choice_stack [1].index;
1935 if (top->n == max_points || best == all)
1941 memcpy (curr_state, top->state, dfa_state_size);
1943 else if (!ready_try [i])
1946 if (tries_num > max_lookahead_tries)
/* Try issuing candidate I on the current DFA state.  */
1948 insn = ready_element (ready, i);
1949 delay = state_transition (curr_state, insn);
1952 if (state_dead_lock_p (curr_state))
/* Insns that change the DFA state are "essential" and earn their
   issue points.  */
1957 if (memcmp (top->state, curr_state, dfa_state_size) != 0)
1958 n += ISSUE_POINTS (insn);
1960 top->rest = cached_first_cycle_multipass_dfa_lookahead;
1963 memcpy (top->state, curr_state, dfa_state_size);
/* Unwind: clear ready_try marks and restore the saved DFA state.  */
1970 while (top != choice_stack)
1972 ready_try [top->index] = 0;
1975 memcpy (curr_state, choice_stack->state, dfa_state_size);
1977 if (sched_verbose >= 4)
1978 fprintf (sched_dump, ";;\t\tChoosed insn : %s; points: %d/%d\n",
1979 (*current_sched_info->print_insn) (ready_element (ready, *index),
1981 points, max_points);
1986 /* The following function chooses insn from READY and modifies
1987 *N_READY and READY. The following function is used only for first
1988 cycle multipass scheduling. */
1991 choose_ready (struct ready_list *ready)
1995 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
1996 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
/* Without lookahead, or for a schedule group leader, the plain
   highest-priority pop suffices.  */
1997 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
1998 return ready_remove_first (ready);
2001 /* Try to choose the better insn. */
2002 int index = 0, i, n;
2004 int more_issue, max_points, try_data = 1, try_control = 1;
/* Refresh the cached lookahead and the derived tries budget
   (100 * lookahead ** issue_rate) when the hook value changed.  */
2006 if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
2008 cached_first_cycle_multipass_dfa_lookahead = lookahead;
2009 max_lookahead_tries = 100;
2010 for (i = 0; i < issue_rate; i++)
2011 max_lookahead_tries *= lookahead;
2013 insn = ready_element (ready, 0);
2014 if (INSN_CODE (insn) < 0)
2015 return ready_remove_first (ready);
2018 && spec_info->flags & (PREFER_NON_DATA_SPEC
2019 | PREFER_NON_CONTROL_SPEC))
/* Scan the ready list to see whether non-speculative (data/control)
   alternatives exist; this drives try_data / try_control below.  */
2021 for (i = 0, n = ready->n_ready; i < n; i++)
2026 x = ready_element (ready, i);
2029 if (spec_info->flags & PREFER_NON_DATA_SPEC
2030 && !(s & DATA_SPEC))
2033 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2038 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2039 && !(s & CONTROL_SPEC))
2042 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2048 if ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2049 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2050 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2051 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2053 /* Discard speculative instruction that stands first in the ready
2056 change_queue_index (insn, 1);
2060 max_points = ISSUE_POINTS (insn);
2061 more_issue = issue_rate - cycle_issued_insns - 1;
/* Mark unissuable candidates in ready_try and accumulate the points
   attainable within the remaining issue slots.  */
2063 for (i = 1; i < ready->n_ready; i++)
2065 insn = ready_element (ready, i);
2067 = (INSN_CODE (insn) < 0
2068 || (!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2069 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2070 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2071 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2074 if (!ready_try [i] && more_issue-- > 0)
2075 max_points += ISSUE_POINTS (insn);
/* Fall back to the simple pop when the multipass search finds no
   issuable solution.  */
2078 if (max_issue (ready, &index, max_points) == 0)
2079 return ready_remove_first (ready);
2081 return ready_remove (ready, index);
2085 /* Use forward list scheduling to rearrange insns of block pointed to by
2086 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2090 schedule_block (basic_block *target_bb, int rgn_n_insns1)
2092 struct ready_list ready;
2093 int i, first_cycle_insn_p;
2095 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2096 int sort_p, advance, start_clock_var;
2098 /* Head/tail info for this block. */
2099 rtx prev_head = current_sched_info->prev_head;
2100 rtx next_tail = current_sched_info->next_tail;
2101 rtx head = NEXT_INSN (prev_head);
2102 rtx tail = PREV_INSN (next_tail);
2104 /* We used to have code to avoid getting parameters moved from hard
2105 argument registers into pseudos.
2107 However, it was removed when it proved to be of marginal benefit
2108 and caused problems because schedule_block and compute_forward_dependences
2109 had different notions of what the "head" insn was. */
2111 gcc_assert (head != tail || INSN_P (head));
2113 added_recovery_block_p = false;
2117 dump_new_block_header (0, *target_bb, head, tail);
2119 state_reset (curr_state);
2121 /* Allocate the ready list. */
2125 choice_stack = NULL;
2128 extend_ready (rgn_n_insns1 + 1);
2130 ready.first = ready.veclen - 1;
2133 /* It is used for first cycle multipass scheduling. */
2134 temp_state = alloca (dfa_state_size);
2136 if (targetm.sched.md_init)
2137 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2139 /* We start inserting insns after PREV_HEAD. */
2140 last_scheduled_insn = prev_head;
2142 gcc_assert (NOTE_P (last_scheduled_insn)
2143 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2145 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2150 insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
2151 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2153 /* Start just before the beginning of time. */
2156 /* We need queue and ready lists and clock_var be initialized
2157 in try_ready () (which is called through init_ready_list ()). */
2158 (*current_sched_info->init_ready_list) ();
2160 /* The algorithm is O(n^2) in the number of ready insns at any given
2161 time in the worst case. Before reload we are more likely to have
2162 big lists so truncate them to a reasonable size. */
2163 if (!reload_completed && ready.n_ready > MAX_SCHED_READY_INSNS)
2165 ready_sort (&ready);
2167 /* Find first free-standing insn past MAX_SCHED_READY_INSNS. */
2168 for (i = MAX_SCHED_READY_INSNS; i < ready.n_ready; i++)
2169 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2172 if (sched_verbose >= 2)
2174 fprintf (sched_dump,
2175 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2176 fprintf (sched_dump,
2177 ";;\t\t before reload => truncated to %d insns\n", i);
2180 /* Delay all insns past it for 1 cycle. */
2181 while (i < ready.n_ready)
2182 queue_insn (ready_remove (&ready, i), 1);
2185 /* Now we can restore basic block notes and maintain precise cfg. */
2186 restore_bb_notes (*target_bb);
2188 last_clock_var = -1;
2193 /* Loop until all the insns in BB are scheduled. */
2194 while ((*current_sched_info->schedule_more_p) ())
2198 start_clock_var = clock_var;
2202 advance_one_cycle ();
2204 /* Add to the ready list all pending insns that can be issued now.
2205 If there are no ready insns, increment clock until one
2206 is ready and add all pending insns at that point to the ready
2208 queue_to_ready (&ready);
2210 gcc_assert (ready.n_ready);
2212 if (sched_verbose >= 2)
2214 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2215 debug_ready_list (&ready);
2217 advance -= clock_var - start_clock_var;
2219 while (advance > 0);
2223 /* Sort the ready list based on priority. */
2224 ready_sort (&ready);
2226 if (sched_verbose >= 2)
2228 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2229 debug_ready_list (&ready);
2233 /* Allow the target to reorder the list, typically for
2234 better instruction bundling. */
2235 if (sort_p && targetm.sched.reorder
2236 && (ready.n_ready == 0
2237 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2239 targetm.sched.reorder (sched_dump, sched_verbose,
2240 ready_lastpos (&ready),
2241 &ready.n_ready, clock_var);
2243 can_issue_more = issue_rate;
2245 first_cycle_insn_p = 1;
2246 cycle_issued_insns = 0;
2253 if (sched_verbose >= 2)
2255 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2257 debug_ready_list (&ready);
2260 if (ready.n_ready == 0
2262 && reload_completed)
2264 /* Allow scheduling insns directly from the queue in case
2265 there's nothing better to do (ready list is empty) but
2266 there are still vacant dispatch slots in the current cycle. */
2267 if (sched_verbose >= 6)
2268 fprintf (sched_dump,";;\t\tSecond chance\n");
2269 memcpy (temp_state, curr_state, dfa_state_size);
2270 if (early_queue_to_ready (temp_state, &ready))
2271 ready_sort (&ready);
2274 if (ready.n_ready == 0 || !can_issue_more
2275 || state_dead_lock_p (curr_state)
2276 || !(*current_sched_info->schedule_more_p) ())
2279 /* Select and remove the insn from the ready list. */
2282 insn = choose_ready (&ready);
2287 insn = ready_remove_first (&ready);
2289 if (targetm.sched.dfa_new_cycle
2290 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2291 insn, last_clock_var,
2292 clock_var, &sort_p))
2293 /* SORT_P is used by the target to override sorting
2294 of the ready list. This is needed when the target
2295 has modified its internal structures expecting that
2296 the insn will be issued next. As we need the insn
2297 to have the highest priority (so it will be returned by
2298 the ready_remove_first call above), we invoke
2299 ready_add (&ready, insn, true).
2300 But, still, there is one issue: INSN can be later
2301 discarded by scheduler's front end through
2302 current_sched_info->can_schedule_ready_p, hence, won't
2305 ready_add (&ready, insn, true);
2310 memcpy (temp_state, curr_state, dfa_state_size);
2311 if (recog_memoized (insn) < 0)
2313 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2314 || asm_noperands (PATTERN (insn)) >= 0);
2315 if (!first_cycle_insn_p && asm_p)
2316 /* This is asm insn which is tryed to be issued on the
2317 cycle not first. Issue it on the next cycle. */
2320 /* A USE insn, or something else we don't need to
2321 understand. We can't pass these directly to
2322 state_transition because it will trigger a
2323 fatal error for unrecognizable insns. */
2328 cost = state_transition (temp_state, insn);
2337 queue_insn (insn, cost);
2338 if (SCHED_GROUP_P (insn))
2347 if (current_sched_info->can_schedule_ready_p
2348 && ! (*current_sched_info->can_schedule_ready_p) (insn))
2349 /* We normally get here only if we don't want to move
2350 insn from the split block. */
2352 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
2356 /* DECISION is made. */
2358 if (TODO_SPEC (insn) & SPECULATIVE)
2359 generate_recovery_code (insn);
2361 if (control_flow_insn_p (last_scheduled_insn)
2362 /* This is used to to switch basic blocks by request
2363 from scheduler front-end (actually, sched-ebb.c only).
2364 This is used to process blocks with single fallthru
2365 edge. If succeeding block has jump, it [jump] will try
2366 move at the end of current bb, thus corrupting CFG. */
2367 || current_sched_info->advance_target_bb (*target_bb, insn))
2369 *target_bb = current_sched_info->advance_target_bb
2376 x = next_real_insn (last_scheduled_insn);
2378 dump_new_block_header (1, *target_bb, x, tail);
2381 last_scheduled_insn = bb_note (*target_bb);
2384 /* Update counters, etc in the scheduler's front end. */
2385 (*current_sched_info->begin_schedule_ready) (insn,
2386 last_scheduled_insn);
2389 last_scheduled_insn = insn;
2391 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2393 cycle_issued_insns++;
2394 memcpy (curr_state, temp_state, dfa_state_size);
2397 if (targetm.sched.variable_issue)
2399 targetm.sched.variable_issue (sched_dump, sched_verbose,
2400 insn, can_issue_more);
2401 /* A naked CLOBBER or USE generates no instruction, so do
2402 not count them against the issue rate. */
2403 else if (GET_CODE (PATTERN (insn)) != USE
2404 && GET_CODE (PATTERN (insn)) != CLOBBER)
2407 advance = schedule_insn (insn);
2409 /* After issuing an asm insn we should start a new cycle. */
2410 if (advance == 0 && asm_p)
2415 first_cycle_insn_p = 0;
2417 /* Sort the ready list based on priority. This must be
2418 redone here, as schedule_insn may have readied additional
2419 insns that will not be sorted correctly. */
2420 if (ready.n_ready > 0)
2421 ready_sort (&ready);
2423 if (targetm.sched.reorder2
2424 && (ready.n_ready == 0
2425 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2428 targetm.sched.reorder2 (sched_dump, sched_verbose,
2430 ? ready_lastpos (&ready) : NULL,
2431 &ready.n_ready, clock_var);
2439 fprintf (sched_dump, ";;\tReady list (final): ");
2440 debug_ready_list (&ready);
2443 if (current_sched_info->queue_must_finish_empty)
2444 /* Sanity check -- queue must be empty now. Meaningless if region has
2446 gcc_assert (!q_size && !ready.n_ready);
2449 /* We must maintain QUEUE_INDEX between blocks in region. */
2450 for (i = ready.n_ready - 1; i >= 0; i--)
2454 x = ready_element (&ready, i);
2455 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2456 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2460 for (i = 0; i <= max_insn_queue_index; i++)
2463 for (link = insn_queue[i]; link; link = XEXP (link, 1))
2468 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2469 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2471 free_INSN_LIST_list (&insn_queue[i]);
2475 if (!current_sched_info->queue_must_finish_empty
2476 || added_recovery_block_p)
2478 /* INSN_TICK (minimum clock tick at which the insn becomes
2479 ready) may be not correct for the insn in the subsequent
2480 blocks of the region. We should use a correct value of
2481 `clock_var' or modify INSN_TICK. It is better to keep
2482 clock_var value equal to 0 at the start of a basic block.
2483 Therefore we modify INSN_TICK here. */
2484 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
2487 if (targetm.sched.md_finish)
2488 targetm.sched.md_finish (sched_dump, sched_verbose);
2490 /* Update head/tail boundaries. */
2491 head = NEXT_INSN (prev_head);
2492 tail = last_scheduled_insn;
2494 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2495 previously found among the insns. Insert them at the beginning
2499 basic_block head_bb = BLOCK_FOR_INSN (head);
2500 rtx note_head = note_list;
2502 while (PREV_INSN (note_head))
2504 set_block_for_insn (note_head, head_bb);
2505 note_head = PREV_INSN (note_head);
2507 /* In the above cycle we've missed this note: */
2508 set_block_for_insn (note_head, head_bb);
2510 PREV_INSN (note_head) = PREV_INSN (head);
2511 NEXT_INSN (PREV_INSN (head)) = note_head;
2512 PREV_INSN (head) = note_list;
2513 NEXT_INSN (note_list) = head;
2520 fprintf (sched_dump, ";; total time = %d\n;; new head = %d\n",
2521 clock_var, INSN_UID (head));
2522 fprintf (sched_dump, ";; new tail = %d\n\n",
2526 current_sched_info->head = head;
2527 current_sched_info->tail = tail;
2532 for (i = 0; i <= rgn_n_insns; i++)
2533 free (choice_stack [i].state);
2534 free (choice_stack);
2537 /* Set_priorities: compute priority of each insn in the block. */
/* Walks the insns from TAIL backwards to HEAD; calling priority () forces
   INSN_PRIORITY to be computed and cached for each insn, and the maximum
   priority seen is folded into current_sched_info->sched_max_insns_priority.
   NOTE(review): this listing is elided -- the local declarations (insn,
   prev_head, n_insn) and the final return statement are not visible.  */
2540 set_priorities (rtx head, rtx tail)
2544 int sched_max_insns_priority =
2545 current_sched_info->sched_max_insns_priority;
/* Degenerate range: a single non-insn (e.g. a note) needs no priorities.  */
2548 if (head == tail && (! INSN_P (head)))
2553 prev_head = PREV_INSN (head);
2554 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
/* Result is discarded on purpose; priority () caches INSN_PRIORITY.  */
2560 (void) priority (insn);
2562 gcc_assert (INSN_PRIORITY_KNOWN (insn));
2564 sched_max_insns_priority = MAX (sched_max_insns_priority,
2565 INSN_PRIORITY (insn));
/* Publish the running maximum back to the scheduler front end.  */
2568 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
2573 /* Next LUID to assign to an instruction. */
2576 /* Initialize some global state for the scheduler. */
2585 /* Switch to working copy of sched_info. */
2586 memcpy (&current_sched_info_var, current_sched_info,
2587 sizeof (current_sched_info_var));
2588 current_sched_info = &current_sched_info_var;
2590 /* Disable speculative loads in their presence if cc0 defined. */
2592 flag_schedule_speculative_load = 0;
2595 /* Set dump and sched_verbose for the desired debugging output. If no
2596 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2597 For -fsched-verbose=N, N>=10, print everything to stderr. */
2598 sched_verbose = sched_verbose_param;
2599 if (sched_verbose_param == 0 && dump_file)
2601 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
2602 ? stderr : dump_file);
2604 /* Initialize SPEC_INFO. */
2605 if (targetm.sched.set_sched_flags)
2607 spec_info = &spec_info_var;
2608 targetm.sched.set_sched_flags (spec_info);
2609 if (current_sched_info->flags & DO_SPECULATION)
2610 spec_info->weakness_cutoff =
2611 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
2613 /* So we won't read anything accidentally. */
2615 #ifdef ENABLE_CHECKING
2616 check_sched_flags ();
2620 /* So we won't read anything accidentally. */
2623 /* Initialize issue_rate. */
2624 if (targetm.sched.issue_rate)
2625 issue_rate = targetm.sched.issue_rate ();
2629 if (cached_issue_rate != issue_rate)
2631 cached_issue_rate = issue_rate;
2632 /* To invalidate max_lookahead_tries: */
2633 cached_first_cycle_multipass_dfa_lookahead = 0;
2640 for (i = 0; i < old_max_uid; i++)
2643 h_i_d[i].todo_spec = HARD_DEP;
2644 h_i_d[i].queue_index = QUEUE_NOWHERE;
2645 h_i_d[i].tick = INVALID_TICK;
2646 h_i_d[i].inter_tick = INVALID_TICK;
2649 if (targetm.sched.init_dfa_pre_cycle_insn)
2650 targetm.sched.init_dfa_pre_cycle_insn ();
2652 if (targetm.sched.init_dfa_post_cycle_insn)
2653 targetm.sched.init_dfa_post_cycle_insn ();
2656 dfa_state_size = state_size ();
2657 curr_state = xmalloc (dfa_state_size);
2662 for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
2664 INSN_LUID (insn) = luid;
2666 /* Increment the next luid, unless this is a note. We don't
2667 really need separate IDs for notes and we don't want to
2668 schedule differently depending on whether or not there are
2669 line-number notes, i.e., depending on whether or not we're
2670 generating debugging information. */
2674 if (insn == BB_END (b))
2678 init_dependency_caches (luid);
2680 init_alias_analysis ();
2682 old_last_basic_block = 0;
2687 if (current_sched_info->flags & USE_GLAT)
2690 /* Compute INSN_REG_WEIGHT for all blocks. We must do this before
2691 removing death notes. */
2692 FOR_EACH_BB_REVERSE (b)
2693 find_insn_reg_weight (b);
2695 if (targetm.sched.md_init_global)
2696 targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
2698 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
2699 before_recovery = 0;
2701 #ifdef ENABLE_CHECKING
2702 /* This is used preferably for finding bugs in check_cfg () itself. */
2707 /* Free global data used during insn scheduling. */
2715 free_dependency_caches ();
2716 end_alias_analysis ();
2719 if (targetm.sched.md_finish_global)
2720 targetm.sched.md_finish_global (sched_dump, sched_verbose);
2722 if (spec_info && spec_info->dump)
2724 char c = reload_completed ? 'a' : 'b';
2726 fprintf (spec_info->dump,
2727 ";; %s:\n", current_function_name ());
2729 fprintf (spec_info->dump,
2730 ";; Procedure %cr-begin-data-spec motions == %d\n",
2732 fprintf (spec_info->dump,
2733 ";; Procedure %cr-be-in-data-spec motions == %d\n",
2735 fprintf (spec_info->dump,
2736 ";; Procedure %cr-begin-control-spec motions == %d\n",
2737 c, nr_begin_control);
2738 fprintf (spec_info->dump,
2739 ";; Procedure %cr-be-in-control-spec motions == %d\n",
2740 c, nr_be_in_control);
2743 #ifdef ENABLE_CHECKING
2744 /* After reload ia64 backend clobbers CFG, so can't check anything. */
2745 if (!reload_completed)
2749 current_sched_info = NULL;
2752 /* Fix INSN_TICKs of the instructions in the current block as well as
2753 INSN_TICKs of their dependents.
2754 HEAD and TAIL are the begin and the end of the current scheduled block. */
/* NOTE(review): elided listing -- the braces of the loops and a few
   statements between the surviving lines (e.g. the rebasing of TICK by
   next_clock) are not visible; comments below describe only what is shown.  */
2756 fix_inter_tick (rtx head, rtx tail)
2758 /* Set of instructions with corrected INSN_TICK. */
2759 bitmap_head processed;
/* Ticks of the following block start one cycle after the current clock.  */
2760 int next_clock = clock_var + 1;
2762 bitmap_initialize (&processed, 0);
2764 /* Iterates over scheduled instructions and fix their INSN_TICKs and
2765 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
2766 across different blocks. */
2767 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
2774 tick = INSN_TICK (head);
2775 gcc_assert (tick >= MIN_TICK);
2777 /* Fix INSN_TICK of instruction from just scheduled block. */
2778 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
2780 bitmap_set_bit (&processed, INSN_LUID (head));
/* Clamp so the adjusted tick never drops below MIN_TICK.  */
2783 if (tick < MIN_TICK)
2786 INSN_TICK (head) = tick;
/* Propagate the corrected tick to forward (consumer) dependencies.  */
2789 FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (head))
2793 next = DEP_LINK_CON (link);
2794 tick = INSN_TICK (next);
2796 if (tick != INVALID_TICK
2797 /* If NEXT has its INSN_TICK calculated, fix it.
2798 If not - it will be properly calculated from
2799 scratch later in fix_tick_ready. */
2800 && !bitmap_bit_p (&processed, INSN_LUID (next)))
2802 bitmap_set_bit (&processed, INSN_LUID (next));
2805 if (tick < MIN_TICK)
/* INTER_TICK keeps the largest adjusted tick seen for NEXT so far.  */
2808 if (tick > INTER_TICK (next))
2809 INTER_TICK (next) = tick;
2811 tick = INTER_TICK (next);
2813 INSN_TICK (next) = tick;
2818 bitmap_clear (&processed);
2821 /* Check if NEXT is ready to be added to the ready or queue list.
2822 If "yes", add it to the proper list.
2824 -1 - is not ready yet,
2825 0 - added to the ready list,
2826 0 < N - queued for N cycles. */
2828 try_ready (rtx next)
2833 ts = &TODO_SPEC (next);
2836 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
2837 && ((old_ts & HARD_DEP)
2838 || (old_ts & SPECULATIVE)));
2840 if (!(current_sched_info->flags & DO_SPECULATION))
2842 if (deps_list_empty_p (INSN_BACK_DEPS (next)))
2847 *ts &= ~SPECULATIVE & ~HARD_DEP;
2849 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (next));
2853 ds_t ds = DEP_LINK_STATUS (link) & SPECULATIVE;
2855 /* Backward dependencies of the insn are maintained sorted.
2856 So if DEP_STATUS of the first dep is SPECULATIVE,
2857 than all other deps are speculative too. */
2860 /* Now we've got NEXT with speculative deps only.
2861 1. Look at the deps to see what we have to do.
2862 2. Check if we can do 'todo'. */
2865 while ((link = DEP_LINK_NEXT (link)) != NULL)
2867 ds = DEP_LINK_STATUS (link) & SPECULATIVE;
2868 *ts = ds_merge (*ts, ds);
2871 if (dep_weak (*ts) < spec_info->weakness_cutoff)
2872 /* Too few points. */
2873 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
2881 gcc_assert (*ts == old_ts
2882 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
2883 else if (current_sched_info->new_ready)
2884 *ts = current_sched_info->new_ready (next, *ts);
2886 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
2887 have its original pattern or changed (speculative) one. This is due
2888 to changing ebb in region scheduling.
2889 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
2890 has speculative pattern.
2892 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
2893 control-speculative NEXT could have been discarded by sched-rgn.c
2894 (the same case as when discarded by can_schedule_ready_p ()). */
2896 if ((*ts & SPECULATIVE)
2897 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
2898 need to change anything. */
2904 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
2906 res = speculate_insn (next, *ts, &new_pat);
2911 /* It would be nice to change DEP_STATUS of all dependences,
2912 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
2913 so we won't reanalyze anything. */
2914 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
2918 /* We follow the rule, that every speculative insn
2919 has non-null ORIG_PAT. */
2920 if (!ORIG_PAT (next))
2921 ORIG_PAT (next) = PATTERN (next);
2925 if (!ORIG_PAT (next))
2926 /* If we gonna to overwrite the original pattern of insn,
2928 ORIG_PAT (next) = PATTERN (next);
2930 change_pattern (next, new_pat);
2938 /* We need to restore pattern only if (*ts == 0), because otherwise it is
2939 either correct (*ts & SPECULATIVE),
2940 or we simply don't care (*ts & HARD_DEP). */
2942 gcc_assert (!ORIG_PAT (next)
2943 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
2947 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
2948 control-speculative NEXT could have been discarded by sched-rgn.c
2949 (the same case as when discarded by can_schedule_ready_p ()). */
2950 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
2952 change_queue_index (next, QUEUE_NOWHERE);
2955 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
2956 /* We should change pattern of every previously speculative
2957 instruction - and we determine if NEXT was speculative by using
2958 ORIG_PAT field. Except one case - speculation checks have ORIG_PAT
2959 pat too, so skip them. */
2961 change_pattern (next, ORIG_PAT (next));
2962 ORIG_PAT (next) = 0;
2965 if (sched_verbose >= 2)
2967 int s = TODO_SPEC (next);
2969 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
2970 (*current_sched_info->print_insn) (next, 0));
2972 if (spec_info && spec_info->dump)
2975 fprintf (spec_info->dump, "; data-spec;");
2976 if (s & BEGIN_CONTROL)
2977 fprintf (spec_info->dump, "; control-spec;");
2978 if (s & BE_IN_CONTROL)
2979 fprintf (spec_info->dump, "; in-control-spec;");
2982 fprintf (sched_dump, "\n");
2985 adjust_priority (next);
2987 return fix_tick_ready (next);
2990 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
/* Recomputes (or incrementally updates) the earliest cycle at which NEXT can
   issue, from the ticks/costs of its resolved back-dependencies, then hands
   NEXT to change_queue_index with the computed delay.
   NOTE(review): elided listing -- the locals (tick, delay, full_p, link) and
   some of the conditional structure are not visible.  */
2992 fix_tick_ready (rtx next)
2996 if (!deps_list_empty_p (INSN_RESOLVED_BACK_DEPS (next)))
3001 tick = INSN_TICK (next);
3002 /* if tick is not equal to INVALID_TICK, then update
3003 INSN_TICK of NEXT with the most recent resolved dependence
3004 cost. Otherwise, recalculate from scratch. */
3005 full_p = (tick == INVALID_TICK);
3007 FOR_EACH_DEP_LINK (link, INSN_RESOLVED_BACK_DEPS (next))
3009 dep_t dep = DEP_LINK_DEP (link);
3010 rtx pro = DEP_PRO (dep);
/* Producers must already have a valid tick.  */
3013 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
/* Candidate tick: producer's issue cycle plus the dependence latency.  */
3015 tick1 = INSN_TICK (pro) + dep_cost (dep);
3026 INSN_TICK (next) = tick;
/* Delay relative to the current clock; QUEUE_READY means issue now
   (the guarding condition is elided in this listing).  */
3028 delay = tick - clock_var;
3030 delay = QUEUE_READY;
3032 change_queue_index (next, delay);
3037 /* Move NEXT to the proper queue list with (DELAY >= 1),
3038 or add it to the ready list (DELAY == QUEUE_READY),
3039 or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE). */
/* NOTE(review): elided listing -- the early return after the "nothing to do"
   test and the second clause of the first assert are not visible.  */
3041 change_queue_index (rtx next, int delay)
3043 int i = QUEUE_INDEX (next);
3045 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
/* An already-scheduled insn must never be re-queued.  */
3047 gcc_assert (i != QUEUE_SCHEDULED);
/* Fast path: NEXT is already in the slot DELAY would put it in.  */
3049 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3050 || (delay < 0 && delay == i))
3051 /* We have nothing to do. */
3054 /* Remove NEXT from wherever it is now. */
3055 if (i == QUEUE_READY)
3056 ready_remove_insn (next);
3058 queue_remove (next);
3060 /* Add it to the proper place. */
3061 if (delay == QUEUE_READY)
3062 ready_add (readyp, next, false);
3063 else if (delay >= 1)
3064 queue_insn (next, delay);
/* Verbose-dump bookkeeping only; no scheduling state changes below.  */
3066 if (sched_verbose >= 2)
3068 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3069 (*current_sched_info->print_insn) (next, 0));
3071 if (delay == QUEUE_READY)
3072 fprintf (sched_dump, " into ready\n");
3073 else if (delay >= 1)
3074 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3076 fprintf (sched_dump, " removed from ready or queue lists\n");
3080 /* Extend H_I_D data. */
/* Grows the haifa-insn-data array to cover every UID currently allocated,
   zero-filling the new tail, then notifies the target hook.
   NOTE(review): the function signature itself is elided in this listing.  */
3084 /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
3085 pseudos which do not cross calls. */
3086 int new_max_uid = get_max_uid () + 1;
/* xrecalloc keeps entries [0, old_max_uid) and zeroes the new part.  */
3088 h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
3089 old_max_uid = new_max_uid;
/* Let the backend grow any parallel per-insn tables it maintains.  */
3091 if (targetm.sched.h_i_d_extended)
3092 targetm.sched.h_i_d_extended ();
3095 /* Extend READY, READY_TRY and CHOICE_STACK arrays.
3096 N_NEW_INSNS is the number of additional elements to allocate. */
/* NOTE(review): elided listing -- the declaration of I and the size
   expression of the choice_stack XRESIZEVEC are not visible.  */
3098 extend_ready (int n_new_insns)
/* +1 for a sentinel slot, +issue_rate so a full cycle can be buffered.  */
3102 readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
3103 readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);
/* ready_try is zero-filled for the new entries.  */
3105 ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
3106 rgn_n_insns + 1, sizeof (char));
3108 rgn_n_insns += n_new_insns;
3110 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
/* Each new choice_stack slot needs its own DFA state buffer.  */
3113 for (i = rgn_n_insns; n_new_insns--; i--)
3114 choice_stack[i].state = xmalloc (dfa_state_size);
3117 /* Extend global scheduler structures (those, that live across calls to
3118 schedule_block) to include information about just emitted INSN. */
3120 extend_global (rtx insn)
3122 gcc_assert (INSN_P (insn));
3123 /* These structures have scheduler scope. */
/* Grow the dependency caches by one insn; the h_i_d growth call between
   these lines is elided in this listing.  */
3127 extend_dependency_caches (1, 0);
3130 /* Extends global and local scheduler structures to include information
3131 about just emitted INSN. */
3133 extend_all (rtx insn)
3135 extend_global (insn);
3137 /* These structures have block scope. */
/* Second argument 0 means "add" (as opposed to remove) per the hook's
   usage elsewhere in this file -- TODO confirm against sched-int.h.  */
3140 (*current_sched_info->add_remove_insn) (insn, 0);
3143 /* Initialize h_i_d entry of the new INSN with default values.
3144 Values, that are not explicitly initialized here, hold zero. */
3146 init_h_i_d (rtx insn)
/* Fresh luid; LUIDs are assigned in emission order.  */
3148 INSN_LUID (insn) = luid++;
/* -1 marks INSN_COST as "not yet computed".  */
3149 INSN_COST (insn) = -1;
/* Until dependencies are analyzed, treat the insn as hard-blocked.  */
3150 TODO_SPEC (insn) = HARD_DEP;
3151 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
3152 INSN_TICK (insn) = INVALID_TICK;
3153 INTER_TICK (insn) = INVALID_TICK;
3154 find_insn_reg_weight1 (insn);
3156 /* These two lists will be freed in schedule_insn (). */
3157 INSN_BACK_DEPS (insn) = create_deps_list (false);
3158 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list (false);
3160 /* This one should be allocated on the obstack because it should live till
3161 the scheduling ends. */
3162 INSN_FORW_DEPS (insn) = create_deps_list (true);
3165 /* Generates recovery code for INSN. */
/* Dispatch on the kind of speculation still pending in TODO_SPEC:
   BEGIN_SPEC first, then BE_IN_SPEC.  */
3167 generate_recovery_code (rtx insn)
3169 if (TODO_SPEC (insn) & BEGIN_SPEC)
3170 begin_speculative_block (insn);
3172 /* Here we have insn with no dependencies to
3173 instructions other then CHECK_SPEC ones. */
3175 if (TODO_SPEC (insn) & BE_IN_SPEC)
3176 add_to_speculative_block (insn);
/* (Opening of this block comment is elided in this listing.)  */
3180 Tries to add speculative dependencies of type FS between instructions
3181 in deps_list L and TWIN. */
/* NOTE(review): elided listing -- the locals (link, consumer, ds), the loop
   braces and several condition operands are not visible; only the surviving
   decision points are annotated below.  */
3183 process_insn_forw_deps_be_in_spec (deps_list_t l, rtx twin, ds_t fs)
3187 FOR_EACH_DEP_LINK (link, l)
3192 consumer = DEP_LINK_CON (link);
3194 ds = DEP_LINK_STATUS (link);
3196 if (/* If we want to create speculative dep. */
3198 /* And we can do that because this is a true dep. */
3199 && (ds & DEP_TYPES) == DEP_TRUE)
/* A dep cannot already be in-be-speculative at this point.  */
3201 gcc_assert (!(ds & BE_IN_SPEC));
3203 if (/* If this dep can be overcome with 'begin speculation'. */
3205 /* Then we have a choice: keep the dep 'begin speculative'
3206 or transform it into 'be in speculative'. */
3208 if (/* In try_ready we assert that if insn once became ready
3209 it can be removed from the ready (or queue) list only
3210 due to backend decision. Hence we can't let the
3211 probability of the speculative dep to decrease. */
3212 dep_weak (ds) <= dep_weak (fs))
3213 /* Transform it to be in speculative. */
3214 ds = (ds & ~BEGIN_SPEC) | fs;
3217 /* Mark the dep as 'be in speculative'. */
/* Re-create the dependence from TWIN to the consumer with status DS.  */
3221 add_back_forw_dep (consumer, twin, DEP_LINK_KIND (link), ds);
3225 /* Generates recovery code for BEGIN speculative INSN. */
/* Bumps the begin-data / begin-control statistics counters (the increments
   between the two ifs are elided in this listing), then emits the check and
   twin via create_check_block_twin and clears the BEGIN_SPEC bits.  */
3227 begin_speculative_block (rtx insn)
3229 if (TODO_SPEC (insn) & BEGIN_DATA)
3231 if (TODO_SPEC (insn) & BEGIN_CONTROL)
/* false: emit a simple (non-branchy) check.  */
3234 create_check_block_twin (insn, false);
3236 TODO_SPEC (insn) &= ~BEGIN_SPEC;
3239 /* Generates recovery code for BE_IN speculative INSN. */
3241 add_to_speculative_block (rtx insn)
3246 rtx_vec_t priorities_roots;
3248 ts = TODO_SPEC (insn);
3249 gcc_assert (!(ts & ~BE_IN_SPEC));
3251 if (ts & BE_IN_DATA)
3253 if (ts & BE_IN_CONTROL)
3256 TODO_SPEC (insn) &= ~BE_IN_SPEC;
3257 gcc_assert (!TODO_SPEC (insn));
3259 DONE_SPEC (insn) |= ts;
3261 /* First we convert all simple checks to branchy. */
3262 for (link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)); link != NULL;)
3264 rtx check = DEP_LINK_PRO (link);
3266 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
3268 create_check_block_twin (check, true);
3270 /* Restart search. */
3271 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
3274 /* Continue search. */
3275 link = DEP_LINK_NEXT (link);
3278 priorities_roots = NULL;
3279 clear_priorities (insn, &priorities_roots);
3287 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
3289 gcc_assert ((DEP_LINK_STATUS (link) & BEGIN_SPEC) == 0
3290 && (DEP_LINK_STATUS (link) & BE_IN_SPEC) != 0
3291 && (DEP_LINK_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3293 check = DEP_LINK_PRO (link);
3295 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
3296 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
3298 rec = BLOCK_FOR_INSN (check);
3300 twin = emit_insn_before (copy_rtx (PATTERN (insn)), BB_END (rec));
3301 extend_global (twin);
3303 copy_deps_list_change_con (INSN_RESOLVED_BACK_DEPS (twin),
3304 INSN_RESOLVED_BACK_DEPS (insn),
3307 if (sched_verbose && spec_info->dump)
3308 /* INSN_BB (insn) isn't determined for twin insns yet.
3309 So we can't use current_sched_info->print_insn. */
3310 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3311 INSN_UID (twin), rec->index);
3313 twins = alloc_INSN_LIST (twin, twins);
3315 /* Add dependences between TWIN and all appropriate
3316 instructions from REC. */
3319 add_back_forw_dep (twin, check, REG_DEP_TRUE, DEP_TRUE);
3323 link = DEP_LINK_NEXT (link);
3327 check = DEP_LINK_PRO (link);
3328 if (BLOCK_FOR_INSN (check) == rec)
3336 while (link != NULL);
3338 process_insn_forw_deps_be_in_spec (INSN_FORW_DEPS (insn), twin, ts);
3340 /* Remove all dependencies between INSN and insns in REC. */
3341 for (link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)); link != NULL;)
3343 check = DEP_LINK_PRO (link);
3345 if (BLOCK_FOR_INSN (check) == rec)
3347 delete_back_forw_dep (link);
3349 /* Restart search. */
3350 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
3353 /* Continue search. */
3354 link = DEP_LINK_NEXT (link);
3357 while (!deps_list_empty_p (INSN_BACK_DEPS (insn)));
3359 /* We couldn't have added the dependencies between INSN and TWINS earlier
3360 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
3365 twin = XEXP (twins, 0);
3366 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3368 twin = XEXP (twins, 1);
3369 free_INSN_LIST_node (twins);
3373 calc_priorities (priorities_roots);
3374 VEC_free (rtx, heap, priorities_roots);
3377 /* Extends and fills with zeros (only the new part) array pointed to by P. */
/* realloc-style helper: grows P from OLD_NMEMB to NEW_NMEMB elements of
   SIZE bytes and zeroes the newly added tail.  Shrinking is not allowed.
   NOTE(review): the return statement is elided in this listing.  */
3379 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
3381 gcc_assert (new_nmemb >= old_nmemb);
3382 p = XRESIZEVAR (void, p, new_nmemb * size);
/* Only the region beyond the old length is cleared; old data is kept.  */
3383 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
3387 /* Return the probability of speculation success for the speculation
3395 dt = FIRST_SPEC_TYPE;
3400 res *= (ds_t) get_dep_weak (ds, dt);
3404 if (dt == LAST_SPEC_TYPE)
3406 dt <<= SPEC_TYPE_SHIFT;
3412 res /= MAX_DEP_WEAK;
3414 if (res < MIN_DEP_WEAK)
3417 gcc_assert (res <= MAX_DEP_WEAK);
/* (Opening of this block comment is elided in this listing.)  */
3423 Find fallthru edge from PRED. */
/* Scans the shorter of PRED's successor list and its layout-successor's
   predecessor list for an EDGE_FALLTHRU edge; asserts it really connects
   PRED to its next_bb.  Return statements are elided in this listing.  */
3425 find_fallthru_edge (basic_block pred)
3431 succ = pred->next_bb;
3432 gcc_assert (succ->prev_bb == pred);
/* Iterate over whichever edge list is shorter.  */
3434 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
3436 FOR_EACH_EDGE (e, ei, pred->succs)
3437 if (e->flags & EDGE_FALLTHRU)
3439 gcc_assert (e->dest == succ);
3445 FOR_EACH_EDGE (e, ei, succ->preds)
3446 if (e->flags & EDGE_FALLTHRU)
3448 gcc_assert (e->src == pred);
3456 /* Initialize BEFORE_RECOVERY variable. */
/* If the last basic block falls through to EXIT, splits that fallthru with
   two new blocks (a jump block SINGLE and an empty landing block EMPTY) so
   recovery blocks can later be emitted between them; otherwise BEFORE_RECOVERY
   is simply the last block.  NOTE(review): elided listing -- locals (last, e,
   label, x) and the surrounding if/else braces are not visible.  */
3458 init_before_recovery (void)
3463 last = EXIT_BLOCK_PTR->prev_bb;
3464 e = find_fallthru_edge (last);
3468 /* We create two basic blocks:
3469 1. Single instruction block is inserted right after E->SRC
3471 2. Empty block right before EXIT_BLOCK.
3472 Between these two blocks recovery blocks will be emitted. */
3474 basic_block single, empty;
3477 single = create_empty_bb (last);
3478 empty = create_empty_bb (single);
/* Keep profile data consistent with the block we are splitting after.  */
3480 single->count = last->count;
3481 empty->count = last->count;
3482 single->frequency = last->frequency;
3483 empty->frequency = last->frequency;
3484 BB_COPY_PARTITION (single, last);
3485 BB_COPY_PARTITION (empty, last);
/* Reroute the old fallthru edge through SINGLE -> EMPTY -> EXIT.  */
3487 redirect_edge_succ (e, single);
3488 make_single_succ_edge (single, empty, 0);
3489 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
3490 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
/* SINGLE's one instruction: an explicit jump to EMPTY's label.  */
3492 label = block_label (empty);
3493 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
3494 JUMP_LABEL (x) = label;
3495 LABEL_NUSES (label)++;
3498 emit_barrier_after (x);
/* Register the new blocks with the region scheduler.  */
3500 add_block (empty, 0);
3501 add_block (single, 0);
3503 before_recovery = single;
3505 if (sched_verbose >= 2 && spec_info->dump)
3506 fprintf (spec_info->dump,
3507 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3508 last->index, single->index, empty->index);
/* else branch: no fallthru to EXIT needed fixing.  */
3511 before_recovery = last;
3514 /* Returns new recovery block. */
/* Emits a fresh labeled basic block after BEFORE_RECOVERY (initializing that
   anchor lazily) and chains future recovery blocks after it.  The locals
   (label, rec, barrier) and the final return are elided in this listing.  */
3516 create_recovery_block (void)
/* Remember that the CFG changed; schedule_block checks this flag.  */
3522 added_recovery_block_p = true;
3524 if (!before_recovery)
3525 init_before_recovery ();
/* New block goes after the barrier that ends BEFORE_RECOVERY.  */
3527 barrier = get_last_bb_insn (before_recovery);
3528 gcc_assert (BARRIER_P (barrier));
3530 label = emit_label_after (gen_label_rtx (), barrier);
3532 rec = create_basic_block (label, label, before_recovery);
3534 /* Recovery block always end with an unconditional jump. */
3535 emit_barrier_after (BB_END (rec));
/* Recovery code is cold by definition when partitioning is in effect.  */
3537 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
3538 BB_SET_PARTITION (rec, BB_COLD_PARTITION)
3540 if (sched_verbose && spec_info->dump)
3541 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
/* Next recovery block will be appended after this one.  */
3544 before_recovery = rec;
3549 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
3550 INSN is a simple check, that should be converted to branchy one. */
3552 create_check_block_twin (rtx insn, bool mutate_p)
3555 rtx label, check, twin;
3559 gcc_assert (ORIG_PAT (insn)
3561 || (IS_SPECULATION_SIMPLE_CHECK_P (insn)
3562 && !(TODO_SPEC (insn) & SPECULATIVE))));
3564 /* Create recovery block. */
3565 if (mutate_p || targetm.sched.needs_block_p (insn))
3567 rec = create_recovery_block ();
3568 label = BB_HEAD (rec);
3572 rec = EXIT_BLOCK_PTR;
3577 check = targetm.sched.gen_check (insn, label, mutate_p);
3579 if (rec != EXIT_BLOCK_PTR)
3581 /* To have mem_reg alive at the beginning of second_bb,
3582 we emit check BEFORE insn, so insn after splitting
3583 insn will be at the beginning of second_bb, which will
3584 provide us with the correct life information. */
3585 check = emit_jump_insn_before (check, insn);
3586 JUMP_LABEL (check) = label;
3587 LABEL_NUSES (label)++;
3590 check = emit_insn_before (check, insn);
3592 /* Extend data structures. */
3594 RECOVERY_BLOCK (check) = rec;
3596 if (sched_verbose && spec_info->dump)
3597 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
3598 (*current_sched_info->print_insn) (check, 0));
3600 gcc_assert (ORIG_PAT (insn));
3602 /* Initialize TWIN (twin is a duplicate of original instruction
3603 in the recovery block). */
3604 if (rec != EXIT_BLOCK_PTR)
3606 FOR_EACH_DEP_LINK (link, INSN_RESOLVED_BACK_DEPS (insn))
3607 if ((DEP_LINK_STATUS (link) & DEP_OUTPUT) != 0)
3609 struct _dep _dep, *dep = &_dep;
3611 init_dep (dep, DEP_LINK_PRO (link), check, REG_DEP_TRUE);
3613 add_back_dep_to_deps_list (INSN_RESOLVED_BACK_DEPS (check), dep);
3616 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
3617 extend_global (twin);
3619 if (sched_verbose && spec_info->dump)
3620 /* INSN_BB (insn) isn't determined for twin insns yet.
3621 So we can't use current_sched_info->print_insn. */
3622 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3623 INSN_UID (twin), rec->index);
3627 ORIG_PAT (check) = ORIG_PAT (insn);
3628 HAS_INTERNAL_DEP (check) = 1;
3630 /* ??? We probably should change all OUTPUT dependencies to
3634 copy_deps_list_change_con (INSN_RESOLVED_BACK_DEPS (twin),
3635 INSN_RESOLVED_BACK_DEPS (insn),
3638 if (rec != EXIT_BLOCK_PTR)
3639 /* In case of branchy check, fix CFG. */
3641 basic_block first_bb, second_bb;
3646 first_bb = BLOCK_FOR_INSN (check);
3647 e = split_block (first_bb, check);
3648 /* split_block emits note if *check == BB_END. Probably it
3649 is better to rip that note off. */
3650 gcc_assert (e->src == first_bb);
3651 second_bb = e->dest;
3653 /* This is fixing of incoming edge. */
3654 /* ??? Which other flags should be specified? */
3655 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
3656 /* Partition type is the same, if it is "unpartitioned". */
3657 edge_flags = EDGE_CROSSING;
3661 e = make_edge (first_bb, rec, edge_flags);
3663 add_block (second_bb, first_bb);
3665 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
3666 label = block_label (second_bb);
3667 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
3668 JUMP_LABEL (jump) = label;
3669 LABEL_NUSES (label)++;
3670 extend_global (jump);
3672 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
3673 /* Partition type is the same, if it is "unpartitioned". */
3675 /* Rewritten from cfgrtl.c. */
3676 if (flag_reorder_blocks_and_partition
3677 && targetm.have_named_sections
3678 /*&& !any_condjump_p (jump)*/)
3679 /* any_condjump_p (jump) == false.
3680 We don't need the same note for the check because
3681 any_condjump_p (check) == true. */
3683 REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
3687 edge_flags = EDGE_CROSSING;
3692 make_single_succ_edge (rec, second_bb, edge_flags);
3694 add_block (rec, EXIT_BLOCK_PTR);
3697 /* Move backward dependences from INSN to CHECK and
3698 move forward dependences from INSN to TWIN. */
3699 FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
3701 rtx pro = DEP_LINK_PRO (link);
3702 enum reg_note dk = DEP_LINK_KIND (link);
3705 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
3706 check --TRUE--> producer ??? or ANTI ???
3707 twin --TRUE--> producer
3708 twin --ANTI--> check
3710 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
3711 check --ANTI--> producer
3712 twin --ANTI--> producer
3713 twin --ANTI--> check
3715 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
3716 check ~~TRUE~~> producer
3717 twin ~~TRUE~~> producer
3718 twin --ANTI--> check */
3720 ds = DEP_LINK_STATUS (link);
3722 if (ds & BEGIN_SPEC)
3724 gcc_assert (!mutate_p);
3728 if (rec != EXIT_BLOCK_PTR)
3730 add_back_forw_dep (check, pro, dk, ds);
3731 add_back_forw_dep (twin, pro, dk, ds);
3734 add_back_forw_dep (check, pro, dk, ds);
3737 for (link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)); link != NULL;)
3738 if ((DEP_LINK_STATUS (link) & BEGIN_SPEC)
3740 /* We can delete this dep only if we totally overcome it with
3741 BEGIN_SPECULATION. */
3743 delete_back_forw_dep (link);
3745 /* Restart search. */
3746 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
3749 /* Continue search. */
3750 link = DEP_LINK_NEXT (link);
3754 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
3757 gcc_assert (!DONE_SPEC (insn));
3761 ds_t ts = TODO_SPEC (insn);
3763 DONE_SPEC (insn) = ts & BEGIN_SPEC;
3764 CHECK_SPEC (check) = ts & BEGIN_SPEC;
3766 if (ts & BEGIN_DATA)
3767 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
3768 if (ts & BEGIN_CONTROL)
3769 fs = set_dep_weak (fs, BE_IN_CONTROL, get_dep_weak (ts, BEGIN_CONTROL));
3772 CHECK_SPEC (check) = CHECK_SPEC (insn);
3774 /* Future speculations: call the helper. */
3775 process_insn_forw_deps_be_in_spec (INSN_FORW_DEPS (insn), twin, fs);
3777 if (rec != EXIT_BLOCK_PTR)
3779 /* Which types of dependencies should we use here is,
3780 generally, machine-dependent question... But, for now,
3785 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE);
3786 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3792 if (spec_info->dump)
3793 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
3794 (*current_sched_info->print_insn) (insn, 0));
3796 /* Remove all forward dependencies of the INSN. */
3797 link = DEPS_LIST_FIRST (INSN_FORW_DEPS (insn));
3798 while (link != NULL)
3800 delete_back_forw_dep (link);
3801 link = DEPS_LIST_FIRST (INSN_FORW_DEPS (insn));
3804 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
3807 sched_remove_insn (insn);
3810 add_back_forw_dep (twin, check, REG_DEP_ANTI, DEP_ANTI);
3813 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
3816 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
3817 because it'll be done later in add_to_speculative_block. */
3819 rtx_vec_t priorities_roots = NULL;
3821 clear_priorities (twin, &priorities_roots);
3822 calc_priorities (priorities_roots);
3823 VEC_free (rtx, heap, priorities_roots);
3827 /* Removes dependency between instructions in the recovery block REC
3828 and usual region instructions. It keeps inner dependences so it
3829 won't be necessary to recompute them. */
/* NOTE(review): elided listing -- the do-loop opening, several braces and
   the declarations of link/link1 are not visible.  */
3831 fix_recovery_deps (basic_block rec)
3834 rtx note, insn, jump, ready_list = 0;
/* Guards against queueing the same consumer twice, keyed by LUID.  */
3835 bitmap_head in_ready;
3838 bitmap_initialize (&in_ready, 0);
3840 /* NOTE - a basic block note. */
3841 note = NEXT_INSN (BB_HEAD (rec));
3842 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
/* Walk REC's insns backwards, starting just before its closing jump.  */
3843 insn = BB_END (rec);
3844 gcc_assert (JUMP_P (insn));
3845 insn = PREV_INSN (insn);
3849 for (link = DEPS_LIST_FIRST (INSN_FORW_DEPS (insn)); link != NULL;)
3853 consumer = DEP_LINK_CON (link);
/* Dependencies leaving REC are cut; their consumers may now be ready.  */
3855 if (BLOCK_FOR_INSN (consumer) != rec)
3857 delete_back_forw_dep (link);
3859 if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
3861 ready_list = alloc_INSN_LIST (consumer, ready_list);
3862 bitmap_set_bit (&in_ready, INSN_LUID (consumer));
3865 /* Restart search. */
3866 link = DEPS_LIST_FIRST (INSN_FORW_DEPS (insn));
/* Inner deps stay; only true deps are expected inside REC.  */
3870 gcc_assert ((DEP_LINK_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3872 /* Continue search. */
3873 link = DEP_LINK_NEXT (link);
3877 insn = PREV_INSN (insn);
3879 while (insn != note);
3881 bitmap_clear (&in_ready);
3883 /* Try to add instructions to the ready or queue list. */
3884 for (link1 = ready_list; link1; link1 = XEXP (link1, 1))
3885 try_ready (XEXP (link1, 0));
3886 free_INSN_LIST_list (&ready_list);
3888 /* Fixing jump's dependences. */
3889 insn = BB_HEAD (rec);
3890 jump = BB_END (rec);
3892 gcc_assert (LABEL_P (insn));
3893 insn = NEXT_INSN (insn);
3895 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
/* Make the closing jump depend on every insn of REC after the bb note.  */
3896 add_jump_dependencies (insn, jump);
3899 /* Changes pattern of the INSN to NEW_PAT. */
3901 change_pattern (rtx insn, rtx new_pat)
/* T holds the validate_change result; its declaration and the check of
   that result fall on elided lines -- presumably asserted nonzero.  */
3905 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
/* The pattern changed, so every cached per-insn scheduling datum
   derived from it must be dropped:  */
3907 /* Invalidate INSN_COST, so it'll be recalculated. */
3908 INSN_COST (insn) = -1;
3909 /* Invalidate INSN_TICK, so it'll be recalculated. */
3910 INSN_TICK (insn) = INVALID_TICK;
3911 dfa_clear_single_insn_cache (insn);
/* Decide whether INSN can be speculated according to REQUEST.
   Return values (see below): */
3915 /* -1 - can't speculate,
3916 0 - for speculation with REQUEST mode it is OK to use
3917 current instruction pattern,
3918 1 - need to change pattern for *NEW_PAT to be speculative. */
3920 speculate_insn (rtx insn, ds_t request, rtx *new_pat)
3922 gcc_assert (current_sched_info->flags & DO_SPECULATION
3923 && (request & SPECULATIVE));
/* Refuse (the early-return is on an elided line) insns that cannot be
   speculated: non-simple insns, insns with internal deps or in a sched
   group, insns with side effects, and requests outside the enabled
   speculation mask.  */
3925 if (!NONJUMP_INSN_P (insn)
3926 || HAS_INTERNAL_DEP (insn)
3927 || SCHED_GROUP_P (insn)
3928 || side_effects_p (PATTERN (insn))
3929 || (request & spec_info->mask) != request)
3932 gcc_assert (!IS_SPECULATION_CHECK_P (insn));
/* BE_IN speculation additionally requires a non-trapping pattern
   (the failure branch is elided).  */
3934 if (request & BE_IN_SPEC)
3936 if (may_trap_p (PATTERN (insn)))
3939 if (!(request & BEGIN_SPEC))
/* Delegate the BEGIN_* part of the request to the target hook.  */
3943 return targetm.sched.speculate_insn (insn, request & BEGIN_SPEC, new_pat);
3946 /* Print some information about block BB, which starts with HEAD and
3947 ends with TAIL, before scheduling it.
3948 I is zero, if scheduler is about to start with the fresh ebb. */
3950 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
/* The if/else selecting between the plain banner and the
   "ADVANCING TO" banner (based on I) is on elided lines.  */
3953 fprintf (sched_dump,
3954 ";; ======================================================\n");
3956 fprintf (sched_dump,
3957 ";; =====================ADVANCING TO=====================\n");
3958 fprintf (sched_dump,
3959 ";; -- basic block %d from %d to %d -- %s reload\n",
3960 bb->index, INSN_UID (head), INSN_UID (tail),
3961 (reload_completed ? "after" : "before"));
3962 fprintf (sched_dump,
3963 ";; ======================================================\n");
3964 fprintf (sched_dump, "\n");
3967 /* Unlink basic block notes and labels and saves them, so they
3968 can be easily restored. We unlink basic block notes in EBB to
3969 provide back-compatibility with the previous code, as target backends
3970 assume, that there'll be only instructions between
3971 current_sched_info->{head and tail}. We restore these notes as soon
3973 FIRST (LAST) is the first (last) basic block in the ebb.
3974 NB: In usual case (FIRST == LAST) nothing is really done. */
3976 unlink_bb_notes (basic_block first, basic_block last)
3978 /* We DON'T unlink basic block notes of the first block in the ebb. */
/* BB_HEADER is indexed by basic-block index; saved label/note chains
   are stashed here for restore_bb_notes.  */
3982 bb_header = xmalloc (last_basic_block * sizeof (*bb_header));
3984 /* Make a sentinel. */
3985 if (last->next_bb != EXIT_BLOCK_PTR)
3986 bb_header[last->next_bb->index] = 0;
3988 first = first->next_bb;
/* The enclosing (elided) loop walks LAST backwards towards FIRST,
   unlinking each block's leading label/basic-block note pair from the
   insn stream.  */
3991 rtx prev, label, note, next;
3993 label = BB_HEAD (last);
3994 if (LABEL_P (label))
3995 note = NEXT_INSN (label);
3998 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4000 prev = PREV_INSN (label);
4001 next = NEXT_INSN (note);
4002 gcc_assert (prev && next);
/* Splice [label .. note] out of the doubly-linked insn chain.  */
4004 NEXT_INSN (prev) = next;
4005 PREV_INSN (next) = prev;
4007 bb_header[last->index] = label;
4012 last = last->prev_bb;
4017 /* Restore basic block notes.
4018 FIRST is the first basic block in the ebb. */
4020 restore_bb_notes (basic_block first)
/* Inverse of unlink_bb_notes: re-splice the saved label/note chains
   back into the insn stream.  An elided early-return presumably guards
   against a NULL BB_HEADER.  */
4025 /* We DON'T unlink basic block notes of the first block in the ebb. */
4026 first = first->next_bb;
4027 /* Remember: FIRST is actually a second basic block in the ebb. */
4029 while (first != EXIT_BLOCK_PTR
4030 && bb_header[first->index])
4032 rtx prev, label, note, next;
4034 label = bb_header[first->index];
4035 prev = PREV_INSN (label);
4036 next = NEXT_INSN (prev);
4038 if (LABEL_P (label))
4039 note = NEXT_INSN (label);
4042 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
/* Mark this slot as consumed so a second restore is a no-op.  */
4044 bb_header[first->index] = 0;
4046 NEXT_INSN (prev) = label;
4047 NEXT_INSN (note) = next;
4048 PREV_INSN (next) = note;
4050 first = first->next_bb;
4057 /* Extend per basic block data structures of the scheduler.
4058 If BB is NULL, initialize structures for the whole CFG.
4059 Otherwise, initialize them for the just created BB. */
/* NOTE(review): the function header itself (name/parameters) falls on
   elided lines.  The visible body reallocates the GLAT arrays for the
   current number of basic blocks and makes sure the insn stream ends
   with a note so current_sched_info->next_tail stays non-null.  */
4065 old_last_basic_block = last_basic_block;
4067 if (current_sched_info->flags & USE_GLAT)
4069 glat_start = xrealloc (glat_start,
4070 last_basic_block * sizeof (*glat_start));
4071 glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
4074 /* The following is done to keep current_sched_info->next_tail non null. */
4076 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4077 if (NEXT_INSN (insn) == 0
4080 /* Don't emit a NOTE if it would end up before a BARRIER. */
4081 && !BARRIER_P (NEXT_INSN (insn))))
4083 emit_note_after (NOTE_INSN_DELETED, insn);
4084 /* Make insn to appear outside BB. */
4085 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4089 /* Add a basic block BB to extended basic block EBB.
4090 If EBB is EXIT_BLOCK_PTR, then BB is recovery block.
4091 If EBB is NULL, then BB should be a new region. */
4093 add_block (basic_block bb, basic_block ebb)
/* Life info must already be detached from BB before it joins a
   scheduled region.  */
4095 gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO
4096 && bb->il.rtl->global_live_at_start == 0
4097 && bb->il.rtl->global_live_at_end == 0);
/* Fresh block: no saved live-register sets yet.  */
4101 glat_start[bb->index] = 0;
4102 glat_end[bb->index] = 0;
4104 if (current_sched_info->add_block)
4105 /* This changes only data structures of the front-end. */
4106 current_sched_info->add_block (bb, ebb);
4110 Fix CFG after both in- and inter-block movement of
4111 control_flow_insn_p JUMP. */
4113 fix_jump_move (rtx jump)
4115 basic_block bb, jump_bb, jump_bb_next;
/* BB is the block JUMP was moved into; JUMP_BB is the block it
   originally belonged to.  */
4117 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4118 jump_bb = BLOCK_FOR_INSN (jump);
4119 jump_bb_next = jump_bb->next_bb;
/* Such jump movement only happens for EBB scheduling or for branchy
   speculation checks.  */
4121 gcc_assert (current_sched_info->flags & SCHED_EBB
4122 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
4124 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4125 /* if jump_bb_next is not empty. */
4126 BB_END (jump_bb) = BB_END (jump_bb_next);
4128 if (BB_END (bb) != PREV_INSN (jump))
4129 /* Then there are instruction after jump that should be placed
4131 BB_END (jump_bb_next) = BB_END (bb);
4133 /* Otherwise jump_bb_next is empty. */
4134 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4136 /* To make assertion in move_insn happy. */
4137 BB_END (bb) = PREV_INSN (jump);
4139 update_bb_for_insn (jump_bb_next);
4142 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
4144 move_block_after_check (rtx jump)
4146 basic_block bb, jump_bb, jump_bb_next;
4149 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4150 jump_bb = BLOCK_FOR_INSN (jump);
4151 jump_bb_next = jump_bb->next_bb;
4153 update_bb_for_insn (jump_bb);
/* Either JUMP itself or the end of the following block must be a
   speculation check.  */
4155 gcc_assert (IS_SPECULATION_CHECK_P (jump)
4156 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
/* Reorder the block chain: JUMP_BB_NEXT is relinked right after BB.  */
4158 unlink_block (jump_bb_next);
4159 link_block (jump_bb_next, bb);
/* Rotate the three blocks' successor-edge vectors (the temporary T is
   declared/filled on elided lines).  */
4163 move_succs (&(jump_bb->succs), bb);
4164 move_succs (&(jump_bb_next->succs), jump_bb);
4165 move_succs (&t, jump_bb_next);
4167 if (current_sched_info->fix_recovery_cfg)
4168 current_sched_info->fix_recovery_cfg
4169 (bb->index, jump_bb->index, jump_bb_next->index);
4172 /* Helper function for move_block_after_check.
4173 This function attaches edge vector pointed to by SUCCSP to
4176 move_succs (VEC(edge,gc) **succsp, basic_block to)
/* TO must not already own successor edges.  */
4181 gcc_assert (to->succs == 0);
4183 to->succs = *succsp;
/* The loop body (elided) presumably retargets each edge's source
   to TO -- confirm against full source.  */
4185 FOR_EACH_EDGE (e, ei, to->succs)
4191 /* Initialize GLAT (global_live_at_{start, end}) structures.
4192 GLAT structures are used to substitute global_live_{start, end}
4193 regsets during scheduling. This is necessary to use such functions as
4194 split_block (), as they assume consistency of register live information. */
4204 /* Helper function for init_glat. */
4206 init_glat1 (basic_block bb)
/* BB must carry valid live-register info before it is captured.  */
4208 gcc_assert (bb->il.rtl->global_live_at_start != 0
4209 && bb->il.rtl->global_live_at_end != 0);
/* Capture the regsets into the scheduler-owned GLAT arrays.  */
4211 glat_start[bb->index] = bb->il.rtl->global_live_at_start;
4212 glat_end[bb->index] = bb->il.rtl->global_live_at_end;
/* When life info is detached, the block itself no longer owns the
   regsets; attach_life_info1 puts them back after scheduling.  */
4214 if (current_sched_info->flags & DETACH_LIFE_INFO)
4216 bb->il.rtl->global_live_at_start = 0;
4217 bb->il.rtl->global_live_at_end = 0;
4221 /* Attach reg_live_info back to basic blocks.
4222 Also save regsets, that should not have been changed during scheduling,
4223 for checking purposes (see check_reg_live). */
4225 attach_life_info (void)
/* Iteration over all basic blocks (FOR_*_BB, elided) applies the
   per-block helper.  */
4230 attach_life_info1 (bb);
4233 /* Helper function for attach_life_info.
   Reattach the GLAT regsets saved by init_glat1 to BB, and (when
   checking is enabled) keep copies of region-boundary regsets so
   check_reg_live can verify they were not changed by scheduling.  */
4235 attach_life_info1 (basic_block bb)
/* The regsets must still be detached (see init_glat1).  */
4237 gcc_assert (bb->il.rtl->global_live_at_start == 0
4238 && bb->il.rtl->global_live_at_end == 0);
4240 if (glat_start[bb->index])
4242 gcc_assert (glat_end[bb->index]);
/* Hand ownership of the saved regsets back to the block.  */
4244 bb->il.rtl->global_live_at_start = glat_start[bb->index];
4245 bb->il.rtl->global_live_at_end = glat_end[bb->index];
4247 /* Make them NULL, so they won't be freed in free_glat. */
4248 glat_start[bb->index] = 0;
4249 glat_end[bb->index] = 0;
4251 #ifdef ENABLE_CHECKING
/* For fixed blocks and region heads/leaves, snapshot the live sets for
   the later consistency check.
   FIXED(review): "&reg_obstack" below had been mangled into the
   mojibake "(R)_obstack" by an HTML-entity decode of "&reg"; restored.  */
4252 if (bb->index < NUM_FIXED_BLOCKS
4253 || current_sched_info->region_head_or_leaf_p (bb, 0))
4255 glat_start[bb->index] = ALLOC_REG_SET (&reg_obstack);
4256 COPY_REG_SET (glat_start[bb->index],
4257 bb->il.rtl->global_live_at_start);
4260 if (bb->index < NUM_FIXED_BLOCKS
4261 || current_sched_info->region_head_or_leaf_p (bb, 1))
4263 glat_end[bb->index] = ALLOC_REG_SET (&reg_obstack);
4264 COPY_REG_SET (glat_end[bb->index], bb->il.rtl->global_live_at_end);
/* No saved regsets for this block: allocate fresh (empty) ones.  */
4270 gcc_assert (!glat_end[bb->index]);
4272 bb->il.rtl->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
4273 bb->il.rtl->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
4277 /* Free GLAT information. */
/* NOTE(review): the function header is on elided lines; the visible
   body frees any regsets still owned by the GLAT arrays (those not
   handed back by attach_life_info1).  */
4281 #ifdef ENABLE_CHECKING
4282 if (current_sched_info->flags & DETACH_LIFE_INFO)
4288 if (glat_start[bb->index])
4289 FREE_REG_SET (glat_start[bb->index]);
4290 if (glat_end[bb->index])
4291 FREE_REG_SET (glat_end[bb->index]);
4300 /* Remove INSN from the instruction stream.
4301 INSN should not have any dependencies. */
4303 sched_remove_insn (rtx insn)
/* Take INSN off the ready/queue bookkeeping, then let the region code
   delete it (second argument 1 = remove).  */
4305 change_queue_index (insn, QUEUE_NOWHERE);
4306 current_sched_info->add_remove_insn (insn, 1);
4310 /* Clear priorities of all instructions, that are forward dependent on INSN.
4311 Store in vector pointed to by ROOTS_PTR insns on which priority () should
4312 be invoked to initialize all cleared priorities. */
4314 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
4317 bool insn_is_root_p = true;
4319 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
/* Walk backward dependencies and recursively invalidate the cached
   priority of each producer that still has one.  */
4321 FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
4323 dep_t dep = DEP_LINK_DEP (link);
4324 rtx pro = DEP_PRO (dep);
4326 if (INSN_PRIORITY_STATUS (pro) >= 0
4327 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
4329 /* If DEP doesn't contribute to priority then INSN itself should
4330 be added to priority roots. */
4331 if (contributes_to_priority_p (dep))
4332 insn_is_root_p = false;
/* Mark the producer's priority invalid and recurse upwards.  */
4334 INSN_PRIORITY_STATUS (pro) = -1;
4335 clear_priorities (pro, roots_ptr);
/* (Elided guard, presumably INSN_IS_ROOT_P:) record INSN as a root
   from which calc_priorities will re-seed the computation.  */
4340 VEC_safe_push (rtx, heap, *roots_ptr, insn);
4343 /* Recompute priorities of instructions, whose priorities might have been
4344 changed. ROOTS is a vector of instructions whose priority computation will
4345 trigger initialization of all cleared priorities. */
4347 calc_priorities (rtx_vec_t roots)
/* The loop body (elided) presumably calls priority () on each root.  */
4352 for (i = 0; VEC_iterate (rtx, roots, i, insn); i++)
4357 /* Add dependences between JUMP and other instructions in the recovery
4358 block. INSN is the first insn in the recovery block. */
4360 add_jump_dependencies (rtx insn, rtx jump)
/* The enclosing (elided) loop advances INSN through the block.  */
4364 insn = NEXT_INSN (insn);
/* Make JUMP anti-dependent on every insn that has no forward
   dependencies of its own, so JUMP schedules last.  */
4368 if (deps_list_empty_p (INSN_FORW_DEPS (insn)))
4369 add_back_forw_dep (jump, insn, REG_DEP_ANTI, DEP_ANTI);
4373 gcc_assert (!deps_list_empty_p (INSN_BACK_DEPS (jump)));
4376 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
4378 bb_note (basic_block bb)
4382 note = BB_HEAD (bb);
/* (Elided guard, presumably skips a leading LABEL.)  */
4384 note = NEXT_INSN (note);
4386 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4390 #ifdef ENABLE_CHECKING
4391 extern void debug_spec_status (ds_t);
4393 /* Dump information about the dependence status S. */
4395 debug_spec_status (ds_t s)
/* F is the output stream (its assignment is on an elided line).
   Each flag present in S is printed with its weakness value.  */
4400 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak (s, BEGIN_DATA));
4402 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak (s, BE_IN_DATA));
4403 if (s & BEGIN_CONTROL)
4404 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak (s, BEGIN_CONTROL));
4405 if (s & BE_IN_CONTROL)
4406 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak (s, BE_IN_CONTROL));
/* Plain dependence-type bits (guards on elided lines).  */
4409 fprintf (f, "HARD_DEP; ");
4412 fprintf (f, "DEP_TRUE; ");
4414 fprintf (f, "DEP_ANTI; ");
4416 fprintf (f, "DEP_OUTPUT; ");
4421 /* Helper function for check_cfg.
4422 Return nonzero, if edge vector pointed to by EL has edge with TYPE in
4425 has_edge_p (VEC(edge,gc) *el, int type)
/* Linear scan; the return statements fall on elided lines.  */
4430 FOR_EACH_EDGE (e, ei, el)
4431 if (e->flags & type)
4436 /* Check few properties of CFG between HEAD and TAIL.
4437 If HEAD (TAIL) is NULL check from the beginning (till the end) of the
4438 instruction stream. */
/* Debug-only walker: asserts that insn/basic-block bookkeeping
   (BB_HEAD/BB_END, NEXT/PREV links, jump successor edges) is mutually
   consistent over the given insn range.  */
4440 check_cfg (rtx head, rtx tail)
4444 int not_first = 0, not_last;
4447 head = get_insns ();
4449 tail = get_last_insn ();
4450 next_tail = NEXT_INSN (tail);
4454 not_last = head != tail;
/* Doubly-linked insn chain must be self-consistent.  */
4457 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
4459 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
/* (Elided condition:) detect the start of a new basic block.  */
4462 || (NOTE_INSN_BASIC_BLOCK_P (head)
4464 || (not_first && !LABEL_P (PREV_INSN (head))))))
4466 gcc_assert (bb == 0);
4467 bb = BLOCK_FOR_INSN (head);
4469 gcc_assert (BB_HEAD (bb) == head);
4471 /* This is the case of jump table. See inside_basic_block_p (). */
4472 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
4477 gcc_assert (!inside_basic_block_p (head));
4478 head = NEXT_INSN (head);
4482 gcc_assert (inside_basic_block_p (head)
4484 gcc_assert (BLOCK_FOR_INSN (head) == bb);
4488 head = NEXT_INSN (head);
4489 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
/* A control-flow insn must end its block, and its successor edges must
   match its jump kind.  */
4493 if (control_flow_insn_p (head))
4495 gcc_assert (BB_END (bb) == head);
4497 if (any_uncondjump_p (head))
4498 gcc_assert (EDGE_COUNT (bb->succs) == 1
4499 && BARRIER_P (NEXT_INSN (head)));
4500 else if (any_condjump_p (head))
4501 gcc_assert (/* Usual case. */
4502 (EDGE_COUNT (bb->succs) > 1
4503 && !BARRIER_P (NEXT_INSN (head)))
4504 /* Or jump to the next instruction. */
4505 || (EDGE_COUNT (bb->succs) == 1
4506 && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
4507 == JUMP_LABEL (head))));
4509 if (BB_END (bb) == head)
4511 if (EDGE_COUNT (bb->succs) > 1)
4512 gcc_assert (control_flow_insn_p (head)
4513 || has_edge_p (bb->succs, EDGE_COMPLEX));
4517 head = NEXT_INSN (head);
4523 while (head != next_tail);
/* Every opened block must have been closed by the end of the walk.  */
4525 gcc_assert (bb == 0);
4528 /* Perform a few consistency checks of flags in different data structures. */
4530 check_sched_flags (void)
4532 unsigned int f = current_sched_info->flags;
/* Speculation and sched-stalled-insns are mutually exclusive, and
   speculation requires detached life info plus a non-empty mask.  */
4534 if (flag_sched_stalled_insns)
4535 gcc_assert (!(f & DO_SPECULATION));
4536 if (f & DO_SPECULATION)
4537 gcc_assert (!flag_sched_stalled_insns
4538 && (f & DETACH_LIFE_INFO)
4540 && spec_info->mask);
4541 if (f & DETACH_LIFE_INFO)
4542 gcc_assert (f & USE_GLAT);
4545 /* Check global_live_at_{start, end} regsets.
4546 If FATAL_P is TRUE, then abort execution at the first failure.
4547 Otherwise, print diagnostics to STDERR (this mode is for calling
4550 check_reg_live (bool fatal_p)
/* Compare each block's current live-at-start set against the snapshot
   taken in attach_life_info1 (loop and GLAT operand are on elided
   lines).  */
4562 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_start,
4567 gcc_assert (!fatal_p);
4569 fprintf (stderr, ";; check_reg_live_at_start (%d) failed.\n", i);
/* Same comparison for live-at-end.  */
4575 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_end,
4580 gcc_assert (!fatal_p);
4582 fprintf (stderr, ";; check_reg_live_at_end (%d) failed.\n", i);
4587 #endif /* ENABLE_CHECKING */
4589 #endif /* INSN_SCHEDULING */