/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by shortest path;
   2.  choose insn with least contribution to register pressure,
   ties broken by 3;
   3.  prefer in-block upon interblock motion, ties broken by 4;
   4.  prefer useful upon speculative motion, ties broken by 5;
   5.  choose insn with largest control flow probability, ties
   broken by 6;
   6.  choose insn with the least dependences upon the previously
   scheduled insn, or finally
   7.  choose the insn which has the most insns dependent on it;
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To a first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS backward
   dependences.  LOG_LINKS are translated into INSN_DEPEND forward
   dependences for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is OK,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live registers constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
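
/* Editor's note: the following is a minimal, self-contained sketch of
   the two phases described above -- backward priority computation and
   the forward list-scheduling loop with a ready set, a delay mechanism
   and a clock.  It is an illustration only, not part of the pass; all
   names (latency, prio, n_preds, tick) and the hard-coded DAG are
   invented for the example.  Remove the #if 0 guard to build it as a
   standalone program.  */
#if 0
#include <stdio.h>

#define N 5

/* Toy DAG: latency[i][j] > 0 is a dependence edge i -> j.  */
static const int latency[N][N] = {
  { 0, 0, 2, 0, 0 },
  { 0, 0, 1, 0, 0 },
  { 0, 0, 0, 3, 0 },
  { 0, 0, 0, 0, 0 },
  { 0, 0, 0, 1, 0 },
};
/* Priorities as the backward pass would compute them (longest latency
   path to the end of the block); here simply given.  */
static const int prio[N] = { 6, 5, 4, 1, 2 };

int main (void)
{
  int n_preds[N] = { 0 }, tick[N] = { 0 };
  int scheduled = 0, clock = 0, i, j;

  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++)
      if (latency[i][j])
        n_preds[j]++;

  while (scheduled < N)
    {
      /* Pick the highest-priority insn that is ready on this cycle
         (all predecessors scheduled and its earliest tick reached).  */
      int pick = -1;
      for (i = 0; i < N; i++)
        if (n_preds[i] == 0 && tick[i] <= clock
            && (pick < 0 || prio[i] > prio[pick]))
          pick = i;
      if (pick < 0)
        {
          clock++;              /* stall: queue advances, nothing ready */
          continue;
        }
      printf ("cycle %d: insn %d\n", clock, pick);
      n_preds[pick] = -1;       /* mark scheduled */
      scheduled++;
      /* Release successors; they become ready at clock + latency.  */
      for (j = 0; j < N; j++)
        if (latency[pick][j])
          {
            n_preds[j]--;
            if (tick[j] < clock + latency[pick][j])
              tick[j] = clock + latency[pick][j];
          }
      clock++;                  /* issue rate of one insn per cycle */
    }
  return 0;
}
#endif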
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"
#include "params.h"

#ifdef INSN_SCHEDULING
/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

static int issue_rate;

/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -DSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

static int sched_verbose_param = 0;
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  */
FILE *sched_dump = 0;

/* Highest uid before scheduling.  */
static int old_max_uid;

/* fix_sched_param() is called from toplev.c upon detection
   of the -fsched-verbose=N option.  */
void
fix_sched_param (const char *param, const char *val)
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning (0, "fix_sched_param: unknown param: %s", param);
}
struct haifa_insn_data *h_i_d;

#define LINE_NOTE(INSN)    (h_i_d[INSN_UID (INSN)].line_note)
#define INSN_TICK(INSN)    (h_i_d[INSN_UID (INSN)].tick)
#define INTER_TICK(INSN)   (h_i_d[INSN_UID (INSN)].inter_tick)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* Issue points are used to distinguish between instructions in max_issue ().
   For now, all instructions are equally good.  */
#define ISSUE_POINTS(INSN) 1

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;
/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
static spec_info_t spec_info;

/* True if a recovery block was added during scheduling of the current
   block.  Used to determine whether we need to fix INSN_TICKs.  */
static bool added_recovery_block_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Pointers to GLAT data.  See init_glat for more information.  */
regset *glat_start, *glat_end;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx *bb_header = 0;

/* Number of basic_blocks.  */
static int old_last_basic_block;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;
/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) are the insns in the INSN_DEPEND of the unscheduled
   insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */
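
/* Editor's note: a tiny self-contained sketch of the four bookkeeping
   states above and the legal transitions between them.  The names
   (toy_state, toy_transition_ok) are invented; illustration only.  */
#if 0
enum toy_state { PENDING, QUEUED, READY, SCHEDULED };

/* Return 1 iff moving an insn FROM -> TO is one of the transitions
   described above (P->Q, P->R, Q->R, R->S).  */
static int toy_transition_ok (enum toy_state from, enum toy_state to)
{
  switch (from)
    {
    case PENDING:  return to == QUEUED || to == READY;
    case QUEUED:   return to == READY;
    case READY:    return to == SCHEDULED;
    default:       return 0;   /* SCHEDULED is final.  */
    }
}
#endif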
/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c,
   based on the maximal time of functional unit reservations and of
   getting a result.  This is the longest time an insn may be queued.  */

static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   the queue, nor the ready list.
   QUEUE_READY     - INSN is in the ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)
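
/* Editor's note: a self-contained sketch of the power-of-two circular
   delay queue used above (insn_queue/q_ptr/NEXT_Q_AFTER).  Names are
   invented and the payload is an int instead of an insn list;
   illustration only.  */
#if 0
#include <stdio.h>

#define QUEUE_SLOTS 8                   /* power of two */
#define QUEUE_MASK  (QUEUE_SLOTS - 1)   /* like max_insn_queue_index */

static int slot[QUEUE_SLOTS];           /* -1 == empty */
static int ptr;                         /* like q_ptr */

/* Queue VAL so it pops N_CYCLES advances from now (cf. queue_insn).  */
static void toy_queue (int val, int n_cycles)
{
  slot[(ptr + n_cycles) & QUEUE_MASK] = val;
}

/* Advance one cycle and return what became ready, or -1
   (cf. queue_to_ready).  */
static int toy_advance (void)
{
  int val;
  ptr = (ptr + 1) & QUEUE_MASK;
  val = slot[ptr];
  slot[ptr] = -1;
  return val;
}

int main (void)
{
  int i, cycle;
  for (i = 0; i < QUEUE_SLOTS; i++)
    slot[i] = -1;
  toy_queue (42, 3);                    /* ready after 3 cycles */
  for (cycle = 1; cycle <= 4; cycle++)
    printf ("cycle %d: %d\n", cycle, toy_advance ());
  return 0;
}
#endif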
/* The following variable holds the state reflecting all current and
   future reservations of the processor units.  */
state_t curr_state;

/* The following variable is the size of the memory representing all
   current and future reservations of the processor units.  */
static size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
static char *ready_try;

/* Describe the ready list of the scheduler.
   VEC holds space enough for all insns in the current region.  VECLEN
   says how many exactly.
   FIRST is the index of the element with the highest priority; i.e. the
   last one in the ready list, since elements are ordered by ascending
   priority.
   N_READY determines how many insns are on the ready list.  */

struct ready_list
{
  rtx *vec;
  int veclen;
  int first;
  int n_ready;
};

/* The pointer to the ready list.  */
static struct ready_list *readyp;

/* Scheduling clock.  */
static int clock_var;

/* Number of instructions in current scheduling region.  */
static int rgn_n_insns;
static int may_trap_exp (rtx, int);

/* Nonzero iff the address is comprised from at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)						\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))
/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int
may_trap_exp (rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
        return TRAP_RISKY;
      else
        return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory: a volatile load.  */
      if (MEM_VOLATILE_P (x))
        return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
        return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
        return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
        return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        {
          if (fmt[i] == 'e')
            {
              int tmp_class = may_trap_exp (XEXP (x, i), is_store);
              insn_class = WORST_CLASS (insn_class, tmp_class);
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = 0; j < XVECLEN (x, i); j++)
                {
                  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
                  insn_class = WORST_CLASS (insn_class, tmp_class);
                  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
                    break;
                }
            }
          if (insn_class == TRAP_RISKY || insn_class == IRISKY)
            break;
        }
      return insn_class;
    }
}
/* Classifies insn for the purpose of verifying that it can be
   moved speculatively, by examining its pattern, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
   being either PFREE or PRISKY.  */

static int
haifa_classify_insn (rtx insn)
{
  rtx pat = PATTERN (insn);
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i, len = XVECLEN (pat, 0);

      for (i = len - 1; i >= 0; i--)
        {
          code = GET_CODE (XVECEXP (pat, 0, i));
          switch (code)
            {
            case CLOBBER:
              /* Test if it is a 'store'.  */
              tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
              break;
            case SET:
              /* Test if it is a store.  */
              tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
              if (tmp_class == TRAP_RISKY)
                break;
              /* Test if it is a load.  */
              tmp_class
                = WORST_CLASS (tmp_class,
                               may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
                                             0));
              break;
            case COND_EXEC:
            case TRAP_IF:
              tmp_class = TRAP_RISKY;
              break;
            default:
              ;
            }
          insn_class = WORST_CLASS (insn_class, tmp_class);
          if (insn_class == TRAP_RISKY || insn_class == IRISKY)
            break;
        }
    }
  else
    {
      code = GET_CODE (pat);
      switch (code)
        {
        case CLOBBER:
          /* Test if it is a 'store'.  */
          tmp_class = may_trap_exp (XEXP (pat, 0), 1);
          break;
        case SET:
          /* Test if it is a store.  */
          tmp_class = may_trap_exp (SET_DEST (pat), 1);
          if (tmp_class == TRAP_RISKY)
            break;
          /* Test if it is a load.  */
          tmp_class =
            WORST_CLASS (tmp_class,
                         may_trap_exp (SET_SRC (pat), 0));
          break;
        case COND_EXEC:
        case TRAP_IF:
          tmp_class = TRAP_RISKY;
          break;
        default:
          ;
        }
      insn_class = tmp_class;
    }

  return insn_class;
}
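
/* Editor's note: a minimal sketch of the "worst class wins" merge used
   above.  The ordering of the toy enum mirrors the idea that a higher
   value is a riskier classification; the names are invented and do not
   correspond to the real TRAP_* encoding in sched-int.h.  */
#if 0
enum toy_class { T_FREE, T_PFREE_CAND, T_PRISKY_CAND, T_IRISKY, T_RISKY };

/* Merge two classifications: the result is the riskier of the two,
   just as WORST_CLASS keeps the worst class seen while walking a
   pattern.  */
static enum toy_class toy_worst (enum toy_class a, enum toy_class b)
{
  return a > b ? a : b;
}
#endif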
/* Forward declarations.  */

HAIFA_INLINE static int insn_cost1 (rtx, enum reg_note, rtx, rtx);
static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
static int find_set_reg_weight (rtx);
static void find_insn_reg_weight (basic_block);
static void find_insn_reg_weight1 (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);
/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between three types of notes:

   (1) LINE_NUMBER notes, generated and used for debugging.  Here,
   before scheduling a region, a pointer to the LINE_NUMBER note is
   added to the insn following it (in save_line_notes()), and the note
   is removed (in rm_line_notes() and unlink_line_notes()).  After
   scheduling the region, this pointer is used for regeneration of
   the LINE_NUMBER note (in restore_line_notes()).

   (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (3) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */
static rtx unlink_other_notes (rtx, rtx);
static rtx unlink_line_notes (rtx, rtx);
static void reemit_notes (rtx);

static rtx *ready_lastpos (struct ready_list *);
static void ready_add (struct ready_list *, rtx, bool);
static void ready_sort (struct ready_list *);
static rtx ready_remove_first (struct ready_list *);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

static void debug_ready_list (struct ready_list *);

static void move_insn (rtx);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx ready_element (struct ready_list *, int);
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);
static int max_issue (struct ready_list *, int *, int);

static rtx choose_ready (struct ready_list *);

static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);
static void resolve_dep (rtx, rtx);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void extend_ready (int);
static void extend_global (rtx);
static void extend_all (rtx);
static void init_h_i_d (rtx);
static void generate_recovery_code (rtx);
static void process_insn_depend_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
static dw_t dep_weak (ds_t);
static edge find_fallthru_edge (basic_block);
static void init_before_recovery (void);
static basic_block create_recovery_block (void);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void associate_line_notes_with_blocks (basic_block);
static void change_pattern (rtx, rtx);
static int speculate_insn (rtx, ds_t, rtx *);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void extend_bb (basic_block);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
static void init_glat (void);
static void init_glat1 (basic_block);
static void attach_life_info1 (basic_block);
static void free_glat (void);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx);
static void add_jump_dependencies (rtx, rtx);
static rtx bb_note (basic_block);
static void calc_priorities (rtx);
#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
static void check_sched_flags (void);
#endif

#endif /* INSN_SCHEDULING */
/* Point to state used for the current scheduling pass.  */
struct sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Working copy of frontend's sched_info variable.  */
static struct sched_info current_sched_info_var;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;
/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of cycles between instruction issue and
   instruction results.  */

HAIFA_INLINE int
insn_cost (rtx insn, rtx link, rtx used)
{
  return insn_cost1 (insn, used ? REG_NOTE_KIND (link) : REG_NOTE_MAX,
                     link, used);
}

/* Compute cost of executing INSN given the dependence on the insn USED.
   If LINK is not NULL, then its REG_NOTE_KIND is used as a dependence type.
   Otherwise, dependence between INSN and USED is assumed to be of type
   DEP_TYPE.  This function was introduced as a workaround for the
   targetm.adjust_cost hook.
   This is the number of cycles between instruction issue and
   instruction results.  */

HAIFA_INLINE static int
insn_cost1 (rtx insn, enum reg_note dep_type, rtx link, rtx used)
{
  int cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
         understand.  We can't pass these directly to
         result_ready_cost or insn_default_latency because it will
         trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
        {
          INSN_COST (insn) = 0;
          return 0;
        }
      else
        {
          cost = insn_default_latency (insn);
          if (cost < 0)
            cost = 0;

          INSN_COST (insn) = cost;
        }
    }

  /* In this case estimate cost without caring how insn is used.  */
  if (used == 0)
    return cost;

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  */
  if (recog_memoized (used) < 0)
    cost = 0;
  else
    {
      gcc_assert (!link || dep_type == REG_NOTE_KIND (link));

      if (INSN_CODE (insn) >= 0)
        {
          if (dep_type == REG_DEP_ANTI)
            cost = 0;
          else if (dep_type == REG_DEP_OUTPUT)
            {
              cost = (insn_default_latency (insn)
                      - insn_default_latency (used));
              if (cost <= 0)
                cost = 1;
            }
          else if (bypass_p (insn))
            cost = insn_latency (insn, used);
        }

      if (targetm.sched.adjust_cost_2)
        cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost);
      else
        {
          gcc_assert (link);
          if (targetm.sched.adjust_cost)
            cost = targetm.sched.adjust_cost (used, link, insn, cost);
        }

      if (cost < 0)
        cost = 0;
    }

  return cost;
}
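
/* Editor's note: a self-contained sketch of the dependence-type cost
   rules above: anti dependences cost 0, output dependences cost the
   latency difference (at least 1), true dependences cost the
   producer's latency.  Invented names, plain ints instead of insns;
   illustration only.  */
#if 0
enum toy_dep { TOY_TRUE, TOY_ANTI, TOY_OUTPUT };

static int toy_dep_cost (enum toy_dep kind,
                         int producer_latency, int consumer_latency)
{
  switch (kind)
    {
    case TOY_ANTI:
      return 0;
    case TOY_OUTPUT:
      {
        int cost = producer_latency - consumer_latency;
        return cost <= 0 ? 1 : cost;
      }
    default:
      return producer_latency;
    }
}
#endif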
/* Compute the priority number for INSN.  */

static int
priority (rtx insn)
{
  rtx link;

  if (! INSN_P (insn))
    return 0;

  if (! INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = 0;

      if (INSN_DEPEND (insn) == 0)
        this_priority = insn_cost (insn, 0, 0);
      else
        {
          rtx prev_first, twin;
          basic_block rec;

          /* For recovery check instructions we calculate priority slightly
             differently than for normal instructions.  Instead of walking
             through the INSN_DEPEND (check) list, we walk through the
             INSN_DEPEND list of each instruction in the corresponding
             recovery block.  */

          rec = RECOVERY_BLOCK (insn);
          if (!rec || rec == EXIT_BLOCK_PTR)
            {
              prev_first = PREV_INSN (insn);
              twin = insn;
            }
          else
            {
              prev_first = NEXT_INSN (BB_HEAD (rec));
              twin = PREV_INSN (BB_END (rec));
            }

          do
            {
              for (link = INSN_DEPEND (twin); link; link = XEXP (link, 1))
                {
                  rtx next;
                  int next_priority;

                  next = XEXP (link, 0);

                  if (BLOCK_FOR_INSN (next) != rec)
                    {
                      /* Critical path is meaningful in block boundaries
                         only.  */
                      if (! (*current_sched_info->contributes_to_priority)
                          (next, insn)
                          /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
                             then speculative instructions will less likely be
                             scheduled.  That is because the priority of
                             their producers will increase, and, thus, the
                             producers will more likely be scheduled, thus,
                             resolving the dependence.  */
                          || ((current_sched_info->flags & DO_SPECULATION)
                              && (DEP_STATUS (link) & SPECULATIVE)
                              && !(spec_info->flags
                                   & COUNT_SPEC_IN_CRITICAL_PATH)))
                        continue;

                      next_priority = insn_cost1 (insn,
                                                  twin == insn ?
                                                  REG_NOTE_KIND (link) :
                                                  REG_DEP_ANTI,
                                                  twin == insn ? link : 0,
                                                  next) + priority (next);

                      if (next_priority > this_priority)
                        this_priority = next_priority;
                    }
                }

              twin = PREV_INSN (twin);
            }
          while (twin != prev_first);
        }
      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_KNOWN (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
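
/* Editor's note: the shape of priority () above is a memoized
   longest-path recursion.  This is a self-contained sketch of that
   caching pattern; invented names, plain arrays for the DAG,
   illustration only.  */
#if 0
#define TOY_N 4

static int toy_known[TOY_N];            /* like INSN_PRIORITY_KNOWN */
static int toy_prio[TOY_N];             /* like INSN_PRIORITY */
/* toy_dep[i][j]: latency of edge i -> j, 0 if none.  */
static const int toy_dep[TOY_N][TOY_N] = {
  { 0, 2, 1, 0 },
  { 0, 0, 0, 3 },
  { 0, 0, 0, 1 },
  { 0, 0, 0, 0 },
};

static int toy_priority (int i)
{
  if (!toy_known[i])
    {
      int this_prio = 1, j;             /* leaf cost stands in for
                                           insn_cost (insn, 0, 0) */
      for (j = 0; j < TOY_N; j++)
        if (toy_dep[i][j]
            && toy_dep[i][j] + toy_priority (j) > this_prio)
          this_prio = toy_dep[i][j] + toy_priority (j);
      toy_prio[i] = this_prio;
      toy_known[i] = 1;                 /* each node computed once */
    }
  return toy_prio[i];
}
#endif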
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)                                             \
       swap_sort (READY, N_READY);                                   \
     else if ((N_READY) > 2)                                         \
       qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }    \
while (0)

/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx tmp = *(const rtx *) y;
  rtx tmp2 = *(const rtx *) x;
  rtx link;
  int tmp_class, tmp2_class, depend_count1, depend_count2;
  int val, priority_val, weight_val, info_val;

  /* The insn in a schedule group should be issued first.  */
  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (priority_val)
    return priority_val;

  /* Prefer a speculative insn with greater dependency weakness.  */
  if (spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
        dw1 = dep_weak (ds1);
      else
        dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
        dw2 = dep_weak (ds2);
      else
        dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
        return dw;
    }

  /* Prefer an insn with smaller contribution to register pressure.  */
  if (!reload_completed &&
      (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
    return weight_val;

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (info_val)
    return info_val;

  /* Compare insns based on their relation to the last-scheduled-insn.  */
  if (INSN_P (last_scheduled_insn))
    {
      /* Classify the instructions into three classes:
         1) Data dependent on last scheduled insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
      link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
        tmp_class = 3;
      else if (REG_NOTE_KIND (link) == 0)     /* Data dependence.  */
        tmp_class = 1;
      else
        tmp_class = 2;

      link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
        tmp2_class = 3;
      else if (REG_NOTE_KIND (link) == 0)     /* Data dependence.  */
        tmp2_class = 1;
      else
        tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
        return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */
  depend_count1 = 0;
  for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
    depend_count1++;

  depend_count2 = 0;
  for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
    depend_count2++;

  val = depend_count2 - depend_count1;
  if (val)
    return val;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
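
/* Editor's note: a self-contained sketch of the comparator pattern
   above -- a cascade of tie-breakers ending in a unique id, so the
   qsort-based ordering is deterministic.  The toy_insn struct and all
   names are invented; illustration only.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct toy_insn { int priority; int reg_weight; int luid; };

/* Note the operands are swapped, as in rank_for_schedule, so that
   qsort puts the best insn at the end of the vector (the ready list
   keeps its best element last).  */
static int toy_rank (const void *x, const void *y)
{
  const struct toy_insn *a = (const struct toy_insn *) y;
  const struct toy_insn *b = (const struct toy_insn *) x;
  if (b->priority != a->priority)
    return b->priority - a->priority;       /* higher priority wins */
  if (a->reg_weight != b->reg_weight)
    return a->reg_weight - b->reg_weight;   /* lower pressure wins */
  return a->luid - b->luid;                 /* never 0: keeps sort stable */
}

int main (void)
{
  struct toy_insn v[3] = { {2, 1, 0}, {2, 0, 1}, {5, 9, 2} };
  int i;
  qsort (v, 3, sizeof v[0], toy_rank);
  for (i = 0; i < 3; i++)
    printf ("luid %d\n", v[i].luid);        /* worst ... best */
  return 0;
}
#endif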
/* Resort the array A in which only element at index N may be out of order.  */

HAIFA_INLINE static void
swap_sort (rtx *a, int n)
{
  rtx insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}

/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve insns
   chain for debugging purposes.  */

HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);

  gcc_assert (n_cycles <= max_insn_queue_index);

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
               (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
    }

  QUEUE_INDEX (insn) = next_q;
}

/* Remove INSN from queue.  */
static void
queue_remove (rtx insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}
/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

HAIFA_INLINE static rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}

/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
        {
          memmove (ready->vec + ready->veclen - ready->n_ready,
                   ready_lastpos (ready),
                   ready->n_ready * sizeof (rtx));
          ready->first = ready->veclen - 1;
        }
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
        {
          if (ready->n_ready)
            /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
            memmove (ready->vec + ready->veclen - ready->n_ready - 1,
                     ready_lastpos (ready),
                     ready->n_ready * sizeof (rtx));
          ready->first = ready->veclen - 2;
        }
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;
}

/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
  rtx t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}
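
/* Editor's note: a self-contained sketch of the ready list layout --
   a vector with FIRST indexing the best element and lower-priority
   elements at decreasing indices.  Invented names; no wrap or grow
   handling, unlike ready_add above.  Illustration only.  */
#if 0
#define TOY_VECLEN 8

struct toy_ready { int vec[TOY_VECLEN]; int first; int n_ready; };

/* Append VAL as the new lowest-priority element.  */
static void toy_add_last (struct toy_ready *r, int val)
{
  r->vec[r->first - r->n_ready] = val;
  r->n_ready++;
}

/* Pop the highest-priority element (the one at FIRST).  */
static int toy_remove_first (struct toy_ready *r)
{
  int t = r->vec[r->first--];
  r->n_ready--;
  if (r->n_ready == 0)
    r->first = TOY_VECLEN - 1;          /* reset when empty */
  return t;
}
#endif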
/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we try to choose the ready insn which
   permits starting the maximum number of insns on the same cycle.  */

/* Return a pointer to the element INDEX from the ready.  INDEX for
   insn with the highest priority is 0, and the lowest priority has
   N_READY - 1.  */

HAIFA_INLINE static rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}

/* Remove the element INDEX from the ready list and return it.  INDEX
   for insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx
ready_remove (struct ready_list *ready, int index)
{
  rtx t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}

/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
        ready_remove (readyp, i);
        return;
      }
  gcc_unreachable ();
}

/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */

HAIFA_INLINE static void
ready_sort (struct ready_list *ready)
{
  rtx *first = ready_lastpos (ready);
  SCHED_SORT (first, ready->n_ready);
}
/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}

/* Advance time by one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (curr_state,
                      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (curr_state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (curr_state,
                      targetm.sched.dfa_post_cycle_insn ());
}
/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  READY is the ready list which contains the insns
   that are ready to fire.  CLOCK is the current cycle.  The function
   returns the necessary cycle advance after issuing the insn (it is not
   zero for insns in a schedule group).  */

static int
schedule_insn (rtx insn)
{
  rtx link;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      char buf[2048];

      print_insn (buf, insn, 0);
      buf[40] = 0;
      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);

      if (recog_memoized (insn) < 0)
        fprintf (sched_dump, "nothing");
      else
        print_reservation (sched_dump, insn);
      fputc ('\n', sched_dump);
    }

  /* A scheduled instruction should have all its dependencies resolved and
     should have been removed from the ready list.  */
  gcc_assert (INSN_DEP_COUNT (insn) == 0);
  gcc_assert (!LOG_LINKS (insn));

  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  /* Now we can free the RESOLVED_DEPS list.  */
  if (current_sched_info->flags & USE_DEPS_LIST)
    free_DEPS_LIST_list (&RESOLVED_DEPS (insn));
  else
    free_INSN_LIST_list (&RESOLVED_DEPS (insn));

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if the following flag is set.  */
    gcc_assert (flag_sched_stalled_insns);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  /* Update dependent instructions.  */
  for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
    {
      rtx next = XEXP (link, 0);

      resolve_dep (next, insn);

      if (!RECOVERY_BLOCK (insn)
          || RECOVERY_BLOCK (insn) == EXIT_BLOCK_PTR)
        {
          int effective_cost;

          effective_cost = try_ready (next);

          if (effective_cost >= 0
              && SCHED_GROUP_P (next)
              && advance < effective_cost)
            advance = effective_cost;
        }
      else
        /* A check always has only one forward dependence (to the first insn
           in the recovery block), therefore, this will be executed only
           once.  */
        {
          gcc_assert (XEXP (link, 1) == 0);
          fix_recovery_deps (RECOVERY_BLOCK (insn));
        }
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    {
      if (reload_completed)
        PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  return advance;
}
/* Functions for handling of notes.  */

/* Delete notes beginning with INSN and put them in the chain
   of notes ended by NOTE_LIST.
   Returns the insn following the notes.  */

static rtx
unlink_other_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_NOT_BB_P (insn))
    {
      rtx next = NEXT_INSN (insn);
      /* Delete the note from its current position.  */
      if (prev)
        NEXT_INSN (prev) = next;
      if (next)
        PREV_INSN (next) = prev;

      /* See sched_analyze to see how these are handled.  */
      if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
          && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
        {
          /* Insert the note at the end of the notes list.  */
          PREV_INSN (insn) = note_list;
          if (note_list)
            NEXT_INSN (note_list) = insn;
          note_list = insn;
        }

      insn = next;
    }
  return insn;
}

/* Delete line notes beginning with INSN.  Record line-number notes so
   they can be reused.  Returns the insn following the notes.  */

static rtx
unlink_line_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_P (insn))
    {
      rtx next = NEXT_INSN (insn);

      if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
        {
          /* Delete the note from its current position.  */
          if (prev)
            NEXT_INSN (prev) = next;
          if (next)
            PREV_INSN (next) = prev;

          /* Record line-number notes so they can be reused.  */
          LINE_NOTE (insn) = insn;
        }
      else
        prev = insn;

      insn = next;
    }
  return insn;
}
/* Return the head and tail pointers of the ebb starting at BEG and ending
   at END.  */

void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
  rtx beg_head = BB_HEAD (beg);
  rtx beg_tail = BB_END (beg);
  rtx end_head = BB_HEAD (end);
  rtx end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic blocks.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else
      break;

  *tailp = end_tail;
}

/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (rtx head, rtx tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
        return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}
/* Delete line notes from one block.  Save them so they can be later restored
   (in restore_line_notes).  HEAD and TAIL are the boundaries of the
   block in which notes should be processed.  */

void
rm_line_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
        {
          prev = insn;

          insn = unlink_line_notes (insn, next_tail);

          gcc_assert (prev != tail && prev != head && insn != next_tail);
        }
    }
}

/* Save line number notes for each insn in block B.  HEAD and TAIL are
   the boundaries of the block in which notes should be processed.  */

void
save_line_notes (int b, rtx head, rtx tail)
{
  rtx next_tail;

  /* We must use the true line number for the first insn in the block
     that was computed and saved at the start of this pass.  We can't
     use the current line number, because scheduling of the previous
     block may have changed the current line number.  */

  rtx line = line_note_head[b];
  rtx insn;

  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
    else
      LINE_NOTE (insn) = line;
}
/* After a block was scheduled, insert line notes into the insns list.
   HEAD and TAIL are the boundaries of the block in which notes should
   be processed.  */

void
restore_line_notes (rtx head, rtx tail)
{
  rtx line, note, prev, new;
  int added_notes = 0;
  rtx next_tail, insn;

  next_tail = NEXT_INSN (tail);

  /* Determine the current line-number.  We want to know the current
     line number of the first insn of the block here, in case it is
     different from the true line number that was saved earlier.  If
     different, then we need a line number note before the first insn
     of this block.  If it happens to be the same, then we don't want to
     emit another line number note here.  */
  for (line = head; line; line = PREV_INSN (line))
    if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
      break;

  /* Walk the insns keeping track of the current line-number and inserting
     the line-number notes as needed.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
    /* This used to emit line number notes before every non-deleted note.
       However, this confuses a debugger, because line notes not separated
       by real instructions all end up at the same address.  I can find no
       use for line number notes before other notes, so none are emitted.  */
    else if (!NOTE_P (insn)
             && INSN_UID (insn) < old_max_uid
             && (note = LINE_NOTE (insn)) != 0
             && note != line
             && (line == 0
#ifdef USE_MAPPED_LOCATION
                 || NOTE_SOURCE_LOCATION (note) != NOTE_SOURCE_LOCATION (line)
#else
                 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
                 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)
#endif
                 ))
      {
        line = note;
        prev = PREV_INSN (insn);
        if (LINE_NOTE (note))
          {
            /* Re-use the original line-number note.  */
            LINE_NOTE (note) = 0;
            PREV_INSN (note) = prev;
            NEXT_INSN (prev) = note;
            PREV_INSN (insn) = note;
            NEXT_INSN (note) = insn;
            set_block_for_insn (note, BLOCK_FOR_INSN (insn));
          }
        else
          {
            added_notes++;
            new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
#ifndef USE_MAPPED_LOCATION
            NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
#endif
          }
      }
  if (sched_verbose && added_notes)
    fprintf (sched_dump, ";; added %d line-number notes\n", added_notes);
}
/* After scheduling the function, delete redundant line notes from the
   insns list.  */

void
rm_redundant_line_notes (void)
{
  rtx line = 0;
  rtx insn = get_insns ();
  int active_insn = 0;
  int notes = 0;

  /* Walk the insns deleting redundant line-number notes.  Many of these
     are already present.  The remainder tend to occur at basic
     block boundaries.  */
  for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      {
        /* If there are no active insns following, INSN is redundant.  */
        if (active_insn == 0)
          {
            notes++;
            SET_INSN_DELETED (insn);
          }
        /* If the line number is unchanged, LINE is redundant.  */
        else if (line
#ifdef USE_MAPPED_LOCATION
                 && NOTE_SOURCE_LOCATION (line) == NOTE_SOURCE_LOCATION (insn)
#else
                 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
                 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn)
#endif
                 )
          {
            notes++;
            SET_INSN_DELETED (line);
            line = insn;
          }
        else
          line = insn;
        active_insn = 0;
      }
    else if (!((NOTE_P (insn)
                && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
               || (NONJUMP_INSN_P (insn)
                   && (GET_CODE (PATTERN (insn)) == USE
                       || GET_CODE (PATTERN (insn)) == CLOBBER))))
      active_insn++;

  if (sched_verbose && notes)
    fprintf (sched_dump, ";; deleted %d line-number notes\n", notes);
}
/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */

void
rm_other_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  note_list = 0;
  if (head == tail && (! INSN_P (head)))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
        {
          prev = insn;

          insn = unlink_other_notes (insn, next_tail);

          gcc_assert (prev != tail && prev != head && insn != next_tail);
        }
    }
}
/* Functions for computation of registers live/usage info.  */

/* This function looks for a new register being defined.
   If the destination register is already used by the source,
   a new register is not needed.  */

static int
find_set_reg_weight (rtx x)
{
  if (GET_CODE (x) == CLOBBER
      && register_operand (SET_DEST (x), VOIDmode))
    return 1;
  if (GET_CODE (x) == SET
      && register_operand (SET_DEST (x), VOIDmode))
    {
      if (REG_P (SET_DEST (x)))
        {
          if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
            return 1;
          else
            return 0;
        }
      else
        return 1;
    }
  return 0;
}

/* Calculate INSN_REG_WEIGHT for all insns of a block.  */

static void
find_insn_reg_weight (basic_block bb)
{
  rtx insn, next_tail, head, tail;

  get_ebb_head_tail (bb, bb, &head, &tail);
  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    find_insn_reg_weight1 (insn);
}

/* Calculate INSN_REG_WEIGHT for a single instruction.
   Separated from find_insn_reg_weight because of the need
   to initialize new instructions in generate_recovery_code.  */

static void
find_insn_reg_weight1 (rtx insn)
{
  int reg_weight = 0;
  rtx x;

  /* Handle register life information.  */
  if (! INSN_P (insn))
    return;

  /* Increment weight for each register born here.  */
  x = PATTERN (insn);
  reg_weight += find_set_reg_weight (x);
  if (GET_CODE (x) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
        {
          x = XVECEXP (PATTERN (insn), 0, j);
          reg_weight += find_set_reg_weight (x);
        }
    }
  /* Decrement weight for each register that dies here.  */
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
    {
      if (REG_NOTE_KIND (x) == REG_DEAD
          || REG_NOTE_KIND (x) == REG_UNUSED)
        reg_weight--;
    }

  INSN_REG_WEIGHT (insn) = reg_weight;
}
/* Move insns that became ready to fire from queue to ready list.  */

static void
queue_to_ready (struct ready_list *ready)
{
  rtx insn;
  rtx link;

  q_ptr = NEXT_Q (q_ptr);

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {
      insn = XEXP (link, 0);
      q_size -= 1;

      if (sched_verbose >= 2)
        fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
                 (*current_sched_info->print_insn) (insn, 0));

      ready_add (ready, insn, false);
      if (sched_verbose >= 2)
        fprintf (sched_dump, "moving to ready without stalls\n");
    }
  free_INSN_LIST_list (&insn_queue[q_ptr]);

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
        {
          if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
            {
              for (; link; link = XEXP (link, 1))
                {
                  insn = XEXP (link, 0);
                  q_size -= 1;

                  if (sched_verbose >= 2)
                    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
                             (*current_sched_info->print_insn) (insn, 0));

                  ready_add (ready, insn, false);
                  if (sched_verbose >= 2)
                    fprintf (sched_dump, "moving to ready with %d stalls\n",
                             stalls);
                }
              free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);

              advance_one_cycle ();

              break;
            }

          advance_one_cycle ();
        }

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
    }
}
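
/* Editor's note: a self-contained sketch of the "stall until something
   is ready" loop above, reusing the toy circular-queue idea from the
   earlier sketch.  Invented names; illustration only.  */
#if 0
#define TOY_SLOTS 8
#define TOY_MASK (TOY_SLOTS - 1)

static int toy_slot[TOY_SLOTS];         /* 0 == empty slot */
static int toy_ptr, toy_clock;

/* Return the next queued value, advancing the clock over empty slots,
   as queue_to_ready does via advance_one_cycle ().  */
static int toy_next_ready (void)
{
  int stalls;
  for (stalls = 1; stalls <= TOY_MASK; stalls++)
    if (toy_slot[(toy_ptr + stalls) & TOY_MASK])
      {
        int val = toy_slot[(toy_ptr + stalls) & TOY_MASK];
        toy_slot[(toy_ptr + stalls) & TOY_MASK] = 0;
        toy_ptr = (toy_ptr + stalls) & TOY_MASK;
        toy_clock += stalls;            /* cf. clock_var += stalls */
        return val;
      }
  return -1;                            /* queue is empty */
}
#endif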
/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead (or in
   addition) depending on user flags and target hooks.)  */

static bool
ok_for_early_queue_removal (rtx insn)
{
  int n_cycles;
  rtx prev_insn = last_scheduled_insn;

  if (targetm.sched.is_costly_dependence)
    {
      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
        {
          for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
            {
              rtx dep_link = 0;
              int dep_cost;

              if (!NOTE_P (prev_insn))
                {
                  dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn));
                  if (dep_link)
                    {
                      dep_cost = insn_cost (prev_insn, dep_link, insn);
                      if (targetm.sched.is_costly_dependence (prev_insn, insn,
                                dep_link, dep_cost,
                                flag_sched_stalled_insns_dep - n_cycles))
                        return false;
                    }
                }

              if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
                break;
            }

          if (!prev_insn)
            break;
          prev_insn = PREV_INSN (prev_insn);
        }
    }

  return true;
}
/* Remove insns from the queue, before they become "ready" with respect
   to FU latency considerations.  */

static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx next_link;
  rtx prev_link;
  bool move_to_ready;
  int cost;
  state_t temp_state = alloca (dfa_state_size);
  int stalls;
  int insns_removed = 0;

  /*
     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
     function:

     X == 0: There is no limit on how many queued insns can be removed
             prematurely.  (flag_sched_stalled_insns = -1).

     X >= 1: Only X queued insns can be removed prematurely in each
             invocation.  (flag_sched_stalled_insns = X).

     Otherwise: Early queue removal is disabled.
         (flag_sched_stalled_insns = 0)
  */

  if (! flag_sched_stalled_insns)
    return 0;

  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
    {
      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
        {
          if (sched_verbose > 6)
            fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);

          prev_link = 0;
          while (link)
            {
              next_link = XEXP (link, 1);
              insn = XEXP (link, 0);
              if (insn && sched_verbose > 6)
                print_rtl_single (sched_dump, insn);

              memcpy (temp_state, state, dfa_state_size);
              if (recog_memoized (insn) < 0)
                /* non-negative to indicate that it's not ready
                   to avoid infinite Q->R->Q->R...  */
                cost = 0;
              else
                cost = state_transition (temp_state, insn);

              if (sched_verbose >= 6)
                fprintf (sched_dump, "transition cost = %d\n", cost);

              move_to_ready = false;
              if (cost < 0)
                {
                  move_to_ready = ok_for_early_queue_removal (insn);
                  if (move_to_ready == true)
                    {
                      /* move from Q to R */
                      q_size -= 1;
                      ready_add (ready, insn, false);

                      if (prev_link)
                        XEXP (prev_link, 1) = next_link;
                      else
                        insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;

                      free_INSN_LIST_node (link);

                      if (sched_verbose >= 2)
                        fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
                                 (*current_sched_info->print_insn) (insn, 0));

                      insns_removed++;
                      if (insns_removed == flag_sched_stalled_insns)
                        /* Remove no more than flag_sched_stalled_insns insns
                           from Q at a time.  */
                        return insns_removed;
                    }
                }

              if (move_to_ready == false)
                prev_link = link;

              link = next_link;
            } /* while link */
        } /* if link */

    } /* for stalls.. */

  return insns_removed;
}
/* Print the ready list for debugging purposes.  Callable from debugger.  */

static void
debug_ready_list (struct ready_list *ready)
{
  rtx *p;
  int i;

  if (ready->n_ready == 0)
    {
      fprintf (sched_dump, "\n");
      return;
    }

  p = ready_lastpos (ready);
  for (i = 0; i < ready->n_ready; i++)
    fprintf (sched_dump, " %s", (*current_sched_info->print_insn) (p[i], 0));
  fprintf (sched_dump, "\n");
}
/* Search INSN for REG_SAVE_NOTE note pairs for
   NOTE_INSN_EHREGION_{BEG,END}; and convert them back into
   NOTEs.  The REG_SAVE_NOTE note following the first one contains the
   saved value for NOTE_BLOCK_NUMBER which is useful for
   NOTE_INSN_EH_REGION_{BEG,END} NOTEs.  */

static void
reemit_notes (rtx insn)
{
  rtx note, last = insn;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
        {
          enum insn_note note_type = INTVAL (XEXP (note, 0));

          last = emit_note_before (note_type, last);
          remove_note (insn, note);
        }
    }
}
/* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
static void
move_insn (rtx insn)
{
  rtx last = last_scheduled_insn;

  if (PREV_INSN (insn) != last)
    {
      basic_block bb;
      rtx note;
      int jump_p = 0;

      bb = BLOCK_FOR_INSN (insn);

      /* BB_HEAD is either LABEL or NOTE.  */
      gcc_assert (BB_HEAD (bb) != insn);

      if (BB_END (bb) == insn)
        /* If this is last instruction in BB, move end marker one
           instruction up.  */
        {
          /* Jumps are always placed at the end of basic block.  */
          jump_p = control_flow_insn_p (insn);

          gcc_assert (!jump_p
                      || ((current_sched_info->flags & SCHED_RGN)
                          && RECOVERY_BLOCK (insn)
                          && RECOVERY_BLOCK (insn) != EXIT_BLOCK_PTR)
                      || (current_sched_info->flags & SCHED_EBB));

          gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);

          BB_END (bb) = PREV_INSN (insn);
        }

      gcc_assert (BB_END (bb) != last);

      if (jump_p)
        /* We move the block note along with jump.  */
        {
          /* NT is needed for assertion below.  */
          rtx nt = current_sched_info->next_tail;

          note = NEXT_INSN (insn);
          while (NOTE_NOT_BB_P (note) && note != nt)
            note = NEXT_INSN (note);

          if (note != nt
              && (LABEL_P (note)
                  || BARRIER_P (note)))
            note = NEXT_INSN (note);

          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
        }
      else
        note = insn;

      NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
      PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);

      NEXT_INSN (note) = NEXT_INSN (last);
      PREV_INSN (NEXT_INSN (last)) = note;

      NEXT_INSN (last) = insn;
      PREV_INSN (insn) = last;

      bb = BLOCK_FOR_INSN (last);

      if (jump_p)
        {
          fix_jump_move (insn);

          if (BLOCK_FOR_INSN (insn) != bb)
            move_block_after_check (insn);

          gcc_assert (BB_END (bb) == last);
        }

      set_block_for_insn (insn, bb);

      /* Update BB_END, if needed.  */
      if (BB_END (bb) == last)
        BB_END (bb) = insn;
    }

  reemit_notes (insn);

  SCHED_GROUP_P (insn) = 0;
}
/* The following structure describes an entry of the stack of choices.  */
struct choice_entry
{
  /* Ordinal number of the issued insn in the ready queue.  */
  int index;
  /* The number of the rest insns whose issues we should try.  */
  int rest;
  /* The number of issued essential insns.  */
  int n;
  /* State after issuing the insn.  */
  state_t state;
};

/* The following array is used to implement a stack of choices used in
   function max_issue.  */
static struct choice_entry *choice_stack;

/* The following variable value is the number of essential insns issued on
   the current cycle.  An insn is essential if it changes the processor's
   state.  */
static int cycle_issued_insns;

/* The following variable value is the maximal number of tries of issuing
   insns for the first cycle multipass insn scheduling.  We define
   this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
   need this constraint if all real insns (with non-negative codes)
   had reservations because in this case the algorithm complexity is
   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
   might be incomplete and such insns might occur.  For such
   descriptions, the complexity of the algorithm (without the constraint)
   could reach DFA_LOOKAHEAD ** N, where N is the queue length.  */
static int max_lookahead_tries;

/* The following value is the value of the hook
   `first_cycle_multipass_dfa_lookahead' at the last call of
   `max_issue'.  */
static int cached_first_cycle_multipass_dfa_lookahead = 0;

/* The following value is the value of `issue_rate' at the last call of
   `schedule_block'.  */
static int cached_issue_rate = 0;
/* The following function returns the maximal (or close to maximal) number
   of insns which can be issued on the same cycle, one of which is the
   insn with the best rank (the first insn in READY).  To do this, the
   function tries different samples of ready insns.  READY is the current
   queue `ready'.  The global array READY_TRY reflects which insns are
   already issued in this try.  MAX_POINTS is the sum of points of all
   instructions in READY.  The function stops immediately if it has
   reached a solution in which all instructions can be issued.  INDEX
   will contain the index of the best insn in READY.  This function is
   used only for first cycle multipass scheduling.  */
static int
max_issue (struct ready_list *ready, int *index, int max_points)
{
  int n, i, all, n_ready, best, delay, tries_num, points = -1;
  struct choice_entry *top;
  rtx insn;

  best = 0;
  memcpy (choice_stack->state, curr_state, dfa_state_size);
  top = choice_stack;
  top->rest = cached_first_cycle_multipass_dfa_lookahead;
  top->n = 0;
  n_ready = ready->n_ready;
  for (all = i = 0; i < n_ready; i++)
    if (!ready_try [i])
      all++;
  i = 0;
  tries_num = 0;
  for (;;)
    {
      if (top->rest == 0 || i >= n_ready)
        {
          if (top == choice_stack)
            break;
          if (best < top - choice_stack && ready_try [0])
            {
              best = top - choice_stack;
              *index = choice_stack [1].index;
              points = top->n;
              if (top->n == max_points || best == all)
                break;
            }
          i = top->index;
          ready_try [i] = 0;
          top--;
          memcpy (curr_state, top->state, dfa_state_size);
        }
      else if (!ready_try [i])
        {
          tries_num++;
          if (tries_num > max_lookahead_tries)
            break;
          insn = ready_element (ready, i);
          delay = state_transition (curr_state, insn);
          if (delay < 0)
            {
              if (state_dead_lock_p (curr_state))
                top->rest = 0;
              else
                top->rest--;
              n = top->n;
              if (memcmp (top->state, curr_state, dfa_state_size) != 0)
                n += ISSUE_POINTS (insn);
              top++;
              top->rest = cached_first_cycle_multipass_dfa_lookahead;
              top->index = i;
              top->n = n;
              memcpy (top->state, curr_state, dfa_state_size);
              ready_try [i] = 1;
              i = -1;
            }
        }
      i++;
    }
  while (top != choice_stack)
    {
      ready_try [top->index] = 0;
      top--;
    }
  memcpy (curr_state, choice_stack->state, dfa_state_size);

  if (sched_verbose >= 4)
    fprintf (sched_dump, ";;\t\tChosen insn : %s; points: %d/%d\n",
             (*current_sched_info->print_insn) (ready_element (ready, *index),
                                                0),
             points, max_points);

  return best;
}
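
/* Editor's note: a self-contained sketch of the backtracking search
   above: try subsets of ready "insns" against a resource model and
   remember the choice that lets the most insns issue in one cycle.
   A bitmask of busy units stands in for the DFA state; all names are
   invented, illustration only.  */
#if 0
#include <stdio.h>

#define TOY_READY 4

/* Functional units each candidate needs (bitmask per insn).  */
static const unsigned unit[TOY_READY] = { 0x1, 0x2, 0x1, 0x4 };
static int used[TOY_READY];              /* like ready_try[] */
static int best, best_first, first_choice;

/* Depth-first search: BUSY is the set of occupied units, DEPTH the
   number of insns issued so far along this branch.  */
static void toy_search (unsigned busy, int depth)
{
  int i;
  for (i = 0; i < TOY_READY; i++)
    if (!used[i] && !(busy & unit[i]))   /* units free: can issue */
      {
        used[i] = 1;
        if (depth == 0)
          first_choice = i;
        /* Like max_issue, only record a solution that issues the
           best-ranked insn (insn 0) somewhere in the cycle.  */
        if (depth + 1 > best && used[0])
          {
            best = depth + 1;
            best_first = first_choice;   /* cf. choice_stack [1].index */
          }
        toy_search (busy | unit[i], depth + 1);
        used[i] = 0;
      }
}

int main (void)
{
  toy_search (0, 0);
  printf ("can issue %d insns; pick insn %d first\n", best, best_first);
  return 0;
}
#endif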
2122 /* The following function chooses an insn from READY, removing it
2123 from the list. The function is used only for first
2124 cycle multipass scheduling. */
2127 choose_ready (struct ready_list *ready)
2131 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2132 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2133 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
2134 return ready_remove_first (ready);
2137 /* Try to choose the better insn. */
2138 int index = 0, i, n;
2140 int more_issue, max_points, try_data = 1, try_control = 1;
2142 if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
2144 cached_first_cycle_multipass_dfa_lookahead = lookahead;
2145 max_lookahead_tries = 100;
2146 for (i = 0; i < issue_rate; i++)
2147 max_lookahead_tries *= lookahead;
2149 insn = ready_element (ready, 0);
2150 if (INSN_CODE (insn) < 0)
2151 return ready_remove_first (ready);
2154 && spec_info->flags & (PREFER_NON_DATA_SPEC
2155 | PREFER_NON_CONTROL_SPEC))
2157 for (i = 0, n = ready->n_ready; i < n; i++)
2162 x = ready_element (ready, i);
2165 if (spec_info->flags & PREFER_NON_DATA_SPEC
2166 && !(s & DATA_SPEC))
2169 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2174 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2175 && !(s & CONTROL_SPEC))
2178 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2184 if ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2185 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2186 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2187 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2189 /* Discard a speculative instruction that stands first in the ready list. */
2192 change_queue_index (insn, 1);
2196 max_points = ISSUE_POINTS (insn);
2197 more_issue = issue_rate - cycle_issued_insns - 1;
2199 for (i = 1; i < ready->n_ready; i++)
2201 insn = ready_element (ready, i);
2203 = (INSN_CODE (insn) < 0
2204 || (!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2205 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2206 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2207 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2210 if (!ready_try [i] && more_issue-- > 0)
2211 max_points += ISSUE_POINTS (insn);
2214 if (max_issue (ready, &index, max_points) == 0)
2215 return ready_remove_first (ready);
2217 return ready_remove (ready, index);
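/* A simplified model (hypothetical helper) of the preference scan in
   choose_ready above: speculative insns of a given type are tried only
   when every ready insn carries that speculation type; a single
   non-speculative candidate is enough to clear the try_* flag.  */
static int
example_try_spec_p (int n_ready, const char *is_spec)
{
  int i;

  for (i = 0; i < n_ready; i++)
    if (!is_spec[i])
      return 0;	/* A non-speculative insn exists; prefer it.  */
  return 1;	/* Only speculative insns are ready.  */
}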
2221 /* Use forward list scheduling to rearrange insns of block pointed to by
2222 TARGET_BB, possibly bringing insns from subsequent blocks in the same region. */
2226 schedule_block (basic_block *target_bb, int rgn_n_insns1)
2228 struct ready_list ready;
2229 int i, first_cycle_insn_p;
2231 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2232 int sort_p, advance, start_clock_var;
2234 /* Head/tail info for this block. */
2235 rtx prev_head = current_sched_info->prev_head;
2236 rtx next_tail = current_sched_info->next_tail;
2237 rtx head = NEXT_INSN (prev_head);
2238 rtx tail = PREV_INSN (next_tail);
2240 /* We used to have code to avoid getting parameters moved from hard
2241 argument registers into pseudos.
2243 However, it was removed when it proved to be of marginal benefit
2244 and caused problems because schedule_block and compute_forward_dependences
2245 had different notions of what the "head" insn was. */
2247 gcc_assert (head != tail || INSN_P (head));
2249 added_recovery_block_p = false;
2253 dump_new_block_header (0, *target_bb, head, tail);
2255 state_reset (curr_state);
2257 /* Allocate the ready list. */
2261 choice_stack = NULL;
2264 extend_ready (rgn_n_insns1 + 1);
2266 ready.first = ready.veclen - 1;
2269 /* It is used for first cycle multipass scheduling. */
2270 temp_state = alloca (dfa_state_size);
2272 if (targetm.sched.md_init)
2273 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2275 /* We start inserting insns after PREV_HEAD. */
2276 last_scheduled_insn = prev_head;
2278 gcc_assert (NOTE_P (last_scheduled_insn)
2279 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2281 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the queue. */
2286 insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
2287 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2289 /* Start just before the beginning of time. */
2292 /* We need the queue and ready lists and clock_var to be initialized
2293 in try_ready () (which is called through init_ready_list ()). */
2294 (*current_sched_info->init_ready_list) ();
2296 /* Now we can restore basic block notes and maintain precise cfg. */
2297 restore_bb_notes (*target_bb);
2299 last_clock_var = -1;
2304 /* Loop until all the insns in BB are scheduled. */
2305 while ((*current_sched_info->schedule_more_p) ())
2309 start_clock_var = clock_var;
2313 advance_one_cycle ();
2315 /* Add to the ready list all pending insns that can be issued now.
2316 If there are no ready insns, increment clock until one
2317 is ready and add all pending insns at that point to the ready list. */
2319 queue_to_ready (&ready);
2321 gcc_assert (ready.n_ready);
2323 if (sched_verbose >= 2)
2325 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2326 debug_ready_list (&ready);
2328 advance -= clock_var - start_clock_var;
2330 while (advance > 0);
2334 /* Sort the ready list based on priority. */
2335 ready_sort (&ready);
2337 if (sched_verbose >= 2)
2339 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2340 debug_ready_list (&ready);
2344 /* Allow the target to reorder the list, typically for
2345 better instruction bundling. */
2346 if (sort_p && targetm.sched.reorder
2347 && (ready.n_ready == 0
2348 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2350 targetm.sched.reorder (sched_dump, sched_verbose,
2351 ready_lastpos (&ready),
2352 &ready.n_ready, clock_var);
2354 can_issue_more = issue_rate;
2356 first_cycle_insn_p = 1;
2357 cycle_issued_insns = 0;
2364 if (sched_verbose >= 2)
2366 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2368 debug_ready_list (&ready);
2371 if (ready.n_ready == 0
2373 && reload_completed)
2375 /* Allow scheduling insns directly from the queue in case
2376 there's nothing better to do (ready list is empty) but
2377 there are still vacant dispatch slots in the current cycle. */
2378 if (sched_verbose >= 6)
2379 fprintf (sched_dump, ";;\t\tSecond chance\n");
2380 memcpy (temp_state, curr_state, dfa_state_size);
2381 if (early_queue_to_ready (temp_state, &ready))
2382 ready_sort (&ready);
2385 if (ready.n_ready == 0 || !can_issue_more
2386 || state_dead_lock_p (curr_state)
2387 || !(*current_sched_info->schedule_more_p) ())
2390 /* Select and remove the insn from the ready list. */
2393 insn = choose_ready (&ready);
2398 insn = ready_remove_first (&ready);
2400 if (targetm.sched.dfa_new_cycle
2401 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2402 insn, last_clock_var,
2403 clock_var, &sort_p))
2404 /* SORT_P is used by the target to override sorting
2405 of the ready list. This is needed when the target
2406 has modified its internal structures expecting that
2407 the insn will be issued next. As we need the insn
2408 to have the highest priority (so it will be returned by
2409 the ready_remove_first call above), we invoke
2410 ready_add (&ready, insn, true).
2411 But, still, there is one issue: INSN can be later
2412 discarded by scheduler's front end through
2413 current_sched_info->can_schedule_ready_p, hence, won't be issued next. */
2416 ready_add (&ready, insn, true);
2421 memcpy (temp_state, curr_state, dfa_state_size);
2422 if (recog_memoized (insn) < 0)
2424 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2425 || asm_noperands (PATTERN (insn)) >= 0);
2426 if (!first_cycle_insn_p && asm_p)
2427 /* This is an asm insn which we tried to issue on a
2428 cycle other than the first. Issue it on the next cycle. */
2431 /* A USE insn, or something else we don't need to
2432 understand. We can't pass these directly to
2433 state_transition because it will trigger a
2434 fatal error for unrecognizable insns. */
2439 cost = state_transition (temp_state, insn);
2448 queue_insn (insn, cost);
2449 if (SCHED_GROUP_P (insn))
2458 if (current_sched_info->can_schedule_ready_p
2459 && ! (*current_sched_info->can_schedule_ready_p) (insn))
2460 /* We normally get here only if we don't want to move
2461 insn from the split block. */
2463 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
2467 /* DECISION is made. */
2469 if (TODO_SPEC (insn) & SPECULATIVE)
2470 generate_recovery_code (insn);
2472 if (control_flow_insn_p (last_scheduled_insn)
2473 /* This is used to switch basic blocks by request
2474 from the scheduler front-end (actually, sched-ebb.c only).
2475 It is used to process blocks with a single fallthru
2476 edge. If the succeeding block has a jump, that jump will try to
2477 move to the end of the current bb, thus corrupting the CFG. */
2478 || current_sched_info->advance_target_bb (*target_bb, insn))
2480 *target_bb = current_sched_info->advance_target_bb
2487 x = next_real_insn (last_scheduled_insn);
2489 dump_new_block_header (1, *target_bb, x, tail);
2492 last_scheduled_insn = bb_note (*target_bb);
2495 /* Update counters, etc in the scheduler's front end. */
2496 (*current_sched_info->begin_schedule_ready) (insn,
2497 last_scheduled_insn);
2500 last_scheduled_insn = insn;
2502 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2504 cycle_issued_insns++;
2505 memcpy (curr_state, temp_state, dfa_state_size);
2508 if (targetm.sched.variable_issue)
2510 targetm.sched.variable_issue (sched_dump, sched_verbose,
2511 insn, can_issue_more);
2512 /* A naked CLOBBER or USE generates no instruction, so do
2513 not count them against the issue rate. */
2514 else if (GET_CODE (PATTERN (insn)) != USE
2515 && GET_CODE (PATTERN (insn)) != CLOBBER)
2518 advance = schedule_insn (insn);
2520 /* After issuing an asm insn we should start a new cycle. */
2521 if (advance == 0 && asm_p)
2526 first_cycle_insn_p = 0;
2528 /* Sort the ready list based on priority. This must be
2529 redone here, as schedule_insn may have readied additional
2530 insns that will not be sorted correctly. */
2531 if (ready.n_ready > 0)
2532 ready_sort (&ready);
2534 if (targetm.sched.reorder2
2535 && (ready.n_ready == 0
2536 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2539 targetm.sched.reorder2 (sched_dump, sched_verbose,
2541 ? ready_lastpos (&ready) : NULL,
2542 &ready.n_ready, clock_var);
2550 fprintf (sched_dump, ";;\tReady list (final): ");
2551 debug_ready_list (&ready);
2554 if (current_sched_info->queue_must_finish_empty)
2555 /* Sanity check -- queue must be empty now. Meaningless if region has multiple bbs. */
2557 gcc_assert (!q_size && !ready.n_ready);
2560 /* We must maintain QUEUE_INDEX between blocks in region. */
2561 for (i = ready.n_ready - 1; i >= 0; i--)
2565 x = ready_element (&ready, i);
2566 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2567 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2571 for (i = 0; i <= max_insn_queue_index; i++)
2574 for (link = insn_queue[i]; link; link = XEXP (link, 1))
2579 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2580 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2582 free_INSN_LIST_list (&insn_queue[i]);
2586 if (!current_sched_info->queue_must_finish_empty
2587 || added_recovery_block_p)
2589 /* INSN_TICK (minimum clock tick at which the insn becomes
2590 ready) may not be correct for the insns in the subsequent
2591 blocks of the region. We should use a correct value of
2592 `clock_var' or modify INSN_TICK. It is better to keep
2593 clock_var value equal to 0 at the start of a basic block.
2594 Therefore we modify INSN_TICK here. */
2595 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
2598 #ifdef ENABLE_CHECKING
2599 /* After the reload the ia64 backend doesn't maintain BB_END, so
2600 if we want to check anything, better do it now.
2601 And it already clobbered previously scheduled code. */
2602 if (reload_completed)
2603 check_cfg (BB_HEAD (BLOCK_FOR_INSN (prev_head)), 0);
2606 if (targetm.sched.md_finish)
2607 targetm.sched.md_finish (sched_dump, sched_verbose);
2609 /* Update head/tail boundaries. */
2610 head = NEXT_INSN (prev_head);
2611 tail = last_scheduled_insn;
2613 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2614 previously found among the insns. Insert them at the beginning of the insns. */
2618 basic_block head_bb = BLOCK_FOR_INSN (head);
2619 rtx note_head = note_list;
2621 while (PREV_INSN (note_head))
2623 set_block_for_insn (note_head, head_bb);
2624 note_head = PREV_INSN (note_head);
2626 /* In the above loop we've missed this note: */
2627 set_block_for_insn (note_head, head_bb);
2629 PREV_INSN (note_head) = PREV_INSN (head);
2630 NEXT_INSN (PREV_INSN (head)) = note_head;
2631 PREV_INSN (head) = note_list;
2632 NEXT_INSN (note_list) = head;
2639 fprintf (sched_dump, ";; total time = %d\n;; new head = %d\n",
2640 clock_var, INSN_UID (head));
2641 fprintf (sched_dump, ";; new tail = %d\n\n",
2645 current_sched_info->head = head;
2646 current_sched_info->tail = tail;
2651 for (i = 0; i <= rgn_n_insns; i++)
2652 free (choice_stack [i].state);
2653 free (choice_stack);
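/* The driver loop of schedule_block above, reduced to a sketch (a
   non-authoritative outline; the real loop also handles the target
   reorder hooks, speculation, asm corner cases and verbose dumping):

     while (schedule_more_p ())
       {
	 advance_one_cycle ();
	 queue_to_ready (&ready);	    // release expired queue entries
	 ready_sort (&ready);		    // highest priority picked first
	 while (can_issue_more && ready.n_ready)
	   {
	     insn = choose_ready (&ready);  // multipass DFA lookahead
	     if (state_transition cost is zero)
	       schedule_insn (insn);	    // issue on this cycle
	     else
	       queue_insn (insn, cost);	    // retry after COST cycles
	   }
       }
*/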
2656 /* Set_priorities: compute priority of each insn in the block. */
2659 set_priorities (rtx head, rtx tail)
2663 int sched_max_insns_priority =
2664 current_sched_info->sched_max_insns_priority;
2667 if (head == tail && (! INSN_P (head)))
2672 prev_head = PREV_INSN (head);
2673 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
2679 (void) priority (insn);
2681 if (INSN_PRIORITY_KNOWN (insn))
2682 sched_max_insns_priority =
2683 MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
2686 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
2691 /* Next LUID to assign to an instruction. */
2694 /* Initialize some global state for the scheduler. */
2703 /* Switch to working copy of sched_info. */
2704 memcpy (&current_sched_info_var, current_sched_info,
2705 sizeof (current_sched_info_var));
2706 current_sched_info = &current_sched_info_var;
2708 /* Disable speculative loads in their presence if cc0 defined. */
2710 flag_schedule_speculative_load = 0;
2713 /* Set dump and sched_verbose for the desired debugging output. If no
2714 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2715 For -fsched-verbose=N, N>=10, print everything to stderr. */
2716 sched_verbose = sched_verbose_param;
2717 if (sched_verbose_param == 0 && dump_file)
2719 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
2720 ? stderr : dump_file);
2722 /* Initialize SPEC_INFO. */
2723 if (targetm.sched.set_sched_flags)
2725 spec_info = &spec_info_var;
2726 targetm.sched.set_sched_flags (spec_info);
2727 if (current_sched_info->flags & DO_SPECULATION)
2728 spec_info->weakness_cutoff =
2729 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
2731 /* So we won't read anything accidentally. */
2733 #ifdef ENABLE_CHECKING
2734 check_sched_flags ();
2738 /* So we won't read anything accidentally. */
2741 /* Initialize issue_rate. */
2742 if (targetm.sched.issue_rate)
2743 issue_rate = targetm.sched.issue_rate ();
2747 if (cached_issue_rate != issue_rate)
2749 cached_issue_rate = issue_rate;
2750 /* To invalidate max_lookahead_tries: */
2751 cached_first_cycle_multipass_dfa_lookahead = 0;
2758 for (i = 0; i < old_max_uid; i++)
2761 h_i_d[i].todo_spec = HARD_DEP;
2762 h_i_d[i].queue_index = QUEUE_NOWHERE;
2763 h_i_d[i].tick = INVALID_TICK;
2764 h_i_d[i].inter_tick = INVALID_TICK;
2767 if (targetm.sched.init_dfa_pre_cycle_insn)
2768 targetm.sched.init_dfa_pre_cycle_insn ();
2770 if (targetm.sched.init_dfa_post_cycle_insn)
2771 targetm.sched.init_dfa_post_cycle_insn ();
2774 dfa_state_size = state_size ();
2775 curr_state = xmalloc (dfa_state_size);
2780 for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
2782 INSN_LUID (insn) = luid;
2784 /* Increment the next luid, unless this is a note. We don't
2785 really need separate IDs for notes and we don't want to
2786 schedule differently depending on whether or not there are
2787 line-number notes, i.e., depending on whether or not we're
2788 generating debugging information. */
2792 if (insn == BB_END (b))
2796 init_dependency_caches (luid);
2798 init_alias_analysis ();
2801 old_last_basic_block = 0;
2806 if (current_sched_info->flags & USE_GLAT)
2809 /* Compute INSN_REG_WEIGHT for all blocks. We must do this before
2810 removing death notes. */
2811 FOR_EACH_BB_REVERSE (b)
2812 find_insn_reg_weight (b);
2814 if (targetm.sched.md_init_global)
2815 targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
2817 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
2818 before_recovery = 0;
2820 #ifdef ENABLE_CHECKING
2821 /* This is used primarily for finding bugs in check_cfg () itself. */
2826 /* Free global data used during insn scheduling. */
2834 free_dependency_caches ();
2835 end_alias_analysis ();
2836 free (line_note_head);
2839 if (targetm.sched.md_finish_global)
2840 targetm.sched.md_finish_global (sched_dump, sched_verbose);
2842 if (spec_info && spec_info->dump)
2844 char c = reload_completed ? 'a' : 'b';
2846 fprintf (spec_info->dump,
2847 ";; %s:\n", current_function_name ());
2849 fprintf (spec_info->dump,
2850 ";; Procedure %cr-begin-data-spec motions == %d\n",
2852 fprintf (spec_info->dump,
2853 ";; Procedure %cr-be-in-data-spec motions == %d\n",
2855 fprintf (spec_info->dump,
2856 ";; Procedure %cr-begin-control-spec motions == %d\n",
2857 c, nr_begin_control);
2858 fprintf (spec_info->dump,
2859 ";; Procedure %cr-be-in-control-spec motions == %d\n",
2860 c, nr_be_in_control);
2863 #ifdef ENABLE_CHECKING
2864 /* After reload ia64 backend clobbers CFG, so can't check anything. */
2865 if (!reload_completed)
2869 current_sched_info = NULL;
2872 /* Fix INSN_TICKs of the instructions in the current block as well as
2873 INSN_TICKs of their dependents.
2874 HEAD and TAIL are the begin and the end of the current scheduled block. */
2876 fix_inter_tick (rtx head, rtx tail)
2878 /* Set of instructions with corrected INSN_TICK. */
2879 bitmap_head processed;
2880 int next_clock = clock_var + 1;
2882 bitmap_initialize (&processed, 0);
2884 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
2885 the INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
2886 across different blocks. */
2887 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
2894 tick = INSN_TICK (head);
2895 gcc_assert (tick >= MIN_TICK);
2897 /* Fix INSN_TICK of instruction from just scheduled block. */
2898 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
2900 bitmap_set_bit (&processed, INSN_LUID (head));
2903 if (tick < MIN_TICK)
2906 INSN_TICK (head) = tick;
2909 for (link = INSN_DEPEND (head); link; link = XEXP (link, 1))
2913 next = XEXP (link, 0);
2914 tick = INSN_TICK (next);
2916 if (tick != INVALID_TICK
2917 /* If NEXT has its INSN_TICK calculated, fix it.
2918 If not - it will be properly calculated from
2919 scratch later in fix_tick_ready. */
2920 && !bitmap_bit_p (&processed, INSN_LUID (next)))
2922 bitmap_set_bit (&processed, INSN_LUID (next));
2925 if (tick < MIN_TICK)
2928 if (tick > INTER_TICK (next))
2929 INTER_TICK (next) = tick;
2931 tick = INTER_TICK (next);
2933 INSN_TICK (next) = tick;
2938 bitmap_clear (&processed);
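/* A sketch of the rebasing done in fix_inter_tick (assumption: the
   statements elided from this listing shift each tick down by
   NEXT_CLOCK, clamping at MIN_TICK, so the next block starts counting
   from zero; the helper name is hypothetical).  */
static int
example_rebase_tick (int tick, int next_clock, int min_tick)
{
  /* E.g. an insn with tick 9 after a block that ended at clock 8
     becomes ready at tick 1 of the next block.  */
  tick -= next_clock;
  if (tick < min_tick)
    tick = min_tick;
  return tick;
}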
2941 /* Check if NEXT is ready to be added to the ready or queue list.
2942 If "yes", add it to the proper list.
2944 -1 - is not ready yet,
2945 0 - added to the ready list,
2946 0 < N - queued for N cycles. */
2948 try_ready (rtx next)
2953 ts = &TODO_SPEC (next);
2956 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
2957 && ((old_ts & HARD_DEP)
2958 || (old_ts & SPECULATIVE)));
2960 if (!(current_sched_info->flags & DO_SPECULATION))
2962 if (!LOG_LINKS (next))
2967 *ts &= ~SPECULATIVE & ~HARD_DEP;
2969 link = LOG_LINKS (next);
2972 /* LOG_LINKS are maintained sorted.
2973 So if DEP_STATUS of the first dep is SPECULATIVE,
2974 then all other deps are speculative too. */
2975 if (DEP_STATUS (link) & SPECULATIVE)
2977 /* Now we've got NEXT with speculative deps only.
2978 1. Look at the deps to see what we have to do.
2979 2. Check if we can do 'todo'. */
2980 *ts = DEP_STATUS (link) & SPECULATIVE;
2981 while ((link = XEXP (link, 1)))
2982 *ts = ds_merge (*ts, DEP_STATUS (link) & SPECULATIVE);
2984 if (dep_weak (*ts) < spec_info->weakness_cutoff)
2985 /* Too few points. */
2986 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
2994 gcc_assert (*ts == old_ts
2995 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
2996 else if (current_sched_info->new_ready)
2997 *ts = current_sched_info->new_ready (next, *ts);
2999 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
3000 have its original pattern or changed (speculative) one. This is due
3001 to changing ebb in region scheduling.
3002 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
3003 has speculative pattern.
3005 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3006 control-speculative NEXT could have been discarded by sched-rgn.c
3007 (the same case as when discarded by can_schedule_ready_p ()). */
3009 if ((*ts & SPECULATIVE)
3010 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3011 need to change anything. */
3017 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3019 res = speculate_insn (next, *ts, &new_pat);
3024 /* It would be nice to change DEP_STATUS of all dependences,
3025 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3026 so we won't reanalyze anything. */
3027 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3031 /* We follow the rule that every speculative insn
3032 has non-null ORIG_PAT. */
3033 if (!ORIG_PAT (next))
3034 ORIG_PAT (next) = PATTERN (next);
3038 if (!ORIG_PAT (next))
3039 /* If we are going to overwrite the original pattern of insn, save it. */
3041 ORIG_PAT (next) = PATTERN (next);
3043 change_pattern (next, new_pat);
3051 /* We need to restore pattern only if (*ts == 0), because otherwise it is
3052 either correct (*ts & SPECULATIVE),
3053 or we simply don't care (*ts & HARD_DEP). */
3055 gcc_assert (!ORIG_PAT (next)
3056 || !RECOVERY_BLOCK (next)
3057 || RECOVERY_BLOCK (next) == EXIT_BLOCK_PTR);
3061 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3062 control-speculative NEXT could have been discarded by sched-rgn.c
3063 (the same case as when discarded by can_schedule_ready_p ()). */
3064 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3066 change_queue_index (next, QUEUE_NOWHERE);
3069 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !RECOVERY_BLOCK (next))
3070 /* We should change the pattern of every previously speculative
3071 instruction - and we determine if NEXT was speculative by using
3072 the ORIG_PAT field. Except in one case - simple checks have ORIG_PAT
3073 too, hence we also check for the RECOVERY_BLOCK. */
3075 change_pattern (next, ORIG_PAT (next));
3076 ORIG_PAT (next) = 0;
3079 if (sched_verbose >= 2)
3081 int s = TODO_SPEC (next);
3083 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3084 (*current_sched_info->print_insn) (next, 0));
3086 if (spec_info && spec_info->dump)
3089 fprintf (spec_info->dump, "; data-spec;");
3090 if (s & BEGIN_CONTROL)
3091 fprintf (spec_info->dump, "; control-spec;");
3092 if (s & BE_IN_CONTROL)
3093 fprintf (spec_info->dump, "; in-control-spec;");
3096 fprintf (sched_dump, "\n");
3099 adjust_priority (next);
3101 return fix_tick_ready (next);
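/* A minimal model (hypothetical helper) of the cutoff test in
   try_ready above: the merged weakness of all speculative deps is
   compared against spec_info->weakness_cutoff, which sched_init
   derives as PARAM_SCHED_SPEC_PROB_CUTOFF percent of MAX_DEP_WEAK;
   below the cutoff the insn is demoted to HARD_DEP.  */
static int
example_speculate_p (int merged_weakness, int cutoff_percent,
		     int max_dep_weak)
{
  int cutoff = (cutoff_percent * max_dep_weak) / 100;

  return merged_weakness >= cutoff;	/* Enough points to speculate.  */
}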
3104 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3106 fix_tick_ready (rtx next)
3111 link = RESOLVED_DEPS (next);
3117 tick = INSN_TICK (next);
3118 /* If tick is not equal to INVALID_TICK, then update
3119 INSN_TICK of NEXT with the most recent resolved dependence
3120 cost. Otherwise, recalculate it from scratch. */
3121 full_p = tick == INVALID_TICK;
3127 pro = XEXP (link, 0);
3128 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3130 tick1 = INSN_TICK (pro) + insn_cost (pro, link, next);
3134 while ((link = XEXP (link, 1)) && full_p);
3139 INSN_TICK (next) = tick;
3141 delay = tick - clock_var;
3143 delay = QUEUE_READY;
3145 change_queue_index (next, delay);
3150 /* Move NEXT to the proper queue list with (DELAY >= 1),
3151 or add it to the ready list (DELAY == QUEUE_READY),
3152 or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE). */
3154 change_queue_index (rtx next, int delay)
3156 int i = QUEUE_INDEX (next);
3158 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3160 gcc_assert (i != QUEUE_SCHEDULED);
3162 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3163 || (delay < 0 && delay == i))
3164 /* We have nothing to do. */
3167 /* Remove NEXT from wherever it is now. */
3168 if (i == QUEUE_READY)
3169 ready_remove_insn (next);
3171 queue_remove (next);
3173 /* Add it to the proper place. */
3174 if (delay == QUEUE_READY)
3175 ready_add (readyp, next, false);
3176 else if (delay >= 1)
3177 queue_insn (next, delay);
3179 if (sched_verbose >= 2)
3181 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3182 (*current_sched_info->print_insn) (next, 0));
3184 if (delay == QUEUE_READY)
3185 fprintf (sched_dump, " into ready\n");
3186 else if (delay >= 1)
3187 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3189 fprintf (sched_dump, " removed from ready or queue lists\n");
3193 /* INSN is being scheduled. Resolve the dependence between INSN and NEXT. */
3195 resolve_dep (rtx next, rtx insn)
3199 INSN_DEP_COUNT (next)--;
3201 dep = remove_list_elem (insn, &LOG_LINKS (next));
3202 XEXP (dep, 1) = RESOLVED_DEPS (next);
3203 RESOLVED_DEPS (next) = dep;
3205 gcc_assert ((INSN_DEP_COUNT (next) != 0 || !LOG_LINKS (next))
3206 && (LOG_LINKS (next) || INSN_DEP_COUNT (next) == 0));
3209 /* Extend H_I_D data. */
3213 /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
3214 pseudos which do not cross calls. */
3215 int new_max_uid = get_max_uid() + 1;
3217 h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
3218 old_max_uid = new_max_uid;
3220 if (targetm.sched.h_i_d_extended)
3221 targetm.sched.h_i_d_extended ();
3224 /* Extend READY, READY_TRY and CHOICE_STACK arrays.
3225 N_NEW_INSNS is the number of additional elements to allocate. */
3227 extend_ready (int n_new_insns)
3231 readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
3232 readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);
3234 ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
3235 rgn_n_insns + 1, sizeof (char));
3237 rgn_n_insns += n_new_insns;
3239 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3242 for (i = rgn_n_insns; n_new_insns--; i--)
3243 choice_stack[i].state = xmalloc (dfa_state_size);
3246 /* Extend global scheduler structures (those that live across calls to
3247 schedule_block) to include information about just emitted INSN. */
3249 extend_global (rtx insn)
3251 gcc_assert (INSN_P (insn));
3252 /* These structures have scheduler scope. */
3256 extend_dependency_caches (1, 0);
3259 /* Extends global and local scheduler structures to include information
3260 about just emitted INSN. */
3262 extend_all (rtx insn)
3264 extend_global (insn);
3266 /* These structures have block scope. */
3269 (*current_sched_info->add_remove_insn) (insn, 0);
3272 /* Initialize h_i_d entry of the new INSN with default values.
3273 Values that are not explicitly initialized here hold zero. */
3275 init_h_i_d (rtx insn)
3277 INSN_LUID (insn) = luid++;
3278 INSN_COST (insn) = -1;
3279 TODO_SPEC (insn) = HARD_DEP;
3280 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
3281 INSN_TICK (insn) = INVALID_TICK;
3282 INTER_TICK (insn) = INVALID_TICK;
3283 find_insn_reg_weight1 (insn);
3286 /* Generates recovery code for INSN. */
3288 generate_recovery_code (rtx insn)
3290 if (TODO_SPEC (insn) & BEGIN_SPEC)
3291 begin_speculative_block (insn);
3293 /* Here we have insn with no dependencies to
3294 instructions other than CHECK_SPEC ones. */
3296 if (TODO_SPEC (insn) & BE_IN_SPEC)
3297 add_to_speculative_block (insn);
3301 Tries to add speculative dependencies of type FS between instructions
3302 in the LINK list and TWIN. */
3304 process_insn_depend_be_in_spec (rtx link, rtx twin, ds_t fs)
3306 for (; link; link = XEXP (link, 1))
3311 consumer = XEXP (link, 0);
3313 ds = DEP_STATUS (link);
3315 if (/* If we want to create speculative dep. */
3317 /* And we can do that because this is a true dep. */
3318 && (ds & DEP_TYPES) == DEP_TRUE)
3320 gcc_assert (!(ds & BE_IN_SPEC));
3322 if (/* If this dep can be overcome with 'begin speculation'. */
3324 /* Then we have a choice: keep the dep 'begin speculative'
3325 or transform it into 'be in speculative'. */
3327 if (/* In try_ready we assert that if insn once became ready
3328 it can be removed from the ready (or queue) list only
3329 due to a backend decision. Hence we can't let the
3330 probability of the speculative dep decrease. */
3331 dep_weak (ds) <= dep_weak (fs))
3332 /* Transform it to be in speculative. */
3333 ds = (ds & ~BEGIN_SPEC) | fs;
3336 /* Mark the dep as 'be in speculative'. */
3340 add_back_forw_dep (consumer, twin, REG_NOTE_KIND (link), ds);
3344 /* Generates recovery code for BEGIN speculative INSN. */
3346 begin_speculative_block (rtx insn)
3348 if (TODO_SPEC (insn) & BEGIN_DATA)
3350 if (TODO_SPEC (insn) & BEGIN_CONTROL)
3353 create_check_block_twin (insn, false);
3355 TODO_SPEC (insn) &= ~BEGIN_SPEC;
3358 /* Generates recovery code for BE_IN speculative INSN. */
3360 add_to_speculative_block (rtx insn)
3363 rtx link, twins = NULL;
3365 ts = TODO_SPEC (insn);
3366 gcc_assert (!(ts & ~BE_IN_SPEC));
3368 if (ts & BE_IN_DATA)
3370 if (ts & BE_IN_CONTROL)
3373 TODO_SPEC (insn) &= ~BE_IN_SPEC;
3374 gcc_assert (!TODO_SPEC (insn));
3376 DONE_SPEC (insn) |= ts;
3378 /* First we convert all simple checks to branchy ones. */
3379 for (link = LOG_LINKS (insn); link;)
3383 check = XEXP (link, 0);
3385 if (RECOVERY_BLOCK (check))
3387 create_check_block_twin (check, true);
3388 link = LOG_LINKS (insn);
3391 link = XEXP (link, 1);
3394 clear_priorities (insn);
3398 rtx link, check, twin;
3401 link = LOG_LINKS (insn);
3402 gcc_assert (!(DEP_STATUS (link) & BEGIN_SPEC)
3403 && (DEP_STATUS (link) & BE_IN_SPEC)
3404 && (DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3406 check = XEXP (link, 0);
3407 gcc_assert (!RECOVERY_BLOCK (check) && !ORIG_PAT (check)
3408 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
3410 rec = BLOCK_FOR_INSN (check);
3412 twin = emit_insn_before (copy_rtx (PATTERN (insn)), BB_END (rec));
3413 extend_global (twin);
3415 RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3417 if (sched_verbose && spec_info->dump)
3418 /* INSN_BB (insn) isn't determined for twin insns yet.
3419 So we can't use current_sched_info->print_insn. */
3420 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3421 INSN_UID (twin), rec->index);
3423 twins = alloc_INSN_LIST (twin, twins);
3425 /* Add dependences between TWIN and all appropriate
3426 instructions from REC. */
3429 add_back_forw_dep (twin, check, REG_DEP_TRUE, DEP_TRUE);
3433 link = XEXP (link, 1);
3436 check = XEXP (link, 0);
3437 if (BLOCK_FOR_INSN (check) == rec)
3447 process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, ts);
3449 for (link = LOG_LINKS (insn); link;)
3451 check = XEXP (link, 0);
3453 if (BLOCK_FOR_INSN (check) == rec)
3455 delete_back_forw_dep (insn, check);
3456 link = LOG_LINKS (insn);
3459 link = XEXP (link, 1);
3462 while (LOG_LINKS (insn));
3464 /* We can't add the dependence between insn and twin earlier because
3465 that would make twin appear in the INSN_DEPEND (insn). */
3470 twin = XEXP (twins, 0);
3471 calc_priorities (twin);
3472 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3474 twin = XEXP (twins, 1);
3475 free_INSN_LIST_node (twins);
3480 /* Extend the array pointed to by P, filling only the new part with zeros. */
3482 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
3484 gcc_assert (new_nmemb >= old_nmemb);
3485 p = XRESIZEVAR (void, p, new_nmemb * size);
3486 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
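/* Example use of xrecalloc (mirroring how extend_h_i_d grows h_i_d
   above): enlarge a per-uid array when the maximal uid grows, zeroing
   only the new tail.  FLAGS, OLD_MAX and NEW_MAX are hypothetical
   names used only for illustration:

     flags = xrecalloc (flags, new_max, old_max, sizeof (*flags));  */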
3490 /* Return the probability of speculation success for the speculation status DS. */
3498 dt = FIRST_SPEC_TYPE;
3503 res *= (ds_t) get_dep_weak (ds, dt);
3507 if (dt == LAST_SPEC_TYPE)
3509 dt <<= SPEC_TYPE_SHIFT;
3515 res /= MAX_DEP_WEAK;
3517 if (res < MIN_DEP_WEAK)
3520 gcc_assert (res <= MAX_DEP_WEAK);
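/* Illustrative arithmetic for the product above (values hypothetical):
   weaknesses act as fixed-point probabilities scaled by MAX_DEP_WEAK,
   so with MAX_DEP_WEAK == 16, combining two speculation types of
   weakness 12 and 8 yields 12 * 8 / 16 == 6, i.e. the success
   probabilities 12/16 and 8/16 multiply to 6/16.  */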
3526 Find fallthru edge from PRED. */
3528 find_fallthru_edge (basic_block pred)
3534 succ = pred->next_bb;
3535 gcc_assert (succ->prev_bb == pred);
3537 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
3539 FOR_EACH_EDGE (e, ei, pred->succs)
3540 if (e->flags & EDGE_FALLTHRU)
3542 gcc_assert (e->dest == succ);
3548 FOR_EACH_EDGE (e, ei, succ->preds)
3549 if (e->flags & EDGE_FALLTHRU)
3551 gcc_assert (e->src == pred);
3559 /* Initialize BEFORE_RECOVERY variable. */
3561 init_before_recovery (void)
3566 last = EXIT_BLOCK_PTR->prev_bb;
3567 e = find_fallthru_edge (last);
3571 /* We create two basic blocks:
3572 1. A single-instruction block inserted right after E->SRC, which jumps to
3574 2. An empty block inserted right before EXIT_BLOCK.
3575 Between these two blocks recovery blocks will be emitted. */
3577 basic_block single, empty;
3580 single = create_empty_bb (last);
3581 empty = create_empty_bb (single);
3583 single->count = last->count;
3584 empty->count = last->count;
3585 single->frequency = last->frequency;
3586 empty->frequency = last->frequency;
3587 BB_COPY_PARTITION (single, last);
3588 BB_COPY_PARTITION (empty, last);
3590 redirect_edge_succ (e, single);
3591 make_single_succ_edge (single, empty, 0);
3592 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
3593 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
3595 label = block_label (empty);
3596 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
3597 JUMP_LABEL (x) = label;
3598 LABEL_NUSES (label)++;
3601 emit_barrier_after (x);
3603 add_block (empty, 0);
3604 add_block (single, 0);
3606 before_recovery = single;
3608 if (sched_verbose >= 2 && spec_info->dump)
3609 fprintf (spec_info->dump,
3610 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3611 last->index, single->index, empty->index);
3614 before_recovery = last;
3617 /* Returns a new recovery block. */
3619 create_recovery_block (void)
3624 added_recovery_block_p = true;
3626 if (!before_recovery)
3627 init_before_recovery ();
3629 label = gen_label_rtx ();
3630 gcc_assert (BARRIER_P (NEXT_INSN (BB_END (before_recovery))));
3631 label = emit_label_after (label, NEXT_INSN (BB_END (before_recovery)));
3633 rec = create_basic_block (label, label, before_recovery);
3634 emit_barrier_after (BB_END (rec));
3636 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
3637 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
3639 if (sched_verbose && spec_info->dump)
3640 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
3643 before_recovery = rec;
3648 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
3649 INSN is a simple check that should be converted to a branchy one. */
3651 create_check_block_twin (rtx insn, bool mutate_p)
3654 rtx label, check, twin, link;
3657 gcc_assert (ORIG_PAT (insn)
3659 || (RECOVERY_BLOCK (insn) == EXIT_BLOCK_PTR
3660 && !(TODO_SPEC (insn) & SPECULATIVE))));
3662 /* Create recovery block. */
3663 if (mutate_p || targetm.sched.needs_block_p (insn))
3665 rec = create_recovery_block ();
3666 label = BB_HEAD (rec);
3670 rec = EXIT_BLOCK_PTR;
3675 check = targetm.sched.gen_check (insn, label, mutate_p);
3677 if (rec != EXIT_BLOCK_PTR)
3679 /* To have mem_reg alive at the beginning of second_bb,
3680 we emit check BEFORE insn, so insn after splitting
3681 insn will be at the beginning of second_bb, which will
3682 provide us with the correct life information. */
3683 check = emit_jump_insn_before (check, insn);
3684 JUMP_LABEL (check) = label;
3685 LABEL_NUSES (label)++;
3688 check = emit_insn_before (check, insn);
3690 /* Extend data structures. */
3692 RECOVERY_BLOCK (check) = rec;
3694 if (sched_verbose && spec_info->dump)
3695 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
3696 (*current_sched_info->print_insn) (check, 0));
3698 gcc_assert (ORIG_PAT (insn));
3700 /* Initialize TWIN (twin is a duplicate of the original instruction
3701 in the recovery block). */
3702 if (rec != EXIT_BLOCK_PTR)
3706 for (link = RESOLVED_DEPS (insn); link; link = XEXP (link, 1))
3707 if (DEP_STATUS (link) & DEP_OUTPUT)
3709 RESOLVED_DEPS (check) =
3710 alloc_DEPS_LIST (XEXP (link, 0), RESOLVED_DEPS (check), DEP_TRUE);
3711 PUT_REG_NOTE_KIND (RESOLVED_DEPS (check), REG_DEP_TRUE);
3714 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
3715 extend_global (twin);
3717 if (sched_verbose && spec_info->dump)
3718 /* INSN_BB (insn) isn't determined for twin insns yet.
3719 So we can't use current_sched_info->print_insn. */
3720 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3721 INSN_UID (twin), rec->index);
3725 ORIG_PAT (check) = ORIG_PAT (insn);
3726 HAS_INTERNAL_DEP (check) = 1;
3728 /* ??? We probably should change all OUTPUT dependencies to (TRUE | OUTPUT). */
3732 RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3734 if (rec != EXIT_BLOCK_PTR)
3735 /* In case of branchy check, fix CFG. */
3737 basic_block first_bb, second_bb;
3742 first_bb = BLOCK_FOR_INSN (check);
3743 e = split_block (first_bb, check);
3744 /* split_block emits a note if *check == BB_END. Probably it
3745 is better to rip that note off. */
3746 gcc_assert (e->src == first_bb);
3747 second_bb = e->dest;
3749 /* This fixes the incoming edge. */
3750 /* ??? Which other flags should be specified? */
3751 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
3752 /* The partition type is the same if it is "unpartitioned". */
3753 edge_flags = EDGE_CROSSING;
3757 e = make_edge (first_bb, rec, edge_flags);
3759 add_block (second_bb, first_bb);
3761 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
3762 label = block_label (second_bb);
3763 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
3764 JUMP_LABEL (jump) = label;
3765 LABEL_NUSES (label)++;
3766 extend_global (jump);
3768 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
3769 /* The partition type is the same if it is "unpartitioned". */
3771 /* Rewritten from cfgrtl.c. */
3772 if (flag_reorder_blocks_and_partition
3773 && targetm.have_named_sections
3774 /*&& !any_condjump_p (jump)*/)
3775 /* any_condjump_p (jump) == false.
3776 We don't need the same note for the check because
3777 any_condjump_p (check) == true. */
3779 REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
3783 edge_flags = EDGE_CROSSING;
3788 make_single_succ_edge (rec, second_bb, edge_flags);
3790 add_block (rec, EXIT_BLOCK_PTR);
3793 /* Move backward dependences from INSN to CHECK and
3794 move forward dependences from INSN to TWIN. */
3795 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
3799 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
3800 check --TRUE--> producer ??? or ANTI ???
3801 twin --TRUE--> producer
3802 twin --ANTI--> check
3804 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
3805 check --ANTI--> producer
3806 twin --ANTI--> producer
3807 twin --ANTI--> check
3809 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
3810 check ~~TRUE~~> producer
3811 twin ~~TRUE~~> producer
3812 twin --ANTI--> check */
3814 ds = DEP_STATUS (link);
3816 if (ds & BEGIN_SPEC)
3818 gcc_assert (!mutate_p);
3822 if (rec != EXIT_BLOCK_PTR)
3824 add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3825 add_back_forw_dep (twin, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3828 add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3831 for (link = LOG_LINKS (insn); link;)
3832 if ((DEP_STATUS (link) & BEGIN_SPEC)
3834 /* We can delete this dep only if we totally overcome it with
3835 BEGIN_SPECULATION. */
3837 delete_back_forw_dep (insn, XEXP (link, 0));
3838 link = LOG_LINKS (insn);
3841 link = XEXP (link, 1);
3845 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
3848 gcc_assert (!DONE_SPEC (insn));
3852 ds_t ts = TODO_SPEC (insn);
3854 DONE_SPEC (insn) = ts & BEGIN_SPEC;
3855 CHECK_SPEC (check) = ts & BEGIN_SPEC;
3857 if (ts & BEGIN_DATA)
3858 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
3859 if (ts & BEGIN_CONTROL)
3860 fs = set_dep_weak (fs, BE_IN_CONTROL, get_dep_weak (ts, BEGIN_CONTROL));
3863 CHECK_SPEC (check) = CHECK_SPEC (insn);
3865 /* Future speculations: call the helper. */
3866 process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, fs);
3868 if (rec != EXIT_BLOCK_PTR)
3870 /* Which types of dependencies we should use here is, generally,
3871 a machine-dependent question... But, for now,
3876 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE);
3877 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3881 if (spec_info->dump)
3882 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
3883 (*current_sched_info->print_insn) (insn, 0));
3885 for (link = INSN_DEPEND (insn); link; link = INSN_DEPEND (insn))
3886 delete_back_forw_dep (XEXP (link, 0), insn);
3888 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
3891 sched_remove_insn (insn);
3894 add_back_forw_dep (twin, check, REG_DEP_ANTI, DEP_ANTI);
3897 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
3900 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
3901 because it'll be done later in add_to_speculative_block. */
3903 clear_priorities (twin);
3904 calc_priorities (twin);
3908 /* Removes dependencies between instructions in the recovery block REC
3909 and usual region instructions. It keeps inner dependences so it
3910 won't be necessary to recompute them. */
3912 fix_recovery_deps (basic_block rec)
3914 rtx note, insn, link, jump, ready_list = 0;
3915 bitmap_head in_ready;
3917 bitmap_initialize (&in_ready, 0);
3919 /* NOTE - a basic block note. */
3920 note = NEXT_INSN (BB_HEAD (rec));
3921 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
3922 insn = BB_END (rec);
3923 gcc_assert (JUMP_P (insn));
3924 insn = PREV_INSN (insn);
3928 for (link = INSN_DEPEND (insn); link;)
3932 consumer = XEXP (link, 0);
3934 if (BLOCK_FOR_INSN (consumer) != rec)
3936 delete_back_forw_dep (consumer, insn);
3938 if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
3940 ready_list = alloc_INSN_LIST (consumer, ready_list);
3941 bitmap_set_bit (&in_ready, INSN_LUID (consumer));
3944 link = INSN_DEPEND (insn);
3948 gcc_assert ((DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3950 link = XEXP (link, 1);
3954 insn = PREV_INSN (insn);
3956 while (insn != note);
3958 bitmap_clear (&in_ready);
3960 /* Try to add instructions to the ready or queue list. */
3961 for (link = ready_list; link; link = XEXP (link, 1))
3962 try_ready (XEXP (link, 0));
3963 free_INSN_LIST_list (&ready_list);
3965 /* Fix the jump's dependences. */
3966 insn = BB_HEAD (rec);
3967 jump = BB_END (rec);
3969 gcc_assert (LABEL_P (insn));
3970 insn = NEXT_INSN (insn);
3972 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
3973 add_jump_dependencies (insn, jump);
3976 /* The function saves line notes at the beginning of block B. */
3978 associate_line_notes_with_blocks (basic_block b)
3982 for (line = BB_HEAD (b); line; line = PREV_INSN (line))
3983 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
3985 line_note_head[b->index] = line;
3988 /* Do a forward search as well, since we won't get to see the first
3989 notes in a basic block. */
3990 for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
3994 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
3995 line_note_head[b->index] = line;
3999 /* Changes pattern of the INSN to NEW_PAT. */
4001 change_pattern (rtx insn, rtx new_pat)
4005 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4007 /* Invalidate INSN_COST, so it'll be recalculated. */
4008 INSN_COST (insn) = -1;
4009 /* Invalidate INSN_TICK, so it'll be recalculated. */
4010 INSN_TICK (insn) = INVALID_TICK;
4011 dfa_clear_single_insn_cache (insn);
4015 /* -1 - can't speculate,
4016 0 - for speculation with REQUEST mode it is OK to use
4017 current instruction pattern,
4018 1 - need to change pattern for *NEW_PAT to be speculative. */
4020 speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4022 gcc_assert (current_sched_info->flags & DO_SPECULATION
4023 && (request & SPECULATIVE));
4025 if (!NONJUMP_INSN_P (insn)
4026 || HAS_INTERNAL_DEP (insn)
4027 || SCHED_GROUP_P (insn)
4028 || side_effects_p (PATTERN (insn))
4029 || (request & spec_info->mask) != request)
4032 gcc_assert (!RECOVERY_BLOCK (insn));
4034 if (request & BE_IN_SPEC)
4036 if (may_trap_p (PATTERN (insn)))
4039 if (!(request & BEGIN_SPEC))
4043 return targetm.sched.speculate_insn (insn, request & BEGIN_SPEC, new_pat);
4046 /* Print some information about block BB, which starts with HEAD and
4047 ends with TAIL, before scheduling it.
4048 I is zero if the scheduler is about to start with a fresh ebb. */
4050 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4053 fprintf (sched_dump,
4054 ";; ======================================================\n");
4056 fprintf (sched_dump,
4057 ";; =====================ADVANCING TO=====================\n");
4058 fprintf (sched_dump,
4059 ";; -- basic block %d from %d to %d -- %s reload\n",
4060 bb->index, INSN_UID (head), INSN_UID (tail),
4061 (reload_completed ? "after" : "before"));
4062 fprintf (sched_dump,
4063 ";; ======================================================\n");
4064 fprintf (sched_dump, "\n");
4067 /* Unlink basic block notes and labels and save them, so they
4068 can be easily restored. We unlink basic block notes in EBBs to
4069 provide backward compatibility with the previous code, as target backends
4070 assume that there'll be only instructions between
4071 current_sched_info->{head and tail}. We restore these notes as soon as we can.
4073 FIRST (LAST) is the first (last) basic block in the ebb.
4074 NB: In the usual case (FIRST == LAST) nothing is really done. */
4076 unlink_bb_notes (basic_block first, basic_block last)
4078 /* We DON'T unlink basic block notes of the first block in the ebb. */
4082 bb_header = xmalloc (last_basic_block * sizeof (*bb_header));
4084 /* Make a sentinel. */
4085 if (last->next_bb != EXIT_BLOCK_PTR)
4086 bb_header[last->next_bb->index] = 0;
4088 first = first->next_bb;
4091 rtx prev, label, note, next;
4093 label = BB_HEAD (last);
4094 if (LABEL_P (label))
4095 note = NEXT_INSN (label);
4098 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4100 prev = PREV_INSN (label);
4101 next = NEXT_INSN (note);
4102 gcc_assert (prev && next);
4104 NEXT_INSN (prev) = next;
4105 PREV_INSN (next) = prev;
4107 bb_header[last->index] = label;
4112 last = last->prev_bb;
4117 /* Restore basic block notes.
4118 FIRST is the first basic block in the ebb. */
4120 restore_bb_notes (basic_block first)
4125 /* We DON'T unlink basic block notes of the first block in the ebb. */
4126 first = first->next_bb;
4127 /* Remember: FIRST is actually the second basic block in the ebb. */
4129 while (first != EXIT_BLOCK_PTR
4130 && bb_header[first->index])
4132 rtx prev, label, note, next;
4134 label = bb_header[first->index];
4135 prev = PREV_INSN (label);
4136 next = NEXT_INSN (prev);
4138 if (LABEL_P (label))
4139 note = NEXT_INSN (label);
4142 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4144 bb_header[first->index] = 0;
4146 NEXT_INSN (prev) = label;
4147 NEXT_INSN (note) = next;
4148 PREV_INSN (next) = note;
4150 first = first->next_bb;
4157 /* Extend per basic block data structures of the scheduler.
4158 If BB is NULL, initialize structures for the whole CFG.
4159 Otherwise, initialize them for the just created BB. */
4161 extend_bb (basic_block bb)
4165 if (write_symbols != NO_DEBUG)
4167 /* Save-line-note-head:
4168 Determine the line-number at the start of each basic block.
4169 This must be computed and saved now, because after a basic block's
4170 predecessor has been scheduled, it is impossible to accurately
4171 determine the correct line number for the first insn of the block. */
4172 line_note_head = xrecalloc (line_note_head, last_basic_block,
4173 old_last_basic_block,
4174 sizeof (*line_note_head));
4177 associate_line_notes_with_blocks (bb);
4180 associate_line_notes_with_blocks (bb);
4183 old_last_basic_block = last_basic_block;
4185 if (current_sched_info->flags & USE_GLAT)
4187 glat_start = xrealloc (glat_start,
4188 last_basic_block * sizeof (*glat_start));
4189 glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
4192 /* The following is done to keep current_sched_info->next_tail non null. */
4194 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4195 if (NEXT_INSN (insn) == 0
4198 /* Don't emit a NOTE if it would end up before a BARRIER. */
4199 && !BARRIER_P (NEXT_INSN (insn))))
4201 emit_note_after (NOTE_INSN_DELETED, insn);
4202 /* Make insn appear outside BB. */
4203 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4207 /* Add a basic block BB to extended basic block EBB.
4208 If EBB is EXIT_BLOCK_PTR, then BB is a recovery block.
4209 If EBB is NULL, then BB should be a new region. */
4211 add_block (basic_block bb, basic_block ebb)
4213 gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO
4214 && bb->il.rtl->global_live_at_start == 0
4215 && bb->il.rtl->global_live_at_end == 0);
4219 glat_start[bb->index] = 0;
4220 glat_end[bb->index] = 0;
4222 if (current_sched_info->add_block)
4223 /* This changes only data structures of the front-end. */
4224 current_sched_info->add_block (bb, ebb);
4228 Fix CFG after both in- and inter-block movement of
4229 control_flow_insn_p JUMP. */
4231 fix_jump_move (rtx jump)
4233 basic_block bb, jump_bb, jump_bb_next;
4235 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4236 jump_bb = BLOCK_FOR_INSN (jump);
4237 jump_bb_next = jump_bb->next_bb;
4239 gcc_assert (current_sched_info->flags & SCHED_EBB
4240 || (RECOVERY_BLOCK (jump)
4241 && RECOVERY_BLOCK (jump) != EXIT_BLOCK_PTR));
4243 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4244 /* if jump_bb_next is not empty. */
4245 BB_END (jump_bb) = BB_END (jump_bb_next);
4247 if (BB_END (bb) != PREV_INSN (jump))
4248 /* Then there are instructions after jump that should be placed to jump_bb_next. */
4250 BB_END (jump_bb_next) = BB_END (bb);
4252 /* Otherwise jump_bb_next is empty. */
4253 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4255 /* To make assertion in move_insn happy. */
4256 BB_END (bb) = PREV_INSN (jump);
4258 update_bb_for_insn (jump_bb_next);
4261 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
4263 move_block_after_check (rtx jump)
4265 basic_block bb, jump_bb, jump_bb_next;
4268 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4269 jump_bb = BLOCK_FOR_INSN (jump);
4270 jump_bb_next = jump_bb->next_bb;
4272 update_bb_for_insn (jump_bb);
4274 gcc_assert (RECOVERY_BLOCK (jump)
4275 || RECOVERY_BLOCK (BB_END (jump_bb_next)));
4277 unlink_block (jump_bb_next);
4278 link_block (jump_bb_next, bb);
4282 move_succs (&(jump_bb->succs), bb);
4283 move_succs (&(jump_bb_next->succs), jump_bb);
4284 move_succs (&t, jump_bb_next);
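/* The three calls above rotate the successor vectors cyclically:
   BB takes JUMP_BB's successors, JUMP_BB takes JUMP_BB_NEXT's, and
   JUMP_BB_NEXT takes BB's old ones (presumably saved in T by the
   statements elided from this listing), matching the new linear
   order BB, JUMP_BB_NEXT, JUMP_BB established by link_block.  */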
4286 if (current_sched_info->fix_recovery_cfg)
4287 current_sched_info->fix_recovery_cfg
4288 (bb->index, jump_bb->index, jump_bb_next->index);
4291 /* Helper function for move_block_after_check.
4292 This function attaches the edge vector pointed to by SUCCSP to block TO. */
4295 move_succs (VEC(edge,gc) **succsp, basic_block to)
4300 gcc_assert (to->succs == 0);
4302 to->succs = *succsp;
4304 FOR_EACH_EDGE (e, ei, to->succs)
4310 /* Initialize GLAT (global_live_at_{start, end}) structures.
4311 GLAT structures are used to substitute global_live_{start, end}
4312 regsets during scheduling. This is necessary to use such functions as
4313 split_block (), as they assume consistency of register live information. */
4323 /* Helper function for init_glat. */
4325 init_glat1 (basic_block bb)
4327 gcc_assert (bb->il.rtl->global_live_at_start != 0
4328 && bb->il.rtl->global_live_at_end != 0);
4330 glat_start[bb->index] = bb->il.rtl->global_live_at_start;
4331 glat_end[bb->index] = bb->il.rtl->global_live_at_end;
4333 if (current_sched_info->flags & DETACH_LIFE_INFO)
4335 bb->il.rtl->global_live_at_start = 0;
4336 bb->il.rtl->global_live_at_end = 0;
4340 /* Attach reg_live_info back to basic blocks.
4341 Also save regsets that should not have been changed during scheduling,
4342 for checking purposes (see check_reg_live). */
4344 attach_life_info (void)
4349 attach_life_info1 (bb);
4352 /* Helper function for attach_life_info. */
4354 attach_life_info1 (basic_block bb)
4356 gcc_assert (bb->il.rtl->global_live_at_start == 0
4357 && bb->il.rtl->global_live_at_end == 0);
4359 if (glat_start[bb->index])
4361 gcc_assert (glat_end[bb->index]);
4363 bb->il.rtl->global_live_at_start = glat_start[bb->index];
4364 bb->il.rtl->global_live_at_end = glat_end[bb->index];
4366 /* Make them NULL, so they won't be freed in free_glat. */
4367 glat_start[bb->index] = 0;
4368 glat_end[bb->index] = 0;
4370 #ifdef ENABLE_CHECKING
4371 if (bb->index < NUM_FIXED_BLOCKS
4372 || current_sched_info->region_head_or_leaf_p (bb, 0))
4374 glat_start[bb->index] = ALLOC_REG_SET (&reg_obstack);
4375 COPY_REG_SET (glat_start[bb->index],
4376 bb->il.rtl->global_live_at_start);
4379 if (bb->index < NUM_FIXED_BLOCKS
4380 || current_sched_info->region_head_or_leaf_p (bb, 1))
4382 glat_end[bb->index] = ALLOC_REG_SET (&reg_obstack);
4383 COPY_REG_SET (glat_end[bb->index], bb->il.rtl->global_live_at_end);
4389 gcc_assert (!glat_end[bb->index]);
4391 bb->il.rtl->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
4392 bb->il.rtl->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
4396 /* Free GLAT information. */
4400 #ifdef ENABLE_CHECKING
4401 if (current_sched_info->flags & DETACH_LIFE_INFO)
4407 if (glat_start[bb->index])
4408 FREE_REG_SET (glat_start[bb->index]);
4409 if (glat_end[bb->index])
4410 FREE_REG_SET (glat_end[bb->index]);
4419 /* Remove INSN from the instruction stream.
4420 INSN should not have any dependencies. */
4422 sched_remove_insn (rtx insn)
4424 change_queue_index (insn, QUEUE_NOWHERE);
4425 current_sched_info->add_remove_insn (insn, 1);
4429 /* Clear the priorities of all instructions that are
4430 forward dependent on INSN. */
4432 clear_priorities (rtx insn)
4436 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4440 pro = XEXP (link, 0);
4441 if (INSN_PRIORITY_KNOWN (pro))
4443 INSN_PRIORITY_KNOWN (pro) = 0;
4444 clear_priorities (pro);
4449 /* Recompute the priorities of instructions whose priorities might have been
4450 changed due to changes in INSN. */
4452 calc_priorities (rtx insn)
4456 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4460 pro = XEXP (link, 0);
4461 if (!INSN_PRIORITY_KNOWN (pro))
4464 calc_priorities (pro);
4470 /* Add dependences between JUMP and other instructions in the recovery
4471 block. INSN is the first insn in the recovery block. */
4473 add_jump_dependencies (rtx insn, rtx jump)
4477 insn = NEXT_INSN (insn);
4481 if (!INSN_DEPEND (insn))
4482 add_back_forw_dep (jump, insn, REG_DEP_ANTI, DEP_ANTI);
4485 gcc_assert (LOG_LINKS (jump));
4488 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
4490 bb_note (basic_block bb)
4494 note = BB_HEAD (bb);
4496 note = NEXT_INSN (note);
4498 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4502 #ifdef ENABLE_CHECKING
4503 extern void debug_spec_status (ds_t);
4505 /* Dump information about the dependence status S. */
4507 debug_spec_status (ds_t s)
4512 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak (s, BEGIN_DATA));
4514 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak (s, BE_IN_DATA));
4515 if (s & BEGIN_CONTROL)
4516 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak (s, BEGIN_CONTROL));
4517 if (s & BE_IN_CONTROL)
4518 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak (s, BE_IN_CONTROL));
4521 fprintf (f, "HARD_DEP; ");
4524 fprintf (f, "DEP_TRUE; ");
4526 fprintf (f, "DEP_ANTI; ");
4528 fprintf (f, "DEP_OUTPUT; ");
4533 /* Helper function for check_cfg.
4534 Return nonzero if the edge vector pointed to by EL has an edge with TYPE in its flags. */
4537 has_edge_p (VEC(edge,gc) *el, int type)
4542 FOR_EACH_EDGE (e, ei, el)
4543 if (e->flags & type)
4548 /* Check a few properties of the CFG between HEAD and TAIL.
4549 If HEAD (TAIL) is NULL check from the beginning (till the end) of the
4550 instruction stream. */
4552 check_cfg (rtx head, rtx tail)
4556 int not_first = 0, not_last;
4559 head = get_insns ();
4561 tail = get_last_insn ();
4562 next_tail = NEXT_INSN (tail);
4566 not_last = head != tail;
4569 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
4571 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
4574 || (NOTE_INSN_BASIC_BLOCK_P (head)
4576 || (not_first && !LABEL_P (PREV_INSN (head))))))
4578 gcc_assert (bb == 0);
4579 bb = BLOCK_FOR_INSN (head);
4581 gcc_assert (BB_HEAD (bb) == head);
4583 /* This is the case of a jump table. See inside_basic_block_p (). */
4584 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
4589 gcc_assert (!inside_basic_block_p (head));
4590 head = NEXT_INSN (head);
4594 gcc_assert (inside_basic_block_p (head)
4596 gcc_assert (BLOCK_FOR_INSN (head) == bb);
4600 head = NEXT_INSN (head);
4601 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
4605 if (control_flow_insn_p (head))
4607 gcc_assert (BB_END (bb) == head);
4609 if (any_uncondjump_p (head))
4610 gcc_assert (EDGE_COUNT (bb->succs) == 1
4611 && BARRIER_P (NEXT_INSN (head)));
4612 else if (any_condjump_p (head))
4613 gcc_assert (EDGE_COUNT (bb->succs) > 1
4614 && !BARRIER_P (NEXT_INSN (head)));
4616 if (BB_END (bb) == head)
4618 if (EDGE_COUNT (bb->succs) > 1)
4619 gcc_assert (control_flow_insn_p (head)
4620 || has_edge_p (bb->succs, EDGE_COMPLEX));
4624 head = NEXT_INSN (head);
4630 while (head != next_tail);
4632 gcc_assert (bb == 0);
4635 /* Perform a few consistency checks of flags in different data structures. */
4637 check_sched_flags (void)
4639 unsigned int f = current_sched_info->flags;
4641 if (flag_sched_stalled_insns)
4642 gcc_assert (!(f & DO_SPECULATION));
4643 if (f & DO_SPECULATION)
4644 gcc_assert (!flag_sched_stalled_insns
4645 && (f & DETACH_LIFE_INFO)
4647 && spec_info->mask);
4648 if (f & DETACH_LIFE_INFO)
4649 gcc_assert (f & USE_GLAT);
4652 /* Check global_live_at_{start, end} regsets.
4653 If FATAL_P is TRUE, then abort execution at the first failure.
4654 Otherwise, print diagnostics to STDERR (this mode is for calling from the debugger). */
4657 check_reg_live (bool fatal_p)
4669 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_start,
4674 gcc_assert (!fatal_p);
4676 fprintf (stderr, ";; check_reg_live_at_start (%d) failed.\n", i);
4682 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_end,
4687 gcc_assert (!fatal_p);
4689 fprintf (stderr, ";; check_reg_live_at_end (%d) failed.\n", i);
4694 #endif /* ENABLE_CHECKING */
4696 #endif /* INSN_SCHEDULING */