/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.
   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots as well as possible.
   The following list shows the order in which we want to break ties
   among insns in the ready list; each criterion only decides when all
   earlier ones tie:

   1.  choose the insn with the longest path to the end of the bb;
   2.  choose the insn with the least contribution to register pressure;
   3.  prefer in-block over interblock motion;
   4.  prefer useful over speculative motion;
   5.  choose the insn with the largest control flow probability;
   6.  choose the insn with the least dependences upon the previously
   scheduled insn; or finally
   7.  choose the insn which has the most insns dependent on it;
   8.  choose the insn with the lowest UID.
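
   A minimal standalone sketch (not part of this pass) of how such a
   tie-breaking cascade can be expressed as a qsort comparator follows;
   the "toy_insn" type and its fields are hypothetical stand-ins for
   the real insn attributes used by rank_for_schedule later in this
   file.  As there, the x/y operands are swapped because the ready
   list is kept in ascending priority order.

#if 0
struct toy_insn
{
  int priority;		/* longest path to end of bb (rule 1) */
  int reg_weight;	/* contribution to register pressure (rule 2) */
  int dep_count;	/* number of insns depending on this one (rule 7) */
  int uid;		/* original order, the final tie-break (rule 8) */
};

static int
toy_rank (const void *x, const void *y)
{
  const struct toy_insn *a = *(const struct toy_insn *const *) y;
  const struct toy_insn *b = *(const struct toy_insn *const *) x;
  int val;

  /* Each criterion only decides when all earlier ones tie.  */
  if ((val = b->priority - a->priority))
    return val;
  if ((val = a->reg_weight - b->reg_weight))
    return val;
  if ((val = b->dep_count - a->dep_count))
    return val;
  return a->uid - b->uid;	/* keep the sort stable */
}
#endif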
   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS backward
   dependences.  LOG_LINKS are translated into INSN_DEPEND forward
   dependences for the purpose of forward list scheduling.
   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.
   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live registers constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"
#include "params.h"
#ifdef INSN_SCHEDULING

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

static int issue_rate;
/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -dSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

static int sched_verbose_param = 0;
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dSR).  */
FILE *sched_dump = 0;

/* Highest uid before scheduling.  */
static int old_max_uid;
/* fix_sched_param() is called from toplev.c upon detection
   of the -fsched-verbose=N option.  */

void
fix_sched_param (const char *param, const char *val)
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning (0, "fix_sched_param: unknown param: %s", param);
}
struct haifa_insn_data *h_i_d;

#define LINE_NOTE(INSN)		(h_i_d[INSN_UID (INSN)].line_note)
#define INSN_TICK(INSN)		(h_i_d[INSN_UID (INSN)].tick)
#define INTER_TICK(INSN)	(h_i_d[INSN_UID (INSN)].inter_tick)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* Issue points are used to distinguish between instructions in max_issue ().
   For now, all instructions are equally good.  */
#define ISSUE_POINTS(INSN) 1

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;
static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
static spec_info_t spec_info;

/* True if a recovery block was added during scheduling of the current
   block.  Used to determine whether we need to fix INSN_TICKs.  */
static bool added_recovery_block_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Pointers to GLAT data.  See init_glat for more information.  */
regset *glat_start, *glat_end;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx *bb_header = 0;

/* Number of basic blocks.  */
static int old_last_basic_block;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;
/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) are the insns in the INSN_DEPEND of the unscheduled
   insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */
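
/* A minimal standalone sketch of the P/Q/R/S lifecycle described
   above, using a hypothetical "toy_state" enum rather than the real
   queue/ready data structures of this pass.  The deps_left and stall
   arguments model the two conditions that gate the transitions.  */
#if 0
enum toy_state { PENDING, QUEUED, READY, SCHEDULED };

static enum toy_state
toy_step (enum toy_state s, int deps_left, int stall)
{
  switch (s)
    {
    case PENDING:
      /* P->Q or P->R: all dependencies must be satisfied first.  */
      return deps_left > 0 ? PENDING : (stall > 0 ? QUEUED : READY);
    case QUEUED:
      /* Q->R: enough cycles must have elapsed.  */
      return stall > 0 ? QUEUED : READY;
    case READY:
      /* R->S: chosen by the scheduling loop.  */
      return SCHEDULED;
    default:
      return SCHEDULED;
    }
}
#endif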
/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   from the maximal time of functional unit reservations needed to get
   a result.  This is the longest time an insn may be queued.  */

static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
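
/* A small standalone demonstration of the wraparound arithmetic above:
   because max_insn_queue_index is 2^k - 1, masking with it implements
   "mod 2^k" cheaply.  The local constant mirrors the real global.  */
#if 0
#include <assert.h>

static void
toy_queue_index_demo (void)
{
  const int toy_max_insn_queue_index = 7;	/* 2^3 - 1 */
#define TOY_NEXT_Q_AFTER(X, C) (((X) + (C)) & toy_max_insn_queue_index)

  /* Advancing 3 slots from slot 6 wraps around to slot 1.  */
  assert (TOY_NEXT_Q_AFTER (6, 3) == 1);
  /* Advancing by the full queue length returns to the same slot.  */
  assert (TOY_NEXT_Q_AFTER (5, 8) == 5);
#undef TOY_NEXT_Q_AFTER
}
#endif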
#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   the queue, nor in the ready list.
   QUEUE_READY     - INSN is in the ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)
/* The following variable value refers to all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable value is the size of memory representing all
   current and future reservations of the processor units.  */
static size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
static char *ready_try;
/* Describe the ready list of the scheduler.
   VEC holds space enough for all insns in the current region.  VECLEN
   says how many exactly.
   FIRST is the index of the element with the highest priority; i.e. the
   last one in the ready list, since elements are ordered by ascending
   priority.
   N_READY determines how many insns are on the ready list.  */

struct ready_list
{
  rtx *vec;
  int veclen;
  int first;
  int n_ready;
};

/* The pointer to the ready list.  */
static struct ready_list *readyp;
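
/* A standalone sketch of the ready-list layout documented above: the
   live window is vec[first - n_ready + 1 .. first], with the highest
   priority element stored last, at vec[first].  The numbers are
   illustrative only.  */
#if 0
#include <assert.h>

static void
toy_ready_layout_demo (void)
{
  int vec[8] = { 0 };
  int veclen = 8, first = veclen - 1, n_ready = 3;

  /* The lowest-priority element of a 3-entry list ...  */
  assert (&vec[first - n_ready + 1] == &vec[5]);
  /* ... and the highest-priority one sits at vec[first].  */
  assert (&vec[first] == &vec[7]);
}
#endif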
/* Scheduling clock.  */
static int clock_var;

/* Number of instructions in current scheduling region.  */
static int rgn_n_insns;

static int may_trap_exp (rtx, int);
/* Nonzero iff the address is comprised from at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)						\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))
/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int
may_trap_exp (rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory:  a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}
/* Classifies insn for the purpose of verifying that it can be
   moved speculatively, by examining its pattern, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
   being either PFREE or PRISKY.  */
static int
haifa_classify_insn (rtx insn)
{
  rtx pat = PATTERN (insn);
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i, len = XVECLEN (pat, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  code = GET_CODE (XVECEXP (pat, 0, i));
	  switch (code)
	    {
	    case CLOBBER:
	      /* Test if it is a 'store'.  */
	      tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
	      break;
	    case SET:
	      /* Test if it is a store.  */
	      tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
	      if (tmp_class == TRAP_RISKY)
		break;
	      /* Test if it is a load.  */
	      tmp_class
		= WORST_CLASS (tmp_class,
			       may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
					     0));
	      break;
	    case COND_EXEC:
	    case TRAP_IF:
	      tmp_class = TRAP_RISKY;
	      break;
	    default:
	      ;
	    }
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (pat);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (pat, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (pat), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class = WORST_CLASS (tmp_class,
				   may_trap_exp (SET_SRC (pat), 0));
	  break;
	case COND_EXEC:
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:
	  ;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}
/* Forward declarations.  */

HAIFA_INLINE static int insn_cost1 (rtx, enum reg_note, rtx, rtx);
static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
static int find_set_reg_weight (rtx);
static void find_insn_reg_weight (basic_block);
static void find_insn_reg_weight1 (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);
/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between three types of notes:

   (1) LINE_NUMBER notes, generated and used for debugging.  Here,
   before scheduling a region, a pointer to the LINE_NUMBER note is
   added to the insn following it (in save_line_notes()), and the note
   is removed (in rm_line_notes() and unlink_line_notes()).  After
   scheduling the region, this pointer is used for regeneration of
   the LINE_NUMBER note (in restore_line_notes()).

   (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (3) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */
static rtx unlink_other_notes (rtx, rtx);
static rtx unlink_line_notes (rtx, rtx);
static void reemit_notes (rtx);

static rtx *ready_lastpos (struct ready_list *);
static void ready_add (struct ready_list *, rtx, bool);
static void ready_sort (struct ready_list *);
static rtx ready_remove_first (struct ready_list *);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

static void debug_ready_list (struct ready_list *);

static void move_insn (rtx);
/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx ready_element (struct ready_list *, int);
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);
static int max_issue (struct ready_list *, int *, int);

static rtx choose_ready (struct ready_list *);

static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);
static void resolve_dep (rtx, rtx);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void extend_ready (int);
static void extend_global (rtx);
static void extend_all (rtx);
static void init_h_i_d (rtx);
static void generate_recovery_code (rtx);
static void process_insn_depend_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
static dw_t dep_weak (ds_t);
static edge find_fallthru_edge (basic_block);
static void init_before_recovery (void);
static basic_block create_recovery_block (void);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void associate_line_notes_with_blocks (basic_block);
static void change_pattern (rtx, rtx);
static int speculate_insn (rtx, ds_t, rtx *);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void extend_bb (basic_block);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
static void init_glat (void);
static void init_glat1 (basic_block);
static void attach_life_info1 (basic_block);
static void free_glat (void);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx);
static void add_jump_dependencies (rtx, rtx);
static rtx bb_note (basic_block);
static void calc_priorities (rtx);
#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
static void check_sched_flags (void);
#endif

#endif /* INSN_SCHEDULING */
/* Point to state used for the current scheduling pass.  */
struct sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else
/* Working copy of frontend's sched_info variable.  */
static struct sched_info current_sched_info_var;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;
/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of cycles between instruction issue and
   instruction results.  */

HAIFA_INLINE int
insn_cost (rtx insn, rtx link, rtx used)
{
  return insn_cost1 (insn, used ? REG_NOTE_KIND (link) : REG_NOTE_MAX,
		     link, used);
}
/* Compute cost of executing INSN given the dependence on the insn USED.
   If LINK is not NULL, then its REG_NOTE_KIND is used as a dependence type.
   Otherwise, dependence between INSN and USED is assumed to be of type
   DEP_TYPE.  This function was introduced as a workaround for the
   targetm.adjust_cost hook.
   This is the number of cycles between instruction issue and
   instruction results.  */
HAIFA_INLINE static int
insn_cost1 (rtx insn, enum reg_note dep_type, rtx link, rtx used)
{
  int cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  /* In this case estimate cost without caring how insn is used.  */
  if (used == 0)
    return cost;

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  */
  if (recog_memoized (used) < 0)
    cost = 0;
  else
    {
      gcc_assert (!link || dep_type == REG_NOTE_KIND (link));

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}

      if (targetm.sched.adjust_cost_2)
	cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost);
      else if (targetm.sched.adjust_cost)
	cost = targetm.sched.adjust_cost (used, link, insn, cost);

      if (cost < 0)
	cost = 0;
    }

  return cost;
}
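
/* A standalone sketch of the dependence-type cost rules implemented
   above, with plain ints standing in for the insn latencies; the
   function and parameter names are illustrative, not part of GCC.  */
#if 0
static int
toy_dep_cost (int producer_latency, int consumer_latency, int anti_dep,
	      int output_dep)
{
  if (anti_dep)
    /* Anti dependence: the consumer only reads before the producer
       writes, so no latency needs to be covered.  */
    return 0;
  if (output_dep)
    {
      /* Output dependence: only the difference of the two default
	 latencies matters, but keep at least one cycle of distance.  */
      int cost = producer_latency - consumer_latency;
      return cost <= 0 ? 1 : cost;
    }
  /* True dependence: the full default latency of the producer.  */
  return producer_latency;
}
#endif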
/* Compute the priority number for INSN.  */

static int
priority (rtx insn)
{
  rtx link;

  if (! INSN_P (insn))
    return 0;

  if (! INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = 0;

      if (INSN_DEPEND (insn) == 0)
	this_priority = insn_cost (insn, 0, 0);
      else
	{
	  rtx prev_first, twin;
	  basic_block rec;

	  /* For recovery check instructions we calculate priority slightly
	     different than that of normal instructions.  Instead of walking
	     through INSN_DEPEND (check) list, we walk through INSN_DEPEND list
	     of each instruction in the corresponding recovery block.  */

	  rec = RECOVERY_BLOCK (insn);
	  if (!rec || rec == EXIT_BLOCK_PTR)
	    {
	      prev_first = PREV_INSN (insn);
	      twin = insn;
	    }
	  else
	    {
	      prev_first = NEXT_INSN (BB_HEAD (rec));
	      twin = PREV_INSN (BB_END (rec));
	    }

	  do
	    {
	      for (link = INSN_DEPEND (twin); link; link = XEXP (link, 1))
		{
		  rtx next;
		  int next_priority;

		  next = XEXP (link, 0);

		  if (BLOCK_FOR_INSN (next) != rec)
		    {
		      /* Critical path is meaningful in block boundaries
			 only.  */
		      if (! (*current_sched_info->contributes_to_priority)
			  (next, insn)
			  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
			     then speculative instructions will less likely be
			     scheduled.  That is because the priority of
			     their producers will increase, and, thus, the
			     producers will more likely be scheduled, thus,
			     resolving the dependence.  */
			  || ((current_sched_info->flags & DO_SPECULATION)
			      && (DEP_STATUS (link) & SPECULATIVE)
			      && !(spec_info->flags
				   & COUNT_SPEC_IN_CRITICAL_PATH)))
			continue;

		      next_priority = insn_cost1 (insn,
						  twin == insn ?
						  REG_NOTE_KIND (link) :
						  REG_DEP_ANTI,
						  twin == insn ? link : 0,
						  next) + priority (next);

		      if (next_priority > this_priority)
			this_priority = next_priority;
		    }
		}

	      twin = PREV_INSN (twin);
	    }
	  while (twin != prev_first);
	}
      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_KNOWN (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)				             \
       swap_sort (READY, N_READY);			             \
     else if ((N_READY) > 2)				             \
         qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }  \
while (0)
/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx tmp = *(const rtx *) y;
  rtx tmp2 = *(const rtx *) x;
  rtx link;
  int tmp_class, tmp2_class, depend_count1, depend_count2;
  int val, priority_val, weight_val, info_val;

  /* The insn in a schedule group should be issued the first.  */
  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (priority_val)
    return priority_val;

  /* Prefer speculative insn with greater dependencies weakness.  */
  if (spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      dw1 = ds1 ? dep_weak (ds1) : NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      dw2 = ds2 ? dep_weak (ds2) : NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
	return dw;
    }

  /* Prefer an insn with smaller contribution to registers-pressure.  */
  if (!reload_completed &&
      (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
    return weight_val;

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (info_val)
    return info_val;

  /* Compare insns based on their relation to the last-scheduled-insn.  */
  if (INSN_P (last_scheduled_insn))
    {
      /* Classify the instructions into three classes:
         1) Data dependent on last schedule insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
      link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
	tmp_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
	tmp_class = 1;
      else
	tmp_class = 2;

      link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
	tmp2_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
	return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */
  depend_count1 = 0;
  for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
    depend_count1++;

  depend_count2 = 0;
  for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
    depend_count2++;

  val = depend_count2 - depend_count1;
  if (val)
    return val;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
/* Resort the array A in which only element at index N may be out of order.  */

HAIFA_INLINE static void
swap_sort (rtx *a, int n)
{
  rtx insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}
/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve insns
   chain for debugging purposes.  */

HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);

  gcc_assert (n_cycles <= max_insn_queue_index);

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
	       (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
    }

  QUEUE_INDEX (insn) = next_q;
}
/* Remove INSN from queue.  */
static void
queue_remove (rtx insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}
/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

HAIFA_INLINE static rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}
/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
	{
	  memmove (ready->vec + ready->veclen - ready->n_ready,
		   ready_lastpos (ready),
		   ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 1;
	}
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
	{
	  if (ready->n_ready)
	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
		     ready_lastpos (ready),
		     ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 2;
	}
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;
}
/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
  rtx t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}
/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we will try to choose a ready insn that
   permits starting the maximum number of insns on the same cycle.  */

/* Return a pointer to the element INDEX from the ready.  INDEX for
   insn with the highest priority is 0, and the lowest priority has
   N_READY - 1.  */

HAIFA_INLINE static rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}
/* Remove the element INDEX from the ready list and return it.  INDEX
   for insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx
ready_remove (struct ready_list *ready, int index)
{
  rtx t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}
/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
	ready_remove (readyp, i);
	return;
      }
  gcc_unreachable ();
}
/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */

HAIFA_INLINE static void
ready_sort (struct ready_list *ready)
{
  rtx *first = ready_lastpos (ready);
  SCHED_SORT (first, ready->n_ready);
}
/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}
/* Advance time by one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (curr_state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_post_cycle_insn ());
}
/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  READY is the ready list which contains the insns
   that are ready to fire.  CLOCK is the current cycle.  The function
   returns necessary cycle advance after issuing the insn (it is not
   zero for insns in a schedule group).  */
static int
schedule_insn (rtx insn)
{
  rtx link;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      char buf[2048];

      print_insn (buf, insn, 0);
      buf[40] = 0;
      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);

      if (recog_memoized (insn) < 0)
	fprintf (sched_dump, "nothing");
      else
	print_reservation (sched_dump, insn);
      fputc ('\n', sched_dump);
    }

  /* Scheduling instruction should have all its dependencies resolved and
     should have been removed from the ready list.  */
  gcc_assert (INSN_DEP_COUNT (insn) == 0);
  gcc_assert (!LOG_LINKS (insn));
  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);

  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  /* Now we can free RESOLVED_DEPS list.  */
  if (current_sched_info->flags & USE_DEPS_LIST)
    free_DEPS_LIST_list (&RESOLVED_DEPS (insn));
  else
    free_INSN_LIST_list (&RESOLVED_DEPS (insn));

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if following flag is set.  */
    gcc_assert (flag_sched_stalled_insns);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  /* Update dependent instructions.  */
  for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
    {
      rtx next = XEXP (link, 0);

      resolve_dep (next, insn);

      if (!RECOVERY_BLOCK (insn)
	  || RECOVERY_BLOCK (insn) == EXIT_BLOCK_PTR)
	{
	  int effective_cost;

	  effective_cost = try_ready (next);

	  if (effective_cost >= 0
	      && SCHED_GROUP_P (next)
	      && advance < effective_cost)
	    advance = effective_cost;
	}
      else
	/* Check always has only one forward dependence (to the first insn in
	   the recovery block), therefore, this will be executed only once.  */
	{
	  gcc_assert (XEXP (link, 1) == 0);
	  fix_recovery_deps (RECOVERY_BLOCK (insn));
	}
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    {
      if (reload_completed)
	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  return advance;
}
/* Functions for handling of notes.  */

/* Delete notes beginning with INSN and put them in the chain
   of notes ended by NOTE_LIST.
   Returns the insn following the notes.  */

static rtx
unlink_other_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_NOT_BB_P (insn))
    {
      rtx next = NEXT_INSN (insn);
      basic_block bb = BLOCK_FOR_INSN (insn);

      /* Delete the note from its current position.  */
      if (prev)
	NEXT_INSN (prev) = next;
      if (next)
	PREV_INSN (next) = prev;

      if (bb)
	{
	  /* Basic block can begin with either LABEL or
	     NOTE_INSN_BASIC_BLOCK.  */
	  gcc_assert (BB_HEAD (bb) != insn);

	  /* Check if we are removing last insn in the BB.  */
	  if (BB_END (bb) == insn)
	    BB_END (bb) = prev;
	}

      /* See sched_analyze to see how these are handled.  */
      if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
	  && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
	{
	  /* Insert the note at the end of the notes list.  */
	  PREV_INSN (insn) = note_list;
	  if (note_list)
	    NEXT_INSN (note_list) = insn;
	  note_list = insn;
	}

      insn = next;
    }
  return insn;
}
/* Delete line notes beginning with INSN.  Record line-number notes so
   they can be reused.  Returns the insn following the notes.  */

static rtx
unlink_line_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_NOT_BB_P (insn))
    {
      rtx next = NEXT_INSN (insn);

      if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
	{
	  basic_block bb = BLOCK_FOR_INSN (insn);

	  /* Delete the note from its current position.  */
	  if (prev)
	    NEXT_INSN (prev) = next;
	  if (next)
	    PREV_INSN (next) = prev;

	  if (bb)
	    {
	      /* Basic block can begin with either LABEL or
		 NOTE_INSN_BASIC_BLOCK.  */
	      gcc_assert (BB_HEAD (bb) != insn);

	      /* Check if we are removing last insn in the BB.  */
	      if (BB_END (bb) == insn)
		BB_END (bb) = prev;
	    }

	  /* Record line-number notes so they can be reused.  */
	  LINE_NOTE (insn) = insn;
	}
      else
	prev = insn;

      insn = next;
    }
  return insn;
}
/* Return the head and tail pointers of ebb starting at BEG and ending
   at END.  */

void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
  rtx beg_head = BB_HEAD (beg);
  rtx beg_tail = BB_END (beg);
  rtx end_head = BB_HEAD (end);
  rtx end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic blocks.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else
      break;

  *tailp = end_tail;
}
/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (rtx head, rtx tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
	return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}
/* Delete line notes from one block.  Save them so they can be later restored
   (in restore_line_notes).  HEAD and TAIL are the boundaries of the
   block in which notes should be processed.  */

void
rm_line_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
	 This is needed to keep the debugger from
	 getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
	{
	  prev = insn;
	  insn = unlink_line_notes (insn, next_tail);

	  gcc_assert (prev != tail && prev != head && insn != next_tail);
	}
    }
}
/* Save line number notes for each insn in block B.  HEAD and TAIL are
   the boundaries of the block in which notes should be processed.  */

void
save_line_notes (int b, rtx head, rtx tail)
{
  rtx next_tail;

  /* We must use the true line number for the first insn in the block
     that was computed and saved at the start of this pass.  We can't
     use the current line number, because scheduling of the previous
     block may have changed the current line number.  */

  rtx line = line_note_head[b];
  rtx insn;

  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
    else
      LINE_NOTE (insn) = line;
}
/* After a block was scheduled, insert line notes into the insns list.
   HEAD and TAIL are the boundaries of the block in which notes should
   be processed.  */

void
restore_line_notes (rtx head, rtx tail)
{
  rtx line, note, prev, new;
  int added_notes = 0;
  rtx next_tail, insn;

  next_tail = NEXT_INSN (tail);

  /* Determine the current line-number.  We want to know the current
     line number of the first insn of the block here, in case it is
     different from the true line number that was saved earlier.  If
     different, then we need a line number note before the first insn
     of this block.  If it happens to be the same, then we don't want to
     emit another line number note here.  */
  for (line = head; line; line = PREV_INSN (line))
    if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
      break;

  /* Walk the insns keeping track of the current line-number and inserting
     the line-number notes as needed.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
  /* This used to emit line number notes before every non-deleted note.
     However, this confuses a debugger, because line notes not separated
     by real instructions all end up at the same address.  I can find no
     use for line number notes before other notes, so none are emitted.  */
    else if (!NOTE_P (insn)
	     && INSN_UID (insn) < old_max_uid
	     && (note = LINE_NOTE (insn)) != 0
	     && note != line
	     && (line == 0
#ifdef USE_MAPPED_LOCATION
		 || NOTE_SOURCE_LOCATION (note) != NOTE_SOURCE_LOCATION (line)
#else
		 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
		 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)
#endif
		 ))
      {
	line = note;
	prev = PREV_INSN (insn);
	if (LINE_NOTE (note))
	  {
	    /* Re-use the original line-number note.  */
	    LINE_NOTE (note) = 0;
	    PREV_INSN (note) = prev;
	    NEXT_INSN (prev) = note;
	    PREV_INSN (insn) = note;
	    NEXT_INSN (note) = insn;
	    set_block_for_insn (note, BLOCK_FOR_INSN (insn));
	  }
	else
	  {
	    added_notes++;
	    new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
#ifndef USE_MAPPED_LOCATION
	    NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
#endif
	  }
      }
  if (sched_verbose && added_notes)
    fprintf (sched_dump, ";; added %d line-number notes\n", added_notes);
}
/* After scheduling the function, delete redundant line notes from the
   insns list.  */

void
rm_redundant_line_notes (void)
{
  rtx line = 0;
  rtx insn = get_insns ();
  int active_insn = 0;
  int notes = 0;

  /* Walk the insns deleting redundant line-number notes.  Many of these
     are already present.  The remainder tend to occur at basic
     block boundaries.  */
  for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      {
	/* If there are no active insns following, INSN is redundant.  */
	if (active_insn == 0)
	  {
	    notes++;
	    SET_INSN_DELETED (insn);
	  }
	/* If the line number is unchanged, LINE is redundant.  */
	else if (line
#ifdef USE_MAPPED_LOCATION
		 && NOTE_SOURCE_LOCATION (line) == NOTE_SOURCE_LOCATION (insn)
#else
		 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
		 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn)
#endif
		 )
	  {
	    notes++;
	    SET_INSN_DELETED (line);
	    line = insn;
	  }
	else
	  line = insn;
	active_insn = 0;
      }
    else if (!((NOTE_P (insn)
		&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
	       || (NONJUMP_INSN_P (insn)
		   && (GET_CODE (PATTERN (insn)) == USE
		       || GET_CODE (PATTERN (insn)) == CLOBBER))))
      active_insn++;

  if (sched_verbose && notes)
    fprintf (sched_dump, ";; deleted %d line-number notes\n", notes);
}
/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */

void
rm_other_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  note_list = 0;
  if (head == tail && (! INSN_P (head)))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
	 This is needed to keep the debugger from
	 getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
	{
	  prev = insn;

	  insn = unlink_other_notes (insn, next_tail);

	  gcc_assert (prev != tail && prev != head && insn != next_tail);
	}
    }
}
/* Functions for computation of registers live/usage info.  */

/* This function looks for a new register being defined.
   If the destination register is already used by the source,
   a new register is not needed.  */

static int
find_set_reg_weight (rtx x)
{
  if (GET_CODE (x) == CLOBBER
      && register_operand (SET_DEST (x), VOIDmode))
    return 1;
  if (GET_CODE (x) == SET
      && register_operand (SET_DEST (x), VOIDmode))
    {
      if (REG_P (SET_DEST (x)))
	{
	  if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
	    return 1;
	  else
	    return 0;
	}
      return 1;
    }
  return 0;
}
/* Calculate INSN_REG_WEIGHT for all insns of a block.  */
static void
find_insn_reg_weight (basic_block bb)
{
  rtx insn, next_tail, head, tail;

  get_ebb_head_tail (bb, bb, &head, &tail);
  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    find_insn_reg_weight1 (insn);
}
/* Calculate INSN_REG_WEIGHT for single instruction.
   Separated from find_insn_reg_weight because of need
   to initialize new instruction in generate_recovery_code.  */
static void
find_insn_reg_weight1 (rtx insn)
{
  int reg_weight = 0;
  rtx x;

  /* Handle register life information.  */
  if (! INSN_P (insn))
    return;

  /* Increment weight for each register born here.  */
  x = PATTERN (insn);
  reg_weight += find_set_reg_weight (x);
  if (GET_CODE (x) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
	{
	  x = XVECEXP (PATTERN (insn), 0, j);
	  reg_weight += find_set_reg_weight (x);
	}
    }
  /* Decrement weight for each register that dies here.  */
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
    {
      if (REG_NOTE_KIND (x) == REG_DEAD
	  || REG_NOTE_KIND (x) == REG_UNUSED)
	reg_weight--;
    }

  INSN_REG_WEIGHT (insn) = reg_weight;
}
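
/* A standalone sketch of the weight bookkeeping above on a toy
   representation: +1 per register set here, -1 per REG_DEAD/REG_UNUSED
   note.  All names here are illustrative.  */
#if 0
static int
toy_insn_reg_weight (int regs_born, int regs_dying)
{
  return regs_born - regs_dying;
}

/* E.g. for "r1 = r2 + r3" where r2 and r3 both die:
   toy_insn_reg_weight (1, 2) == -1, a net decrease in pressure.  */
#endif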
/* Move insns that became ready to fire from queue to ready list.  */

static void
queue_to_ready (struct ready_list *ready)
{
  rtx insn;
  rtx link;

  q_ptr = NEXT_Q (q_ptr);

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {
      insn = XEXP (link, 0);
      q_size -= 1;

      if (sched_verbose >= 2)
	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
		 (*current_sched_info->print_insn) (insn, 0));

      /* If the ready list is full, delay the insn for 1 cycle.
	 See the comment in schedule_block for the rationale.  */
      if (!reload_completed
	  && ready->n_ready > MAX_SCHED_READY_INSNS
	  && !SCHED_GROUP_P (insn))
	{
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, "requeued because ready full\n");
	  queue_insn (insn, 1);
	}
      else
	{
	  ready_add (ready, insn, false);
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, "moving to ready without stalls\n");
	}
    }
  free_INSN_LIST_list (&insn_queue[q_ptr]);

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
	{
	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	    {
	      for (; link; link = XEXP (link, 1))
		{
		  insn = XEXP (link, 0);
		  q_size -= 1;

		  if (sched_verbose >= 2)
		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
			     (*current_sched_info->print_insn) (insn, 0));

		  ready_add (ready, insn, false);
		  if (sched_verbose >= 2)
		    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
		}
	      free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);

	      advance_one_cycle ();

	      break;
	    }

	  advance_one_cycle ();
	}

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
    }
}
/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead (or in
   addition) depending on user flags and target hooks.)  */

static bool
ok_for_early_queue_removal (rtx insn)
{
  int n_cycles;
  rtx prev_insn = last_scheduled_insn;

  if (targetm.sched.is_costly_dependence)
    {
      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
	{
	  for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
	    {
	      rtx dep_link = 0;
	      int dep_cost;

	      if (!NOTE_P (prev_insn))
		{
		  dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn));
		  if (dep_link)
		    {
		      dep_cost = insn_cost (prev_insn, dep_link, insn);
		      if (targetm.sched.is_costly_dependence (prev_insn, insn,
				dep_link, dep_cost,
				flag_sched_stalled_insns_dep - n_cycles))
			return false;
		    }
		}

	      if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
		break;
	    }

	  if (!prev_insn)
	    break;
	  prev_insn = PREV_INSN (prev_insn);
	}
    }

  return true;
}
/* Remove insns from the queue, before they become "ready" with respect
   to FU latency considerations.  */

static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx next_link;
  rtx prev_link;
  bool move_to_ready;
  int cost;
  state_t temp_state = alloca (dfa_state_size);
  int stalls;
  int insns_removed = 0;

  /*
     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
     function:

     X == 0: There is no limit on how many queued insns can be removed
             prematurely.  (flag_sched_stalled_insns = -1).

     X >= 1: Only X queued insns can be removed prematurely in each
	     invocation.  (flag_sched_stalled_insns = X).

     Otherwise: Early queue removal is disabled.
	 (flag_sched_stalled_insns = 0)
  */

  if (! flag_sched_stalled_insns)
    return 0;

  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
    {
      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	{
	  if (sched_verbose > 6)
	    fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);

	  prev_link = 0;
	  while (link)
	    {
	      next_link = XEXP (link, 1);
	      insn = XEXP (link, 0);
	      if (insn && sched_verbose > 6)
		print_rtl_single (sched_dump, insn);

	      memcpy (temp_state, state, dfa_state_size);
	      if (recog_memoized (insn) < 0)
		/* non-negative to indicate that it's not ready
		   to avoid infinite Q->R->Q->R... */
		cost = 0;
	      else
		cost = state_transition (temp_state, insn);

	      if (sched_verbose >= 6)
		fprintf (sched_dump, "transition cost = %d\n", cost);

	      move_to_ready = false;
	      if (cost < 0)
		{
		  move_to_ready = ok_for_early_queue_removal (insn);
		  if (move_to_ready == true)
		    {
		      /* move from Q to R */
		      q_size -= 1;
		      ready_add (ready, insn, false);

		      if (prev_link)
			XEXP (prev_link, 1) = next_link;
		      else
			insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;

		      free_INSN_LIST_node (link);

		      if (sched_verbose >= 2)
			fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
				 (*current_sched_info->print_insn) (insn, 0));

		      insns_removed++;
		      if (insns_removed == flag_sched_stalled_insns)
			/* Remove no more than flag_sched_stalled_insns insns
			   from Q at a time.  */
			return insns_removed;
		    }
		}

	      if (move_to_ready == false)
		prev_link = link;

	      link = next_link;
	    } /* while link */
	} /* if link */
    } /* for stalls.. */

  return insns_removed;
}
/* Print the ready list for debugging purposes.  Callable from debugger.  */

static void
debug_ready_list (struct ready_list *ready)
{
  rtx *p;
  int i;

  if (ready->n_ready == 0)
    {
      fprintf (sched_dump, "\n");
      return;
    }

  p = ready_lastpos (ready);
  for (i = 0; i < ready->n_ready; i++)
    fprintf (sched_dump, "  %s", (*current_sched_info->print_insn) (p[i], 0));
  fprintf (sched_dump, "\n");
}
/* Search INSN for REG_SAVE_NOTE note pairs for
   NOTE_INSN_EHREGION_{BEG,END}; and convert them back into
   NOTEs.  The REG_SAVE_NOTE note following the first one contains the
   saved value for NOTE_BLOCK_NUMBER which is useful for
   NOTE_INSN_EH_REGION_{BEG,END} NOTEs.  */

static void
reemit_notes (rtx insn)
{
  rtx note, last = insn;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
	{
	  enum insn_note note_type = INTVAL (XEXP (note, 0));

	  last = emit_note_before (note_type, last);
	  remove_note (insn, note);
	}
    }
}
/* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
static void
move_insn (rtx insn)
{
  rtx last = last_scheduled_insn;

  if (PREV_INSN (insn) != last)
    {
      basic_block bb;
      rtx note;
      int jump_p = 0;

      bb = BLOCK_FOR_INSN (insn);

      /* BB_HEAD is either LABEL or NOTE.  */
      gcc_assert (BB_HEAD (bb) != insn);

      if (BB_END (bb) == insn)
	/* If this is last instruction in BB, move end marker one
	   instruction up.  */
	{
	  /* Jumps are always placed at the end of basic block.  */
	  jump_p = control_flow_insn_p (insn);

	  gcc_assert (!jump_p
		      || ((current_sched_info->flags & SCHED_RGN)
			  && RECOVERY_BLOCK (insn)
			  && RECOVERY_BLOCK (insn) != EXIT_BLOCK_PTR)
		      || (current_sched_info->flags & SCHED_EBB));

	  gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);

	  BB_END (bb) = PREV_INSN (insn);
	}

      gcc_assert (BB_END (bb) != last);

      if (jump_p)
	/* We move the block note along with jump.  */
	{
	  /* NT is needed for assertion below.  */
	  rtx nt = current_sched_info->next_tail;

	  note = NEXT_INSN (insn);
	  while (NOTE_NOT_BB_P (note) && note != nt)
	    note = NEXT_INSN (note);

	  if (note != nt
	      && (LABEL_P (note)
		  || BARRIER_P (note)))
	    note = NEXT_INSN (note);

	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
	}
      else
	note = insn;

      NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
      PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);

      NEXT_INSN (note) = NEXT_INSN (last);
      PREV_INSN (NEXT_INSN (last)) = note;

      NEXT_INSN (last) = insn;
      PREV_INSN (insn) = last;

      bb = BLOCK_FOR_INSN (last);

      if (jump_p)
	{
	  fix_jump_move (insn);

	  if (BLOCK_FOR_INSN (insn) != bb)
	    move_block_after_check (insn);

	  gcc_assert (BB_END (bb) == last);
	}

      set_block_for_insn (insn, bb);

      /* Update BB_END, if needed.  */
      if (BB_END (bb) == last)
	BB_END (bb) = insn;
    }

  reemit_notes (insn);

  SCHED_GROUP_P (insn) = 0;
}
/* The following structure describes an entry of the stack of choices.  */
struct choice_entry
{
  /* Ordinal number of the issued insn in the ready queue.  */
  int index;
  /* The number of the rest insns whose issues we should try.  */
  int rest;
  /* The number of issued essential insns.  */
  int n;
  /* State after issuing the insn.  */
  state_t state;
};

/* The following array is used to implement a stack of choices used in
   function max_issue.  */
static struct choice_entry *choice_stack;

/* The following variable value is the number of essential insns issued on
   the current cycle.  An insn is an essential one if it changes the
   processor's state.  */
static int cycle_issued_insns;

/* The following variable value is the maximal number of tries of issuing
   insns for the first cycle multipass insn scheduling.  We define
   this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
   need this constraint if all real insns (with non-negative codes)
   had reservations because in this case the algorithm complexity is
   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
   might be incomplete and such insn might occur.  For such
   descriptions, the complexity of algorithm (without the constraint)
   could achieve DFA_LOOKAHEAD ** N , where N is the queue length.  */
static int max_lookahead_tries;
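
/* A standalone sketch of how this bound behaves: choose_ready below
   recomputes it as 100 * lookahead^issue_rate whenever the lookahead
   value changes.  The names here are local stand-ins.  */
#if 0
static int
toy_max_lookahead_tries (int lookahead, int issue_rate_)
{
  int tries = 100, i;

  for (i = 0; i < issue_rate_; i++)
    tries *= lookahead;
  return tries;	/* e.g. lookahead 4, issue rate 2 -> 1600 tries */
}
#endif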
/* The following value is the value of the hook
   `first_cycle_multipass_dfa_lookahead' at the last call of
   `max_issue'.  */
static int cached_first_cycle_multipass_dfa_lookahead = 0;

/* The following value is the value of `issue_rate' at the last call of
   `schedule_block'.  */
static int cached_issue_rate = 0;
/* The following function returns the maximal (or close to maximal) number
   of insns which can be issued on the same cycle, one of which is the
   insn with the best rank (the first insn in READY).  To do this, the
   function tries different samples of ready insns.  READY is the current
   queue `ready'.  The global array READY_TRY reflects which insns are
   already issued in this try.  MAX_POINTS is the sum of points of all
   instructions in READY.  The function stops immediately if it reaches
   a solution in which all instructions can be issued.  INDEX will contain
   the index of the best insn in READY.  This function is used only for
   first-cycle multipass scheduling.  */
2081 max_issue (struct ready_list *ready, int *index, int max_points)
2083 int n, i, all, n_ready, best, delay, tries_num, points = -1;
2084 struct choice_entry *top;
2088 memcpy (choice_stack->state, curr_state, dfa_state_size);
2090 top->rest = cached_first_cycle_multipass_dfa_lookahead;
2092 n_ready = ready->n_ready;
2093 for (all = i = 0; i < n_ready; i++)
2100 if (top->rest == 0 || i >= n_ready)
2102 if (top == choice_stack)
2104 if (best < top - choice_stack && ready_try [0])
2106 best = top - choice_stack;
2107 *index = choice_stack [1].index;
2109 if (top->n == max_points || best == all)
2115 memcpy (curr_state, top->state, dfa_state_size);
2117 else if (!ready_try [i])
2120 if (tries_num > max_lookahead_tries)
2122 insn = ready_element (ready, i);
2123 delay = state_transition (curr_state, insn);
2126 if (state_dead_lock_p (curr_state))
2131 if (memcmp (top->state, curr_state, dfa_state_size) != 0)
2132 n += ISSUE_POINTS (insn);
2134 top->rest = cached_first_cycle_multipass_dfa_lookahead;
2137 memcpy (top->state, curr_state, dfa_state_size);
2144 while (top != choice_stack)
2146 ready_try [top->index] = 0;
2149 memcpy (curr_state, choice_stack->state, dfa_state_size);
2151 if (sched_verbose >= 4)
2152 fprintf (sched_dump, ";;\t\tChosen insn : %s; points: %d/%d\n",
2153 (*current_sched_info->print_insn) (ready_element (ready, *index),
2155 points, max_points);
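/* Illustrative sketch (not part of the pass): stripped of the DFA
   details, max_issue is a bounded backtracking search.  The real code
   keeps an explicit stack (choice_stack) and saves/restores the DFA
   state with memcpy; the recursive sketch below shows the same "try a
   candidate, recurse, undo" shape.  USED, N, CAN_ISSUE and TRIES are
   hypothetical stand-ins for ready_try, the ready length, the
   state_transition test and max_lookahead_tries.  */

static int
sketch_max_issue (int *used, int n, int depth, int *tries,
                  int (*can_issue) (int))
{
  int i, best = depth;

  if (--*tries <= 0)
    return depth;  /* Bounded amount of work, as with the cap above.  */
  for (i = 0; i < n; i++)
    if (!used[i] && can_issue (i))
      {
        int res;

        used[i] = 1;  /* Tentatively issue candidate I.  */
        res = sketch_max_issue (used, n, depth + 1, tries, can_issue);
        used[i] = 0;  /* Backtrack.  */
        if (res > best)
          best = res;
      }
  return best;
}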
2160 /* The following function chooses an insn from READY and modifies
2161 *N_READY and READY. It is used only for first cycle
2162 multipass scheduling. */
2165 choose_ready (struct ready_list *ready)
2169 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2170 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2171 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
2172 return ready_remove_first (ready);
2175 /* Try to choose the better insn. */
2176 int index = 0, i, n;
2178 int more_issue, max_points, try_data = 1, try_control = 1;
2180 if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
2182 cached_first_cycle_multipass_dfa_lookahead = lookahead;
2183 max_lookahead_tries = 100;
2184 for (i = 0; i < issue_rate; i++)
2185 max_lookahead_tries *= lookahead;
2187 insn = ready_element (ready, 0);
2188 if (INSN_CODE (insn) < 0)
2189 return ready_remove_first (ready);
2192 && spec_info->flags & (PREFER_NON_DATA_SPEC
2193 | PREFER_NON_CONTROL_SPEC))
2195 for (i = 0, n = ready->n_ready; i < n; i++)
2200 x = ready_element (ready, i);
2203 if (spec_info->flags & PREFER_NON_DATA_SPEC
2204 && !(s & DATA_SPEC))
2207 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2212 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2213 && !(s & CONTROL_SPEC))
2216 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2222 if ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2223 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2224 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2225 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2227 /* Discard speculative instruction that stands first in the ready
2230 change_queue_index (insn, 1);
2234 max_points = ISSUE_POINTS (insn);
2235 more_issue = issue_rate - cycle_issued_insns - 1;
2237 for (i = 1; i < ready->n_ready; i++)
2239 insn = ready_element (ready, i);
2241 = (INSN_CODE (insn) < 0
2242 || (!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2243 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2244 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2245 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2248 if (!ready_try [i] && more_issue-- > 0)
2249 max_points += ISSUE_POINTS (insn);
2252 if (max_issue (ready, &index, max_points) == 0)
2253 return ready_remove_first (ready);
2255 return ready_remove (ready, index);
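/* Illustrative sketch (not part of the pass): before calling max_issue,
   choose_ready fills the READY_TRY mask and computes an upper bound on
   the achievable points.  A reduced version of that preparation, where
   DISQUALIFIED stands in for the INSN_CODE / speculation / target-guard
   tests and POINTS for ISSUE_POINTS:  */

static int
sketch_prepare_probe (char *mask, int n, int free_slots,
                      int (*disqualified) (int), int (*points) (int))
{
  int i, max_points = 0;

  for (i = 0; i < n; i++)
    {
      mask[i] = disqualified (i) != 0;
      /* Only insns that may still fit into this cycle's remaining
         issue slots can contribute points.  */
      if (!mask[i] && free_slots-- > 0)
        max_points += points (i);
    }
  return max_points;
}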
2259 /* Use forward list scheduling to rearrange insns of block pointed to by
2260 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2264 schedule_block (basic_block *target_bb, int rgn_n_insns1)
2266 struct ready_list ready;
2267 int i, first_cycle_insn_p;
2269 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2270 int sort_p, advance, start_clock_var;
2272 /* Head/tail info for this block. */
2273 rtx prev_head = current_sched_info->prev_head;
2274 rtx next_tail = current_sched_info->next_tail;
2275 rtx head = NEXT_INSN (prev_head);
2276 rtx tail = PREV_INSN (next_tail);
2278 /* We used to have code to avoid getting parameters moved from hard
2279 argument registers into pseudos.
2281 However, it was removed when it proved to be of marginal benefit
2282 and caused problems because schedule_block and compute_forward_dependences
2283 had different notions of what the "head" insn was. */
2285 gcc_assert (head != tail || INSN_P (head));
2287 added_recovery_block_p = false;
2291 dump_new_block_header (0, *target_bb, head, tail);
2293 state_reset (curr_state);
2295 /* Allocate the ready list. */
2299 choice_stack = NULL;
2302 extend_ready (rgn_n_insns1 + 1);
2304 ready.first = ready.veclen - 1;
2307 /* It is used for first cycle multipass scheduling. */
2308 temp_state = alloca (dfa_state_size);
2310 if (targetm.sched.md_init)
2311 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2313 /* We start inserting insns after PREV_HEAD. */
2314 last_scheduled_insn = prev_head;
2316 gcc_assert (NOTE_P (last_scheduled_insn)
2317 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2319 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the queue. */
2324 insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
2325 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2327 /* Start just before the beginning of time. */
2330 /* We need the queue and ready lists and clock_var to be initialized
2331 in try_ready () (which is called through init_ready_list ()). */
2332 (*current_sched_info->init_ready_list) ();
2334 /* The algorithm is O(n^2) in the number of ready insns at any given
2335 time in the worst case. Before reload we are more likely to have
2336 big lists so truncate them to a reasonable size. */
2337 if (!reload_completed && ready.n_ready > MAX_SCHED_READY_INSNS)
2339 ready_sort (&ready);
2341 /* Find first free-standing insn past MAX_SCHED_READY_INSNS. */
2342 for (i = MAX_SCHED_READY_INSNS; i < ready.n_ready; i++)
2343 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2346 if (sched_verbose >= 2)
2348 fprintf (sched_dump,
2349 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2350 fprintf (sched_dump,
2351 ";;\t\t before reload => truncated to %d insns\n", i);
2354 /* Delay all insns past it for 1 cycle. */
2355 while (i < ready.n_ready)
2356 queue_insn (ready_remove (&ready, i), 1);
2359 /* Now we can restore basic block notes and maintain precise cfg. */
2360 restore_bb_notes (*target_bb);
2362 last_clock_var = -1;
2367 /* Loop until all the insns in BB are scheduled. */
2368 while ((*current_sched_info->schedule_more_p) ())
2372 start_clock_var = clock_var;
2376 advance_one_cycle ();
2378 /* Add to the ready list all pending insns that can be issued now.
2379 If there are no ready insns, increment clock until one
2380 is ready and add all pending insns at that point to the ready list. */
2382 queue_to_ready (&ready);
2384 gcc_assert (ready.n_ready);
2386 if (sched_verbose >= 2)
2388 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2389 debug_ready_list (&ready);
2391 advance -= clock_var - start_clock_var;
2393 while (advance > 0);
2397 /* Sort the ready list based on priority. */
2398 ready_sort (&ready);
2400 if (sched_verbose >= 2)
2402 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2403 debug_ready_list (&ready);
2407 /* Allow the target to reorder the list, typically for
2408 better instruction bundling. */
2409 if (sort_p && targetm.sched.reorder
2410 && (ready.n_ready == 0
2411 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2413 targetm.sched.reorder (sched_dump, sched_verbose,
2414 ready_lastpos (&ready),
2415 &ready.n_ready, clock_var);
2417 can_issue_more = issue_rate;
2419 first_cycle_insn_p = 1;
2420 cycle_issued_insns = 0;
2427 if (sched_verbose >= 2)
2429 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2431 debug_ready_list (&ready);
2434 if (ready.n_ready == 0
2436 && reload_completed)
2438 /* Allow scheduling insns directly from the queue in case
2439 there's nothing better to do (ready list is empty) but
2440 there are still vacant dispatch slots in the current cycle. */
2441 if (sched_verbose >= 6)
2442 fprintf (sched_dump, ";;\t\tSecond chance\n");
2443 memcpy (temp_state, curr_state, dfa_state_size);
2444 if (early_queue_to_ready (temp_state, &ready))
2445 ready_sort (&ready);
2448 if (ready.n_ready == 0 || !can_issue_more
2449 || state_dead_lock_p (curr_state)
2450 || !(*current_sched_info->schedule_more_p) ())
2453 /* Select and remove the insn from the ready list. */
2456 insn = choose_ready (&ready);
2461 insn = ready_remove_first (&ready);
2463 if (targetm.sched.dfa_new_cycle
2464 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2465 insn, last_clock_var,
2466 clock_var, &sort_p))
2467 /* SORT_P is used by the target to override sorting
2468 of the ready list. This is needed when the target
2469 has modified its internal structures expecting that
2470 the insn will be issued next. As we need the insn
2471 to have the highest priority (so it will be returned by
2472 the ready_remove_first call above), we invoke
2473 ready_add (&ready, insn, true).
2474 But, still, there is one issue: INSN can be later
2475 discarded by scheduler's front end through
2476 current_sched_info->can_schedule_ready_p, hence, won't be issued next. */
2479 ready_add (&ready, insn, true);
2484 memcpy (temp_state, curr_state, dfa_state_size);
2485 if (recog_memoized (insn) < 0)
2487 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2488 || asm_noperands (PATTERN (insn)) >= 0);
2489 if (!first_cycle_insn_p && asm_p)
2490 /* This is an asm insn that we tried to issue on a cycle
2491 other than the first. Issue it on the next cycle. */
2494 /* A USE insn, or something else we don't need to
2495 understand. We can't pass these directly to
2496 state_transition because it will trigger a
2497 fatal error for unrecognizable insns. */
2502 cost = state_transition (temp_state, insn);
2511 queue_insn (insn, cost);
2512 if (SCHED_GROUP_P (insn))
2521 if (current_sched_info->can_schedule_ready_p
2522 && ! (*current_sched_info->can_schedule_ready_p) (insn))
2523 /* We normally get here only if we don't want to move
2524 insn from the split block. */
2526 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
2530 /* DECISION is made. */
2532 if (TODO_SPEC (insn) & SPECULATIVE)
2533 generate_recovery_code (insn);
2535 if (control_flow_insn_p (last_scheduled_insn)
2536 /* This is used to switch basic blocks by request
2537 from the scheduler front-end (actually, sched-ebb.c only).
2538 It is used to process blocks with a single fallthru
2539 edge. If the succeeding block has a jump, that jump will
2540 try to move to the end of the current bb, corrupting the CFG. */
2541 || current_sched_info->advance_target_bb (*target_bb, insn))
2543 *target_bb = current_sched_info->advance_target_bb
2550 x = next_real_insn (last_scheduled_insn);
2552 dump_new_block_header (1, *target_bb, x, tail);
2555 last_scheduled_insn = bb_note (*target_bb);
2558 /* Update counters, etc in the scheduler's front end. */
2559 (*current_sched_info->begin_schedule_ready) (insn,
2560 last_scheduled_insn);
2563 last_scheduled_insn = insn;
2565 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2567 cycle_issued_insns++;
2568 memcpy (curr_state, temp_state, dfa_state_size);
2571 if (targetm.sched.variable_issue)
2573 targetm.sched.variable_issue (sched_dump, sched_verbose,
2574 insn, can_issue_more);
2575 /* A naked CLOBBER or USE generates no instruction, so do
2576 not count them against the issue rate. */
2577 else if (GET_CODE (PATTERN (insn)) != USE
2578 && GET_CODE (PATTERN (insn)) != CLOBBER)
2581 advance = schedule_insn (insn);
2583 /* After issuing an asm insn we should start a new cycle. */
2584 if (advance == 0 && asm_p)
2589 first_cycle_insn_p = 0;
2591 /* Sort the ready list based on priority. This must be
2592 redone here, as schedule_insn may have readied additional
2593 insns that will not be sorted correctly. */
2594 if (ready.n_ready > 0)
2595 ready_sort (&ready);
2597 if (targetm.sched.reorder2
2598 && (ready.n_ready == 0
2599 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2602 targetm.sched.reorder2 (sched_dump, sched_verbose,
2604 ? ready_lastpos (&ready) : NULL,
2605 &ready.n_ready, clock_var);
2613 fprintf (sched_dump, ";;\tReady list (final): ");
2614 debug_ready_list (&ready);
2617 if (current_sched_info->queue_must_finish_empty)
2618 /* Sanity check -- queue must be empty now. Meaningless if region has multiple bbs. */
2620 gcc_assert (!q_size && !ready.n_ready);
2623 /* We must maintain QUEUE_INDEX between blocks in region. */
2624 for (i = ready.n_ready - 1; i >= 0; i--)
2628 x = ready_element (&ready, i);
2629 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2630 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2634 for (i = 0; i <= max_insn_queue_index; i++)
2637 for (link = insn_queue[i]; link; link = XEXP (link, 1))
2642 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2643 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2645 free_INSN_LIST_list (&insn_queue[i]);
2649 if (!current_sched_info->queue_must_finish_empty
2650 || added_recovery_block_p)
2652 /* INSN_TICK (minimum clock tick at which the insn becomes
2653 ready) may not be correct for the insns in the subsequent
2654 blocks of the region. We should use a correct value of
2655 `clock_var' or modify INSN_TICK. It is better to keep
2656 clock_var value equal to 0 at the start of a basic block.
2657 Therefore we modify INSN_TICK here. */
2658 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
2661 #ifdef ENABLE_CHECKING
2662 /* After reload the ia64 backend doesn't maintain BB_END, so
2663 if we want to check anything, better do it now.
2664 It has already clobbered previously scheduled code. */
2665 if (reload_completed)
2666 check_cfg (BB_HEAD (BLOCK_FOR_INSN (prev_head)), 0);
2669 if (targetm.sched.md_finish)
2670 targetm.sched.md_finish (sched_dump, sched_verbose);
2672 /* Update head/tail boundaries. */
2673 head = NEXT_INSN (prev_head);
2674 tail = last_scheduled_insn;
2676 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2677 previously found among the insns. Insert them at the beginning of the insns. */
2681 basic_block head_bb = BLOCK_FOR_INSN (head);
2682 rtx note_head = note_list;
2684 while (PREV_INSN (note_head))
2686 set_block_for_insn (note_head, head_bb);
2687 note_head = PREV_INSN (note_head);
2689 /* In the above loop we've missed this note: */
2690 set_block_for_insn (note_head, head_bb);
2692 PREV_INSN (note_head) = PREV_INSN (head);
2693 NEXT_INSN (PREV_INSN (head)) = note_head;
2694 PREV_INSN (head) = note_list;
2695 NEXT_INSN (note_list) = head;
2702 fprintf (sched_dump, ";; total time = %d\n;; new head = %d\n",
2703 clock_var, INSN_UID (head));
2704 fprintf (sched_dump, ";; new tail = %d\n\n",
2708 current_sched_info->head = head;
2709 current_sched_info->tail = tail;
2714 for (i = 0; i <= rgn_n_insns; i++)
2715 free (choice_stack [i].state);
2716 free (choice_stack);
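/* Illustrative sketch (not part of the pass): the interplay of
   clock_var, the stall queue and the ready list in the loop above, as a
   self-contained toy.  Each item becomes ready at a given tick and is
   issued when the clock reaches it, mirroring what queue_to_ready and
   advance_one_cycle do; stdio is assumed to be available here.  */

static void
sketch_clocked_issue (const int *ready_tick, int n)
{
  int clock, i, issued = 0;

  for (clock = 0; issued < n; clock++)  /* advance_one_cycle analogue.  */
    for (i = 0; i < n; i++)
      if (ready_tick[i] == clock)       /* queue_to_ready analogue.  */
        {
          printf (";; tick %d: issue item %d\n", clock, i);
          issued++;
        }
}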
2719 /* set_priorities: compute the priority of each insn in the block. */
2722 set_priorities (rtx head, rtx tail)
2726 int sched_max_insns_priority =
2727 current_sched_info->sched_max_insns_priority;
2730 if (head == tail && (! INSN_P (head)))
2735 prev_head = PREV_INSN (head);
2736 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
2742 (void) priority (insn);
2744 if (INSN_PRIORITY_KNOWN (insn))
2745 sched_max_insns_priority =
2746 MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
2749 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
2754 /* Next LUID to assign to an instruction. */
2757 /* Initialize some global state for the scheduler. */
2766 /* Switch to working copy of sched_info. */
2767 memcpy (&current_sched_info_var, current_sched_info,
2768 sizeof (current_sched_info_var));
2769 current_sched_info = &current_sched_info_var;
2771 /* Disable speculative loads if cc0 is defined: they are unsafe in its presence. */
2773 flag_schedule_speculative_load = 0;
2776 /* Set dump and sched_verbose for the desired debugging output. If no
2777 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2778 For -fsched-verbose=N, N>=10, print everything to stderr. */
2779 sched_verbose = sched_verbose_param;
2780 if (sched_verbose_param == 0 && dump_file)
2782 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
2783 ? stderr : dump_file);
2785 /* Initialize SPEC_INFO. */
2786 if (targetm.sched.set_sched_flags)
2788 spec_info = &spec_info_var;
2789 targetm.sched.set_sched_flags (spec_info);
2790 if (current_sched_info->flags & DO_SPECULATION)
2791 spec_info->weakness_cutoff =
2792 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
2794 /* So we won't read anything accidentally. */
2796 #ifdef ENABLE_CHECKING
2797 check_sched_flags ();
2801 /* So we won't read anything accidentally. */
2804 /* Initialize issue_rate. */
2805 if (targetm.sched.issue_rate)
2806 issue_rate = targetm.sched.issue_rate ();
2810 if (cached_issue_rate != issue_rate)
2812 cached_issue_rate = issue_rate;
2813 /* To invalidate max_lookahead_tries: */
2814 cached_first_cycle_multipass_dfa_lookahead = 0;
2821 for (i = 0; i < old_max_uid; i++)
2824 h_i_d[i].todo_spec = HARD_DEP;
2825 h_i_d[i].queue_index = QUEUE_NOWHERE;
2826 h_i_d[i].tick = INVALID_TICK;
2827 h_i_d[i].inter_tick = INVALID_TICK;
2830 if (targetm.sched.init_dfa_pre_cycle_insn)
2831 targetm.sched.init_dfa_pre_cycle_insn ();
2833 if (targetm.sched.init_dfa_post_cycle_insn)
2834 targetm.sched.init_dfa_post_cycle_insn ();
2837 dfa_state_size = state_size ();
2838 curr_state = xmalloc (dfa_state_size);
2843 for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
2845 INSN_LUID (insn) = luid;
2847 /* Increment the next luid, unless this is a note. We don't
2848 really need separate IDs for notes and we don't want to
2849 schedule differently depending on whether or not there are
2850 line-number notes, i.e., depending on whether or not we're
2851 generating debugging information. */
2855 if (insn == BB_END (b))
2859 init_dependency_caches (luid);
2861 init_alias_analysis ();
2864 old_last_basic_block = 0;
2869 if (current_sched_info->flags & USE_GLAT)
2872 /* Compute INSN_REG_WEIGHT for all blocks. We must do this before
2873 removing death notes. */
2874 FOR_EACH_BB_REVERSE (b)
2875 find_insn_reg_weight (b);
2877 if (targetm.sched.md_init_global)
2878 targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
2880 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
2881 before_recovery = 0;
2883 #ifdef ENABLE_CHECKING
2884 /* This is used preferably for finding bugs in check_cfg () itself. */
2889 /* Free global data used during insn scheduling. */
2897 free_dependency_caches ();
2898 end_alias_analysis ();
2899 free (line_note_head);
2902 if (targetm.sched.md_finish_global)
2903 targetm.sched.md_finish_global (sched_dump, sched_verbose);
2905 if (spec_info && spec_info->dump)
2907 char c = reload_completed ? 'a' : 'b';
2909 fprintf (spec_info->dump,
2910 ";; %s:\n", current_function_name ());
2912 fprintf (spec_info->dump,
2913 ";; Procedure %cr-begin-data-spec motions == %d\n",
2915 fprintf (spec_info->dump,
2916 ";; Procedure %cr-be-in-data-spec motions == %d\n",
2918 fprintf (spec_info->dump,
2919 ";; Procedure %cr-begin-control-spec motions == %d\n",
2920 c, nr_begin_control);
2921 fprintf (spec_info->dump,
2922 ";; Procedure %cr-be-in-control-spec motions == %d\n",
2923 c, nr_be_in_control);
2926 #ifdef ENABLE_CHECKING
2927 /* After reload ia64 backend clobbers CFG, so can't check anything. */
2928 if (!reload_completed)
2932 current_sched_info = NULL;
2935 /* Fix INSN_TICKs of the instructions in the current block as well as
2936 INSN_TICKs of their dependents.
2937 HEAD and TAIL are the begin and the end of the current scheduled block. */
2939 fix_inter_tick (rtx head, rtx tail)
2941 /* Set of instructions with corrected INSN_TICK. */
2942 bitmap_head processed;
2943 int next_clock = clock_var + 1;
2945 bitmap_initialize (&processed, 0);
2947 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
2948 the INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
2949 across different blocks. */
2950 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
2957 tick = INSN_TICK (head);
2958 gcc_assert (tick >= MIN_TICK);
2960 /* Fix INSN_TICK of instruction from just scheduled block. */
2961 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
2963 bitmap_set_bit (&processed, INSN_LUID (head));
2966 if (tick < MIN_TICK)
2969 INSN_TICK (head) = tick;
2972 for (link = INSN_DEPEND (head); link; link = XEXP (link, 1))
2976 next = XEXP (link, 0);
2977 tick = INSN_TICK (next);
2979 if (tick != INVALID_TICK
2980 /* If NEXT has its INSN_TICK calculated, fix it.
2981 If not - it will be properly calculated from
2982 scratch later in fix_tick_ready. */
2983 && !bitmap_bit_p (&processed, INSN_LUID (next)))
2985 bitmap_set_bit (&processed, INSN_LUID (next));
2988 if (tick < MIN_TICK)
2991 if (tick > INTER_TICK (next))
2992 INTER_TICK (next) = tick;
2994 tick = INTER_TICK (next);
2996 INSN_TICK (next) = tick;
3001 bitmap_clear (&processed);
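/* Illustrative sketch (not part of the pass): the core of the
   normalization above.  Shifting each tick down by the clock value at
   which the block finished lets the next block start with clock_var at
   0; the clamp corresponds to the MIN_TICK lower bound.  */

static int
sketch_normalize_tick (int tick, int next_clock, int min_tick)
{
  tick -= next_clock;
  if (tick < min_tick)
    tick = min_tick;
  return tick;
}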
3004 /* Check if NEXT is ready to be added to the ready or queue list.
3005 If "yes", add it to the proper list.
3007 -1 - is not ready yet,
3008 0 - added to the ready list,
3009 0 < N - queued for N cycles. */
3011 try_ready (rtx next)
3016 ts = &TODO_SPEC (next);
3019 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3020 && ((old_ts & HARD_DEP)
3021 || (old_ts & SPECULATIVE)));
3023 if (!(current_sched_info->flags & DO_SPECULATION))
3025 if (!LOG_LINKS (next))
3030 *ts &= ~SPECULATIVE & ~HARD_DEP;
3032 link = LOG_LINKS (next);
3035 /* LOG_LINKS are maintained sorted.
3036 So if DEP_STATUS of the first dep is SPECULATIVE,
3037 then all other deps are speculative too. */
3038 if (DEP_STATUS (link) & SPECULATIVE)
3040 /* Now we've got NEXT with speculative deps only.
3041 1. Look at the deps to see what we have to do.
3042 2. Check if we can do 'todo'. */
3043 *ts = DEP_STATUS (link) & SPECULATIVE;
3044 while ((link = XEXP (link, 1)))
3045 *ts = ds_merge (*ts, DEP_STATUS (link) & SPECULATIVE);
3047 if (dep_weak (*ts) < spec_info->weakness_cutoff)
3048 /* Too few points. */
3049 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3057 gcc_assert (*ts == old_ts
3058 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3059 else if (current_sched_info->new_ready)
3060 *ts = current_sched_info->new_ready (next, *ts);
3062 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
3063 have its original pattern or changed (speculative) one. This is due
3064 to changing ebb in region scheduling.
3065 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
3066 has a speculative pattern.
3068 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3069 control-speculative NEXT could have been discarded by sched-rgn.c
3070 (the same case as when discarded by can_schedule_ready_p ()). */
3072 if ((*ts & SPECULATIVE)
3073 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3074 need to change anything. */
3080 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3082 res = speculate_insn (next, *ts, &new_pat);
3087 /* It would be nice to change DEP_STATUS of all dependences,
3088 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3089 so we won't reanalyze anything. */
3090 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3094 /* We follow the rule that every speculative insn
3095 has a non-null ORIG_PAT. */
3096 if (!ORIG_PAT (next))
3097 ORIG_PAT (next) = PATTERN (next);
3101 if (!ORIG_PAT (next))
3102 /* If we are going to overwrite the original pattern of insn, save it. */
3104 ORIG_PAT (next) = PATTERN (next);
3106 change_pattern (next, new_pat);
3114 /* We need to restore pattern only if (*ts == 0), because otherwise it is
3115 either correct (*ts & SPECULATIVE),
3116 or we simply don't care (*ts & HARD_DEP). */
3118 gcc_assert (!ORIG_PAT (next)
3119 || !RECOVERY_BLOCK (next)
3120 || RECOVERY_BLOCK (next) == EXIT_BLOCK_PTR);
3124 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3125 control-speculative NEXT could have been discarded by sched-rgn.c
3126 (the same case as when discarded by can_schedule_ready_p ()). */
3127 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3129 change_queue_index (next, QUEUE_NOWHERE);
3132 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !RECOVERY_BLOCK (next))
3133 /* We should change the pattern of every previously speculative
3134 instruction - and we determine whether NEXT was speculative by using
3135 the ORIG_PAT field. Except in one case - simple checks have ORIG_PAT
3136 set too, hence we also check RECOVERY_BLOCK. */
3138 change_pattern (next, ORIG_PAT (next));
3139 ORIG_PAT (next) = 0;
3142 if (sched_verbose >= 2)
3144 int s = TODO_SPEC (next);
3146 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3147 (*current_sched_info->print_insn) (next, 0));
3149 if (spec_info && spec_info->dump)
3152 fprintf (spec_info->dump, "; data-spec;");
3153 if (s & BEGIN_CONTROL)
3154 fprintf (spec_info->dump, "; control-spec;");
3155 if (s & BE_IN_CONTROL)
3156 fprintf (spec_info->dump, "; in-control-spec;");
3159 fprintf (sched_dump, "\n");
3162 adjust_priority (next);
3164 return fix_tick_ready (next);
3167 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3169 fix_tick_ready (rtx next)
3174 link = RESOLVED_DEPS (next);
3180 tick = INSN_TICK (next);
3181 /* If TICK is not equal to INVALID_TICK, then update
3182 INSN_TICK of NEXT with the most recent resolved dependence
3183 cost. Otherwise, recalculate from scratch. */
3184 full_p = tick == INVALID_TICK;
3190 pro = XEXP (link, 0);
3191 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3193 tick1 = INSN_TICK (pro) + insn_cost (pro, link, next);
3197 while ((link = XEXP (link, 1)) && full_p);
3202 INSN_TICK (next) = tick;
3204 delay = tick - clock_var;
3206 delay = QUEUE_READY;
3208 change_queue_index (next, delay);
3213 /* Move NEXT to the proper queue list with (DELAY >= 1),
3214 or add it to the ready list (DELAY == QUEUE_READY),
3215 or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE). */
3217 change_queue_index (rtx next, int delay)
3219 int i = QUEUE_INDEX (next);
3221 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3223 gcc_assert (i != QUEUE_SCHEDULED);
3225 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3226 || (delay < 0 && delay == i))
3227 /* We have nothing to do. */
3230 /* Remove NEXT from wherever it is now. */
3231 if (i == QUEUE_READY)
3232 ready_remove_insn (next);
3234 queue_remove (next);
3236 /* Add it to the proper place. */
3237 if (delay == QUEUE_READY)
3238 ready_add (readyp, next, false);
3239 else if (delay >= 1)
3240 queue_insn (next, delay);
3242 if (sched_verbose >= 2)
3244 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3245 (*current_sched_info->print_insn) (next, 0));
3247 if (delay == QUEUE_READY)
3248 fprintf (sched_dump, " into ready\n");
3249 else if (delay >= 1)
3250 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3252 fprintf (sched_dump, " removed from ready or queue lists\n");
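/* Illustrative sketch (not part of the pass): insn_queue is a circular
   array of buckets, so "DELAY cycles from now" is computed with a
   masked add (the NEXT_Q_AFTER idiom used with q_ptr above), assuming
   the queue size is a power of two, as max_insn_queue_index + 1 is.  */

static int
sketch_queue_slot (int q_ptr, int delay, int qsize)
{
  /* Wrap around without division; QSIZE must be a power of two.  */
  return (q_ptr + delay) & (qsize - 1);
}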
3256 /* INSN is being scheduled. Resolve the dependence between INSN and NEXT. */
3258 resolve_dep (rtx next, rtx insn)
3262 INSN_DEP_COUNT (next)--;
3264 dep = remove_list_elem (insn, &LOG_LINKS (next));
3265 XEXP (dep, 1) = RESOLVED_DEPS (next);
3266 RESOLVED_DEPS (next) = dep;
3268 gcc_assert ((INSN_DEP_COUNT (next) != 0 || !LOG_LINKS (next))
3269 && (LOG_LINKS (next) || INSN_DEP_COUNT (next) == 0));
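/* Illustrative sketch (not part of the pass): resolving a dependence
   moves one node from the pending LOG_LINKS list onto the RESOLVED_DEPS
   list.  The real code removes the specific element for INSN with
   remove_list_elem; with a hypothetical node type, moving the head
   element looks like this:  */

struct sketch_dep
{
  struct sketch_dep *next;
};

static void
sketch_resolve_head (struct sketch_dep **pending,
                     struct sketch_dep **resolved)
{
  struct sketch_dep *dep = *pending;

  *pending = dep->next;   /* Unlink from the pending list.  */
  dep->next = *resolved;  /* Push onto the resolved list.  */
  *resolved = dep;
}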
3272 /* Extend H_I_D data. */
3276 /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
3277 pseudos which do not cross calls. */
3278 int new_max_uid = get_max_uid() + 1;
3280 h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
3281 old_max_uid = new_max_uid;
3283 if (targetm.sched.h_i_d_extended)
3284 targetm.sched.h_i_d_extended ();
3287 /* Extend READY, READY_TRY and CHOICE_STACK arrays.
3288 N_NEW_INSNS is the number of additional elements to allocate. */
3290 extend_ready (int n_new_insns)
3294 readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
3295 readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);
3297 ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
3298 rgn_n_insns + 1, sizeof (char));
3300 rgn_n_insns += n_new_insns;
3302 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3305 for (i = rgn_n_insns; n_new_insns--; i--)
3306 choice_stack[i].state = xmalloc (dfa_state_size);
3309 /* Extend global scheduler structures (those, that live across calls to
3310 schedule_block) to include information about just emitted INSN. */
3312 extend_global (rtx insn)
3314 gcc_assert (INSN_P (insn));
3315 /* These structures have scheduler scope. */
3319 extend_dependency_caches (1, 0);
3322 /* Extends global and local scheduler structures to include information
3323 about just emitted INSN. */
3325 extend_all (rtx insn)
3327 extend_global (insn);
3329 /* These structures have block scope. */
3332 (*current_sched_info->add_remove_insn) (insn, 0);
3335 /* Initialize h_i_d entry of the new INSN with default values.
3336 Values, that are not explicitly initialized here, hold zero. */
3338 init_h_i_d (rtx insn)
3340 INSN_LUID (insn) = luid++;
3341 INSN_COST (insn) = -1;
3342 TODO_SPEC (insn) = HARD_DEP;
3343 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
3344 INSN_TICK (insn) = INVALID_TICK;
3345 INTER_TICK (insn) = INVALID_TICK;
3346 find_insn_reg_weight1 (insn);
3349 /* Generates recovery code for INSN. */
3351 generate_recovery_code (rtx insn)
3353 if (TODO_SPEC (insn) & BEGIN_SPEC)
3354 begin_speculative_block (insn);
3356 /* Here we have insn with no dependencies to
3357 instructions other than CHECK_SPEC ones. */
3359 if (TODO_SPEC (insn) & BE_IN_SPEC)
3360 add_to_speculative_block (insn);
3364 Tries to add speculative dependencies of type FS between instructions
3365 in LINK list and TWIN. */
3367 process_insn_depend_be_in_spec (rtx link, rtx twin, ds_t fs)
3369 for (; link; link = XEXP (link, 1))
3374 consumer = XEXP (link, 0);
3376 ds = DEP_STATUS (link);
3378 if (/* If we want to create speculative dep. */
3380 /* And we can do that because this is a true dep. */
3381 && (ds & DEP_TYPES) == DEP_TRUE)
3383 gcc_assert (!(ds & BE_IN_SPEC));
3385 if (/* If this dep can be overcome with 'begin speculation'. */
3387 /* Then we have a choice: keep the dep 'begin speculative'
3388 or transform it into 'be in speculative'. */
3390 if (/* In try_ready we assert that if insn once became ready
3391 it can be removed from the ready (or queue) list only
3392 due to a backend decision. Hence we can't let the
3393 probability of the speculative dep decrease. */
3394 dep_weak (ds) <= dep_weak (fs))
3395 /* Transform it to be in speculative. */
3396 ds = (ds & ~BEGIN_SPEC) | fs;
3399 /* Mark the dep as 'be in speculative'. */
3403 add_back_forw_dep (consumer, twin, REG_NOTE_KIND (link), ds);
3407 /* Generates recovery code for BEGIN speculative INSN. */
3409 begin_speculative_block (rtx insn)
3411 if (TODO_SPEC (insn) & BEGIN_DATA)
3413 if (TODO_SPEC (insn) & BEGIN_CONTROL)
3416 create_check_block_twin (insn, false);
3418 TODO_SPEC (insn) &= ~BEGIN_SPEC;
3421 /* Generates recovery code for BE_IN speculative INSN. */
3423 add_to_speculative_block (rtx insn)
3426 rtx link, twins = NULL;
3428 ts = TODO_SPEC (insn);
3429 gcc_assert (!(ts & ~BE_IN_SPEC));
3431 if (ts & BE_IN_DATA)
3433 if (ts & BE_IN_CONTROL)
3436 TODO_SPEC (insn) &= ~BE_IN_SPEC;
3437 gcc_assert (!TODO_SPEC (insn));
3439 DONE_SPEC (insn) |= ts;
3441 /* First we convert all simple checks to branchy. */
3442 for (link = LOG_LINKS (insn); link;)
3446 check = XEXP (link, 0);
3448 if (RECOVERY_BLOCK (check))
3450 create_check_block_twin (check, true);
3451 link = LOG_LINKS (insn);
3454 link = XEXP (link, 1);
3457 clear_priorities (insn);
3461 rtx link, check, twin;
3464 link = LOG_LINKS (insn);
3465 gcc_assert (!(DEP_STATUS (link) & BEGIN_SPEC)
3466 && (DEP_STATUS (link) & BE_IN_SPEC)
3467 && (DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3469 check = XEXP (link, 0);
3470 gcc_assert (!RECOVERY_BLOCK (check) && !ORIG_PAT (check)
3471 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
3473 rec = BLOCK_FOR_INSN (check);
3475 twin = emit_insn_before (copy_rtx (PATTERN (insn)), BB_END (rec));
3476 extend_global (twin);
3478 RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3480 if (sched_verbose && spec_info->dump)
3481 /* INSN_BB (insn) isn't determined for twin insns yet.
3482 So we can't use current_sched_info->print_insn. */
3483 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3484 INSN_UID (twin), rec->index);
3486 twins = alloc_INSN_LIST (twin, twins);
3488 /* Add dependences between TWIN and all appropriate
3489 instructions from REC. */
3492 add_back_forw_dep (twin, check, REG_DEP_TRUE, DEP_TRUE);
3496 link = XEXP (link, 1);
3499 check = XEXP (link, 0);
3500 if (BLOCK_FOR_INSN (check) == rec)
3510 process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, ts);
3512 for (link = LOG_LINKS (insn); link;)
3514 check = XEXP (link, 0);
3516 if (BLOCK_FOR_INSN (check) == rec)
3518 delete_back_forw_dep (insn, check);
3519 link = LOG_LINKS (insn);
3522 link = XEXP (link, 1);
3525 while (LOG_LINKS (insn));
3527 /* We can't add the dependence between insn and twin earlier because
3528 that would make twin appear in the INSN_DEPEND (insn). */
3533 twin = XEXP (twins, 0);
3534 calc_priorities (twin);
3535 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3537 twin = XEXP (twins, 1);
3538 free_INSN_LIST_node (twins);
3543 /* Extend the array pointed to by P, filling only the new part with zeros. */
3545 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
3547 gcc_assert (new_nmemb >= old_nmemb);
3548 p = XRESIZEVAR (void, p, new_nmemb * size);
3549 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
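/* Illustrative usage sketch (not part of the pass): growing a
   zero-initialized flag array the way extend_ready grows ready_try
   with xrecalloc above.  */

static char *
sketch_grow_flags (char *flags, size_t old_n, size_t new_n)
{
  return (char *) xrecalloc (flags, new_n, old_n, sizeof (char));
}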
3553 /* Return the probability of speculation success for the speculation of type DS. */
3561 dt = FIRST_SPEC_TYPE;
3566 res *= (ds_t) get_dep_weak (ds, dt);
3570 if (dt == LAST_SPEC_TYPE)
3572 dt <<= SPEC_TYPE_SHIFT;
3578 res /= MAX_DEP_WEAK;
3580 if (res < MIN_DEP_WEAK)
3583 gcc_assert (res <= MAX_DEP_WEAK);
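/* Illustrative sketch (not part of the pass): each weakness above
   approximates a success probability scaled to 0..MAX, so combining two
   of them is the scaled product, clamped to MIN, exactly as done above
   with MAX_DEP_WEAK and MIN_DEP_WEAK.  The weights are small enough
   that int arithmetic does not overflow.  */

static int
sketch_combine_weakness (int w1, int w2, int max, int min)
{
  int res = (w1 * w2) / max;

  return res < min ? min : res;
}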
3589 Find fallthru edge from PRED. */
3591 find_fallthru_edge (basic_block pred)
3597 succ = pred->next_bb;
3598 gcc_assert (succ->prev_bb == pred);
3600 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
3602 FOR_EACH_EDGE (e, ei, pred->succs)
3603 if (e->flags & EDGE_FALLTHRU)
3605 gcc_assert (e->dest == succ);
3611 FOR_EACH_EDGE (e, ei, succ->preds)
3612 if (e->flags & EDGE_FALLTHRU)
3614 gcc_assert (e->src == pred);
3622 /* Initialize BEFORE_RECOVERY variable. */
3624 init_before_recovery (void)
3629 last = EXIT_BLOCK_PTR->prev_bb;
3630 e = find_fallthru_edge (last);
3634 /* We create two basic blocks:
3635 1. A single-instruction block inserted right after E->SRC, which jumps to
3637 2. an empty block right before EXIT_BLOCK.
3638 Between these two blocks recovery blocks will be emitted. */
3640 basic_block single, empty;
3643 single = create_empty_bb (last);
3644 empty = create_empty_bb (single);
3646 single->count = last->count;
3647 empty->count = last->count;
3648 single->frequency = last->frequency;
3649 empty->frequency = last->frequency;
3650 BB_COPY_PARTITION (single, last);
3651 BB_COPY_PARTITION (empty, last);
3653 redirect_edge_succ (e, single);
3654 make_single_succ_edge (single, empty, 0);
3655 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
3656 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
3658 label = block_label (empty);
3659 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
3660 JUMP_LABEL (x) = label;
3661 LABEL_NUSES (label)++;
3664 emit_barrier_after (x);
3666 add_block (empty, 0);
3667 add_block (single, 0);
3669 before_recovery = single;
3671 if (sched_verbose >= 2 && spec_info->dump)
3672 fprintf (spec_info->dump,
3673 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3674 last->index, single->index, empty->index);
3677 before_recovery = last;
3680 /* Returns new recovery block. */
3682 create_recovery_block (void)
3687 added_recovery_block_p = true;
3689 if (!before_recovery)
3690 init_before_recovery ();
3692 label = gen_label_rtx ();
3693 gcc_assert (BARRIER_P (NEXT_INSN (BB_END (before_recovery))));
3694 label = emit_label_after (label, NEXT_INSN (BB_END (before_recovery)));
3696 rec = create_basic_block (label, label, before_recovery);
3697 emit_barrier_after (BB_END (rec));
3699 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
3700 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
3702 if (sched_verbose && spec_info->dump)
3703 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
3706 before_recovery = rec;
3711 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
3712 INSN is a simple check that should be converted to a branchy one. */
3714 create_check_block_twin (rtx insn, bool mutate_p)
3717 rtx label, check, twin, link;
3720 gcc_assert (ORIG_PAT (insn)
3722 || (RECOVERY_BLOCK (insn) == EXIT_BLOCK_PTR
3723 && !(TODO_SPEC (insn) & SPECULATIVE))));
3725 /* Create recovery block. */
3726 if (mutate_p || targetm.sched.needs_block_p (insn))
3728 rec = create_recovery_block ();
3729 label = BB_HEAD (rec);
3733 rec = EXIT_BLOCK_PTR;
3738 check = targetm.sched.gen_check (insn, label, mutate_p);
3740 if (rec != EXIT_BLOCK_PTR)
3742 /* To have mem_reg alive at the beginning of second_bb,
3743 we emit the check BEFORE insn, so that after splitting,
3744 insn will be at the beginning of second_bb, which will
3745 provide us with the correct life information. */
3746 check = emit_jump_insn_before (check, insn);
3747 JUMP_LABEL (check) = label;
3748 LABEL_NUSES (label)++;
3751 check = emit_insn_before (check, insn);
3753 /* Extend data structures. */
3755 RECOVERY_BLOCK (check) = rec;
3757 if (sched_verbose && spec_info->dump)
3758 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
3759 (*current_sched_info->print_insn) (check, 0));
3761 gcc_assert (ORIG_PAT (insn));
3763 /* Initialize TWIN (twin is a duplicate of original instruction
3764 in the recovery block). */
3765 if (rec != EXIT_BLOCK_PTR)
3769 for (link = RESOLVED_DEPS (insn); link; link = XEXP (link, 1))
3770 if (DEP_STATUS (link) & DEP_OUTPUT)
3772 RESOLVED_DEPS (check) =
3773 alloc_DEPS_LIST (XEXP (link, 0), RESOLVED_DEPS (check), DEP_TRUE);
3774 PUT_REG_NOTE_KIND (RESOLVED_DEPS (check), REG_DEP_TRUE);
3777 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
3778 extend_global (twin);
3780 if (sched_verbose && spec_info->dump)
3781 /* INSN_BB (insn) isn't determined for twin insns yet.
3782 So we can't use current_sched_info->print_insn. */
3783 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3784 INSN_UID (twin), rec->index);
3788 ORIG_PAT (check) = ORIG_PAT (insn);
3789 HAS_INTERNAL_DEP (check) = 1;
3791 /* ??? We probably should change all OUTPUT dependencies to (TRUE | OUTPUT). */
3795 RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3797 if (rec != EXIT_BLOCK_PTR)
3798 /* In case of branchy check, fix CFG. */
3800 basic_block first_bb, second_bb;
3805 first_bb = BLOCK_FOR_INSN (check);
3806 e = split_block (first_bb, check);
3807 /* split_block emits a note if *check == BB_END. Probably it
3808 is better to rip that note off. */
3809 gcc_assert (e->src == first_bb);
3810 second_bb = e->dest;
3812 /* This is fixing of incoming edge. */
3813 /* ??? Which other flags should be specified? */
3814 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
3815 /* Partition type is the same if it is "unpartitioned". */
3816 edge_flags = EDGE_CROSSING;
3820 e = make_edge (first_bb, rec, edge_flags);
3822 add_block (second_bb, first_bb);
3824 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
3825 label = block_label (second_bb);
3826 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
3827 JUMP_LABEL (jump) = label;
3828 LABEL_NUSES (label)++;
3829 extend_global (jump);
3831 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
3832 /* Partition type is the same if it is "unpartitioned". */
3834 /* Rewritten from cfgrtl.c. */
3835 if (flag_reorder_blocks_and_partition
3836 && targetm.have_named_sections
3837 /*&& !any_condjump_p (jump)*/)
3838 /* any_condjump_p (jump) == false.
3839 We don't need the same note for the check because
3840 any_condjump_p (check) == true. */
3842 REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
3846 edge_flags = EDGE_CROSSING;
3851 make_single_succ_edge (rec, second_bb, edge_flags);
3853 add_block (rec, EXIT_BLOCK_PTR);
3856 /* Move backward dependences from INSN to CHECK and
3857 move forward dependences from INSN to TWIN. */
3858 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
3862 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
3863 check --TRUE--> producer ??? or ANTI ???
3864 twin --TRUE--> producer
3865 twin --ANTI--> check
3867 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
3868 check --ANTI--> producer
3869 twin --ANTI--> producer
3870 twin --ANTI--> check
3872 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
3873 check ~~TRUE~~> producer
3874 twin ~~TRUE~~> producer
3875 twin --ANTI--> check */
3877 ds = DEP_STATUS (link);
3879 if (ds & BEGIN_SPEC)
3881 gcc_assert (!mutate_p);
3885 if (rec != EXIT_BLOCK_PTR)
3887 add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3888 add_back_forw_dep (twin, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3891 add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3894 for (link = LOG_LINKS (insn); link;)
3895 if ((DEP_STATUS (link) & BEGIN_SPEC)
3897 /* We can delete this dep only if we totally overcome it with
3898 BEGIN_SPECULATION. */
3900 delete_back_forw_dep (insn, XEXP (link, 0));
3901 link = LOG_LINKS (insn);
3904 link = XEXP (link, 1);
3908 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
3911 gcc_assert (!DONE_SPEC (insn));
3915 ds_t ts = TODO_SPEC (insn);
3917 DONE_SPEC (insn) = ts & BEGIN_SPEC;
3918 CHECK_SPEC (check) = ts & BEGIN_SPEC;
3920 if (ts & BEGIN_DATA)
3921 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
3922 if (ts & BEGIN_CONTROL)
3923 fs = set_dep_weak (fs, BE_IN_CONTROL, get_dep_weak (ts, BEGIN_CONTROL));
3926 CHECK_SPEC (check) = CHECK_SPEC (insn);
3928 /* Future speculations: call the helper. */
3929 process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, fs);
3931 if (rec != EXIT_BLOCK_PTR)
3933 /* Which types of dependencies to use here is, generally, a
3934 machine-dependent question... But, for now,
3939 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE);
3940 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3944 if (spec_info->dump)
3945 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
3946 (*current_sched_info->print_insn) (insn, 0));
3948 for (link = INSN_DEPEND (insn); link; link = INSN_DEPEND (insn))
3949 delete_back_forw_dep (XEXP (link, 0), insn);
3951 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
3954 sched_remove_insn (insn);
3957 add_back_forw_dep (twin, check, REG_DEP_ANTI, DEP_ANTI);
3960 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
3963 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
3964 because it'll be done later in add_to_speculative_block. */
3966 clear_priorities (twin);
3967 calc_priorities (twin);
3971 /* Remove dependencies between instructions in the recovery block REC
3972 and the usual region instructions. Keep the inner dependences, so it
3973 won't be necessary to recompute them. */
3975 fix_recovery_deps (basic_block rec)
3977 rtx note, insn, link, jump, ready_list = 0;
3978 bitmap_head in_ready;
3980 bitmap_initialize (&in_ready, 0);
3982 /* NOTE - a basic block note. */
3983 note = NEXT_INSN (BB_HEAD (rec));
3984 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
3985 insn = BB_END (rec);
3986 gcc_assert (JUMP_P (insn));
3987 insn = PREV_INSN (insn);
3991 for (link = INSN_DEPEND (insn); link;)
3995 consumer = XEXP (link, 0);
3997 if (BLOCK_FOR_INSN (consumer) != rec)
3999 delete_back_forw_dep (consumer, insn);
4001 if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
4003 ready_list = alloc_INSN_LIST (consumer, ready_list);
4004 bitmap_set_bit (&in_ready, INSN_LUID (consumer));
4007 link = INSN_DEPEND (insn);
4011 gcc_assert ((DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
4013 link = XEXP (link, 1);
4017 insn = PREV_INSN (insn);
4019 while (insn != note);
4021 bitmap_clear (&in_ready);
4023 /* Try to add instructions to the ready or queue list. */
4024 for (link = ready_list; link; link = XEXP (link, 1))
4025 try_ready (XEXP (link, 0));
4026 free_INSN_LIST_list (&ready_list);
4028 /* Fix the jump's dependences. */
4029 insn = BB_HEAD (rec);
4030 jump = BB_END (rec);
4032 gcc_assert (LABEL_P (insn));
4033 insn = NEXT_INSN (insn);
4035 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4036 add_jump_dependencies (insn, jump);
4039 /* The function saves line notes at the beginning of block B. */
4041 associate_line_notes_with_blocks (basic_block b)
4045 for (line = BB_HEAD (b); line; line = PREV_INSN (line))
4046 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
4048 line_note_head[b->index] = line;
4051 /* Do a forward search as well, since we won't get to see the first
4052 notes in a basic block. */
4053 for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
4057 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
4058 line_note_head[b->index] = line;
4062 /* Change the pattern of INSN to NEW_PAT. */
4064 change_pattern (rtx insn, rtx new_pat)
4068 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4070 /* Invalidate INSN_COST, so it'll be recalculated. */
4071 INSN_COST (insn) = -1;
4072 /* Invalidate INSN_TICK, so it'll be recalculated. */
4073 INSN_TICK (insn) = INVALID_TICK;
4074 dfa_clear_single_insn_cache (insn);
4078 /* Return: -1 - can't speculate,
4079 0 - for speculation with REQUEST mode it is OK to use the
4080 current instruction pattern,
4081 1 - need to change the pattern for *NEW_PAT to be speculative. */
4083 speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4085 gcc_assert (current_sched_info->flags & DO_SPECULATION
4086 && (request & SPECULATIVE));
4088 if (!NONJUMP_INSN_P (insn)
4089 || HAS_INTERNAL_DEP (insn)
4090 || SCHED_GROUP_P (insn)
4091 || side_effects_p (PATTERN (insn))
4092 || (request & spec_info->mask) != request)
4095 gcc_assert (!RECOVERY_BLOCK (insn));
4097 if (request & BE_IN_SPEC)
4099 if (may_trap_p (PATTERN (insn)))
4102 if (!(request & BEGIN_SPEC))
4106 return targetm.sched.speculate_insn (insn, request & BEGIN_SPEC, new_pat);
4109 /* Print some information about block BB, which starts with HEAD and
4110 ends with TAIL, before scheduling it.
4111 I is zero if the scheduler is about to start with a fresh ebb. */
4113 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4116 fprintf (sched_dump,
4117 ";; ======================================================\n");
4119 fprintf (sched_dump,
4120 ";; =====================ADVANCING TO=====================\n");
4121 fprintf (sched_dump,
4122 ";; -- basic block %d from %d to %d -- %s reload\n",
4123 bb->index, INSN_UID (head), INSN_UID (tail),
4124 (reload_completed ? "after" : "before"));
4125 fprintf (sched_dump,
4126 ";; ======================================================\n");
4127 fprintf (sched_dump, "\n");
4130 /* Unlink basic block notes and labels and save them, so they
4131 can be easily restored. We unlink basic block notes in the EBB to
4132 provide backward compatibility with the previous code, as target backends
4133 assume that there will be only instructions between
4134 current_sched_info->{head and tail}. We restore these notes as soon as we can.
4136 FIRST (LAST) is the first (last) basic block in the ebb.
4137 NB: In the usual case (FIRST == LAST) nothing is really done. */
4139 unlink_bb_notes (basic_block first, basic_block last)
4141 /* We DON'T unlink basic block notes of the first block in the ebb. */
4145 bb_header = xmalloc (last_basic_block * sizeof (*bb_header));
4147 /* Make a sentinel. */
4148 if (last->next_bb != EXIT_BLOCK_PTR)
4149 bb_header[last->next_bb->index] = 0;
4151 first = first->next_bb;
4154 rtx prev, label, note, next;
4156 label = BB_HEAD (last);
4157 if (LABEL_P (label))
4158 note = NEXT_INSN (label);
4161 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4163 prev = PREV_INSN (label);
4164 next = NEXT_INSN (note);
4165 gcc_assert (prev && next);
4167 NEXT_INSN (prev) = next;
4168 PREV_INSN (next) = prev;
4170 bb_header[last->index] = label;
4175 last = last->prev_bb;
4180 /* Restore basic block notes.
4181 FIRST is the first basic block in the ebb. */
4183 restore_bb_notes (basic_block first)
4188 /* We DON'T unlink basic block notes of the first block in the ebb. */
4189 first = first->next_bb;
4190 /* Remember: FIRST is actually the second basic block in the ebb. */
4192 while (first != EXIT_BLOCK_PTR
4193 && bb_header[first->index])
4195 rtx prev, label, note, next;
4197 label = bb_header[first->index];
4198 prev = PREV_INSN (label);
4199 next = NEXT_INSN (prev);
4201 if (LABEL_P (label))
4202 note = NEXT_INSN (label);
4205 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4207 bb_header[first->index] = 0;
4209 NEXT_INSN (prev) = label;
4210 NEXT_INSN (note) = next;
4211 PREV_INSN (next) = note;
4213 first = first->next_bb;
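/* Illustrative sketch (not part of the pass): unlink_bb_notes detaches
   the [label, bb-note] span from the insn chain but leaves the span's
   own prev/next pointers intact, which is what lets restore_bb_notes
   splice it back in.  With a hypothetical doubly-linked node type, and
   assuming the recorded neighbours are still adjacent (the real
   restore_bb_notes recomputes the new successor instead, since
   scheduling may have moved insns in between):  */

struct sketch_chain
{
  struct sketch_chain *prev, *next;
};

static void
sketch_detach_span (struct sketch_chain *first, struct sketch_chain *last)
{
  /* The neighbours bypass the span; FIRST/LAST still remember them.  */
  first->prev->next = last->next;
  last->next->prev = first->prev;
}

static void
sketch_reattach_span (struct sketch_chain *first, struct sketch_chain *last)
{
  /* Invert the bypass using the pointers kept inside the span.  */
  first->prev->next = first;
  last->next->prev = last;
}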
4220 /* Extend per basic block data structures of the scheduler.
4221 If BB is NULL, initialize structures for the whole CFG.
4222 Otherwise, initialize them for the just created BB. */
4224 extend_bb (basic_block bb)
4228 if (write_symbols != NO_DEBUG)
4230 /* Save-line-note-head:
4231 Determine the line-number at the start of each basic block.
4232 This must be computed and saved now, because after a basic block's
4233 predecessor has been scheduled, it is impossible to accurately
4234 determine the correct line number for the first insn of the block. */
4235 line_note_head = xrecalloc (line_note_head, last_basic_block,
4236 old_last_basic_block,
4237 sizeof (*line_note_head));
4240 associate_line_notes_with_blocks (bb);
4243 associate_line_notes_with_blocks (bb);
4246 old_last_basic_block = last_basic_block;
4248 if (current_sched_info->flags & USE_GLAT)
4250 glat_start = xrealloc (glat_start,
4251 last_basic_block * sizeof (*glat_start));
4252 glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
4255 /* The following is done to keep current_sched_info->next_tail non null. */
4257 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4258 if (NEXT_INSN (insn) == 0
4261 /* Don't emit a NOTE if it would end up before a BARRIER. */
4262 && !BARRIER_P (NEXT_INSN (insn))))
4264 emit_note_after (NOTE_INSN_DELETED, insn);
4265 /* Make insn appear outside BB. */
4266 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4270 /* Add a basic block BB to extended basic block EBB.
4271 If EBB is EXIT_BLOCK_PTR, then BB is a recovery block.
4272 If EBB is NULL, then BB should be a new region. */
4274 add_block (basic_block bb, basic_block ebb)
4276 gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO
4277 && bb->il.rtl->global_live_at_start == 0
4278 && bb->il.rtl->global_live_at_end == 0);
4282 glat_start[bb->index] = 0;
4283 glat_end[bb->index] = 0;
4285 if (current_sched_info->add_block)
4286 /* This changes only data structures of the front-end. */
4287 current_sched_info->add_block (bb, ebb);
4291 Fix CFG after both in- and inter-block movement of
4292 control_flow_insn_p JUMP. */
4294 fix_jump_move (rtx jump)
4296 basic_block bb, jump_bb, jump_bb_next;
4298 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4299 jump_bb = BLOCK_FOR_INSN (jump);
4300 jump_bb_next = jump_bb->next_bb;
4302 gcc_assert (current_sched_info->flags & SCHED_EBB
4303 || (RECOVERY_BLOCK (jump)
4304 && RECOVERY_BLOCK (jump) != EXIT_BLOCK_PTR));
4306 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4307 /* if jump_bb_next is not empty. */
4308 BB_END (jump_bb) = BB_END (jump_bb_next);
4310 if (BB_END (bb) != PREV_INSN (jump))
4311 /* Then there are instructions after jump that should be placed to jump_bb_next. */
4313 BB_END (jump_bb_next) = BB_END (bb);
4315 /* Otherwise jump_bb_next is empty. */
4316 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4318 /* To make assertion in move_insn happy. */
4319 BB_END (bb) = PREV_INSN (jump);
4321 update_bb_for_insn (jump_bb_next);
4324 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
4326 move_block_after_check (rtx jump)
4328 basic_block bb, jump_bb, jump_bb_next;
4331 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4332 jump_bb = BLOCK_FOR_INSN (jump);
4333 jump_bb_next = jump_bb->next_bb;
4335 update_bb_for_insn (jump_bb);
4337 gcc_assert (RECOVERY_BLOCK (jump)
4338 || RECOVERY_BLOCK (BB_END (jump_bb_next)));
4340 unlink_block (jump_bb_next);
4341 link_block (jump_bb_next, bb);
4345 move_succs (&(jump_bb->succs), bb);
4346 move_succs (&(jump_bb_next->succs), jump_bb);
4347 move_succs (&t, jump_bb_next);
4349 if (current_sched_info->fix_recovery_cfg)
4350 current_sched_info->fix_recovery_cfg
4351 (bb->index, jump_bb->index, jump_bb_next->index);
4354 /* Helper function for move_block_after_check.
4355 This function attaches the edge vector pointed to by SUCCSP to block TO. */
4358 move_succs (VEC(edge,gc) **succsp, basic_block to)
4363 gcc_assert (to->succs == 0);
4365 to->succs = *succsp;
4367 FOR_EACH_EDGE (e, ei, to->succs)
4373 /* Initialize GLAT (global_live_at_{start, end}) structures.
4374 GLAT structures are used to substitute for the global_live_at_{start, end}
4375 regsets during scheduling. This is necessary in order to use such functions as
4376 split_block (), as they assume consistency of register live information. */
4386 /* Helper function for init_glat. */
4388 init_glat1 (basic_block bb)
4390 gcc_assert (bb->il.rtl->global_live_at_start != 0
4391 && bb->il.rtl->global_live_at_end != 0);
4393 glat_start[bb->index] = bb->il.rtl->global_live_at_start;
4394 glat_end[bb->index] = bb->il.rtl->global_live_at_end;
4396 if (current_sched_info->flags & DETACH_LIFE_INFO)
4398 bb->il.rtl->global_live_at_start = 0;
4399 bb->il.rtl->global_live_at_end = 0;
4403 /* Attach reg_live_info back to basic blocks.
4404 Also save the regsets that should not have been changed during scheduling,
4405 for checking purposes (see check_reg_live). */
4407 attach_life_info (void)
4412 attach_life_info1 (bb);
4415 /* Helper function for attach_life_info. */
4417 attach_life_info1 (basic_block bb)
4419 gcc_assert (bb->il.rtl->global_live_at_start == 0
4420 && bb->il.rtl->global_live_at_end == 0);
4422 if (glat_start[bb->index])
4424 gcc_assert (glat_end[bb->index]);
4426 bb->il.rtl->global_live_at_start = glat_start[bb->index];
4427 bb->il.rtl->global_live_at_end = glat_end[bb->index];
4429 /* Make them NULL, so they won't be freed in free_glat. */
4430 glat_start[bb->index] = 0;
4431 glat_end[bb->index] = 0;
4433 #ifdef ENABLE_CHECKING
4434 if (bb->index < NUM_FIXED_BLOCKS
4435 || current_sched_info->region_head_or_leaf_p (bb, 0))
4437 glat_start[bb->index] = ALLOC_REG_SET (&reg_obstack);
4438 COPY_REG_SET (glat_start[bb->index],
4439 bb->il.rtl->global_live_at_start);
4442 if (bb->index < NUM_FIXED_BLOCKS
4443 || current_sched_info->region_head_or_leaf_p (bb, 1))
4445 glat_end[bb->index] = ALLOC_REG_SET (&reg_obstack);
4446 COPY_REG_SET (glat_end[bb->index], bb->il.rtl->global_live_at_end);
4452 gcc_assert (!glat_end[bb->index]);
4454 bb->il.rtl->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
4455 bb->il.rtl->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
4459 /* Free GLAT information. */
4463 #ifdef ENABLE_CHECKING
4464 if (current_sched_info->flags & DETACH_LIFE_INFO)
4470 if (glat_start[bb->index])
4471 FREE_REG_SET (glat_start[bb->index]);
4472 if (glat_end[bb->index])
4473 FREE_REG_SET (glat_end[bb->index]);
4482 /* Remove INSN from the instruction stream.
4483 INSN should not have any dependencies. */
4485 sched_remove_insn (rtx insn)
4487 change_queue_index (insn, QUEUE_NOWHERE);
4488 current_sched_info->add_remove_insn (insn, 1);
4492 /* Clear the priorities of all instructions that are
4493 forward dependent on INSN. */
4495 clear_priorities (rtx insn)
4499 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4503 pro = XEXP (link, 0);
4504 if (INSN_PRIORITY_KNOWN (pro))
4506 INSN_PRIORITY_KNOWN (pro) = 0;
4507 clear_priorities (pro);
4512 /* Recompute the priorities of instructions whose priorities might have been
4513 changed due to changes in INSN. */
4515 calc_priorities (rtx insn)
4519 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4523 pro = XEXP (link, 0);
4524 if (!INSN_PRIORITY_KNOWN (pro))
4527 calc_priorities (pro);
4533 /* Add dependences between JUMP and other instructions in the recovery
4534 block. INSN is the first insn in the recovery block. */
4536 add_jump_dependencies (rtx insn, rtx jump)
4540 insn = NEXT_INSN (insn);
4544 if (!INSN_DEPEND (insn))
4545 add_back_forw_dep (jump, insn, REG_DEP_ANTI, DEP_ANTI);
4548 gcc_assert (LOG_LINKS (jump));
4551 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
4553 bb_note (basic_block bb)
4557 note = BB_HEAD (bb);
4559 note = NEXT_INSN (note);
4561 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4565 #ifdef ENABLE_CHECKING
4566 extern void debug_spec_status (ds_t);
4568 /* Dump information about the dependence status S. */
4570 debug_spec_status (ds_t s)
4575 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak (s, BEGIN_DATA));
4577 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak (s, BE_IN_DATA));
4578 if (s & BEGIN_CONTROL)
4579 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak (s, BEGIN_CONTROL));
4580 if (s & BE_IN_CONTROL)
4581 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak (s, BE_IN_CONTROL));
4584 fprintf (f, "HARD_DEP; ");
4587 fprintf (f, "DEP_TRUE; ");
4589 fprintf (f, "DEP_ANTI; ");
4591 fprintf (f, "DEP_OUTPUT; ");
4596 /* Helper function for check_cfg.
4597 Return nonzero if the edge vector pointed to by EL has an edge with TYPE in its flags. */
4600 has_edge_p (VEC(edge,gc) *el, int type)
4605 FOR_EACH_EDGE (e, ei, el)
4606 if (e->flags & type)
4611 /* Check a few properties of the CFG between HEAD and TAIL.
4612 If HEAD (TAIL) is NULL, check from the beginning (till the end) of the
4613 instruction stream. */
4615 check_cfg (rtx head, rtx tail)
4619 int not_first = 0, not_last;
4622 head = get_insns ();
4624 tail = get_last_insn ();
4625 next_tail = NEXT_INSN (tail);
4629 not_last = head != tail;
4632 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
4634 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
4637 || (NOTE_INSN_BASIC_BLOCK_P (head)
4639 || (not_first && !LABEL_P (PREV_INSN (head))))))
4641 gcc_assert (bb == 0);
4642 bb = BLOCK_FOR_INSN (head);
4644 gcc_assert (BB_HEAD (bb) == head);
4646 /* This is the case of a jump table. See inside_basic_block_p (). */
4647 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
4652 gcc_assert (!inside_basic_block_p (head));
4653 head = NEXT_INSN (head);
4657 gcc_assert (inside_basic_block_p (head)
4659 gcc_assert (BLOCK_FOR_INSN (head) == bb);
4663 head = NEXT_INSN (head);
4664 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
4668 if (control_flow_insn_p (head))
4670 gcc_assert (BB_END (bb) == head);
4672 if (any_uncondjump_p (head))
4673 gcc_assert (EDGE_COUNT (bb->succs) == 1
4674 && BARRIER_P (NEXT_INSN (head)));
4675 else if (any_condjump_p (head))
4676 gcc_assert (EDGE_COUNT (bb->succs) > 1
4677 && !BARRIER_P (NEXT_INSN (head)));
4679 if (BB_END (bb) == head)
4681 if (EDGE_COUNT (bb->succs) > 1)
4682 gcc_assert (control_flow_insn_p (head)
4683 || has_edge_p (bb->succs, EDGE_COMPLEX));
4687 head = NEXT_INSN (head);
4693 while (head != next_tail);
4695 gcc_assert (bb == 0);
4698 /* Perform a few consistency checks of flags in different data structures. */
4700 check_sched_flags (void)
4702 unsigned int f = current_sched_info->flags;
4704 if (flag_sched_stalled_insns)
4705 gcc_assert (!(f & DO_SPECULATION));
4706 if (f & DO_SPECULATION)
4707 gcc_assert (!flag_sched_stalled_insns
4708 && (f & DETACH_LIFE_INFO)
4710 && spec_info->mask);
4711 if (f & DETACH_LIFE_INFO)
4712 gcc_assert (f & USE_GLAT);
4715 /* Check global_live_at_{start, end} regsets.
4716 If FATAL_P is TRUE, then abort execution at the first failure.
4717 Otherwise, print diagnostics to STDERR (this mode is for calling from the debugger). */
4720 check_reg_live (bool fatal_p)
4732 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_start,
4737 gcc_assert (!fatal_p);
4739 fprintf (stderr, ";; check_reg_live_at_start (%d) failed.\n", i);
4745 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_end,
4750 gcc_assert (!fatal_p);
4752 fprintf (stderr, ";; check_reg_live_at_end (%d) failed.\n", i);
4757 #endif /* ENABLE_CHECKING */
4759 #endif /* INSN_SCHEDULING */