1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes.  All of these are dealt with here.
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.
   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block upon interblock motion, ties broken by
   4.  prefer useful upon speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, or finally
   7.  choose the insn which has the most insns dependent on it, or
   8.  choose insn with lowest UID.
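   For example, two ready insns with equal critical-path length and
   equal register-pressure contribution fall through to rule 6: the
   one with fewer dependences on the previously scheduled insn is
   issued first, since it is less likely to stall the pipeline.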
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
   memory references.  To a first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
78 approximations will yield fewer dependencies.
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using insn backward dependences
86 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
   INSN_FORW_DEPS for the purpose of forward list scheduling.
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   lifetimes.
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114 exist that inhibit the motion (live registers constraints are not
115 represented by data dependence edges).
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
134 #include "hard-reg-set.h"
136 #include "function.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
143 #include "sched-int.h"
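/* A minimal sketch (illustrative only, not part of the pass) of the
   list-scheduling loop described in the header comment: insns whose
   dependencies are satisfied form a ready set ordered by priority; the
   best one is issued each step, and issuing it may make its dependents
   ready.  All names here (toy_insn, toy_schedule) are hypothetical, use
   plain arrays instead of the real ready_list/insn_queue machinery, and
   assume the dependence graph is acyclic so some insn is always ready.  */
#if 0
struct toy_insn
{
  int priority;		  /* e.g. longest path to the end of the block */
  int n_unsched_preds;	  /* unscheduled predecessors; 0 means ready */
};

static void
toy_schedule (struct toy_insn *insns, int n,
	      int **succs, int *n_succs, int *order)
{
  int scheduled = 0;

  while (scheduled < n)
    {
      int i, best = -1;

      /* Pick the ready insn with the highest priority.  */
      for (i = 0; i < n; i++)
	if (insns[i].n_unsched_preds == 0
	    && (best < 0 || insns[i].priority > insns[best].priority))
	  best = i;

      order[scheduled++] = best;
      insns[best].n_unsched_preds = -1;	/* mark as scheduled */

      /* Issuing BEST may make its successors ready.  */
      for (i = 0; i < n_succs[best]; i++)
	insns[succs[best][i]].n_unsched_preds--;
    }
}
#endif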
152 #ifdef INSN_SCHEDULING
/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */
int issue_rate;
160 /* sched-verbose controls the amount of debugging output the
161 scheduler prints. It is controlled by -fsched-verbose=N:
   N>0 and no -DSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */
169 static int sched_verbose_param = 0;
170 int sched_verbose = 0;
172 /* Debugging file. All printouts are sent to dump, which is always set,
173 either to stderr, or to the dump listing file (-dRS). */
174 FILE *sched_dump = 0;
176 /* fix_sched_param() is called from toplev.c upon detection
177 of the -fsched-verbose=N option. */
void
fix_sched_param (const char *param, const char *val)
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning (0, "fix_sched_param: unknown param: %s", param);
}
188 /* This is a placeholder for the scheduler parameters common
189 to all schedulers. */
190 struct common_sched_info_def *common_sched_info;
192 #define INSN_TICK(INSN) (HID (INSN)->tick)
193 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
195 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
196 then it should be recalculated from scratch. */
197 #define INVALID_TICK (-(max_insn_queue_index + 1))
198 /* The minimal value of the INSN_TICK of an instruction. */
199 #define MIN_TICK (-max_insn_queue_index)
201 /* Issue points are used to distinguish between instructions in max_issue ().
202 For now, all instructions are equally good. */
203 #define ISSUE_POINTS(INSN) 1
/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
rtx note_list;
209 static struct spec_info_def spec_info_var;
210 /* Description of the speculative part of the scheduling.
211 If NULL - no speculation. */
212 spec_info_t spec_info = NULL;
/* True if a recovery block was added during scheduling of the current
   block.  Used to determine whether we need to fix INSN_TICKs.  */
216 static bool haifa_recovery_bb_recently_added_p;
/* True if a recovery block was added during this scheduling pass.
   Used to determine whether we should have empty memory pools of
   dependencies after finishing the current region.  */
221 bool haifa_recovery_bb_ever_added_p;
223 /* Counters of different types of speculative instructions. */
224 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
226 /* Array used in {unlink, restore}_bb_notes. */
227 static rtx *bb_header = 0;
229 /* Basic block after which recovery blocks will be created. */
230 static basic_block before_recovery;
/* Basic block just before the EXIT_BLOCK and after recovery, if we have
   one.  */
basic_block after_recovery;
236 /* FALSE if we add bb to another region, so we don't need to initialize it. */
237 bool adding_bb_to_current_region_p = true;
241 /* An instruction is ready to be scheduled when all insns preceding it
242 have already been scheduled. It is important to ensure that all
243 insns which use its result will not be executed until its result
244 has been computed. An insn is maintained in one of four structures:
246 (P) the "Pending" set of insns which cannot be scheduled until
247 their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has elapsed.
250 (R) the "Ready" list of unscheduled, uncommitted insns.
251 (S) the "Scheduled" list of insns.
253 Initially, all insns are either "Pending" or "Ready" depending on
254 whether their dependencies are satisfied.
256 Insns move from the "Ready" list to the "Scheduled" list as they
257 are committed to the schedule. As this occurs, the insns in the
258 "Pending" list have their dependencies satisfied and move to either
259 the "Ready" list or the "Queued" set depending on whether
260 sufficient time has passed to make them ready. As time passes,
261 insns move from the "Queued" set to the "Ready" list.
263 The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
264 unscheduled insns, i.e., those that are ready, queued, and pending.
265 The "Queued" set (Q) is implemented by the variable `insn_queue'.
266 The "Ready" list (R) is implemented by the variables `ready' and
268 The "Scheduled" list (S) is the new insn chain built by this pass.
270 The transition (R->S) is implemented in the scheduling loop in
271 `schedule_block' when the best insn to schedule is chosen.
272 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
273 insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
275 passes or stalls are introduced. */
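/* For example (illustrative): an insn I with a single unscheduled
   predecessor P starts in (P).  When P is scheduled, I moves to (Q) if
   P's latency has not yet elapsed, or straight to (R) if it has.  As
   cycles pass, I moves (Q->R), and once chosen from the ready list it
   moves (R->S).  */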
/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   from the maximal time of functional unit reservations.
   This is the longest time an insn may be queued.  */
284 static rtx *insn_queue;
285 static int q_ptr = 0;
286 static int q_size = 0;
287 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
288 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
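/* For illustration: since max_insn_queue_index is a power of two minus
   one, the AND acts as a cheap modulus.  E.g. with
   max_insn_queue_index == 7, NEXT_Q_AFTER (6, 3) == (6 + 3) & 7 == 1,
   so an insn queued 3 cycles ahead of slot 6 wraps around to slot 1 of
   the circular buffer.  */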
290 #define QUEUE_SCHEDULED (-3)
291 #define QUEUE_NOWHERE (-2)
292 #define QUEUE_READY (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   queue, nor ready list.
   QUEUE_READY     - INSN is in ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */
299 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
/* The following variable holds all current and future reservations of
   the processor units.  */
state_t curr_state;
/* The following variable value is the size of memory representing all
   current and future reservations of the processor units.  */
307 size_t dfa_state_size;
309 /* The following array is used to find the best insn from ready when
310 the automaton pipeline interface is used. */
311 char *ready_try = NULL;
313 /* The ready list. */
314 struct ready_list ready = {NULL, 0, 0, 0, 0};
316 /* The pointer to the ready list (to be removed). */
317 static struct ready_list *readyp = &ready;
319 /* Scheduling clock. */
320 static int clock_var;
322 static int may_trap_exp (const_rtx, int);
/* Nonzero iff the address is composed of at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)						\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))
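/* For example, (reg) and (plus (reg) (const_int 4)) satisfy
   CONST_BASED_ADDRESS_P, while (plus (reg) (reg)) does not, since the
   latter address is built from two registers.  */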
/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */
static int haifa_classify_rtx (const_rtx);
336 static int haifa_luid_for_non_insn (rtx x);
338 /* Haifa version of sched_info hooks common to all headers. */
const struct common_sched_info_def haifa_common_sched_info =
  {
    NULL, /* fix_recovery_cfg */
    NULL, /* add_block */
    NULL, /* estimate_number_of_insns */
    haifa_luid_for_non_insn, /* luid_for_non_insn */
    SCHED_PASS_UNKNOWN /* sched_pass_id */
  };
348 const struct sched_scan_info_def *sched_scan_info;
350 /* Mapping from instruction UID to its Logical UID. */
351 VEC (int, heap) *sched_luids = NULL;
353 /* Next LUID to assign to an instruction. */
354 int sched_max_luid = 1;
356 /* Haifa Instruction Data. */
357 VEC (haifa_insn_data_def, heap) *h_i_d = NULL;
359 void (* sched_init_only_bb) (basic_block, basic_block);
/* Split block function.  Different schedulers might use different functions
   to keep their internal data consistent.  */
363 basic_block (* sched_split_block) (basic_block, rtx);
365 /* Create empty basic block after the specified block. */
366 basic_block (* sched_create_empty_bb) (basic_block);
static int
may_trap_exp (const_rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    return (code == MEM && may_trap_p (x)) ? TRAP_RISKY : TRAP_FREE;
  if (code == MEM)
    {
      /* The insn uses memory: a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}
432 /* Classifies rtx X of an insn for the purpose of verifying that X can be
433 executed speculatively (and consequently the insn can be moved
434 speculatively), by examining X, returning:
435 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
436 TRAP_FREE: non-load insn.
437 IFREE: load from a globally safe location.
438 IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
   being either PFREE or PRISKY.  */
static int
haifa_classify_rtx (const_rtx x)
{
445 int tmp_class = TRAP_FREE;
446 int insn_class = TRAP_FREE;
  if (GET_CODE (x) == PARALLEL)
    {
      int i, len = XVECLEN (x, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      switch (GET_CODE (x))
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (x, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (x), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class = WORST_CLASS (tmp_class, may_trap_exp (SET_SRC (x), 0));
	  break;
	case COND_EXEC:
	  tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
	  if (tmp_class == TRAP_RISKY)
	    break;
	  tmp_class = WORST_CLASS (tmp_class,
				   may_trap_exp (COND_EXEC_TEST (x), 0));
	  break;
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}
int
haifa_classify_insn (const_rtx insn)
{
  return haifa_classify_rtx (PATTERN (insn));
}
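/* Illustrative examples of the resulting classes: a SET whose source
   is a volatile MEM yields IRISKY; a SET whose destination is a MEM
   that may trap yields TRAP_RISKY; a TRAP_IF pattern is TRAP_RISKY;
   and a plain register-to-register move stays TRAP_FREE.  */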
504 /* Forward declarations. */
506 static int priority (rtx);
507 static int rank_for_schedule (const void *, const void *);
508 static void swap_sort (rtx *, int);
509 static void queue_insn (rtx, int);
510 static int schedule_insn (rtx);
511 static void adjust_priority (rtx);
512 static void advance_one_cycle (void);
513 static void extend_h_i_d (void);
516 /* Notes handling mechanism:
517 =========================
518 Generally, NOTES are saved before scheduling and restored after scheduling.
519 The scheduler distinguishes between two types of notes:
521 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
522 Before scheduling a region, a pointer to the note is added to the insn
523 that follows or precedes it. (This happens as part of the data dependence
524 computation). After scheduling an insn, the pointer contained in it is
525 used for regenerating the corresponding note (in reemit_notes).
527 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
528 these notes are put in a list (in rm_other_notes() and
529 unlink_other_notes ()). After scheduling the block, these notes are
530 inserted at the beginning of the block (in schedule_block()). */
532 static void ready_add (struct ready_list *, rtx, bool);
533 static rtx ready_remove_first (struct ready_list *);
535 static void queue_to_ready (struct ready_list *);
536 static int early_queue_to_ready (state_t, struct ready_list *);
538 static void debug_ready_list (struct ready_list *);
540 /* The following functions are used to implement multi-pass scheduling
541 on the first cycle. */
542 static rtx ready_remove (struct ready_list *, int);
543 static void ready_remove_insn (rtx);
545 static int choose_ready (struct ready_list *, rtx *);
547 static void fix_inter_tick (rtx, rtx);
548 static int fix_tick_ready (rtx);
549 static void change_queue_index (rtx, int);
551 /* The following functions are used to implement scheduling of data/control
552 speculative instructions. */
554 static void extend_h_i_d (void);
555 static void init_h_i_d (rtx);
556 static void generate_recovery_code (rtx);
557 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
558 static void begin_speculative_block (rtx);
559 static void add_to_speculative_block (rtx);
560 static void init_before_recovery (basic_block *);
561 static void create_check_block_twin (rtx, bool);
562 static void fix_recovery_deps (basic_block);
563 static void haifa_change_pattern (rtx, rtx);
564 static void dump_new_block_header (int, basic_block, rtx, rtx);
565 static void restore_bb_notes (basic_block);
566 static void fix_jump_move (rtx);
567 static void move_block_after_check (rtx);
568 static void move_succs (VEC(edge,gc) **, basic_block);
569 static void sched_remove_insn (rtx);
570 static void clear_priorities (rtx, rtx_vec_t *);
571 static void calc_priorities (rtx_vec_t);
572 static void add_jump_dependencies (rtx, rtx);
573 #ifdef ENABLE_CHECKING
574 static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
#endif

#endif /* INSN_SCHEDULING */
580 /* Point to state used for the current scheduling pass. */
581 struct haifa_sched_info *current_sched_info;
#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Do register pressure sensitive insn scheduling if the flag is set
   up.  */
bool sched_pressure_p;
/* Map regno -> its cover class.  The map is defined only when
   SCHED_PRESSURE_P is true.  */
596 enum reg_class *sched_regno_cover_class;
/* The current register pressure.  Only elements corresponding to cover
   classes are defined.  */
600 static int curr_reg_pressure[N_REG_CLASSES];
602 /* Saved value of the previous array. */
603 static int saved_reg_pressure[N_REG_CLASSES];
/* Registers live at the given scheduling point.  */
606 static bitmap curr_reg_live;
608 /* Saved value of the previous array. */
609 static bitmap saved_reg_live;
611 /* Registers mentioned in the current region. */
612 static bitmap region_ref_regs;
/* Initiate register pressure relative info for scheduling the current
   region.  Currently it is only clearing registers mentioned in the
   current region.  */
void
sched_init_region_reg_pressure_info (void)
{
  bitmap_clear (region_ref_regs);
}
/* Update current register pressure related info after birth (if
   BIRTH_P) or death of register REGNO.  */
static void
mark_regno_birth_or_death (int regno, bool birth_p)
{
  enum reg_class cover_class;

  cover_class = sched_regno_cover_class[regno];
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (cover_class != NO_REGS)
	{
	  if (birth_p)
	    {
	      bitmap_set_bit (curr_reg_live, regno);
	      curr_reg_pressure[cover_class]
		+= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
	    }
	  else
	    {
	      bitmap_clear_bit (curr_reg_live, regno);
	      curr_reg_pressure[cover_class]
		-= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
	    }
	}
    }
  else if (cover_class != NO_REGS
	   && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
    {
      if (birth_p)
	{
	  bitmap_set_bit (curr_reg_live, regno);
	  curr_reg_pressure[cover_class]++;
	}
      else
	{
	  bitmap_clear_bit (curr_reg_live, regno);
	  curr_reg_pressure[cover_class]--;
	}
    }
}
665 /* Initiate current register pressure related info from living
666 registers given by LIVE. */
static void
initiate_reg_pressure_info (bitmap live)
{
  int i;
  unsigned int j;
  bitmap_iterator bi;

674 for (i = 0; i < ira_reg_class_cover_size; i++)
675 curr_reg_pressure[ira_reg_class_cover[i]] = 0;
676 bitmap_clear (curr_reg_live);
677 EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
678 if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
679 mark_regno_birth_or_death (j, true);
682 /* Mark registers in X as mentioned in the current region. */
static void
setup_ref_regs (rtx x)
{
  int i, j, regno;
  const char *fmt;
687 const RTX_CODE code = GET_CODE (x);
693 if (regno >= FIRST_PSEUDO_REGISTER)
694 bitmap_set_bit (region_ref_regs, REGNO (x));
696 for (i = hard_regno_nregs[regno][GET_MODE (x)] - 1; i >= 0; i--)
697 bitmap_set_bit (region_ref_regs, regno + i);
700 fmt = GET_RTX_FORMAT (code);
701 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
703 setup_ref_regs (XEXP (x, i));
704 else if (fmt[i] == 'E')
706 for (j = 0; j < XVECLEN (x, i); j++)
707 setup_ref_regs (XVECEXP (x, i, j));
/* Initiate current register pressure related info at the start of
   basic block BB.  */
static void
initiate_bb_reg_pressure_info (basic_block bb)
{
719 if (current_nr_blocks > 1)
720 FOR_BB_INSNS (bb, insn)
722 setup_ref_regs (PATTERN (insn));
723 initiate_reg_pressure_info (df_get_live_in (bb));
724 #ifdef EH_RETURN_DATA_REGNO
725 if (bb_has_eh_pred (bb))
728 unsigned int regno = EH_RETURN_DATA_REGNO (i);
730 if (regno == INVALID_REGNUM)
732 if (! bitmap_bit_p (df_get_live_in (bb), regno))
733 mark_regno_birth_or_death (regno, true);
738 /* Save current register pressure related info. */
static void
save_reg_pressure (void)
{
  int i;

744 for (i = 0; i < ira_reg_class_cover_size; i++)
745 saved_reg_pressure[ira_reg_class_cover[i]]
746 = curr_reg_pressure[ira_reg_class_cover[i]];
747 bitmap_copy (saved_reg_live, curr_reg_live);
750 /* Restore saved register pressure related info. */
static void
restore_reg_pressure (void)
{
  int i;

756 for (i = 0; i < ira_reg_class_cover_size; i++)
757 curr_reg_pressure[ira_reg_class_cover[i]]
758 = saved_reg_pressure[ira_reg_class_cover[i]];
759 bitmap_copy (curr_reg_live, saved_reg_live);
/* Return TRUE if the register is dying after its USE.  */
static bool
dying_use_p (struct reg_use_data *use)
{
766 struct reg_use_data *next;
768 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
769 if (QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
/* Print info about the current register pressure and its excess for
   each cover class.  */
static void
print_curr_reg_pressure (void)
{
  int i;
  enum reg_class cl;

782 fprintf (sched_dump, ";;\t");
783 for (i = 0; i < ira_reg_class_cover_size; i++)
785 cl = ira_reg_class_cover[i];
786 gcc_assert (curr_reg_pressure[cl] >= 0);
787 fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
788 curr_reg_pressure[cl],
789 curr_reg_pressure[cl] - ira_available_class_regs[cl]);
791 fprintf (sched_dump, "\n");
794 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
795 so that insns independent of the last scheduled insn will be preferred
796 over dependent instructions. */
798 static rtx last_scheduled_insn;
/* Cached cost of the instruction.  Use the function below to get cost of
   the insn.  -1 here means that the field is not initialized.  */
802 #define INSN_COST(INSN) (HID (INSN)->cost)
804 /* Compute cost of executing INSN.
805 This is the number of cycles between instruction issue and
806 instruction results. */
814 if (recog_memoized (insn) < 0)
817 cost = insn_default_latency (insn);
824 cost = INSN_COST (insn);
828 /* A USE insn, or something else we don't need to
829 understand. We can't pass these directly to
830 result_ready_cost or insn_default_latency because it will
831 trigger a fatal error for unrecognizable insns. */
832 if (recog_memoized (insn) < 0)
834 INSN_COST (insn) = 0;
839 cost = insn_default_latency (insn);
843 INSN_COST (insn) = cost;
/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.
   ??? We also use this function to call recog_memoized on all insns.  */
int
dep_cost_1 (dep_t link, dw_t dw)
{
857 rtx insn = DEP_PRO (link);
858 rtx used = DEP_CON (link);
  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  We don't care about the
     dependence cost when only decreasing register pressure.  */
865 if (recog_memoized (used) < 0)
868 recog_memoized (insn);
872 enum reg_note dep_type = DEP_TYPE (link);
874 cost = insn_cost (insn);
876 if (INSN_CODE (insn) >= 0)
878 if (dep_type == REG_DEP_ANTI)
880 else if (dep_type == REG_DEP_OUTPUT)
882 cost = (insn_default_latency (insn)
883 - insn_default_latency (used));
887 else if (bypass_p (insn))
888 cost = insn_latency (insn, used);
892 if (targetm.sched.adjust_cost_2)
893 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
895 else if (targetm.sched.adjust_cost != NULL)
      /* This variable is used for backward compatibility with the
	 targets.  */
899 rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
      /* Make it self-cycled, so that if someone tries to walk over this
	 incomplete list he/she will be caught in an endless loop.  */
903 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
905 /* Targets use only REG_NOTE_KIND of the link. */
906 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
908 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
911 free_INSN_LIST_node (dep_cost_rtx_link);
921 /* Compute cost of dependence LINK.
922 This is the number of cycles between instruction issue and
923 instruction results. */
int
dep_cost (dep_t link)
{
  return dep_cost_1 (link, 0);
}
930 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
931 INSN_PRIORITY explicitly. */
void
increase_insn_priority (rtx insn, int amount)
{
937 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
938 if (INSN_PRIORITY_KNOWN (insn))
939 INSN_PRIORITY (insn) += amount;
943 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
944 Use EXPR_PRIORITY instead. */
945 sel_add_to_insn_priority (insn, amount);
949 /* Return 'true' if DEP should be included in priority calculations. */
static bool
contributes_to_priority_p (dep_t dep)
{
953 if (DEBUG_INSN_P (DEP_CON (dep))
954 || DEBUG_INSN_P (DEP_PRO (dep)))
957 /* Critical path is meaningful in block boundaries only. */
958 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
     then speculative instructions will be less likely to be
     scheduled.  That is because the priority of
     their producers will increase, and, thus, the
     producers will more likely be scheduled, thus,
     resolving the dependence.  */
968 if (sched_deps_info->generate_spec_deps
969 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
970 && (DEP_STATUS (dep) & SPECULATIVE))
976 /* Compute the number of nondebug forward deps of an insn. */
static int
dep_list_size (rtx insn)
{
  dep_t dep;
981 sd_iterator_def sd_it;
983 int dbgcount = 0, nodbgcount = 0;
985 if (!MAY_HAVE_DEBUG_INSNS)
986 return sd_lists_size (insn, SD_LIST_FORW);
988 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
990 if (DEBUG_INSN_P (DEP_CON (dep)))
992 else if (!DEBUG_INSN_P (DEP_PRO (dep)))
996 gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));
/* Compute the priority number for INSN.  */
static int
priority (rtx insn)
{
  if (! INSN_P (insn))
    return 0;
1008 /* We should not be interested in priority of an already scheduled insn. */
1009 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1011 if (!INSN_PRIORITY_KNOWN (insn))
1013 int this_priority = -1;
1015 if (dep_list_size (insn) == 0)
      /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
	 some forward deps but all of them are ignored by
	 contributes_to_priority hook.  At the moment we set priority of
	 such insns to 0.  */
      this_priority = insn_cost (insn);
1023 rtx prev_first, twin;
      /* For recovery check instructions we calculate priority slightly
	 differently than for normal instructions.  Instead of walking
	 through INSN_FORW_DEPS (check) list, we walk through
	 INSN_FORW_DEPS list of each instruction in the corresponding
	 recovery block.  */
1032 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1033 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1034 if (!rec || rec == EXIT_BLOCK_PTR)
1036 prev_first = PREV_INSN (insn);
1041 prev_first = NEXT_INSN (BB_HEAD (rec));
1042 twin = PREV_INSN (BB_END (rec));
1047 sd_iterator_def sd_it;
1050 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1055 next = DEP_CON (dep);
1057 if (BLOCK_FOR_INSN (next) != rec)
1061 if (!contributes_to_priority_p (dep))
1065 cost = dep_cost (dep);
1068 struct _dep _dep1, *dep1 = &_dep1;
1070 init_dep (dep1, insn, next, REG_DEP_ANTI);
1072 cost = dep_cost (dep1);
1075 next_priority = cost + priority (next);
1077 if (next_priority > this_priority)
1078 this_priority = next_priority;
1082 twin = PREV_INSN (twin);
1084 while (twin != prev_first);
1087 if (this_priority < 0)
1089 gcc_assert (this_priority == -1);
1091 this_priority = insn_cost (insn);
1094 INSN_PRIORITY (insn) = this_priority;
1095 INSN_PRIORITY_STATUS (insn) = 1;
1098 return INSN_PRIORITY (insn);
1101 /* Macros and functions for keeping the priority queue sorted, and
1102 dealing with queuing and dequeuing of instructions. */
#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)				             \
       swap_sort (READY, N_READY);			             \
     else if ((N_READY) > 2)				             \
         qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }  \
while (0)
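/* For instance, ready_sort () below applies SCHED_SORT to the ready
   vector; the 2-element case goes through swap_sort () because a full
   qsort () call is not worth its overhead there.  */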
1111 /* Setup info about the current register pressure impact of scheduling
1112 INSN at the current scheduling point. */
static void
setup_insn_reg_pressure_info (rtx insn)
{
  enum reg_class cl;
1116 int i, change, before, after, hard_regno;
1117 int excess_cost_change;
1118 enum machine_mode mode;
1120 struct reg_pressure_data *pressure_info;
1121 int *max_reg_pressure;
1122 struct reg_use_data *use;
1123 static int death[N_REG_CLASSES];
1125 excess_cost_change = 0;
1126 for (i = 0; i < ira_reg_class_cover_size; i++)
1127 death[ira_reg_class_cover[i]] = 0;
1128 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1129 if (dying_use_p (use))
1131 cl = sched_regno_cover_class[use->regno];
1132 if (use->regno < FIRST_PSEUDO_REGISTER)
1135 death[cl] += ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
1137 pressure_info = INSN_REG_PRESSURE (insn);
1138 max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1139 gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1140 for (i = 0; i < ira_reg_class_cover_size; i++)
1142 cl = ira_reg_class_cover[i];
1143 gcc_assert (curr_reg_pressure[cl] >= 0);
1144 change = (int) pressure_info[i].set_increase - death[cl];
1145 before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
1146 after = MAX (0, max_reg_pressure[i] + change
1147 - ira_available_class_regs[cl]);
1148 hard_regno = ira_class_hard_regs[cl][0];
1149 gcc_assert (hard_regno >= 0);
1150 mode = reg_raw_mode[hard_regno];
1151 excess_cost_change += ((after - before)
1152 * (ira_memory_move_cost[mode][cl][0]
1153 + ira_memory_move_cost[mode][cl][1]));
1155 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
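/* A worked example with hypothetical numbers: suppose a cover class
   has 8 available registers, and the max pressure is 10 before this
   insn (excess 2) and 9 after it (excess 1).  Then change == -1 and
   the insn is credited (1 - 2) * (load cost + store cost) for that
   class, i.e. scheduling it is expected to save one spill store/load
   pair.  */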
/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */
static int
rank_for_schedule (const void *x, const void *y)
{
1165 rtx tmp = *(const rtx *) y;
1166 rtx tmp2 = *(const rtx *) x;
1168 int tmp_class, tmp2_class;
1169 int val, priority_val, info_val;
1171 if (MAY_HAVE_DEBUG_INSNS)
1173 /* Schedule debug insns as early as possible. */
1174 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
1176 else if (DEBUG_INSN_P (tmp2))
  /* The insn in a schedule group should be issued first.  */
1181 if (flag_sched_group_heuristic &&
1182 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
1183 return SCHED_GROUP_P (tmp2) ? 1 : -1;
1185 /* Make sure that priority of TMP and TMP2 are initialized. */
1186 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
1188 if (sched_pressure_p)
      /* Prefer insn whose scheduling results in the smallest register
	 pressure excess.  */
      if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
1195 + (INSN_TICK (tmp) > clock_var
1196 ? INSN_TICK (tmp) - clock_var : 0)
1197 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
1198 - (INSN_TICK (tmp2) > clock_var
1199 ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
1204 if (sched_pressure_p
1205 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
1207 if (INSN_TICK (tmp) <= clock_var)
1209 else if (INSN_TICK (tmp2) <= clock_var)
1212 return INSN_TICK (tmp) - INSN_TICK (tmp2);
1214 /* Prefer insn with higher priority. */
1215 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
1217 if (flag_sched_critical_path_heuristic && priority_val)
1218 return priority_val;
  /* Prefer speculative insn with greater dependency weakness.  */
1221 if (flag_sched_spec_insn_heuristic && spec_info)
1227 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
1229 dw1 = ds_weak (ds1);
1233 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
1235 dw2 = ds_weak (ds2);
1240 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
1244 info_val = (*current_sched_info->rank) (tmp, tmp2);
1245 if(flag_sched_rank_heuristic && info_val)
1248 if (flag_sched_last_insn_heuristic)
1250 last = last_scheduled_insn;
1252 if (DEBUG_INSN_P (last) && last != current_sched_info->prev_head)
1254 last = PREV_INSN (last);
1255 while (!NONDEBUG_INSN_P (last)
1256 && last != current_sched_info->prev_head);
  /* Compare insns based on their relation to the last scheduled
     non-debug insn.  */
  if (flag_sched_last_insn_heuristic && NONDEBUG_INSN_P (last))
      /* Classify the instructions into three classes:
	 1) Data dependent on last scheduled insn.
	 2) Anti/Output dependent on last scheduled insn.
	 3) Independent of last scheduled insn, or has latency of one.
	 Choose the insn from the highest numbered class if different.  */
1271 dep1 = sd_find_dep_between (last, tmp, true);
1273 if (dep1 == NULL || dep_cost (dep1) == 1)
1275 else if (/* Data dependence. */
1276 DEP_TYPE (dep1) == REG_DEP_TRUE)
1281 dep2 = sd_find_dep_between (last, tmp2, true);
1283 if (dep2 == NULL || dep_cost (dep2) == 1)
1285 else if (/* Data dependence. */
1286 DEP_TYPE (dep2) == REG_DEP_TRUE)
1291 if ((val = tmp2_class - tmp_class))
1295 /* Prefer the insn which has more later insns that depend on it.
1296 This gives the scheduler more freedom when scheduling later
1297 instructions at the expense of added register pressure. */
1299 val = (dep_list_size (tmp2) - dep_list_size (tmp));
1301 if (flag_sched_dep_count_heuristic && val != 0)
1304 /* If insns are equally good, sort by INSN_LUID (original insn order),
1305 so that we make the sort stable. This minimizes instruction movement,
1306 thus minimizing sched's effect on debugging and cross-jumping. */
1307 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1310 /* Resort the array A in which only element at index N may be out of order. */
1312 HAIFA_INLINE static void
1313 swap_sort (rtx *a, int n)
1315 rtx insn = a[n - 1];
1318 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
1326 /* Add INSN to the insn queue so that it can be executed at least
1327 N_CYCLES after the currently executing insn. Preserve insns
1328 chain for debugging purposes. */
1330 HAIFA_INLINE static void
1331 queue_insn (rtx insn, int n_cycles)
1333 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1334 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
1336 gcc_assert (n_cycles <= max_insn_queue_index);
1337 gcc_assert (!DEBUG_INSN_P (insn));
1339 insn_queue[next_q] = link;
1342 if (sched_verbose >= 2)
1344 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
1345 (*current_sched_info->print_insn) (insn, 0));
1347 fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
1350 QUEUE_INDEX (insn) = next_q;
1353 /* Remove INSN from queue. */
static void
queue_remove (rtx insn)
{
1357 gcc_assert (QUEUE_INDEX (insn) >= 0);
1358 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
1360 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
1363 /* Return a pointer to the bottom of the ready list, i.e. the insn
1364 with the lowest priority. */
HAIFA_INLINE static rtx *
ready_lastpos (struct ready_list *ready)
{
1369 gcc_assert (ready->n_ready >= 1);
1370 return ready->vec + ready->first - ready->n_ready + 1;
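/* For illustration: the highest priority insn lives at vec[first] and
   the lowest at vec[first - n_ready + 1].  E.g. with first == 9 and
   n_ready == 3 the ready insns occupy vec[7..9], and ready_lastpos ()
   returns &vec[7].  */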
1373 /* Add an element INSN to the ready list so that it ends up with the
1374 lowest/highest priority depending on FIRST_P. */
1376 HAIFA_INLINE static void
1377 ready_add (struct ready_list *ready, rtx insn, bool first_p)
1381 if (ready->first == ready->n_ready)
1383 memmove (ready->vec + ready->veclen - ready->n_ready,
1384 ready_lastpos (ready),
1385 ready->n_ready * sizeof (rtx));
1386 ready->first = ready->veclen - 1;
1388 ready->vec[ready->first - ready->n_ready] = insn;
1392 if (ready->first == ready->veclen - 1)
1395 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
1396 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1397 ready_lastpos (ready),
1398 ready->n_ready * sizeof (rtx));
1399 ready->first = ready->veclen - 2;
1401 ready->vec[++(ready->first)] = insn;
1405 if (DEBUG_INSN_P (insn))
1408 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1409 QUEUE_INDEX (insn) = QUEUE_READY;
/* Remove the element with the highest priority from the ready list and
   return it.  */
1415 HAIFA_INLINE static rtx
1416 ready_remove_first (struct ready_list *ready)
1420 gcc_assert (ready->n_ready);
1421 t = ready->vec[ready->first--];
1423 if (DEBUG_INSN_P (t))
1425 /* If the queue becomes empty, reset it. */
1426 if (ready->n_ready == 0)
1427 ready->first = ready->veclen - 1;
1429 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1430 QUEUE_INDEX (t) = QUEUE_NOWHERE;
/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we will try to choose the ready insn which
   permits starting the maximum number of insns on the same cycle.  */
/* Return a pointer to the element INDEX from the ready.  INDEX for
   insn with the highest priority is 0, and the lowest priority has
   N_READY - 1.  */
rtx
ready_element (struct ready_list *ready, int index)
{
1446 gcc_assert (ready->n_ready && index < ready->n_ready);
1448 return ready->vec[ready->first - index];
/* Remove the element INDEX from the ready list and return it.  INDEX
   for insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */
1455 HAIFA_INLINE static rtx
1456 ready_remove (struct ready_list *ready, int index)
1462 return ready_remove_first (ready);
1463 gcc_assert (ready->n_ready && index < ready->n_ready);
1464 t = ready->vec[ready->first - index];
1466 if (DEBUG_INSN_P (t))
1468 for (i = index; i < ready->n_ready; i++)
1469 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1470 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1474 /* Remove INSN from the ready list. */
static void
ready_remove_insn (rtx insn)
{
  int i;

1480 for (i = 0; i < readyp->n_ready; i++)
1481 if (ready_element (readyp, i) == insn)
1483 ready_remove (readyp, i);
/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */
void
ready_sort (struct ready_list *ready)
{
1496 rtx *first = ready_lastpos (ready);
1498 if (sched_pressure_p)
1500 for (i = 0; i < ready->n_ready; i++)
1501 setup_insn_reg_pressure_info (first[i]);
1503 SCHED_SORT (first, ready->n_ready);
1506 /* PREV is an insn that is ready to execute. Adjust its priority if that
1507 will help shorten or lengthen register lifetimes as appropriate. Also
1508 provide a hook for the target to tweak itself. */
1510 HAIFA_INLINE static void
1511 adjust_priority (rtx prev)
1513 /* ??? There used to be code here to try and estimate how an insn
1514 affected register lifetimes, but it did it by looking at REG_DEAD
1515 notes, which we removed in schedule_region. Nor did it try to
1516 take into account register pressure or anything useful like that.
1518 Revisit when we have a machine model to work with and not before. */
1520 if (targetm.sched.adjust_priority)
1521 INSN_PRIORITY (prev) =
1522 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1525 /* Advance DFA state STATE on one cycle. */
void
advance_state (state_t state)
{
1529 if (targetm.sched.dfa_pre_advance_cycle)
1530 targetm.sched.dfa_pre_advance_cycle ();
1532 if (targetm.sched.dfa_pre_cycle_insn)
1533 state_transition (state,
1534 targetm.sched.dfa_pre_cycle_insn ());
1536 state_transition (state, NULL);
1538 if (targetm.sched.dfa_post_cycle_insn)
1539 state_transition (state,
1540 targetm.sched.dfa_post_cycle_insn ());
1542 if (targetm.sched.dfa_post_advance_cycle)
1543 targetm.sched.dfa_post_advance_cycle ();
1546 /* Advance time on one cycle. */
1547 HAIFA_INLINE static void
1548 advance_one_cycle (void)
1550 advance_state (curr_state);
1551 if (sched_verbose >= 6)
1552 fprintf (sched_dump, ";;\tAdvanced a state.\n");
1555 /* Clock at which the previous instruction was issued. */
1556 static int last_clock_var;
1558 /* Update register pressure after scheduling INSN. */
static void
update_register_pressure (rtx insn)
{
1562 struct reg_use_data *use;
1563 struct reg_set_data *set;
1565 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1566 if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
1567 mark_regno_birth_or_death (use->regno, false);
1568 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
1569 mark_regno_birth_or_death (set->regno, true);
1572 /* Set up or update (if UPDATE_P) max register pressure (see its
1573 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
1574 after insn AFTER. */
static void
setup_insn_max_reg_pressure (rtx after, bool update_p)
{
  int i, p;
  bool eq_p;
  rtx insn;
1581 static int max_reg_pressure[N_REG_CLASSES];
1583 save_reg_pressure ();
1584 for (i = 0; i < ira_reg_class_cover_size; i++)
1585 max_reg_pressure[ira_reg_class_cover[i]]
1586 = curr_reg_pressure[ira_reg_class_cover[i]];
1587 for (insn = NEXT_INSN (after);
1588 insn != NULL_RTX && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
1589 insn = NEXT_INSN (insn))
1590 if (NONDEBUG_INSN_P (insn))
1593 for (i = 0; i < ira_reg_class_cover_size; i++)
1595 p = max_reg_pressure[ira_reg_class_cover[i]];
1596 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
1599 INSN_MAX_REG_PRESSURE (insn)[i]
1600 = max_reg_pressure[ira_reg_class_cover[i]];
1603 if (update_p && eq_p)
1605 update_register_pressure (insn);
1606 for (i = 0; i < ira_reg_class_cover_size; i++)
1607 if (max_reg_pressure[ira_reg_class_cover[i]]
1608 < curr_reg_pressure[ira_reg_class_cover[i]])
1609 max_reg_pressure[ira_reg_class_cover[i]]
1610 = curr_reg_pressure[ira_reg_class_cover[i]];
1612 restore_reg_pressure ();
/* Update the current register pressure after scheduling INSN.  Update
   also max register pressure for unscheduled insns of the current
   BB.  */
static void
update_reg_and_insn_max_reg_pressure (rtx insn)
{
1622 int before[N_REG_CLASSES];
1624 for (i = 0; i < ira_reg_class_cover_size; i++)
1625 before[i] = curr_reg_pressure[ira_reg_class_cover[i]];
1626 update_register_pressure (insn);
1627 for (i = 0; i < ira_reg_class_cover_size; i++)
1628 if (curr_reg_pressure[ira_reg_class_cover[i]] != before[i])
1630 if (i < ira_reg_class_cover_size)
1631 setup_insn_max_reg_pressure (insn, true);
/* Set up register pressure at the beginning of basic block BB whose
   insns start after insn AFTER.  Set up also max register pressure
   for all insns of the basic block.  */
void
sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
{
1640 gcc_assert (sched_pressure_p);
1641 initiate_bb_reg_pressure_info (bb);
1642 setup_insn_max_reg_pressure (after, false);
1645 /* INSN is the "currently executing insn". Launch each insn which was
1646 waiting on INSN. READY is the ready list which contains the insns
1647 that are ready to fire. CLOCK is the current cycle. The function
1648 returns necessary cycle advance after issuing the insn (it is not
1649 zero for insns in a schedule group). */
static int
schedule_insn (rtx insn)
{
1654 sd_iterator_def sd_it;
1659 if (sched_verbose >= 1)
1661 struct reg_pressure_data *pressure_info;
1664 print_insn (buf, insn, 0);
1666 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
1668 if (recog_memoized (insn) < 0)
1669 fprintf (sched_dump, "nothing");
1671 print_reservation (sched_dump, insn);
1672 pressure_info = INSN_REG_PRESSURE (insn);
1673 if (pressure_info != NULL)
1675 fputc (':', sched_dump);
1676 for (i = 0; i < ira_reg_class_cover_size; i++)
1677 fprintf (sched_dump, "%s%+d(%d)",
1678 reg_class_names[ira_reg_class_cover[i]],
1679 pressure_info[i].set_increase, pressure_info[i].change);
1681 fputc ('\n', sched_dump);
1684 if (sched_pressure_p)
1685 update_reg_and_insn_max_reg_pressure (insn);
1687 /* Scheduling instruction should have all its dependencies resolved and
1688 should have been removed from the ready list. */
1689 gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
1691 /* Reset debug insns invalidated by moving this insn. */
1692 if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
1693 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1694 sd_iterator_cond (&sd_it, &dep);)
1696 rtx dbg = DEP_PRO (dep);
1698 gcc_assert (DEBUG_INSN_P (dbg));
1700 if (sched_verbose >= 6)
1701 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
1704 /* ??? Rather than resetting the debug insn, we might be able
1705 to emit a debug temp before the just-scheduled insn, but
1706 this would involve checking that the expression at the
1707 point of the debug insn is equivalent to the expression
1708 before the just-scheduled insn. They might not be: the
1709 expression in the debug insn may depend on other insns not
1710 yet scheduled that set MEMs, REGs or even other debug
1711 insns. It's not clear that attempting to preserve debug
1712 information in these cases is worth the effort, given how
1713 uncommon these resets are and the likelihood that the debug
1714 temps introduced won't survive the schedule change. */
1715 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
1716 df_insn_rescan (dbg);
1718 /* We delete rather than resolve these deps, otherwise we
1719 crash in sched_free_deps(), because forward deps are
1720 expected to be released before backward deps. */
1721 sd_delete_dep (sd_it);
1724 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
1725 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
1727 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
1728 if (INSN_TICK (insn) > clock_var)
1729 /* INSN has been prematurely moved from the queue to the ready list.
1730 This is possible only if following flag is set. */
1731 gcc_assert (flag_sched_stalled_insns);
1733 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1734 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1735 INSN_TICK (insn) = clock_var;
1737 /* Update dependent instructions. */
1738 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
1739 sd_iterator_cond (&sd_it, &dep);)
1741 rtx next = DEP_CON (dep);
1743 /* Resolve the dependence between INSN and NEXT.
1744 sd_resolve_dep () moves current dep to another list thus
1745 advancing the iterator. */
1746 sd_resolve_dep (sd_it);
1748 /* Don't bother trying to mark next as ready if insn is a debug
1749 insn. If insn is the last hard dependency, it will have
1750 already been discounted. */
1751 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
1754 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
1758 effective_cost = try_ready (next);
1760 if (effective_cost >= 0
1761 && SCHED_GROUP_P (next)
1762 && advance < effective_cost)
1763 advance = effective_cost;
1766 /* Check always has only one forward dependence (to the first insn in
1767 the recovery block), therefore, this will be executed only once. */
1769 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
1770 fix_recovery_deps (RECOVERY_BLOCK (insn));
1774 /* This is the place where scheduler doesn't *basically* need backward and
1775 forward dependencies for INSN anymore. Nevertheless they are used in
1776 heuristics in rank_for_schedule (), early_queue_to_ready () and in
1777 some targets (e.g. rs6000). Thus the earliest place where we *can*
1778 remove dependencies is after targetm.sched.md_finish () call in
1779 schedule_block (). But, on the other side, the safest place to remove
1780 dependencies is when we are finishing scheduling entire region. As we
1781 don't generate [many] dependencies during scheduling itself, we won't
1782 need memory until beginning of next region.
1783 Bottom line: Dependencies are removed for all insns in the end of
1784 scheduling the region. */
  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER
      && !DEBUG_INSN_P (insn))
    {
1796 if (reload_completed)
1797 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
1798 last_clock_var = clock_var;
1804 /* Functions for handling of notes. */
1806 /* Insert the INSN note at the end of the notes list. */
static void
add_to_note_list (rtx insn, rtx *note_list_end_p)
{
1810 PREV_INSN (insn) = *note_list_end_p;
1811 if (*note_list_end_p)
1812 NEXT_INSN (*note_list_end_p) = insn;
1813 *note_list_end_p = insn;
1816 /* Add note list that ends on FROM_END to the end of TO_ENDP. */
void
concat_note_lists (rtx from_end, rtx *to_endp)
{
  rtx from_start;

1822 if (from_end == NULL)
    /* It's easy when we have nothing to concat.  */
1826 if (*to_endp == NULL)
1827 /* It's also easy when destination is empty. */
1829 *to_endp = from_end;
1833 from_start = from_end;
1834 /* A note list should be traversed via PREV_INSN. */
1835 while (PREV_INSN (from_start) != NULL)
1836 from_start = PREV_INSN (from_start);
1838 add_to_note_list (from_start, to_endp);
1839 *to_endp = from_end;
1842 /* Delete notes beginning with INSN and put them in the chain
1843 of notes ended by NOTE_LIST.
1844 Returns the insn following the notes. */
static rtx
unlink_other_notes (rtx insn, rtx tail)
{
1848 rtx prev = PREV_INSN (insn);
1850 while (insn != tail && NOTE_NOT_BB_P (insn))
1852 rtx next = NEXT_INSN (insn);
1853 basic_block bb = BLOCK_FOR_INSN (insn);
1855 /* Delete the note from its current position. */
1857 NEXT_INSN (prev) = next;
1859 PREV_INSN (next) = prev;
1863 /* Basic block can begin with either LABEL or
1864 NOTE_INSN_BASIC_BLOCK. */
1865 gcc_assert (BB_HEAD (bb) != insn);
1867 /* Check if we are removing last insn in the BB. */
1868 if (BB_END (bb) == insn)
1872 /* See sched_analyze to see how these are handled. */
1873 if (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
1874 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END)
1875 add_to_note_list (insn, ¬e_list);
1882 gcc_assert (sel_sched_p ());
/* Return the head and tail pointers of ebb starting at BEG and ending
   at END.  */
void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
1894 rtx beg_head = BB_HEAD (beg);
1895 rtx beg_tail = BB_END (beg);
1896 rtx end_head = BB_HEAD (end);
1897 rtx end_tail = BB_END (end);
1899 /* Don't include any notes or labels at the beginning of the BEG
1900 basic block, or notes at the end of the END basic blocks. */
1902 if (LABEL_P (beg_head))
1903 beg_head = NEXT_INSN (beg_head);
1905 while (beg_head != beg_tail)
1906 if (NOTE_P (beg_head) || BOUNDARY_DEBUG_INSN_P (beg_head))
1907 beg_head = NEXT_INSN (beg_head);
1914 end_head = beg_head;
1915 else if (LABEL_P (end_head))
1916 end_head = NEXT_INSN (end_head);
1918 while (end_head != end_tail)
1919 if (NOTE_P (end_tail) || BOUNDARY_DEBUG_INSN_P (end_tail))
1920 end_tail = PREV_INSN (end_tail);
1927 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
int
no_real_insns_p (const_rtx head, const_rtx tail)
{
1932 while (head != NEXT_INSN (tail))
1934 if (!NOTE_P (head) && !LABEL_P (head)
1935 && !BOUNDARY_DEBUG_INSN_P (head))
1937 head = NEXT_INSN (head);
1942 /* Delete notes between HEAD and TAIL and put them in the chain
1943 of notes ended by NOTE_LIST. */
static void
rm_other_notes (rtx head, rtx tail)
{
  rtx next_tail, insn, prev;

1951 if (head == tail && (! INSN_P (head)))
1954 next_tail = NEXT_INSN (tail);
1955 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1959 /* Farm out notes, and maybe save them in NOTE_LIST.
1960 This is needed to keep the debugger from
1961 getting completely deranged. */
1962 if (NOTE_NOT_BB_P (insn))
1965 insn = unlink_other_notes (insn, next_tail);
1967 gcc_assert ((sel_sched_p ()
1968 || prev != tail) && prev != head && insn != next_tail);
1973 /* Same as above, but also process REG_SAVE_NOTEs of HEAD. */
void
remove_notes (rtx head, rtx tail)
{
1977 /* rm_other_notes only removes notes which are _inside_ the
1978 block---that is, it won't remove notes before the first real insn
1979 or after the last real insn of the block. So if the first insn
1980 has a REG_SAVE_NOTE which would otherwise be emitted before the
1981 insn, it is redundant with the note before the start of the
1982 block, and so we have to take it out. */
1987 for (note = REG_NOTES (head); note; note = XEXP (note, 1))
1988 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
1989 remove_note (head, note);
1992 /* Remove remaining note insns from the block, save them in
1993 note_list. These notes are restored at the end of
1994 schedule_block (). */
1995 rm_other_notes (head, tail);
1998 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
1999 previously found among the insns. Insert them just before HEAD. */
rtx
restore_other_notes (rtx head, basic_block head_bb)
{
2005 rtx note_head = note_list;
2008 head_bb = BLOCK_FOR_INSN (head);
2010 head = NEXT_INSN (bb_note (head_bb));
2012 while (PREV_INSN (note_head))
2014 set_block_for_insn (note_head, head_bb);
2015 note_head = PREV_INSN (note_head);
2017 /* In the above cycle we've missed this note. */
2018 set_block_for_insn (note_head, head_bb);
2020 PREV_INSN (note_head) = PREV_INSN (head);
2021 NEXT_INSN (PREV_INSN (head)) = note_head;
2022 PREV_INSN (head) = note_list;
2023 NEXT_INSN (note_list) = head;
2025 if (BLOCK_FOR_INSN (head) != head_bb)
2026 BB_END (head_bb) = note_list;
2034 /* Move insns that became ready to fire from queue to ready list. */
static void
queue_to_ready (struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx skip_insn;

2043 q_ptr = NEXT_Q (q_ptr);
2045 if (dbg_cnt (sched_insn) == false)
      /* If the debug counter is activated, do not requeue the insn next
	 after last_scheduled_insn.  */
2049 skip_insn = next_nonnote_insn (last_scheduled_insn);
2050 while (skip_insn && DEBUG_INSN_P (skip_insn))
2051 skip_insn = next_nonnote_insn (skip_insn);
2054 skip_insn = NULL_RTX;
2056 /* Add all pending insns that can be scheduled without stalls to the
2058 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
2060 insn = XEXP (link, 0);
2063 if (sched_verbose >= 2)
2064 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2065 (*current_sched_info->print_insn) (insn, 0));
2067 /* If the ready list is full, delay the insn for 1 cycle.
2068 See the comment in schedule_block for the rationale. */
2069 if (!reload_completed
2070 && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
2071 && !SCHED_GROUP_P (insn)
2072 && insn != skip_insn)
2074 if (sched_verbose >= 2)
2075 fprintf (sched_dump, "requeued because ready full\n");
2076 queue_insn (insn, 1);
2080 ready_add (ready, insn, false);
2081 if (sched_verbose >= 2)
2082 fprintf (sched_dump, "moving to ready without stalls\n");
2085 free_INSN_LIST_list (&insn_queue[q_ptr]);
2087 /* If there are no ready insns, stall until one is ready and add all
2088 of the pending insns at that point to the ready list. */
2089 if (ready->n_ready == 0)
2093 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
2095 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2097 for (; link; link = XEXP (link, 1))
2099 insn = XEXP (link, 0);
2102 if (sched_verbose >= 2)
2103 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2104 (*current_sched_info->print_insn) (insn, 0));
2106 ready_add (ready, insn, false);
2107 if (sched_verbose >= 2)
2108 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
2110 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
2112 advance_one_cycle ();
2117 advance_one_cycle ();
2120 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2121 clock_var += stalls;
2125 /* Used by early_queue_to_ready. Determines whether it is "ok" to
2126 prematurely move INSN from the queue to the ready list. Currently,
2127 if a target defines the hook 'is_costly_dependence', this function
2128 uses the hook to check whether there exist any dependences which are
2129 considered costly by the target, between INSN and other insns that
2130 have already been scheduled. Dependences are checked up to Y cycles
2131 back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
2132 controlling this value.
2133 (Other considerations could be taken into account instead, or in
2134 addition, depending on user flags and target hooks.) */
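/* An added illustration (a sketch, not from the original comment):
   with -fsched-stalled-insns-dep=2 the walk below inspects the insns
   scheduled during the last two dispatch cycles, e.g.

       prev_insn:  r1 = [r2]       (scheduled one cycle ago)
       insn:       r3 = r1 + r4    (candidate for early removal)

   and hands each dependence it finds, together with its cost, to
   targetm.sched.is_costly_dependence; INSN is kept in the queue if
   any such dependence is reported as costly.  */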
2137 ok_for_early_queue_removal (rtx insn)
2140 rtx prev_insn = last_scheduled_insn;
2142 if (targetm.sched.is_costly_dependence)
2144 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
2146 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
2150 if (prev_insn == current_sched_info->prev_head)
2156 if (!NOTE_P (prev_insn))
2160 dep = sd_find_dep_between (prev_insn, insn, true);
2164 cost = dep_cost (dep);
2166 if (targetm.sched.is_costly_dependence (dep, cost,
2167 flag_sched_stalled_insns_dep - n_cycles))
2172 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
2178 prev_insn = PREV_INSN (prev_insn);
2186 /* Remove insns from the queue, before they become "ready" with respect
2187 to FU latency considerations. */
2190 early_queue_to_ready (state_t state, struct ready_list *ready)
2198 state_t temp_state = alloca (dfa_state_size);
2200 int insns_removed = 0;
2203 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
2206 X == 0: There is no limit on how many queued insns can be removed
2207 prematurely. (flag_sched_stalled_insns = -1).
2209 X >= 1: Only X queued insns can be removed prematurely in each
2210 invocation. (flag_sched_stalled_insns = X).
2212 Otherwise: Early queue removal is disabled.
2213 (flag_sched_stalled_insns = 0)
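   For instance (an added illustration): -fsched-stalled-insns=3
   limits the loop below to three premature Q-->Ready moves per
   invocation (see the insns_removed check), whereas X == 0
   (flag_sched_stalled_insns == -1) never reaches that limit.  */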
2216 if (! flag_sched_stalled_insns)
2219 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
2221 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2223 if (sched_verbose > 6)
2224 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
2229 next_link = XEXP (link, 1);
2230 insn = XEXP (link, 0);
2231 if (insn && sched_verbose > 6)
2232 print_rtl_single (sched_dump, insn);
2234 memcpy (temp_state, state, dfa_state_size);
2235 if (recog_memoized (insn) < 0)
2236 /* Use a non-negative cost to mark the insn as not ready, so we
2237 avoid an infinite Q->R->Q->R... cycle. */
2240 cost = state_transition (temp_state, insn);
2242 if (sched_verbose >= 6)
2243 fprintf (sched_dump, "transition cost = %d\n", cost);
2245 move_to_ready = false;
2248 move_to_ready = ok_for_early_queue_removal (insn);
2249 if (move_to_ready == true)
2251 /* move from Q to R */
2253 ready_add (ready, insn, false);
2256 XEXP (prev_link, 1) = next_link;
2258 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
2260 free_INSN_LIST_node (link);
2262 if (sched_verbose >= 2)
2263 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
2264 (*current_sched_info->print_insn) (insn, 0));
2267 if (insns_removed == flag_sched_stalled_insns)
2268 /* Remove no more than flag_sched_stalled_insns insns
2269 from Q at a time. */
2270 return insns_removed;
2274 if (move_to_ready == false)
2281 } /* for stalls.. */
2283 return insns_removed;
2287 /* Print the ready list for debugging purposes. Callable from debugger. */
2290 debug_ready_list (struct ready_list *ready)
2295 if (ready->n_ready == 0)
2297 fprintf (sched_dump, "\n");
2301 p = ready_lastpos (ready);
2302 for (i = 0; i < ready->n_ready; i++)
2304 fprintf (sched_dump, " %s:%d",
2305 (*current_sched_info->print_insn) (p[i], 0),
2307 if (sched_pressure_p)
2308 fprintf (sched_dump, "(cost=%d",
2309 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
2310 if (INSN_TICK (p[i]) > clock_var)
2311 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
2312 if (sched_pressure_p)
2313 fprintf (sched_dump, ")");
2315 fprintf (sched_dump, "\n");
2318 /* Search INSN for REG_SAVE_NOTE note pairs for
2319 NOTE_INSN_EH_REGION_{BEG,END}; and convert them back into
2320 NOTEs. The REG_SAVE_NOTE following the first one contains the
2321 saved value of NOTE_BLOCK_NUMBER, which is useful for
2322 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. */
2324 reemit_notes (rtx insn)
2326 rtx note, last = insn;
2328 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2330 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
2332 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
2334 last = emit_note_before (note_type, last);
2335 remove_note (insn, note);
2340 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
2342 move_insn (rtx insn, rtx last, rtx nt)
2344 if (PREV_INSN (insn) != last)
2350 bb = BLOCK_FOR_INSN (insn);
2352 /* BB_HEAD is either LABEL or NOTE. */
2353 gcc_assert (BB_HEAD (bb) != insn);
2355 if (BB_END (bb) == insn)
2356 /* If this is last instruction in BB, move end marker one
2359 /* Jumps are always placed at the end of basic block. */
2360 jump_p = control_flow_insn_p (insn);
2363 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
2364 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
2365 || (common_sched_info->sched_pass_id
2366 == SCHED_EBB_PASS));
2368 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
2370 BB_END (bb) = PREV_INSN (insn);
2373 gcc_assert (BB_END (bb) != last);
2376 /* We move the block note along with jump. */
2380 note = NEXT_INSN (insn);
2381 while (NOTE_NOT_BB_P (note) && note != nt)
2382 note = NEXT_INSN (note);
2386 || BARRIER_P (note)))
2387 note = NEXT_INSN (note);
2389 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
2394 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
2395 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
2397 NEXT_INSN (note) = NEXT_INSN (last);
2398 PREV_INSN (NEXT_INSN (last)) = note;
2400 NEXT_INSN (last) = insn;
2401 PREV_INSN (insn) = last;
2403 bb = BLOCK_FOR_INSN (last);
2407 fix_jump_move (insn);
2409 if (BLOCK_FOR_INSN (insn) != bb)
2410 move_block_after_check (insn);
2412 gcc_assert (BB_END (bb) == last);
2415 df_insn_change_bb (insn, bb);
2417 /* Update BB_END, if needed. */
2418 if (BB_END (bb) == last)
2422 SCHED_GROUP_P (insn) = 0;
2425 /* Return true if scheduling INSN will finish current clock cycle. */
2427 insn_finishes_cycle_p (rtx insn)
2429 if (SCHED_GROUP_P (insn))
2430 /* After issuing INSN, the rest of the sched_group will be forced
2431 to issue in order. Don't make any plans for the rest of the cycle. */
2434 /* Finishing the block will, apparently, finish the cycle. */
2435 if (current_sched_info->insn_finishes_block_p
2436 && current_sched_info->insn_finishes_block_p (insn))
2442 /* The following structure describes an entry of the stack of choices. */
2445 /* Ordinal number of the issued insn in the ready queue. */
2447 /* The number of remaining insns whose issue we should still try. */
2449 /* The number of issued essential insns. */
2451 /* State after issuing the insn. */
2455 /* The following array is used to implement a stack of choices used in
2456 function max_issue. */
2457 static struct choice_entry *choice_stack;
2459 /* The following variable holds the number of essential insns issued
2460 on the current cycle. An insn is essential if it changes the
2461 processor's state. */
2462 int cycle_issued_insns;
2464 /* This holds the value of the target dfa_lookahead hook. */
2467 /* The following variable holds the maximal number of tries of issuing
2468 insns for the first cycle multipass insn scheduling. We define
2469 this value as constant * (DFA_LOOKAHEAD ** ISSUE_RATE). We would not
2470 need this constraint if all real insns (with non-negative codes)
2471 had reservations, because in that case the algorithm complexity is
2472 O(DFA_LOOKAHEAD ** ISSUE_RATE). Unfortunately, the DFA descriptions
2473 might be incomplete and such insns might occur. For such
2474 descriptions, the complexity of the algorithm (without the constraint)
2475 could reach DFA_LOOKAHEAD ** N, where N is the queue length. */
2476 static int max_lookahead_tries;
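/* A worked example of the bound above (illustrative numbers): with
   dfa_lookahead == 4 and issue_rate == 2, the initialization in
   max_issue computes

       max_lookahead_tries = 100 * 4 * 4 == 1600,

   i.e. constant * DFA_LOOKAHEAD ** ISSUE_RATE with constant == 100.  */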
2478 /* The following variable holds the value of the hook
2479 `first_cycle_multipass_dfa_lookahead' at the last call of
2480 max_issue. */
2481 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2483 /* The following variable holds the value of `issue_rate' at the
2484 last call of sched_init. */
2485 static int cached_issue_rate = 0;
2487 /* The following function returns the maximal (or close to maximal)
2488 number of insns which can be issued on the same cycle, one of
2489 which is the insn with the best rank (the first insn in READY).
2490 To do this, the function tries different samples of ready insns.
2491 READY is the current queue `ready'. The global array READY_TRY
2492 reflects which insns are already issued in this try. MAX_POINTS
2493 is the sum of the points of all instructions in READY. The
2494 function stops immediately if it reaches a solution where all
2495 instructions can be issued. INDEX will contain the index of the
2496 best insn in READY. This function is used only for first cycle multipass scheduling.
2500 This function expects recognized insns only. All USEs,
2501 CLOBBERs, etc. must be filtered elsewhere. */
2503 max_issue (struct ready_list *ready, int privileged_n, state_t state,
2506 int n, i, all, n_ready, best, delay, tries_num, points = -1, max_points;
2508 struct choice_entry *top;
2511 n_ready = ready->n_ready;
2512 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
2513 && privileged_n <= n_ready);
2515 /* Init MAX_LOOKAHEAD_TRIES. */
2516 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
2518 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
2519 max_lookahead_tries = 100;
2520 for (i = 0; i < issue_rate; i++)
2521 max_lookahead_tries *= dfa_lookahead;
2524 /* Init max_points. */
2526 more_issue = issue_rate - cycle_issued_insns;
2528 /* ??? We used to assert here that we never issue more insns than issue_rate.
2529 However, some targets (e.g. MIPS/SB1) claim lower issue rate than can be
2530 achieved to get better performance. Until these targets are fixed to use
2531 scheduler hooks to manipulate insns priority instead, the assert should
2534 gcc_assert (more_issue >= 0); */
2536 for (i = 0; i < n_ready; i++)
2539 if (more_issue-- > 0)
2540 max_points += ISSUE_POINTS (ready_element (ready, i));
2545 /* The number of the issued insns in the best solution. */
2550 /* Set initial state of the search. */
2551 memcpy (top->state, state, dfa_state_size);
2552 top->rest = dfa_lookahead;
2555 /* Count the number of the insns to search among. */
2556 for (all = i = 0; i < n_ready; i++)
2560 /* I is the index of the insn to try next. */
2565 if (/* If we've reached a dead end or searched enough of what we have
2568 /* Or have nothing else to try. */
2571 /* ??? (... || i == n_ready). */
2572 gcc_assert (i <= n_ready);
2574 if (top == choice_stack)
2577 if (best < top - choice_stack)
2582 /* Try to find issued privileged insn. */
2583 while (n && !ready_try[--n]);
2586 if (/* If all insns are equally good... */
2588 /* Or a privileged insn will be issued. */
2590 /* Then we have a solution. */
2592 best = top - choice_stack;
2593 /* This is the index of the insn issued first in this
2595 *index = choice_stack [1].index;
2597 if (top->n == max_points || best == all)
2602 /* Set ready-list index to point to the last insn
2603 ('i++' below will advance it to the next insn). */
2609 memcpy (state, top->state, dfa_state_size);
2611 else if (!ready_try [i])
2614 if (tries_num > max_lookahead_tries)
2616 insn = ready_element (ready, i);
2617 delay = state_transition (state, insn);
2620 if (state_dead_lock_p (state)
2621 || insn_finishes_cycle_p (insn))
2622 /* We won't issue any more instructions in the next
2629 if (memcmp (top->state, state, dfa_state_size) != 0)
2630 n += ISSUE_POINTS (insn);
2632 /* Advance to the next choice_entry. */
2634 /* Initialize it. */
2635 top->rest = dfa_lookahead;
2638 memcpy (top->state, state, dfa_state_size);
2645 /* Increase ready-list index. */
2649 /* Restore the original state of the DFA. */
2650 memcpy (state, choice_stack->state, dfa_state_size);
2655 /* The following function chooses an insn from READY and modifies
2656 READY. It is used only for first cycle multipass scheduling.
2658 Return values:
2659 -1 if the cycle should be advanced,
2660 0 if INSN_PTR is set to point to the desirable insn,
2661 1 if choose_ready () should be restarted without advancing the cycle. */
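/* A sketch of how the caller dispatches on the return value
   (compare the main loop of schedule_block below):

       res = choose_ready (&ready, &insn);

       if (res < 0)
	 ... advance the cycle ...
       else if (res > 0)
	 ... restart choose_ready () ...
       else
	 ... res == 0: issue the insn stored through INSN_PTR ...  */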
2663 choose_ready (struct ready_list *ready, rtx *insn_ptr)
2667 if (dbg_cnt (sched_insn) == false)
2671 insn = next_nonnote_insn (last_scheduled_insn);
2673 if (QUEUE_INDEX (insn) == QUEUE_READY)
2674 /* INSN is in the ready_list. */
2676 ready_remove_insn (insn);
2681 /* INSN is in the queue. Advance cycle to move it to the ready list. */
2687 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2688 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2689 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
2690 || DEBUG_INSN_P (ready_element (ready, 0)))
2692 *insn_ptr = ready_remove_first (ready);
2697 /* Try to choose the better insn. */
2698 int index = 0, i, n;
2700 int try_data = 1, try_control = 1;
2703 insn = ready_element (ready, 0);
2704 if (INSN_CODE (insn) < 0)
2706 *insn_ptr = ready_remove_first (ready);
2711 && spec_info->flags & (PREFER_NON_DATA_SPEC
2712 | PREFER_NON_CONTROL_SPEC))
2714 for (i = 0, n = ready->n_ready; i < n; i++)
2719 x = ready_element (ready, i);
2722 if (spec_info->flags & PREFER_NON_DATA_SPEC
2723 && !(s & DATA_SPEC))
2726 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2731 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2732 && !(s & CONTROL_SPEC))
2735 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2741 ts = TODO_SPEC (insn);
2742 if ((ts & SPECULATIVE)
2743 && (((!try_data && (ts & DATA_SPEC))
2744 || (!try_control && (ts & CONTROL_SPEC)))
2745 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2747 .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
2748 /* Discard a speculative instruction that stands first in the ready
2749 list. */
2751 change_queue_index (insn, 1);
2757 for (i = 1; i < ready->n_ready; i++)
2759 insn = ready_element (ready, i);
2762 = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2763 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
2766 /* Let the target filter the search space. */
2767 for (i = 1; i < ready->n_ready; i++)
2770 insn = ready_element (ready, i);
2772 #ifdef ENABLE_CHECKING
2773 /* If this insn is recognizable we should have already
2774 recognized it earlier.
2775 ??? Not very clear where this is supposed to be done.
2777 gcc_assert (INSN_CODE (insn) >= 0
2778 || recog_memoized (insn) < 0);
2782 = (/* INSN_CODE check can be omitted here as it is also done later
2784 INSN_CODE (insn) < 0
2785 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2786 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2790 if (max_issue (ready, 1, curr_state, &index) == 0)
2792 *insn_ptr = ready_remove_first (ready);
2793 if (sched_verbose >= 4)
2794 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
2795 (*current_sched_info->print_insn) (*insn_ptr, 0));
2800 if (sched_verbose >= 4)
2801 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
2802 (*current_sched_info->print_insn)
2803 (ready_element (ready, index), 0));
2805 *insn_ptr = ready_remove (ready, index);
2811 /* Use forward list scheduling to rearrange insns of block pointed to by
2812 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2816 schedule_block (basic_block *target_bb)
2818 int i, first_cycle_insn_p;
2820 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2821 int sort_p, advance, start_clock_var;
2823 /* Head/tail info for this block. */
2824 rtx prev_head = current_sched_info->prev_head;
2825 rtx next_tail = current_sched_info->next_tail;
2826 rtx head = NEXT_INSN (prev_head);
2827 rtx tail = PREV_INSN (next_tail);
2829 /* We used to have code to avoid getting parameters moved from hard
2830 argument registers into pseudos.
2832 However, it was removed when it proved to be of marginal benefit
2833 and caused problems because schedule_block and compute_forward_dependences
2834 had different notions of what the "head" insn was. */
2836 gcc_assert (head != tail || INSN_P (head));
2838 haifa_recovery_bb_recently_added_p = false;
2842 dump_new_block_header (0, *target_bb, head, tail);
2844 state_reset (curr_state);
2846 /* Clear the ready list. */
2847 ready.first = ready.veclen - 1;
2851 /* It is used for first cycle multipass scheduling. */
2852 temp_state = alloca (dfa_state_size);
2854 if (targetm.sched.md_init)
2855 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2857 /* We start inserting insns after PREV_HEAD. */
2858 last_scheduled_insn = prev_head;
2860 gcc_assert ((NOTE_P (last_scheduled_insn)
2861 || BOUNDARY_DEBUG_INSN_P (last_scheduled_insn))
2862 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2864 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2869 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
2870 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2872 /* Start just before the beginning of time. */
2875 /* We need the queue and ready lists and clock_var to be initialized
2876 in try_ready () (which is called through init_ready_list ()). */
2877 (*current_sched_info->init_ready_list) ();
2879 /* The algorithm is O(n^2) in the number of ready insns at any given
2880 time in the worst case. Before reload we are more likely to have
2881 big lists so truncate them to a reasonable size. */
2882 if (!reload_completed
2883 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
2885 ready_sort (&ready);
2887 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
2888 If there are debug insns, we know they're first. */
2889 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
2890 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2893 if (sched_verbose >= 2)
2895 fprintf (sched_dump,
2896 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2897 fprintf (sched_dump,
2898 ";;\t\t before reload => truncated to %d insns\n", i);
2901 /* Delay all insns past it for 1 cycle. If debug counter is
2902 activated make an exception for the insn right after
2903 last_scheduled_insn. */
2907 if (dbg_cnt (sched_insn) == false)
2908 skip_insn = next_nonnote_insn (last_scheduled_insn);
2910 skip_insn = NULL_RTX;
2912 while (i < ready.n_ready)
2916 insn = ready_remove (&ready, i);
2918 if (insn != skip_insn)
2919 queue_insn (insn, 1);
2924 /* Now we can restore basic block notes and maintain precise cfg. */
2925 restore_bb_notes (*target_bb);
2927 last_clock_var = -1;
2932 /* Loop until all the insns in BB are scheduled. */
2933 while ((*current_sched_info->schedule_more_p) ())
2937 start_clock_var = clock_var;
2941 advance_one_cycle ();
2943 /* Add to the ready list all pending insns that can be issued now.
2944 If there are no ready insns, increment clock until one
2945 is ready and add all pending insns at that point to the ready
2947 queue_to_ready (&ready);
2949 gcc_assert (ready.n_ready);
2951 if (sched_verbose >= 2)
2953 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2954 debug_ready_list (&ready);
2956 advance -= clock_var - start_clock_var;
2958 while (advance > 0);
2962 /* Sort the ready list based on priority. */
2963 ready_sort (&ready);
2965 if (sched_verbose >= 2)
2967 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2968 debug_ready_list (&ready);
2972 /* We don't want md sched reorder to even see debug insns, so
2973 issue them right away. */
2974 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2976 if (control_flow_insn_p (last_scheduled_insn))
2978 *target_bb = current_sched_info->advance_target_bb
2985 x = next_real_insn (last_scheduled_insn);
2987 dump_new_block_header (1, *target_bb, x, tail);
2990 last_scheduled_insn = bb_note (*target_bb);
2993 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2995 rtx insn = ready_remove_first (&ready);
2996 gcc_assert (DEBUG_INSN_P (insn));
2997 (*current_sched_info->begin_schedule_ready) (insn,
2998 last_scheduled_insn);
2999 move_insn (insn, last_scheduled_insn,
3000 current_sched_info->next_tail);
3001 last_scheduled_insn = insn;
3002 advance = schedule_insn (insn);
3003 gcc_assert (advance == 0);
3004 if (ready.n_ready > 0)
3005 ready_sort (&ready);
3012 /* Allow the target to reorder the list, typically for
3013 better instruction bundling. */
3014 if (sort_p && targetm.sched.reorder
3015 && (ready.n_ready == 0
3016 || !SCHED_GROUP_P (ready_element (&ready, 0))))
3018 targetm.sched.reorder (sched_dump, sched_verbose,
3019 ready_lastpos (&ready),
3020 &ready.n_ready, clock_var);
3022 can_issue_more = issue_rate;
3024 first_cycle_insn_p = 1;
3025 cycle_issued_insns = 0;
3032 if (sched_verbose >= 2)
3034 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
3036 debug_ready_list (&ready);
3037 if (sched_pressure_p)
3038 print_curr_reg_pressure ();
3041 if (ready.n_ready == 0
3043 && reload_completed)
3045 /* Allow scheduling insns directly from the queue in case
3046 there's nothing better to do (ready list is empty) but
3047 there are still vacant dispatch slots in the current cycle. */
3048 if (sched_verbose >= 6)
3049 fprintf (sched_dump,";;\t\tSecond chance\n");
3050 memcpy (temp_state, curr_state, dfa_state_size);
3051 if (early_queue_to_ready (temp_state, &ready))
3052 ready_sort (&ready);
3055 if (ready.n_ready == 0
3057 || state_dead_lock_p (curr_state)
3058 || !(*current_sched_info->schedule_more_p) ())
3061 /* Select and remove the insn from the ready list. */
3067 res = choose_ready (&ready, &insn);
3073 /* Restart choose_ready (). */
3076 gcc_assert (insn != NULL_RTX);
3079 insn = ready_remove_first (&ready);
3081 if (sched_pressure_p && INSN_TICK (insn) > clock_var)
3083 ready_add (&ready, insn, true);
3088 if (targetm.sched.dfa_new_cycle
3089 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
3090 insn, last_clock_var,
3091 clock_var, &sort_p))
3092 /* SORT_P is used by the target to override sorting
3093 of the ready list. This is needed when the target
3094 has modified its internal structures expecting that
3095 the insn will be issued next. As we need the insn
3096 to have the highest priority (so it will be returned by
3097 the ready_remove_first call above), we invoke
3098 ready_add (&ready, insn, true).
3099 But, still, there is one issue: INSN can be later
3100 discarded by scheduler's front end through
3101 current_sched_info->can_schedule_ready_p, hence, won't
3104 ready_add (&ready, insn, true);
3109 memcpy (temp_state, curr_state, dfa_state_size);
3110 if (recog_memoized (insn) < 0)
3112 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
3113 || asm_noperands (PATTERN (insn)) >= 0);
3114 if (!first_cycle_insn_p && asm_p)
3115 /* This is an asm insn that we tried to issue on a cycle
3116 other than the first. Issue it on the next cycle. */
3119 /* A USE insn, or something else we don't need to
3120 understand. We can't pass these directly to
3121 state_transition because it will trigger a
3122 fatal error for unrecognizable insns. */
3125 else if (sched_pressure_p)
3129 cost = state_transition (temp_state, insn);
3138 queue_insn (insn, cost);
3139 if (SCHED_GROUP_P (insn))
3148 if (current_sched_info->can_schedule_ready_p
3149 && ! (*current_sched_info->can_schedule_ready_p) (insn))
3150 /* We normally get here only if we don't want to move
3151 insn from the split block. */
3153 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
3157 /* DECISION is made. */
3159 if (TODO_SPEC (insn) & SPECULATIVE)
3160 generate_recovery_code (insn);
3162 if (control_flow_insn_p (last_scheduled_insn)
3163 /* This is used to switch basic blocks by request
3164 from the scheduler front-end (actually, sched-ebb.c only).
3165 This is used to process blocks with a single fallthru
3166 edge. If the succeeding block has a jump, that jump would
3167 be moved to the end of the current bb, thus corrupting the CFG. */
3168 || current_sched_info->advance_target_bb (*target_bb, insn))
3170 *target_bb = current_sched_info->advance_target_bb
3177 x = next_real_insn (last_scheduled_insn);
3179 dump_new_block_header (1, *target_bb, x, tail);
3182 last_scheduled_insn = bb_note (*target_bb);
3185 /* Update counters, etc in the scheduler's front end. */
3186 (*current_sched_info->begin_schedule_ready) (insn,
3187 last_scheduled_insn);
3189 move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
3190 reemit_notes (insn);
3191 last_scheduled_insn = insn;
3193 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
3195 cycle_issued_insns++;
3196 memcpy (curr_state, temp_state, dfa_state_size);
3199 if (targetm.sched.variable_issue)
3201 targetm.sched.variable_issue (sched_dump, sched_verbose,
3202 insn, can_issue_more);
3203 /* A naked CLOBBER or USE generates no instruction, so do
3204 not count them against the issue rate. */
3205 else if (GET_CODE (PATTERN (insn)) != USE
3206 && GET_CODE (PATTERN (insn)) != CLOBBER)
3208 advance = schedule_insn (insn);
3210 /* After issuing an asm insn we should start a new cycle. */
3211 if (advance == 0 && asm_p)
3216 first_cycle_insn_p = 0;
3218 /* Sort the ready list based on priority. This must be
3219 redone here, as schedule_insn may have readied additional
3220 insns that will not be sorted correctly. */
3221 if (ready.n_ready > 0)
3222 ready_sort (&ready);
3224 /* Quickly go through debug insns such that md sched
3225 reorder2 doesn't have to deal with debug insns. */
3226 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
3227 && (*current_sched_info->schedule_more_p) ())
3229 if (control_flow_insn_p (last_scheduled_insn))
3231 *target_bb = current_sched_info->advance_target_bb
3238 x = next_real_insn (last_scheduled_insn);
3240 dump_new_block_header (1, *target_bb, x, tail);
3243 last_scheduled_insn = bb_note (*target_bb);
3246 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
3248 insn = ready_remove_first (&ready);
3249 gcc_assert (DEBUG_INSN_P (insn));
3250 (*current_sched_info->begin_schedule_ready)
3251 (insn, last_scheduled_insn);
3252 move_insn (insn, last_scheduled_insn,
3253 current_sched_info->next_tail);
3254 advance = schedule_insn (insn);
3255 last_scheduled_insn = insn;
3256 gcc_assert (advance == 0);
3257 if (ready.n_ready > 0)
3258 ready_sort (&ready);
3262 if (targetm.sched.reorder2
3263 && (ready.n_ready == 0
3264 || !SCHED_GROUP_P (ready_element (&ready, 0))))
3267 targetm.sched.reorder2 (sched_dump, sched_verbose,
3269 ? ready_lastpos (&ready) : NULL,
3270 &ready.n_ready, clock_var);
3278 fprintf (sched_dump, ";;\tReady list (final): ");
3279 debug_ready_list (&ready);
3282 if (current_sched_info->queue_must_finish_empty)
3283 /* Sanity check -- queue must be empty now. Meaningless if region has
3285 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
3288 /* We must maintain QUEUE_INDEX between blocks in region. */
3289 for (i = ready.n_ready - 1; i >= 0; i--)
3293 x = ready_element (&ready, i);
3294 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3295 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3299 for (i = 0; i <= max_insn_queue_index; i++)
3302 for (link = insn_queue[i]; link; link = XEXP (link, 1))
3307 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3308 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3310 free_INSN_LIST_list (&insn_queue[i]);
3315 fprintf (sched_dump, ";; total time = %d\n", clock_var);
3317 if (!current_sched_info->queue_must_finish_empty
3318 || haifa_recovery_bb_recently_added_p)
3320 /* INSN_TICK (the minimum clock tick at which the insn becomes
3321 ready) may not be correct for insns in the subsequent
3322 blocks of the region. We should use a correct value of
3323 `clock_var' or modify INSN_TICK. It is better to keep
3324 the clock_var value equal to 0 at the start of a basic block.
3325 Therefore we modify INSN_TICK here. */
3326 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
3329 if (targetm.sched.md_finish)
3331 targetm.sched.md_finish (sched_dump, sched_verbose);
3332 /* Target might have added some instructions to the scheduled block
3333 in its md_finish () hook. These new insns don't have any data
3334 initialized and to identify them we extend h_i_d so that they'll
3336 sched_init_luids (NULL, NULL, NULL, NULL);
3340 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n",
3341 INSN_UID (head), INSN_UID (tail));
3343 /* Update head/tail boundaries. */
3344 head = NEXT_INSN (prev_head);
3345 tail = last_scheduled_insn;
3347 head = restore_other_notes (head, NULL);
3349 current_sched_info->head = head;
3350 current_sched_info->tail = tail;
3353 /* Set_priorities: compute priority of each insn in the block. */
3356 set_priorities (rtx head, rtx tail)
3360 int sched_max_insns_priority =
3361 current_sched_info->sched_max_insns_priority;
3364 if (head == tail && (! INSN_P (head) || BOUNDARY_DEBUG_INSN_P (head)))
3369 prev_head = PREV_INSN (head);
3370 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
3376 (void) priority (insn);
3378 gcc_assert (INSN_PRIORITY_KNOWN (insn));
3380 sched_max_insns_priority = MAX (sched_max_insns_priority,
3381 INSN_PRIORITY (insn));
3384 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
3389 /* Set dump and sched_verbose for the desired debugging output. If no
3390 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
3391 For -fsched-verbose=N, N>=10, print everything to stderr. */
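/* For example (an added illustration of the rule above):
   -fsched-verbose=2 together with a dump file prints to that dump
   file, while -fsched-verbose=11 prints everything to stderr whether
   or not a dump file was specified.  */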
3393 setup_sched_dump (void)
3395 sched_verbose = sched_verbose_param;
3396 if (sched_verbose_param == 0 && dump_file)
3398 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
3399 ? stderr : dump_file);
3402 /* Initialize some global state for the scheduler. This function works
3403 with the common data shared between all the schedulers. It is called
3404 from the scheduler specific initialization routine. */
3409 /* Disable speculative loads if cc0 is defined: they are not safe in its presence. */
3411 flag_schedule_speculative_load = 0;
3414 sched_pressure_p = (flag_sched_pressure && ! reload_completed
3415 && common_sched_info->sched_pass_id == SCHED_RGN_PASS);
3416 if (sched_pressure_p)
3417 ira_setup_eliminable_regset ();
3419 /* Initialize SPEC_INFO. */
3420 if (targetm.sched.set_sched_flags)
3422 spec_info = &spec_info_var;
3423 targetm.sched.set_sched_flags (spec_info);
3425 if (spec_info->mask != 0)
3427 spec_info->data_weakness_cutoff =
3428 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
3429 spec_info->control_weakness_cutoff =
3430 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
3431 * REG_BR_PROB_BASE) / 100;
3434 /* So we won't read anything accidentally. */
3439 /* So we won't read anything accidentally. */
3442 /* Initialize issue_rate. */
3443 if (targetm.sched.issue_rate)
3444 issue_rate = targetm.sched.issue_rate ();
3448 if (cached_issue_rate != issue_rate)
3450 cached_issue_rate = issue_rate;
3451 /* To invalidate max_lookahead_tries: */
3452 cached_first_cycle_multipass_dfa_lookahead = 0;
3455 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
3456 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
3460 if (targetm.sched.init_dfa_pre_cycle_insn)
3461 targetm.sched.init_dfa_pre_cycle_insn ();
3463 if (targetm.sched.init_dfa_post_cycle_insn)
3464 targetm.sched.init_dfa_post_cycle_insn ();
3467 dfa_state_size = state_size ();
3469 init_alias_analysis ();
3471 df_set_flags (DF_LR_RUN_DCE);
3472 df_note_add_problem ();
3474 /* More problems needed for interloop dep calculation in SMS. */
3475 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
3477 df_rd_add_problem ();
3478 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
3483 /* Do not run DCE after reload, as this can kill nops inserted
3485 if (reload_completed)
3486 df_clear_flags (DF_LR_RUN_DCE);
3488 regstat_compute_calls_crossed ();
3490 if (targetm.sched.md_init_global)
3491 targetm.sched.md_init_global (sched_dump, sched_verbose,
3492 get_max_uid () + 1);
3494 if (sched_pressure_p)
3496 int i, max_regno = max_reg_num ();
3498 ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
3499 sched_regno_cover_class
3500 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
3501 for (i = 0; i < max_regno; i++)
3502 sched_regno_cover_class[i]
3503 = (i < FIRST_PSEUDO_REGISTER
3504 ? ira_class_translate[REGNO_REG_CLASS (i)]
3505 : reg_cover_class (i));
3506 curr_reg_live = BITMAP_ALLOC (NULL);
3507 saved_reg_live = BITMAP_ALLOC (NULL);
3508 region_ref_regs = BITMAP_ALLOC (NULL);
3511 curr_state = xmalloc (dfa_state_size);
3514 static void haifa_init_only_bb (basic_block, basic_block);
3516 /* Initialize data structures specific to the Haifa scheduler. */
3518 haifa_sched_init (void)
3520 setup_sched_dump ();
3523 if (spec_info != NULL)
3525 sched_deps_info->use_deps_list = 1;
3526 sched_deps_info->generate_spec_deps = 1;
3529 /* Initialize luids, dependency caches, target and h_i_d for the
3532 bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
3538 VEC_quick_push (basic_block, bbs, bb);
3539 sched_init_luids (bbs, NULL, NULL, NULL);
3540 sched_deps_init (true);
3541 sched_extend_target ();
3542 haifa_init_h_i_d (bbs, NULL, NULL, NULL);
3544 VEC_free (basic_block, heap, bbs);
3547 sched_init_only_bb = haifa_init_only_bb;
3548 sched_split_block = sched_split_block_1;
3549 sched_create_empty_bb = sched_create_empty_bb_1;
3550 haifa_recovery_bb_ever_added_p = false;
3552 #ifdef ENABLE_CHECKING
3553 /* This is used preferably for finding bugs in check_cfg () itself.
3554 We must call sched_init_bbs () before check_cfg () because check_cfg ()
3555 assumes that the last insn in the last bb has a non-null successor. */
3559 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
3560 before_recovery = 0;
3564 /* Finish work with the data specific to the Haifa scheduler. */
3566 haifa_sched_finish (void)
3568 sched_create_empty_bb = NULL;
3569 sched_split_block = NULL;
3570 sched_init_only_bb = NULL;
3572 if (spec_info && spec_info->dump)
3574 char c = reload_completed ? 'a' : 'b';
3576 fprintf (spec_info->dump,
3577 ";; %s:\n", current_function_name ());
3579 fprintf (spec_info->dump,
3580 ";; Procedure %cr-begin-data-spec motions == %d\n",
3582 fprintf (spec_info->dump,
3583 ";; Procedure %cr-be-in-data-spec motions == %d\n",
3585 fprintf (spec_info->dump,
3586 ";; Procedure %cr-begin-control-spec motions == %d\n",
3587 c, nr_begin_control);
3588 fprintf (spec_info->dump,
3589 ";; Procedure %cr-be-in-control-spec motions == %d\n",
3590 c, nr_be_in_control);
3593 /* Finalize h_i_d, dependency caches, and luids for the whole
3594 function. Target will be finalized in md_global_finish (). */
3595 sched_deps_finish ();
3596 sched_finish_luids ();
3597 current_sched_info = NULL;
3601 /* Free global data used during insn scheduling. This function works with
3602 the common data shared between the schedulers. */
3607 haifa_finish_h_i_d ();
3608 if (sched_pressure_p)
3610 free (sched_regno_cover_class);
3611 BITMAP_FREE (region_ref_regs);
3612 BITMAP_FREE (saved_reg_live);
3613 BITMAP_FREE (curr_reg_live);
3617 if (targetm.sched.md_finish_global)
3618 targetm.sched.md_finish_global (sched_dump, sched_verbose);
3620 end_alias_analysis ();
3622 regstat_free_calls_crossed ();
3626 #ifdef ENABLE_CHECKING
3627 /* After reload ia64 backend clobbers CFG, so can't check anything. */
3628 if (!reload_completed)
3633 /* Fix INSN_TICKs of the instructions in the current block as well as
3634 INSN_TICKs of their dependents.
3635 HEAD and TAIL are the beginning and the end of the current scheduled block. */
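/* E.g. (a sketch of the intended normalization): if the block just
   scheduled finished at clock_var == 7, then NEXT_CLOCK == 8 below,
   and a dependent insn in a subsequent block with INSN_TICK == 9 is
   rebased so that it becomes ready one cycle after the next block
   restarts its clock at 0.  */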
3637 fix_inter_tick (rtx head, rtx tail)
3639 /* Set of instructions with corrected INSN_TICK. */
3640 bitmap_head processed;
3641 /* ??? It is doubtful whether we should assume that cycle advance happens
3642 on basic block boundaries. Basically, insns that are unconditionally
3643 ready at the start of the block are preferable to those which have
3644 a one cycle dependency on an insn from the previous block. */
3645 int next_clock = clock_var + 1;
3647 bitmap_initialize (&processed, 0);
3649 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
3650 the INSN_TICKs of dependent instructions, so that INSN_TICKs are
3651 consistent across different blocks. */
3652 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
3657 sd_iterator_def sd_it;
3660 tick = INSN_TICK (head);
3661 gcc_assert (tick >= MIN_TICK);
3663 /* Fix INSN_TICK of instruction from just scheduled block. */
3664 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
3666 bitmap_set_bit (&processed, INSN_LUID (head));
3669 if (tick < MIN_TICK)
3672 INSN_TICK (head) = tick;
3675 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
3679 next = DEP_CON (dep);
3680 tick = INSN_TICK (next);
3682 if (tick != INVALID_TICK
3683 /* If NEXT has its INSN_TICK calculated, fix it.
3684 If not - it will be properly calculated from
3685 scratch later in fix_tick_ready. */
3686 && !bitmap_bit_p (&processed, INSN_LUID (next)))
3688 bitmap_set_bit (&processed, INSN_LUID (next));
3691 if (tick < MIN_TICK)
3694 if (tick > INTER_TICK (next))
3695 INTER_TICK (next) = tick;
3697 tick = INTER_TICK (next);
3699 INSN_TICK (next) = tick;
3704 bitmap_clear (&processed);
3707 static int haifa_speculate_insn (rtx, ds_t, rtx *);
3709 /* Check if NEXT is ready to be added to the ready or queue list.
3710 If "yes", add it to the proper list.
3712 -1 - is not ready yet,
3713 0 - added to the ready list,
3714 0 < N - queued for N cycles. */
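/* For instance (illustrative): an insn whose last dependence is
   resolved at or before the current clock goes straight to the ready
   list and 0 is returned; one whose computed INSN_TICK equals
   clock_var + 3 is queued and 3 is returned.  */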
3716 try_ready (rtx next)
3720 ts = &TODO_SPEC (next);
3723 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3724 && ((old_ts & HARD_DEP)
3725 || (old_ts & SPECULATIVE)));
3727 if (sd_lists_empty_p (next, SD_LIST_BACK))
3728 /* NEXT has all its dependencies resolved. */
3730 /* Remove HARD_DEP bit from NEXT's status. */
3733 if (current_sched_info->flags & DO_SPECULATION)
3734 /* Remove all speculative bits from NEXT's status. */
3735 *ts &= ~SPECULATIVE;
3739 /* One of NEXT's dependencies has been resolved.
3740 Recalculate NEXT's status. */
3742 *ts &= ~SPECULATIVE & ~HARD_DEP;
3744 if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
3745 /* Now we've got NEXT with speculative deps only.
3746 1. Look at the deps to see what we have to do.
3747 2. Check if we can do 'todo'. */
3749 sd_iterator_def sd_it;
3751 bool first_p = true;
3753 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
3755 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
3764 *ts = ds_merge (*ts, ds);
3767 if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
3768 /* Too few points. */
3769 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3776 gcc_assert (*ts == old_ts
3777 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3778 else if (current_sched_info->new_ready)
3779 *ts = current_sched_info->new_ready (next, *ts);
3781 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
3782 have its original pattern or changed (speculative) one. This is due
3783 to changing ebb in region scheduling.
3784 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
3785 has speculative pattern.
3787 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3788 control-speculative NEXT could have been discarded by sched-rgn.c
3789 (the same case as when discarded by can_schedule_ready_p ()). */
3791 if ((*ts & SPECULATIVE)
3792 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3793 need to change anything. */
3799 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3801 res = haifa_speculate_insn (next, *ts, &new_pat);
3806 /* It would be nice to change DEP_STATUS of all dependences,
3807 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3808 so we won't reanalyze anything. */
3809 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3813 /* We follow the rule that every speculative insn
3814 has a non-null ORIG_PAT. */
3815 if (!ORIG_PAT (next))
3816 ORIG_PAT (next) = PATTERN (next);
3820 if (!ORIG_PAT (next))
3821 /* If we are going to overwrite the original pattern of the insn,
3822 save it here. */
3823 ORIG_PAT (next) = PATTERN (next);
3825 haifa_change_pattern (next, new_pat);
3833 /* We need to restore pattern only if (*ts == 0), because otherwise it is
3834 either correct (*ts & SPECULATIVE),
3835 or we simply don't care (*ts & HARD_DEP). */
3837 gcc_assert (!ORIG_PAT (next)
3838 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3842 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3843 control-speculative NEXT could have been discarded by sched-rgn.c
3844 (the same case as when discarded by can_schedule_ready_p ()). */
3845 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3847 change_queue_index (next, QUEUE_NOWHERE);
3850 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3851 /* We should change the pattern of every previously speculative
3852 instruction, and we determine whether NEXT was speculative by
3853 using the ORIG_PAT field. There is one exception: speculation
3854 checks have ORIG_PAT set too, so skip them. */
3856 haifa_change_pattern (next, ORIG_PAT (next));
3857 ORIG_PAT (next) = 0;
3860 if (sched_verbose >= 2)
3862 int s = TODO_SPEC (next);
3864 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3865 (*current_sched_info->print_insn) (next, 0));
3867 if (spec_info && spec_info->dump)
3870 fprintf (spec_info->dump, "; data-spec;");
3871 if (s & BEGIN_CONTROL)
3872 fprintf (spec_info->dump, "; control-spec;");
3873 if (s & BE_IN_CONTROL)
3874 fprintf (spec_info->dump, "; in-control-spec;");
3877 fprintf (sched_dump, "\n");
3880 adjust_priority (next);
3882 return fix_tick_ready (next);
3885 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3887 fix_tick_ready (rtx next)
3891 if (!sd_lists_empty_p (next, SD_LIST_RES_BACK))
3894 sd_iterator_def sd_it;
3897 tick = INSN_TICK (next);
3898 /* If tick is not equal to INVALID_TICK, then update
3899 INSN_TICK of NEXT with the most recent resolved dependence
3900 cost. Otherwise, recalculate it from scratch. */
3901 full_p = (tick == INVALID_TICK);
3903 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
3905 rtx pro = DEP_PRO (dep);
3908 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3910 tick1 = INSN_TICK (pro) + dep_cost (dep);
3921 INSN_TICK (next) = tick;
3923 delay = tick - clock_var;
3924 if (delay <= 0 || sched_pressure_p)
3925 delay = QUEUE_READY;
3927 change_queue_index (next, delay);
3932 /* Move NEXT to the proper queue list with (DELAY >= 1),
3933 or add it to the ready list (DELAY == QUEUE_READY),
3934 or remove it from the ready and queue lists entirely (DELAY == QUEUE_NOWHERE). */
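/* E.g. change_queue_index (next, 3) requeues NEXT so that it becomes
   ready three cycles from now, while change_queue_index (next,
   QUEUE_NOWHERE) drops NEXT from both lists (an added illustration of
   the contract above).  */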
3936 change_queue_index (rtx next, int delay)
3938 int i = QUEUE_INDEX (next);
3940 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3942 gcc_assert (i != QUEUE_SCHEDULED);
3944 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3945 || (delay < 0 && delay == i))
3946 /* We have nothing to do. */
3949 /* Remove NEXT from wherever it is now. */
3950 if (i == QUEUE_READY)
3951 ready_remove_insn (next);
3953 queue_remove (next);
3955 /* Add it to the proper place. */
3956 if (delay == QUEUE_READY)
3957 ready_add (readyp, next, false);
3958 else if (delay >= 1)
3959 queue_insn (next, delay);
3961 if (sched_verbose >= 2)
3963 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3964 (*current_sched_info->print_insn) (next, 0));
3966 if (delay == QUEUE_READY)
3967 fprintf (sched_dump, " into ready\n");
3968 else if (delay >= 1)
3969 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3971 fprintf (sched_dump, " removed from ready or queue lists\n");
3975 static int sched_ready_n_insns = -1;
3977 /* Initialize per region data structures. */
3979 sched_extend_ready_list (int new_sched_ready_n_insns)
3983 if (sched_ready_n_insns == -1)
3984 /* At the first call we need to initialize one more choice_stack
3985 entry. */
3988 sched_ready_n_insns = 0;
3991 i = sched_ready_n_insns + 1;
3993 ready.veclen = new_sched_ready_n_insns + issue_rate;
3994 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
3996 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
3998 ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
3999 sched_ready_n_insns, sizeof (*ready_try));
4001 /* We allocate +1 element to save the initial state in the
4002 choice_stack[0] entry. */
4003 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
4004 new_sched_ready_n_insns + 1);
4006 for (; i <= new_sched_ready_n_insns; i++)
4007 choice_stack[i].state = xmalloc (dfa_state_size);
4009 sched_ready_n_insns = new_sched_ready_n_insns;
4012 /* Free per region data structures. */
4014 sched_finish_ready_list (void)
4025 for (i = 0; i <= sched_ready_n_insns; i++)
4026 free (choice_stack [i].state);
4027 free (choice_stack);
4028 choice_stack = NULL;
4030 sched_ready_n_insns = -1;
4034 haifa_luid_for_non_insn (rtx x)
4036 gcc_assert (NOTE_P (x) || LABEL_P (x));
4041 /* Generates recovery code for INSN. */
4043 generate_recovery_code (rtx insn)
4045 if (TODO_SPEC (insn) & BEGIN_SPEC)
4046 begin_speculative_block (insn);
4048 /* Here we have an insn with no dependencies on
4049 instructions other than CHECK_SPEC ones. */
4051 if (TODO_SPEC (insn) & BE_IN_SPEC)
4052 add_to_speculative_block (insn);
4056 Tries to add speculative dependencies of type FS between TWIN
4057 and the consumers found in INSN's forward dependency list. */
4059 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
4061 sd_iterator_def sd_it;
4064 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
4069 consumer = DEP_CON (dep);
4071 ds = DEP_STATUS (dep);
4073 if (/* If we want to create speculative dep. */
4075 /* And we can do that because this is a true dep. */
4076 && (ds & DEP_TYPES) == DEP_TRUE)
4078 gcc_assert (!(ds & BE_IN_SPEC));
4080 if (/* If this dep can be overcome with 'begin speculation'. */
4082 /* Then we have a choice: keep the dep 'begin speculative'
4083 or transform it into 'be in speculative'. */
4085 if (/* In try_ready we assert that once an insn became ready
4086 it can be removed from the ready (or queue) list only
4087 due to a backend decision. Hence we can't let the
4088 probability of the speculative dep decrease. */
4089 ds_weak (ds) <= ds_weak (fs))
4093 new_ds = (ds & ~BEGIN_SPEC) | fs;
4095 if (/* consumer can 'be in speculative'. */
4096 sched_insn_is_legitimate_for_speculation_p (consumer,
4098 /* Transform it to be in speculative. */
4103 /* Mark the dep as 'be in speculative'. */
4108 dep_def _new_dep, *new_dep = &_new_dep;
4110 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
4111 sd_add_dep (new_dep, false);
4116 /* Generates recovery code for BEGIN speculative INSN. */
4118 begin_speculative_block (rtx insn)
4120 if (TODO_SPEC (insn) & BEGIN_DATA)
4122 if (TODO_SPEC (insn) & BEGIN_CONTROL)
4125 create_check_block_twin (insn, false);
4127 TODO_SPEC (insn) &= ~BEGIN_SPEC;
4130 static void haifa_init_insn (rtx);
4132 /* Generates recovery code for BE_IN speculative INSN. */
4134 add_to_speculative_block (rtx insn)
4137 sd_iterator_def sd_it;
4140 rtx_vec_t priorities_roots;
4142 ts = TODO_SPEC (insn);
4143 gcc_assert (!(ts & ~BE_IN_SPEC));
4145 if (ts & BE_IN_DATA)
4147 if (ts & BE_IN_CONTROL)
4150 TODO_SPEC (insn) &= ~BE_IN_SPEC;
4151 gcc_assert (!TODO_SPEC (insn));
4153 DONE_SPEC (insn) |= ts;
4155 /* First we convert all simple checks to branchy. */
4156 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4157 sd_iterator_cond (&sd_it, &dep);)
4159 rtx check = DEP_PRO (dep);
4161 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
4163 create_check_block_twin (check, true);
4165 /* Restart search. */
4166 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4169 /* Continue search. */
4170 sd_iterator_next (&sd_it);
4173 priorities_roots = NULL;
4174 clear_priorities (insn, &priorities_roots);
4181 /* Get the first backward dependency of INSN. */
4182 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4183 if (!sd_iterator_cond (&sd_it, &dep))
4184 /* INSN has no backward dependencies left. */
4187 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
4188 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
4189 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4191 check = DEP_PRO (dep);
4193 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
4194 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
4196 rec = BLOCK_FOR_INSN (check);
4198 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
4199 haifa_init_insn (twin);
4201 sd_copy_back_deps (twin, insn, true);
4203 if (sched_verbose && spec_info->dump)
4204 /* INSN_BB (insn) isn't determined for twin insns yet.
4205 So we can't use current_sched_info->print_insn. */
4206 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4207 INSN_UID (twin), rec->index);
4209 twins = alloc_INSN_LIST (twin, twins);
4211 /* Add dependences between TWIN and all appropriate
4212 instructions from REC. */
4213 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
4215 rtx pro = DEP_PRO (dep);
4217 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
4219 /* INSN might have dependencies on instructions from
4220 several recovery blocks. At this iteration we process those
4221 producers that reside in REC. */
4222 if (BLOCK_FOR_INSN (pro) == rec)
4224 dep_def _new_dep, *new_dep = &_new_dep;
4226 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
4227 sd_add_dep (new_dep, false);
4231 process_insn_forw_deps_be_in_spec (insn, twin, ts);
4233 /* Remove all dependencies between INSN and insns in REC. */
4234 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4235 sd_iterator_cond (&sd_it, &dep);)
4237 rtx pro = DEP_PRO (dep);
4239 if (BLOCK_FOR_INSN (pro) == rec)
4240 sd_delete_dep (sd_it);
4242 sd_iterator_next (&sd_it);
4246 /* We couldn't have added the dependencies between INSN and TWINS earlier
4247 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
4252 twin = XEXP (twins, 0);
4255 dep_def _new_dep, *new_dep = &_new_dep;
4257 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4258 sd_add_dep (new_dep, false);
4261 twin = XEXP (twins, 1);
4262 free_INSN_LIST_node (twins);
4266 calc_priorities (priorities_roots);
4267 VEC_free (rtx, heap, priorities_roots);
4270 /* Extend the array pointed to by P and zero-fill only the new part. */
4272 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
4274 gcc_assert (new_nmemb >= old_nmemb);
4275 p = XRESIZEVAR (void, p, new_nmemb * size);
4276 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
4281 Find fallthru edge from PRED. */
4283 find_fallthru_edge (basic_block pred)
4289 succ = pred->next_bb;
4290 gcc_assert (succ->prev_bb == pred);
4292 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
4294 FOR_EACH_EDGE (e, ei, pred->succs)
4295 if (e->flags & EDGE_FALLTHRU)
4297 gcc_assert (e->dest == succ);
4303 FOR_EACH_EDGE (e, ei, succ->preds)
4304 if (e->flags & EDGE_FALLTHRU)
4306 gcc_assert (e->src == pred);
4314 /* Extend per basic block data structures. */
4316 sched_extend_bb (void)
4320 /* The following is done to keep current_sched_info->next_tail non null. */
4321 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4322 if (NEXT_INSN (insn) == 0
4325 /* Don't emit a NOTE if it would end up before a BARRIER. */
4326 && !BARRIER_P (NEXT_INSN (insn))))
4328 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
4329 /* Make insn appear outside BB. */
4330 set_block_for_insn (note, NULL);
4331 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4335 /* Init per basic block data structures. */
4337 sched_init_bbs (void)
4342 /* Initialize BEFORE_RECOVERY variable. */
4344 init_before_recovery (basic_block *before_recovery_ptr)
4349 last = EXIT_BLOCK_PTR->prev_bb;
4350 e = find_fallthru_edge (last);
4354 /* We create two basic blocks:
4355 1. Single instruction block is inserted right after E->SRC
4357 2. Empty block right before EXIT_BLOCK.
4358 Between these two blocks recovery blocks will be emitted. */
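      /* Schematically (an added sketch, matching the dump printed
	 below):

	     last ->> single -> empty ->> EXIT

	 with recovery blocks later emitted between SINGLE and EMPTY.  */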
4360 basic_block single, empty;
4363 /* If the fallthrough edge to exit we've found is from the block we've
4364 created before, don't do anything more. */
4365 if (last == after_recovery)
4368 adding_bb_to_current_region_p = false;
4370 single = sched_create_empty_bb (last);
4371 empty = sched_create_empty_bb (single);
4373 /* Add new blocks to the root loop. */
4374 if (current_loops != NULL)
4376 add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
4377 add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
4380 single->count = last->count;
4381 empty->count = last->count;
4382 single->frequency = last->frequency;
4383 empty->frequency = last->frequency;
4384 BB_COPY_PARTITION (single, last);
4385 BB_COPY_PARTITION (empty, last);
4387 redirect_edge_succ (e, single);
4388 make_single_succ_edge (single, empty, 0);
4389 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
4390 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
4392 label = block_label (empty);
4393 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
4394 JUMP_LABEL (x) = label;
4395 LABEL_NUSES (label)++;
4396 haifa_init_insn (x);
4398 emit_barrier_after (x);
4400 sched_init_only_bb (empty, NULL);
4401 sched_init_only_bb (single, NULL);
4404 adding_bb_to_current_region_p = true;
4405 before_recovery = single;
4406 after_recovery = empty;
4408 if (before_recovery_ptr)
4409 *before_recovery_ptr = before_recovery;
4411 if (sched_verbose >= 2 && spec_info->dump)
4412 fprintf (spec_info->dump,
4413 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
4414 last->index, single->index, empty->index);
4417 before_recovery = last;
4420 /* Returns new recovery block. */
4422 sched_create_recovery_block (basic_block *before_recovery_ptr)
4428 haifa_recovery_bb_recently_added_p = true;
4429 haifa_recovery_bb_ever_added_p = true;
4431 init_before_recovery (before_recovery_ptr);
4433 barrier = get_last_bb_insn (before_recovery);
4434 gcc_assert (BARRIER_P (barrier));
4436 label = emit_label_after (gen_label_rtx (), barrier);
4438 rec = create_basic_block (label, label, before_recovery);
4440 /* A recovery block always ends with an unconditional jump. */
4441 emit_barrier_after (BB_END (rec));
4443 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
4444 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
4446 if (sched_verbose && spec_info->dump)
4447 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
4453 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
4454 and emit necessary jumps. */
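/* The resulting shape (an added sketch):

       first_bb -------------------> second_bb
	   \                             ^
	    `--------> rec --------------'

   FIRST_BB ends with the conditional speculation check jumping to REC;
   REC ends with an unconditional jump to SECOND_BB.  */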
4456 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
4457 basic_block second_bb)
4464 /* This fixes the incoming edge. */
4465 /* ??? Which other flags should be specified? */
4466 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
4467 /* Partition type is the same, if it is "unpartitioned". */
4468 edge_flags = EDGE_CROSSING;
4472 e = make_edge (first_bb, rec, edge_flags);
4473 label = block_label (second_bb);
4474 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
4475 JUMP_LABEL (jump) = label;
4476 LABEL_NUSES (label)++;
4478 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
4479 /* Partition type is the same, if it is "unpartitioned". */
4481 /* Rewritten from cfgrtl.c. */
4482 if (flag_reorder_blocks_and_partition
4483 && targetm.have_named_sections)
4485 /* We don't need the same note for the check because
4486 any_condjump_p (check) == true. */
4487 add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
4489 edge_flags = EDGE_CROSSING;
4494 make_single_succ_edge (rec, second_bb, edge_flags);
/* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
   INSN is a simple check, that should be converted to a branchy one.  */
static void
create_check_block_twin (rtx insn, bool mutate_p)
{
  basic_block rec;
  rtx label, check, twin;
  ds_t fs;
  sd_iterator_def sd_it;
  dep_t dep;
  dep_def _new_dep, *new_dep = &_new_dep;
  ds_t todo_spec;

  gcc_assert (ORIG_PAT (insn) != NULL_RTX);

  if (!mutate_p)
    todo_spec = TODO_SPEC (insn);
  else
    {
      gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
		  && (TODO_SPEC (insn) & SPECULATIVE) == 0);

      todo_spec = CHECK_SPEC (insn);
    }

  todo_spec &= SPECULATIVE;

  /* Create recovery block.  */
  if (mutate_p || targetm.sched.needs_block_p (todo_spec))
    {
      rec = sched_create_recovery_block (NULL);
      label = BB_HEAD (rec);
    }
  else
    {
      rec = EXIT_BLOCK_PTR;
      label = NULL_RTX;
    }

  /* Emit CHECK.  */
  check = targetm.sched.gen_spec_check (insn, label, todo_spec);

  if (rec != EXIT_BLOCK_PTR)
    {
      /* To have mem_reg alive at the beginning of second_bb,
	 we emit check BEFORE insn, so insn after splitting
	 insn will be at the beginning of second_bb, which will
	 provide us with the correct life information.  */
      check = emit_jump_insn_before (check, insn);
      JUMP_LABEL (check) = label;
      LABEL_NUSES (label)++;
    }
  else
    check = emit_insn_before (check, insn);

  /* Extend data structures.  */
  haifa_init_insn (check);

  /* CHECK is being added to the current region.  Extend the ready list.  */
  gcc_assert (sched_ready_n_insns != -1);
  sched_extend_ready_list (sched_ready_n_insns + 1);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  RECOVERY_BLOCK (check) = rec;

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
	     (*current_sched_info->print_insn) (check, 0));

  gcc_assert (ORIG_PAT (insn));

  /* Initialize TWIN (twin is a duplicate of the original instruction
     in the recovery block).  */
  if (rec != EXIT_BLOCK_PTR)
    {
      sd_iterator_def sd_it;
      dep_t dep;

      FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
	if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
	  {
	    struct _dep _dep2, *dep2 = &_dep2;

	    init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);

	    sd_add_dep (dep2, true);
	  }

      twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
      haifa_init_insn (twin);

      if (sched_verbose && spec_info->dump)
	/* INSN_BB (insn) isn't determined for twin insns yet.
	   So we can't use current_sched_info->print_insn.  */
	fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
		 INSN_UID (twin), rec->index);
    }
  else
    {
      ORIG_PAT (check) = ORIG_PAT (insn);
      HAS_INTERNAL_DEP (check) = 1;
      twin = check;
      /* ??? We probably should change all OUTPUT dependencies to
	 (TRUE | OUTPUT).  */
    }

  /* Copy all resolved back dependencies of INSN to TWIN.  This will
     provide correct value for INSN_TICK (TWIN).  */
  sd_copy_back_deps (twin, insn, true);

  if (rec != EXIT_BLOCK_PTR)
    /* In case of branchy check, fix CFG.  */
    {
      basic_block first_bb, second_bb;
      rtx jump;

      first_bb = BLOCK_FOR_INSN (check);
      second_bb = sched_split_block (first_bb, check);

      sched_create_recovery_edges (first_bb, rec, second_bb);

      sched_init_only_bb (second_bb, first_bb);
      sched_init_only_bb (rec, EXIT_BLOCK_PTR);

      jump = BB_END (rec);
      haifa_init_insn (jump);
    }

  /* Move backward dependences from INSN to CHECK and
     move forward dependences from INSN to TWIN.  */

  /* First, create dependencies between INSN's producers and CHECK & TWIN.  */
  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx pro = DEP_PRO (dep);
      ds_t ds;

      /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
	 check --TRUE--> producer  ??? or ANTI ???
	 twin  --TRUE--> producer
	 twin  --ANTI--> check

	 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
	 check --ANTI--> producer
	 twin  --ANTI--> producer
	 twin  --ANTI--> check

	 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
	 check ~~TRUE~~> producer
	 twin  ~~TRUE~~> producer
	 twin  --ANTI--> check  */
      ds = DEP_STATUS (dep);

      if (ds & BEGIN_SPEC)
	{
	  gcc_assert (!mutate_p);
	  ds &= ~BEGIN_SPEC;
	}

      init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
      sd_add_dep (new_dep, false);

      if (rec != EXIT_BLOCK_PTR)
	{
	  DEP_CON (new_dep) = twin;
	  sd_add_dep (new_dep, false);
	}
    }

  /* Second, remove backward dependencies of INSN.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    {
      if ((DEP_STATUS (dep) & BEGIN_SPEC)
	  || mutate_p)
	/* We can delete this dep because we overcome it with
	   BEGIN_SPECULATION.  */
	sd_delete_dep (sd_it);
      else
	sd_iterator_next (&sd_it);
    }

  /* Future Speculations.  Determine what BE_IN speculations will be like.  */
  fs = 0;

  /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
     here.  */

  gcc_assert (!DONE_SPEC (insn));

  if (!mutate_p)
    {
      ds_t ts = TODO_SPEC (insn);

      DONE_SPEC (insn) = ts & BEGIN_SPEC;
      CHECK_SPEC (check) = ts & BEGIN_SPEC;

      /* Chances of future speculations depend solely upon the initial
	 BEGIN speculation.  */
      if (ts & BEGIN_DATA)
	fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
      if (ts & BEGIN_CONTROL)
	fs = set_dep_weak (fs, BE_IN_CONTROL,
			   get_dep_weak (ts, BEGIN_CONTROL));
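
      /* For example, if TS carries BEGIN_DATA with weakness W, then FS
	 now carries BE_IN_DATA with the same weakness W: the chances of
	 the future BE_IN speculations are exactly the chances of the
	 initial BEGIN speculation they depend on.  */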
    }
  else
    CHECK_SPEC (check) = CHECK_SPEC (insn);

  /* Future speculations: call the helper.  */
  process_insn_forw_deps_be_in_spec (insn, twin, fs);

  if (rec != EXIT_BLOCK_PTR)
    {
      /* Which types of dependencies should we use here is,
	 generally, a machine-dependent question...  But, for now,
	 we use true dependencies.  */
      init_dep (new_dep, insn, check, REG_DEP_TRUE);
      sd_add_dep (new_dep, false);

      init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
      sd_add_dep (new_dep, false);
    }
  else
    {
      if (spec_info->dump)
	fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
		 (*current_sched_info->print_insn) (insn, 0));

      /* Remove all dependencies of the INSN.  */
      {
	sd_it = sd_iterator_start (insn, (SD_LIST_FORW
					  | SD_LIST_BACK
					  | SD_LIST_RES_BACK));
	while (sd_iterator_cond (&sd_it, &dep))
	  sd_delete_dep (sd_it);
      }

      /* If the former check (INSN) was already moved to the ready (or queue)
	 list, add the new check (CHECK) there too.  */
      if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
	try_ready (check);

      /* Remove the old check from the instruction stream and free its
	 data.  */
      sched_remove_insn (insn);
    }

  init_dep (new_dep, check, twin, REG_DEP_ANTI);
  sd_add_dep (new_dep, false);

  init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
  sd_add_dep (new_dep, false);

  /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
     because it'll be done later in add_to_speculative_block.  */
  if (!mutate_p)
    {
      rtx_vec_t priorities_roots = NULL;

      clear_priorities (twin, &priorities_roots);
      calc_priorities (priorities_roots);
      VEC_free (rtx, heap, priorities_roots);
    }
}
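
/* To illustrate the above (a sketch with a hypothetical speculative
   load): for INSN "r1 = [r2]" speculated with BEGIN_DATA, we emit a
   check CHECK in front of INSN, duplicate the load as TWIN at the end
   of the recovery block REC, and wire the dependencies so that CHECK
   depends on INSN's producers and TWIN anti-depends on CHECK.  If the
   speculation turns out to be wrong, control branches from CHECK to
   REC, where TWIN re-executes the load non-speculatively.  */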
/* Removes dependencies between instructions in the recovery block REC
   and the usual region instructions.  Inner dependences are kept so
   they won't need to be recomputed.  */
static void
fix_recovery_deps (basic_block rec)
{
  rtx note, insn, jump, ready_list = 0;
  bitmap_head in_ready;
  rtx link;

  bitmap_initialize (&in_ready, 0);

  /* NOTE - a basic block note.  */
  note = NEXT_INSN (BB_HEAD (rec));
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  insn = BB_END (rec);
  gcc_assert (JUMP_P (insn));
  insn = PREV_INSN (insn);

  do
    {
      sd_iterator_def sd_it;
      dep_t dep;

      for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
	   sd_iterator_cond (&sd_it, &dep);)
	{
	  rtx consumer = DEP_CON (dep);

	  if (BLOCK_FOR_INSN (consumer) != rec)
	    {
	      sd_delete_dep (sd_it);

	      if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
		{
		  ready_list = alloc_INSN_LIST (consumer, ready_list);
		  bitmap_set_bit (&in_ready, INSN_LUID (consumer));
		}
	    }
	  else
	    {
	      gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

	      sd_iterator_next (&sd_it);
	    }
	}

      insn = PREV_INSN (insn);
    }
  while (insn != note);

  bitmap_clear (&in_ready);

  /* Try to add instructions to the ready or queue list.  */
  for (link = ready_list; link; link = XEXP (link, 1))
    try_ready (XEXP (link, 0));
  free_INSN_LIST_list (&ready_list);

  /* Fix the jump's dependences.  */
  insn = BB_HEAD (rec);
  jump = BB_END (rec);

  gcc_assert (LABEL_P (insn));
  insn = NEXT_INSN (insn);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
  add_jump_dependencies (insn, jump);
}
/* Change pattern of INSN to NEW_PAT.  */
void
sched_change_pattern (rtx insn, rtx new_pat)
{
  int t;

  t = validate_change (insn, &PATTERN (insn), new_pat, 0);
  gcc_assert (t);
  dfa_clear_single_insn_cache (insn);
}
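
/* Note that validate_change () above is called with IN_GROUP == 0, so
   the new pattern is verified against the insn's constraints and
   applied immediately; the assert guarantees that the replacement was
   accepted.  */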
/* Change pattern of INSN to NEW_PAT.  Invalidate cached haifa
   instruction data.  */
static void
haifa_change_pattern (rtx insn, rtx new_pat)
{
  sched_change_pattern (insn, new_pat);

  /* Invalidate INSN_COST, so it'll be recalculated.  */
  INSN_COST (insn) = -1;
  /* Invalidate INSN_TICK, so it'll be recalculated.  */
  INSN_TICK (insn) = INVALID_TICK;
}
/* -1 - can't speculate,
   0 - for speculation with REQUEST mode it is OK to use
   current instruction pattern,
   1 - need to change pattern for *NEW_PAT to be speculative.  */
int
sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION
	      && (request & SPECULATIVE)
	      && sched_insn_is_legitimate_for_speculation_p (insn, request));

  if ((request & spec_info->mask) != request)
    return -1;

  if (request & BE_IN_SPEC
      && !(request & BEGIN_SPEC))
    return 0;

  return targetm.sched.speculate_insn (insn, request, new_pat);
}

static int
haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
{
  gcc_assert (sched_deps_info->generate_spec_deps
	      && !IS_SPECULATION_CHECK_P (insn));

  if (HAS_INTERNAL_DEP (insn)
      || SCHED_GROUP_P (insn))
    return -1;

  return sched_speculate_insn (insn, request, new_pat);
}
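
/* A caller is expected to dispatch on the return value roughly like
   this (a sketch, not a quote of any particular caller):

     rtx new_pat;

     switch (haifa_speculate_insn (insn, request, &new_pat))
       {
       case -1:
	 give_up ();                          -- can't speculate INSN
	 break;
       case 0:
	 break;                               -- current pattern is OK
       case 1:
	 haifa_change_pattern (insn, new_pat);
	 break;
       }

   where give_up () stands for whatever the caller does when INSN can't
   be speculated.  */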
/* Print some information about block BB, which starts with HEAD and
   ends with TAIL, before scheduling it.
   I is zero if the scheduler is about to start with a fresh ebb.  */
static void
dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
{
  if (i == 0)
    fprintf (sched_dump,
	     ";; ======================================================\n");
  else
    fprintf (sched_dump,
	     ";; =====================ADVANCING TO=====================\n");
  fprintf (sched_dump,
	   ";; -- basic block %d from %d to %d -- %s reload\n",
	   bb->index, INSN_UID (head), INSN_UID (tail),
	   (reload_completed ? "after" : "before"));
  fprintf (sched_dump,
	   ";; ======================================================\n");
  fprintf (sched_dump, "\n");
}
/* Unlink basic block notes and labels and save them, so they
   can be easily restored.  We unlink basic block notes in EBB to
   provide back-compatibility with the previous code, as target backends
   assume that there'll be only instructions between
   current_sched_info->{head and tail}.  We restore these notes as soon
   as we can.
   FIRST (LAST) is the first (last) basic block in the ebb.
   NB: In the usual case (FIRST == LAST) nothing is really done.  */
void
unlink_bb_notes (basic_block first, basic_block last)
{
  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  if (first == last)
    return;

  bb_header = XNEWVEC (rtx, last_basic_block);

  /* Make a sentinel.  */
  if (last->next_bb != EXIT_BLOCK_PTR)
    bb_header[last->next_bb->index] = 0;

  first = first->next_bb;
  do
    {
      rtx prev, label, note, next;

      label = BB_HEAD (last);
      if (LABEL_P (label))
	note = NEXT_INSN (label);
      else
	note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      prev = PREV_INSN (label);
      next = NEXT_INSN (note);
      gcc_assert (prev && next);

      NEXT_INSN (prev) = next;
      PREV_INSN (next) = prev;

      bb_header[last->index] = label;

      if (last == first)
	break;

      last = last->prev_bb;
    }
  while (true);
}
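
/* The splice above turns the chain  PREV <-> LABEL ... NOTE <-> NEXT
   into  PREV <-> NEXT, stashing LABEL in bb_header[]; restore_bb_notes ()
   below undoes this by relinking LABEL ... NOTE between PREV and NEXT.  */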
/* Restore basic block notes.
   FIRST is the first basic block in the ebb.  */
void
restore_bb_notes (basic_block first)
{
  if (!bb_header)
    return;

  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  first = first->next_bb;
  /* Remember: FIRST is actually the second basic block in the ebb.  */

  while (first != EXIT_BLOCK_PTR
	 && bb_header[first->index])
    {
      rtx prev, label, note, next;

      label = bb_header[first->index];
      prev = PREV_INSN (label);
      next = NEXT_INSN (prev);

      if (LABEL_P (label))
	note = NEXT_INSN (label);
      else
	note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      bb_header[first->index] = 0;

      NEXT_INSN (prev) = label;
      NEXT_INSN (note) = next;
      PREV_INSN (next) = note;

      first = first->next_bb;
    }

  free (bb_header);
  bb_header = 0;
}
/* Helper function.
   Fix CFG after both in- and inter-block movement of
   control_flow_insn_p JUMP.  */
static void
fix_jump_move (rtx jump)
{
  basic_block bb, jump_bb, jump_bb_next;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
	      || IS_SPECULATION_BRANCHY_CHECK_P (jump));

  if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
    /* If jump_bb_next is not empty.  */
    BB_END (jump_bb) = BB_END (jump_bb_next);

  if (BB_END (bb) != PREV_INSN (jump))
    /* Then there are instructions after jump that should be placed
       in jump_bb_next.  */
    BB_END (jump_bb_next) = BB_END (bb);
  else
    /* Otherwise jump_bb_next is empty.  */
    BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));

  /* To make assertion in move_insn happy.  */
  BB_END (bb) = PREV_INSN (jump);

  update_bb_for_insn (jump_bb_next);
}
/* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
static void
move_block_after_check (rtx jump)
{
  basic_block bb, jump_bb, jump_bb_next;
  VEC(edge,gc) *t;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  update_bb_for_insn (jump_bb);

  gcc_assert (IS_SPECULATION_CHECK_P (jump)
	      || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));

  unlink_block (jump_bb_next);
  link_block (jump_bb_next, bb);

  t = bb->succs;
  bb->succs = 0;
  move_succs (&(jump_bb->succs), bb);
  move_succs (&(jump_bb_next->succs), jump_bb);
  move_succs (&t, jump_bb_next);

  df_mark_solutions_dirty ();

  common_sched_info->fix_recovery_cfg
    (bb->index, jump_bb->index, jump_bb_next->index);
}
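
/* The three move_succs () calls above rotate the successor lists: BB
   takes over JUMP_BB's successors, JUMP_BB takes over JUMP_BB_NEXT's,
   and JUMP_BB_NEXT receives BB's original successors saved in T.  */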
/* Helper function for move_block_after_check.
   This function attaches the edge vector pointed to by SUCCSP to
   block TO.  */
static void
move_succs (VEC(edge,gc) **succsp, basic_block to)
{
  edge e;
  edge_iterator ei;

  gcc_assert (to->succs == 0);

  to->succs = *succsp;

  FOR_EACH_EDGE (e, ei, to->succs)
    e->src = to;

  *succsp = 0;
}
/* Remove INSN from the instruction stream.
   INSN should not have any dependencies.  */
static void
sched_remove_insn (rtx insn)
{
  sd_finish_insn (insn);

  change_queue_index (insn, QUEUE_NOWHERE);
  current_sched_info->add_remove_insn (insn, 1);
  remove_insn (insn);
}
/* Clear priorities of all instructions that are forward dependent on INSN.
   Store in vector pointed to by ROOTS_PTR insns on which priority () should
   be invoked to initialize all cleared priorities.  */
static void
clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
{
  sd_iterator_def sd_it;
  dep_t dep;
  bool insn_is_root_p = true;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx pro = DEP_PRO (dep);

      if (INSN_PRIORITY_STATUS (pro) >= 0
	  && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
	{
	  /* If DEP doesn't contribute to priority then INSN itself should
	     be added to priority roots.  */
	  if (contributes_to_priority_p (dep))
	    insn_is_root_p = false;

	  INSN_PRIORITY_STATUS (pro) = -1;
	  clear_priorities (pro, roots_ptr);
	}
    }

  if (insn_is_root_p)
    VEC_safe_push (rtx, heap, *roots_ptr, insn);
}
/* Recompute priorities of instructions whose priorities might have been
   changed.  ROOTS is a vector of instructions whose priority computation will
   trigger initialization of all cleared priorities.  */
static void
calc_priorities (rtx_vec_t roots)
{
  int i;
  rtx insn;

  for (i = 0; VEC_iterate (rtx, roots, i, insn); i++)
    priority (insn);
}
/* Add dependences between JUMP and other instructions in the recovery
   block.  INSN is the first insn in the recovery block.  */
static void
add_jump_dependencies (rtx insn, rtx jump)
{
  do
    {
      insn = NEXT_INSN (insn);
      gcc_assert (insn);
      if (insn == jump)
	break;

      if (dep_list_size (insn) == 0)
	{
	  dep_def _new_dep, *new_dep = &_new_dep;

	  init_dep (new_dep, insn, jump, REG_DEP_ANTI);
	  sd_add_dep (new_dep, false);
	}
    }
  while (insn != jump);

  gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
}
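
/* The anti dependencies added above keep the closing jump of the
   recovery block last: every insn in the block that has no other
   consumers must still be scheduled before the jump.  */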
/* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
rtx
bb_note (basic_block bb)
{
  rtx note;

  note = BB_HEAD (bb);
  if (LABEL_P (note))
    note = NEXT_INSN (note);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  return note;
}
#ifdef ENABLE_CHECKING
/* Helper function for check_cfg.
   Return nonzero if the edge vector pointed to by EL has an edge with
   TYPE in its flags.  */
static int
has_edge_p (VEC(edge,gc) *el, int type)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, el)
    if (e->flags & type)
      return 1;
  return 0;
}
/* Search back, starting at INSN, for an insn that is not a
   NOTE_INSN_VAR_LOCATION.  Don't search beyond HEAD, and return it if
   no such insn can be found.  */
static rtx
prev_non_location_insn (rtx insn, rtx head)
{
  while (insn != head && NOTE_P (insn)
	 && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
    insn = PREV_INSN (insn);

  return insn;
}
/* Check a few properties of the CFG between HEAD and TAIL.
   If HEAD (TAIL) is NULL check from the beginning (till the end) of the
   instruction stream.  */
static void
check_cfg (rtx head, rtx tail)
{
  rtx next_tail;
  basic_block bb = 0;
  int not_first = 0, not_last;

  if (head == NULL)
    head = get_insns ();
  if (tail == NULL)
    tail = get_last_insn ();
  next_tail = NEXT_INSN (tail);

  do
    {
      not_last = head != tail;

      if (not_first)
	gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
      if (not_last)
	gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);

      if (LABEL_P (head)
	  || (NOTE_INSN_BASIC_BLOCK_P (head)
	      && (!not_first
		  || (not_first && !LABEL_P (PREV_INSN (head))))))
	{
	  gcc_assert (bb == 0);
	  bb = BLOCK_FOR_INSN (head);
	  if (bb != 0)
	    gcc_assert (BB_HEAD (bb) == head);
	  else
	    /* This is the case of a jump table.  See inside_basic_block_p ().  */
	    gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
	}

      if (bb == 0)
	{
	  gcc_assert (!inside_basic_block_p (head));
	  head = NEXT_INSN (head);
	}
      else
	{
	  gcc_assert (inside_basic_block_p (head)
		      || NOTE_P (head));
	  gcc_assert (BLOCK_FOR_INSN (head) == bb);

	  if (LABEL_P (head))
	    {
	      head = NEXT_INSN (head);
	      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
	    }
	  else
	    {
	      if (control_flow_insn_p (head))
		{
		  gcc_assert (prev_non_location_insn (BB_END (bb), head)
			      == head);

		  if (any_uncondjump_p (head))
		    gcc_assert (EDGE_COUNT (bb->succs) == 1
				&& BARRIER_P (NEXT_INSN (head)));
		  else if (any_condjump_p (head))
		    gcc_assert (/* Usual case.  */
				(EDGE_COUNT (bb->succs) > 1
				 && !BARRIER_P (NEXT_INSN (head)))
				/* Or jump to the next instruction.  */
				|| (EDGE_COUNT (bb->succs) == 1
				    && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
					== JUMP_LABEL (head))));
		}
	      if (BB_END (bb) == head)
		{
		  if (EDGE_COUNT (bb->succs) > 1)
		    gcc_assert (control_flow_insn_p (prev_non_location_insn
						     (head, BB_HEAD (bb)))
				|| has_edge_p (bb->succs, EDGE_COMPLEX));
		  bb = 0;
		}

	      head = NEXT_INSN (head);
	    }
	}

      not_first = 1;
    }
  while (head != next_tail);

  gcc_assert (bb == 0);
}
#endif /* ENABLE_CHECKING */
/* Extend per basic block data structures.  */
static void
extend_bb (void)
{
  if (sched_scan_info->extend_bb)
    sched_scan_info->extend_bb ();
}

/* Init data for BB.  */
static void
init_bb (basic_block bb)
{
  if (sched_scan_info->init_bb)
    sched_scan_info->init_bb (bb);
}

/* Extend per insn data structures.  */
static void
extend_insn (void)
{
  if (sched_scan_info->extend_insn)
    sched_scan_info->extend_insn ();
}

/* Init data structures for INSN.  */
static void
init_insn (rtx insn)
{
  if (sched_scan_info->init_insn)
    sched_scan_info->init_insn (insn);
}

/* Init all insns in BB.  */
static void
init_insns_in_bb (basic_block bb)
{
  rtx insn;

  FOR_BB_INSNS (bb, insn)
    init_insn (insn);
}
/* A driver function to add a set of basic blocks (BBS),
   a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
   to the scheduling region.  */
void
sched_scan (const struct sched_scan_info_def *ssi,
	    bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
{
  sched_scan_info = ssi;

  if (bbs != NULL || bb != NULL)
    {
      extend_bb ();

      if (bbs != NULL)
	{
	  unsigned i;
	  basic_block x;

	  for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
	    init_bb (x);
	}

      if (bb != NULL)
	init_bb (bb);
    }

  extend_insn ();

  if (bbs != NULL)
    {
      unsigned i;
      basic_block x;

      for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
	init_insns_in_bb (x);
    }

  if (bb != NULL)
    init_insns_in_bb (bb);

  if (insns != NULL)
    {
      unsigned i;
      rtx x;

      for (i = 0; VEC_iterate (rtx, insns, i, x); i++)
	init_insn (x);
    }

  if (insn != NULL)
    init_insn (insn);
}
/* Extend data structures for logical insn UID.  */
static void
luids_extend_insn (void)
{
  int new_luids_max_uid = get_max_uid () + 1;

  VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
}

/* Initialize LUID for INSN.  */
static void
luids_init_insn (rtx insn)
{
  int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
  int luid;

  if (i >= 0)
    {
      luid = sched_max_luid;
      sched_max_luid += i;
    }
  else
    luid = -1;

  SET_INSN_LUID (insn, luid);
}
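
/* E.g., a real insn has I == 1 and so gets a fresh luid, while a note
   or label gets whatever luid_for_non_insn () decides: with I == 0 it
   shares the luid of the next insn to be assigned one, and with a
   negative I it gets luid -1 (no luid).  */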
/* Initialize luids for BBS, BB, INSNS and INSN.
   The hook common_sched_info->luid_for_non_insn () is used to determine
   if notes, labels, etc. need luids.  */
void
sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
{
  const struct sched_scan_info_def ssi =
    {
      NULL, /* extend_bb */
      NULL, /* init_bb */
      luids_extend_insn, /* extend_insn */
      luids_init_insn /* init_insn */
    };

  sched_scan (&ssi, bbs, bb, insns, insn);
}

/* Free LUIDs.  */
void
sched_finish_luids (void)
{
  VEC_free (int, heap, sched_luids);
  sched_max_luid = 1;
}

/* Return logical uid of INSN.  Helpful while debugging.  */
int
insn_luid (rtx insn)
{
  return INSN_LUID (insn);
}
/* Extend per insn data in the target.  */
void
sched_extend_target (void)
{
  if (targetm.sched.h_i_d_extended)
    targetm.sched.h_i_d_extended ();
}

/* Extend global scheduler structures (those that live across calls to
   schedule_block) to include information about a just-emitted INSN.  */
static void
extend_h_i_d (void)
{
  int reserve = (get_max_uid () + 1
		 - VEC_length (haifa_insn_data_def, h_i_d));
  if (reserve > 0
      && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
    {
      VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
			     3 * get_max_uid () / 2);
      sched_extend_target ();
    }
}
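
/* Growing to 3 * get_max_uid () / 2 instead of by just RESERVE gives
   amortized-constant reallocation cost when insns are emitted one at a
   time, as haifa_init_insn () below does.  */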
/* Initialize h_i_d entry of the INSN with default values.
   Values that are not explicitly initialized here hold zero.  */
static void
init_h_i_d (rtx insn)
{
  if (INSN_LUID (insn) > 0)
    {
      INSN_COST (insn) = -1;
      QUEUE_INDEX (insn) = QUEUE_NOWHERE;
      INSN_TICK (insn) = INVALID_TICK;
      INTER_TICK (insn) = INVALID_TICK;
      TODO_SPEC (insn) = HARD_DEP;
    }
}

/* Initialize haifa_insn_data for BBS, BB, INSNS and INSN.  */
void
haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
{
  const struct sched_scan_info_def ssi =
    {
      NULL, /* extend_bb */
      NULL, /* init_bb */
      extend_h_i_d, /* extend_insn */
      init_h_i_d /* init_insn */
    };

  sched_scan (&ssi, bbs, bb, insns, insn);
}
/* Finalize haifa_insn_data.  */
void
haifa_finish_h_i_d (void)
{
  int i;
  haifa_insn_data_t data;
  struct reg_use_data *use, *next;

  for (i = 0; VEC_iterate (haifa_insn_data_def, h_i_d, i, data); i++)
    {
      if (data->reg_pressure != NULL)
	free (data->reg_pressure);
      for (use = data->reg_use_list; use != NULL; use = next)
	{
	  next = use->next_insn_use;
	  free (use);
	}
    }
  VEC_free (haifa_insn_data_def, heap, h_i_d);
}
/* Init data for the new insn INSN.  */
void
haifa_init_insn (rtx insn)
{
  gcc_assert (insn != NULL);

  sched_init_luids (NULL, NULL, NULL, insn);
  sched_extend_target ();
  sched_deps_init (false);
  haifa_init_h_i_d (NULL, NULL, NULL, insn);

  if (adding_bb_to_current_region_p)
    {
      sd_init_insn (insn);

      /* Extend dependency caches by one element.  */
      extend_dependency_caches (1, false);
    }
}
/* Init data for the new basic block BB which comes after AFTER.  */
void
haifa_init_only_bb (basic_block bb, basic_block after)
{
  gcc_assert (bb != NULL);

  sched_init_bbs ();

  if (common_sched_info->add_block)
    /* This changes only data structures of the front-end.  */
    common_sched_info->add_block (bb, after);
}
/* A generic version of sched_split_block ().  */
basic_block
sched_split_block_1 (basic_block first_bb, rtx after)
{
  edge e;

  e = split_block (first_bb, after);
  gcc_assert (e->src == first_bb);

  /* sched_split_block emits a note if *check == BB_END.  Probably it
     is better to rip that note off.  */

  return e->dest;
}

/* A generic version of sched_create_empty_bb ().  */
basic_block
sched_create_empty_bb_1 (basic_block after)
{
  return create_empty_bb (after);
}
/* Insert PAT as an INSN into the schedule and update the necessary data
   structures to account for it.  */
rtx
sched_emit_insn (rtx pat)
{
  rtx insn = emit_insn_after (pat, last_scheduled_insn);
  last_scheduled_insn = insn;
  haifa_init_insn (insn);
  return insn;
}

#endif /* INSN_SCHEDULING */