/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots as they become available.
   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block over interblock motion, ties broken by
   4.  prefer useful over speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, ties broken by
   7.  choose the insn which has the most insns dependent on it, or finally
   8.  choose insn with lowest UID.
   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To a first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using insn backward dependences
   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward
   dependences INSN_FORW_DEPS for the purpose of forward list
   scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   lifetimes.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live register constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
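
/* In outline, the list-scheduling loop described in the comment above
   looks like the following sketch.  This is illustrative pseudo-code
   only, not part of the pass: READY, QUEUE and CHOOSE_BEST are
   placeholders, and the real loop (schedule_block in this file and its
   callers in sched-rgn.c) also manages the DFA state, multiple issue,
   and the tie-breaking heuristics listed above.

	while (READY not empty or QUEUE not empty)
	  {
	    move insns whose stall time has elapsed from QUEUE to READY;
	    if (READY is empty)
	      { advance one cycle; continue; }
	    insn = CHOOSE_BEST (READY);    -- rank_for_schedule order
	    emit insn into the schedule;   -- transition R->S
	    for (each dependent D of insn whose deps are now resolved)
	      add D to READY, or to QUEUE if D must wait for latency;
	  }  */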
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hard-reg-set.h"
#include "function.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"
#ifdef INSN_SCHEDULING

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

int issue_rate;
/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -dSR : the output is directed to standard output.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

static int sched_verbose_param = 0;
int sched_verbose = 0;
/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  */
FILE *sched_dump = 0;

/* fix_sched_param() is called from toplev.c upon detection
   of the -fsched-verbose=N option.  */

void
fix_sched_param (const char *param, const char *val)
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning (0, "fix_sched_param: unknown param: %s", param);
}
/* This is a placeholder for the scheduler parameters common
   to all schedulers.  */
struct common_sched_info_def *common_sched_info;

#define INSN_TICK(INSN) (HID (INSN)->tick)
#define INTER_TICK(INSN) (HID (INSN)->inter_tick)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* Issue points are used to distinguish between instructions in max_issue ().
   For now, all instructions are equally good.  */
#define ISSUE_POINTS(INSN) 1
/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
spec_info_t spec_info = NULL;

/* True if a recovery block was added during scheduling of the current
   block.  Used to determine if we need to fix INSN_TICKs.  */
static bool haifa_recovery_bb_recently_added_p;

/* True if a recovery block was added during this scheduling pass.
   Used to determine if we should have empty memory pools of dependencies
   after finishing the current region.  */
bool haifa_recovery_bb_ever_added_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx *bb_header = 0;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* Basic block just before the EXIT_BLOCK and after recovery, if we have
   one.  */
basic_block after_recovery;

/* FALSE if we add bb to another region, so we don't need to initialize it.  */
bool adding_bb_to_current_region_p = true;
/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) consists of the insns in the INSN_FORW_DEPS
   of the unscheduled insns, i.e., those that are ready, queued, and
   pending.  The "Queued" set (Q) is implemented by the variable
   `insn_queue'.  The "Ready" list (R) is implemented by the variables
   `ready' and `n_ready'.  The "Scheduled" list (S) is the new insn
   chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */
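
/* A minimal sketch of those four states and their legal transitions,
   for orientation only.  The enum and function below are hypothetical
   (the pass itself encodes the state in QUEUE_INDEX, defined below)
   and are deliberately not compiled.  */
#if 0
enum sched_state { PENDING, QUEUED, READY, SCHEDULED };

static int
legal_transition_p (enum sched_state from, enum sched_state to)
{
  return ((from == PENDING && (to == READY || to == QUEUED)) /* P->R, P->Q */
          || (from == QUEUED && to == READY)                 /* Q->R */
          || (from == READY && to == SCHEDULED));            /* R->S */
}
#endif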
/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   from the maximal time of functional unit reservations needed to get
   a result.  This is the longest time an insn may be queued.  */

static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   the queue nor in the ready list.
   QUEUE_READY     - INSN is in the ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
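
/* To see why the mask in NEXT_Q/NEXT_Q_AFTER works: because
   max_insn_queue_index is a power of two minus one, ANDing with it is
   equivalent to taking the index modulo (max_insn_queue_index + 1),
   which wraps the ring cheaply.  A self-contained illustration with a
   queue of 8 slots (mask 7); the names are local to this example and
   the block is not compiled:  */
#if 0
static void
queue_wrap_example (void)
{
  const int mask = 7;  /* stands in for max_insn_queue_index */
  int q = 6;           /* stands in for q_ptr */

  gcc_assert (((q + 1) & mask) == 7);  /* NEXT_Q: advance one cycle.  */
  gcc_assert (((q + 2) & mask) == 0);  /* NEXT_Q_AFTER wraps past slot 7.  */
  gcc_assert (((q + 5) & mask) == 3);  /* a 5-cycle delay lands on slot 3.  */
}
#endif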
/* The following variable describes all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable is the size of the memory representing all
   current and future reservations of the processor units.  */
size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
char *ready_try = NULL;

/* The ready list.  */
struct ready_list ready = {NULL, 0, 0, 0, 0};

/* The pointer to the ready list (to be removed).  */
static struct ready_list *readyp = &ready;

/* Scheduling clock.  */
static int clock_var;

static int may_trap_exp (const_rtx, int);

/* Nonzero iff the address is composed of at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)						\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))
/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int haifa_luid_for_non_insn (rtx x);

/* Haifa version of sched_info hooks common to all headers.  */
const struct common_sched_info_def haifa_common_sched_info =
  {
    NULL, /* fix_recovery_cfg */
    NULL, /* add_block */
    NULL, /* estimate_number_of_insns */
    haifa_luid_for_non_insn, /* luid_for_non_insn */
    SCHED_PASS_UNKNOWN /* sched_pass_id */
  };

const struct sched_scan_info_def *sched_scan_info;

/* Mapping from instruction UID to its Logical UID.  */
VEC (int, heap) *sched_luids = NULL;

/* Next LUID to assign to an instruction.  */
int sched_max_luid = 1;

/* Haifa Instruction Data.  */
VEC (haifa_insn_data_def, heap) *h_i_d = NULL;

void (* sched_init_only_bb) (basic_block, basic_block);

/* Split block function.  Different schedulers might use different functions
   to keep their internal data consistent.  */
basic_block (* sched_split_block) (basic_block, rtx);

/* Create empty basic block after the specified block.  */
basic_block (* sched_create_empty_bb) (basic_block);
static int
may_trap_exp (const_rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory: a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;

	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}
/* Classifies rtx X of an insn for the purpose of verifying that X can be
   executed speculatively (and consequently the insn can be moved
   speculatively), by examining X, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
   being either PFREE or PRISKY.  */

static int
haifa_classify_rtx (const_rtx x)
{
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (x) == PARALLEL)
    {
      int i, len = XVECLEN (x, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (x);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (x, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (x), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (x), 0));
	  break;
	case COND_EXEC:
	  tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
	  if (tmp_class == TRAP_RISKY)
	    break;
	  tmp_class = WORST_CLASS (tmp_class,
				   may_trap_exp (COND_EXEC_TEST (x), 0));
	  break;
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}

int
haifa_classify_insn (const_rtx insn)
{
  return haifa_classify_rtx (PATTERN (insn));
}
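
/* For example, a caller deciding whether an insn may be hoisted above a
   branch could consult this classification roughly as follows.  This is
   an illustrative, uncompiled sketch; `can_speculate_p' and
   `further_checks_p' are hypothetical helpers, not part of this file.  */
#if 0
static bool
can_speculate_p (rtx insn)
{
  switch (haifa_classify_insn (insn))
    {
    case TRAP_FREE:
    case IFREE:
      return true;                      /* Provably safe to move.  */
    case PFREE_CANDIDATE:
    case PRISKY_CANDIDATE:
      return further_checks_p (insn);   /* Needs more analysis
					   (hypothetical helper).  */
    default:
      return false;                     /* TRAP_RISKY / IRISKY: never move.  */
    }
}
#endif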
/* Forward declarations.  */

static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);
static void extend_h_i_d (void);

/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between two types of notes:

   (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static void ready_add (struct ready_list *, rtx, bool);
static rtx ready_remove_first (struct ready_list *);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

static void debug_ready_list (struct ready_list *);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);

static int choose_ready (struct ready_list *, rtx *);

static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void init_h_i_d (rtx);
static void generate_recovery_code (rtx);
static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
static void init_before_recovery (basic_block *);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void haifa_change_pattern (rtx, rtx);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx, rtx);
#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
#endif

#endif /* INSN_SCHEDULING */
/* Point to state used for the current scheduling pass.  */
struct haifa_sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else
/* Do register pressure sensitive insn scheduling if the flag is set
   up.  */
bool sched_pressure_p;

/* Map regno -> its cover class.  The map is defined only when
   SCHED_PRESSURE_P is true.  */
enum reg_class *sched_regno_cover_class;

/* The current register pressure.  Only elements corresponding to cover
   classes are defined.  */
static int curr_reg_pressure[N_REG_CLASSES];

/* Saved value of the previous array.  */
static int saved_reg_pressure[N_REG_CLASSES];

/* Registers live at the given scheduling point.  */
static bitmap curr_reg_live;

/* Saved value of the previous bitmap.  */
static bitmap saved_reg_live;

/* Registers mentioned in the current region.  */
static bitmap region_ref_regs;

/* Initiate register pressure relative info for scheduling the current
   region.  Currently it only clears the registers mentioned in the
   current region.  */
void
sched_init_region_reg_pressure_info (void)
{
  bitmap_clear (region_ref_regs);
}
/* Update current register pressure related info after birth (if
   BIRTH_P) or death of register REGNO.  */
static void
mark_regno_birth_or_death (int regno, bool birth_p)
{
  enum reg_class cover_class;

  cover_class = sched_regno_cover_class[regno];
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (cover_class != NO_REGS)
	{
	  if (birth_p)
	    {
	      bitmap_set_bit (curr_reg_live, regno);
	      curr_reg_pressure[cover_class]
		+= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
	    }
	  else
	    {
	      bitmap_clear_bit (curr_reg_live, regno);
	      curr_reg_pressure[cover_class]
		-= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
	    }
	}
    }
  else if (cover_class != NO_REGS
	   && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
    {
      if (birth_p)
	{
	  bitmap_set_bit (curr_reg_live, regno);
	  curr_reg_pressure[cover_class]++;
	}
      else
	{
	  bitmap_clear_bit (curr_reg_live, regno);
	  curr_reg_pressure[cover_class]--;
	}
    }
}
/* Initiate current register pressure related info from living
   registers given by LIVE.  */
static void
initiate_reg_pressure_info (bitmap live)
{
  int i;
  unsigned int j;
  bitmap_iterator bi;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    curr_reg_pressure[ira_reg_class_cover[i]] = 0;
  bitmap_clear (curr_reg_live);
  EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
    if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
      mark_regno_birth_or_death (j, true);
}
/* Mark registers in X as mentioned in the current region.  */
static void
setup_ref_regs (rtx x)
{
  int i, j, regno;
  const RTX_CODE code = GET_CODE (x);
  const char *fmt;

  if (REG_P (x))
    {
      regno = REGNO (x);
      if (regno >= FIRST_PSEUDO_REGISTER)
	bitmap_set_bit (region_ref_regs, REGNO (x));
      else
	for (i = hard_regno_nregs[regno][GET_MODE (x)] - 1; i >= 0; i--)
	  bitmap_set_bit (region_ref_regs, regno + i);
      return;
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      setup_ref_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	setup_ref_regs (XVECEXP (x, i, j));
}
/* Initiate current register pressure related info at the start of
   basic block BB.  */
static void
initiate_bb_reg_pressure_info (basic_block bb)
{
  unsigned int i;
  rtx insn;

  if (current_nr_blocks > 1)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn))
	setup_ref_regs (PATTERN (insn));
  initiate_reg_pressure_info (df_get_live_in (bb));
#ifdef EH_RETURN_DATA_REGNO
  if (bb_has_eh_pred (bb))
    for (i = 0; ; ++i)
      {
	unsigned int regno = EH_RETURN_DATA_REGNO (i);

	if (regno == INVALID_REGNUM)
	  break;
	if (! bitmap_bit_p (df_get_live_in (bb), regno))
	  mark_regno_birth_or_death (regno, true);
      }
#endif
}
/* Save current register pressure related info.  */
static void
save_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    saved_reg_pressure[ira_reg_class_cover[i]]
      = curr_reg_pressure[ira_reg_class_cover[i]];
  bitmap_copy (saved_reg_live, curr_reg_live);
}

/* Restore saved register pressure related info.  */
static void
restore_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    curr_reg_pressure[ira_reg_class_cover[i]]
      = saved_reg_pressure[ira_reg_class_cover[i]];
  bitmap_copy (curr_reg_live, saved_reg_live);
}
/* Return TRUE if the register is dying after its USE.  */
static bool
dying_use_p (struct reg_use_data *use)
{
  struct reg_use_data *next;

  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      return false;
  return true;
}
/* Print info about the current register pressure and its excess for
   each cover class.  */
static void
print_curr_reg_pressure (void)
{
  int i;
  enum reg_class cl;

  fprintf (sched_dump, ";;\t");
  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cl = ira_reg_class_cover[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      fprintf (sched_dump, "  %s:%d(%d)", reg_class_names[cl],
	       curr_reg_pressure[cl],
	       curr_reg_pressure[cl] - ira_available_class_regs[cl]);
    }
  fprintf (sched_dump, "\n");
}
/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */
static rtx last_scheduled_insn;

/* Cached cost of the instruction.  Use insn_cost below to get the cost
   of an insn.  -1 here means that the field is not initialized.  */
#define INSN_COST(INSN) (HID (INSN)->cost)
/* Compute cost of executing INSN.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
insn_cost (rtx insn)
{
  int cost;

  if (sel_sched_p ())
    {
      if (recog_memoized (insn) < 0)
	return 0;

      cost = insn_default_latency (insn);
      if (cost < 0)
	cost = 0;

      return cost;
    }

  cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  return cost;
}
/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.
   ??? We also use this function to call recog_memoized on all insns.  */
int
dep_cost_1 (dep_t link, dw_t dw)
{
  rtx insn = DEP_PRO (link);
  rtx used = DEP_CON (link);
  int cost;

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  We don't care about the
     dependence cost when only decreasing register pressure.  */
  if (recog_memoized (used) < 0)
    {
      cost = 0;
      recog_memoized (insn);
    }
  else
    {
      enum reg_note dep_type = DEP_TYPE (link);

      cost = insn_cost (insn);

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}

      if (targetm.sched.adjust_cost_2)
	cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
					    dw);
      else if (targetm.sched.adjust_cost != NULL)
	{
	  /* This variable is used for backward compatibility with the
	     targets.  */
	  rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);

	  /* Make it self-cycled, so that if someone tries to walk over this
	     incomplete list he/she will be caught in an endless loop.  */
	  XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;

	  /* Targets use only REG_NOTE_KIND of the link.  */
	  PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));

	  cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
					    insn, cost);

	  free_INSN_LIST_node (dep_cost_rtx_link);
	}

      if (cost < 0)
	cost = 0;
    }

  return cost;
}

/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
dep_cost (dep_t link)
{
  return dep_cost_1 (link, 0);
}
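
/* The cost rules above, in summary: anti dependences are free, output
   dependences cost the latency difference (at least 1), and true
   dependences cost the producer's full latency unless a bypass applies.
   A small self-contained illustration with made-up latencies (3-cycle
   producer, 1-cycle consumer); the helper below is hypothetical and is
   not compiled:  */
#if 0
static int
example_dep_cost (enum reg_note dep_type, int pro_latency, int con_latency)
{
  if (dep_type == REG_DEP_ANTI)
    return 0;                           /* Anti: no wait at all.  */
  if (dep_type == REG_DEP_OUTPUT)
    {
      int cost = pro_latency - con_latency;  /* 3 - 1 = 2 cycles.  */
      return cost <= 0 ? 1 : cost;
    }
  return pro_latency;                   /* True dependence: 3 cycles.  */
}
#endif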
/* Use this sel-sched.c friendly function in reorder2 instead of increasing
   INSN_PRIORITY explicitly.  */
void
increase_insn_priority (rtx insn, int amount)
{
  if (!sel_sched_p ())
    {
      /* We're dealing with haifa-sched.c INSN_PRIORITY.  */
      if (INSN_PRIORITY_KNOWN (insn))
	INSN_PRIORITY (insn) += amount;
    }
  else
    {
      /* In sel-sched.c INSN_PRIORITY is not kept up to date.
	 Use EXPR_PRIORITY instead.  */
      sel_add_to_insn_priority (insn, amount);
    }
}
/* Return 'true' if DEP should be included in priority calculations.  */
static bool
contributes_to_priority_p (dep_t dep)
{
  if (DEBUG_INSN_P (DEP_CON (dep))
      || DEBUG_INSN_P (DEP_PRO (dep)))
    return false;

  /* Critical path is meaningful in block boundaries only.  */
  if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
						    DEP_PRO (dep)))
    return false;

  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
     then speculative instructions will less likely be
     scheduled.  That is because the priority of
     their producers will increase, and, thus, the
     producers will more likely be scheduled, thus,
     resolving the dependence.  */
  if (sched_deps_info->generate_spec_deps
      && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
      && (DEP_STATUS (dep) & SPECULATIVE))
    return false;

  return true;
}
/* Compute the number of nondebug forward deps of an insn.  */
static int
dep_list_size (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int dbgcount = 0, nodbgcount = 0;

  if (!MAY_HAVE_DEBUG_INSNS)
    return sd_lists_size (insn, SD_LIST_FORW);

  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
    {
      if (DEBUG_INSN_P (DEP_CON (dep)))
	dbgcount++;
      else if (!DEBUG_INSN_P (DEP_PRO (dep)))
	nodbgcount++;
    }

  gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));

  return nodbgcount;
}
/* Compute the priority number for INSN.  */
static int
priority (rtx insn)
{
  if (! INSN_P (insn))
    return 0;

  /* We should not be interested in priority of an already scheduled insn.  */
  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  if (!INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = -1;

      if (dep_list_size (insn) == 0)
	/* ??? We should set INSN_PRIORITY to insn_cost when an insn has
	   some forward deps but all of them are ignored by the
	   contributes_to_priority hook.  At the moment we set priority of
	   such insns to 0.  */
	this_priority = insn_cost (insn);
      else
	{
	  rtx prev_first, twin;
	  basic_block rec;

	  /* For recovery check instructions we calculate priority slightly
	     differently than for normal instructions.  Instead of walking
	     through INSN_FORW_DEPS (check) list, we walk through the
	     INSN_FORW_DEPS list of each instruction in the corresponding
	     recovery block.  */

	  /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
	  rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
	  if (!rec || rec == EXIT_BLOCK_PTR)
	    {
	      prev_first = PREV_INSN (insn);
	      twin = insn;
	    }
	  else
	    {
	      prev_first = NEXT_INSN (BB_HEAD (rec));
	      twin = PREV_INSN (BB_END (rec));
	    }

	  do
	    {
	      sd_iterator_def sd_it;
	      dep_t dep;

	      FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
		{
		  rtx next;
		  int next_priority;

		  next = DEP_CON (dep);

		  if (BLOCK_FOR_INSN (next) != rec)
		    {
		      int cost;

		      if (!contributes_to_priority_p (dep))
			continue;

		      if (twin == insn)
			cost = dep_cost (dep);
		      else
			{
			  struct _dep _dep1, *dep1 = &_dep1;

			  init_dep (dep1, insn, next, REG_DEP_ANTI);

			  cost = dep_cost (dep1);
			}

		      next_priority = cost + priority (next);

		      if (next_priority > this_priority)
			this_priority = next_priority;
		    }
		}

	      twin = PREV_INSN (twin);
	    }
	  while (twin != prev_first);
	}

      if (this_priority < 0)
	{
	  gcc_assert (this_priority == -1);

	  this_priority = insn_cost (insn);
	}

      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_STATUS (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)                                             \
       swap_sort (READY, N_READY);                                   \
     else if ((N_READY) > 2)                                         \
         qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }  \
while (0)
/* Setup info about the current register pressure impact of scheduling
   INSN at the current scheduling point.  */
static void
setup_insn_reg_pressure_info (rtx insn)
{
  int i, change, before, after, hard_regno;
  int excess_cost_change;
  enum machine_mode mode;
  enum reg_class cl;
  struct reg_pressure_data *pressure_info;
  int *max_reg_pressure;
  struct reg_use_data *use;
  static int death[N_REG_CLASSES];

  excess_cost_change = 0;
  for (i = 0; i < ira_reg_class_cover_size; i++)
    death[ira_reg_class_cover[i]] = 0;
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use))
      {
	cl = sched_regno_cover_class[use->regno];
	if (use->regno < FIRST_PSEUDO_REGISTER)
	  death[cl]++;
	else
	  death[cl] += ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
      }
  pressure_info = INSN_REG_PRESSURE (insn);
  max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
  gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cl = ira_reg_class_cover[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      change = (int) pressure_info[i].set_increase - death[cl];
      before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
      after = MAX (0, max_reg_pressure[i] + change
		   - ira_available_class_regs[cl]);
      hard_regno = ira_class_hard_regs[cl][0];
      gcc_assert (hard_regno >= 0);
      mode = reg_raw_mode[hard_regno];
      excess_cost_change += ((after - before)
			     * (ira_memory_move_cost[mode][cl][0]
				+ ira_memory_move_cost[mode][cl][1]));
    }
  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
}
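
/* To make the formula above concrete, here is a worked example with
   made-up numbers; the helper is hypothetical and not compiled.
   Suppose a class has 4 available registers, the running max pressure
   is 5, the insn sets one more register than it kills (change = +1),
   and a memory move costs 2 in each direction.  Then the excess goes
   from MAX (0, 5 - 4) = 1 to MAX (0, 6 - 4) = 2, so the estimated
   extra spill cost is (2 - 1) * (2 + 2) = 4.  */
#if 0
static int
example_excess_cost_change (void)
{
  const int available = 4, max_pressure = 5, change = +1;
  const int load_cost = 2, store_cost = 2;
  int before = MAX (0, max_pressure - available);           /* 1 */
  int after = MAX (0, max_pressure + change - available);   /* 2 */

  return (after - before) * (load_cost + store_cost);       /* 4 */
}
#endif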
/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx tmp = *(const rtx *) y;
  rtx tmp2 = *(const rtx *) x;
  rtx last;
  int tmp_class, tmp2_class;
  int val, priority_val, info_val;

  if (MAY_HAVE_DEBUG_INSNS)
    {
      /* Schedule debug insns as early as possible.  */
      if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
	return -1;
      else if (DEBUG_INSN_P (tmp2))
	return 1;
    }

  /* The insn in a schedule group should be issued the first.  */
  if (flag_sched_group_heuristic &&
      SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* Make sure that priority of TMP and TMP2 are initialized.  */
  gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));

  if (sched_pressure_p)
    {
      int diff;

      /* Prefer insn whose scheduling results in the smallest register
	 pressure excess.  */
      if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
		   + (INSN_TICK (tmp) > clock_var
		      ? INSN_TICK (tmp) - clock_var : 0)
		   - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
		   - (INSN_TICK (tmp2) > clock_var
		      ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
	return diff;
    }

  if (sched_pressure_p
      && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
    {
      if (INSN_TICK (tmp) <= clock_var)
	return -1;
      else if (INSN_TICK (tmp2) <= clock_var)
	return 1;
      else
	return INSN_TICK (tmp) - INSN_TICK (tmp2);
    }

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (flag_sched_critical_path_heuristic && priority_val)
    return priority_val;

  /* Prefer speculative insn with greater dependencies weakness.  */
  if (flag_sched_spec_insn_heuristic && spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
	dw1 = ds_weak (ds1);
      else
	dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
	dw2 = ds_weak (ds2);
      else
	dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
	return dw;
    }

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (flag_sched_rank_heuristic && info_val)
    return info_val;

  if (flag_sched_last_insn_heuristic)
    {
      last = last_scheduled_insn;

      if (DEBUG_INSN_P (last) && last != current_sched_info->prev_head)
	do
	  last = PREV_INSN (last);
	while (!NONDEBUG_INSN_P (last)
	       && last != current_sched_info->prev_head);
    }

  /* Compare insns based on their relation to the last scheduled
     non-debug insn.  */
  if (flag_sched_last_insn_heuristic && NONDEBUG_INSN_P (last))
    {
      dep_t dep1;
      dep_t dep2;

      /* Classify the instructions into three classes:
	 1) Data dependent on last scheduled insn.
	 2) Anti/Output dependent on last scheduled insn.
	 3) Independent of last scheduled insn, or has latency of one.
	 Choose the insn from the highest numbered class if different.  */
      dep1 = sd_find_dep_between (last, tmp, true);

      if (dep1 == NULL || dep_cost (dep1) == 1)
	tmp_class = 3;
      else if (/* Data dependence.  */
	       DEP_TYPE (dep1) == REG_DEP_TRUE)
	tmp_class = 1;
      else
	tmp_class = 2;

      dep2 = sd_find_dep_between (last, tmp2, true);

      if (dep2 == NULL || dep_cost (dep2) == 1)
	tmp2_class = 3;
      else if (/* Data dependence.  */
	       DEP_TYPE (dep2) == REG_DEP_TRUE)
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
	return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */

  val = (dep_list_size (tmp2) - dep_list_size (tmp));

  if (flag_sched_dep_count_heuristic && val != 0)
    return val;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
/* Resort the array A in which only element at index N may be out of order.  */

HAIFA_INLINE static void
swap_sort (rtx *a, int n)
{
  rtx insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}

/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve insns
   chain for debugging purposes.  */

HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);

  gcc_assert (n_cycles <= max_insn_queue_index);
  gcc_assert (!DEBUG_INSN_P (insn));

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
	       (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
    }

  QUEUE_INDEX (insn) = next_q;
}
/* Remove INSN from queue.  */
static void
queue_remove (rtx insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}

/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}
/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
	{
	  memmove (ready->vec + ready->veclen - ready->n_ready,
		   ready_lastpos (ready),
		   ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 1;
	}
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
	{
	  if (ready->n_ready)
	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
		     ready_lastpos (ready),
		     ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 2;
	}
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;
  if (DEBUG_INSN_P (insn))
    ready->n_debug++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;
}

/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
  rtx t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  if (DEBUG_INSN_P (t))
    ready->n_debug--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}
/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we will try to choose the ready insn that
   permits starting the maximum number of insns on the same cycle.  */

/* Return a pointer to the element INDEX from the ready list.  INDEX for
   the insn with the highest priority is 0, and the lowest priority has
   N_READY - 1.  */

rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}

/* Remove the element INDEX from the ready list and return it.  INDEX
   for the insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx
ready_remove (struct ready_list *ready, int index)
{
  rtx t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  if (DEBUG_INSN_P (t))
    ready->n_debug--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}

/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
	ready_remove (readyp, i);
	return;
      }
  gcc_unreachable ();
}
/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */

void
ready_sort (struct ready_list *ready)
{
  int i;
  rtx *first = ready_lastpos (ready);

  if (sched_pressure_p)
    {
      for (i = 0; i < ready->n_ready; i++)
	setup_insn_reg_pressure_info (first[i]);
    }
  SCHED_SORT (first, ready->n_ready);
}

/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}
/* Advance DFA state STATE on one cycle.  */
void
advance_state (state_t state)
{
  if (targetm.sched.dfa_pre_advance_cycle)
    targetm.sched.dfa_pre_advance_cycle ();

  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (state,
		      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (state,
		      targetm.sched.dfa_post_cycle_insn ());

  if (targetm.sched.dfa_post_advance_cycle)
    targetm.sched.dfa_post_advance_cycle ();
}

/* Advance time on one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  advance_state (curr_state);
  if (sched_verbose >= 6)
    fprintf (sched_dump, ";;\tAdvanced a state.\n");
}

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;
/* Update register pressure after scheduling INSN.  */
static void
update_register_pressure (rtx insn)
{
  struct reg_use_data *use;
  struct reg_set_data *set;

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
      mark_regno_birth_or_death (use->regno, false);
  for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
    mark_regno_birth_or_death (set->regno, true);
}

/* Set up or update (if UPDATE_P) max register pressure (see its
   meaning in sched-int.h::_haifa_insn_data) for all current BB insns
   after insn AFTER.  */
static void
setup_insn_max_reg_pressure (rtx after, bool update_p)
{
  int i, p;
  bool eq_p;
  rtx insn;
  static int max_reg_pressure[N_REG_CLASSES];

  save_reg_pressure ();
  for (i = 0; i < ira_reg_class_cover_size; i++)
    max_reg_pressure[ira_reg_class_cover[i]]
      = curr_reg_pressure[ira_reg_class_cover[i]];
  for (insn = NEXT_INSN (after);
       insn != NULL_RTX && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
       insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn))
      {
	eq_p = true;
	for (i = 0; i < ira_reg_class_cover_size; i++)
	  {
	    p = max_reg_pressure[ira_reg_class_cover[i]];
	    if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
	      {
		eq_p = false;
		INSN_MAX_REG_PRESSURE (insn)[i]
		  = max_reg_pressure[ira_reg_class_cover[i]];
	      }
	  }
	if (update_p && eq_p)
	  break;
	update_register_pressure (insn);
	for (i = 0; i < ira_reg_class_cover_size; i++)
	  if (max_reg_pressure[ira_reg_class_cover[i]]
	      < curr_reg_pressure[ira_reg_class_cover[i]])
	    max_reg_pressure[ira_reg_class_cover[i]]
	      = curr_reg_pressure[ira_reg_class_cover[i]];
      }
  restore_reg_pressure ();
}

/* Update the current register pressure after scheduling INSN.  Update
   also max register pressure for unscheduled insns of the current
   BB.  */
static void
update_reg_and_insn_max_reg_pressure (rtx insn)
{
  int i;
  int before[N_REG_CLASSES];

  for (i = 0; i < ira_reg_class_cover_size; i++)
    before[i] = curr_reg_pressure[ira_reg_class_cover[i]];
  update_register_pressure (insn);
  for (i = 0; i < ira_reg_class_cover_size; i++)
    if (curr_reg_pressure[ira_reg_class_cover[i]] != before[i])
      break;
  if (i < ira_reg_class_cover_size)
    setup_insn_max_reg_pressure (insn, true);
}

/* Set up register pressure at the beginning of basic block BB whose
   insns start after insn AFTER.  Set up also max register pressure
   for all insns of the basic block.  */
void
sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
{
  gcc_assert (sched_pressure_p);
  initiate_bb_reg_pressure_info (bb);
  setup_insn_max_reg_pressure (after, false);
}
/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  The function returns the necessary cycle advance
   after issuing the insn (it is not zero for insns in a schedule
   group).  */

static int
schedule_insn (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      struct reg_pressure_data *pressure_info;
      char buf[2048];
      int i;

      print_insn (buf, insn, 0);
      buf[40] = 0;
      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);

      if (recog_memoized (insn) < 0)
	fprintf (sched_dump, "nothing");
      else
	print_reservation (sched_dump, insn);
      pressure_info = INSN_REG_PRESSURE (insn);
      if (pressure_info != NULL)
	{
	  fputc (':', sched_dump);
	  for (i = 0; i < ira_reg_class_cover_size; i++)
	    fprintf (sched_dump, "%s%+d(%d)",
		     reg_class_names[ira_reg_class_cover[i]],
		     pressure_info[i].set_increase, pressure_info[i].change);
	}
      fputc ('\n', sched_dump);
    }

  if (sched_pressure_p)
    update_reg_and_insn_max_reg_pressure (insn);

  /* Scheduling instruction should have all its dependencies resolved and
     should have been removed from the ready list.  */
  gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));

  /* Reset debug insns invalidated by moving this insn.  */
  if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
    for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
	 sd_iterator_cond (&sd_it, &dep);)
      {
	rtx dbg = DEP_PRO (dep);
	struct reg_use_data *use, *next;

	gcc_assert (DEBUG_INSN_P (dbg));

	if (sched_verbose >= 6)
	  fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
		   INSN_UID (dbg));

	/* ??? Rather than resetting the debug insn, we might be able
	   to emit a debug temp before the just-scheduled insn, but
	   this would involve checking that the expression at the
	   point of the debug insn is equivalent to the expression
	   before the just-scheduled insn.  They might not be: the
	   expression in the debug insn may depend on other insns not
	   yet scheduled that set MEMs, REGs or even other debug
	   insns.  It's not clear that attempting to preserve debug
	   information in these cases is worth the effort, given how
	   uncommon these resets are and the likelihood that the debug
	   temps introduced won't survive the schedule change.  */
	INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
	df_insn_rescan (dbg);

	/* Unknown location doesn't use any registers.  */
	for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
	  {
	    next = use->next_insn_use;
	    free (use);
	  }
	INSN_REG_USE_LIST (dbg) = NULL;

	/* We delete rather than resolve these deps, otherwise we
	   crash in sched_free_deps(), because forward deps are
	   expected to be released before backward deps.  */
	sd_delete_dep (sd_it);
      }

  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if following flag is set.  */
    gcc_assert (flag_sched_stalled_insns);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  /* Update dependent instructions.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
       sd_iterator_cond (&sd_it, &dep);)
    {
      rtx next = DEP_CON (dep);

      /* Resolve the dependence between INSN and NEXT.
	 sd_resolve_dep () moves current dep to another list thus
	 advancing the iterator.  */
      sd_resolve_dep (sd_it);

      /* Don't bother trying to mark next as ready if insn is a debug
	 insn.  If insn is the last hard dependency, it will have
	 already been discounted.  */
      if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
	continue;

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
	{
	  int effective_cost;

	  effective_cost = try_ready (next);

	  if (effective_cost >= 0
	      && SCHED_GROUP_P (next)
	      && advance < effective_cost)
	    advance = effective_cost;
	}
      else
	/* Check always has only one forward dependence (to the first insn in
	   the recovery block), therefore, this will be executed only once.  */
	{
	  gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
	  fix_recovery_deps (RECOVERY_BLOCK (insn));
	}
    }

  /* This is the place where scheduler doesn't *basically* need backward and
     forward dependencies for INSN anymore.  Nevertheless they are used in
     heuristics in rank_for_schedule (), early_queue_to_ready () and in
     some targets (e.g. rs6000).  Thus the earliest place where we *can*
     remove dependencies is after targetm.sched.md_finish () call in
     schedule_block ().  But, on the other side, the safest place to remove
     dependencies is when we are finishing scheduling entire region.  As we
     don't generate [many] dependencies during scheduling itself, we won't
     need memory until beginning of next region.
     Bottom line: Dependencies are removed for all insns in the end of
     scheduling the region.  */

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER
      && !DEBUG_INSN_P (insn))
    {
      if (reload_completed)
	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  return advance;
}
/* Functions for handling of notes.  */

/* Add note list that ends on FROM_END to the end of TO_ENDP.  */
void
concat_note_lists (rtx from_end, rtx *to_endp)
{
  rtx from_start;

  /* It's easy when we have nothing to concat.  */
  if (from_end == NULL)
    return;

  /* It's also easy when destination is empty.  */
  if (*to_endp == NULL)
    {
      *to_endp = from_end;
      return;
    }

  from_start = from_end;
  while (PREV_INSN (from_start) != NULL)
    from_start = PREV_INSN (from_start);

  PREV_INSN (from_start) = *to_endp;
  NEXT_INSN (*to_endp) = from_start;
  *to_endp = from_end;
}

/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */
void
remove_notes (rtx head, rtx tail)
{
  rtx next_tail, insn, next;

  note_list = 0;
  if (head == tail && !INSN_P (head))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = next)
    {
      next = NEXT_INSN (insn);
      if (!NOTE_P (insn))
	continue;

      switch (NOTE_KIND (insn))
	{
	case NOTE_INSN_BASIC_BLOCK:
	  continue;

	case NOTE_INSN_EPILOGUE_BEG:
	  if (insn != tail)
	    {
	      remove_insn (insn);
	      add_reg_note (next, REG_SAVE_NOTE,
			    GEN_INT (NOTE_INSN_EPILOGUE_BEG));
	      break;
	    }
	  /* FALLTHRU */

	default:
	  remove_insn (insn);

	  /* Add the note to list that ends at NOTE_LIST.  */
	  PREV_INSN (insn) = note_list;
	  NEXT_INSN (insn) = NULL_RTX;
	  if (note_list)
	    NEXT_INSN (note_list) = insn;
	  note_list = insn;
	  break;
	}

      gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
    }
}
/* Return the head and tail pointers of ebb starting at BEG and ending
   at END.  */
void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
  rtx beg_head = BB_HEAD (beg);
  rtx beg_tail = BB_END (beg);
  rtx end_head = BB_HEAD (end);
  rtx end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic blocks.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head) || BOUNDARY_DEBUG_INSN_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail) || BOUNDARY_DEBUG_INSN_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else
      break;

  *tailp = end_tail;
}
/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (const_rtx head, const_rtx tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head)
	  && !BOUNDARY_DEBUG_INSN_P (head))
	return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}
/* Restore-other-notes: NOTE_LIST is the end of a chain of notes
   previously found among the insns.  Insert them just before HEAD.  */
rtx
restore_other_notes (rtx head, basic_block head_bb)
{
  if (note_list != 0)
    {
      rtx note_head = note_list;

      if (head)
	head_bb = BLOCK_FOR_INSN (head);
      else
	head = NEXT_INSN (bb_note (head_bb));

      while (PREV_INSN (note_head))
	{
	  set_block_for_insn (note_head, head_bb);
	  note_head = PREV_INSN (note_head);
	}
      /* In the above cycle we've missed this note.  */
      set_block_for_insn (note_head, head_bb);

      PREV_INSN (note_head) = PREV_INSN (head);
      NEXT_INSN (PREV_INSN (head)) = note_head;
      PREV_INSN (head) = note_list;
      NEXT_INSN (note_list) = head;

      if (BLOCK_FOR_INSN (head) != head_bb)
	BB_END (head_bb) = note_list;

      head = note_head;
    }

  return head;
}
/* Move insns that became ready to fire from queue to ready list.  */

static void
queue_to_ready (struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx skip_insn;

  q_ptr = NEXT_Q (q_ptr);

  if (dbg_cnt (sched_insn) == false)
    {
      /* If debug counter is activated do not requeue insn next after
	 last_scheduled_insn.  */
      skip_insn = next_nonnote_insn (last_scheduled_insn);
      while (skip_insn && DEBUG_INSN_P (skip_insn))
	skip_insn = next_nonnote_insn (skip_insn);
    }
  else
    skip_insn = NULL_RTX;

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {
      insn = XEXP (link, 0);
      q_size -= 1;

      if (sched_verbose >= 2)
	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
		 (*current_sched_info->print_insn) (insn, 0));

      /* If the ready list is full, delay the insn for 1 cycle.
	 See the comment in schedule_block for the rationale.  */
      if (!reload_completed
	  && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
	  && !SCHED_GROUP_P (insn)
	  && insn != skip_insn)
	{
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, "requeued because ready full\n");
	  queue_insn (insn, 1);
	}
      else
	{
	  ready_add (ready, insn, false);
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, "moving to ready without stalls\n");
	}
    }
  free_INSN_LIST_list (&insn_queue[q_ptr]);

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
	{
	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	    {
	      for (; link; link = XEXP (link, 1))
		{
		  insn = XEXP (link, 0);
		  q_size -= 1;

		  if (sched_verbose >= 2)
		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
			     (*current_sched_info->print_insn) (insn, 0));

		  ready_add (ready, insn, false);
		  if (sched_verbose >= 2)
		    fprintf (sched_dump, "moving to ready with %d stalls\n",
			     stalls);
		}
	      free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);

	      advance_one_cycle ();

	      break;
	    }

	  advance_one_cycle ();
	}

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
    }
}
2072 /* Used by early_queue_to_ready. Determines whether it is "ok" to
2073 prematurely move INSN from the queue to the ready list. Currently,
2074 if a target defines the hook 'is_costly_dependence', this function
2075 uses the hook to check whether there exist any dependences which are
2076 considered costly by the target, between INSN and other insns that
2077 have already been scheduled. Dependences are checked up to Y cycles
2078 back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
2079 controlling this value.
2080 (Other considerations could be taken into account instead (or in
2081 addition) depending on user flags and target hooks.)  */
2084 ok_for_early_queue_removal (rtx insn)
2087 rtx prev_insn = last_scheduled_insn;
2089 if (targetm.sched.is_costly_dependence)
2091 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
2093 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
2097 if (prev_insn == current_sched_info->prev_head)
2103 if (!NOTE_P (prev_insn))
2107 dep = sd_find_dep_between (prev_insn, insn, true);
2111 cost = dep_cost (dep);
2113 if (targetm.sched.is_costly_dependence (dep, cost,
2114 flag_sched_stalled_insns_dep - n_cycles))
2119 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
2125 prev_insn = PREV_INSN (prev_insn);
2133 /* Remove insns from the queue before they become "ready" with respect
2134    to FU latency considerations.  */
2137 early_queue_to_ready (state_t state, struct ready_list *ready)
2145 state_t temp_state = alloca (dfa_state_size);
2147 int insns_removed = 0;
2150 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this function:
2153 X == 0: There is no limit on how many queued insns can be removed
2154 prematurely. (flag_sched_stalled_insns = -1).
2156 X >= 1: Only X queued insns can be removed prematurely in each
2157 invocation. (flag_sched_stalled_insns = X).
2159 Otherwise: Early queue removal is disabled.
2160 (flag_sched_stalled_insns = 0)
2163 if (! flag_sched_stalled_insns)
2166 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
2168 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2170 if (sched_verbose > 6)
2171 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
2176 next_link = XEXP (link, 1);
2177 insn = XEXP (link, 0);
2178 if (insn && sched_verbose > 6)
2179 print_rtl_single (sched_dump, insn);
2181 memcpy (temp_state, state, dfa_state_size);
2182 if (recog_memoized (insn) < 0)
2183 /* Set COST to a non-negative value to indicate that the insn is not
2184    ready, so as to avoid an endless Q->R->Q->R... cycle.  */
2187 cost = state_transition (temp_state, insn);
2189 if (sched_verbose >= 6)
2190 fprintf (sched_dump, "transition cost = %d\n", cost);
2192 move_to_ready = false;
2195 move_to_ready = ok_for_early_queue_removal (insn);
2196 if (move_to_ready == true)
2198 /* move from Q to R */
2200 ready_add (ready, insn, false);
2203 XEXP (prev_link, 1) = next_link;
2205 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
2207 free_INSN_LIST_node (link);
2209 if (sched_verbose >= 2)
2210 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
2211 (*current_sched_info->print_insn) (insn, 0));
2214 if (insns_removed == flag_sched_stalled_insns)
2215 /* Remove no more than flag_sched_stalled_insns insns
2216 from Q at a time. */
2217 return insns_removed;
2221 if (move_to_ready == false)
2228 } /* for stalls.. */
2230 return insns_removed;
2234 /* Print the ready list for debugging purposes. Callable from debugger. */
2237 debug_ready_list (struct ready_list *ready)
2242 if (ready->n_ready == 0)
2244 fprintf (sched_dump, "\n");
2248 p = ready_lastpos (ready);
2249 for (i = 0; i < ready->n_ready; i++)
2251 fprintf (sched_dump, " %s:%d",
2252 (*current_sched_info->print_insn) (p[i], 0),
2254 if (sched_pressure_p)
2255 fprintf (sched_dump, "(cost=%d",
2256 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
2257 if (INSN_TICK (p[i]) > clock_var)
2258 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
2259 if (sched_pressure_p)
2260 fprintf (sched_dump, ")");
2262 fprintf (sched_dump, "\n");
2265 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
2266 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
2267 replaces the epilogue note in the correct basic block. */
2269 reemit_notes (rtx insn)
2271 rtx note, last = insn;
2273 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2275 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
2277 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
2279 last = emit_note_before (note_type, last);
2280 remove_note (insn, note);
2285 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
2287 move_insn (rtx insn, rtx last, rtx nt)
2289 if (PREV_INSN (insn) != last)
2295 bb = BLOCK_FOR_INSN (insn);
2297 /* BB_HEAD is either LABEL or NOTE. */
2298 gcc_assert (BB_HEAD (bb) != insn);
2300 if (BB_END (bb) == insn)
2301 /* If this is the last instruction in BB, move the end marker one instruction up.  */
2304 /* Jumps are always placed at the end of basic block. */
2305 jump_p = control_flow_insn_p (insn);
2308 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
2309 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
2310 || (common_sched_info->sched_pass_id
2311 == SCHED_EBB_PASS));
2313 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
2315 BB_END (bb) = PREV_INSN (insn);
2318 gcc_assert (BB_END (bb) != last);
2321 /* We move the block note along with the jump.  */
2325 note = NEXT_INSN (insn);
2326 while (NOTE_NOT_BB_P (note) && note != nt)
2327 note = NEXT_INSN (note);
2331 || BARRIER_P (note)))
2332 note = NEXT_INSN (note);
2334 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
2339 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
2340 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
2342 NEXT_INSN (note) = NEXT_INSN (last);
2343 PREV_INSN (NEXT_INSN (last)) = note;
2345 NEXT_INSN (last) = insn;
2346 PREV_INSN (insn) = last;
2348 bb = BLOCK_FOR_INSN (last);
2352 fix_jump_move (insn);
2354 if (BLOCK_FOR_INSN (insn) != bb)
2355 move_block_after_check (insn);
2357 gcc_assert (BB_END (bb) == last);
2360 df_insn_change_bb (insn, bb);
2362 /* Update BB_END, if needed. */
2363 if (BB_END (bb) == last)
2367 SCHED_GROUP_P (insn) = 0;
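/* A minimal sketch of the doubly-linked-list splice that move_insn
   performs above, with illustrative prev/next fields standing in for the
   PREV_INSN/NEXT_INSN accessors and with the note and CFG bookkeeping
   omitted: unlink NODE from its current position and reinsert it right
   after LAST.  */
#if 0
struct toy_node { struct toy_node *prev, *next; };

static void
toy_splice_after (struct toy_node *node, struct toy_node *last)
{
  /* Unlink NODE from its current position.  */
  node->prev->next = node->next;
  node->next->prev = node->prev;

  /* Reinsert NODE right after LAST.  */
  node->next = last->next;
  last->next->prev = node;
  last->next = node;
  node->prev = last;
}
#endif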
2370 /* Return true if scheduling INSN will finish current clock cycle. */
2372 insn_finishes_cycle_p (rtx insn)
2374 if (SCHED_GROUP_P (insn))
2375 /* After issuing INSN, the rest of the sched_group will be forced to issue
2376    in order.  Don't make any plans for the rest of the cycle.  */
2379 /* Finishing the block will, apparently, finish the cycle. */
2380 if (current_sched_info->insn_finishes_block_p
2381 && current_sched_info->insn_finishes_block_p (insn))
2387 /* The following structure describes an entry of the stack of choices.  */
2388 struct choice_entry
2389 {
2390   /* Ordinal number of the issued insn in the ready queue.  */
2391   int index;
2392   /* The number of remaining insns whose issue we should try.  */
2393   int rest;
2394   /* The number of issued essential insns.  */
2395   int n;
2396   /* State after issuing the insn.  */
2397   state_t state;
2398 };
2400 /* The following array is used to implement a stack of choices used in
2401 function max_issue. */
2402 static struct choice_entry *choice_stack;
2404 /* The following variable holds the number of essential insns issued on
2405    the current cycle.  An insn is essential if it changes the
2406    processor's state.  */
2407 int cycle_issued_insns;
2409 /* This holds the value of the target dfa_lookahead hook. */
2412 /* The following variable holds the maximal number of tries of issuing
2413    insns for the first cycle multipass insn scheduling.  We define
2414    this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
2415    need this constraint if all real insns (with non-negative codes)
2416    had reservations, because in that case the algorithm complexity is
2417    O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
2418    might be incomplete and such insns might occur.  For such
2419    descriptions, the complexity of the algorithm (without the
2420    constraint) could reach DFA_LOOKAHEAD ** N, where N is the queue length.  */
2421 static int max_lookahead_tries;
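/* A worked example of the bound described above, mirroring the
   initialization done in max_issue () below (100 is the constant used
   there): with dfa_lookahead == 4 and issue_rate == 2 this yields
   100 * 4 * 4 == 1600 tries.  The helper below is only an illustrative
   sketch and is not used elsewhere.  */
#if 0
static int
toy_max_lookahead_tries (int lookahead, int rate)
{
  int tries = 100;		/* The empirical constant.  */
  int i;

  for (i = 0; i < rate; i++)	/* constant * LOOKAHEAD**RATE.  */
    tries *= lookahead;
  return tries;
}
#endif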
2423 /* The following value is the value of the hook
2424    `first_cycle_multipass_dfa_lookahead' at the last call of
2425    max_issue ().  */
2426 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2428 /* The following value is the value of `issue_rate' at the last call of sched_init ().  */
2430 static int cached_issue_rate = 0;
2432 /* The following function returns the maximal (or close to maximal) number
2433    of insns which can be issued on the same cycle, one of which is the
2434    insn with the best rank (the first insn in READY).  To achieve this,
2435    the function tries different samples of ready insns.  READY is the
2436    current queue `ready'.  The global array READY_TRY reflects which
2437    insns are already issued in this try.  MAX_POINTS is the sum of points
2438    of all instructions in READY.  The function stops immediately if it
2439    reaches a solution in which all instructions can be issued.
2440    INDEX will contain the index of the best insn in READY.  The following
2441    function is used only for first cycle multipass scheduling.
2445 This function expects recognized insns only. All USEs,
2446 CLOBBERs, etc must be filtered elsewhere. */
2448 max_issue (struct ready_list *ready, int privileged_n, state_t state,
2451 int n, i, all, n_ready, best, delay, tries_num, max_points;
2453 struct choice_entry *top;
2456 n_ready = ready->n_ready;
2457 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
2458 && privileged_n <= n_ready);
2460 /* Init MAX_LOOKAHEAD_TRIES. */
2461 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
2463 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
2464 max_lookahead_tries = 100;
2465 for (i = 0; i < issue_rate; i++)
2466 max_lookahead_tries *= dfa_lookahead;
2469 /* Init max_points. */
2471 more_issue = issue_rate - cycle_issued_insns;
2473 /* ??? We used to assert here that we never issue more insns than issue_rate.
2474 However, some targets (e.g. MIPS/SB1) claim lower issue rate than can be
2475 achieved to get better performance. Until these targets are fixed to use
2476 scheduler hooks to manipulate insn priorities instead, the assert should be removed.
2479 gcc_assert (more_issue >= 0); */
2481 for (i = 0; i < n_ready; i++)
2484 if (more_issue-- > 0)
2485 max_points += ISSUE_POINTS (ready_element (ready, i));
2490 /* The number of the issued insns in the best solution. */
2495 /* Set initial state of the search. */
2496 memcpy (top->state, state, dfa_state_size);
2497 top->rest = dfa_lookahead;
2500 /* Count the number of the insns to search among. */
2501 for (all = i = 0; i < n_ready; i++)
2505 /* I is the index of the insn to try next. */
2510 if (/* If we've reached a dead end or searched enough of what we have
2513 /* Or have nothing else to try. */
2516 /* ??? (... || i == n_ready). */
2517 gcc_assert (i <= n_ready);
2519 if (top == choice_stack)
2522 if (best < top - choice_stack)
2527 /* Try to find issued privileged insn. */
2528 while (n && !ready_try[--n]);
2531 if (/* If all insns are equally good... */
2533 /* Or a privileged insn will be issued. */
2535 /* Then we have a solution. */
2537 best = top - choice_stack;
2538 /* This is the index of the insn issued first in this solution.  */
2540 *index = choice_stack [1].index;
2541 if (top->n == max_points || best == all)
2546 /* Set ready-list index to point to the last insn
2547 ('i++' below will advance it to the next insn). */
2553 memcpy (state, top->state, dfa_state_size);
2555 else if (!ready_try [i])
2558 if (tries_num > max_lookahead_tries)
2560 insn = ready_element (ready, i);
2561 delay = state_transition (state, insn);
2564 if (state_dead_lock_p (state)
2565 || insn_finishes_cycle_p (insn))
2566 /* We won't issue any more instructions in the next
2573 if (memcmp (top->state, state, dfa_state_size) != 0)
2574 n += ISSUE_POINTS (insn);
2576 /* Advance to the next choice_entry. */
2578 /* Initialize it. */
2579 top->rest = dfa_lookahead;
2582 memcpy (top->state, state, dfa_state_size);
2589 /* Increase ready-list index. */
2593 /* Restore the original state of the DFA. */
2594 memcpy (state, choice_stack->state, dfa_state_size);
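/* A much-simplified sketch of the backtracking search that max_issue
   implements above: depth-first over the ready insns, remembering the
   deepest solution found within a bounded number of tries.  The toy_*
   names and the CAN_ISSUE callback are illustrative placeholders; the
   real code additionally tracks DFA states, issue points and privileged
   insns.  N_READY must not exceed TOY_MAX_READY.  */
#if 0
#include <string.h>

#define TOY_MAX_READY 16

struct toy_choice { int index; int rest; };

static int
toy_max_issue (int n_ready, int lookahead, int max_tries,
	       int (*can_issue) (int), int *best_index)
{
  struct toy_choice stack[TOY_MAX_READY + 1], *top = stack;
  char tried[TOY_MAX_READY];
  int i = 0, best = 0, tries = 0;

  memset (tried, 0, sizeof tried);
  *best_index = -1;
  top->rest = lookahead;
  for (;;)
    {
      if (top->rest == 0 || i >= n_ready)
	{
	  /* Dead end: record the solution if it is the deepest so far,
	     then backtrack.  */
	  if (top - stack > best)
	    {
	      best = top - stack;
	      *best_index = stack[1].index;
	    }
	  if (top == stack)
	    break;
	  tried[top->index] = 0;
	  i = top->index + 1;
	  top--;
	}
      else if (!tried[i] && ++tries <= max_tries && can_issue (i))
	{
	  /* Issue insn I and descend one level.  */
	  top[1].rest = top->rest - 1;
	  top++;
	  top->index = i;
	  tried[i] = 1;
	  i = 0;
	}
      else
	i++;
    }
  return best;			/* Size of the best solution found.  */
}
#endif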
2599 /* The following function chooses an insn from READY and modifies
2600    READY.  It is used only for first cycle multipass
2601    scheduling.  Return:
2603 -1 if cycle should be advanced,
2604 0 if INSN_PTR is set to point to the desirable insn,
2605 1 if choose_ready () should be restarted without advancing the cycle. */
2607 choose_ready (struct ready_list *ready, rtx *insn_ptr)
2611 if (dbg_cnt (sched_insn) == false)
2615 insn = next_nonnote_insn (last_scheduled_insn);
2617 if (QUEUE_INDEX (insn) == QUEUE_READY)
2618 /* INSN is in the ready_list. */
2620 ready_remove_insn (insn);
2625 /* INSN is in the queue. Advance cycle to move it to the ready list. */
2631 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2632 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2633 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
2634 || DEBUG_INSN_P (ready_element (ready, 0)))
2636 *insn_ptr = ready_remove_first (ready);
2641 /* Try to choose the better insn. */
2642 int index = 0, i, n;
2644 int try_data = 1, try_control = 1;
2647 insn = ready_element (ready, 0);
2648 if (INSN_CODE (insn) < 0)
2650 *insn_ptr = ready_remove_first (ready);
2655 && spec_info->flags & (PREFER_NON_DATA_SPEC
2656 | PREFER_NON_CONTROL_SPEC))
2658 for (i = 0, n = ready->n_ready; i < n; i++)
2663 x = ready_element (ready, i);
2666 if (spec_info->flags & PREFER_NON_DATA_SPEC
2667 && !(s & DATA_SPEC))
2670 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2675 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2676 && !(s & CONTROL_SPEC))
2679 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2685 ts = TODO_SPEC (insn);
2686 if ((ts & SPECULATIVE)
2687 && (((!try_data && (ts & DATA_SPEC))
2688 || (!try_control && (ts & CONTROL_SPEC)))
2689 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2691 .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
2692 /* Discard speculative instruction that stands first in the ready list.  */
2695 change_queue_index (insn, 1);
2701 for (i = 1; i < ready->n_ready; i++)
2703 insn = ready_element (ready, i);
2706 = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2707 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
2710 /* Let the target filter the search space. */
2711 for (i = 1; i < ready->n_ready; i++)
2714 insn = ready_element (ready, i);
2716 #ifdef ENABLE_CHECKING
2717 /* If this insn is recognizable we should have already
2718 recognized it earlier.
2719 ??? Not very clear where this is supposed to be done.
2721 gcc_assert (INSN_CODE (insn) >= 0
2722 || recog_memoized (insn) < 0);
2726 = (/* INSN_CODE check can be omitted here as it is also done later
2728 INSN_CODE (insn) < 0
2729 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2730 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2734 if (max_issue (ready, 1, curr_state, &index) == 0)
2736 *insn_ptr = ready_remove_first (ready);
2737 if (sched_verbose >= 4)
2738 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
2739 (*current_sched_info->print_insn) (*insn_ptr, 0));
2744 if (sched_verbose >= 4)
2745 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
2746 (*current_sched_info->print_insn)
2747 (ready_element (ready, index), 0));
2749 *insn_ptr = ready_remove (ready, index);
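/* An illustrative sketch of the -1/0/1 contract documented before
   choose_ready above; the real caller is the main loop of
   schedule_block below.  */
#if 0
for (;;)
  {
    rtx insn;
    int res = choose_ready (&ready, &insn);

    if (res < 0)
      break;		/* Advance the cycle.  */
    if (res > 0)
      continue;		/* Restart choose_ready ().  */

    /* res == 0: INSN points to the chosen insn; issue it.  */
  }
#endif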
2755 /* Use forward list scheduling to rearrange insns of the block pointed to by
2756    TARGET_BB, possibly bringing insns from subsequent blocks in the same region.  */
2760 schedule_block (basic_block *target_bb)
2762 int i, first_cycle_insn_p;
2764 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2765 int sort_p, advance, start_clock_var;
2767 /* Head/tail info for this block. */
2768 rtx prev_head = current_sched_info->prev_head;
2769 rtx next_tail = current_sched_info->next_tail;
2770 rtx head = NEXT_INSN (prev_head);
2771 rtx tail = PREV_INSN (next_tail);
2773 /* We used to have code to avoid getting parameters moved from hard
2774 argument registers into pseudos.
2776 However, it was removed when it proved to be of marginal benefit
2777 and caused problems because schedule_block and compute_forward_dependences
2778 had different notions of what the "head" insn was. */
2780 gcc_assert (head != tail || INSN_P (head));
2782 haifa_recovery_bb_recently_added_p = false;
2786 dump_new_block_header (0, *target_bb, head, tail);
2788 state_reset (curr_state);
2790 /* Clear the ready list. */
2791 ready.first = ready.veclen - 1;
2795 /* It is used for first cycle multipass scheduling. */
2796 temp_state = alloca (dfa_state_size);
2798 if (targetm.sched.md_init)
2799 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2801 /* We start inserting insns after PREV_HEAD. */
2802 last_scheduled_insn = prev_head;
2804 gcc_assert ((NOTE_P (last_scheduled_insn)
2805 || BOUNDARY_DEBUG_INSN_P (last_scheduled_insn))
2806 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2808 /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the queue.  */
2813 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
2814 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2816 /* Start just before the beginning of time. */
2819 /* We need queue and ready lists and clock_var to be initialized
2820 in try_ready () (which is called through init_ready_list ()). */
2821 (*current_sched_info->init_ready_list) ();
2823 /* The algorithm is O(n^2) in the number of ready insns at any given
2824 time in the worst case. Before reload we are more likely to have
2825 big lists so truncate them to a reasonable size. */
2826 if (!reload_completed
2827 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
2829 ready_sort (&ready);
2831 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
2832 If there are debug insns, we know they're first. */
2833 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
2834 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2837 if (sched_verbose >= 2)
2839 fprintf (sched_dump,
2840 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2841 fprintf (sched_dump,
2842 ";;\t\t before reload => truncated to %d insns\n", i);
2845 /* Delay all insns past it for 1 cycle.  If the debug counter is
2846    activated, make an exception for the insn right after
2847 last_scheduled_insn. */
2851 if (dbg_cnt (sched_insn) == false)
2852 skip_insn = next_nonnote_insn (last_scheduled_insn);
2854 skip_insn = NULL_RTX;
2856 while (i < ready.n_ready)
2860 insn = ready_remove (&ready, i);
2862 if (insn != skip_insn)
2863 queue_insn (insn, 1);
2868 /* Now we can restore basic block notes and maintain precise cfg. */
2869 restore_bb_notes (*target_bb);
2871 last_clock_var = -1;
2876 /* Loop until all the insns in BB are scheduled. */
2877 while ((*current_sched_info->schedule_more_p) ())
2881 start_clock_var = clock_var;
2885 advance_one_cycle ();
2887 /* Add to the ready list all pending insns that can be issued now.
2888 If there are no ready insns, increment clock until one
2889 is ready and add all pending insns at that point to the ready list.  */
2891 queue_to_ready (&ready);
2893 gcc_assert (ready.n_ready);
2895 if (sched_verbose >= 2)
2897 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2898 debug_ready_list (&ready);
2900 advance -= clock_var - start_clock_var;
2902 while (advance > 0);
2906 /* Sort the ready list based on priority. */
2907 ready_sort (&ready);
2909 if (sched_verbose >= 2)
2911 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2912 debug_ready_list (&ready);
2916 /* We don't want md sched reorder to even see debug insns, so put
2917 them out right away. */
2918 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2920 if (control_flow_insn_p (last_scheduled_insn))
2922 *target_bb = current_sched_info->advance_target_bb
2929 x = next_real_insn (last_scheduled_insn);
2931 dump_new_block_header (1, *target_bb, x, tail);
2934 last_scheduled_insn = bb_note (*target_bb);
2937 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2939 rtx insn = ready_remove_first (&ready);
2940 gcc_assert (DEBUG_INSN_P (insn));
2941 (*current_sched_info->begin_schedule_ready) (insn,
2942 last_scheduled_insn);
2943 move_insn (insn, last_scheduled_insn,
2944 current_sched_info->next_tail);
2945 last_scheduled_insn = insn;
2946 advance = schedule_insn (insn);
2947 gcc_assert (advance == 0);
2948 if (ready.n_ready > 0)
2949 ready_sort (&ready);
2956 /* Allow the target to reorder the list, typically for
2957 better instruction bundling. */
2958 if (sort_p && targetm.sched.reorder
2959 && (ready.n_ready == 0
2960 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2962 targetm.sched.reorder (sched_dump, sched_verbose,
2963 ready_lastpos (&ready),
2964 &ready.n_ready, clock_var);
2966 can_issue_more = issue_rate;
2968 first_cycle_insn_p = 1;
2969 cycle_issued_insns = 0;
2976 if (sched_verbose >= 2)
2978 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2980 debug_ready_list (&ready);
2981 if (sched_pressure_p)
2982 print_curr_reg_pressure ();
2985 if (ready.n_ready == 0
2987 && reload_completed)
2989 /* Allow scheduling insns directly from the queue in case
2990 there's nothing better to do (ready list is empty) but
2991 there are still vacant dispatch slots in the current cycle. */
2992 if (sched_verbose >= 6)
2993 fprintf (sched_dump,";;\t\tSecond chance\n");
2994 memcpy (temp_state, curr_state, dfa_state_size);
2995 if (early_queue_to_ready (temp_state, &ready))
2996 ready_sort (&ready);
2999 if (ready.n_ready == 0
3001 || state_dead_lock_p (curr_state)
3002 || !(*current_sched_info->schedule_more_p) ())
3005 /* Select and remove the insn from the ready list. */
3011 res = choose_ready (&ready, &insn);
3017 /* Restart choose_ready (). */
3020 gcc_assert (insn != NULL_RTX);
3023 insn = ready_remove_first (&ready);
3025 if (sched_pressure_p && INSN_TICK (insn) > clock_var)
3027 ready_add (&ready, insn, true);
3032 if (targetm.sched.dfa_new_cycle
3033 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
3034 insn, last_clock_var,
3035 clock_var, &sort_p))
3036 /* SORT_P is used by the target to override sorting
3037 of the ready list. This is needed when the target
3038 has modified its internal structures expecting that
3039 the insn will be issued next. As we need the insn
3040 to have the highest priority (so it will be returned by
3041 the ready_remove_first call above), we invoke
3042 ready_add (&ready, insn, true).
3043 But, still, there is one issue: INSN can be later
3044 discarded by scheduler's front end through
3045 current_sched_info->can_schedule_ready_p, hence, won't be issued next.  */
3048 ready_add (&ready, insn, true);
3053 memcpy (temp_state, curr_state, dfa_state_size);
3054 if (recog_memoized (insn) < 0)
3056 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
3057 || asm_noperands (PATTERN (insn)) >= 0);
3058 if (!first_cycle_insn_p && asm_p)
3059 /* This is an asm insn that we tried to issue on a cycle
3060    other than the first.  Issue it on the next cycle.  */
3063 /* A USE insn, or something else we don't need to
3064 understand. We can't pass these directly to
3065 state_transition because it will trigger a
3066 fatal error for unrecognizable insns. */
3069 else if (sched_pressure_p)
3073 cost = state_transition (temp_state, insn);
3082 queue_insn (insn, cost);
3083 if (SCHED_GROUP_P (insn))
3092 if (current_sched_info->can_schedule_ready_p
3093 && ! (*current_sched_info->can_schedule_ready_p) (insn))
3094 /* We normally get here only if we don't want to move
3095 insn from the split block. */
3097 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
3101 /* DECISION is made. */
3103 if (TODO_SPEC (insn) & SPECULATIVE)
3104 generate_recovery_code (insn);
3106 if (control_flow_insn_p (last_scheduled_insn)
3107 /* This is used to switch basic blocks by request
3108 from scheduler front-end (actually, sched-ebb.c only).
3109 This is used to process blocks with single fallthru
3110 edge.  If the succeeding block has a jump, that jump will try to
3111 move to the end of the current bb, thus corrupting the CFG.  */
3112 || current_sched_info->advance_target_bb (*target_bb, insn))
3114 *target_bb = current_sched_info->advance_target_bb
3121 x = next_real_insn (last_scheduled_insn);
3123 dump_new_block_header (1, *target_bb, x, tail);
3126 last_scheduled_insn = bb_note (*target_bb);
3129 /* Update counters, etc in the scheduler's front end. */
3130 (*current_sched_info->begin_schedule_ready) (insn,
3131 last_scheduled_insn);
3133 move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
3134 reemit_notes (insn);
3135 last_scheduled_insn = insn;
3137 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
3139 cycle_issued_insns++;
3140 memcpy (curr_state, temp_state, dfa_state_size);
3143 if (targetm.sched.variable_issue)
3145 targetm.sched.variable_issue (sched_dump, sched_verbose,
3146 insn, can_issue_more);
3147 /* A naked CLOBBER or USE generates no instruction, so do
3148 not count them against the issue rate. */
3149 else if (GET_CODE (PATTERN (insn)) != USE
3150 && GET_CODE (PATTERN (insn)) != CLOBBER)
3152 advance = schedule_insn (insn);
3154 /* After issuing an asm insn we should start a new cycle. */
3155 if (advance == 0 && asm_p)
3160 first_cycle_insn_p = 0;
3162 /* Sort the ready list based on priority. This must be
3163 redone here, as schedule_insn may have readied additional
3164 insns that will not be sorted correctly. */
3165 if (ready.n_ready > 0)
3166 ready_sort (&ready);
3168 /* Quickly go through debug insns such that md sched
3169 reorder2 doesn't have to deal with debug insns. */
3170 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
3171 && (*current_sched_info->schedule_more_p) ())
3173 if (control_flow_insn_p (last_scheduled_insn))
3175 *target_bb = current_sched_info->advance_target_bb
3182 x = next_real_insn (last_scheduled_insn);
3184 dump_new_block_header (1, *target_bb, x, tail);
3187 last_scheduled_insn = bb_note (*target_bb);
3190 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
3192 insn = ready_remove_first (&ready);
3193 gcc_assert (DEBUG_INSN_P (insn));
3194 (*current_sched_info->begin_schedule_ready)
3195 (insn, last_scheduled_insn);
3196 move_insn (insn, last_scheduled_insn,
3197 current_sched_info->next_tail);
3198 advance = schedule_insn (insn);
3199 last_scheduled_insn = insn;
3200 gcc_assert (advance == 0);
3201 if (ready.n_ready > 0)
3202 ready_sort (&ready);
3206 if (targetm.sched.reorder2
3207 && (ready.n_ready == 0
3208 || !SCHED_GROUP_P (ready_element (&ready, 0))))
3211 targetm.sched.reorder2 (sched_dump, sched_verbose,
3213 ? ready_lastpos (&ready) : NULL,
3214 &ready.n_ready, clock_var);
3222 fprintf (sched_dump, ";;\tReady list (final): ");
3223 debug_ready_list (&ready);
3226 if (current_sched_info->queue_must_finish_empty)
3227 /* Sanity check -- queue must be empty now.  Meaningless if region has multiple bbs.  */
3229 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
3232 /* We must maintain QUEUE_INDEX between blocks in region. */
3233 for (i = ready.n_ready - 1; i >= 0; i--)
3237 x = ready_element (&ready, i);
3238 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3239 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3243 for (i = 0; i <= max_insn_queue_index; i++)
3246 for (link = insn_queue[i]; link; link = XEXP (link, 1))
3251 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3252 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3254 free_INSN_LIST_list (&insn_queue[i]);
3259 fprintf (sched_dump, ";; total time = %d\n", clock_var);
3261 if (!current_sched_info->queue_must_finish_empty
3262 || haifa_recovery_bb_recently_added_p)
3264 /* INSN_TICK (minimum clock tick at which the insn becomes
3265    ready) may not be correct for insns in the subsequent
3266 blocks of the region. We should use a correct value of
3267 `clock_var' or modify INSN_TICK. It is better to keep
3268 clock_var value equal to 0 at the start of a basic block.
3269 Therefore we modify INSN_TICK here. */
3270 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
3273 if (targetm.sched.md_finish)
3275 targetm.sched.md_finish (sched_dump, sched_verbose);
3276 /* Target might have added some instructions to the scheduled block
3277 in its md_finish () hook. These new insns don't have any data
3278 initialized, and to identify them we extend h_i_d so that they'll get zero luids.  */
3280 sched_init_luids (NULL, NULL, NULL, NULL);
3284 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n",
3285 INSN_UID (head), INSN_UID (tail));
3287 /* Update head/tail boundaries. */
3288 head = NEXT_INSN (prev_head);
3289 tail = last_scheduled_insn;
3291 head = restore_other_notes (head, NULL);
3293 current_sched_info->head = head;
3294 current_sched_info->tail = tail;
3297 /* Set_priorities: compute priority of each insn in the block. */
3300 set_priorities (rtx head, rtx tail)
3304 int sched_max_insns_priority =
3305 current_sched_info->sched_max_insns_priority;
3308 if (head == tail && (! INSN_P (head) || BOUNDARY_DEBUG_INSN_P (head)))
3313 prev_head = PREV_INSN (head);
3314 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
3320 (void) priority (insn);
3322 gcc_assert (INSN_PRIORITY_KNOWN (insn));
3324 sched_max_insns_priority = MAX (sched_max_insns_priority,
3325 INSN_PRIORITY (insn));
3328 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
3333 /* Set dump and sched_verbose for the desired debugging output. If no
3334 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
3335 For -fsched-verbose=N, N>=10, print everything to stderr. */
3337 setup_sched_dump (void)
3339 sched_verbose = sched_verbose_param;
3340 if (sched_verbose_param == 0 && dump_file)
3341   sched_verbose = 1;
3342 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
3343 ? stderr : dump_file);
3346 /* Initialize some global state for the scheduler. This function works
3347 with the common data shared between all the schedulers. It is called
3348 from the scheduler specific initialization routine. */
3353 /* Disable speculative loads if cc0 is defined: they are not safe in its presence.  */
3355 flag_schedule_speculative_load = 0;
3358 sched_pressure_p = (flag_sched_pressure && ! reload_completed
3359 && common_sched_info->sched_pass_id == SCHED_RGN_PASS);
3360 if (sched_pressure_p)
3361 ira_setup_eliminable_regset ();
3363 /* Initialize SPEC_INFO. */
3364 if (targetm.sched.set_sched_flags)
3366 spec_info = &spec_info_var;
3367 targetm.sched.set_sched_flags (spec_info);
3369 if (spec_info->mask != 0)
3371 spec_info->data_weakness_cutoff =
3372 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
3373 spec_info->control_weakness_cutoff =
3374 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
3375 * REG_BR_PROB_BASE) / 100;
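	  /* Worked example (the parameter's default value is assumed
	     here, not guaranteed): with a sched-spec-prob-cutoff
	     parameter of 40, a data speculation is considered profitable
	     only if its weakness is at least 40 * MAX_DEP_WEAK / 100,
	     i.e. roughly a 40% success probability; the control cutoff
	     is scaled by REG_BR_PROB_BASE instead.  */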
3378 /* So we won't read anything accidentally. */
3383 /* So we won't read anything accidentally. */
3386 /* Initialize issue_rate. */
3387 if (targetm.sched.issue_rate)
3388 issue_rate = targetm.sched.issue_rate ();
3392 if (cached_issue_rate != issue_rate)
3394 cached_issue_rate = issue_rate;
3395 /* To invalidate max_lookahead_tries: */
3396 cached_first_cycle_multipass_dfa_lookahead = 0;
3399 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
3400 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
3404 if (targetm.sched.init_dfa_pre_cycle_insn)
3405 targetm.sched.init_dfa_pre_cycle_insn ();
3407 if (targetm.sched.init_dfa_post_cycle_insn)
3408 targetm.sched.init_dfa_post_cycle_insn ();
3411 dfa_state_size = state_size ();
3413 init_alias_analysis ();
3415 df_set_flags (DF_LR_RUN_DCE);
3416 df_note_add_problem ();
3418 /* More problems needed for interloop dep calculation in SMS. */
3419 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
3421 df_rd_add_problem ();
3422 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
3427 /* Do not run DCE after reload, as this can kill nops inserted by bundling.  */
3429 if (reload_completed)
3430 df_clear_flags (DF_LR_RUN_DCE);
3432 regstat_compute_calls_crossed ();
3434 if (targetm.sched.md_init_global)
3435 targetm.sched.md_init_global (sched_dump, sched_verbose,
3436 get_max_uid () + 1);
3438 if (sched_pressure_p)
3440 int i, max_regno = max_reg_num ();
3442 ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
3443 sched_regno_cover_class
3444 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
3445 for (i = 0; i < max_regno; i++)
3446 sched_regno_cover_class[i]
3447 = (i < FIRST_PSEUDO_REGISTER
3448 ? ira_class_translate[REGNO_REG_CLASS (i)]
3449 : reg_cover_class (i));
3450 curr_reg_live = BITMAP_ALLOC (NULL);
3451 saved_reg_live = BITMAP_ALLOC (NULL);
3452 region_ref_regs = BITMAP_ALLOC (NULL);
3455 curr_state = xmalloc (dfa_state_size);
3458 static void haifa_init_only_bb (basic_block, basic_block);
3460 /* Initialize data structures specific to the Haifa scheduler. */
3462 haifa_sched_init (void)
3464 setup_sched_dump ();
3467 if (spec_info != NULL)
3469 sched_deps_info->use_deps_list = 1;
3470 sched_deps_info->generate_spec_deps = 1;
3473 /* Initialize luids, dependency caches, target and h_i_d for the
3476 bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
3482 VEC_quick_push (basic_block, bbs, bb);
3483 sched_init_luids (bbs, NULL, NULL, NULL);
3484 sched_deps_init (true);
3485 sched_extend_target ();
3486 haifa_init_h_i_d (bbs, NULL, NULL, NULL);
3488 VEC_free (basic_block, heap, bbs);
3491 sched_init_only_bb = haifa_init_only_bb;
3492 sched_split_block = sched_split_block_1;
3493 sched_create_empty_bb = sched_create_empty_bb_1;
3494 haifa_recovery_bb_ever_added_p = false;
3496 #ifdef ENABLE_CHECKING
3497 /* This is used mainly for finding bugs in check_cfg () itself.
3498    We must call sched_init_bbs () before check_cfg () because check_cfg ()
3499    assumes that the last insn in the last bb has a non-null successor.  */
3503 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
3504 before_recovery = 0;
3508 /* Finish work with the data specific to the Haifa scheduler. */
3510 haifa_sched_finish (void)
3512 sched_create_empty_bb = NULL;
3513 sched_split_block = NULL;
3514 sched_init_only_bb = NULL;
3516 if (spec_info && spec_info->dump)
3518 char c = reload_completed ? 'a' : 'b';
3520 fprintf (spec_info->dump,
3521 ";; %s:\n", current_function_name ());
3523 fprintf (spec_info->dump,
3524 ";; Procedure %cr-begin-data-spec motions == %d\n",
3526 fprintf (spec_info->dump,
3527 ";; Procedure %cr-be-in-data-spec motions == %d\n",
3529 fprintf (spec_info->dump,
3530 ";; Procedure %cr-begin-control-spec motions == %d\n",
3531 c, nr_begin_control);
3532 fprintf (spec_info->dump,
3533 ";; Procedure %cr-be-in-control-spec motions == %d\n",
3534 c, nr_be_in_control);
3537 /* Finalize h_i_d, dependency caches, and luids for the whole
3538 function. Target will be finalized in md_global_finish (). */
3539 sched_deps_finish ();
3540 sched_finish_luids ();
3541 current_sched_info = NULL;
3545 /* Free global data used during insn scheduling. This function works with
3546 the common data shared between the schedulers. */
3551 haifa_finish_h_i_d ();
3552 if (sched_pressure_p)
3554 free (sched_regno_cover_class);
3555 BITMAP_FREE (region_ref_regs);
3556 BITMAP_FREE (saved_reg_live);
3557 BITMAP_FREE (curr_reg_live);
3561 if (targetm.sched.md_finish_global)
3562 targetm.sched.md_finish_global (sched_dump, sched_verbose);
3564 end_alias_analysis ();
3566 regstat_free_calls_crossed ();
3570 #ifdef ENABLE_CHECKING
3571 /* After reload the ia64 back end clobbers the CFG, so we can't check anything.  */
3572 if (!reload_completed)
3577 /* Fix INSN_TICKs of the instructions in the current block as well as
3578 INSN_TICKs of their dependents.
3579 HEAD and TAIL are the begin and the end of the current scheduled block. */
3581 fix_inter_tick (rtx head, rtx tail)
3583 /* Set of instructions with corrected INSN_TICK. */
3584 bitmap_head processed;
3585 /* ??? It is doubtful if we should assume that cycle advance happens on
3586 basic block boundaries.  Basically, insns that are unconditionally ready
3587 at the start of the block are preferable to those which have
3588 a one cycle dependency on an insn from the previous block.  */
3589 int next_clock = clock_var + 1;
3591 bitmap_initialize (&processed, 0);
3593 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
3594    the INSN_TICKs of dependent instructions, so that INSN_TICKs are
3595    consistent across different blocks.  */
3596 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
3601 sd_iterator_def sd_it;
3604 tick = INSN_TICK (head);
3605 gcc_assert (tick >= MIN_TICK);
3607 /* Fix INSN_TICK of instruction from just scheduled block. */
3608 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
3610 bitmap_set_bit (&processed, INSN_LUID (head));
3613 if (tick < MIN_TICK)
3616 INSN_TICK (head) = tick;
3619 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
3623 next = DEP_CON (dep);
3624 tick = INSN_TICK (next);
3626 if (tick != INVALID_TICK
3627 /* If NEXT has its INSN_TICK calculated, fix it.
3628 If not - it will be properly calculated from
3629 scratch later in fix_tick_ready. */
3630 && !bitmap_bit_p (&processed, INSN_LUID (next)))
3632 bitmap_set_bit (&processed, INSN_LUID (next));
3635 if (tick < MIN_TICK)
3638 if (tick > INTER_TICK (next))
3639 INTER_TICK (next) = tick;
3641 tick = INTER_TICK (next);
3643 INSN_TICK (next) = tick;
3648 bitmap_clear (&processed);
3651 static int haifa_speculate_insn (rtx, ds_t, rtx *);
3653 /* Check if NEXT is ready to be added to the ready or queue list.
3654 If "yes", add it to the proper list.  Returns:
3656 -1 - is not ready yet,
3657 0 - added to the ready list,
3658 0 < N - queued for N cycles. */
3660 try_ready (rtx next)
3664 ts = &TODO_SPEC (next);
3667 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3668 && ((old_ts & HARD_DEP)
3669 || (old_ts & SPECULATIVE)));
3671 if (sd_lists_empty_p (next, SD_LIST_BACK))
3672 /* NEXT has all its dependencies resolved. */
3674 /* Remove HARD_DEP bit from NEXT's status. */
3677 if (current_sched_info->flags & DO_SPECULATION)
3678 /* Remove all speculative bits from NEXT's status. */
3679 *ts &= ~SPECULATIVE;
3683 /* One of NEXT's dependencies has been resolved.
3684 Recalculate NEXT's status. */
3686 *ts &= ~SPECULATIVE & ~HARD_DEP;
3688 if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
3689 /* Now we've got NEXT with speculative deps only.
3690 1. Look at the deps to see what we have to do.
3691 2. Check if we can do 'todo'. */
3693 sd_iterator_def sd_it;
3695 bool first_p = true;
3697 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
3699 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
3701 if (DEBUG_INSN_P (DEP_PRO (dep))
3702 && !DEBUG_INSN_P (next))
3712 *ts = ds_merge (*ts, ds);
3715 if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
3716 /* Too few points. */
3717 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3724 gcc_assert (*ts == old_ts
3725 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3726 else if (current_sched_info->new_ready)
3727 *ts = current_sched_info->new_ready (next, *ts);
3729 /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
3730    have its original pattern or a changed (speculative) one.  This is due
3731    to changing ebb in region scheduling.
3732    * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
3733    has a speculative pattern.
3735 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3736 control-speculative NEXT could have been discarded by sched-rgn.c
3737 (the same case as when discarded by can_schedule_ready_p ()). */
3739 if ((*ts & SPECULATIVE)
3740 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3741 need to change anything. */
3747 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3749 res = haifa_speculate_insn (next, *ts, &new_pat);
3754 /* It would be nice to change DEP_STATUS of all dependences,
3755 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3756 so we won't reanalyze anything. */
3757 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3761 /* We follow the rule that every speculative insn
3762    has a non-null ORIG_PAT.  */
3763 if (!ORIG_PAT (next))
3764 ORIG_PAT (next) = PATTERN (next);
3768 if (!ORIG_PAT (next))
3769 /* If we are going to overwrite the original pattern of the insn, save it.  */
3771 ORIG_PAT (next) = PATTERN (next);
3773 haifa_change_pattern (next, new_pat);
3781 /* We need to restore the pattern only if (*ts == 0), because otherwise it is
3782    either correct (*ts & SPECULATIVE),
3783    or we simply don't care (*ts & HARD_DEP).  */
3785 gcc_assert (!ORIG_PAT (next)
3786 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3790 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3791 control-speculative NEXT could have been discarded by sched-rgn.c
3792 (the same case as when discarded by can_schedule_ready_p ()). */
3793 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3795 change_queue_index (next, QUEUE_NOWHERE);
3798 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3799 /* We should change the pattern of every previously speculative
3800    instruction - and we determine if NEXT was speculative by using
3801    the ORIG_PAT field.  Except one case - speculation checks have
3802    ORIG_PAT set too, so skip them.  */
3804 haifa_change_pattern (next, ORIG_PAT (next));
3805 ORIG_PAT (next) = 0;
3808 if (sched_verbose >= 2)
3810 int s = TODO_SPEC (next);
3812 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3813 (*current_sched_info->print_insn) (next, 0));
3815 if (spec_info && spec_info->dump)
3818 fprintf (spec_info->dump, "; data-spec;");
3819 if (s & BEGIN_CONTROL)
3820 fprintf (spec_info->dump, "; control-spec;");
3821 if (s & BE_IN_CONTROL)
3822 fprintf (spec_info->dump, "; in-control-spec;");
3825 fprintf (sched_dump, "\n");
3828 adjust_priority (next);
3830 return fix_tick_ready (next);
3833 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3835 fix_tick_ready (rtx next)
3839 if (!sd_lists_empty_p (next, SD_LIST_RES_BACK))
3842 sd_iterator_def sd_it;
3845 tick = INSN_TICK (next);
3846 /* If TICK is not equal to INVALID_TICK, then update
3847    INSN_TICK of NEXT with the most recent resolved dependence
3848    cost.  Otherwise, recalculate it from scratch.  */
3849 full_p = (tick == INVALID_TICK);
3851 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
3853 rtx pro = DEP_PRO (dep);
3856 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3858 tick1 = INSN_TICK (pro) + dep_cost (dep);
3869 INSN_TICK (next) = tick;
3871 delay = tick - clock_var;
3872 if (delay <= 0 || sched_pressure_p)
3873 delay = QUEUE_READY;
3875 change_queue_index (next, delay);
3880 /* Move NEXT to the proper queue list with (DELAY >= 1),
3881 or add it to the ready list (DELAY == QUEUE_READY),
3882 or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE).  */
3884 change_queue_index (rtx next, int delay)
3886 int i = QUEUE_INDEX (next);
3888 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3890 gcc_assert (i != QUEUE_SCHEDULED);
3892 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3893 || (delay < 0 && delay == i))
3894 /* We have nothing to do. */
3897 /* Remove NEXT from wherever it is now. */
3898 if (i == QUEUE_READY)
3899 ready_remove_insn (next);
3901 queue_remove (next);
3903 /* Add it to the proper place. */
3904 if (delay == QUEUE_READY)
3905 ready_add (readyp, next, false);
3906 else if (delay >= 1)
3907 queue_insn (next, delay);
3909 if (sched_verbose >= 2)
3911 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3912 (*current_sched_info->print_insn) (next, 0));
3914 if (delay == QUEUE_READY)
3915 fprintf (sched_dump, " into ready\n");
3916 else if (delay >= 1)
3917 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3919 fprintf (sched_dump, " removed from ready or queue lists\n");
3923 static int sched_ready_n_insns = -1;
3925 /* Initialize per region data structures. */
3927 sched_extend_ready_list (int new_sched_ready_n_insns)
3931 if (sched_ready_n_insns == -1)
3932 /* At the first call we need to initialize one more choice_stack entry.  */
3936 sched_ready_n_insns = 0;
3939 i = sched_ready_n_insns + 1;
3941 ready.veclen = new_sched_ready_n_insns + issue_rate;
3942 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
3944 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
3946 ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
3947 sched_ready_n_insns, sizeof (*ready_try));
3949 /* We allocate +1 element to save the initial state in the choice_stack[0] entry.  */
3951 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3952 new_sched_ready_n_insns + 1);
3954 for (; i <= new_sched_ready_n_insns; i++)
3955 choice_stack[i].state = xmalloc (dfa_state_size);
3957 sched_ready_n_insns = new_sched_ready_n_insns;
3960 /* Free per region data structures. */
3962 sched_finish_ready_list (void)
3973 for (i = 0; i <= sched_ready_n_insns; i++)
3974 free (choice_stack [i].state);
3975 free (choice_stack);
3976 choice_stack = NULL;
3978 sched_ready_n_insns = -1;
3982 haifa_luid_for_non_insn (rtx x)
3984 gcc_assert (NOTE_P (x) || LABEL_P (x));
3989 /* Generates recovery code for INSN. */
3991 generate_recovery_code (rtx insn)
3993 if (TODO_SPEC (insn) & BEGIN_SPEC)
3994 begin_speculative_block (insn);
3996 /* Here we have insn with no dependencies to
3997 instructions other than CHECK_SPEC ones.  */
3999 if (TODO_SPEC (insn) & BE_IN_SPEC)
4000 add_to_speculative_block (insn);
4004 Tries to add speculative dependencies of type FS between instructions
4005 in deps_list L and TWIN. */
4007 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
4009 sd_iterator_def sd_it;
4012 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
4017 consumer = DEP_CON (dep);
4019 ds = DEP_STATUS (dep);
4021 if (/* If we want to create speculative dep. */
4023 /* And we can do that because this is a true dep. */
4024 && (ds & DEP_TYPES) == DEP_TRUE)
4026 gcc_assert (!(ds & BE_IN_SPEC));
4028 if (/* If this dep can be overcome with 'begin speculation'. */
4030 /* Then we have a choice: keep the dep 'begin speculative'
4031 or transform it into 'be in speculative'. */
4033 if (/* In try_ready we assert that if insn once became ready
4034 it can be removed from the ready (or queue) list only
4035 due to backend decision. Hence we can't let the
4036 probability of the speculative dep decrease.  */
4037 ds_weak (ds) <= ds_weak (fs))
4041 new_ds = (ds & ~BEGIN_SPEC) | fs;
4043 if (/* consumer can 'be in speculative'. */
4044 sched_insn_is_legitimate_for_speculation_p (consumer,
4046 /* Transform it to be in speculative. */
4051 /* Mark the dep as 'be in speculative'. */
4056 dep_def _new_dep, *new_dep = &_new_dep;
4058 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
4059 sd_add_dep (new_dep, false);
4064 /* Generates recovery code for BEGIN speculative INSN. */
4066 begin_speculative_block (rtx insn)
4068 if (TODO_SPEC (insn) & BEGIN_DATA)
4070 if (TODO_SPEC (insn) & BEGIN_CONTROL)
4073 create_check_block_twin (insn, false);
4075 TODO_SPEC (insn) &= ~BEGIN_SPEC;
4078 static void haifa_init_insn (rtx);
4080 /* Generates recovery code for BE_IN speculative INSN. */
4082 add_to_speculative_block (rtx insn)
4085 sd_iterator_def sd_it;
4088 rtx_vec_t priorities_roots;
4090 ts = TODO_SPEC (insn);
4091 gcc_assert (!(ts & ~BE_IN_SPEC));
4093 if (ts & BE_IN_DATA)
4095 if (ts & BE_IN_CONTROL)
4098 TODO_SPEC (insn) &= ~BE_IN_SPEC;
4099 gcc_assert (!TODO_SPEC (insn));
4101 DONE_SPEC (insn) |= ts;
4103 /* First we convert all simple checks to branchy. */
4104 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4105 sd_iterator_cond (&sd_it, &dep);)
4107 rtx check = DEP_PRO (dep);
4109 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
4111 create_check_block_twin (check, true);
4113 /* Restart search. */
4114 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4117 /* Continue search. */
4118 sd_iterator_next (&sd_it);
4121 priorities_roots = NULL;
4122 clear_priorities (insn, &priorities_roots);
4129 /* Get the first backward dependency of INSN. */
4130 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4131 if (!sd_iterator_cond (&sd_it, &dep))
4132 /* INSN has no backward dependencies left. */
4135 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
4136 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
4137 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4139 check = DEP_PRO (dep);
4141 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
4142 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
4144 rec = BLOCK_FOR_INSN (check);
4146 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
4147 haifa_init_insn (twin);
4149 sd_copy_back_deps (twin, insn, true);
4151 if (sched_verbose && spec_info->dump)
4152 /* INSN_BB (insn) isn't determined for twin insns yet.
4153 So we can't use current_sched_info->print_insn. */
4154 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4155 INSN_UID (twin), rec->index);
4157 twins = alloc_INSN_LIST (twin, twins);
4159 /* Add dependences between TWIN and all appropriate
4160 instructions from REC. */
4161 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
4163 rtx pro = DEP_PRO (dep);
4165 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
4167 /* INSN might have dependencies on instructions from
4168    several recovery blocks.  In this iteration we process those
4169    producers that reside in REC.  */
4170 if (BLOCK_FOR_INSN (pro) == rec)
4172 dep_def _new_dep, *new_dep = &_new_dep;
4174 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
4175 sd_add_dep (new_dep, false);
4179 process_insn_forw_deps_be_in_spec (insn, twin, ts);
4181 /* Remove all dependencies between INSN and insns in REC. */
4182 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4183 sd_iterator_cond (&sd_it, &dep);)
4185 rtx pro = DEP_PRO (dep);
4187 if (BLOCK_FOR_INSN (pro) == rec)
4188 sd_delete_dep (sd_it);
4190 sd_iterator_next (&sd_it);
4194 /* We couldn't have added the dependencies between INSN and TWINS earlier
4195 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
4200 twin = XEXP (twins, 0);
4203 dep_def _new_dep, *new_dep = &_new_dep;
4205 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4206 sd_add_dep (new_dep, false);
4209 twin = XEXP (twins, 1);
4210 free_INSN_LIST_node (twins);
4214 calc_priorities (priorities_roots);
4215 VEC_free (rtx, heap, priorities_roots);
4218 /* Extend the array pointed to by P, zero-filling only the new part.  */
4220 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
4222 gcc_assert (new_nmemb >= old_nmemb);
4223 p = XRESIZEVAR (void, p, new_nmemb * size);
4224 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
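/* A minimal usage sketch (VEC is a hypothetical variable): grow a
   4-element array to 8 elements, with the 4 new elements zero-filled,
   matching the way ready_try is grown in sched_extend_ready_list
   above.  */
#if 0
rtx *vec = XNEWVEC (rtx, 4);
vec = (rtx *) xrecalloc (vec, 8, 4, sizeof (rtx));
#endif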
4229 Find fallthru edge from PRED. */
4231 find_fallthru_edge (basic_block pred)
4237 succ = pred->next_bb;
4238 gcc_assert (succ->prev_bb == pred);
4240 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
4242 FOR_EACH_EDGE (e, ei, pred->succs)
4243 if (e->flags & EDGE_FALLTHRU)
4245 gcc_assert (e->dest == succ);
4251 FOR_EACH_EDGE (e, ei, succ->preds)
4252 if (e->flags & EDGE_FALLTHRU)
4254 gcc_assert (e->src == pred);
4262 /* Extend per basic block data structures. */
4264 sched_extend_bb (void)
4268 /* The following is done to keep current_sched_info->next_tail non null. */
4269 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4270 if (NEXT_INSN (insn) == 0
4273 /* Don't emit a NOTE if it would end up before a BARRIER. */
4274 && !BARRIER_P (NEXT_INSN (insn))))
4276 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
4277 /* Make the note appear outside the BB.  */
4278 set_block_for_insn (note, NULL);
4279 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4283 /* Init per basic block data structures. */
4285 sched_init_bbs (void)
4290 /* Initialize BEFORE_RECOVERY variable. */
4292 init_before_recovery (basic_block *before_recovery_ptr)
4297 last = EXIT_BLOCK_PTR->prev_bb;
4298 e = find_fallthru_edge (last);
4302 /* We create two basic blocks:
4303 1. A single instruction block is inserted right after E->SRC and has a jump to
4305 2. Empty block right before EXIT_BLOCK.
4306 Between these two blocks recovery blocks will be emitted. */
4308 basic_block single, empty;
4311 /* If the fallthrough edge to exit we've found is from the block we've
4312 created before, don't do anything more. */
4313 if (last == after_recovery)
4316 adding_bb_to_current_region_p = false;
4318 single = sched_create_empty_bb (last);
4319 empty = sched_create_empty_bb (single);
4321 /* Add new blocks to the root loop. */
4322 if (current_loops != NULL)
4324 add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
4325 add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
4328 single->count = last->count;
4329 empty->count = last->count;
4330 single->frequency = last->frequency;
4331 empty->frequency = last->frequency;
4332 BB_COPY_PARTITION (single, last);
4333 BB_COPY_PARTITION (empty, last);
4335 redirect_edge_succ (e, single);
4336 make_single_succ_edge (single, empty, 0);
4337 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
4338 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
4340 label = block_label (empty);
4341 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
4342 JUMP_LABEL (x) = label;
4343 LABEL_NUSES (label)++;
4344 haifa_init_insn (x);
4346 emit_barrier_after (x);
4348 sched_init_only_bb (empty, NULL);
4349 sched_init_only_bb (single, NULL);
4352 adding_bb_to_current_region_p = true;
4353 before_recovery = single;
4354 after_recovery = empty;
4356 if (before_recovery_ptr)
4357 *before_recovery_ptr = before_recovery;
4359 if (sched_verbose >= 2 && spec_info->dump)
4360 fprintf (spec_info->dump,
4361 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
4362 last->index, single->index, empty->index);
4365 before_recovery = last;
4368 /* Returns new recovery block. */
4370 sched_create_recovery_block (basic_block *before_recovery_ptr)
4376 haifa_recovery_bb_recently_added_p = true;
4377 haifa_recovery_bb_ever_added_p = true;
4379 init_before_recovery (before_recovery_ptr);
4381 barrier = get_last_bb_insn (before_recovery);
4382 gcc_assert (BARRIER_P (barrier));
4384 label = emit_label_after (gen_label_rtx (), barrier);
4386 rec = create_basic_block (label, label, before_recovery);
4388 /* A recovery block always ends with an unconditional jump. */
4389 emit_barrier_after (BB_END (rec));
4391 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
4392 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
4394 if (sched_verbose && spec_info->dump)
4395 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
4401 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
4402 and emit necessary jumps. */
4404 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
4405 basic_block second_bb)
4411 /* This fixes the incoming edge.  */
4412 /* ??? Which other flags should be specified? */
4413 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
4414 /* Partition type is the same, if it is "unpartitioned". */
4415 edge_flags = EDGE_CROSSING;
4419 make_edge (first_bb, rec, edge_flags);
4420 label = block_label (second_bb);
4421 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
4422 JUMP_LABEL (jump) = label;
4423 LABEL_NUSES (label)++;
4425 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
4426 /* Partition type is the same, if it is "unpartitioned". */
4428 /* Rewritten from cfgrtl.c. */
4429 if (flag_reorder_blocks_and_partition
4430 && targetm.have_named_sections)
4432 /* We don't need the same note for the check because
4433 any_condjump_p (check) == true. */
4434 add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
4436 edge_flags = EDGE_CROSSING;
4441 make_single_succ_edge (rec, second_bb, edge_flags);
4444 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
4445 INSN is a simple check that should be converted to a branchy one.  */
4447 create_check_block_twin (rtx insn, bool mutate_p)
4450 rtx label, check, twin;
4452 sd_iterator_def sd_it;
4454 dep_def _new_dep, *new_dep = &_new_dep;
4457 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
4460 todo_spec = TODO_SPEC (insn);
4463 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
4464 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
4466 todo_spec = CHECK_SPEC (insn);
4469 todo_spec &= SPECULATIVE;
4471 /* Create recovery block. */
4472 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
4474 rec = sched_create_recovery_block (NULL);
4475 label = BB_HEAD (rec);
4479 rec = EXIT_BLOCK_PTR;
4484 check = targetm.sched.gen_spec_check (insn, label, todo_spec);
4486 if (rec != EXIT_BLOCK_PTR)
4488 /* To have mem_reg alive at the beginning of second_bb,
4489    we emit the check BEFORE insn, so that after splitting,
4490    insn will be at the beginning of second_bb, which will
4491    provide us with the correct life information.  */
4492 check = emit_jump_insn_before (check, insn);
4493 JUMP_LABEL (check) = label;
4494 LABEL_NUSES (label)++;
4497 check = emit_insn_before (check, insn);
4499 /* Extend data structures. */
4500 haifa_init_insn (check);
4502 /* CHECK is being added to current region. Extend ready list. */
4503 gcc_assert (sched_ready_n_insns != -1);
4504 sched_extend_ready_list (sched_ready_n_insns + 1);
4506 if (current_sched_info->add_remove_insn)
4507 current_sched_info->add_remove_insn (insn, 0);
4509 RECOVERY_BLOCK (check) = rec;
4511 if (sched_verbose && spec_info->dump)
4512 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
4513 (*current_sched_info->print_insn) (check, 0));
4515 gcc_assert (ORIG_PAT (insn));
4517 /* Initialize TWIN (twin is a duplicate of original instruction
4518 in the recovery block). */
4519 if (rec != EXIT_BLOCK_PTR)
4521 sd_iterator_def sd_it;
4524 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
4525 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
4527 struct _dep _dep2, *dep2 = &_dep2;
4529 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
4531 sd_add_dep (dep2, true);
4534 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
4535 haifa_init_insn (twin);
4537 if (sched_verbose && spec_info->dump)
4538 /* INSN_BB (insn) isn't determined for twin insns yet.
4539 So we can't use current_sched_info->print_insn. */
4540 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4541 INSN_UID (twin), rec->index);
4545 ORIG_PAT (check) = ORIG_PAT (insn);
4546 HAS_INTERNAL_DEP (check) = 1;
4548 /* ??? We probably should change all OUTPUT dependencies to
4552 /* Copy all resolved back dependencies of INSN to TWIN. This will
4553 provide correct value for INSN_TICK (TWIN). */
4554 sd_copy_back_deps (twin, insn, true);
4556 if (rec != EXIT_BLOCK_PTR)
4557 /* In case of branchy check, fix CFG. */
4559 basic_block first_bb, second_bb;
4562 first_bb = BLOCK_FOR_INSN (check);
4563 second_bb = sched_split_block (first_bb, check);
4565 sched_create_recovery_edges (first_bb, rec, second_bb);
4567 sched_init_only_bb (second_bb, first_bb);
4568 sched_init_only_bb (rec, EXIT_BLOCK_PTR);
4570 jump = BB_END (rec);
4571 haifa_init_insn (jump);
4574 /* Move backward dependences from INSN to CHECK and
4575 move forward dependences from INSN to TWIN. */
4577 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
4578 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4580 rtx pro = DEP_PRO (dep);
4583 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
4584 check --TRUE--> producer ??? or ANTI ???
4585 twin --TRUE--> producer
4586 twin --ANTI--> check
4588 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
4589 check --ANTI--> producer
4590 twin --ANTI--> producer
4591 twin --ANTI--> check
4593 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
4594 check ~~TRUE~~> producer
4595 twin ~~TRUE~~> producer
4596 twin --ANTI--> check */
4598 ds = DEP_STATUS (dep);
4600 if (ds & BEGIN_SPEC)
4602 gcc_assert (!mutate_p);
4606 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
4607 sd_add_dep (new_dep, false);
4609 if (rec != EXIT_BLOCK_PTR)
4611 DEP_CON (new_dep) = twin;
4612 sd_add_dep (new_dep, false);
4616 /* Second, remove backward dependencies of INSN. */
4617 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4618 sd_iterator_cond (&sd_it, &dep);)
4620 if ((DEP_STATUS (dep) & BEGIN_SPEC)
4622 /* We can delete this dep because we overcome it with
4623 BEGIN_SPECULATION. */
4624 sd_delete_dep (sd_it);
4626 sd_iterator_next (&sd_it);
4629 /* Future Speculations. Determine what BE_IN speculations will be like. */
4632 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
4635 gcc_assert (!DONE_SPEC (insn));
4639 ds_t ts = TODO_SPEC (insn);
4641 DONE_SPEC (insn) = ts & BEGIN_SPEC;
4642 CHECK_SPEC (check) = ts & BEGIN_SPEC;
4644 /* Luckiness of future speculations solely depends upon initial
4645 BEGIN speculation. */
4646 if (ts & BEGIN_DATA)
4647 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
4648 if (ts & BEGIN_CONTROL)
4649 fs = set_dep_weak (fs, BE_IN_CONTROL,
4650 get_dep_weak (ts, BEGIN_CONTROL));
4653 CHECK_SPEC (check) = CHECK_SPEC (insn);
4655 /* Future speculations: call the helper. */
4656 process_insn_forw_deps_be_in_spec (insn, twin, fs);
4658 if (rec != EXIT_BLOCK_PTR)
4660 /* Which types of dependencies should we use here is,
4661 generally, machine-dependent question... But, for now,
4666 init_dep (new_dep, insn, check, REG_DEP_TRUE);
4667 sd_add_dep (new_dep, false);
4669 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4670 sd_add_dep (new_dep, false);
4674 if (spec_info->dump)
4675 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
4676 (*current_sched_info->print_insn) (insn, 0));
4678 /* Remove all dependencies of the INSN. */
4680 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
4682 | SD_LIST_RES_BACK));
4683 while (sd_iterator_cond (&sd_it, &dep))
4684 sd_delete_dep (sd_it);
4687 /* If former check (INSN) already was moved to the ready (or queue)
4688 list, add new check (CHECK) there too. */
4689 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
4692 /* Remove old check from instruction stream and free its
4694 sched_remove_insn (insn);
4697 init_dep (new_dep, check, twin, REG_DEP_ANTI);
4698 sd_add_dep (new_dep, false);
4702 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
4703 sd_add_dep (new_dep, false);
4707 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
4708 because it'll be done later in add_to_speculative_block. */
4710 rtx_vec_t priorities_roots = NULL;
4712 clear_priorities (twin, &priorities_roots);
4713 calc_priorities (priorities_roots);
4714 VEC_free (rtx, heap, priorities_roots);
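
/* Worked example (illustrative only; the assembly syntax is hypothetical).
   Suppose a load was data-speculatively moved above a possibly aliasing
   store:

       st  [r2] = r3     ;; stays put
       ld  r1 = [r4]     ;; moved up speculatively (INSN)

   create_check_block_twin emits CHECK -- a speculation check of r1 that
   branches to the recovery block on failure -- in place of INSN, and TWIN,
   a copy of the original load, inside the recovery block.  The dependencies
   built above (INSN's producers feeding both CHECK and TWIN, plus the
   anti-dependence of TWIN on CHECK) guarantee that the recovery copy can
   only issue after the check, on the recovery path.  */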
/* Removes dependencies between instructions in the recovery block REC
   and the usual region instructions.  It keeps inner dependences
   so it won't be necessary to recompute them.  */
static void
fix_recovery_deps (basic_block rec)
{
  rtx note, insn, jump, ready_list = 0;
  bitmap_head in_ready;
  rtx link;

  bitmap_initialize (&in_ready, 0);

  /* NOTE - a basic block note.  */
  note = NEXT_INSN (BB_HEAD (rec));
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  insn = BB_END (rec);
  gcc_assert (JUMP_P (insn));
  insn = PREV_INSN (insn);

  do
    {
      sd_iterator_def sd_it;
      dep_t dep;

      for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
           sd_iterator_cond (&sd_it, &dep);)
        {
          rtx consumer = DEP_CON (dep);

          if (BLOCK_FOR_INSN (consumer) != rec)
            {
              sd_delete_dep (sd_it);

              if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
                {
                  ready_list = alloc_INSN_LIST (consumer, ready_list);
                  bitmap_set_bit (&in_ready, INSN_LUID (consumer));
                }
            }
          else
            {
              gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

              sd_iterator_next (&sd_it);
            }
        }

      insn = PREV_INSN (insn);
    }
  while (insn != note);

  bitmap_clear (&in_ready);

  /* Try to add instructions to the ready or queue list.  */
  for (link = ready_list; link; link = XEXP (link, 1))
    try_ready (XEXP (link, 0));
  free_INSN_LIST_list (&ready_list);

  /* Fixing jump's dependences.  */
  insn = BB_HEAD (rec);
  jump = BB_END (rec);

  gcc_assert (LABEL_P (insn));
  insn = NEXT_INSN (insn);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
  add_jump_dependencies (insn, jump);
}
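
/* After this function the recovery block's insns keep only their inner
   true dependencies (asserted above) and the anti-dependencies on the
   closing jump added by add_jump_dependencies; consumers outside REC
   that lost a dependency have been re-examined through try_ready.  */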
/* Change pattern of INSN to NEW_PAT.  */
void
sched_change_pattern (rtx insn, rtx new_pat)
{
  int t;

  t = validate_change (insn, &PATTERN (insn), new_pat, 0);
  gcc_assert (t);
  dfa_clear_single_insn_cache (insn);
}
/* Change pattern of INSN to NEW_PAT.  Invalidate cached haifa
   instruction data.  */
static void
haifa_change_pattern (rtx insn, rtx new_pat)
{
  sched_change_pattern (insn, new_pat);

  /* Invalidate INSN_COST, so it'll be recalculated.  */
  INSN_COST (insn) = -1;
  /* Invalidate INSN_TICK, so it'll be recalculated.  */
  INSN_TICK (insn) = INVALID_TICK;
}
/* -1 - can't speculate,
   0 - for speculation with REQUEST mode it is OK to use
   current instruction pattern,
   1 - need to change pattern for *NEW_PAT to be speculative.  */
int
sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION
              && (request & SPECULATIVE)
              && sched_insn_is_legitimate_for_speculation_p (insn, request));

  if ((request & spec_info->mask) != request)
    return -1;

  if (request & BE_IN_SPEC
      && !(request & BEGIN_SPEC))
    return 0;

  return targetm.sched.speculate_insn (insn, request, new_pat);
}
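
/* A minimal caller sketch (hypothetical, for illustration only):

     rtx new_pat;

     switch (sched_speculate_insn (insn, BEGIN_DATA, &new_pat))
       {
       case -1:
         ... give up: INSN cannot be data-speculated ...
       case 0:
         ... the current pattern already suits BEGIN_DATA speculation ...
       case 1:
         haifa_change_pattern (insn, new_pat);
         ...
       }  */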
static int
haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
{
  gcc_assert (sched_deps_info->generate_spec_deps
              && !IS_SPECULATION_CHECK_P (insn));

  if (HAS_INTERNAL_DEP (insn)
      || SCHED_GROUP_P (insn))
    return -1;

  return sched_speculate_insn (insn, request, new_pat);
}
/* Print some information about block BB, which starts with HEAD and
   ends with TAIL, before scheduling it.
   I is zero if the scheduler is about to start with a fresh ebb.  */
static void
dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
{
  if (!i)
    fprintf (sched_dump,
             ";;   ======================================================\n");
  else
    fprintf (sched_dump,
             ";;   =====================ADVANCING TO=====================\n");
  fprintf (sched_dump,
           ";;   -- basic block %d from %d to %d -- %s reload\n",
           bb->index, INSN_UID (head), INSN_UID (tail),
           (reload_completed ? "after" : "before"));
  fprintf (sched_dump,
           ";;   ======================================================\n");
  fprintf (sched_dump, "\n");
}
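
/* Example of the resulting dump (the numbers are illustrative):

     ;;   ======================================================
     ;;   -- basic block 3 from 24 to 57 -- before reload
     ;;   ======================================================  */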
/* Unlink basic block notes and labels and save them, so they
   can be easily restored.  We unlink basic block notes in EBB to
   provide back-compatibility with the previous code, as target backends
   assume that there'll be only instructions between
   current_sched_info->{head and tail}.  We restore these notes as soon
   as we can.
   FIRST (LAST) is the first (last) basic block in the ebb.
   NB: In usual case (FIRST == LAST) nothing is really done.  */
void
unlink_bb_notes (basic_block first, basic_block last)
{
  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  if (first == last)
    return;

  bb_header = XNEWVEC (rtx, last_basic_block);

  /* Make a sentinel.  */
  if (last->next_bb != EXIT_BLOCK_PTR)
    bb_header[last->next_bb->index] = 0;

  first = first->next_bb;
  do
    {
      rtx prev, label, note, next;

      label = BB_HEAD (last);
      if (LABEL_P (label))
        note = NEXT_INSN (label);
      else
        note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      prev = PREV_INSN (label);
      next = NEXT_INSN (note);
      gcc_assert (prev && next);

      NEXT_INSN (prev) = next;
      PREV_INSN (next) = prev;

      bb_header[last->index] = label;

      if (last == first)
        break;

      last = last->prev_bb;
    }
  while (1);
}
/* Restore basic block notes.
   FIRST is the first basic block in the ebb.  */
void
restore_bb_notes (basic_block first)
{
  if (!bb_header)
    return;

  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  first = first->next_bb;
  /* Remember: FIRST is actually a second basic block in the ebb.  */

  while (first != EXIT_BLOCK_PTR
         && bb_header[first->index])
    {
      rtx prev, label, note, next;

      label = bb_header[first->index];
      prev = PREV_INSN (label);
      next = NEXT_INSN (prev);

      if (LABEL_P (label))
        note = NEXT_INSN (label);
      else
        note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      bb_header[first->index] = 0;

      NEXT_INSN (prev) = label;
      NEXT_INSN (note) = next;
      PREV_INSN (next) = note;

      first = first->next_bb;
    }

  free (bb_header);
  bb_header = 0;
}
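
/* The two functions above are used as a pair (an illustrative call
   sequence, assuming an ebb spanning FIRST to LAST):

     unlink_bb_notes (first, last);
     ... schedule the ebb: only real insns now remain between
         current_sched_info->head and tail ...
     restore_bb_notes (first);  */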
/* Helper function.
   Fix CFG after both in- and inter-block movement of
   control_flow_insn_p JUMP.  */
static void
fix_jump_move (rtx jump)
{
  basic_block bb, jump_bb, jump_bb_next;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
              || IS_SPECULATION_BRANCHY_CHECK_P (jump));

  if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
    /* if jump_bb_next is not empty.  */
    BB_END (jump_bb) = BB_END (jump_bb_next);

  if (BB_END (bb) != PREV_INSN (jump))
    /* Then there are instructions after jump that should be placed
       to jump_bb_next.  */
    BB_END (jump_bb_next) = BB_END (bb);
  else
    /* Otherwise jump_bb_next is empty.  */
    BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));

  /* To make assertion in move_insn happy.  */
  BB_END (bb) = PREV_INSN (jump);

  update_bb_for_insn (jump_bb_next);
}
/* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
static void
move_block_after_check (rtx jump)
{
  basic_block bb, jump_bb, jump_bb_next;
  VEC(edge,gc) *t;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  update_bb_for_insn (jump_bb);

  gcc_assert (IS_SPECULATION_CHECK_P (jump)
              || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));

  unlink_block (jump_bb_next);
  link_block (jump_bb_next, bb);

  t = bb->succs;
  bb->succs = 0;
  move_succs (&(jump_bb->succs), bb);
  move_succs (&(jump_bb_next->succs), jump_bb);
  move_succs (&t, jump_bb_next);

  df_mark_solutions_dirty ();

  common_sched_info->fix_recovery_cfg
    (bb->index, jump_bb->index, jump_bb_next->index);
}
/* Helper function for move_block_after_check.
   This function attaches edge vector pointed to by SUCCSP to
   block TO.  */
static void
move_succs (VEC(edge,gc) **succsp, basic_block to)
{
  edge e;
  edge_iterator ei;

  gcc_assert (to->succs == 0);

  to->succs = *succsp;

  FOR_EACH_EDGE (e, ei, to->succs)
    e->src = to;

  *succsp = 0;
}
/* Remove INSN from the instruction stream.
   INSN should not have any dependencies.  */
static void
sched_remove_insn (rtx insn)
{
  sd_finish_insn (insn);

  change_queue_index (insn, QUEUE_NOWHERE);
  current_sched_info->add_remove_insn (insn, 1);
  remove_insn (insn);
}
/* Clear priorities of all instructions that are forward dependent on INSN.
   Store in vector pointed to by ROOTS_PTR insns on which priority () should
   be invoked to initialize all cleared priorities.  */
static void
clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
{
  sd_iterator_def sd_it;
  dep_t dep;
  bool insn_is_root_p = true;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx pro = DEP_PRO (dep);

      if (INSN_PRIORITY_STATUS (pro) >= 0
          && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
        {
          /* If DEP doesn't contribute to priority then INSN itself should
             be added to priority roots.  */
          if (contributes_to_priority_p (dep))
            insn_is_root_p = false;

          INSN_PRIORITY_STATUS (pro) = -1;
          clear_priorities (pro, roots_ptr);
        }
    }

  if (insn_is_root_p)
    VEC_safe_push (rtx, heap, *roots_ptr, insn);
}
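
/* Illustrative example (not from the original sources).  For a chain
   A --> B --> C of contributing true dependencies, clear_priorities (C,
   &roots) invalidates the priorities of B and A.  A has no remaining
   producers with known priorities, so it becomes a root; calc_priorities
   then calls priority (A), which descends through the forward dependency
   lists and recomputes B's (and, as needed, C's) priority on the way.  */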
/* Recompute priorities of instructions whose priorities might have been
   changed.  ROOTS is a vector of instructions whose priority computation will
   trigger initialization of all cleared priorities.  */
static void
calc_priorities (rtx_vec_t roots)
{
  int i;
  rtx insn;

  for (i = 0; VEC_iterate (rtx, roots, i, insn); i++)
    priority (insn);
}
/* Add dependences between JUMP and other instructions in the recovery
   block.  INSN is the first insn in the recovery block.  */
static void
add_jump_dependencies (rtx insn, rtx jump)
{
  do
    {
      insn = NEXT_INSN (insn);
      if (insn == jump)
        break;

      if (dep_list_size (insn) == 0)
        {
          dep_def _new_dep, *new_dep = &_new_dep;

          init_dep (new_dep, insn, jump, REG_DEP_ANTI);
          sd_add_dep (new_dep, false);
        }
    }
  while (1);

  gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
}
/* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
rtx
bb_note (basic_block bb)
{
  rtx note;

  note = BB_HEAD (bb);
  if (LABEL_P (note))
    note = NEXT_INSN (note);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  return note;
}
#ifdef ENABLE_CHECKING
/* Helper function for check_cfg.
   Return nonzero, if edge vector pointed to by EL has edge with TYPE in
   its flags.  */
static int
has_edge_p (VEC(edge,gc) *el, int type)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, el)
    if (e->flags & type)
      return 1;
  return 0;
}
/* Search back, starting at INSN, for an insn that is not a
   NOTE_INSN_VAR_LOCATION.  Don't search beyond HEAD, and return it if
   no such insn can be found.  */
static rtx
prev_non_location_insn (rtx insn, rtx head)
{
  while (insn != head && NOTE_P (insn)
         && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
    insn = PREV_INSN (insn);

  return insn;
}
/* Check a few properties of the CFG between HEAD and TAIL.
   If HEAD (TAIL) is NULL check from the beginning (till the end) of the
   instruction stream.  */
static void
check_cfg (rtx head, rtx tail)
{
  rtx next_tail;
  basic_block bb = 0;
  int not_first = 0, not_last;

  if (head == NULL)
    head = get_insns ();
  if (tail == NULL)
    tail = get_last_insn ();
  next_tail = NEXT_INSN (tail);

  do
    {
      not_last = head != tail;

      if (not_first)
        gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
      if (not_last)
        gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);

      if (LABEL_P (head)
          || (NOTE_INSN_BASIC_BLOCK_P (head)
              && (!not_first
                  || (not_first && !LABEL_P (PREV_INSN (head))))))
        {
          gcc_assert (bb == 0);
          bb = BLOCK_FOR_INSN (head);
          if (bb != 0)
            gcc_assert (BB_HEAD (bb) == head);
          else
            /* This is the case of jump table.  See inside_basic_block_p ().  */
            gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
        }

      if (bb == 0)
        {
          gcc_assert (!inside_basic_block_p (head));
          head = NEXT_INSN (head);
        }
      else
        {
          gcc_assert (inside_basic_block_p (head)
                      || NOTE_P (head));
          gcc_assert (BLOCK_FOR_INSN (head) == bb);

          if (LABEL_P (head))
            {
              head = NEXT_INSN (head);
              gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
            }
          else
            {
              if (control_flow_insn_p (head))
                {
                  gcc_assert (prev_non_location_insn (BB_END (bb), head)
                              == head);

                  if (any_uncondjump_p (head))
                    gcc_assert (EDGE_COUNT (bb->succs) == 1
                                && BARRIER_P (NEXT_INSN (head)));
                  else if (any_condjump_p (head))
                    gcc_assert (/* Usual case.  */
                                (EDGE_COUNT (bb->succs) > 1
                                 && !BARRIER_P (NEXT_INSN (head)))
                                /* Or jump to the next instruction.  */
                                || (EDGE_COUNT (bb->succs) == 1
                                    && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
                                        == JUMP_LABEL (head))));
                }
              if (BB_END (bb) == head)
                {
                  if (EDGE_COUNT (bb->succs) > 1)
                    gcc_assert (control_flow_insn_p (prev_non_location_insn
                                                     (head, BB_HEAD (bb)))
                                || has_edge_p (bb->succs, EDGE_COMPLEX));
                  bb = 0;
                }

              head = NEXT_INSN (head);
            }
        }

      not_first = 1;
    }
  while (head != next_tail);

  gcc_assert (bb == 0);
}

#endif /* ENABLE_CHECKING */
/* Extend per basic block data structures.  */
static void
extend_bb (void)
{
  if (sched_scan_info->extend_bb)
    sched_scan_info->extend_bb ();
}

/* Init data for BB.  */
static void
init_bb (basic_block bb)
{
  if (sched_scan_info->init_bb)
    sched_scan_info->init_bb (bb);
}

/* Extend per insn data structures.  */
static void
extend_insn (void)
{
  if (sched_scan_info->extend_insn)
    sched_scan_info->extend_insn ();
}

/* Init data structures for INSN.  */
static void
init_insn (rtx insn)
{
  if (sched_scan_info->init_insn)
    sched_scan_info->init_insn (insn);
}

/* Init all insns in BB.  */
static void
init_insns_in_bb (basic_block bb)
{
  rtx insn;

  FOR_BB_INSNS (bb, insn)
    init_insn (insn);
}
/* A driver function to add a set of basic blocks (BBS),
   a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
   to the scheduling region.  */
void
sched_scan (const struct sched_scan_info_def *ssi,
            bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
{
  sched_scan_info = ssi;

  if (bbs != NULL || bb != NULL)
    {
      extend_bb ();

      if (bbs != NULL)
        {
          unsigned i;
          basic_block x;

          for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
            init_bb (x);
        }

      if (bb != NULL)
        init_bb (bb);
    }

  extend_insn ();

  if (bbs != NULL)
    {
      unsigned i;
      basic_block x;

      for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
        init_insns_in_bb (x);
    }

  if (bb != NULL)
    init_insns_in_bb (bb);

  if (insns != NULL)
    {
      unsigned i;
      rtx x;

      for (i = 0; VEC_iterate (rtx, insns, i, x); i++)
        init_insn (x);
    }

  if (insn != NULL)
    init_insn (insn);
}
/* Extend data structures for logical insn UID.  */
static void
luids_extend_insn (void)
{
  int new_luids_max_uid = get_max_uid () + 1;

  VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
}
/* Initialize LUID for INSN.  */
static void
luids_init_insn (rtx insn)
{
  int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
  int luid;

  if (i >= 0)
    {
      luid = sched_max_luid;
      sched_max_luid += i;
    }
  else
    luid = -1;

  SET_INSN_LUID (insn, luid);
}
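
/* Note the encoding used above: a real insn always reserves one fresh
   luid, while for notes, labels, etc. the luid_for_non_insn hook returns
   either a nonnegative number of luids to reserve or a negative value, in
   which case the rtx gets luid -1, i.e. no luid at all.  */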
/* Initialize luids for BBS, BB, INSNS and INSN.
   The hook common_sched_info->luid_for_non_insn () is used to determine
   if notes, labels, etc. need luids.  */
void
sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
{
  const struct sched_scan_info_def ssi =
    {
      NULL, /* extend_bb */
      NULL, /* init_bb */
      luids_extend_insn, /* extend_insn */
      luids_init_insn /* init_insn */
    };

  sched_scan (&ssi, bbs, bb, insns, insn);
}
/* Free LUIDs.  */
void
sched_finish_luids (void)
{
  VEC_free (int, heap, sched_luids);
  sched_max_luid = 1;
}
/* Return logical uid of INSN.  Helpful while debugging.  */
int
insn_luid (rtx insn)
{
  return INSN_LUID (insn);
}
/* Extend per insn data in the target.  */
void
sched_extend_target (void)
{
  if (targetm.sched.h_i_d_extended)
    targetm.sched.h_i_d_extended ();
}
/* Extend global scheduler structures (those that live across calls to
   schedule_block) to include information about just emitted INSN.  */
static void
extend_h_i_d (void)
{
  int reserve = (get_max_uid () + 1
                 - VEC_length (haifa_insn_data_def, h_i_d));
  if (reserve > 0
      && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
    {
      VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
                             3 * get_max_uid () / 2);
      sched_extend_target ();
    }
}
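
/* The vector is grown to 3/2 of the current maximal uid rather than by
   just RESERVE elements, so repeated insn creation (e.g. the checks and
   twins emitted above) causes only an amortized-constant number of
   reallocations per insn.  */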
/* Initialize h_i_d entry of the INSN with default values.
   Values that are not explicitly initialized here hold zero.  */
static void
init_h_i_d (rtx insn)
{
  if (INSN_LUID (insn) > 0)
    {
      INSN_COST (insn) = -1;
      QUEUE_INDEX (insn) = QUEUE_NOWHERE;
      INSN_TICK (insn) = INVALID_TICK;
      INTER_TICK (insn) = INVALID_TICK;
      TODO_SPEC (insn) = HARD_DEP;
    }
}
/* Initialize haifa_insn_data for BBS, BB, INSNS and INSN.  */
void
haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
{
  const struct sched_scan_info_def ssi =
    {
      NULL, /* extend_bb */
      NULL, /* init_bb */
      extend_h_i_d, /* extend_insn */
      init_h_i_d /* init_insn */
    };

  sched_scan (&ssi, bbs, bb, insns, insn);
}
/* Finalize haifa_insn_data.  */
void
haifa_finish_h_i_d (void)
{
  int i;
  haifa_insn_data_t data;
  struct reg_use_data *use, *next;

  for (i = 0; VEC_iterate (haifa_insn_data_def, h_i_d, i, data); i++)
    {
      if (data->reg_pressure != NULL)
        free (data->reg_pressure);
      for (use = data->reg_use_list; use != NULL; use = next)
        {
          next = use->next_insn_use;
          free (use);
        }
    }
  VEC_free (haifa_insn_data_def, heap, h_i_d);
}
/* Init data for the new insn INSN.  */
static void
haifa_init_insn (rtx insn)
{
  gcc_assert (insn != NULL);

  sched_init_luids (NULL, NULL, NULL, insn);
  sched_extend_target ();
  sched_deps_init (false);
  haifa_init_h_i_d (NULL, NULL, NULL, insn);

  if (adding_bb_to_current_region_p)
    {
      sd_init_insn (insn);

      /* Extend dependency caches by one element.  */
      extend_dependency_caches (1, false);
    }
}
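
/* haifa_init_insn must be called for every insn created in the middle of
   scheduling -- see the uses above for the CHECK, TWIN and recovery-block
   jump in create_check_block_twin, and for sched_emit_insn below.  */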
/* Init data for the new basic block BB which comes after AFTER.  */
static void
haifa_init_only_bb (basic_block bb, basic_block after)
{
  gcc_assert (bb != NULL);

  sched_init_bbs ();

  if (common_sched_info->add_block)
    /* This changes only data structures of the front-end.  */
    common_sched_info->add_block (bb, after);
}
/* A generic version of sched_split_block ().  */
basic_block
sched_split_block_1 (basic_block first_bb, rtx after)
{
  edge e;

  e = split_block (first_bb, after);
  gcc_assert (e->src == first_bb);

  /* sched_split_block emits note if *check == BB_END.  Probably it
     is better to rip that note off.  */

  return e->dest;
}
/* A generic version of sched_create_empty_bb ().  */
basic_block
sched_create_empty_bb_1 (basic_block after)
{
  return create_empty_bb (after);
}
/* Insert PAT as an INSN into the schedule and update the necessary data
   structures to account for it.  */
rtx
sched_emit_insn (rtx pat)
{
  rtx insn = emit_insn_after (pat, last_scheduled_insn);
  last_scheduled_insn = insn;
  haifa_init_insn (insn);

  return insn;
}
#endif /* INSN_SCHEDULING */