1 /* Instruction scheduling pass.
2 Copyright (C) 1992 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com)
4 Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)
6 This file is part of GNU CC.
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
13 GNU CC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GNU CC; see the file COPYING. If not, write to
20 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
22 /* Instruction scheduling pass.
24 This pass implements list scheduling within basic blocks. It is
25 run after flow analysis, but before register allocation. The
26 scheduler works as follows:
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning
39 values to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we also add it to the ready list. When all insns down
50 to the lowest priority have been scheduled, the critical path of the
51 basic block has been made as short as possible. The remaining insns
52 are then scheduled in remaining slots.
54 The following list shows the order in which we want to break ties:
56 1. choose insn with lowest conflict cost, ties broken by
57 2. choose insn with the longest path to end of bb, ties broken by
58 3. choose insn that kills the most registers, ties broken by
59 4. choose insn that conflicts with the most ready insns, or finally
60 5. choose insn with lowest UID.
62 Memory references complicate matters. Only if we can be certain
63 that memory references are not part of the data dependency graph
64 (via true, anti, or output dependence), can we move operations past
65 memory references. To first approximation, reads can be done
66 independently, while writes introduce dependencies. Better
67 approximations will yield fewer dependencies.
69 Dependencies set up by memory references are treated in exactly the
70 same way as other dependencies, by using LOG_LINKS.
72 Having optimized the critical path, we may have also unduly
73 extended the lifetimes of some registers. If an operation requires
74 that constants be loaded into registers, it is certainly desirable
75 to load those constants as early as necessary, but no earlier.
76 I.e., it will not do to load up a bunch of registers at the
77 beginning of a basic block only to use them at the end, if they
78 could be loaded later, since this may result in excessive register
79 pressure.
81 Note that since branches are never in basic blocks, but only end
82 basic blocks, this pass will not do any branch scheduling. But
83 that is ok, since we can use GNU's delayed branch scheduling
84 pass to take care of this case.
86 Also note that no further optimizations based on algebraic identities
87 are performed, so this pass would be a good one to perform instruction
88 splitting, such as breaking up a multiply instruction into shifts
89 and adds where that is profitable.
91 Given the memory aliasing analysis that this pass should perform,
92 it should be possible to remove redundant stores to memory, and to
93 load values from registers instead of hitting memory.
95 This pass must update information that subsequent passes expect to be
96 correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
97 reg_n_calls_crossed, and reg_live_length. Also, basic_block_head,
98 basic_block_end.
100 The information in the line number notes is carefully retained by this
101 pass. All other NOTE insns are grouped in their same relative order at
102 the beginning of basic blocks that have been scheduled. */
107 #include "basic-block.h"
109 #include "hard-reg-set.h"
111 #include "insn-config.h"
112 #include "insn-attr.h"
114 /* Arrays set up by scheduling for the same respective purposes as
115 similar-named arrays set up by flow analysis. We work with these
116 arrays during the scheduling pass so we can compare values against
119 Values of these arrays are copied at the end of this pass into the
120 arrays set up by flow analysis. */
121 static short *sched_reg_n_deaths;
122 static int *sched_reg_n_calls_crossed;
123 static int *sched_reg_live_length;
125 /* Element N is the next insn that sets (hard or pseudo) register
126 N within the current basic block; or zero, if there is no
127 such insn. Needed for new registers which may be introduced
128 by splitting insns. */
129 static rtx *reg_last_uses;
130 static rtx *reg_last_sets;
132 /* Vector indexed by INSN_UID giving the original ordering of the insns. */
133 static int *insn_luid;
134 #define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])
136 /* Vector indexed by INSN_UID giving each instruction a priority. */
137 static int *insn_priority;
138 #define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])
140 static short *insn_costs;
141 #define INSN_COST(INSN) insn_costs[INSN_UID (INSN)]
143 #define DONE_PRIORITY -1
144 #define MAX_PRIORITY 0x7fffffff
145 #define TAIL_PRIORITY 0x7ffffffe
146 #define LAUNCH_PRIORITY 0x7f000001
147 #define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
148 #define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)
150 /* Vector indexed by INSN_UID giving number of insns referring to this insn. */
151 static int *insn_ref_count;
152 #define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])
154 /* Vector indexed by INSN_UID giving line-number note in effect for each
155 insn. For line-number notes, this indicates whether the note may be
156 reused.
157 static rtx *line_note;
158 #define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])
160 /* Vector indexed by basic block number giving the starting line-number
161 for each basic block. */
162 static rtx *line_note_head;
164 /* List of important notes we must keep around. This is a pointer to the
165 last element in the list. */
166 static rtx note_list;
168 /* Regsets telling whether a given register is live or dead before the last
169 scheduled insn. Must scan the instructions once before scheduling to
170 determine what registers are live or dead at the end of the block. */
171 static regset bb_dead_regs;
172 static regset bb_live_regs;
174 /* Regset telling whether a given register is live after the insn currently
175 being scheduled. Before processing an insn, this is equal to bb_live_regs
176 above. This is used so that we can find registers that are newly born/dead
177 after processing an insn. */
178 static regset old_live_regs;
180 /* The chain of REG_DEAD notes. REG_DEAD notes are removed from all insns
181 during the initial scan and reused later. If there are not exactly as
182 many REG_DEAD notes in the post scheduled code as there were in the
183 prescheduled code then we trigger an abort because this indicates a bug. */
184 static rtx dead_notes;
188 /* An instruction is ready to be scheduled when all insns following it
189 have already been scheduled. It is important to ensure that all
190 insns which use its result will not be executed until its result
191 has been computed. We maintain three lists (conceptually):
193 (1) a "Ready" list of unscheduled, uncommitted insns
194 (2) a "Scheduled" list of scheduled insns
195 (3) a "Pending" list of insns which can be scheduled, but
198 Insns move from the "Ready" list to the "Pending" list when
199 all insns following them have been scheduled.
201 Insns move from the "Pending" list to the "Scheduled" list
202 when there is sufficient space in the pipeline to prevent
203 stalls between the insn and scheduled insns which use it.
205 The "Pending" list acts as a buffer to prevent insns
208 The "Ready" list is implemented by the variable `ready'.
209 The "Pending" list are the insns in the LOG_LINKS of ready insns.
210 The "Scheduled" list is the new insn chain built by this pass. */
212 /* Implement a circular buffer from which instructions are issued. */
214 static rtx insn_queue[Q_SIZE];
215 static int q_ptr = 0;
216 static int q_size = 0;
217 #define NEXT_Q(X) (((X)+1) & (Q_SIZE-1))
218 #define NEXT_Q_AFTER(X,C) (((X)+C) & (Q_SIZE-1))
220 /* Forward declarations. */
221 static void sched_analyze_2 ();
222 static void schedule_block ();
224 /* Main entry point of this file. */
225 void schedule_insns ();
227 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
229 /* Vector indexed by N giving the initial (unchanging) value known
230 for pseudo-register N. */
231 static rtx *reg_known_value;
233 /* Indicates number of valid entries in reg_known_value. */
234 static int reg_known_value_size;
240 if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
241 && REGNO (x) <= reg_known_value_size)
242 return reg_known_value[REGNO (x)];
243 else if (GET_CODE (x) == PLUS)
245 rtx x0 = canon_rtx (XEXP (x, 0));
246 rtx x1 = canon_rtx (XEXP (x, 1));
248 if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
250 /* We can tolerate LO_SUMs being offset here; these
251 rtl are used for nothing other than comparisons. */
252 if (GET_CODE (x0) == CONST_INT)
253 return plus_constant_for_output (x1, INTVAL (x0));
254 else if (GET_CODE (x1) == CONST_INT)
255 return plus_constant_for_output (x0, INTVAL (x1));
256 return gen_rtx (PLUS, GET_MODE (x), x0, x1);
262 /* Set up all info needed to perform alias analysis on memory references. */
265 init_alias_analysis ()
267 int maxreg = max_reg_num ();
272 reg_known_value_size = maxreg;
275 = (rtx *) oballoc ((maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx))
276 - FIRST_PSEUDO_REGISTER;
277 bzero (reg_known_value+FIRST_PSEUDO_REGISTER,
278 (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx));
280 /* Fill in the entries with known constant values. */
281 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
282 if ((set = single_set (insn)) != 0
283 && GET_CODE (SET_DEST (set)) == REG
284 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
285 && (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
286 && reg_n_sets[REGNO (SET_DEST (set))] == 1)
287 || (note = find_reg_note (insn, REG_EQUIV, 0)) != 0)
288 && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
289 reg_known_value[REGNO (SET_DEST (set))] = XEXP (note, 0);
291 /* Fill in the remaining entries. */
292 while (--maxreg >= FIRST_PSEUDO_REGISTER)
293 if (reg_known_value[maxreg] == 0)
294 reg_known_value[maxreg] = regno_reg_rtx[maxreg];
297 /* Return 1 if X and Y are identical-looking rtx's.
299 We use the data in reg_known_value above to see if two registers with
300 different numbers are, in fact, equivalent. */
303 rtx_equal_for_memref_p (x, y)
308 register enum rtx_code code;
311 if (x == 0 && y == 0)
313 if (x == 0 || y == 0)
322 /* Rtx's of different codes cannot be equal. */
323 if (code != GET_CODE (y))
326 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
327 (REG:SI x) and (REG:HI x) are NOT equivalent. */
329 if (GET_MODE (x) != GET_MODE (y))
332 /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively. */
335 return REGNO (x) == REGNO (y);
336 if (code == LABEL_REF)
337 return XEXP (x, 0) == XEXP (y, 0);
338 if (code == SYMBOL_REF)
339 return XSTR (x, 0) == XSTR (y, 0);
341 /* Compare the elements. If any pair of corresponding elements
342 fail to match, return 0 for the whole things. */
344 fmt = GET_RTX_FORMAT (code);
345 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
351 if (XINT (x, i) != XINT (y, i))
357 /* Two vectors must have the same length. */
358 if (XVECLEN (x, i) != XVECLEN (y, i))
361 /* And the corresponding elements must match. */
362 for (j = 0; j < XVECLEN (x, i); j++)
363 if (rtx_equal_for_memref_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
368 if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0)
374 if (strcmp (XSTR (x, i), XSTR (y, i)))
379 /* These are just backpointers, so they don't matter. */
385 /* It is believed that rtx's at this level will never
386 contain anything but integers and other rtx's,
387 except for within LABEL_REFs and SYMBOL_REFs. */
395 /* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
396 X and return it, or return 0 if none found. */
399 find_symbolic_term (x)
403 register enum rtx_code code;
407 if (code == SYMBOL_REF || code == LABEL_REF)
409 if (GET_RTX_CLASS (code) == 'o')
412 fmt = GET_RTX_FORMAT (code);
413 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
419 t = find_symbolic_term (XEXP (x, i));
423 else if (fmt[i] == 'E')
429 /* Return nonzero if X and Y (memory addresses) could reference the
430 same location in memory. C is an offset accumulator. When
431 C is nonzero, we are testing aliases between X and Y + C.
432 XSIZE is the size in bytes of the X reference,
433 similarly YSIZE is the size in bytes for Y.
435 If XSIZE or YSIZE is zero, we do not know the amount of memory being
436 referenced (the reference was BLKmode), so make the most pessimistic
439 We recognize the following cases of non-conflicting memory:
441 (1) addresses involving the frame pointer cannot conflict
442 with addresses involving static variables.
443 (2) static variables with different addresses cannot conflict.
445 Nice to notice that varying addresses cannot conflict with fp if no
446 local variables had their addresses taken, but that's too hard now. */
449 memrefs_conflict_p (xsize, x, ysize, y, c)
454 if (GET_CODE (x) == HIGH)
456 else if (GET_CODE (x) == LO_SUM)
460 if (GET_CODE (y) == HIGH)
462 else if (GET_CODE (y) == LO_SUM)
467 if (rtx_equal_for_memref_p (x, y))
468 return (xsize == 0 || ysize == 0 ||
469 (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
471 if (y == frame_pointer_rtx || y == stack_pointer_rtx)
475 y = x; ysize = xsize;
476 x = t; xsize = tsize;
479 if (x == frame_pointer_rtx || x == stack_pointer_rtx)
486 if (GET_CODE (y) == PLUS
487 && canon_rtx (XEXP (y, 0)) == x
488 && (y1 = canon_rtx (XEXP (y, 1)))
489 && GET_CODE (y1) == CONST_INT)
492 return (xsize == 0 || ysize == 0
493 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
496 if (GET_CODE (y) == PLUS
497 && (y1 = canon_rtx (XEXP (y, 0)))
504 if (GET_CODE (x) == PLUS)
506 /* The fact that X is canonicalized means that this
507 PLUS rtx is canonicalized. */
508 rtx x0 = XEXP (x, 0);
509 rtx x1 = XEXP (x, 1);
511 if (GET_CODE (y) == PLUS)
513 /* The fact that Y is canonicalized means that this
514 PLUS rtx is canonicalized. */
515 rtx y0 = XEXP (y, 0);
516 rtx y1 = XEXP (y, 1);
518 if (rtx_equal_for_memref_p (x1, y1))
519 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
520 if (rtx_equal_for_memref_p (x0, y0))
521 return memrefs_conflict_p (xsize, x1, ysize, y1, c);
522 if (GET_CODE (x1) == CONST_INT)
523 if (GET_CODE (y1) == CONST_INT)
524 return memrefs_conflict_p (xsize, x0, ysize, y0,
525 c - INTVAL (x1) + INTVAL (y1));
527 return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
528 else if (GET_CODE (y1) == CONST_INT)
529 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
531 /* Handle case where we cannot understand iteration operators,
532 but we notice that the base addresses are distinct objects. */
533 x = find_symbolic_term (x);
536 y = find_symbolic_term (y);
539 return rtx_equal_for_memref_p (x, y);
541 else if (GET_CODE (x1) == CONST_INT)
542 return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
544 else if (GET_CODE (y) == PLUS)
546 /* The fact that Y is canonicalized means that this
547 PLUS rtx is canonicalized. */
548 rtx y0 = XEXP (y, 0);
549 rtx y1 = XEXP (y, 1);
551 if (GET_CODE (y1) == CONST_INT)
552 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
557 if (GET_CODE (x) == GET_CODE (y))
558 switch (GET_CODE (x))
562 /* Handle cases where we expect the second operands to be the
563 same, and check only whether the first operand would conflict
566 rtx x1 = canon_rtx (XEXP (x, 1));
567 rtx y1 = canon_rtx (XEXP (y, 1));
568 if (! rtx_equal_for_memref_p (x1, y1))
570 x0 = canon_rtx (XEXP (x, 0));
571 y0 = canon_rtx (XEXP (y, 0));
572 if (rtx_equal_for_memref_p (x0, y0))
573 return (xsize == 0 || ysize == 0
574 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
576 /* Can't properly adjust our sizes. */
577 if (GET_CODE (x1) != CONST_INT)
579 xsize /= INTVAL (x1);
580 ysize /= INTVAL (x1);
582 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
588 if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
590 c += (INTVAL (y) - INTVAL (x));
591 return (xsize == 0 || ysize == 0
592 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
595 if (GET_CODE (x) == CONST)
597 if (GET_CODE (y) == CONST)
598 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
599 ysize, canon_rtx (XEXP (y, 0)), c);
601 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
604 if (GET_CODE (y) == CONST)
605 return memrefs_conflict_p (xsize, x, ysize,
606 canon_rtx (XEXP (y, 0)), c);
609 return (rtx_equal_for_memref_p (x, y)
610 && (xsize == 0 || ysize == 0
611 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)));
618 /* Functions to compute memory dependencies.
620 Since we process the insns in execution order, we can build tables
621 to keep track of what registers are fixed (and not aliased), what registers
622 are varying in known ways, and what registers are varying in unknown
625 If both memory references are volatile, then there must always be a
626 dependence between the two references, since their order can not be
627 changed. A volatile and non-volatile reference can be interchanged
628 though.
630 A MEM_IN_STRUCT reference at a varying address can never conflict with a
631 non-MEM_IN_STRUCT reference at a fixed address. */
633 /* Read dependence: X is read after read in MEM takes place. There can
634 only be a dependence here if both reads are volatile. */
637 read_dependence (mem, x)
641 return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
644 /* True dependence: X is read after store in MEM takes place. */
647 true_dependence (mem, x)
651 if (RTX_UNCHANGING_P (x))
654 return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
655 || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
656 SIZE_FOR_MODE (x), XEXP (x, 0), 0)
657 && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
658 && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
659 && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
660 && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
663 /* Anti dependence: X is written after read in MEM takes place. */
666 anti_dependence (mem, x)
670 if (RTX_UNCHANGING_P (mem))
673 return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
674 || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
675 SIZE_FOR_MODE (x), XEXP (x, 0), 0)
676 && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
677 && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
678 && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
679 && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
682 /* Output dependence: X is written after store in MEM takes place. */
685 output_dependence (mem, x)
689 return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
690 || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
691 SIZE_FOR_MODE (x), XEXP (x, 0), 0)
692 && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
693 && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
694 && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
695 && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
698 /* Helper functions for instruction scheduling. */
700 /* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
701 LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
702 of dependence that this link represents. */
705 add_dependence (insn, elem, dep_type)
708 enum reg_note dep_type;
712 /* Don't depend an insn on itself. */
716 /* If elem is part of a sequence that must be scheduled together, then
717 make the dependence point to the last insn of the sequence.
718 When HAVE_cc0, it is possible for NOTEs to exist between users and
719 setters of the condition codes, so we must skip past notes here.
720 Otherwise, NOTEs are impossible here. */
722 next = NEXT_INSN (elem);
725 while (next && GET_CODE (next) == NOTE)
726 next = NEXT_INSN (next);
729 if (next && SCHED_GROUP_P (next))
731 /* Notes will never intervene here though, so don't bother checking
733 while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next)))
734 next = NEXT_INSN (next);
736 /* Again, don't depend an insn on itself. */
740 /* Make the dependence to NEXT, the last insn of the group, instead
741 of the original ELEM. */
745 /* Check that we don't already have this dependence. */
746 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
747 if (XEXP (link, 0) == elem)
749 /* If this is a more restrictive type of dependence than the existing
750 one, then change the existing dependence to this type. */
751 if ((int) dep_type < (int) REG_NOTE_KIND (link))
752 PUT_REG_NOTE_KIND (link, dep_type);
755 /* Might want to check one level of transitivity to save conses. */
757 link = rtx_alloc (INSN_LIST);
758 /* Insn dependency, not data dependency. */
759 PUT_REG_NOTE_KIND (link, dep_type);
760 XEXP (link, 0) = elem;
761 XEXP (link, 1) = LOG_LINKS (insn);
762 LOG_LINKS (insn) = link;
765 /* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
766 of INSN. Abort if not found. */
768 remove_dependence (insn, elem)
775 for (prev = 0, link = LOG_LINKS (insn); link;
776 prev = link, link = XEXP (link, 1))
778 if (XEXP (link, 0) == elem)
781 XEXP (prev, 1) = XEXP (link, 1);
783 LOG_LINKS (insn) = XEXP (link, 1);
793 #ifndef INSN_SCHEDULING
794 void schedule_insns () {}
800 /* Computation of memory dependencies. */
802 /* The *_insns and *_mems are paired lists. Each pending memory operation
803 will have a pointer to the MEM rtx on one list and a pointer to the
804 containing insn on the other list in the same place in the list. */
806 /* We can't use add_dependence like the old code did, because a single insn
807 may have multiple memory accesses, and hence needs to be on the list
808 once for each memory access. Add_dependence won't let you add an insn
809 to a list more than once. */
811 /* An INSN_LIST containing all insns with pending read operations. */
812 static rtx pending_read_insns;
814 /* An EXPR_LIST containing all MEM rtx's which are pending reads. */
815 static rtx pending_read_mems;
817 /* An INSN_LIST containing all insns with pending write operations. */
818 static rtx pending_write_insns;
820 /* An EXPR_LIST containing all MEM rtx's which are pending writes. */
821 static rtx pending_write_mems;
823 /* Indicates the combined length of the two pending lists. We must prevent
824 these lists from ever growing too large since the number of dependencies
825 produced is at least O(N*N), and execution time is at least O(4*N*N), as
826 a function of the length of these pending lists. */
828 static int pending_lists_length;
830 /* An INSN_LIST containing all INSN_LISTs allocated but currently unused. */
832 static rtx unused_insn_list;
834 /* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused. */
836 static rtx unused_expr_list;
838 /* The last insn upon which all memory references must depend.
839 This is an insn which flushed the pending lists, creating a dependency
840 between it and all previously pending memory references. This creates
841 a barrier (or a checkpoint) which no memory reference is allowed to cross.
843 This includes all non constant CALL_INSNs. When we do interprocedural
844 alias analysis, this restriction can be relaxed.
845 This may also be an INSN that writes memory if the pending lists grow
848 static rtx last_pending_memory_flush;
850 /* The last function call we have seen. All hard regs, and, of course,
851 the last function call, must depend on this. */
853 static rtx last_function_call;
855 /* The LOG_LINKS field of this is a list of insns which use a pseudo register
856 that does not already cross a call. We create dependencies between each
857 of those insn and the next call insn, to ensure that they won't cross a call
858 after scheduling is done. */
860 static rtx sched_before_next_call;
862 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
863 so that insns independent of the last scheduled insn will be preferred
864 over dependent instructions. */
866 static rtx last_scheduled_insn;
868 /* Process an insn's memory dependencies. There are four kinds of
871 (0) read dependence: read follows read
872 (1) true dependence: read follows write
873 (2) anti dependence: write follows read
874 (3) output dependence: write follows write
876 We are careful to build only dependencies which actually exist, and
877 use transitivity to avoid building too many links. */
879 /* Return the INSN_LIST containing INSN in LIST, or NULL
880 if LIST does not contain INSN. */
883 find_insn_list (insn, list)
889 if (XEXP (list, 0) == insn)
891 list = XEXP (list, 1);
896 /* Compute cost of executing INSN. This is the number of virtual
897 cycles taken between instruction issue and instruction results. */
905 if (INSN_COST (insn))
906 return INSN_COST (insn);
908 recog_memoized (insn);
910 /* A USE insn, or something else we don't need to understand.
911 We can't pass these directly to result_ready_cost because it will trigger
912 a fatal error for unrecognizable insns. */
913 if (INSN_CODE (insn) < 0)
915 INSN_COST (insn) = 1;
920 cost = result_ready_cost (insn);
925 INSN_COST (insn) = cost;
930 /* Compute the priority number for INSN. */
936 if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
940 int this_priority = INSN_PRIORITY (insn);
943 if (this_priority > 0)
944 return this_priority;
948 /* Nonzero if these insns must be scheduled together. */
949 if (SCHED_GROUP_P (insn))
952 while (SCHED_GROUP_P (prev))
954 prev = PREV_INSN (prev);
955 INSN_REF_COUNT (prev) += 1;
959 for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
961 rtx x = XEXP (prev, 0);
963 /* A dependence pointing to a note is always obsolete, because
964 sched_analyze_insn will have created any necessary new dependences
965 which replace it. Notes can be created when instructions are
966 deleted by insn splitting, or by register allocation. */
967 if (GET_CODE (x) == NOTE)
969 remove_dependence (insn, x);
973 /* This priority calculation was chosen because it results in the
974 least instruction movement, and does not hurt the performance
975 of the resulting code compared to the old algorithm.
976 This makes the sched algorithm more stable, which results
977 in better code, because there is less register pressure,
978 cross jumping is more likely to work, and debugging is easier.
980 When all instructions have a latency of 1, there is no need to
981 move any instructions. Subtracting one here ensures that in such
982 cases all instructions will end up with a priority of one, and
983 hence no scheduling will be done.
985 The original code did not subtract the one, and added the
986 insn_cost of the current instruction to its priority (e.g.
987 move the insn_cost call down to the end). */
989 if (REG_NOTE_KIND (prev) == 0)
990 /* Data dependence. */
991 prev_priority = priority (x) + insn_cost (x) - 1;
993 /* Anti or output dependence. Don't add the latency of this
994 insn's result, because it isn't being used. */
995 prev_priority = priority (x);
997 if (prev_priority > max_priority)
998 max_priority = prev_priority;
999 INSN_REF_COUNT (x) += 1;
1002 INSN_PRIORITY (insn) = max_priority;
1003 return INSN_PRIORITY (insn);
1008 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
1009 them to the unused_*_list variables, so that they can be reused. */
1012 free_pending_lists ()
1014 register rtx link, prev_link;
1016 if (pending_read_insns)
1018 prev_link = pending_read_insns;
1019 link = XEXP (prev_link, 1);
1024 link = XEXP (link, 1);
1027 XEXP (prev_link, 1) = unused_insn_list;
1028 unused_insn_list = pending_read_insns;
1029 pending_read_insns = 0;
1032 if (pending_write_insns)
1034 prev_link = pending_write_insns;
1035 link = XEXP (prev_link, 1);
1040 link = XEXP (link, 1);
1043 XEXP (prev_link, 1) = unused_insn_list;
1044 unused_insn_list = pending_write_insns;
1045 pending_write_insns = 0;
1048 if (pending_read_mems)
1050 prev_link = pending_read_mems;
1051 link = XEXP (prev_link, 1);
1056 link = XEXP (link, 1);
1059 XEXP (prev_link, 1) = unused_expr_list;
1060 unused_expr_list = pending_read_mems;
1061 pending_read_mems = 0;
1064 if (pending_write_mems)
1066 prev_link = pending_write_mems;
1067 link = XEXP (prev_link, 1);
1072 link = XEXP (link, 1);
1075 XEXP (prev_link, 1) = unused_expr_list;
1076 unused_expr_list = pending_write_mems;
1077 pending_write_mems = 0;
1081 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1082 The MEM is a memory reference contained within INSN, which we are saving
1083 so that we can do memory aliasing on it. */
1086 add_insn_mem_dependence (insn_list, mem_list, insn, mem)
1087 rtx *insn_list, *mem_list, insn, mem;
1091 if (unused_insn_list)
1093 link = unused_insn_list;
1094 unused_insn_list = XEXP (link, 1);
1097 link = rtx_alloc (INSN_LIST);
1098 XEXP (link, 0) = insn;
1099 XEXP (link, 1) = *insn_list;
1102 if (unused_expr_list)
1104 link = unused_expr_list;
1105 unused_expr_list = XEXP (link, 1);
1108 link = rtx_alloc (EXPR_LIST);
1109 XEXP (link, 0) = mem;
1110 XEXP (link, 1) = *mem_list;
1113 pending_lists_length++;
1116 /* Make a dependency between every memory reference on the pending lists
1117 and INSN, thus flushing the pending lists. */
1120 flush_pending_lists (insn)
1125 while (pending_read_insns)
1127 add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);
1129 link = pending_read_insns;
1130 pending_read_insns = XEXP (pending_read_insns, 1);
1131 XEXP (link, 1) = unused_insn_list;
1132 unused_insn_list = link;
1134 link = pending_read_mems;
1135 pending_read_mems = XEXP (pending_read_mems, 1);
1136 XEXP (link, 1) = unused_expr_list;
1137 unused_expr_list = link;
1139 while (pending_write_insns)
1141 add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);
1143 link = pending_write_insns;
1144 pending_write_insns = XEXP (pending_write_insns, 1);
1145 XEXP (link, 1) = unused_insn_list;
1146 unused_insn_list = link;
1148 link = pending_write_mems;
1149 pending_write_mems = XEXP (pending_write_mems, 1);
1150 XEXP (link, 1) = unused_expr_list;
1151 unused_expr_list = link;
1153 pending_lists_length = 0;
1155 if (last_pending_memory_flush)
1156 add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1158 last_pending_memory_flush = insn;
1161 /* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
1162 by the write to the destination of X, and reads of everything mentioned. */
/* NOTE(review): elided listing — loop headers and braces around the
   hard-register case below are not shown.  */
1165 sched_analyze_1 (x, insn)
1170 register rtx dest = SET_DEST (x);
/* Strip wrappers to find the real destination; extracts also read
   their position/size operands, which we analyze as uses.  */
1175 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
1176 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
1178 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
1180 /* The second and third arguments are values read by this insn. */
1181 sched_analyze_2 (XEXP (dest, 1), insn);
1182 sched_analyze_2 (XEXP (dest, 2), insn);
1184 dest = SUBREG_REG (dest);
1187 if (GET_CODE (dest) == REG)
1189 register int offset, bit, i;
1191 regno = REGNO (dest);
1193 /* A hard reg in a wide mode may really be multiple registers.
1194 If so, mark all of them just like the first. */
1195 if (regno < FIRST_PSEUDO_REGISTER)
1197 i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
/* For each constituent hard reg: anti-deps on prior uses, an output
   dep on the prior set, then record INSN as the last setter.  */
1202 for (u = reg_last_uses[regno+i]; u; u = XEXP (u, 1))
1203 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1204 reg_last_uses[regno + i] = 0;
1205 if (reg_last_sets[regno + i])
1206 add_dependence (insn, reg_last_sets[regno + i],
1208 reg_last_sets[regno + i] = insn;
/* NOTE(review): the sibling loop in sched_analyze_2 indexes
   call_used_regs[regno + i]; the bare [i] here looks like an
   indexing bug — verify against the full source.  */
1209 if ((call_used_regs[i] || global_regs[i])
1210 && last_function_call)
1211 /* Function calls clobber all call_used regs. */
1212 add_dependence (insn, last_function_call, REG_DEP_ANTI);
/* Pseudo-register case: same bookkeeping on a single regno.  */
1219 for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
1220 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1221 reg_last_uses[regno] = 0;
1222 if (reg_last_sets[regno])
1223 add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
1224 reg_last_sets[regno] = insn;
1226 /* Don't let it cross a call after scheduling if it doesn't
1227 already cross one. */
1228 if (reg_n_calls_crossed[regno] == 0 && last_function_call)
1229 add_dependence (insn, last_function_call, REG_DEP_ANTI);
1232 else if (GET_CODE (dest) == MEM)
1234 /* Writing memory. */
1236 if (pending_lists_length > 32)
1238 /* Flush all pending reads and writes to prevent the pending lists
1239 from getting any larger. Insn scheduling runs too slowly when
1240 these lists get long. The number 32 was chosen because it
1241 seems like a reasonable number. When compiling GCC with itself,
1242 this flush occurs 8 times for sparc, and 10 times for m88k using
1244 flush_pending_lists (insn);
1248 rtx pending, pending_mem;
/* Walk pending reads: a store that may conflict with an earlier
   load gets an anti-dependence on that load.  */
1250 pending = pending_read_insns;
1251 pending_mem = pending_read_mems;
1254 /* If a dependency already exists, don't create a new one. */
1255 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1256 if (anti_dependence (XEXP (pending_mem, 0), dest, insn))
1257 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
1259 pending = XEXP (pending, 1);
1260 pending_mem = XEXP (pending_mem, 1);
/* Walk pending writes: store-after-store gives an output dep.  */
1263 pending = pending_write_insns;
1264 pending_mem = pending_write_mems;
1267 /* If a dependency already exists, don't create a new one. */
1268 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1269 if (output_dependence (XEXP (pending_mem, 0), dest))
1270 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1272 pending = XEXP (pending, 1);
1273 pending_mem = XEXP (pending_mem, 1);
1276 if (last_pending_memory_flush)
1277 add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
/* Record this store on the pending-write lists.  */
1279 add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
/* The address of the MEM is itself read by this insn.  */
1282 sched_analyze_2 (XEXP (dest, 0), insn);
1285 /* Analyze reads. */
1286 if (GET_CODE (x) == SET)
1287 sched_analyze_2 (SET_SRC (x), insn);
1290 /* Analyze the uses of memory and registers in rtx X in INSN. */
/* Recursive walk over X; dispatches on rtx code (the enclosing switch
   is elided from this listing).  */
1293 sched_analyze_2 (x, insn)
1299 register enum rtx_code code;
1305 code = GET_CODE (x);
1314 /* Ignore constants. Note that we must handle CONST_DOUBLE here
1315 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
1316 this does not mean that this insn is using cc0. */
1324 /* There may be a note before this insn now, but all notes will
1325 be removed before we actually try to schedule the insns, so
1326 it won't cause a problem later. We must avoid it here though. */
1328 /* User of CC0 depends on immediately preceding insn. */
1329 SCHED_GROUP_P (insn) = 1;
1331 /* Make a copy of all dependencies on the immediately previous insn,
1332 and add to this insn. This is so that all the dependencies will
1333 apply to the group. */
1335 prev = PREV_INSN (insn);
1336 while (GET_CODE (prev) == NOTE)
1337 prev = PREV_INSN (prev);
/* GET_MODE of a LOG_LINKS entry encodes the dependence kind here.  */
1339 for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
1340 add_dependence (insn, XEXP (link, 0), GET_MODE (link));
/* REG case: record INSN as a use and depend on the last setter.  */
1348 int regno = REGNO (x);
1349 if (regno < FIRST_PSEUDO_REGISTER)
1353 i = HARD_REGNO_NREGS (regno, GET_MODE (x));
1356 reg_last_uses[regno + i]
1357 = gen_rtx (INSN_LIST, VOIDmode,
1358 insn, reg_last_uses[regno + i]);
1359 if (reg_last_sets[regno + i])
1360 add_dependence (insn, reg_last_sets[regno + i], 0);
1361 if ((call_used_regs[regno + i] || global_regs[regno + i])
1362 && last_function_call)
1363 /* Function calls clobber all call_used regs. */
1364 add_dependence (insn, last_function_call, REG_DEP_ANTI);
/* Pseudo-register use.  */
1369 reg_last_uses[regno]
1370 = gen_rtx (INSN_LIST, VOIDmode, insn, reg_last_uses[regno]);
1371 if (reg_last_sets[regno])
1372 add_dependence (insn, reg_last_sets[regno], 0);
1374 /* If the register does not already cross any calls, then add this
1375 insn to the sched_before_next_call list so that it will still
1376 not cross calls after scheduling. */
1377 if (reg_n_calls_crossed[regno] == 0)
1378 add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
1385 /* Reading memory. */
1387 /* Don't create a dependence for memory references which are known to
1388 be unchanging, such as constant pool accesses. These will never
1389 conflict with any other memory access. */
1390 if (RTX_UNCHANGING_P (x) == 0)
1392 rtx pending, pending_mem;
/* Load-after-load: only a read_dependence (an anti dep) matters.  */
1394 pending = pending_read_insns;
1395 pending_mem = pending_read_mems;
1398 /* If a dependency already exists, don't create a new one. */
1399 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1400 if (read_dependence (XEXP (pending_mem, 0), x))
1401 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
1403 pending = XEXP (pending, 1);
1404 pending_mem = XEXP (pending_mem, 1);
/* Load-after-store: a true dependence on the earlier store.  */
1407 pending = pending_write_insns;
1408 pending_mem = pending_write_mems;
1411 /* If a dependency already exists, don't create a new one. */
1412 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1413 if (true_dependence (XEXP (pending_mem, 0), x))
1414 add_dependence (insn, XEXP (pending, 0), 0);
1416 pending = XEXP (pending, 1);
1417 pending_mem = XEXP (pending_mem, 1);
1419 if (last_pending_memory_flush)
1420 add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1422 /* Always add these dependencies to pending_reads, since
1423 this insn may be followed by a write. */
1424 add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
1427 /* Take advantage of tail recursion here. */
1428 sched_analyze_2 (XEXP (x, 0), insn);
1434 case UNSPEC_VOLATILE:
1439 /* Traditional and volatile asm instructions must be considered to use
1440 and clobber all hard registers and all of memory. So must
1441 TRAP_IF and UNSPEC_VOLATILE operations. */
1442 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
/* Depend on every live hard-reg use/set, then flush memory.  */
1444 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1446 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1447 if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
1448 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1449 reg_last_uses[i] = 0;
1450 if (reg_last_sets[i]
1451 && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
1452 add_dependence (insn, reg_last_sets[i], 0);
1453 reg_last_sets[i] = insn;
1456 flush_pending_lists (insn);
1459 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
1460 We can not just fall through here since then we would be confused
1461 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
1462 traditional asms unlike their normal usage. */
1464 if (code == ASM_OPERANDS)
1466 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
1467 sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
1477 /* These read and modify the result; just consider them writes. */
1478 sched_analyze_1 (x, insn);
1482 /* Other cases: walk the insn. */
1483 fmt = GET_RTX_FORMAT (code);
1484 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1487 sched_analyze_2 (XEXP (x, i), insn);
1488 else if (fmt[i] == 'E')
1489 for (j = 0; j < XVECLEN (x, i); j++)
1490 sched_analyze_2 (XVECEXP (x, i, j), insn);
1494 /* Analyze an INSN with pattern X to find all dependencies. */
/* Dispatches on the top-level pattern: SETs/CLOBBERs go through
   sched_analyze_1, everything else through sched_analyze_2.  Also
   glues a CALL_INSN to its preceding USE insns via SCHED_GROUP_P so
   they are scheduled as one unit.  */
1497 sched_analyze_insn (x, insn)
1500 register RTX_CODE code = GET_CODE (x);
1503 if (code == SET || code == CLOBBER)
1504 sched_analyze_1 (x, insn);
1505 else if (code == PARALLEL)
/* Analyze each element of a PARALLEL individually.  */
1508 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1510 code = GET_CODE (XVECEXP (x, 0, i));
1511 if (code == SET || code == CLOBBER)
1512 sched_analyze_1 (XVECEXP (x, 0, i), insn);
1514 sched_analyze_2 (XVECEXP (x, 0, i), insn);
1518 sched_analyze_2 (x, insn);
1520 /* Handle function calls. */
1521 if (GET_CODE (insn) == CALL_INSN)
1526 /* When scheduling instructions, we make sure calls don't lose their
1527 accompanying USE insns by depending them one on another in order. */
1529 prev_dep_insn = insn;
1530 dep_insn = PREV_INSN (insn);
/* Walk backward over the run of USE insns feeding this call.  */
1531 while (GET_CODE (dep_insn) == INSN
1532 && GET_CODE (PATTERN (dep_insn)) == USE)
1534 SCHED_GROUP_P (prev_dep_insn) = 1;
1536 /* Make a copy of all dependencies on dep_insn, and add to insn.
1537 This is so that all of the dependencies will apply to the
/* GET_MODE of a LOG_LINKS entry encodes the dependence kind.  */
1540 for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
1541 add_dependence (insn, XEXP (link, 0), GET_MODE (link));
1543 prev_dep_insn = dep_insn;
1544 dep_insn = PREV_INSN (dep_insn);
1549 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1550 for every dependency. */
/* Also assigns sequential INSN_LUIDs.  Returns a count of insns
   (accumulated in n_insns; the return statement is elided here).  */
1553 sched_analyze (head, tail)
1557 register int n_insns = 0;
1559 register int luid = 0;
1561 for (insn = head; ; insn = NEXT_INSN (insn))
1563 INSN_LUID (insn) = luid++;
1565 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1567 sched_analyze_insn (PATTERN (insn), insn);
1570 else if (GET_CODE (insn) == CALL_INSN)
1576 /* Any instruction using a hard register which may get clobbered
1577 by a call needs to be marked as dependent on this call.
1578 This prevents a use of a hard return reg from being moved
1579 past a void call (i.e. it does not explicitly set the hard
1582 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1583 if (call_used_regs[i] || global_regs[i])
1585 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1586 if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
1587 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1588 reg_last_uses[i] = 0;
1589 if (reg_last_sets[i]
1590 && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
1591 add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
1592 reg_last_sets[i] = insn;
1593 /* Insn, being a CALL_INSN, magically depends on
1594 `last_function_call' already. */
1597 /* For each insn which shouldn't cross a call, add a dependence
1598 between that insn and this call insn. */
1599 x = LOG_LINKS (sched_before_next_call);
1602 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
1605 LOG_LINKS (sched_before_next_call) = 0;
1607 sched_analyze_insn (PATTERN (insn), insn);
1609 /* We don't need to flush memory for a function call which does
1610 not involve memory. */
1611 if (! CONST_CALL_P (insn))
1613 /* In the absence of interprocedural alias analysis,
1614 we must flush all pending reads and writes, and
1615 start new dependencies starting from here. */
1616 flush_pending_lists (insn);
1619 /* Depend this function call (actually, the user of this
1620 function call) on all hard register clobberage. */
1621 last_function_call = insn;
1630 /* Called when we see a set of a register. If death is true, then we are
1631 scanning backwards. Mark that register as unborn. If nobody says
1632 otherwise, that is how things will remain. If death is false, then we
1633 are scanning forwards. Mark that register as being born. */
/* Updates the bb_live_regs / bb_dead_regs bitmaps for block B; each
   regno maps to (offset, bit) within the REGSET_ELT_BITS-wide words.
   NOTE(review): elided listing — the early-return for partial sets
   mentioned at line 1677 is not fully shown.  */
1636 sched_note_set (b, x, death)
1641 register int regno, j;
1642 register rtx reg = SET_DEST (x);
/* Strip wrappers to find the underlying register.  */
1648 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
1649 || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
1651 /* Must treat modification of just one hardware register of a multi-reg
1652 value or just a byte field of a register exactly the same way that
1653 mark_set_1 in flow.c does. */
1654 if (GET_CODE (reg) == ZERO_EXTRACT
1655 || GET_CODE (reg) == SIGN_EXTRACT
1656 || (GET_CODE (reg) == SUBREG
1657 && REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg)))
1660 reg = SUBREG_REG (reg);
1663 if (GET_CODE (reg) != REG)
1666 /* Global registers are always live, so the code below does not apply
1669 regno = REGNO (reg);
1670 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
1672 register int offset = regno / REGSET_ELT_BITS;
1673 register int bit = 1 << (regno % REGSET_ELT_BITS);
1677 /* If we only set part of the register, then this set does not
1682 /* Try killing this register. */
1683 if (regno < FIRST_PSEUDO_REGISTER)
/* A hard reg may span several words; kill each constituent reg.  */
1685 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1688 offset = (regno + j) / REGSET_ELT_BITS;
1689 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
1691 bb_live_regs[offset] &= ~bit;
1692 bb_dead_regs[offset] |= bit;
1697 bb_live_regs[offset] &= ~bit;
1698 bb_dead_regs[offset] |= bit;
1703 /* Make the register live again. */
1704 if (regno < FIRST_PSEUDO_REGISTER)
1706 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1709 offset = (regno + j) / REGSET_ELT_BITS;
1710 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
1712 bb_live_regs[offset] |= bit;
1713 bb_dead_regs[offset] &= ~bit;
1718 bb_live_regs[offset] |= bit;
1719 bb_dead_regs[offset] &= ~bit;
1725 /* Macros and functions for keeping the priority queue sorted, and
1726 dealing with queueing and unqueueing of instructions. */
/* SCHED_SORT: restore READY (NEW_READY entries) to sorted order after
   OLD_READY..NEW_READY-1 were appended.  A single new element uses the
   cheap swap_sort insertion; more than one falls back to qsort with
   rank_for_schedule as the comparator.  */
1728 #define SCHED_SORT(READY, NEW_READY, OLD_READY) \
1729 do { if ((NEW_READY) - (OLD_READY) == 1) \
1730 swap_sort (READY, NEW_READY); \
1731 else if ((NEW_READY) - (OLD_READY) > 1) \
1732 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
1735 /* Returns a positive value if y is preferred; returns a negative value if
1736 x is preferred. Should never return 0, since that will make the sort
/* qsort comparator for the ready list.  Note the original's
   assignments-inside-if (`if (value = ...)`) are intentional
   test-and-use idioms, not typos.  */
1740 rank_for_schedule (x, y)
1745 rtx tmp_dep, tmp2_dep;
1746 int tmp_class, tmp2_class;
1749 /* Choose the instruction with the highest priority, if different. */
1750 if (value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2))
1753 if (last_scheduled_insn)
1755 /* Classify the instructions into three classes:
1756 1) Data dependent on last schedule insn.
1757 2) Anti/Output dependent on last scheduled insn.
1758 3) Independent of last scheduled insn, or has latency of one.
1759 Choose the insn from the highest numbered class if different. */
1760 tmp_dep = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
1761 if (tmp_dep == 0 || insn_cost (tmp) == 1)
/* REG_NOTE_KIND of 0 means a true (data) dependence.  */
1763 else if (REG_NOTE_KIND (tmp_dep) == 0)
1768 tmp2_dep = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
1769 if (tmp2_dep == 0 || insn_cost (tmp2) == 1)
1771 else if (REG_NOTE_KIND (tmp2_dep) == 0)
1776 if (value = tmp_class - tmp2_class)
1780 /* If insns are equally good, sort by INSN_LUID (original insn order),
1781 so that we make the sort stable. This minimizes instruction movement,
1782 thus minimizing sched's effect on debugging and cross-jumping. */
1783 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1786 /* Resort the array A in which only element at index N may be out of order. */
/* NOTE(review): the function header line is elided; this appears to be
   swap_sort, doing a single insertion pass from index N-1 downward —
   confirm against the full source.  */
1788 __inline static void
1796 while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
/* Highest priority seen among the current ready insns; shared with
   launch_link / launch_links below.  */
1804 static int max_priority;
1806 /* Add INSN to the insn queue so that it fires at least N_CYCLES
1807 before the currently executing insn. */
/* insn_queue is indexed circularly via NEXT_Q_AFTER; insns at the same
   slot are chained through their NEXT_INSN fields.  */
1809 __inline static void
1810 queue_insn (insn, n_cycles)
1814 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1815 NEXT_INSN (insn) = insn_queue[next_q];
1816 insn_queue[next_q] = insn;
1820 /* Return nonzero if PAT is the pattern of an insn which makes a
/* "Birthing" = gives a register its only definition while it is live.
   Used to prefer such insns so register lifetimes start late.
   Meaningless after reload, hence the early exit.  */
1824 birthing_insn_p (pat)
1829 if (reload_completed == 1)
1832 if (GET_CODE (pat) == SET
1833 && GET_CODE (SET_DEST (pat)) == REG)
1835 rtx dest = SET_DEST (pat);
1836 int i = REGNO (dest);
1837 int offset = i / REGSET_ELT_BITS;
1838 int bit = 1 << (i % REGSET_ELT_BITS);
1840 /* It would be more accurate to use refers_to_regno_p or
1841 reg_mentioned_p to determine when the dest is not live before this
1844 if (bb_live_regs[offset] & bit)
1845 return (reg_n_sets[i] == 1);
/* A PARALLEL births if any of its elements does.  */
1849 if (GET_CODE (pat) == PARALLEL)
1851 for (j = 0; j < XVECLEN (pat, 0); j++)
1852 if (birthing_insn_p (XVECEXP (pat, 0, j)))
1858 /* If PREV is an insn which is immediately ready to execute, return 1,
1859 otherwise return 0. We may adjust its priority if that will help shorten
1860 register lifetimes. */
/* NOTE(review): the function header line is elided; per the comment in
   launch_links below this is launch_link (prev).  The >>= priority
   penalties fall inside elided REG_DEAD-counting branches — confirm
   the exact thresholds against the full source.  */
1866 rtx pat = PATTERN (prev);
1868 /* MAX of (a) number of cycles needed by prev
1869 (b) number of cycles before needed resources are free. */
1870 int n_cycles = insn_cost (prev);
1873 /* Trying to shorten register lives after reload has completed
1874 is useless and wrong. It gives inaccurate schedules. */
1875 if (reload_completed == 0)
1877 for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
1878 if (REG_NOTE_KIND (note) == REG_DEAD)
1881 /* Defer scheduling insns which kill registers, since that
1882 shortens register lives. Prefer scheduling insns which
1883 make registers live for the same reason. */
1887 INSN_PRIORITY (prev) >>= 3;
1890 INSN_PRIORITY (prev) >>= 2;
1894 INSN_PRIORITY (prev) >>= 1;
/* Boost insns that create a new register lifetime.  */
1897 if (birthing_insn_p (pat))
1899 int max = max_priority;
1901 if (max > INSN_PRIORITY (prev))
1902 INSN_PRIORITY (prev) = max;
/* Not ready this cycle: park it on the delay queue instead.  */
1910 queue_insn (prev, n_cycles);
1914 /* INSN is the "currently executing insn". Launch each insn which was
1915 waiting on INSN (in the backwards dataflow sense). READY is a
1916 vector of insns which are ready to fire. N_READY is the number of
1917 elements in READY. */
/* Returns the new ready count (return statement elided in listing).  */
1920 launch_links (insn, ready, n_ready)
1926 int new_ready = n_ready;
1928 if (LOG_LINKS (insn) == 0)
1931 /* This is used by the function launch_link above. */
1933 max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
1935 max_priority = INSN_PRIORITY (insn);
/* Each dependent whose reference count drops to zero is now a
   candidate; launch_link decides ready-now vs. queued.  */
1937 for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
1939 rtx prev = XEXP (link, 0);
1941 if ((INSN_REF_COUNT (prev) -= 1) == 0 && launch_link (prev))
1942 ready[new_ready++] = prev;
1948 /* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
/* Pops a note from the dead_notes free list when one is available;
   only allocates (and tags) a fresh EXPR_LIST when the list is empty.  */
1952 create_reg_dead_note (reg, insn)
1955 rtx link = dead_notes;
1958 /* In theory, we should not end up with more REG_DEAD reg notes than we
1959 started with. In practice, this can occur as the result of bugs in
1960 flow, combine and/or sched. */
1965 link = rtx_alloc (EXPR_LIST);
1966 PUT_REG_NOTE_KIND (link, REG_DEAD);
1970 dead_notes = XEXP (dead_notes, 1);
/* Splice the note onto the front of INSN's REG_NOTES chain.  */
1972 XEXP (link, 0) = reg;
1973 XEXP (link, 1) = REG_NOTES (insn);
1974 REG_NOTES (insn) = link;
1977 /* Subroutine on attach_deaths_insn--handles the recursive search
1978 through INSN. If SET_P is true, then x is being modified by the insn. */
/* NOTE(review): elided listing — the enclosing switch on CODE and
   several branch bodies are not shown.  */
1981 attach_deaths (x, insn, set_p)
1988 register enum rtx_code code;
1994 code = GET_CODE (x);
2006 /* Get rid of the easy cases first. */
2011 /* If the register dies in this insn, queue that note, and mark
2012 this register as needing to die. */
2013 /* This code is very similar to mark_used_1 (if set_p is false)
2014 and mark_set_1 (if set_p is true) in flow.c. */
2016 register int regno = REGNO (x);
2017 register int offset = regno / REGSET_ELT_BITS;
2018 register int bit = 1 << (regno % REGSET_ELT_BITS);
2019 int all_needed = (old_live_regs[offset] & bit);
2020 int some_needed = (old_live_regs[offset] & bit);
/* For a multi-word hard reg, fold in liveness of every word:
   some_needed = any word live, all_needed = every word live.  */
2025 if (regno < FIRST_PSEUDO_REGISTER)
2029 n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2032 some_needed |= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
2033 & 1 << ((regno + n) % REGSET_ELT_BITS));
2034 all_needed &= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
2035 & 1 << ((regno + n) % REGSET_ELT_BITS));
2039 /* If it wasn't live before we started, then add a REG_DEAD note.
2040 We must check the previous lifetime info not the current info,
2041 because we may have to execute this code several times, e.g.
2042 once for a clobber (which doesn't add a note) and later
2043 for a use (which does add a note).
2045 Always make the register live. We must do this even if it was
2046 live before, because this may be an insn which sets and uses
2047 the same register, in which case the register has already been
2048 killed, so we must make it live again.
2050 Global registers are always live, and should never have a REG_DEAD
2051 note added for them, so none of the code below applies to them. */
2053 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2055 /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2056 STACK_POINTER_REGNUM, since these are always considered to be
2057 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2058 if (regno != FRAME_POINTER_REGNUM
2059 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2060 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
2062 && regno != STACK_POINTER_REGNUM)
2064 if (! all_needed && ! dead_or_set_p (insn, x))
2066 /* If none of the words in X is needed, make a REG_DEAD
2067 note. Otherwise, we must make partial REG_DEAD
2070 create_reg_dead_note (x, insn);
2075 /* Don't make a REG_DEAD note for a part of a
2076 register that is set in the insn. */
2077 for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
2079 if ((old_live_regs[(regno + i) / REGSET_ELT_BITS]
2080 & 1 << ((regno +i) % REGSET_ELT_BITS)) == 0
2081 && ! dead_or_set_regno_p (insn, regno + i))
2082 create_reg_dead_note (gen_rtx (REG, word_mode,
/* Mark the register (all constituent words) live again.  */
2089 if (regno < FIRST_PSEUDO_REGISTER)
2091 int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
2094 offset = (regno + j) / REGSET_ELT_BITS;
2095 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
2097 bb_dead_regs[offset] &= ~bit;
2098 bb_live_regs[offset] |= bit;
2103 bb_dead_regs[offset] &= ~bit;
2104 bb_live_regs[offset] |= bit;
2111 /* Handle tail-recursive case. */
2112 attach_deaths (XEXP (x, 0), insn, 0);
2116 case STRICT_LOW_PART:
2117 /* These two cases preserve the value of SET_P, so handle them
2119 attach_deaths (XEXP (x, 0), insn, set_p);
2124 /* This case preserves the value of SET_P for the first operand, but
2125 clears it for the other two. */
2126 attach_deaths (XEXP (x, 0), insn, set_p);
2127 attach_deaths (XEXP (x, 1), insn, 0);
2128 attach_deaths (XEXP (x, 2), insn, 0);
2132 /* Other cases: walk the insn. */
2133 fmt = GET_RTX_FORMAT (code);
2134 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2137 attach_deaths (XEXP (x, i), insn, 0);
2138 else if (fmt[i] == 'E')
2139 for (j = 0; j < XVECLEN (x, i); j++)
2140 attach_deaths (XVECEXP (x, i, j), insn, 0);
2145 /* After INSN has executed, add register death notes for each register
2146 that is dead after INSN. */
/* Drives attach_deaths over the pattern: SET sources are uses
   (set_p = 0), SET destinations and CLOBBER operands are writes
   (set_p = 1); PARALLELs are handled element by element.  */
2149 attach_deaths_insn (insn)
2152 rtx x = PATTERN (insn);
2153 register RTX_CODE code = GET_CODE (x);
2157 attach_deaths (SET_SRC (x), insn, 0);
2159 /* A register might die here even if it is the destination, e.g.
2160 it is the target of a volatile read and is otherwise unused.
2161 Hence we must always call attach_deaths for the SET_DEST. */
2162 attach_deaths (SET_DEST (x), insn, 1);
2164 else if (code == PARALLEL)
2167 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2169 code = GET_CODE (XVECEXP (x, 0, i));
2172 attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
2174 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
2176 else if (code == CLOBBER)
2177 attach_deaths (XEXP (XVECEXP (x, 0, i), 0), insn, 1);
2179 attach_deaths (XVECEXP (x, 0, i), insn, 0);
2182 else if (code == CLOBBER)
2183 attach_deaths (XEXP (x, 0), insn, 1);
2185 attach_deaths (x, insn, 0);
2188 /* Delete notes beginning with INSN and maybe put them in the chain
2189 of notes ended by NOTE_LIST.
2190 Returns the insn following the notes. */
/* NOTE(review): elided — the return statement and note_list update are
   not shown in this listing.  */
2193 unlink_notes (insn, tail)
2196 rtx prev = PREV_INSN (insn);
2198 while (insn != tail && GET_CODE (insn) == NOTE)
2200 rtx next = NEXT_INSN (insn);
2201 /* Delete the note from its current position. */
2203 NEXT_INSN (prev) = next;
2205 PREV_INSN (next) = prev;
/* Positive NOTE_LINE_NUMBER means a source line-number note.  */
2207 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
2208 /* Record line-number notes so they can be reused. */
2209 LINE_NOTE (insn) = insn;
2212 /* Insert the note at the end of the notes list. */
2213 PREV_INSN (insn) = note_list;
2215 NEXT_INSN (note_list) = insn;
2224 /* Data structure for keeping track of register information
2225 during that register's life. */
/* NOTE(review): the `struct sometimes {` line is elided here; fields
   identify the reg as (offset, bit) into the regset and accumulate its
   live length and calls crossed.  */
2229 short offset; short bit;
2230 short live_length; short calls_crossed;
2233 /* Constructor for `sometimes' data structure. */
/* Initializes the entry at index SOMETIMES_MAX for the register at
   (OFFSET, BIT); returns the updated count (increment elided).  */
2236 new_sometimes_live (regs_sometimes_live, offset, bit, sometimes_max)
2237 struct sometimes *regs_sometimes_live;
2241 register struct sometimes *p;
2242 register int regno = offset * REGSET_ELT_BITS + bit;
2245 /* There should never be a register greater than max_regno here. If there
2246 is, it means that a define_split has created a new pseudo reg. This
2247 is not allowed, since there will not be flow info available for any
2248 new register, so catch the error here. */
2249 if (regno >= max_regno)
2252 p = &regs_sometimes_live[sometimes_max];
2256 p->calls_crossed = 0;
2258 return sometimes_max;
2261 /* Count lengths of all regs we are currently tracking,
2262 and find new registers no longer live. */
/* Folds each tracked register's accumulated live length and
   calls-crossed counts into the global sched_reg_* arrays.  */
2265 finish_sometimes_live (regs_sometimes_live, sometimes_max)
2266 struct sometimes *regs_sometimes_live;
2271 for (i = 0; i < sometimes_max; i++)
2273 register struct sometimes *p = &regs_sometimes_live[i];
/* Recover the regno from the (offset, bit) pair.  */
2276 regno = p->offset * REGSET_ELT_BITS + p->bit;
2278 sched_reg_live_length[regno] += p->live_length;
2279 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2283 /* Use modified list scheduling to rearrange insns in basic block
2284 B. FILE, if nonzero, is where we dump interesting output about
2288 schedule_block (b, file)
2295 int i, j, n_ready = 0, new_ready, n_insns = 0;
2296 int sched_n_insns = 0;
2297 #define NEED_NOTHING 0
2302 /* HEAD and TAIL delimit the region being scheduled. */
2303 rtx head = basic_block_head[b];
2304 rtx tail = basic_block_end[b];
2305 /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2306 being scheduled. When the insns have been ordered,
2307 these insns delimit where the new insns are to be
2308 spliced back into the insn chain. */
2312 /* Keep life information accurate. */
2313 register struct sometimes *regs_sometimes_live;
2317 fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
2318 b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
2321 reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
2322 bzero (reg_last_uses, i * sizeof (rtx));
2323 reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
2324 bzero (reg_last_sets, i * sizeof (rtx));
2326 /* Remove certain insns at the beginning from scheduling,
2327 by advancing HEAD. */
2329 /* At the start of a function, before reload has run, don't delay getting
2330 parameters from hard registers into pseudo registers. */
2331 if (reload_completed == 0 && b == 0)
2334 && GET_CODE (head) == NOTE
2335 && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
2336 head = NEXT_INSN (head);
2338 && GET_CODE (head) == INSN
2339 && GET_CODE (PATTERN (head)) == SET)
2341 rtx src = SET_SRC (PATTERN (head));
2342 while (GET_CODE (src) == SUBREG
2343 || GET_CODE (src) == SIGN_EXTEND
2344 || GET_CODE (src) == ZERO_EXTEND
2345 || GET_CODE (src) == SIGN_EXTRACT
2346 || GET_CODE (src) == ZERO_EXTRACT)
2347 src = XEXP (src, 0);
2348 if (GET_CODE (src) != REG
2349 || REGNO (src) >= FIRST_PSEUDO_REGISTER)
2351 /* Keep this insn from ever being scheduled. */
2352 INSN_REF_COUNT (head) = 1;
2353 head = NEXT_INSN (head);
2357 /* Don't include any notes or labels at the beginning of the
2358 basic block, or notes at the ends of basic blocks. */
2359 while (head != tail)
2361 if (GET_CODE (head) == NOTE)
2362 head = NEXT_INSN (head);
2363 else if (GET_CODE (tail) == NOTE)
2364 tail = PREV_INSN (tail);
2365 else if (GET_CODE (head) == CODE_LABEL)
2366 head = NEXT_INSN (head);
2369 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2370 to schedule this block. */
2372 && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
2376 /* This short-cut doesn't work. It does not count call insns crossed by
2377 registers in reg_sometimes_live. It does not mark these registers as
2378 dead if they die in this block. It does not mark these registers live
2379 (or create new reg_sometimes_live entries if necessary) if they are born
2382 The easy solution is to just always schedule a block. This block only
2383 has one insn, so this won't slow down this pass by much. */
2389 /* Exclude certain insns at the end of the basic block by advancing TAIL. */
2390 /* This isn't correct. Instead of advancing TAIL, should assign very
2391 high priorities to these insns to guarantee that they get scheduled last.
2392 If these insns are ignored, as is currently done, the register life info
2393 may be incorrectly computed. */
2394 if (GET_CODE (tail) == INSN && GET_CODE (PATTERN (tail)) == USE)
2396 /* Don't try to reorder any USE insns at the end of any block.
2397 They must be last to ensure proper register allocation.
2398 Exclude them all from scheduling. */
2401 /* If we are down to one USE insn, then there are no insns to
2406 tail = prev_nonnote_insn (tail);
2408 while (GET_CODE (tail) == INSN
2409 && GET_CODE (PATTERN (tail)) == USE);
2412 /* This short-cut does not work. See comment above. */
2417 else if (GET_CODE (tail) == JUMP_INSN
2418 && SCHED_GROUP_P (tail) == 0
2419 && GET_CODE (PREV_INSN (tail)) == INSN
2420 && GET_CODE (PATTERN (PREV_INSN (tail))) == USE
2421 && REG_FUNCTION_VALUE_P (XEXP (PATTERN (PREV_INSN (tail)), 0)))
2423 /* Don't let the setting of the function's return value register
2424 move from this jump. For the same reason we want to get the
2425 parameters into pseudo registers as quickly as possible, we
2426 want to set the function's return value register as late as
2429 /* If this is the only insn in the block, then there is no need to
2430 schedule the block. */
2434 tail = PREV_INSN (tail);
2438 tail = prev_nonnote_insn (tail);
2441 /* This shortcut does not work. See comment above. */
2448 /* This is probably wrong. Instead of doing this, should give this insn
2449 a very high priority to guarantee that it gets scheduled last. */
2450 /* Can not separate an insn that sets the condition code from one that
2451 uses it. So we must leave an insn that sets cc0 where it is. */
2452 if (sets_cc0_p (PATTERN (tail)))
2453 tail = PREV_INSN (tail);
2456 /* Now HEAD through TAIL are the insns actually to be rearranged;
2457 Let PREV_HEAD and NEXT_TAIL enclose them. */
2458 prev_head = PREV_INSN (head);
2459 next_tail = NEXT_INSN (tail);
2461 /* Initialize basic block data structures. */
2463 pending_read_insns = 0;
2464 pending_read_mems = 0;
2465 pending_write_insns = 0;
2466 pending_write_mems = 0;
2467 pending_lists_length = 0;
2468 last_pending_memory_flush = 0;
2469 last_function_call = 0;
2470 last_scheduled_insn = 0;
2472 LOG_LINKS (sched_before_next_call) = 0;
2474 n_insns += sched_analyze (head, tail);
2477 free_pending_lists ();
2481 /* Allocate vector to hold insns to be rearranged (except those
2482 insns which are controlled by an insn with SCHED_GROUP_P set).
2483 All these insns are included between ORIG_HEAD and ORIG_TAIL,
2484 as those variables ultimately are set up. */
2485 ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
2487 /* TAIL is now the last of the insns to be rearranged.
2488 Put those insns into the READY vector. */
2491 /* If the last insn is a branch, force it to be the last insn after
2492 scheduling. Also, don't try to reorder calls at the ends the basic
2493 block -- this will only lead to worse register allocation. */
2494 if (GET_CODE (tail) == CALL_INSN || GET_CODE (tail) == JUMP_INSN)
2497 ready[n_ready++] = tail;
2498 INSN_PRIORITY (tail) = TAIL_PRIORITY;
2499 INSN_REF_COUNT (tail) = 0;
2500 insn = PREV_INSN (tail);
2503 /* Assign priorities to instructions. Also check whether they
2504 are in priority order already. If so then I will be nonnegative.
2505 We use this shortcut only before reloading. */
2507 i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
2510 for (; insn != prev_head; insn = PREV_INSN (insn))
2512 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2515 if (INSN_REF_COUNT (insn) == 0)
2516 ready[n_ready++] = insn;
2517 if (SCHED_GROUP_P (insn))
2519 while (SCHED_GROUP_P (insn))
2521 insn = PREV_INSN (insn);
2522 while (GET_CODE (insn) == NOTE)
2523 insn = PREV_INSN (insn);
2531 if (INSN_PRIORITY (insn) < i)
2532 i = INSN_PRIORITY (insn);
2533 else if (INSN_PRIORITY (insn) > i)
2540 /* This short-cut doesn't work. It does not count call insns crossed by
2541 registers in reg_sometimes_live. It does not mark these registers as
2542 dead if they die in this block. It does not mark these registers live
2543 (or create new reg_sometimes_live entries if necessary) if they are born
2546 The easy solution is to just always schedule a block. These blocks tend
2547 to be very short, so this doesn't slow down this pass by much. */
2549 /* If existing order is good, don't bother to reorder. */
2550 if (i != DONE_PRIORITY)
2553 fprintf (file, ";; already scheduled\n");
2555 if (reload_completed == 0)
2557 for (i = 0; i < sometimes_max; i++)
2558 regs_sometimes_live[i].live_length += n_insns;
2560 finish_sometimes_live (regs_sometimes_live, sometimes_max);
2562 free_pending_lists ();
2567 /* Scan all the insns to be scheduled, removing NOTE insns
2568 and register death notes.
2569 Line number NOTE insns end up in NOTE_LIST.
2570 Register death notes end up in DEAD_NOTES.
2572 Recreate the register life information for the end of this basic
2575 if (reload_completed == 0)
2577 bcopy (basic_block_live_at_start[b], bb_live_regs, regset_bytes);
2578 bzero (bb_dead_regs, regset_bytes);
2582 /* This is the first block in the function. There may be insns
2583 before head that we can't schedule. We still need to examine
2584 them though for accurate register lifetime analysis. */
2586 /* We don't want to remove any REG_DEAD notes as the code below
2589 for (insn = basic_block_head[b]; insn != head;
2590 insn = NEXT_INSN (insn))
2591 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2593 /* See if the register gets born here. */
2594 /* We must check for registers being born before we check for
2595 registers dying. It is possible for a register to be born
2596 and die in the same insn, e.g. reading from a volatile
2597 memory location into an otherwise unused register. Such
2598 a register must be marked as dead after this insn. */
2599 if (GET_CODE (PATTERN (insn)) == SET
2600 || GET_CODE (PATTERN (insn)) == CLOBBER)
2601 sched_note_set (b, PATTERN (insn), 0);
2602 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2605 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2606 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2607 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2608 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2610 /* ??? This code is obsolete and should be deleted. It
2611 is harmless though, so we will leave it in for now. */
2612 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2613 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2614 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2617 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2619 if ((REG_NOTE_KIND (link) == REG_DEAD
2620 || REG_NOTE_KIND (link) == REG_UNUSED)
2621 /* Verify that the REG_NOTE has a legal value. */
2622 && GET_CODE (XEXP (link, 0)) == REG)
2624 register int regno = REGNO (XEXP (link, 0));
2625 register int offset = regno / REGSET_ELT_BITS;
2626 register int bit = 1 << (regno % REGSET_ELT_BITS);
2628 if (regno < FIRST_PSEUDO_REGISTER)
2630 int j = HARD_REGNO_NREGS (regno,
2631 GET_MODE (XEXP (link, 0)));
2634 offset = (regno + j) / REGSET_ELT_BITS;
2635 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
2637 bb_live_regs[offset] &= ~bit;
2638 bb_dead_regs[offset] |= bit;
2643 bb_live_regs[offset] &= ~bit;
2644 bb_dead_regs[offset] |= bit;
2652 /* If debugging information is being produced, keep track of the line
2653 number notes for each insn. */
2654 if (write_symbols != NO_DEBUG)
2656 /* We must use the true line number for the first insn in the block
2657 that was computed and saved at the start of this pass. We can't
2658 use the current line number, because scheduling of the previous
2659 block may have changed the current line number. */
2660 rtx line = line_note_head[b];
2662 for (insn = basic_block_head[b];
2664 insn = NEXT_INSN (insn))
2665 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
2668 LINE_NOTE (insn) = line;
2671 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2673 rtx prev, next, link;
2675 /* Farm out notes. This is needed to keep the debugger from
2676 getting completely deranged. */
2677 if (GET_CODE (insn) == NOTE)
2680 insn = unlink_notes (insn, next_tail);
2685 if (insn == next_tail)
2689 if (reload_completed == 0
2690 && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2692 /* See if the register gets born here. */
2693 /* We must check for registers being born before we check for
2694 registers dying. It is possible for a register to be born and
2695 die in the same insn, e.g. reading from a volatile memory
2696 location into an otherwise unused register. Such a register
2697 must be marked as dead after this insn. */
2698 if (GET_CODE (PATTERN (insn)) == SET
2699 || GET_CODE (PATTERN (insn)) == CLOBBER)
2700 sched_note_set (b, PATTERN (insn), 0);
2701 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2704 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2705 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2706 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2707 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2709 /* ??? This code is obsolete and should be deleted. It
2710 is harmless though, so we will leave it in for now. */
2711 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2712 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2713 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2716 /* Need to know what registers this insn kills. */
2717 for (prev = 0, link = REG_NOTES (insn); link; link = next)
2721 next = XEXP (link, 1);
2722 if ((REG_NOTE_KIND (link) == REG_DEAD
2723 || REG_NOTE_KIND (link) == REG_UNUSED)
2724 /* Verify that the REG_NOTE has a legal value. */
2725 && GET_CODE (XEXP (link, 0)) == REG)
2727 register int regno = REGNO (XEXP (link, 0));
2728 register int offset = regno / REGSET_ELT_BITS;
2729 register int bit = 1 << (regno % REGSET_ELT_BITS);
2731 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
2733 if (REG_NOTE_KIND (link) == REG_DEAD)
2736 XEXP (prev, 1) = next;
2738 REG_NOTES (insn) = next;
2739 XEXP (link, 1) = dead_notes;
2745 if (regno < FIRST_PSEUDO_REGISTER)
2747 int j = HARD_REGNO_NREGS (regno,
2748 GET_MODE (XEXP (link, 0)));
2751 offset = (regno + j) / REGSET_ELT_BITS;
2752 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
2754 bb_live_regs[offset] &= ~bit;
2755 bb_dead_regs[offset] |= bit;
2760 bb_live_regs[offset] &= ~bit;
2761 bb_dead_regs[offset] |= bit;
2770 if (reload_completed == 0)
2772 /* Keep track of register lives. */
2773 old_live_regs = (regset) alloca (regset_bytes);
2775 = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
2778 /* Start with registers live at end. */
2779 for (j = 0; j < regset_size; j++)
2781 int live = bb_live_regs[j];
2782 old_live_regs[j] = live;
2786 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
2787 if (live & (1 << bit))
2788 sometimes_max = new_sometimes_live (regs_sometimes_live, j,
2789 bit, sometimes_max);
2794 SCHED_SORT (ready, n_ready, 1);
2798 fprintf (file, ";; ready list initially:\n;; ");
2799 for (i = 0; i < n_ready; i++)
2800 fprintf (file, "%d ", INSN_UID (ready[i]));
2801 fprintf (file, "\n\n");
2803 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2804 if (INSN_PRIORITY (insn) > 0)
2805 fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
2806 INSN_UID (insn), INSN_PRIORITY (insn),
2807 INSN_REF_COUNT (insn));
2810 /* Now HEAD and TAIL are going to become disconnected
2811 entirely from the insn chain. */
2814 /* Q_SIZE will always be zero here. */
2816 bzero (insn_queue, sizeof (insn_queue));
2818 /* Now, perform list scheduling. */
2820 /* Where we start inserting insns is after TAIL. */
2823 new_needs = (NEXT_INSN (prev_head) == basic_block_head[b]
2824 ? NEED_HEAD : NEED_NOTHING);
2825 if (PREV_INSN (next_tail) == basic_block_end[b])
2826 new_needs |= NEED_TAIL;
2828 new_ready = n_ready;
2829 while (sched_n_insns < n_insns)
2831 q_ptr = NEXT_Q (q_ptr);
2833 /* Add all pending insns that can be scheduled without stalls to the
2835 for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
2838 fprintf (file, ";; launching %d before %d with no stalls\n",
2839 INSN_UID (insn), INSN_UID (last));
2840 ready[new_ready++] = insn;
2843 insn_queue[q_ptr] = 0;
2845 /* If there are no ready insns, stall until one is ready and add all
2846 of the pending insns at that point to the ready list. */
2849 register int stalls;
2851 for (stalls = 1; stalls < Q_SIZE; stalls++)
2852 if (insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)])
2854 for (; insn; insn = NEXT_INSN (insn))
2857 fprintf (file, ";; issue insn %d before %d with %d stalls\n",
2858 INSN_UID (insn), INSN_UID (last), stalls);
2859 ready[new_ready++] = insn;
2862 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
2867 /* This looks logically correct, but on the SPEC benchmark set on
2868 the SPARC, I get better code without it. */
2869 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2873 /* There should be some instructions waiting to fire. */
2877 /* Sort the ready list and choose the best insn to schedule.
2878 N_READY holds the number of items that were scheduled the last time,
2879 minus the one instruction scheduled on the last loop iteration; it
2880 is not modified for any other reason in this loop. */
2881 SCHED_SORT (ready, new_ready, n_ready);
2882 n_ready = new_ready;
2883 last_scheduled_insn = insn = ready[0];
2885 if (DONE_PRIORITY_P (insn))
2888 if (reload_completed == 0)
2890 /* Process this insn, and each insn linked to this one which must
2891 be immediately output after this insn. */
2894 /* First we kill registers set by this insn, and then we
2895 make registers used by this insn live. This is the opposite
2896 order used above because we are traversing the instructions
2899 /* Strictly speaking, we should scan REG_UNUSED notes and make
2900 every register mentioned there live, however, we will just
2901 kill them again immediately below, so there doesn't seem to
2902 be any reason why we bother to do this. */
2904 /* See if this is the last notice we must take of a register. */
2905 if (GET_CODE (PATTERN (insn)) == SET
2906 || GET_CODE (PATTERN (insn)) == CLOBBER)
2907 sched_note_set (b, PATTERN (insn), 1);
2908 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2911 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2912 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2913 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2914 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 1);
2917 /* This code keeps life analysis information up to date. */
2918 if (GET_CODE (insn) == CALL_INSN)
2920 register struct sometimes *p;
2922 /* A call kills all call used and global registers, except
2923 for those mentioned in the call pattern which will be
2924 made live again later. */
2925 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2926 if (call_used_regs[i] || global_regs[i])
2928 register int offset = i / REGSET_ELT_BITS;
2929 register int bit = 1 << (i % REGSET_ELT_BITS);
2931 bb_live_regs[offset] &= ~bit;
2932 bb_dead_regs[offset] |= bit;
2935 /* Regs live at the time of a call instruction must not
2936 go in a register clobbered by calls. Record this for
2937 all regs now live. Note that insns which are born or
2938 die in a call do not cross a call, so this must be done
2939 after the killings (above) and before the births
2941 p = regs_sometimes_live;
2942 for (i = 0; i < sometimes_max; i++, p++)
2943 if (bb_live_regs[p->offset] & (1 << p->bit))
2944 p->calls_crossed += 1;
2947 /* Make every register used live, and add REG_DEAD notes for
2948 registers which were not live before we started. */
2949 attach_deaths_insn (insn);
2951 /* Find registers now made live by that instruction. */
2952 for (i = 0; i < regset_size; i++)
2954 int diff = bb_live_regs[i] & ~old_live_regs[i];
2958 old_live_regs[i] |= diff;
2959 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
2960 if (diff & (1 << bit))
2962 = new_sometimes_live (regs_sometimes_live, i, bit,
2967 /* Count lengths of all regs we are worrying about now,
2968 and handle registers no longer live. */
2970 for (i = 0; i < sometimes_max; i++)
2972 register struct sometimes *p = ®s_sometimes_live[i];
2973 int regno = p->offset*REGSET_ELT_BITS + p->bit;
2975 p->live_length += 1;
2977 if ((bb_live_regs[p->offset] & (1 << p->bit)) == 0)
2979 /* This is the end of one of this register's lifetime
2980 segments. Save the lifetime info collected so far,
2981 and clear its bit in the old_live_regs entry. */
2982 sched_reg_live_length[regno] += p->live_length;
2983 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2984 old_live_regs[p->offset] &= ~(1 << p->bit);
2986 /* Delete the reg_sometimes_live entry for this reg by
2987 copying the last entry over top of it. */
2988 *p = regs_sometimes_live[--sometimes_max];
2989 /* ...and decrement i so that this newly copied entry
2990 will be processed. */
2996 insn = PREV_INSN (insn);
2998 while (SCHED_GROUP_P (link));
3000 /* Set INSN back to the insn we are scheduling now. */
3004 /* Schedule INSN. Remove it from the ready list. */
3009 NEXT_INSN (insn) = last;
3010 PREV_INSN (last) = insn;
3013 /* Everything that precedes INSN now either becomes "ready", if
3014 it can execute immediately before INSN, or "pending", if
3015 there must be a delay. Give INSN high enough priority that
3016 at least one (maybe more) reg-killing insns can be launched
3017 ahead of all others. Mark INSN as scheduled by changing its
3019 INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
3020 new_ready = launch_links (insn, ready, n_ready);
3021 INSN_PRIORITY (insn) = DONE_PRIORITY;
3023 /* Schedule all prior insns that must not be moved. */
3024 if (SCHED_GROUP_P (insn))
3026 /* Disable these insns from being launched. */
3028 while (SCHED_GROUP_P (link))
3030 /* Disable these insns from being launched by anybody. */
3031 link = PREV_INSN (link);
3032 INSN_REF_COUNT (link) = 0;
3035 /* None of these insns can move forward into delay slots. */
3036 while (SCHED_GROUP_P (insn))
3038 insn = PREV_INSN (insn);
3039 new_ready = launch_links (insn, ready, new_ready);
3040 INSN_PRIORITY (insn) = DONE_PRIORITY;
3043 NEXT_INSN (insn) = last;
3044 PREV_INSN (last) = insn;
3052 if (reload_completed == 0)
3053 finish_sometimes_live (regs_sometimes_live, sometimes_max);
3055 /* HEAD is now the first insn in the chain of insns that
3056 been scheduled by the loop above.
3057 TAIL is the last of those insns. */
3060 /* NOTE_LIST is the end of a chain of notes previously found
3061 among the insns. Insert them at the beginning of the insns. */
3064 rtx note_head = note_list;
3065 while (PREV_INSN (note_head))
3066 note_head = PREV_INSN (note_head);
3068 PREV_INSN (head) = note_list;
3069 NEXT_INSN (note_list) = head;
3073 /* In theory, there should be no REG_DEAD notes leftover at the end.
3074 In practice, this can occur as the result of bugs in flow, combine.c,
3075 and/or sched.c. The values of the REG_DEAD notes remaining are
3076 meaningless, because dead_notes is just used as a free list. */
3078 if (dead_notes != 0)
3082 if (new_needs & NEED_HEAD)
3083 basic_block_head[b] = head;
3084 PREV_INSN (head) = prev_head;
3085 NEXT_INSN (prev_head) = head;
3087 if (new_needs & NEED_TAIL)
3088 basic_block_end[b] = tail;
3089 NEXT_INSN (tail) = next_tail;
3090 PREV_INSN (next_tail) = tail;
3092 /* Restore the line-number notes of each insn. */
3093 if (write_symbols != NO_DEBUG)
3095 rtx line, note, prev, new;
3098 head = basic_block_head[b];
3099 next_tail = NEXT_INSN (basic_block_end[b]);
3101 /* Determine the current line-number. We want to know the current
3102 line number of the first insn of the block here, in case it is
3103 different from the true line number that was saved earlier. If
3104 different, then we need a line number note before the first insn
3105 of this block. If it happens to be the same, then we don't want to
3106 emit another line number note here. */
3107 for (line = head; line; line = PREV_INSN (line))
3108 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3111 /* Walk the insns keeping track of the current line-number and inserting
3112 the line-number notes as needed. */
3113 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3114 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3116 else if (! (GET_CODE (insn) == NOTE
3117 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
3118 && (note = LINE_NOTE (insn)) != 0
3121 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
3122 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
3125 prev = PREV_INSN (insn);
3126 if (LINE_NOTE (note))
3128 /* Re-use the original line-number note. */
3129 LINE_NOTE (note) = 0;
3130 PREV_INSN (note) = prev;
3131 NEXT_INSN (prev) = note;
3132 PREV_INSN (insn) = note;
3133 NEXT_INSN (note) = insn;
3138 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
3139 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
3143 fprintf (file, ";; added %d line-number notes\n", notes);
3148 fprintf (file, ";; new basic block head = %d\n;; new basic block end = %d\n\n",
3149 INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
3152 /* Yow! We're done! */
3153 free_pending_lists ();
3158 /* Subroutine of split_hard_reg_notes. Searches X for any reference to
3159 REGNO, returning the rtx of the reference found if any. Otherwise,
3163 regno_use_in (regno, x)
3171 if (GET_CODE (x) == REG && REGNO (x) == regno)
3174 fmt = GET_RTX_FORMAT (GET_CODE (x));
3175 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3179 if (tem = regno_use_in (regno, XEXP (x, i)))
3182 else if (fmt[i] == 'E')
3183 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3184 if (tem = regno_use_in (regno , XVECEXP (x, i, j)))
3191 /* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
3192 needed for the hard register mentioned in the note. This can happen
3193 if the reference to the hard register in the original insn was split into
3194 several smaller hard register references in the split insns. */
3197 split_hard_reg_notes (note, first, last, orig_insn)
3198 rtx note, first, last, orig_insn;
3200 rtx reg, temp, link;
3201 int n_regs, i, new_reg;
3204 /* Assume that this is a REG_DEAD note. */
3205 if (REG_NOTE_KIND (note) != REG_DEAD)
3208 reg = XEXP (note, 0);
3210 n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
3212 /* ??? Could add check here to see whether, the hard register is referenced
3213 in the same mode as in the original insn. If so, then it has not been
3214 split, and the rest of the code below is unnecessary. */
3216 for (i = 1; i < n_regs; i++)
3218 new_reg = REGNO (reg) + i;
3220 /* Check for references to new_reg in the split insns. */
3221 for (insn = last; ; insn = PREV_INSN (insn))
3223 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3224 && (temp = regno_use_in (new_reg, PATTERN (insn))))
3226 /* Create a new reg dead note here. */
3227 link = rtx_alloc (EXPR_LIST);
3228 PUT_REG_NOTE_KIND (link, REG_DEAD);
3229 XEXP (link, 0) = temp;
3230 XEXP (link, 1) = REG_NOTES (insn);
3231 REG_NOTES (insn) = link;
3234 /* It isn't mentioned anywhere, so no new reg note is needed for
3242 /* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
3243 insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
3246 new_insn_dead_notes (pat, insn, last, orig_insn)
3247 rtx pat, insn, last, orig_insn;
3251 /* PAT is either a CLOBBER or a SET here. */
3252 dest = XEXP (pat, 0);
3254 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3255 || GET_CODE (dest) == STRICT_LOW_PART
3256 || GET_CODE (dest) == SIGN_EXTRACT)
3257 dest = XEXP (dest, 0);
3259 if (GET_CODE (dest) == REG)
3261 for (tem = last; tem != insn; tem = PREV_INSN (tem))
3263 if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
3264 && reg_overlap_mentioned_p (dest, PATTERN (tem))
3265 && (set = single_set (tem)))
3267 rtx tem_dest = SET_DEST (set);
3269 while (GET_CODE (tem_dest) == ZERO_EXTRACT
3270 || GET_CODE (tem_dest) == SUBREG
3271 || GET_CODE (tem_dest) == STRICT_LOW_PART
3272 || GET_CODE (tem_dest) == SIGN_EXTRACT)
3273 tem_dest = XEXP (tem_dest, 0);
3275 if (tem_dest != dest)
3277 /* Use the same scheme as combine.c, don't put both REG_DEAD
3278 and REG_UNUSED notes on the same insn. */
3279 if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
3280 && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
3282 rtx note = rtx_alloc (EXPR_LIST);
3283 PUT_REG_NOTE_KIND (note, REG_DEAD);
3284 XEXP (note, 0) = dest;
3285 XEXP (note, 1) = REG_NOTES (tem);
3286 REG_NOTES (tem) = note;
3288 /* The reg only dies in one insn, the last one that uses
3292 else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
3293 /* We found an instruction that both uses the register,
3294 and sets it, so no new REG_NOTE is needed for this set. */
3298 /* If this is a set, it must die somewhere, unless it is the dest of
3299 the original insn, and hence is live after the original insn. Abort
3300 if it isn't supposed to be live after the original insn.
3302 If this is a clobber, then just add a REG_UNUSED note. */
3305 int live_after_orig_insn = 0;
3306 rtx pattern = PATTERN (orig_insn);
3309 if (GET_CODE (pat) == CLOBBER)
3311 rtx note = rtx_alloc (EXPR_LIST);
3312 PUT_REG_NOTE_KIND (note, REG_UNUSED);
3313 XEXP (note, 0) = dest;
3314 XEXP (note, 1) = REG_NOTES (insn);
3315 REG_NOTES (insn) = note;
3319 /* The original insn could have multiple sets, so search the
3320 insn for all sets. */
3321 if (GET_CODE (pattern) == SET)
3323 if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
3324 live_after_orig_insn = 1;
3326 else if (GET_CODE (pattern) == PARALLEL)
3328 for (i = 0; i < XVECLEN (pattern, 0); i++)
3329 if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
3330 && reg_overlap_mentioned_p (dest,
3331 SET_DEST (XVECEXP (pattern,
3333 live_after_orig_insn = 1;
3336 if (! live_after_orig_insn)
3342 /* Subroutine of update_flow_info. Update the value of reg_n_sets for all
3343 registers modified by X. INC is -1 if the containing insn is being deleted,
3344 and is 1 if the containing insn is a newly generated insn. */
3347 update_n_sets (x, inc)
3351 rtx dest = SET_DEST (x);
3353 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3354 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3355 dest = SUBREG_REG (dest);
3357 if (GET_CODE (dest) == REG)
3359 int regno = REGNO (dest);
3361 if (regno < FIRST_PSEUDO_REGISTER)
3364 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
3366 for (i = regno; i < endregno; i++)
3367 reg_n_sets[i] += inc;
3370 reg_n_sets[regno] += inc;
3374 /* Updates all flow-analysis related quantities (including REG_NOTES) for
3375 the insns from FIRST to LAST inclusive that were created by splitting
3376 ORIG_INSN. NOTES are the original REG_NOTES. */
/* NOTE(review): a number of physical lines of this function have been lost;
   the numeric prefix on each surviving line is a stale line-number artifact,
   and gaps in that numbering mark the missing text (apparently braces, case
   labels, abort calls and blank lines).  Comments below hedge accordingly.  */
3379 update_flow_info (notes, first, last, orig_insn)
3386 rtx orig_dest, temp;
3389 /* Get and save the destination set by the original insn. */
3391 orig_dest = single_set (orig_insn);
3393 orig_dest = SET_DEST (orig_dest);
3395 /* Move REG_NOTES from the original insn to where they now belong. */
3397 for (note = notes; note; note = next)
3399 next = XEXP (note, 1);
3400 switch (REG_NOTE_KIND (note))
/* Presumably the REG_DEAD / REG_UNUSED case -- the case labels themselves
   are among the lost lines; the REG_UNUSED conversion below supports this
   reading.  TODO confirm against an intact copy of sched.c.  */
3404 /* Move these notes from the original insn to the last new insn where
3405 the register is now set. */
3407 for (insn = last; ; insn = PREV_INSN (insn))
3409 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3410 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3412 XEXP (note, 1) = REG_NOTES (insn);
3413 REG_NOTES (insn) = note;
3415 /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
3417 /* ??? This won't handle multiple word registers correctly,
3418 but should be good enough for now. */
3419 if (REG_NOTE_KIND (note) == REG_UNUSED
3420 && ! dead_or_set_p (insn, XEXP (note, 0)))
3421 PUT_REG_NOTE_KIND (note, REG_DEAD);
3423 /* The reg only dies in one insn, the last one that uses
3427 /* It must die somewhere, fail if we couldn't find where it died.
3429 If this is a REG_UNUSED note, then it must be a temporary
3430 register that was not needed by this instantiation of the
3431 pattern, so we can safely ignore it. */
3434 if (REG_NOTE_KIND (note) != REG_UNUSED)
3441 /* If this note refers to a multiple word hard register, it may
3442 have been split into several smaller hard register references.
3443 Check to see if there are any new register references that
3444 need REG_NOTES added for them. */
3445 temp = XEXP (note, 0);
3446 if (REG_NOTE_KIND (note) == REG_DEAD
3447 && GET_CODE (temp) == REG
3448 && REGNO (temp) < FIRST_PSEUDO_REGISTER
3449 && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)))
3450 split_hard_reg_notes (note, first, last, orig_insn);
3454 /* This note applies to the dest of the original insn. Find the
3455 first new insn that now has the same dest, and move the note
3461 for (insn = first; ; insn = NEXT_INSN (insn))
3463 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3464 && (temp = single_set (insn))
3465 && rtx_equal_p (SET_DEST (temp), orig_dest))
3467 XEXP (note, 1) = REG_NOTES (insn);
3468 REG_NOTES (insn) = note;
3469 /* The reg is only zero before one insn, the first that
3473 /* It must be set somewhere, fail if we couldn't find where it
3482 /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
3483 set is meaningless. Just drop the note. */
3487 case REG_NO_CONFLICT:
3488 /* These notes apply to the dest of the original insn. Find the last
3489 new insn that now has the same dest, and move the note there. */
3494 for (insn = last; ; insn = PREV_INSN (insn))
3496 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3497 && (temp = single_set (insn))
3498 && rtx_equal_p (SET_DEST (temp), orig_dest))
3500 XEXP (note, 1) = REG_NOTES (insn);
3501 REG_NOTES (insn) = note;
3502 /* Only put this note on one of the new insns. */
3506 /* The original dest must still be set someplace. Abort if we
3507 couldn't find it. */
3514 /* Move a REG_LIBCALL note to the first insn created, and update
3515 the corresponding REG_RETVAL note. */
3516 XEXP (note, 1) = REG_NOTES (first);
3517 REG_NOTES (first) = note;
3519 insn = XEXP (note, 0);
3520 note = find_reg_note (insn, REG_RETVAL, 0);
3522 XEXP (note, 0) = first;
3526 /* Move a REG_RETVAL note to the last insn created, and update
3527 the corresponding REG_LIBCALL note. */
3528 XEXP (note, 1) = REG_NOTES (last);
3529 REG_NOTES (last) = note;
3531 insn = XEXP (note, 0);
3532 note = find_reg_note (insn, REG_LIBCALL, 0);
3534 XEXP (note, 0) = last;
3538 /* This should be moved to whichever instruction is a JUMP_INSN. */
3540 for (insn = last; ; insn = PREV_INSN (insn))
3542 if (GET_CODE (insn) == JUMP_INSN)
3544 XEXP (note, 1) = REG_NOTES (insn);
3545 REG_NOTES (insn) = note;
3546 /* Only put this note on one of the new insns. */
3549 /* Fail if we couldn't find a JUMP_INSN. */
3556 /* This should be moved to whichever instruction now has the
3557 increment operation. */
3561 /* Should be moved to the new insn(s) which use the label. */
3562 for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
3563 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3564 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3565 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL,
3566 XEXP (note, 0), REG_NOTES (insn));
3571 /* These two notes will never appear until after reorg, so we don't
3572 have to handle them here. */
3578 /* Each new insn created, except the last, has a new set. If the destination
3579 is a register, then this reg is now live across several insns, whereas
3580 previously the dest reg was born and died within the same insn. To
3581 reflect this, we now need a REG_DEAD note on the insn where this
3584 Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
3586 for (insn = first; insn != last; insn = NEXT_INSN (insn))
3591 pat = PATTERN (insn);
3592 if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
3593 new_insn_dead_notes (pat, insn, last, orig_insn);
3594 else if (GET_CODE (pat) == PARALLEL)
3596 for (i = 0; i < XVECLEN (pat, 0); i++)
3597 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3598 || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
3599 new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
3603 /* If any insn, except the last, uses the register set by the last insn,
3604 then we need a new REG_DEAD note on that insn. In this case, there
3605 would not have been a REG_DEAD note for this register in the original
3606 insn because it was used and set within one insn.
3608 There is no new REG_DEAD note needed if the last insn uses the register
3609 that it is setting. */
3611 set = single_set (last);
3614 rtx dest = SET_DEST (set);
3616 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3617 || GET_CODE (dest) == STRICT_LOW_PART
3618 || GET_CODE (dest) == SIGN_EXTRACT)
3619 dest = XEXP (dest, 0);
3621 if (GET_CODE (dest) == REG
3622 && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
3624 for (insn = PREV_INSN (last); ; insn = PREV_INSN (insn))
3626 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3627 && reg_mentioned_p (dest, PATTERN (insn))
3628 && (set = single_set (insn)))
3630 rtx insn_dest = SET_DEST (set);
3632 while (GET_CODE (insn_dest) == ZERO_EXTRACT
3633 || GET_CODE (insn_dest) == SUBREG
3634 || GET_CODE (insn_dest) == STRICT_LOW_PART
3635 || GET_CODE (insn_dest) == SIGN_EXTRACT)
3636 insn_dest = XEXP (insn_dest, 0);
3638 if (insn_dest != dest)
3640 note = rtx_alloc (EXPR_LIST);
3641 PUT_REG_NOTE_KIND (note, REG_DEAD);
3642 XEXP (note, 0) = dest;
3643 XEXP (note, 1) = REG_NOTES (insn);
3644 REG_NOTES (insn) = note;
3645 /* The reg only dies in one insn, the last one
3656 /* If the original dest is modifying a multiple register target, and the
3657 original instruction was split such that the original dest is now set
3658 by two or more SUBREG sets, then the split insns no longer kill the
3659 destination of the original insn.
3661 In this case, if there exists an instruction in the same basic block,
3662 before the split insn, which uses the original dest, and this use is
3663 killed by the original insn, then we must remove the REG_DEAD note on
3664 this insn, because it is now superfluous.
3666 This does not apply when a hard register gets split, because the code
3667 knows how to handle overlapping hard registers properly. */
3668 if (orig_dest && GET_CODE (orig_dest) == REG)
3670 int found_orig_dest = 0;
3671 int found_split_dest = 0;
3673 for (insn = first; ; insn = NEXT_INSN (insn))
3675 set = single_set (insn);
3678 if (GET_CODE (SET_DEST (set)) == REG
3679 && REGNO (SET_DEST (set)) == REGNO (orig_dest))
3681 found_orig_dest = 1;
3684 else if (GET_CODE (SET_DEST (set)) == SUBREG
3685 && SUBREG_REG (SET_DEST (set)) == orig_dest)
3687 found_split_dest = 1;
3696 if (found_split_dest)
3698 /* Search backwards from FIRST, looking for the first insn that uses
3699 the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
3700 If we find an insn, and it has a REG_DEAD note, then delete the
3703 for (insn = first; insn; insn = PREV_INSN (insn))
3705 if (GET_CODE (insn) == CODE_LABEL
3706 || GET_CODE (insn) == JUMP_INSN)
3708 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3709 && reg_mentioned_p (orig_dest, insn))
3711 note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
3713 remove_note (insn, note);
3717 else if (! found_orig_dest)
3719 /* This should never happen. */
3724 /* Update reg_n_sets. This is necessary to prevent local alloc from
3725 converting REG_EQUAL notes to REG_EQUIV when splitting has modified
3726 a reg from set once to set multiple times. */
/* First subtract the sets of the original insn, then add the sets of
   each newly created insn.  */
3729 rtx x = PATTERN (orig_insn);
3730 RTX_CODE code = GET_CODE (x);
3732 if (code == SET || code == CLOBBER)
3733 update_n_sets (x, -1);
3734 else if (code == PARALLEL)
3737 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3739 code = GET_CODE (XVECEXP (x, 0, i));
3740 if (code == SET || code == CLOBBER)
3741 update_n_sets (XVECEXP (x, 0, i), -1);
3745 for (insn = first; ; insn = NEXT_INSN (insn))
3748 code = GET_CODE (x);
3750 if (code == SET || code == CLOBBER)
3751 update_n_sets (x, 1);
3752 else if (code == PARALLEL)
3755 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3757 code = GET_CODE (XVECEXP (x, 0, i));
3758 if (code == SET || code == CLOBBER)
3759 update_n_sets (XVECEXP (x, 0, i), 1);
/* NOTE(review): this excerpt of schedule_insns is an elided sampling of
   the original function -- many interior lines (braces, declarations,
   `else' keywords, fprintf heads) are missing, and every surviving line
   carries a stray leading line number from the extraction.  All original
   tokens are preserved byte-for-byte below; only comments have been
   added.  Confirm any claim here against the complete source.  */
3769 /* The one entry point in this file. DUMP_FILE is the dump file for
3773 schedule_insns (dump_file)
/* Worst-case uid bound after splitting: every existing insn may become
   up to MAX_INSNS_PER_SPLIT insns, so size all per-uid vectors by this.  */
3776 int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
3780 /* Taking care of this degenerate case makes the rest of
3781 this code simpler. */
3782 if (n_basic_blocks == 0)
/* Anchor insn: dependencies hung off it keep insns ordered with respect
   to the next function call (see comment on the next line).  */
3785 /* Create an insn here so that we can hang dependencies off of it later. */
3786 sched_before_next_call = gen_rtx (INSN, VOIDmode, 0, 0, 0, 0, 0, 0, 0);
3788 /* Initialize the unused_*_lists. We can't use the ones left over from
3789 the previous function, because gcc has freed that memory. We can use
3790 the ones left over from the first sched pass in the second pass however,
3791 so only clear them on the first sched pass. The first pass is before
3792 reload if flag_schedule_insns is set, otherwise it is afterwards. */
3794 if (reload_completed == 0 || ! flag_schedule_insns)
3796 unused_insn_list = 0;
3797 unused_expr_list = 0;
3800 /* We create no insns here, only reorder them, so we
3801 remember how far we can cut back the stack on exit. */
3803 /* Allocate data for this pass. See comments, above,
3804 for what these vectors do. */
/* Per-insn vectors, indexed by INSN_UID; stack-allocated for the
   duration of the pass (freed implicitly on return).  */
3805 insn_luid = (int *) alloca (max_uid * sizeof (int));
3806 insn_priority = (int *) alloca (max_uid * sizeof (int));
3807 insn_costs = (short *) alloca (max_uid * sizeof (short));
3808 insn_ref_count = (int *) alloca (max_uid * sizeof (int));
/* Before reload we also track per-register lifetime data, so the global
   reg_* arrays can be refreshed from the sched_reg_* copies at the end
   of this function (see the regno loop below).  */
3810 if (reload_completed == 0)
3812 sched_reg_n_deaths = (short *) alloca (max_regno * sizeof (short));
3813 sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
3814 sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
3815 bb_dead_regs = (regset) alloca (regset_bytes);
3816 bb_live_regs = (regset) alloca (regset_bytes);
/* calls_crossed and live_length start at zero and are accumulated
   during scheduling; n_deaths starts from the flow-analysis counts.  */
3817 bzero (sched_reg_n_calls_crossed, max_regno * sizeof (int));
3818 bzero (sched_reg_live_length, max_regno * sizeof (int));
3819 bcopy (reg_n_deaths, sched_reg_n_deaths, max_regno * sizeof (short));
3820 init_alias_analysis ();
/* After reload: register-lifetime tracking is not needed, so the
   pointers are cleared instead of allocated.  */
3824 sched_reg_n_deaths = 0;
3825 sched_reg_n_calls_crossed = 0;
3826 sched_reg_live_length = 0;
/* NOTE(review): presumably alias analysis was already initialized by
   the pre-reload pass when flag_schedule_insns is set, hence the guard
   here -- confirm against the full source.  */
3829 if (! flag_schedule_insns)
3830 init_alias_analysis ();
/* When emitting debug info, remember line-number notes so they can be
   restored/cleaned up after scheduling moves insns around.  */
3833 if (write_symbols != NO_DEBUG)
3837 line_note = (rtx *) alloca (max_uid * sizeof (rtx));
3838 bzero (line_note, max_uid * sizeof (rtx));
3839 line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
3840 bzero (line_note_head, n_basic_blocks * sizeof (rtx));
3842 /* Determine the line-number at the start of each basic block.
3843 This must be computed and saved now, because after a basic block's
3844 predecessor has been scheduled, it is impossible to accurately
3845 determine the correct line number for the first insn of the block. */
3847 for (b = 0; b < n_basic_blocks; b++)
3848 for (line = basic_block_head[b]; line; line = PREV_INSN (line))
3849 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3851 line_note_head[b] = line;
/* Clear the per-insn vectors before scheduling begins.  */
3856 bzero (insn_luid, max_uid * sizeof (int));
3857 bzero (insn_priority, max_uid * sizeof (int));
3858 bzero (insn_costs, max_uid * sizeof (short));
3859 bzero (insn_ref_count, max_uid * sizeof (int));
3861 /* Schedule each basic block, block by block. */
/* Guarantee some insn follows the last block's end (a deleted-note
   placeholder) -- NOTE(review): presumably so block-boundary walks
   below never run off the insn chain; confirm.  */
3863 if (NEXT_INSN (basic_block_end[n_basic_blocks-1]) == 0
3864 || (GET_CODE (basic_block_end[n_basic_blocks-1]) != NOTE
3865 && GET_CODE (basic_block_end[n_basic_blocks-1]) != CODE_LABEL))
3866 emit_note_after (NOTE_INSN_DELETED, basic_block_end[n_basic_blocks-1]);
/* Pre-pass over each block: delete no-op moves (after reload) and split
   insns (before reload), then schedule the block.  */
3868 for (b = 0; b < n_basic_blocks; b++)
3875 for (insn = basic_block_head[b]; ; insn = next)
3880 /* Can't use `next_real_insn' because that
3881 might go across CODE_LABELS and short-out basic blocks. */
3882 next = NEXT_INSN (insn);
3883 if (GET_CODE (insn) != INSN)
3885 if (insn == basic_block_end[b])
3891 /* Don't split no-op move insns. These should silently disappear
3892 later in final. Splitting such insns would break the code
3893 that handles REG_NO_CONFLICT blocks. */
3894 set = single_set (insn);
3895 if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
3897 if (insn == basic_block_end[b])
3900 /* Nops get in the way while scheduling, so delete them now if
3901 register allocation has already been done. It is too risky
3902 to try to do this before register allocation, and there are
3903 unlikely to be very many nops then anyways. */
3904 if (reload_completed)
/* Deletion is done by turning the insn into a NOTE_INSN_DELETED note
   in place, not by unlinking it from the chain.  */
3906 PUT_CODE (insn, NOTE);
3907 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
3908 NOTE_SOURCE_FILE (insn) = 0;
3914 /* Split insns here to get max fine-grain parallelism. */
3915 prev = PREV_INSN (insn);
3916 if (reload_completed == 0)
/* FIRST brackets the split result from the left: it starts as the insn
   before INSN and is advanced past it once the split succeeds.  */
3918 rtx last, first = PREV_INSN (insn);
3919 rtx notes = REG_NOTES (insn);
3921 last = try_split (PATTERN (insn), insn, 1);
3924 /* try_split returns the NOTE that INSN became. */
3925 first = NEXT_INSN (first);
/* Move REG_NOTES and dataflow info from the original insn onto the
   new insns in [first, last] (see update_flow_info above).  */
3926 update_flow_info (notes, first, last, insn);
3928 PUT_CODE (insn, NOTE);
3929 NOTE_SOURCE_FILE (insn) = 0;
3930 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
/* Keep the block boundary markers valid across the split.  */
3931 if (insn == basic_block_head[b])
3932 basic_block_head[b] = first;
3933 if (insn == basic_block_end[b])
3935 basic_block_end[b] = last;
3941 if (insn == basic_block_end[b])
/* The actual list scheduling of block B.  */
3945 schedule_block (b, dump_file);
3952 /* Reposition the prologue and epilogue notes in case we moved the
3953 prologue/epilogue insns. */
3954 if (reload_completed)
3955 reposition_prologue_and_epilogue_notes (get_insns ());
/* Debug-info cleanup: walk the whole function backwards, deleting
   line-number notes that no live insn follows or that repeat the
   current line.  */
3957 if (write_symbols != NO_DEBUG)
3960 rtx insn = get_insns ();
3961 int active_insn = 0;
3964 /* Walk the insns deleting redundant line-number notes. Many of these
3965 are already present. The remainder tend to occur at basic
3966 block boundaries. */
3967 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
3968 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3970 /* If there are no active insns following, INSN is redundant. */
3971 if (active_insn == 0)
3974 NOTE_SOURCE_FILE (insn) = 0;
3975 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
3977 /* If the line number is unchanged, LINE is redundant. */
3979 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
3980 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
3983 NOTE_SOURCE_FILE (line) = 0;
3984 NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
/* Deleted notes and bare USE/CLOBBER insns do not count as "active":
   they generate no code, so a line note whose only followers are such
   insns is still redundant.  */
3991 else if (! ((GET_CODE (insn) == NOTE
3992 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
3993 || (GET_CODE (insn) == INSN
3994 && (GET_CODE (PATTERN (insn)) == USE
3995 || GET_CODE (PATTERN (insn)) == CLOBBER))))
3998 if (dump_file && notes)
3999 fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
/* Pre-reload only: push the lifetime data accumulated in the
   sched_reg_* copies back into the global reg_* arrays, reporting
   interesting changes to the dump file.  */
4002 if (reload_completed == 0)
4005 for (regno = 0; regno < max_regno; regno++)
4006 if (sched_reg_live_length[regno])
4010 if (reg_live_length[regno] > sched_reg_live_length[regno])
4012 ";; register %d life shortened from %d to %d\n",
4013 regno, reg_live_length[regno],
4014 sched_reg_live_length[regno]);
4015 /* Negative values are special; don't overwrite the current
4016 reg_live_length value if it is negative. */
4017 else if (reg_live_length[regno] < sched_reg_live_length[regno]
4018 && reg_live_length[regno] >= 0)
4020 ";; register %d life extended from %d to %d\n",
4021 regno, reg_live_length[regno],
4022 sched_reg_live_length[regno]);
4024 if (reg_n_calls_crossed[regno]
4025 && ! sched_reg_n_calls_crossed[regno])
4027 ";; register %d no longer crosses calls\n", regno);
4028 else if (! reg_n_calls_crossed[regno]
4029 && sched_reg_n_calls_crossed[regno])
4031 ";; register %d now crosses calls\n", regno);
4033 /* Negative values are special; don't overwrite the current
4034 reg_live_length value if it is negative. */
4035 if (reg_live_length[regno] >= 0)
4036 reg_live_length[regno] = sched_reg_live_length[regno];
4037 reg_n_calls_crossed[regno] = sched_reg_n_calls_crossed[regno];
4041 #endif /* INSN_SCHEDULING */