/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com).
This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

GNU CC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"

extern char *reg_known_equiv_p;
extern rtx *reg_known_value;

static regset_head reg_pending_sets_head;
static regset_head reg_pending_clobbers_head;

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static int reg_pending_sets_all;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically about five instructions
   between branches for typical C code, so we can guess that the average
   basic block is approximately five instructions long; we will choose
   100X the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap for the true dependency cache is
   allocated, then the other two caches are allocated as well.  */
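
/* A minimal sketch, kept out of the build with "#if 0", of how a
   cached duplicate-link test looks.  cached_true_dep_p is a
   hypothetical helper, not part of this file's interface; both axes
   of each cache are indexed by INSN_LUID, so the test is a single
   bit probe.  */
#if 0
static int
cached_true_dep_p (insn, elem)
     rtx insn, elem;
{
  /* Nonzero if a true dependence INSN -> ELEM is already recorded.  */
  return (true_dependency_cache != NULL
          && TEST_BIT (true_dependency_cache[INSN_LUID (insn)],
                       INSN_LUID (elem)));
}
#endif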
static sbitmap *true_dependency_cache;
static sbitmap *anti_dependency_cache;
static sbitmap *output_dependency_cache;
/* To speed up checking consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   could be switching off checking duplication of insns in forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static sbitmap *forward_dependency_cache;
#endif
static int deps_may_trap_p PARAMS ((rtx));
static void remove_dependence PARAMS ((rtx, rtx));
static void set_sched_group_p PARAMS ((rtx));

static void flush_pending_lists PARAMS ((struct deps *, rtx, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
static rtx group_leader PARAMS ((rtx));

static rtx get_condition PARAMS ((rtx));
static int conditions_mutex_p PARAMS ((rtx, rtx));
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (mem)
     rtx mem;
{
  rtx addr = XEXP (mem, 0);

  if (REG_P (addr)
      && ORIGINAL_REGNO (addr) >= FIRST_PSEUDO_REGISTER
      && reg_known_value[ORIGINAL_REGNO (addr)])
    addr = reg_known_value[ORIGINAL_REGNO (addr)];
  return rtx_addr_can_trap_p (addr);
}
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
    }
  return 0;
}
/* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0
   otherwise.  */

int
find_insn_mem_list (insn, x, list, list1)
     rtx insn, x;
     rtx list, list1;
{
  while (list)
    {
      if (XEXP (list, 0) == insn
          && XEXP (list1, 0) == x)
        return 1;
      list = XEXP (list, 1);
      list1 = XEXP (list1, 1);
    }
  return 0;
}
/* Find the condition under which INSN is executed.  */

static rtx
get_condition (insn)
     rtx insn;
{
  rtx pat = PATTERN (insn);
  rtx cond;

  if (pat == 0)
    return 0;
  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;
  if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
    return 0;
  if (GET_CODE (SET_SRC (pat)) != IF_THEN_ELSE)
    return 0;
  pat = SET_SRC (pat);
  cond = XEXP (pat, 0);
  if (GET_CODE (XEXP (pat, 1)) == LABEL_REF
      && XEXP (pat, 2) == pc_rtx)
    return cond;
  else if (GET_CODE (XEXP (pat, 2)) == LABEL_REF
           && XEXP (pat, 1) == pc_rtx)
    return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
                           XEXP (cond, 0), XEXP (cond, 1));
  return 0;
}
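
/* For example, given the conditional jump

     (set (pc) (if_then_else (eq (reg 1) (const_int 0))
                             (label_ref 23)
                             (pc)))

   get_condition yields (eq (reg 1) (const_int 0)); with the arms
   swapped it yields the reversed condition (ne (reg 1) (const_int 0))
   instead.  */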
/* Return nonzero if conditions COND1 and COND2 can never both be true.  */

static int
conditions_mutex_p (cond1, cond2)
     rtx cond1, cond2;
{
  if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
      && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
      && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
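
/* E.g. (eq (reg 1) (const_int 0)) and (ne (reg 1) (const_int 0)) are
   mutually exclusive: two insns predicated on them can never both
   execute, so no dependence need be recorded between them.  */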
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;
  int present_p = 1;
  enum reg_note present_dep_type;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (GET_CODE (elem) == NOTE)
    return;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
    {
      rtx cond1, cond2;

      cond1 = get_condition (insn);
      cond2 = get_condition (elem);
      if (cond1 && cond2 && conditions_mutex_p (cond1, cond2))
        return;
    }

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */
  next = next_nonnote_insn (elem);
  if (next && SCHED_GROUP_P (next)
      && GET_CODE (next) != CODE_LABEL)
    {
      rtx nnext;

      /* Notes will never intervene here though, so don't bother checking
         for them.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
         that has LABEL_PRESERVE_P set, which is represented by the same
         bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
         SCHED_GROUP_P.  */
      while ((nnext = next_nonnote_insn (next)) != NULL
             && SCHED_GROUP_P (nnext)
             && GET_CODE (nnext) != CODE_LABEL)
        next = nnext;

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
        return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }

#ifdef INSN_SCHEDULING
  /* ??? No good way to tell from here whether we're doing interblock
     scheduling.  Possibly add another callback.  */
  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.  Note: the edge where
     elem is a CALL is still required.  */
  if (GET_CODE (insn) == CALL_INSN
      && (INSN_BB (elem) != INSN_BB (insn)))
    return;

  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
        abort ();
      if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
        present_dep_type = 0;
      else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_ANTI;
      else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_OUTPUT;
      else
        present_p = 0;
      if (present_p && (int) dep_type >= (int) present_dep_type)
        return;
    }
#endif

  /* Check that we don't already have this dependence.  */
  if (present_p)
    for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 0) == elem)
        {
#ifdef INSN_SCHEDULING
          /* Clear corresponding cache entry because type of the link
             may be changed.  */
          if (true_dependency_cache != NULL)
            {
              if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
                       && output_dependency_cache)
                RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
            }
#endif

          /* If this is a more restrictive type of dependence than the existing
             one, then change the existing dependence to this type.  */
          if ((int) dep_type < (int) REG_NOTE_KIND (link))
            PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
          /* If we are adding a dependency to INSN's LOG_LINKs, then
             note that in the bitmap caches of dependency information.  */
          if (true_dependency_cache != NULL)
            {
              if ((int) REG_NOTE_KIND (link) == 0)
                SET_BIT (true_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
                SET_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
            }
#endif
          return;
        }

  /* Might want to check one level of transitivity to save conses.  */

  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if ((int) dep_type == 0)
        SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_ANTI)
        SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_OUTPUT)
        SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
    }
#endif
}
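
/* Note for the "more restrictive" test above: a plain true dependence
   has kind 0 and compares lower than REG_DEP_ANTI or REG_DEP_OUTPUT.
   For a hypothetical pair of insns i1 and i2,

     add_dependence (i2, i1, REG_DEP_ANTI);
     add_dependence (i2, i1, 0);

   therefore leaves a single link of kind 0 (a true dependence), the
   stronger of the two.  */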
/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */

static void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link, next;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link; link = next)
    {
      next = XEXP (link, 1);
      if (XEXP (link, 0) == elem)
        {
          if (prev)
            XEXP (prev, 1) = next;
          else
            LOG_LINKS (insn) = next;

#ifdef INSN_SCHEDULING
          /* If we are removing a dependency from the LOG_LINKS list,
             make sure to remove it from the cache too.  */
          if (true_dependency_cache != NULL)
            {
              if (REG_NOTE_KIND (link) == 0)
                RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
                RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
            }
#endif
          free_INSN_LIST_node (link);
          found = 1;
        }
      else
        prev = link;
    }

  if (!found)
    abort ();
}
/* Return an insn which represents a SCHED_GROUP, which is
   the last insn in the group.  */

static rtx
group_leader (insn)
     rtx insn;
{
  rtx prev;

  do
    {
      prev = insn;
      insn = next_nonnote_insn (insn);
    }
  while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));

  return prev;
}
/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
   goes along with that.  */

static void
set_sched_group_p (insn)
     rtx insn;
{
  rtx link, prev;

  SCHED_GROUP_P (insn) = 1;

  /* There may be a note before this insn now, but all notes will
     be removed before we actually try to schedule the insns, so
     it won't cause a problem later.  We must avoid it here though.  */
  prev = prev_nonnote_insn (insn);

  /* Make a copy of all dependencies on the immediately previous insn,
     and add to this insn.  This is so that all the dependencies will
     apply to the group.  Remove an explicit dependence on this insn
     as SCHED_GROUP_P now represents it.  */

  if (find_insn_list (prev, LOG_LINKS (insn)))
    remove_dependence (insn, prev);

  for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
    add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
}
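
/* For example, on a HAVE_cc0 target the compare that sets cc0 and the
   conditional jump that uses it must issue back to back; marking the
   jump with set_sched_group_p glues it to the compare and copies the
   compare's dependencies onto the jump.  */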
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
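
/* For instance, in the straight-line sequence

     insn 1:  *p = a;    (write)
     insn 2:  b = *p;    (read:  true dependence on insn 1)
     insn 3:  *p = c;    (write: anti dependence on insn 2,
                          output dependence on insn 1)

   the output dependence 3 -> 1 already follows transitively from
   2 -> 1 and 3 -> 2; that is the kind of redundant link we try not
   to build.  */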
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
     struct deps *deps;
     rtx *insn_list, *mem_list, insn, mem;
{
  rtx link;

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
  *mem_list = link;

  deps->pending_lists_length++;
}
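
/* The two lists grow in lock step: the insn in the i-th node of an
   insn list made the memory reference in the i-th node of the matching
   mem list, and find_insn_mem_list above relies on walking the two in
   parallel.  */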
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  If ONLY_WRITE, don't flush
   the read list.  */

static void
flush_pending_lists (deps, insn, only_write)
     struct deps *deps;
     rtx insn;
     int only_write;
{
  rtx u;
  rtx link;

  while (deps->pending_read_insns && ! only_write)
    {
      add_dependence (insn, XEXP (deps->pending_read_insns, 0),
                      REG_DEP_ANTI);

      link = deps->pending_read_insns;
      deps->pending_read_insns = XEXP (deps->pending_read_insns, 1);
      free_INSN_LIST_node (link);

      link = deps->pending_read_mems;
      deps->pending_read_mems = XEXP (deps->pending_read_mems, 1);
      free_EXPR_LIST_node (link);
    }
  while (deps->pending_write_insns)
    {
      add_dependence (insn, XEXP (deps->pending_write_insns, 0),
                      REG_DEP_ANTI);

      link = deps->pending_write_insns;
      deps->pending_write_insns = XEXP (deps->pending_write_insns, 1);
      free_INSN_LIST_node (link);

      link = deps->pending_write_mems;
      deps->pending_write_mems = XEXP (deps->pending_write_mems, 1);
      free_EXPR_LIST_node (link);
    }
  deps->pending_lists_length = 0;

  /* last_pending_memory_flush is now a list of insns.  */
  for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

  free_INSN_LIST_list (&deps->last_pending_memory_flush);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
}
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (dest == 0)
    return;

  if (GET_CODE (dest) == PARALLEL
      && GET_MODE (dest) == BLKmode)
    {
      register int i;
      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        sched_analyze_1 (deps, XVECEXP (dest, 0, i), insn);
      if (GET_CODE (x) == SET)
        sched_analyze_2 (deps, SET_SRC (x), insn);
      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
        }
      dest = XEXP (dest, 0);
    }

  if (GET_CODE (dest) == REG)
    {
      register int i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
          while (--i >= 0)
            {
              int r = regno + i;
              rtx u;

              for (u = deps->reg_last_uses[r]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

              for (u = deps->reg_last_sets[r]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);

              /* Clobbers need not be ordered with respect to one
                 another, but sets must be ordered with respect to a
                 pending clobber.  */
              if (code == SET)
                {
                  if (GET_CODE (PATTERN (insn)) != COND_EXEC)
                    free_INSN_LIST_list (&deps->reg_last_uses[r]);
                  for (u = deps->reg_last_clobbers[r]; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
                  SET_REGNO_REG_SET (reg_pending_sets, r);
                }
              else
                SET_REGNO_REG_SET (reg_pending_clobbers, r);

              /* Function calls clobber all call_used regs.  */
              if (global_regs[r] || (code == SET && call_used_regs[r]))
                for (u = deps->last_function_call; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
            }
        }
      else
        {
          rtx u;

          for (u = deps->reg_last_uses[regno]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

          for (u = deps->reg_last_sets[regno]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);

          if (code == SET)
            {
              if (GET_CODE (PATTERN (insn)) != COND_EXEC)
                free_INSN_LIST_list (&deps->reg_last_uses[regno]);
              for (u = deps->reg_last_clobbers[regno]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
              SET_REGNO_REG_SET (reg_pending_sets, regno);
            }
          else
            SET_REGNO_REG_SET (reg_pending_clobbers, regno);

          /* Pseudos that are REG_EQUIV to something may be replaced
             by that during reloading.  We need only add dependencies for
             the address in the REG_EQUIV note.  */
          if (!reload_completed
              && reg_known_equiv_p[regno]
              && GET_CODE (reg_known_value[regno]) == MEM)
            sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */

          if (REG_N_CALLS_CROSSED (regno) == 0)
            for (u = deps->last_function_call; u; u = XEXP (u, 1))
              add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
        }
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (deps->pending_lists_length > 32)
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  The number 32 was chosen because it
             seems like a reasonable number.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             this value of 32.  */
          flush_pending_lists (deps, insn, 0);
        }
      else
        {
          rtx u;
          rtx pending, pending_mem;

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (anti_dependence (XEXP (pending_mem, 0), dest))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              if (output_dependence (XEXP (pending_mem, 0), dest))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

          add_insn_mem_dependence (deps, &deps->pending_write_insns,
                                   &deps->pending_write_mems, insn, dest);
        }
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
}
/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register const char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      set_sched_group_p (insn);
      return;
#endif

    case REG:
      {
        rtx u;
        int regno = REGNO (x);
        if (regno < FIRST_PSEUDO_REGISTER)
          {
            i = HARD_REGNO_NREGS (regno, GET_MODE (x));
            while (--i >= 0)
              {
                int r = regno + i;

                deps->reg_last_uses[r]
                  = alloc_INSN_LIST (insn, deps->reg_last_uses[r]);

                for (u = deps->reg_last_sets[r]; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), 0);

                /* ??? This should never happen.  */
                for (u = deps->reg_last_clobbers[r]; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), 0);

                if (call_used_regs[r] || global_regs[r])
                  /* Function calls clobber all call_used regs.  */
                  for (u = deps->last_function_call; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              }
          }
        else
          {
            deps->reg_last_uses[regno]
              = alloc_INSN_LIST (insn, deps->reg_last_uses[regno]);

            for (u = deps->reg_last_sets[regno]; u; u = XEXP (u, 1))
              add_dependence (insn, XEXP (u, 0), 0);

            /* ??? This should never happen.  */
            for (u = deps->reg_last_clobbers[regno]; u; u = XEXP (u, 1))
              add_dependence (insn, XEXP (u, 0), 0);

            /* Pseudos that are REG_EQUIV to something may be replaced
               by that during reloading.  We need only add dependencies for
               the address in the REG_EQUIV note.  */
            if (!reload_completed
                && reg_known_equiv_p[regno]
                && GET_CODE (reg_known_value[regno]) == MEM)
              sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

            /* If the register does not already cross any calls, then add this
               insn to the sched_before_next_call list so that it will still
               not cross calls after scheduling.  */
            if (REG_N_CALLS_CROSSED (regno) == 0)
              add_dependence (deps->sched_before_next_call, insn,
                              REG_DEP_ANTI);
          }
        return;
      }

    case MEM:
      {
        /* Reading memory.  */
        rtx u;
        rtx pending, pending_mem;

        pending = deps->pending_read_insns;
        pending_mem = deps->pending_read_mems;
        while (pending)
          {
            if (read_dependence (XEXP (pending_mem, 0), x))
              add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        pending = deps->pending_write_insns;
        pending_mem = deps->pending_write_mems;
        while (pending)
          {
            if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
                                 x, rtx_varies_p))
              add_dependence (insn, XEXP (pending, 0), 0);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
          if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
              || deps_may_trap_p (x))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        add_insn_mem_dependence (deps, &deps->pending_read_insns,
                                 &deps->pending_read_mems, insn, x);

        /* Take advantage of tail recursion here.  */
        sched_analyze_2 (deps, XEXP (x, 0), insn);
        return;
      }

    case TRAP_IF:
      /* Force pending stores to memory in case a trap handler needs them.  */
      flush_pending_lists (deps, insn, 1);
      /* Fall through.  */

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
      {
        rtx u;

        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.  */
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
          {
            int max_reg = max_reg_num ();
            for (i = 0; i < max_reg; i++)
              {
                for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                if (GET_CODE (PATTERN (insn)) != COND_EXEC)
                  free_INSN_LIST_list (&deps->reg_last_uses[i]);

                for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), 0);

                for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), 0);
              }
            reg_pending_sets_all = 1;

            flush_pending_lists (deps, insn, 0);
          }

        /* For all ASM_OPERANDS, we must traverse the vector of input operands.
           We can not just fall through here since then we would be confused
           by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
           traditional asms unlike their normal usage.  */
        if (code == ASM_OPERANDS)
          {
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
            return;
          }
        break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);
      return;
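
      /* E.g. for a stack push through (pre_dec (reg sp)), the call
         order above first records a use of sp (the read of the old
         value), then a set of sp (the decrement), so earlier users of
         sp precede this insn and later readers depend on it.  */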
    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }
}
/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (deps, x, insn, loop_notes)
     struct deps *deps;
     rtx x, insn;
     rtx loop_notes;
{
  register RTX_CODE code = GET_CODE (x);
  rtx link;
  int maxreg = max_reg_num ();
  int i;

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
         false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    sched_analyze_1 (deps, x, insn);
  else if (code == PARALLEL)
    {
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        {
          rtx sub = XVECEXP (x, 0, i);
          code = GET_CODE (sub);

          if (code == COND_EXEC)
            {
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
              sub = COND_EXEC_CODE (sub);
              code = GET_CODE (sub);
            }
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (deps, sub, insn);
          else
            sched_analyze_2 (deps, sub, insn);
        }
    }
  else
    sched_analyze_2 (deps, x, insn);

  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
      {
        if (GET_CODE (XEXP (link, 0)) == CLOBBER)
          sched_analyze_1 (deps, XEXP (link, 0), insn);
        else
          sched_analyze_2 (deps, XEXP (link, 0), insn);
      }

  if (GET_CODE (insn) == JUMP_INSN)
    {
      rtx next, u, pending, pending_mem;
      next = next_nonnote_insn (insn);
      if (next && GET_CODE (next) == BARRIER)
        {
          for (i = 0; i < maxreg; i++)
            {
              for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
            }
        }
      else
        {
          regset_head tmp;
          INIT_REG_SET (&tmp);

          (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
          EXECUTE_IF_SET_IN_REG_SET
            (&tmp, 0, i,
             {
               for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
                 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
               deps->reg_last_uses[i]
                 = alloc_INSN_LIST (insn, deps->reg_last_uses[i]);
             });

          CLEAR_REG_SET (&tmp);
        }

      pending = deps->pending_write_insns;
      pending_mem = deps->pending_write_mems;
      while (pending)
        {
          add_dependence (insn, XEXP (pending, 0), 0);

          pending = XEXP (pending, 1);
          pending_mem = XEXP (pending_mem, 1);
        }

      for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
        add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
    }

  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */

  if (loop_notes)
    {
      int max_reg = max_reg_num ();
      int schedule_barrier_found = 0;

      /* Update loop_notes with any notes from this insn.  Also determine
         if any of the notes on the list correspond to instruction scheduling
         barriers (loop, eh & setjmp notes, but not range notes).  */
      link = loop_notes;
      while (XEXP (link, 1))
        {
          if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_SETJMP)
            schedule_barrier_found = 1;

          link = XEXP (link, 1);
        }
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;

      /* Add dependencies if a scheduling barrier was found.  */
      if (schedule_barrier_found)
        {
          rtx u;

          for (i = 0; i < max_reg; i++)
            {
              for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              if (GET_CODE (PATTERN (insn)) != COND_EXEC)
                free_INSN_LIST_list (&deps->reg_last_uses[i]);

              for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), 0);

              for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), 0);
            }
          reg_pending_sets_all = 1;

          flush_pending_lists (deps, insn, 0);
        }
    }

  /* Accumulate clobbers until the next set so that it will be output dependent
     on all of them.  At the next set we can clear the clobber list, since
     subsequent sets will be output dependent on it.  */
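  /* For example, in the sequence

       insn 1:  (clobber (reg 100))
       insn 2:  (clobber (reg 100))
       insn 3:  (set (reg 100) ...)

     insns 1 and 2 need no ordering between themselves, but insn 3
     becomes output dependent on both; once insn 3 is recorded in
     reg_last_sets, the clobber list can be cleared.  */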
  EXECUTE_IF_SET_IN_REG_SET
    (reg_pending_sets, 0, i,
     {
       if (GET_CODE (PATTERN (insn)) != COND_EXEC)
         {
           free_INSN_LIST_list (&deps->reg_last_sets[i]);
           free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
           deps->reg_last_sets[i] = 0;
         }
       deps->reg_last_sets[i]
         = alloc_INSN_LIST (insn, deps->reg_last_sets[i]);
     });
  EXECUTE_IF_SET_IN_REG_SET
    (reg_pending_clobbers, 0, i,
     {
       deps->reg_last_clobbers[i]
         = alloc_INSN_LIST (insn, deps->reg_last_clobbers[i]);
     });
  CLEAR_REG_SET (reg_pending_sets);
  CLEAR_REG_SET (reg_pending_clobbers);

  if (reg_pending_sets_all)
    {
      for (i = 0; i < maxreg; i++)
        {
          if (GET_CODE (PATTERN (insn)) != COND_EXEC)
            {
              free_INSN_LIST_list (&deps->reg_last_sets[i]);
              free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
              deps->reg_last_sets[i] = 0;
            }
          deps->reg_last_sets[i]
            = alloc_INSN_LIST (insn, deps->reg_last_sets[i]);
        }

      reg_pending_sets_all = 0;
    }

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */
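  /* E.g. on a target returning values in hard reg 0, the sequence

       (call_insn ...)
       (set (reg:SI 100) (reg:SI 0))

     keeps the group open at the move, so the copy out of the return
     register cannot be separated from the call.  */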
  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
        goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        dest_regno = REGNO (tmp);
      else
        goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        src_regno = REGNO (tmp);
      else
        goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
          || dest_regno < FIRST_PSEUDO_REGISTER)
        {
          set_sched_group_p (insn);
          CANT_MOVE (insn) = 1;
        }
      else
        {
        end_call_group:
          deps->in_post_call_group_p = 0;
        }
    }
}
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

void
sched_analyze (deps, head, tail)
     struct deps *deps;
     rtx head, tail;
{
  register rtx insn;
  register rtx u;
  rtx loop_notes = 0;

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
        {
          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          /* Clear out stale SCHED_GROUP_P.  */
          SCHED_GROUP_P (insn) = 0;

          /* Make each JUMP_INSN a scheduling barrier for memory
             references.  */
          if (GET_CODE (insn) == JUMP_INSN)
            deps->last_pending_memory_flush
              = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
          loop_notes = 0;
        }
      else if (GET_CODE (insn) == CALL_INSN)
        {
          rtx x;
          register int i;

          /* Clear out stale SCHED_GROUP_P.  */
          SCHED_GROUP_P (insn) = 0;

          CANT_MOVE (insn) = 1;

          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          /* Any instruction using a hard register which may get clobbered
             by a call needs to be marked as dependent on this call.
             This prevents a use of a hard return reg from being moved
             past a void call (i.e. it does not explicitly set the hard
             return reg).  */

          /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
             all registers, not just hard registers, may be clobbered by this
             call.  */

          /* Insn, being a CALL_INSN, magically depends on
             `last_function_call' already.  */

          if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
              && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
            {
              int max_reg = max_reg_num ();
              for (i = 0; i < max_reg; i++)
                {
                  for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                  free_INSN_LIST_list (&deps->reg_last_uses[i]);

                  for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), 0);

                  for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), 0);
                }
              reg_pending_sets_all = 1;

              /* Add a pair of REG_SAVE_NOTEs which we will later
                 convert back into a NOTE_INSN_SETJMP note.  See
                 reemit_notes for why we use a pair of NOTEs.  */
              REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                                  GEN_INT (0),
                                                  REG_NOTES (insn));
              REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                                  GEN_INT (NOTE_INSN_SETJMP),
                                                  REG_NOTES (insn));
            }
          else
            {
              for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
                if (call_used_regs[i] || global_regs[i])
                  {
                    for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
                      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

                    for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
                      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

                    SET_REGNO_REG_SET (reg_pending_clobbers, i);
                  }
            }

          /* For each insn which shouldn't cross a call, add a dependence
             between that insn and this call insn.  */
          x = LOG_LINKS (deps->sched_before_next_call);
          while (x)
            {
              add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
              x = XEXP (x, 1);
            }
          free_INSN_LIST_list (&LOG_LINKS (deps->sched_before_next_call));

          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
          loop_notes = 0;

          /* In the absence of interprocedural alias analysis, we must flush
             all pending reads and writes, and start new dependencies starting
             from here.  But only flush writes for constant calls (which may
             be passed a pointer to something we haven't written yet).  */
          flush_pending_lists (deps, insn, CONST_CALL_P (insn));

          /* Depend this function call (actually, the user of this
             function call) on all hard register clobberage.  */

          /* last_function_call is now a list of insns.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = 1;
        }

      /* See comments on reemit_notes as to why we do this.
         ??? Actually, the reemit_notes just say what is done, not why.  */

      else if (GET_CODE (insn) == NOTE
               && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
        {
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, NOTE_RANGE_INFO (insn),
                                        loop_notes);
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        GEN_INT (NOTE_LINE_NUMBER (insn)),
                                        loop_notes);
        }
      else if (GET_CODE (insn) == NOTE
               && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
                   || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
                       && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
        {
          rtx rtx_region;

          if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
            rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
          else
            rtx_region = GEN_INT (0);

          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        rtx_region,
                                        loop_notes);
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        GEN_INT (NOTE_LINE_NUMBER (insn)),
                                        loop_notes);
          CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
        }

      if (insn == tail)
        return;
    }
  abort ();
}
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */

void
compute_forward_dependences (head, tail)
     rtx head, tail;
{
  rtx insn, link;
  rtx next_tail;
  enum reg_note dep_type;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
        continue;

      insn = group_leader (insn);

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
        {
          rtx x = group_leader (XEXP (link, 0));
          rtx new_link;

          if (x != XEXP (link, 0))
            continue;

#ifdef ENABLE_CHECKING
          /* If add_dependence is working properly there should never
             be notes, deleted insns or duplicates in the backward
             links.  Thus we need not check for them here.

             However, if we have enabled checking we might as well go
             ahead and verify that add_dependence worked properly.  */
          if (GET_CODE (x) == NOTE
              || INSN_DELETED_P (x)
              || (forward_dependency_cache != NULL
                  && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
                               INSN_LUID (insn)))
              || (forward_dependency_cache == NULL
                  && find_insn_list (insn, INSN_DEPEND (x))))
            abort ();
          if (forward_dependency_cache != NULL)
            SET_BIT (forward_dependency_cache[INSN_LUID (x)],
                     INSN_LUID (insn));
#endif

          new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));

          dep_type = REG_NOTE_KIND (link);
          PUT_REG_NOTE_KIND (new_link, dep_type);

          INSN_DEPEND (x) = new_link;
          INSN_DEP_COUNT (insn) += 1;
        }
    }
}
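
/* To summarize the two link directions: LOG_LINKS (insn) lists the
   insns that INSN depends on (backward dependences), while
   INSN_DEPEND (x) lists the insns that depend on X (forward
   dependences).  A minimal sketch, kept out of the build with
   "#if 0", of walking the forward list just constructed;
   count_forward_deps is a hypothetical helper used only for
   illustration.  */
#if 0
static int
count_forward_deps (x)
     rtx x;
{
  rtx link;
  int n = 0;

  /* Count the consumers of X recorded in INSN_DEPEND.  */
  for (link = INSN_DEPEND (x); link; link = XEXP (link, 1))
    n++;
  return n;
}
#endif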
/* Initialize variables for region data dependence analysis.
   n_bbs is the number of region blocks.  */

void
init_deps (deps)
     struct deps *deps;
{
  int maxreg = max_reg_num ();
  deps->reg_last_uses = (rtx *) xcalloc (maxreg, sizeof (rtx));
  deps->reg_last_sets = (rtx *) xcalloc (maxreg, sizeof (rtx));
  deps->reg_last_clobbers = (rtx *) xcalloc (maxreg, sizeof (rtx));

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->in_post_call_group_p = 0;

  deps->sched_before_next_call
    = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
                    NULL_RTX, 0, NULL_RTX, NULL_RTX);
  LOG_LINKS (deps->sched_before_next_call) = 0;
}
/* Free insn lists found in DEPS.  */

void
free_deps (deps)
     struct deps *deps;
{
  int max_reg = max_reg_num ();
  int i;

  /* Note this loop is executed max_reg * nr_regions times.  Its first
     implementation accounted for over 90% of the calls to free_INSN_LIST_list.
     The list was empty for the vast majority of those calls.  On the PA, not
     calling free_INSN_LIST_list in those cases improves -O2 compile times.  */
  for (i = 0; i < max_reg; ++i)
    {
      if (deps->reg_last_clobbers[i])
        free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
      if (deps->reg_last_sets[i])
        free_INSN_LIST_list (&deps->reg_last_sets[i]);
      if (deps->reg_last_uses[i])
        free_INSN_LIST_list (&deps->reg_last_uses[i]);
    }
  free (deps->reg_last_clobbers);
  free (deps->reg_last_sets);
  free (deps->reg_last_uses);
}
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled;
   it is used in the estimate of profitability.  */

void
init_dependency_caches (luid)
     int luid;
{
  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
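  /* Worked example of the threshold below: a function of 2 basic
     blocks containing 1,200 insns gives luid / n_basic_blocks = 600,
     which exceeds 100 * 5 = 500, so the bitmap caches are
     allocated.  */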
  if (luid / n_basic_blocks > 100 * 5)
    {
      true_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (true_dependency_cache, luid);
      anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (anti_dependency_cache, luid);
      output_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (output_dependency_cache, luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (forward_dependency_cache, luid);
#endif
    }
}
/* Free the caches allocated in init_dependency_caches.  */

void
free_dependency_caches ()
{
  if (true_dependency_cache)
    {
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
    }
}
/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global ()
{
  reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
  reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
  reg_pending_sets_all = 0;
}

/* Free everything used by the dependency analysis code.  */

void
finish_deps_global ()
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
}