/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

GNU CC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"
extern char *reg_known_equiv_p;
extern rtx *reg_known_value;

static regset_head reg_pending_sets_head;
static regset_head reg_pending_clobbers_head;

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static int reg_pending_sets_all;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has an associated bitmap for each kind of dependency.  Each
   bitmap has enough entries to represent a dependency on any other insn
   in the insn chain.  The three caches are allocated together: if the
   bitmaps for the true-dependency cache are allocated, the other two
   are allocated as well.  */
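
/* A worked example of the sizing heuristic above (numbers illustrative,
   not measured): with the assumed average of 5 insns per basic block,
   the caches are enabled only when luid / n_basic_blocks exceeds
   100 * 5, i.e. when blocks average more than 500 insns; see
   init_dependency_caches below.  A function with 10 blocks would thus
   need more than 5000 insns in total before the caches are used.  */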
static sbitmap *true_dependency_cache;
static sbitmap *anti_dependency_cache;
static sbitmap *output_dependency_cache;
/* To speed up checking consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   could be switching off checking duplication of insns in forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static sbitmap *forward_dependency_cache;
static int deps_may_trap_p PARAMS ((rtx));
static void remove_dependence PARAMS ((rtx, rtx));
static void set_sched_group_p PARAMS ((rtx));

static void flush_pending_lists PARAMS ((struct deps *, rtx, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
static rtx group_leader PARAMS ((rtx));

static rtx get_condition PARAMS ((rtx));
static int conditions_mutex_p PARAMS ((rtx, rtx));
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

  rtx addr = XEXP (mem, 0);

  if (GET_CODE (addr) == REG
      && REGNO (addr) >= FIRST_PSEUDO_REGISTER
      && reg_known_value[REGNO (addr)])
    addr = reg_known_value[REGNO (addr)];
  return rtx_addr_can_trap_p (addr);
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

rtx
find_insn_list (insn, list)

      if (XEXP (list, 0) == insn)
	return list;
      list = XEXP (list, 1);
/* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0
   otherwise.  */

int
find_insn_mem_list (insn, x, list, list1)

      if (XEXP (list, 0) == insn
	  && XEXP (list1, 0) == x)
	return 1;
      list = XEXP (list, 1);
      list1 = XEXP (list1, 1);
/* Find the condition under which INSN is executed.  */

  rtx pat = PATTERN (insn);

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;
  if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
    return 0;
  if (GET_CODE (SET_SRC (pat)) != IF_THEN_ELSE)
    return 0;
  pat = SET_SRC (pat);
  cond = XEXP (pat, 0);
  if (GET_CODE (XEXP (pat, 1)) == LABEL_REF
      && XEXP (pat, 2) == pc_rtx)
    return cond;
  else if (GET_CODE (XEXP (pat, 2)) == LABEL_REF
	   && XEXP (pat, 1) == pc_rtx)
    return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
			   XEXP (cond, 0), XEXP (cond, 1));
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */

static int
conditions_mutex_p (cond1, cond2)

  if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
      && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
      && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
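
/* For example (a hypothetical pair, for illustration only): the
   conditions (eq (reg 3) (const_int 0)) and (ne (reg 3) (const_int 0))
   are mutually exclusive, since NE is the reverse of EQ and both
   compare the same operands.  Insns predicated on such a pair can
   never both execute, so no dependence is needed between them.  */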
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */
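
/* A typical use (insns and register hypothetical): if insn I2 reads
   (reg 5) and an earlier insn I1 writes it, the analyzer records the
   true dependence with

     add_dependence (i2, i1, 0);

   where kind 0 denotes a true (flow) dependence, and REG_DEP_ANTI or
   REG_DEP_OUTPUT would be passed for the other kinds.  */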
void
add_dependence (insn, elem, dep_type)
     enum reg_note dep_type;

  enum reg_note present_dep_type;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (GET_CODE (elem) == NOTE)
    return;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
    {
      cond1 = get_condition (insn);
      cond2 = get_condition (elem);
      if (cond1 && cond2 && conditions_mutex_p (cond1, cond2))
	return;
    }
  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */
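  /* Illustration (hypothetical cc0 target): a compare that sets cc0 and
     the conditional branch that uses it form a group, with SCHED_GROUP_P
     set on the branch.  A dependence against the compare is therefore
     redirected below to the branch, the last insn of the group, so the
     group is never split by scheduling.  */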
  next = next_nonnote_insn (elem);
  if (next && SCHED_GROUP_P (next)
      && GET_CODE (next) != CODE_LABEL)
    {
      /* Notes will never intervene here though, so don't bother checking
	 with next_nonnote_insn.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
	 that has LABEL_PRESERVE_P set, which is represented by the same
	 bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
	 SCHED_GROUP_P.  */
      while ((nnext = next_nonnote_insn (next)) != NULL
	     && SCHED_GROUP_P (nnext)
	     && GET_CODE (nnext) != CODE_LABEL)
	next = nnext;

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
	return;

      /* Make the dependence to NEXT, the last insn of the group, instead
	 of the original ELEM.  */
      elem = next;
    }
#ifdef INSN_SCHEDULING
  /* ??? No good way to tell from here whether we're doing interblock
     scheduling.  Possibly add another callback.  */
  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.  Note: the edge where
     elem is a CALL is still required.  */
  if (GET_CODE (insn) == CALL_INSN
      && (INSN_BB (elem) != INSN_BB (insn)))
    return;

  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
	abort ();
      if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
	present_dep_type = 0;
      else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem)))
	present_dep_type = REG_DEP_ANTI;
      else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem)))
	present_dep_type = REG_DEP_OUTPUT;

      if (present_p && (int) dep_type >= (int) present_dep_type)
	return;
    }
#endif
  /* Check that we don't already have this dependence.  */

  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 0) == elem)
      {
#ifdef INSN_SCHEDULING
	/* Clear corresponding cache entry because type of the link
	   may be changed.  */
	if (true_dependency_cache != NULL)
	  {
	    if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
	      RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem));
	    else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
		     && output_dependency_cache)
	      RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem));
	  }
#endif

	/* If this is a more restrictive type of dependence than the existing
	   one, then change the existing dependence to this type.  */
	if ((int) dep_type < (int) REG_NOTE_KIND (link))
	  PUT_REG_NOTE_KIND (link, dep_type);
#ifdef INSN_SCHEDULING
	/* If we are adding a dependency to INSN's LOG_LINKs, then
	   note that in the bitmap caches of dependency information.  */
	if (true_dependency_cache != NULL)
	  {
	    if ((int) REG_NOTE_KIND (link) == 0)
	      SET_BIT (true_dependency_cache[INSN_LUID (insn)],
		       INSN_LUID (elem));
	    else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
	      SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
		       INSN_LUID (elem));
	    else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
	      SET_BIT (output_dependency_cache[INSN_LUID (insn)],
		       INSN_LUID (elem));
	  }
#endif
  /* Might want to check one level of transitivity to save conses.  */

  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if ((int) dep_type == 0)
	SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_ANTI)
	SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_OUTPUT)
	SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
    }
#endif
/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */

static void
remove_dependence (insn, elem)

  rtx prev, link, next;

  for (prev = 0, link = LOG_LINKS (insn); link; link = next)
    {
      next = XEXP (link, 1);
      if (XEXP (link, 0) == elem)
	{
	  if (prev)
	    XEXP (prev, 1) = next;
	  else
	    LOG_LINKS (insn) = next;

#ifdef INSN_SCHEDULING
	  /* If we are removing a dependency from the LOG_LINKS list,
	     make sure to remove it from the cache too.  */
	  if (true_dependency_cache != NULL)
	    {
	      if (REG_NOTE_KIND (link) == 0)
		RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
		RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
		RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	    }
#endif

	  free_INSN_LIST_node (link);
/* Return an insn which represents a SCHED_GROUP, which is
   the last insn in the group.  */

  do
    {
      insn = next_nonnote_insn (insn);
    }
  while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));
/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
   goes along with that.  */

static void
set_sched_group_p (insn)

  SCHED_GROUP_P (insn) = 1;

  /* There may be a note before this insn now, but all notes will
     be removed before we actually try to schedule the insns, so
     it won't cause a problem later.  We must avoid it here though.  */
  prev = prev_nonnote_insn (insn);

  /* Make a copy of all dependencies on the immediately previous insn,
     and add to this insn.  This is so that all the dependencies will
     apply to the group.  Remove an explicit dependence on this insn
     as SCHED_GROUP_P now represents it.  */

  if (find_insn_list (prev, LOG_LINKS (insn)))
    remove_dependence (insn, prev);

  for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
    add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
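
/* A small illustration of the four kinds (operands hypothetical):

     i1: x = *p;    read *p
     i2: *p = y;    anti dependence on i1 (write follows read)
     i3: *p = z;    output dependence on i2 (write follows write)
     i4: w = *p;    true dependence on i3 (read follows write)

   A second read from *p after i4 would only be a read dependence,
   which by itself never constrains the schedule.  */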
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
     rtx *insn_list, *mem_list, insn, mem;

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
  *mem_list = link;

  deps->pending_lists_length++;
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  If ONLY_WRITE, don't flush
   the read list.  */

static void
flush_pending_lists (deps, insn, only_write)

  while (deps->pending_read_insns && ! only_write)
    {
      add_dependence (insn, XEXP (deps->pending_read_insns, 0),
		      REG_DEP_ANTI);

      link = deps->pending_read_insns;
      deps->pending_read_insns = XEXP (deps->pending_read_insns, 1);
      free_INSN_LIST_node (link);

      link = deps->pending_read_mems;
      deps->pending_read_mems = XEXP (deps->pending_read_mems, 1);
      free_EXPR_LIST_node (link);
    }
  while (deps->pending_write_insns)
    {
      add_dependence (insn, XEXP (deps->pending_write_insns, 0),
		      REG_DEP_ANTI);

      link = deps->pending_write_insns;
      deps->pending_write_insns = XEXP (deps->pending_write_insns, 1);
      free_INSN_LIST_node (link);

      link = deps->pending_write_mems;
      deps->pending_write_mems = XEXP (deps->pending_write_mems, 1);
      free_EXPR_LIST_node (link);
    }
  deps->pending_lists_length = 0;

  /* last_pending_memory_flush is now a list of insns.  */
  for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

  free_INSN_LIST_list (&deps->last_pending_memory_flush);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (deps, x, insn)

  register rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (GET_CODE (dest) == PARALLEL
      && GET_MODE (dest) == BLKmode)
    {
      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	sched_analyze_1 (deps, XVECEXP (dest, 0, i), insn);
      if (GET_CODE (x) == SET)
	sched_analyze_2 (deps, SET_SRC (x), insn);
      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
	}
      dest = XEXP (dest, 0);
    }

  if (GET_CODE (dest) == REG)
    {
      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
	 If so, mark all of them just like the first.  */
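      /* For instance (mode and register width hypothetical): on a
	 32-bit target, a DImode store to hard register 3 really writes
	 hard registers 3 and 4, so HARD_REGNO_NREGS returns 2 and the
	 loop below records the write for both registers.  */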
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  i = HARD_REGNO_NREGS (regno, GET_MODE (dest));

	      for (u = deps->reg_last[r].uses; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	      for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);

	      /* Clobbers need not be ordered with respect to one
		 another, but sets must be ordered with respect to a
		 pending clobber.  */
	      if (code == SET)
		{
		  if (GET_CODE (PATTERN (insn)) != COND_EXEC)
		    free_INSN_LIST_list (&deps->reg_last[r].uses);
		  for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
		    add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
		  SET_REGNO_REG_SET (reg_pending_sets, r);
		}
	      else
		SET_REGNO_REG_SET (reg_pending_clobbers, r);

	      /* Function calls clobber all call_used regs.  */
	      if (global_regs[r] || (code == SET && call_used_regs[r]))
		for (u = deps->last_function_call; u; u = XEXP (u, 1))
		  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	}
      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
	 it does not reload.  Ignore these as they have served their
	 purpose already.  */
      else if (regno >= deps->max_reg)
	{
	  if (GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER)
	    abort ();
	}
      else
	{
	  for (u = deps->reg_last[regno].uses; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	  for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);

	  if (code == SET)
	    {
	      if (GET_CODE (PATTERN (insn)) != COND_EXEC)
		free_INSN_LIST_list (&deps->reg_last[regno].uses);
	      for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
	      SET_REGNO_REG_SET (reg_pending_sets, regno);
	    }
	  else
	    SET_REGNO_REG_SET (reg_pending_clobbers, regno);

	  /* Pseudos that are REG_EQUIV to something may be replaced
	     by that during reloading.  We need only add dependencies for
	     the address in the REG_EQUIV note.  */
	  if (!reload_completed
	      && reg_known_equiv_p[regno]
	      && GET_CODE (reg_known_value[regno]) == MEM)
	    sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

	  /* Don't let it cross a call after scheduling if it doesn't
	     already cross one.  */

	  if (REG_N_CALLS_CROSSED (regno) == 0)
	    for (u = deps->last_function_call; u; u = XEXP (u, 1))
	      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	}
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (deps->pending_lists_length > 32)
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  The number 32 was chosen because it
	     seems like a reasonable number.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the number 32.  */
	  flush_pending_lists (deps, insn, 0);
	}
      else
	{
	  rtx pending, pending_mem;

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (anti_dependence (XEXP (pending_mem, 0), dest))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (output_dependence (XEXP (pending_mem, 0), dest))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	  add_insn_mem_dependence (deps, &deps->pending_write_insns,
				   &deps->pending_write_mems, insn, dest);
	}
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (deps, x, insn)

  register enum rtx_code code;
  register const char *fmt;

  /* Ignore constants.  Note that we must handle CONST_DOUBLE here
     because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
     this does not mean that this insn is using cc0.  */

      /* User of CC0 depends on immediately preceding insn.  */
      set_sched_group_p (insn);
      return;
      int regno = REGNO (x);
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  i = HARD_REGNO_NREGS (regno, GET_MODE (x));

	      deps->reg_last[r].uses
		= alloc_INSN_LIST (insn, deps->reg_last[r].uses);
	      SET_REGNO_REG_SET (&deps->reg_last_in_use, r);

	      for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), 0);

	      /* ??? This should never happen.  */
	      for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), 0);

	      if (call_used_regs[r] || global_regs[r])
		/* Function calls clobber all call_used regs.  */
		for (u = deps->last_function_call; u; u = XEXP (u, 1))
		  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	}
      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
	 it does not reload.  Ignore these as they have served their
	 purpose already.  */
      else if (regno >= deps->max_reg)
	{
	  if (GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER)
	    abort ();
	}
      else
	{
	  deps->reg_last[regno].uses
	    = alloc_INSN_LIST (insn, deps->reg_last[regno].uses);
	  SET_REGNO_REG_SET (&deps->reg_last_in_use, regno);

	  for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), 0);

	  /* ??? This should never happen.  */
	  for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), 0);

	  /* Pseudos that are REG_EQUIV to something may be replaced
	     by that during reloading.  We need only add dependencies for
	     the address in the REG_EQUIV note.  */
	  if (!reload_completed
	      && reg_known_equiv_p[regno]
	      && GET_CODE (reg_known_value[regno]) == MEM)
	    sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

	  /* If the register does not already cross any calls, then add this
	     insn to the sched_before_next_call list so that it will still
	     not cross calls after scheduling.  */
	  if (REG_N_CALLS_CROSSED (regno) == 0)
	    add_dependence (deps->sched_before_next_call, insn,
			    REG_DEP_ANTI);
	}
      /* Reading memory.  */
      {
	rtx pending, pending_mem;

	pending = deps->pending_read_insns;
	pending_mem = deps->pending_read_mems;
	while (pending)
	  {
	    if (read_dependence (XEXP (pending_mem, 0), x))
	      add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	pending = deps->pending_write_insns;
	pending_mem = deps->pending_write_mems;
	while (pending)
	  {
	    if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
				 x, rtx_varies_p))
	      add_dependence (insn, XEXP (pending, 0), 0);

	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
	  if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
	      || deps_may_trap_p (x))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	/* Always add these dependencies to pending_reads, since
	   this insn may be followed by a write.  */
	add_insn_mem_dependence (deps, &deps->pending_read_insns,
				 &deps->pending_read_mems, insn, x);

	/* Take advantage of tail recursion here.  */
	sched_analyze_2 (deps, XEXP (x, 0), insn);
	return;
      }
      /* Force pending stores to memory in case a trap handler needs them.  */
      flush_pending_lists (deps, insn, 1);

    case UNSPEC_VOLATILE:
      /* Traditional and volatile asm instructions must be considered to use
	 and clobber all hard registers, all pseudo-registers and all of
	 memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

	 Consider for instance a volatile asm that changes the fpu rounding
	 mode.  An insn should not be moved across this even if it only uses
	 pseudo-regs because it might give an incorrectly rounded result.  */
      if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
	{
	  for (i = 0; i < deps->max_reg; i++)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];

	      for (u = reg_last->uses; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	      for (u = reg_last->sets; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), 0);
	      for (u = reg_last->clobbers; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), 0);

	      if (GET_CODE (PATTERN (insn)) != COND_EXEC)
		free_INSN_LIST_list (&reg_last->uses);
	    }
	  reg_pending_sets_all = 1;

	  flush_pending_lists (deps, insn, 0);
	}
      /* For all ASM_OPERANDS, we must traverse the vector of input operands.
	 We cannot just fall through here since then we would be confused
	 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
	 a traditional asm unlike its normal usage.  */

      if (code == ASM_OPERANDS)
	{
	  for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	    sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
	}
      /* These both read and modify the result.  We must handle them as writes
	 to get proper dependencies for following instructions.  We must handle
	 them as reads to get proper dependencies from this to previous
	 instructions.  Thus we need to pass them to both sched_analyze_1
	 and sched_analyze_2.  We must call sched_analyze_2 first in order
	 to get the proper antecedent for the read.  */
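      /* Example (a hypothetical stack push): in
	 (set (mem (pre_dec (reg sp))) (reg 5)), the PRE_DEC both reads
	 and writes the stack pointer, so sched_analyze_2 records the use
	 of sp (the read antecedent) before sched_analyze_1 records the
	 write.  */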
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);
      return;

      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    default:
      /* Other cases: walk the insn.  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    sched_analyze_2 (deps, XEXP (x, i), insn);
	  else if (fmt[i] == 'E')
	    for (j = 0; j < XVECLEN (x, i); j++)
	      sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
	}
/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (deps, x, insn, loop_notes)

  register RTX_CODE code = GET_CODE (x);

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
	 false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    sched_analyze_1 (deps, x, insn);
  else if (code == PARALLEL)
    {
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	{
	  rtx sub = XVECEXP (x, 0, i);
	  code = GET_CODE (sub);

	  if (code == COND_EXEC)
	    {
	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
	      sub = COND_EXEC_CODE (sub);
	      code = GET_CODE (sub);
	    }
	  if (code == SET || code == CLOBBER)
	    sched_analyze_1 (deps, sub, insn);
	  else
	    sched_analyze_2 (deps, sub, insn);
	}
    }
  else
    sched_analyze_2 (deps, x, insn);

  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
      {
	if (GET_CODE (XEXP (link, 0)) == CLOBBER)
	  sched_analyze_1 (deps, XEXP (link, 0), insn);
	else
	  sched_analyze_2 (deps, XEXP (link, 0), insn);
      }
  if (GET_CODE (insn) == JUMP_INSN)
    {
      rtx next, u, pending, pending_mem;
      next = next_nonnote_insn (insn);
      if (next && GET_CODE (next) == BARRIER)
	{
	  for (i = 0; i < deps->max_reg; i++)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];

	      for (u = reg_last->uses; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	      for (u = reg_last->sets; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	      for (u = reg_last->clobbers; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	    }
	}
      else
	{
	  INIT_REG_SET (&tmp);

	  (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
	  EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      for (u = reg_last->sets; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
	      SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
	    });

	  CLEAR_REG_SET (&tmp);
	}

      pending = deps->pending_write_insns;
      pending_mem = deps->pending_write_mems;
      while (pending)
	{
	  add_dependence (insn, XEXP (pending, 0), 0);

	  pending = XEXP (pending, 1);
	  pending_mem = XEXP (pending_mem, 1);
	}

      for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
	add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
    }
  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */
  if (loop_notes)
    {
      int schedule_barrier_found = 0;

      /* Update loop_notes with any notes from this insn.  Also determine
	 if any of the notes on the list correspond to instruction scheduling
	 barriers (loop, eh & setjmp notes, but not range notes).  */
      link = loop_notes;
      while (XEXP (link, 1))
	{
	  if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_SETJMP)
	    schedule_barrier_found = 1;

	  link = XEXP (link, 1);
	}
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;
      /* Add dependencies if a scheduling barrier was found.  */
      if (schedule_barrier_found)
	{
	  for (i = 0; i < deps->max_reg; i++)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];

	      for (u = reg_last->uses; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	      for (u = reg_last->sets; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), 0);
	      for (u = reg_last->clobbers; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), 0);

	      if (GET_CODE (PATTERN (insn)) != COND_EXEC)
		free_INSN_LIST_list (&reg_last->uses);
	    }
	  reg_pending_sets_all = 1;

	  flush_pending_lists (deps, insn, 0);
	}
    }
  /* Accumulate clobbers until the next set so that it will be output
     dependent on all of them.  At the next set we can clear the clobber
     list, since subsequent sets will be output dependent on it.  */
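  /* For example (an illustrative sequence): two insns that each clobber
     r1, followed by a set of r1, give the set an output dependence on
     both clobbers; once the set is recorded, the clobber list is
     cleared, and later sets of r1 need only depend on this set.  */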
  if (reg_pending_sets_all)
    {
      reg_pending_sets_all = 0;
      for (i = 0; i < deps->max_reg; i++)
	{
	  struct deps_reg *reg_last = &deps->reg_last[i];
	  if (GET_CODE (PATTERN (insn)) != COND_EXEC)
	    {
	      free_INSN_LIST_list (&reg_last->sets);
	      free_INSN_LIST_list (&reg_last->clobbers);
	    }
	  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
	}
    }
  else
    {
      EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
	{
	  struct deps_reg *reg_last = &deps->reg_last[i];
	  if (GET_CODE (PATTERN (insn)) != COND_EXEC)
	    {
	      free_INSN_LIST_list (&reg_last->sets);
	      free_INSN_LIST_list (&reg_last->clobbers);
	    }
	  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
	});
      EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
	{
	  struct deps_reg *reg_last = &deps->reg_last[i];
	  reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
	});
    }
  CLEAR_REG_SET (reg_pending_sets);
  CLEAR_REG_SET (reg_pending_clobbers);
  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice-versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */
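  /* Example (register numbers hypothetical): right after a call,
     (set (reg 123) (reg 0)) -- copying the hard return-value register
     into a pseudo -- keeps the group open and is chained to the call
     via set_sched_group_p below; any other kind of insn closes the
     group.  */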
  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
	goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
	dest_regno = REGNO (tmp);
      else
	goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
	src_regno = REGNO (tmp);
      else
	goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
	  || dest_regno < FIRST_PSEUDO_REGISTER)
	{
	  set_sched_group_p (insn);
	  CANT_MOVE (insn) = 1;
	}
      else
	{
	end_call_group:
	  deps->in_post_call_group_p = 0;
	}
    }
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

void
sched_analyze (deps, head, tail)

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
	{
	  /* Clear out the stale LOG_LINKS from flow.  */
	  free_INSN_LIST_list (&LOG_LINKS (insn));

	  /* Clear out stale SCHED_GROUP_P.  */
	  SCHED_GROUP_P (insn) = 0;

	  /* Make each JUMP_INSN a scheduling barrier for memory
	     references.  */
	  if (GET_CODE (insn) == JUMP_INSN)
	    deps->last_pending_memory_flush
	      = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
	  loop_notes = 0;
	}
      else if (GET_CODE (insn) == CALL_INSN)
	{
	  /* Clear out stale SCHED_GROUP_P.  */
	  SCHED_GROUP_P (insn) = 0;

	  CANT_MOVE (insn) = 1;

	  /* Clear out the stale LOG_LINKS from flow.  */
	  free_INSN_LIST_list (&LOG_LINKS (insn));

	  /* Any instruction using a hard register which may get clobbered
	     by a call needs to be marked as dependent on this call.
	     This prevents a use of a hard return reg from being moved
	     past a void call (i.e. it does not explicitly set the hard
	     return reg).  */

	  /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
	     all registers, not just hard registers, may be clobbered by this
	     call.  */

	  /* Insn, being a CALL_INSN, magically depends on
	     `last_function_call' already.  */
1266 for (i = 0; i < deps->max_reg; i++)
1268 struct deps_reg *reg_last = &deps->reg_last[i];
1270 for (u = reg_last->uses; u; u = XEXP (u, 1))
1271 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1272 for (u = reg_last->sets; u; u = XEXP (u, 1))
1273 add_dependence (insn, XEXP (u, 0), 0);
1274 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
1275 add_dependence (insn, XEXP (u, 0), 0);
1277 free_INSN_LIST_list (®_last->uses);
1279 reg_pending_sets_all = 1;
1281 /* Add a pair of REG_SAVE_NOTEs which we will later
1282 convert back into a NOTE_INSN_SETJMP note. See
1283 reemit_notes for why we use a pair of NOTEs. */
1284 REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
1287 REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
1288 GEN_INT (NOTE_INSN_SETJMP),
1293 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1294 if (call_used_regs[i] || global_regs[i])
1296 for (u = deps->reg_last[i].uses; u; u = XEXP (u, 1))
1297 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1298 for (u = deps->reg_last[i].sets; u; u = XEXP (u, 1))
1299 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1301 SET_REGNO_REG_SET (reg_pending_clobbers, i);
	  /* For each insn which shouldn't cross a call, add a dependence
	     between that insn and this call insn.  */
	  x = LOG_LINKS (deps->sched_before_next_call);
	  while (x)
	    {
	      add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
	      x = XEXP (x, 1);
	    }
	  free_INSN_LIST_list (&LOG_LINKS (deps->sched_before_next_call));

	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
	  loop_notes = 0;

	  /* In the absence of interprocedural alias analysis, we must flush
	     all pending reads and writes, and start new dependencies starting
	     from here.  But only flush writes for constant calls (which may
	     be passed a pointer to something we haven't written yet).  */
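	  /* For instance (illustrative): if the call is CONST_CALL_P --
	     say a call to a constant function that only inspects memory
	     it is handed -- pending stores must still be flushed so the
	     callee sees them, but pending reads may stay pending, since
	     a constant call writes no memory.  */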
	  flush_pending_lists (deps, insn, CONST_CALL_P (insn));

	  /* Depend this function call (actually, the user of this
	     function call) on all hard register clobberage.  */

	  /* last_function_call is now a list of insns.  */
	  free_INSN_LIST_list (&deps->last_function_call);
	  deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

	  /* Before reload, begin a post-call group, so as to keep the
	     lifetimes of hard registers correct.  */
	  if (! reload_completed)
	    deps->in_post_call_group_p = 1;
	}
      /* See comments on reemit_notes as to why we do this.
	 ??? Actually, the reemit_notes just say what is done, not why.  */

      else if (GET_CODE (insn) == NOTE
	       && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_BEG
		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
	{
	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, NOTE_RANGE_INFO (insn),
					loop_notes);
	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
					GEN_INT (NOTE_LINE_NUMBER (insn)),
					loop_notes);
	}
      else if (GET_CODE (insn) == NOTE
	       && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
		   || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
		       && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
	{
	  rtx rtx_region;

	  if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
	      || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
	    rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
	  else
	    rtx_region = GEN_INT (0);

	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
					rtx_region,
					loop_notes);
	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
					GEN_INT (NOTE_LINE_NUMBER (insn)),
					loop_notes);
	  CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
	}
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */

void
compute_forward_dependences (head, tail)

  enum reg_note dep_type;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
	continue;

      insn = group_leader (insn);

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
	{
	  rtx x = group_leader (XEXP (link, 0));
	  rtx new_link;

	  if (x != XEXP (link, 0))
	    continue;

#ifdef ENABLE_CHECKING
	  /* If add_dependence is working properly there should never
	     be notes, deleted insns or duplicates in the backward
	     links.  Thus we need not check for them here.

	     However, if we have enabled checking we might as well go
	     ahead and verify that add_dependence worked properly.  */
	  if (GET_CODE (x) == NOTE
	      || INSN_DELETED_P (x)
	      || (forward_dependency_cache != NULL
		  && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
			       INSN_LUID (insn)))
	      || (forward_dependency_cache == NULL
		  && find_insn_list (insn, INSN_DEPEND (x))))
	    abort ();
	  if (forward_dependency_cache != NULL)
	    SET_BIT (forward_dependency_cache[INSN_LUID (x)],
		     INSN_LUID (insn));
#endif

	  new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));

	  dep_type = REG_NOTE_KIND (link);
	  PUT_REG_NOTE_KIND (new_link, dep_type);

	  INSN_DEPEND (x) = new_link;
	  INSN_DEP_COUNT (insn) += 1;
	}
    }
/* Initialize variables for region data dependence analysis.
   n_bbs is the number of region blocks.  */

  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = (struct deps_reg *)
    xcalloc (max_reg, sizeof (struct deps_reg));
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->in_post_call_group_p = 0;

  deps->sched_before_next_call
    = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
		    NULL_RTX, 0, NULL_RTX, NULL_RTX);
  LOG_LINKS (deps->sched_before_next_call) = 0;
/* Free insn lists found in DEPS.  */

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a test case with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      free_INSN_LIST_list (&reg_last->uses);
      free_INSN_LIST_list (&reg_last->sets);
      free_INSN_LIST_list (&reg_last->clobbers);
    });
  CLEAR_REG_SET (&deps->reg_last_in_use);

  free (deps->reg_last);
  deps->reg_last = NULL;
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled,
   it is used in the estimate of profitability.  */

void
init_dependency_caches (luid)

  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
  if (luid / n_basic_blocks > 100 * 5)
    {
      true_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (true_dependency_cache, luid);
      anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (anti_dependency_cache, luid);
      output_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (output_dependency_cache, luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (forward_dependency_cache, luid);
#endif
    }
/* Free the caches allocated in init_dependency_caches.  */

void
free_dependency_caches ()
{
  if (true_dependency_cache)
    {
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
    }
}
/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global ()
{
  reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
  reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
  reg_pending_sets_all = 0;
}

/* Free everything used by the dependency analysis code.  */

void
finish_deps_global ()
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
}