/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

GNU CC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"
extern char *reg_known_equiv_p;
extern rtx *reg_known_value;

static regset_head reg_pending_sets_head;
static regset_head reg_pending_clobbers_head;

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static int reg_pending_sets_all;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  The three caches are allocated together, so a
   non-null true dependency cache implies the other two exist too.  */
static sbitmap *true_dependency_cache;
static sbitmap *anti_dependency_cache;
static sbitmap *output_dependency_cache;
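
/* Illustrative sketch only: with LUID-indexed bitmap vectors, asking
   whether a true dependence between two insns is already recorded is a
   single bit test.  The helper below is hypothetical; the real test is
   inlined in add_dependence.  */
#if 0
static int
true_dependence_cached_p (insn, elem)
     rtx insn, elem;
{
  return (true_dependency_cache != NULL
          && TEST_BIT (true_dependency_cache[INSN_LUID (insn)],
                       INSN_LUID (elem)));
}
#endif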
/* To speed up checking consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   could be switching off checking duplication of insns in forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static sbitmap *forward_dependency_cache;
#endif
static int deps_may_trap_p PARAMS ((rtx));
static void remove_dependence PARAMS ((rtx, rtx));
static void set_sched_group_p PARAMS ((rtx));

static void add_insn_mem_dependence PARAMS ((struct deps *, rtx *, rtx *,
					     rtx, rtx));
static void flush_pending_lists PARAMS ((struct deps *, rtx, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
static rtx group_leader PARAMS ((rtx));

static rtx get_condition PARAMS ((rtx));
static int conditions_mutex_p PARAMS ((rtx, rtx));
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (mem)
     rtx mem;
{
  rtx addr = XEXP (mem, 0);

  if (GET_CODE (addr) == REG
      && REGNO (addr) >= FIRST_PSEUDO_REGISTER
      && reg_known_value[REGNO (addr)])
    addr = reg_known_value[REGNO (addr)];
  return rtx_addr_can_trap_p (addr);
}
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
    }
  return 0;
}
/* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0
   otherwise.  */

int
find_insn_mem_list (insn, x, list, list1)
     rtx insn, x;
     rtx list, list1;
{
  while (list)
    {
      if (XEXP (list, 0) == insn
          && XEXP (list1, 0) == x)
        return 1;
      list = XEXP (list, 1);
      list1 = XEXP (list1, 1);
    }
  return 0;
}
/* Find the condition under which INSN is executed.  */

static rtx
get_condition (insn)
     rtx insn;
{
  rtx pat = PATTERN (insn);
  rtx cond;

  if (pat == 0)
    return 0;
  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;
  if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
    return 0;
  if (GET_CODE (SET_SRC (pat)) != IF_THEN_ELSE)
    return 0;
  pat = SET_SRC (pat);
  cond = XEXP (pat, 0);
  if (GET_CODE (XEXP (pat, 1)) == LABEL_REF
      && XEXP (pat, 2) == pc_rtx)
    return cond;
  else if (GET_CODE (XEXP (pat, 2)) == LABEL_REF
           && XEXP (pat, 1) == pc_rtx)
    return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
                           XEXP (cond, 0), XEXP (cond, 1));
  else
    return 0;
}
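
/* For example, for the conditional jump

     (set (pc) (if_then_else (ne (reg 1) (const_int 0))
                             (label_ref 23)
                             (pc)))

   the condition returned is (ne (reg 1) (const_int 0)); if the arms
   are swapped, so that the fall-through path is taken when the
   condition holds, the reversed condition is returned instead.  */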
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */

static int
conditions_mutex_p (cond1, cond2)
     rtx cond1, cond2;
{
  if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
      && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
      && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
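
/* E.g. (eq (reg 1) (const_int 0)) and (ne (reg 1) (const_int 0)) can
   never both be true, so two insns predicated on those conditions need
   no dependence between one another.  */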
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;
  int present_p = 1;
  rtx cond1, cond2;
  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (GET_CODE (elem) == NOTE)
    return;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
    {
      cond1 = get_condition (insn);
      cond2 = get_condition (elem);
      if (cond1 && cond2 && conditions_mutex_p (cond1, cond2))
        return;
    }
  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */
  next = next_nonnote_insn (elem);
  if (next && SCHED_GROUP_P (next)
      && GET_CODE (next) != CODE_LABEL)
    {
      /* Notes will never intervene here though, so don't bother checking
         for them.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
         that has LABEL_PRESERVE_P set, which is represented by the same
         bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
         SCHED_GROUP_P.  */
      rtx nnext;
      while ((nnext = next_nonnote_insn (next)) != NULL
             && SCHED_GROUP_P (nnext)
             && GET_CODE (nnext) != CODE_LABEL)
        next = nnext;

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
        return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }
#ifdef INSN_SCHEDULING
  /* ??? No good way to tell from here whether we're doing interblock
     scheduling.  Possibly add another callback.  */
#if 0
  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.  Note: the edge where
     elem is a CALL is still required.  */
  if (GET_CODE (insn) == CALL_INSN
      && (INSN_BB (elem) != INSN_BB (insn)))
    return;
#endif
  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      enum reg_note present_dep_type = 0;

      if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
        abort ();
      if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
        /* Do nothing (present_dep_type is already 0).  */
        ;
      else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_ANTI;
      else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_OUTPUT;
      else
        present_p = 0;
      if (present_p && (int) dep_type >= (int) present_dep_type)
        return;
    }
#endif
  /* Check that we don't already have this dependence.  */
  if (present_p)
    for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 0) == elem)
        {
#ifdef INSN_SCHEDULING
          /* Clear corresponding cache entry because type of the link
             may be changed.  */
          if (true_dependency_cache != NULL)
            {
              if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
                       && output_dependency_cache)
                RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
            }
#endif

          /* If this is a more restrictive type of dependence than the existing
             one, then change the existing dependence to this type.  */
          if ((int) dep_type < (int) REG_NOTE_KIND (link))
            PUT_REG_NOTE_KIND (link, dep_type);
#ifdef INSN_SCHEDULING
          /* If we are adding a dependency to INSN's LOG_LINKs, then
             note that in the bitmap caches of dependency information.  */
          if (true_dependency_cache != NULL)
            {
              if ((int) REG_NOTE_KIND (link) == 0)
                SET_BIT (true_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
                SET_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
            }
#endif
          return;
        }

  /* Might want to check one level of transitivity to save conses.  */

  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);
#ifdef INSN_SCHEDULING
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if ((int) dep_type == 0)
        SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_ANTI)
        SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_OUTPUT)
        SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
    }
#endif
}
/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */

static void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link, next;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link; link = next)
    {
      next = XEXP (link, 1);
      if (XEXP (link, 0) == elem)
        {
          if (prev)
            XEXP (prev, 1) = next;
          else
            LOG_LINKS (insn) = next;

#ifdef INSN_SCHEDULING
          /* If we are removing a dependency from the LOG_LINKS list,
             make sure to remove it from the cache too.  */
          if (true_dependency_cache != NULL)
            {
              if (REG_NOTE_KIND (link) == 0)
                RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
                RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
            }
#endif

          free_INSN_LIST_node (link);

          found = 1;
        }
      else
        prev = link;
    }

  if (!found)
    abort ();
}
/* Return an insn which represents a SCHED_GROUP, which is
   the last insn in the group.  */

static rtx
group_leader (insn)
     rtx insn;
{
  rtx prev;

  do
    {
      prev = insn;
      insn = next_nonnote_insn (insn);
    }
  while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));

  return prev;
}
/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
   goes along with that.  */

static void
set_sched_group_p (insn)
     rtx insn;
{
  rtx link, prev;

  SCHED_GROUP_P (insn) = 1;

  /* There may be a note before this insn now, but all notes will
     be removed before we actually try to schedule the insns, so
     it won't cause a problem later.  We must avoid it here though.  */
  prev = prev_nonnote_insn (insn);

  /* Make a copy of all dependencies on the immediately previous insn,
     and add to this insn.  This is so that all the dependencies will
     apply to the group.  Remove an explicit dependence on this insn
     as SCHED_GROUP_P now represents it.  */

  if (find_insn_list (prev, LOG_LINKS (insn)))
    remove_dependence (insn, prev);

  for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
    add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
}
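
/* One client is the HAVE_cc0 path in sched_analyze_2 below: a user of
   cc0 is grouped with the immediately preceding cc0 setter, keeping a
   comparison and its conditional jump adjacent during scheduling.  */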
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
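
/* For example, in the sequence

     (1) r1 = MEM[a]
     (2) MEM[a] = r2
     (3) MEM[a] = r3
     (4) r4 = MEM[a]

   (2) anti-depends on (1), (3) output-depends on (2), and (4) truly
   depends on (3).  No explicit link from (4) back to (2) is needed;
   that ordering follows by transitivity through (3).  */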
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
     struct deps *deps;
     rtx *insn_list, *mem_list, insn, mem;
{
  register rtx link;

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
  *mem_list = link;

  deps->pending_lists_length++;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  If ONLY_WRITE, don't flush
   the reads list.  */

static void
flush_pending_lists (deps, insn, only_write)
     struct deps *deps;
     rtx insn;
     int only_write;
{
  rtx u;
  rtx link;

  while (deps->pending_read_insns && ! only_write)
    {
      add_dependence (insn, XEXP (deps->pending_read_insns, 0),
                      REG_DEP_ANTI);

      link = deps->pending_read_insns;
      deps->pending_read_insns = XEXP (deps->pending_read_insns, 1);
      free_INSN_LIST_node (link);

      link = deps->pending_read_mems;
      deps->pending_read_mems = XEXP (deps->pending_read_mems, 1);
      free_EXPR_LIST_node (link);
    }
  while (deps->pending_write_insns)
    {
      add_dependence (insn, XEXP (deps->pending_write_insns, 0),
                      REG_DEP_ANTI);

      link = deps->pending_write_insns;
      deps->pending_write_insns = XEXP (deps->pending_write_insns, 1);
      free_INSN_LIST_node (link);

      link = deps->pending_write_mems;
      deps->pending_write_mems = XEXP (deps->pending_write_mems, 1);
      free_EXPR_LIST_node (link);
    }
  deps->pending_lists_length = 0;

  /* last_pending_memory_flush is now a list of insns.  */
  for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

  free_INSN_LIST_list (&deps->last_pending_memory_flush);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
}
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (dest == 0)
    return;

  if (GET_CODE (dest) == PARALLEL)
    {
      register int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
          sched_analyze_1 (deps,
                           gen_rtx_CLOBBER (VOIDmode,
                                            XEXP (XVECEXP (dest, 0, i), 0)),
                           insn);

      if (GET_CODE (x) == SET)
        sched_analyze_2 (deps, SET_SRC (x), insn);
      return;
    }
  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
        }
      dest = XEXP (dest, 0);
    }
  if (GET_CODE (dest) == REG)
    {
      register int i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
          while (--i >= 0)
            {
              int r = regno + i;
              rtx u;

              for (u = deps->reg_last[r].uses; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

              for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);

              /* Clobbers need not be ordered with respect to one
                 another, but sets must be ordered with respect to a
                 pending clobber.  */
              if (code == SET)
                {
                  if (GET_CODE (PATTERN (insn)) != COND_EXEC)
                    free_INSN_LIST_list (&deps->reg_last[r].uses);
                  for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
                  SET_REGNO_REG_SET (reg_pending_sets, r);
                }
              else
                SET_REGNO_REG_SET (reg_pending_clobbers, r);

              /* Function calls clobber all call_used regs.  */
              if (global_regs[r] || (code == SET && call_used_regs[r]))
                for (u = deps->last_function_call; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
            }
        }
      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
         it does not reload.  Ignore these as they have served their
         purpose already.  */
      else if (regno >= deps->max_reg)
        {
          if (GET_CODE (PATTERN (insn)) != USE
              && GET_CODE (PATTERN (insn)) != CLOBBER)
            abort ();
        }
      else
        {
          rtx u;

          for (u = deps->reg_last[regno].uses; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

          for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);

          if (code == SET)
            {
              if (GET_CODE (PATTERN (insn)) != COND_EXEC)
                free_INSN_LIST_list (&deps->reg_last[regno].uses);
              for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
              SET_REGNO_REG_SET (reg_pending_sets, regno);
            }
          else
            SET_REGNO_REG_SET (reg_pending_clobbers, regno);

          /* Pseudos that are REG_EQUIV to something may be replaced
             by that during reloading.  We need only add dependencies for
             the address in the REG_EQUIV note.  */
          if (!reload_completed
              && reg_known_equiv_p[regno]
              && GET_CODE (reg_known_value[regno]) == MEM)
            sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */

          if (REG_N_CALLS_CROSSED (regno) == 0)
            for (u = deps->last_function_call; u; u = XEXP (u, 1))
              add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
        }
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (deps->pending_lists_length > 32)
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  The number 32 was chosen because it
             seems like a reasonable number.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the number 32.  */
          flush_pending_lists (deps, insn, 0);
        }
      else
        {
          rtx u;
          rtx pending, pending_mem;

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (anti_dependence (XEXP (pending_mem, 0), dest))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              if (output_dependence (XEXP (pending_mem, 0), dest))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

          add_insn_mem_dependence (deps, &deps->pending_write_insns,
                                   &deps->pending_write_mems, insn, dest);
        }
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
}
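
/* E.g. for (set (mem:SI (reg 100)) (reg 101)), the MEM is entered on
   the pending-write lists after being ordered against aliasing pending
   reads and writes, and then (reg 100) and (reg 101) are analyzed as
   uses.  */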
/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register const char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      set_sched_group_p (insn);
      return;
#endif
    case REG:
      {
        rtx u;
        int regno = REGNO (x);
        if (regno < FIRST_PSEUDO_REGISTER)
          {
            int i;

            i = HARD_REGNO_NREGS (regno, GET_MODE (x));
            while (--i >= 0)
              {
                int r = regno + i;
                deps->reg_last[r].uses
                  = alloc_INSN_LIST (insn, deps->reg_last[r].uses);
                SET_REGNO_REG_SET (&deps->reg_last_in_use, r);

                for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), 0);

                /* ??? This should never happen.  */
                for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), 0);

                if (call_used_regs[r] || global_regs[r])
                  /* Function calls clobber all call_used regs.  */
                  for (u = deps->last_function_call; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              }
          }
        /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
           it does not reload.  Ignore these as they have served their
           purpose already.  */
        else if (regno >= deps->max_reg)
          {
            if (GET_CODE (PATTERN (insn)) != USE
                && GET_CODE (PATTERN (insn)) != CLOBBER)
              abort ();
          }
        else
          {
            deps->reg_last[regno].uses
              = alloc_INSN_LIST (insn, deps->reg_last[regno].uses);
            SET_REGNO_REG_SET (&deps->reg_last_in_use, regno);

            for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
              add_dependence (insn, XEXP (u, 0), 0);

            /* ??? This should never happen.  */
            for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
              add_dependence (insn, XEXP (u, 0), 0);

            /* Pseudos that are REG_EQUIV to something may be replaced
               by that during reloading.  We need only add dependencies for
               the address in the REG_EQUIV note.  */
            if (!reload_completed
                && reg_known_equiv_p[regno]
                && GET_CODE (reg_known_value[regno]) == MEM)
              sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

            /* If the register does not already cross any calls, then add this
               insn to the sched_before_next_call list so that it will still
               not cross calls after scheduling.  */
            if (REG_N_CALLS_CROSSED (regno) == 0)
              add_dependence (deps->sched_before_next_call, insn,
                              REG_DEP_ANTI);
          }
        return;
      }
    case MEM:
      {
        /* Reading memory.  */
        rtx u;
        rtx pending, pending_mem;

        pending = deps->pending_read_insns;
        pending_mem = deps->pending_read_mems;
        while (pending)
          {
            if (read_dependence (XEXP (pending_mem, 0), x))
              add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        pending = deps->pending_write_insns;
        pending_mem = deps->pending_write_mems;
        while (pending)
          {
            if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
                                 x, rtx_varies_p))
              add_dependence (insn, XEXP (pending, 0), 0);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
          if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
              || deps_may_trap_p (x))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        add_insn_mem_dependence (deps, &deps->pending_read_insns,
                                 &deps->pending_read_mems, insn, x);

        /* Take advantage of tail recursion here.  */
        sched_analyze_2 (deps, XEXP (x, 0), insn);
        return;
      }
    /* Force pending stores to memory in case a trap handler needs them.  */
    case TRAP_IF:
      flush_pending_lists (deps, insn, 1);
      break;

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
      {
        rtx u;

        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.  */
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
          {
            for (i = 0; i < deps->max_reg; i++)
              {
                struct deps_reg *reg_last = &deps->reg_last[i];

                for (u = reg_last->uses; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                for (u = reg_last->sets; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), 0);
                for (u = reg_last->clobbers; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), 0);

                if (GET_CODE (PATTERN (insn)) != COND_EXEC)
                  free_INSN_LIST_list (&reg_last->uses);
              }
            reg_pending_sets_all = 1;

            flush_pending_lists (deps, insn, 0);
          }

        /* For all ASM_OPERANDS, we must traverse the vector of input
           operands.  We can not just fall through here since then we
           would be confused by the ASM_INPUT rtx inside ASM_OPERANDS,
           which does not indicate a traditional asm, unlike its normal
           usage.  */

        if (code == ASM_OPERANDS)
          {
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
            return;
          }
        break;
      }
    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    default:
      break;
    }
  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }
}
/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (deps, x, insn, loop_notes)
     struct deps *deps;
     rtx x, insn;
     rtx loop_notes;
{
  register RTX_CODE code = GET_CODE (x);
  int schedule_barrier_found = 0;
  rtx link;
  int i;

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
         false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    sched_analyze_1 (deps, x, insn);
  else if (code == PARALLEL)
    {
      register int i;
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        {
          rtx sub = XVECEXP (x, 0, i);
          code = GET_CODE (sub);

          if (code == COND_EXEC)
            {
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
              sub = COND_EXEC_CODE (sub);
              code = GET_CODE (sub);
            }
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (deps, sub, insn);
          else
            sched_analyze_2 (deps, sub, insn);
        }
    }
  else
    sched_analyze_2 (deps, x, insn);
  /* Mark registers CLOBBERED or used by the called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
      {
        if (GET_CODE (XEXP (link, 0)) == CLOBBER)
          sched_analyze_1 (deps, XEXP (link, 0), insn);
        else
          sched_analyze_2 (deps, XEXP (link, 0), insn);
      }

  if (GET_CODE (insn) == JUMP_INSN)
    {
      rtx next;
      next = next_nonnote_insn (insn);
      if (next && GET_CODE (next) == BARRIER)
        schedule_barrier_found = 1;
      else
        {
          rtx pending, pending_mem, u;
          regset_head tmp;
          INIT_REG_SET (&tmp);

          (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
          EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              for (u = reg_last->sets; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
              SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
            });

          CLEAR_REG_SET (&tmp);

          /* All memory writes and volatile reads must happen before the
             jump.  Non-volatile reads must happen before the jump iff
             the result is needed by the above register used mask.  */

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
        }
    }
  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */

  if (loop_notes)
    {
      rtx link = loop_notes;

      /* Update loop_notes with any notes from this insn.  Also determine
         if any of the notes on the list correspond to instruction scheduling
         barriers (loop, eh & setjmp notes, but not range notes).  */
      while (XEXP (link, 1))
        {
          if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_SETJMP)
            schedule_barrier_found = 1;

          link = XEXP (link, 1);
        }
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;
    }
  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  */
  if (flag_non_call_exceptions && can_throw_internal (insn))
    schedule_barrier_found = 1;
  /* Add dependencies if a scheduling barrier was found.  */
  if (schedule_barrier_found)
    {
      rtx u;

      for (i = 0; i < deps->max_reg; i++)
        {
          struct deps_reg *reg_last = &deps->reg_last[i];

          for (u = reg_last->uses; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
          for (u = reg_last->sets; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), 0);
          for (u = reg_last->clobbers; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), 0);

          if (GET_CODE (PATTERN (insn)) != COND_EXEC)
            free_INSN_LIST_list (&reg_last->uses);
        }
      flush_pending_lists (deps, insn, 0);

      reg_pending_sets_all = 1;
    }
  /* Accumulate clobbers until the next set so that it will be output
     dependent on all of them.  At the next set we can clear the clobber
     list, since subsequent sets will be output dependent on it.  */
  if (reg_pending_sets_all)
    {
      reg_pending_sets_all = 0;
      for (i = 0; i < deps->max_reg; i++)
        {
          struct deps_reg *reg_last = &deps->reg_last[i];
          if (GET_CODE (PATTERN (insn)) != COND_EXEC)
            {
              free_INSN_LIST_list (&reg_last->sets);
              free_INSN_LIST_list (&reg_last->clobbers);
            }
          reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
        }
    }
  else
    {
      EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
        {
          struct deps_reg *reg_last = &deps->reg_last[i];
          if (GET_CODE (PATTERN (insn)) != COND_EXEC)
            {
              free_INSN_LIST_list (&reg_last->sets);
              free_INSN_LIST_list (&reg_last->clobbers);
            }
          reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
        });
      EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
        {
          struct deps_reg *reg_last = &deps->reg_last[i];
          reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
        });
    }
  CLEAR_REG_SET (reg_pending_sets);
  CLEAR_REG_SET (reg_pending_clobbers);
  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
        goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        dest_regno = REGNO (tmp);
      else
        goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        src_regno = REGNO (tmp);
      else
        goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
          || dest_regno < FIRST_PSEUDO_REGISTER)
        {
          set_sched_group_p (insn);
          CANT_MOVE (insn) = 1;
        }
      else
        {
        end_call_group:
          deps->in_post_call_group_p = 0;
        }
    }
}
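
/* For instance, right after a call, a copy such as
   (set (reg 116) (reg:SI 0)) of a hard return register into a pseudo
   (assuming a target that returns values in hard reg 0) stays in the
   post-call group and is marked CANT_MOVE; a pseudo-to-pseudo move
   closes the group instead.  */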
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

void
sched_analyze (deps, head, tail)
     struct deps *deps;
     rtx head, tail;
{
  register rtx insn;
  register rtx u;
  rtx loop_notes = 0;

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
        {
          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          /* Clear out stale SCHED_GROUP_P.  */
          SCHED_GROUP_P (insn) = 0;

          /* Make each JUMP_INSN a scheduling barrier for memory
             references.  */
          if (GET_CODE (insn) == JUMP_INSN)
            deps->last_pending_memory_flush
              = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
          loop_notes = 0;
        }
      else if (GET_CODE (insn) == CALL_INSN)
        {
          rtx x;
          register int i;

          /* Clear out stale SCHED_GROUP_P.  */
          SCHED_GROUP_P (insn) = 0;

          CANT_MOVE (insn) = 1;

          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          /* Any instruction using a hard register which may get clobbered
             by a call needs to be marked as dependent on this call.
             This prevents a use of a hard return reg from being moved
             past a void call (i.e. it does not explicitly set the hard
             return reg).  */
          /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
             all registers, not just hard registers, may be clobbered by this
             call.  */

          /* Insn, being a CALL_INSN, magically depends on
             `last_function_call' already.  */

          if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
              && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
            {
              for (i = 0; i < deps->max_reg; i++)
                {
                  struct deps_reg *reg_last = &deps->reg_last[i];

                  for (u = reg_last->uses; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                  for (u = reg_last->sets; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), 0);
                  for (u = reg_last->clobbers; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), 0);

                  free_INSN_LIST_list (&reg_last->uses);
                }
              reg_pending_sets_all = 1;

              /* Add a pair of REG_SAVE_NOTEs which we will later
                 convert back into a NOTE_INSN_SETJMP note.  See
                 reemit_notes for why we use a pair of NOTEs.  */
              REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                                  GEN_INT (0),
                                                  REG_NOTES (insn));
              REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                                  GEN_INT (NOTE_INSN_SETJMP),
                                                  REG_NOTES (insn));
            }
          else
            {
              for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
                if (call_used_regs[i] || global_regs[i])
                  {
                    for (u = deps->reg_last[i].uses; u; u = XEXP (u, 1))
                      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                    for (u = deps->reg_last[i].sets; u; u = XEXP (u, 1))
                      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

                    SET_REGNO_REG_SET (reg_pending_clobbers, i);
                  }
            }

          /* For each insn which shouldn't cross a call, add a dependence
             between that insn and this call insn.  */
          x = LOG_LINKS (deps->sched_before_next_call);
          while (x)
            {
              add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
              x = XEXP (x, 1);
            }
          free_INSN_LIST_list (&LOG_LINKS (deps->sched_before_next_call));
          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
          loop_notes = 0;

          /* In the absence of interprocedural alias analysis, we must flush
             all pending reads and writes, and start new dependencies starting
             from here.  But only flush writes for constant calls (which may
             be passed a pointer to something we haven't written yet).  */
          flush_pending_lists (deps, insn, CONST_CALL_P (insn));

          /* Depend this function call (actually, the user of this
             function call) on all hard register clobberage.  */

          /* last_function_call is now a list of insns.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = 1;
        }
      /* See comments on reemit_notes as to why we do this.
         ??? Actually, the reemit_notes just say what is done, not why.  */

      else if (GET_CODE (insn) == NOTE
               && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
        {
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, NOTE_RANGE_INFO (insn),
                                        loop_notes);
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        GEN_INT (NOTE_LINE_NUMBER (insn)),
                                        loop_notes);
        }
      else if (GET_CODE (insn) == NOTE
               && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
                   || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
                       && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
        {
          rtx rtx_region;

          if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
            rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
          else
            rtx_region = GEN_INT (0);

          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        rtx_region,
                                        loop_notes);
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        GEN_INT (NOTE_LINE_NUMBER (insn)),
                                        loop_notes);
          CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
        }

      if (insn == tail)
        return;
    }
  abort ();
}
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */

void
compute_forward_dependences (head, tail)
     rtx head, tail;
{
  rtx insn, link;
  rtx next_tail;
  enum reg_note dep_type;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
        continue;

      insn = group_leader (insn);

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
        {
          rtx x = group_leader (XEXP (link, 0));
          rtx new_link;

          if (x != XEXP (link, 0))
            continue;

#ifdef ENABLE_CHECKING
          /* If add_dependence is working properly there should never
             be notes, deleted insns or duplicates in the backward
             links.  Thus we need not check for them here.

             However, if we have enabled checking we might as well go
             ahead and verify that add_dependence worked properly.  */
          if (GET_CODE (x) == NOTE
              || INSN_DELETED_P (x)
              || (forward_dependency_cache != NULL
                  && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
                               INSN_LUID (insn)))
              || (forward_dependency_cache == NULL
                  && find_insn_list (insn, INSN_DEPEND (x))))
            abort ();
          if (forward_dependency_cache != NULL)
            SET_BIT (forward_dependency_cache[INSN_LUID (x)],
                     INSN_LUID (insn));
#endif

          new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));

          dep_type = REG_NOTE_KIND (link);
          PUT_REG_NOTE_KIND (new_link, dep_type);

          INSN_DEPEND (x) = new_link;
          INSN_DEP_COUNT (insn) += 1;
        }
    }
}
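
/* Consumers walk INSN_DEPEND the same way LOG_LINKS is walked above;
   as a hypothetical sketch (note_successor is a placeholder, not a
   real function):

     for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
       note_successor (XEXP (link, 0), REG_NOTE_KIND (link));  */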
/* Initialize variables for region data dependence analysis.
   n_bbs is the number of region blocks.  */

void
init_deps (deps)
     struct deps *deps;
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = (struct deps_reg *)
    xcalloc (max_reg, sizeof (struct deps_reg));
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->in_post_call_group_p = 0;

  deps->sched_before_next_call
    = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
                    NULL_RTX, 0, NULL_RTX, NULL_RTX);
  LOG_LINKS (deps->sched_before_next_call) = 0;
}
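
/* Typical per-region lifecycle, as a hypothetical sketch (the real
   callers live in the scheduler proper):

     struct deps tmp_deps;
     init_deps (&tmp_deps);
     sched_analyze (&tmp_deps, head, tail);
     compute_forward_dependences (head, tail);
     free_deps (&tmp_deps);  */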
/* Free insn lists found in DEPS.  */

void
free_deps (deps)
     struct deps *deps;
{
  int i;

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a test case with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      free_INSN_LIST_list (&reg_last->uses);
      free_INSN_LIST_list (&reg_last->sets);
      free_INSN_LIST_list (&reg_last->clobbers);
    });
  CLEAR_REG_SET (&deps->reg_last_in_use);

  free (deps->reg_last);
  deps->reg_last = NULL;
}
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled;
   it is used in the estimate of profitability.  */

void
init_dependency_caches (luid)
     int luid;
{
  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
  if (luid / n_basic_blocks > 100 * 5)
    {
      true_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (true_dependency_cache, luid);
      anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (anti_dependency_cache, luid);
      output_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (output_dependency_cache, luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (forward_dependency_cache, luid);
#endif
    }
}
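
/* The threshold works out to an average of more than 500 insns per
   basic block (100 * 5): e.g. a function with 10 basic blocks enables
   the caches only once it contains more than 5000 scheduled insns.  */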
/* Free the caches allocated in init_dependency_caches.  */

void
free_dependency_caches ()
{
  if (true_dependency_cache)
    {
      sbitmap_vector_free (true_dependency_cache);
      true_dependency_cache = NULL;
      sbitmap_vector_free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      sbitmap_vector_free (output_dependency_cache);
      output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      sbitmap_vector_free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
    }
}
/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global ()
{
  reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
  reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
  reg_pending_sets_all = 0;
}

/* Free everything used by the dependency analysis code.  */

void
finish_deps_global ()
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
}