1 /* Instruction scheduling pass. This file computes dependencies between
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to the
22 Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA
30 #include "hard-reg-set.h"
31 #include "basic-block.h"
35 #include "insn-config.h"
36 #include "insn-attr.h"
40 #include "sched-int.h"
44 extern char *reg_known_equiv_p;
45 extern rtx *reg_known_value;
47 static regset_head reg_pending_sets_head;
48 static regset_head reg_pending_clobbers_head;
50 static regset reg_pending_sets;
51 static regset reg_pending_clobbers;
52 static int reg_pending_sets_all;
54 /* To speed up the test for duplicate dependency links we keep a
55 record of dependencies created by add_dependence when the average
56 number of instructions in a basic block is very large.
58 Studies have shown that there is typically around 5 instructions between
59 branches for typical C code. So we can make a guess that the average
60 basic block is approximately 5 instructions long; we will choose 100X
61 the average size as a very large basic block.
63 Each insn has associated bitmaps for its dependencies. Each bitmap
64 has enough entries to represent a dependency on any other insn in
65 the insn chain. If the bitmap for the true-dependency cache is
66 allocated, then the other two caches are allocated as well. */
67 static sbitmap *true_dependency_cache;
68 static sbitmap *anti_dependency_cache;
69 static sbitmap *output_dependency_cache;
71 /* To speed up checking consistency of formed forward insn
72 dependencies we use the following cache. Another possible solution
73 could be switching off checking duplication of insns in forward dependencies. */
75 #ifdef ENABLE_CHECKING
76 static sbitmap *forward_dependency_cache;
79 static int deps_may_trap_p PARAMS ((rtx));
80 static void remove_dependence PARAMS ((rtx, rtx));
81 static void set_sched_group_p PARAMS ((rtx));
83 static void flush_pending_lists PARAMS ((struct deps *, rtx, int));
84 static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
85 static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
86 static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
87 static rtx group_leader PARAMS ((rtx));
89 static rtx get_condition PARAMS ((rtx));
90 static int conditions_mutex_p PARAMS ((rtx, rtx));
92 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
/* NOTE(review): this extract is line-sampled -- the embedded original line
   numbers jump (92 -> 98), so the function header of deps_may_trap_p and
   part of its guard condition are missing here.  Visible logic: if the
   address is a pseudo register with a known equivalent value
   (reg_known_value, from alias analysis), substitute that value before
   asking rtx_addr_can_trap_p whether the access may trap.  */
98 rtx addr = XEXP (mem, 0);
101 && REGNO (addr) >= FIRST_PSEUDO_REGISTER
102 && reg_known_value[REGNO (addr)])
103 addr = reg_known_value[REGNO (addr)];
104 return rtx_addr_can_trap_p (addr);
107 /* Return the INSN_LIST containing INSN in LIST, or NULL
108 if LIST does not contain INSN. */
/* NOTE(review): K&R-style definition; the parameter declarations, the loop
   header and the return statements fall on lines missing from this
   extract (numbering jumps 111 -> 117 -> 119).  Visible logic: walk the
   INSN_LIST via XEXP (list, 1), comparing each node's XEXP (list, 0)
   against INSN.  */
111 find_insn_list (insn, list)
117 if (XEXP (list, 0) == insn)
119 list = XEXP (list, 1);
124 /* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0
/* NOTE(review): the comment above is cut off mid-sentence and the function
   header/loop/returns are on lines missing from this extract.  Visible
   logic: LIST and LIST1 are walked in lockstep, matching INSN against
   LIST's elements and X against LIST1's elements.  */
128 find_insn_mem_list (insn, x, list, list1)
134 if (XEXP (list, 0) == insn
135 && XEXP (list1, 0) == x)
137 list = XEXP (list, 1);
138 list1 = XEXP (list1, 1);
143 /* Find the condition under which INSN is executed. */
/* NOTE(review): several lines are missing from this extract (declaration
   of `cond', early `return 0;' statements, one `return cond;' arm).
   Visible logic: a COND_EXEC pattern yields its test directly; otherwise
   only conditional JUMP_INSNs whose pattern is (set (pc)
   (if_then_else ...)) are handled, returning the branch condition --
   reversed with reverse_condition when the taken arm is the fall-through
   (XEXP (cond, 2) is the LABEL_REF).  */
149 rtx pat = PATTERN (insn);
154 if (GET_CODE (pat) == COND_EXEC)
155 return COND_EXEC_TEST (pat);
156 if (GET_CODE (insn) != JUMP_INSN)
158 if (GET_CODE (pat) != SET || SET_SRC (pat) != pc_rtx)
160 if (GET_CODE (SET_DEST (pat)) != IF_THEN_ELSE)
162 pat = SET_DEST (pat);
163 cond = XEXP (pat, 0);
164 if (GET_CODE (XEXP (cond, 1)) == LABEL_REF
165 && XEXP (cond, 2) == pc_rtx)
167 else if (GET_CODE (XEXP (cond, 2)) == LABEL_REF
168 && XEXP (cond, 1) == pc_rtx)
169 return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
170 XEXP (cond, 0), XEXP (cond, 1));
175 /* Return nonzero if conditions COND1 and COND2 can never be both true. */
/* NOTE(review): the parameter declarations, braces and return statements
   are on lines missing from this extract.  Visible logic: two comparison
   rtxes are mutually exclusive when one's code is the reverse_condition
   of the other's and both compare the identical operands (pointer
   equality on XEXP 0 and 1).  */
178 conditions_mutex_p (cond1, cond2)
181 if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
182 && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
183 && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
184 && XEXP (cond1, 0) == XEXP (cond2, 0)
185 && XEXP (cond1, 1) == XEXP (cond2, 1))
190 /* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
191 LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
192 of dependence that this link represents. */
/* NOTE(review): this extract is line-sampled; local declarations, braces,
   early returns and some condition lines are missing (embedded numbering
   jumps, e.g. 198 -> 204).  Documented from what is visible:
   1. self- and deleted-insn (NOTE) dependencies are discarded;
   2. for non-call insns with provably mutually-exclusive COND_EXEC
      conditions the dependence is elided (with a ??? caveat);
   3. ELEM is redirected to the last insn of a SCHED_GROUP_P sequence;
   4. the sbitmap dependency caches (true/anti/output, indexed by
      INSN_LUID pairs) are consulted to skip duplicate links, then kept
      in sync whenever a link is added or its kind is tightened.
   A numerically smaller reg_note kind is the more restrictive
   dependence, hence the `<' comparisons.  */
195 add_dependence (insn, elem, dep_type)
198 enum reg_note dep_type;
204 /* Don't depend an insn on itself. */
208 /* We can get a dependency on deleted insns due to optimizations in
209 the register allocation and reloading or due to splitting. Any
210 such dependency is useless and can be ignored. */
211 if (GET_CODE (elem) == NOTE)
214 /* flow.c doesn't handle conditional lifetimes entirely correctly;
215 calls mess up the conditional lifetimes. */
216 /* ??? add_dependence is the wrong place to be eliding dependencies,
217 as that forgets that the condition expressions themselves may
219 if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
221 cond1 = get_condition (insn);
222 cond2 = get_condition (elem);
224 && conditions_mutex_p (cond1, cond2)
225 && !modified_in_p (cond1, elem))
229 /* If elem is part of a sequence that must be scheduled together, then
230 make the dependence point to the last insn of the sequence.
231 When HAVE_cc0, it is possible for NOTEs to exist between users and
232 setters of the condition codes, so we must skip past notes here.
233 Otherwise, NOTEs are impossible here. */
234 next = next_nonnote_insn (elem);
235 if (next && SCHED_GROUP_P (next)
236 && GET_CODE (next) != CODE_LABEL)
238 /* Notes will never intervene here though, so don't bother checking
241 /* We must reject CODE_LABELs, so that we don't get confused by one
242 that has LABEL_PRESERVE_P set, which is represented by the same
243 bit in the rtl as SCHED_GROUP_P. A CODE_LABEL can never be
247 while ((nnext = next_nonnote_insn (next)) != NULL
248 && SCHED_GROUP_P (nnext)
249 && GET_CODE (nnext) != CODE_LABEL)
252 /* Again, don't depend an insn on itself. */
256 /* Make the dependence to NEXT, the last insn of the group, instead
257 of the original ELEM. */
262 #ifdef INSN_SCHEDULING
263 /* ??? No good way to tell from here whether we're doing interblock
264 scheduling. Possibly add another callback. */
266 /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
267 No need for interblock dependences with calls, since
268 calls are not moved between blocks. Note: the edge where
269 elem is a CALL is still required. */
270 if (GET_CODE (insn) == CALL_INSN
271 && (INSN_BB (elem) != INSN_BB (insn)))
275 /* If we already have a dependency for ELEM, then we do not need to
276 do anything. Avoiding the list walk below can cut compile times
277 dramatically for some code. */
278 if (true_dependency_cache != NULL)
280 enum reg_note present_dep_type = 0;
282 if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
284 if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
285 /* Do nothing (present_set_type is already 0). */
287 else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
289 present_dep_type = REG_DEP_ANTI;
290 else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
292 present_dep_type = REG_DEP_OUTPUT;
295 if (present_p && (int) dep_type >= (int) present_dep_type)
300 /* Check that we don't already have this dependence. */
302 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
303 if (XEXP (link, 0) == elem)
305 #ifdef INSN_SCHEDULING
306 /* Clear corresponding cache entry because type of the link
308 if (true_dependency_cache != NULL)
310 if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
311 RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
313 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
314 && output_dependency_cache)
315 RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
322 /* If this is a more restrictive type of dependence than the existing
323 one, then change the existing dependence to this type. */
324 if ((int) dep_type < (int) REG_NOTE_KIND (link))
325 PUT_REG_NOTE_KIND (link, dep_type);
327 #ifdef INSN_SCHEDULING
328 /* If we are adding a dependency to INSN's LOG_LINKs, then
329 note that in the bitmap caches of dependency information. */
330 if (true_dependency_cache != NULL)
332 if ((int)REG_NOTE_KIND (link) == 0)
333 SET_BIT (true_dependency_cache[INSN_LUID (insn)],
335 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
336 SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
338 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
339 SET_BIT (output_dependency_cache[INSN_LUID (insn)],
345 /* Might want to check one level of transitivity to save conses. */
347 link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
348 LOG_LINKS (insn) = link;
350 /* Insn dependency, not data dependency. */
351 PUT_REG_NOTE_KIND (link, dep_type);
353 #ifdef INSN_SCHEDULING
354 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
355 in the bitmap caches of dependency information. */
356 if (true_dependency_cache != NULL)
358 if ((int)dep_type == 0)
359 SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
360 else if (dep_type == REG_DEP_ANTI)
361 SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
362 else if (dep_type == REG_DEP_OUTPUT)
363 SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
368 /* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
369 of INSN. Abort if not found. */
/* NOTE(review): braces, the `found' bookkeeping and the final abort path
   are on lines missing from this extract.  Visible logic: a classic
   singly-linked-list unlink (prev/link/next walk over LOG_LINKS); on a
   match the node is spliced out, the matching bit is cleared from the
   true/anti/output sbitmap caches to keep them consistent, and the node
   is returned to the INSN_LIST free pool.  */
372 remove_dependence (insn, elem)
376 rtx prev, link, next;
379 for (prev = 0, link = LOG_LINKS (insn); link; link = next)
381 next = XEXP (link, 1);
382 if (XEXP (link, 0) == elem)
385 XEXP (prev, 1) = next;
387 LOG_LINKS (insn) = next;
389 #ifdef INSN_SCHEDULING
390 /* If we are removing a dependency from the LOG_LINKS list,
391 make sure to remove it from the cache too. */
392 if (true_dependency_cache != NULL)
394 if (REG_NOTE_KIND (link) == 0)
395 RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
397 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
398 RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
400 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
401 RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
406 free_INSN_LIST_node (link);
419 /* Return an insn which represents a SCHED_GROUP, which is
420 the last insn in the group. */
/* NOTE(review): only the loop step and termination test survive in this
   extract (numbering jumps 420 -> 431); the function header, the `prev'
   tracking and the return are missing.  Visible logic: advance through
   non-note insns while SCHED_GROUP_P stays set, rejecting CODE_LABELs
   (whose LABEL_PRESERVE_P shares the SCHED_GROUP_P rtl bit).  */
431 insn = next_nonnote_insn (insn);
433 while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));
438 /* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
439 goes along with that. */
/* NOTE(review): local declarations (prev, link) and braces are on lines
   missing from this extract.  Visible logic: mark INSN as glued to its
   predecessor, drop any explicit INSN->prev dependence (now implied by
   SCHED_GROUP_P), and copy every dependence of the previous non-note
   insn onto INSN so the whole group inherits them.  */
442 set_sched_group_p (insn)
447 SCHED_GROUP_P (insn) = 1;
449 /* There may be a note before this insn now, but all notes will
450 be removed before we actually try to schedule the insns, so
451 it won't cause a problem later. We must avoid it here though. */
452 prev = prev_nonnote_insn (insn);
454 /* Make a copy of all dependencies on the immediately previous insn,
455 and add to this insn. This is so that all the dependencies will
456 apply to the group. Remove an explicit dependence on this insn
457 as SCHED_GROUP_P now represents it. */
459 if (find_insn_list (prev, LOG_LINKS (insn)))
460 remove_dependence (insn, prev);
462 for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
463 add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
466 /* Process an insn's memory dependencies. There are four kinds of
469 (0) read dependence: read follows read
470 (1) true dependence: read follows write
471 (2) anti dependence: write follows read
472 (3) output dependence: write follows write
474 We are careful to build only dependencies which actually exist, and
475 use transitivity to avoid building too many links. */
477 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
478 The MEM is a memory reference contained within INSN, which we are saving
479 so that we can do memory aliasing on it. */
/* NOTE(review): the `struct deps *deps' parameter declaration, braces and
   the stores of `link' back through *insn_list / *mem_list are on lines
   missing from this extract.  Visible logic: cons INSN onto *insn_list;
   when cselib is in use, record a shallow copy of MEM with its address
   replaced by cselib values (so later alias queries compare values, not
   syntactic addresses); cons MEM onto *mem_list and bump the pending-list
   length counter used for overflow flushing.  */
482 add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
484 rtx *insn_list, *mem_list, insn, mem;
488 link = alloc_INSN_LIST (insn, *insn_list);
491 if (current_sched_info->use_cselib)
493 mem = shallow_copy_rtx (mem);
494 XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
496 link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
499 deps->pending_lists_length++;
502 /* Make a dependency between every memory reference on the pending lists
503 and INSN, thus flushing the pending lists. If ONLY_WRITE, don't flush
/* NOTE(review): the comment above is cut off ("... don't flush the
   read list", presumably -- confirm against the full file) and the
   parameter declarations, braces and dependence-kind arguments of the
   two add_dependence calls are on missing lines.  Visible logic: drain
   pending_read_* (unless ONLY_WRITE) and pending_write_* in lockstep,
   making INSN depend on each recorded insn and freeing the list nodes;
   then make INSN anti-depend on every previous memory-flush insn and
   reset last_pending_memory_flush to just INSN.  */
507 flush_pending_lists (deps, insn, only_write)
515 while (deps->pending_read_insns && ! only_write)
517 add_dependence (insn, XEXP (deps->pending_read_insns, 0),
520 link = deps->pending_read_insns;
521 deps->pending_read_insns = XEXP (deps->pending_read_insns, 1);
522 free_INSN_LIST_node (link);
524 link = deps->pending_read_mems;
525 deps->pending_read_mems = XEXP (deps->pending_read_mems, 1);
526 free_EXPR_LIST_node (link);
528 while (deps->pending_write_insns)
530 add_dependence (insn, XEXP (deps->pending_write_insns, 0),
533 link = deps->pending_write_insns;
534 deps->pending_write_insns = XEXP (deps->pending_write_insns, 1);
535 free_INSN_LIST_node (link);
537 link = deps->pending_write_mems;
538 deps->pending_write_mems = XEXP (deps->pending_write_mems, 1);
539 free_EXPR_LIST_node (link);
541 deps->pending_lists_length = 0;
543 /* last_pending_memory_flush is now a list of insns. */
544 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
545 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
547 free_INSN_LIST_list (&deps->last_pending_memory_flush);
548 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
549 deps->pending_flush_length = 1;
552 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
553 rtx, X, creating all dependencies generated by the write to the
554 destination of X, and reads of everything mentioned. */
/* NOTE(review): line-sampled extract -- braces, `return's, some loop
   headers (e.g. the `while (pending)' loops around lines 717/728) and a
   few condition lines are missing.  Visible structure:
   - PARALLEL destinations recurse on each element as a CLOBBER, then
     analyze SET_SRC and return;
   - STRICT_LOW_PART/SUBREG/ZERO_EXTRACT/SIGN_EXTRACT wrappers are
     peeled (the extract position/width operands are reads);
   - REG destinations: for hard regs each of HARD_REGNO_NREGS component
     regs r gets anti deps on prior uses and output deps on prior
     sets/clobbers, then is queued in reg_pending_sets or
     reg_pending_clobbers (same scheme for pseudos, indexed by regno);
     pseudos with a MEM REG_EQUIV also analyze the note address, and
     regs that never crossed a call are fenced against the last call;
   - MEM destinations: consult the pending read/write lists via
     anti_dependence/output_dependence (with cselib value substitution
     when enabled), flush when the list exceeds
     MAX_PENDING_LIST_LENGTH, record the store, and analyze the
     address.  Finally, for SET the source is analyzed.  */
557 sched_analyze_1 (deps, x, insn)
563 register rtx dest = XEXP (x, 0);
564 enum rtx_code code = GET_CODE (x);
569 if (GET_CODE (dest) == PARALLEL)
573 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
574 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
575 sched_analyze_1 (deps,
576 gen_rtx_CLOBBER (VOIDmode,
577 XEXP (XVECEXP (dest, 0, i), 0)),
580 if (GET_CODE (x) == SET)
581 sched_analyze_2 (deps, SET_SRC (x), insn);
585 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
586 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
588 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
590 /* The second and third arguments are values read by this insn. */
591 sched_analyze_2 (deps, XEXP (dest, 1), insn);
592 sched_analyze_2 (deps, XEXP (dest, 2), insn);
594 dest = XEXP (dest, 0);
597 if (GET_CODE (dest) == REG)
601 regno = REGNO (dest);
603 /* A hard reg in a wide mode may really be multiple registers.
604 If so, mark all of them just like the first. */
605 if (regno < FIRST_PSEUDO_REGISTER)
607 i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
613 for (u = deps->reg_last[r].uses; u; u = XEXP (u, 1))
614 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
616 for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
617 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
619 /* Clobbers need not be ordered with respect to one
620 another, but sets must be ordered with respect to a
624 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
625 free_INSN_LIST_list (&deps->reg_last[r].uses);
626 for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
627 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
628 SET_REGNO_REG_SET (reg_pending_sets, r);
631 SET_REGNO_REG_SET (reg_pending_clobbers, r);
633 /* Function calls clobber all call_used regs. */
636 && TEST_HARD_REG_BIT (regs_invalidated_by_call, r)))
637 for (u = deps->last_function_call; u; u = XEXP (u, 1))
638 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
641 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
642 it does not reload. Ignore these as they have served their
644 else if (regno >= deps->max_reg)
646 if (GET_CODE (PATTERN (insn)) != USE
647 && GET_CODE (PATTERN (insn)) != CLOBBER)
654 for (u = deps->reg_last[regno].uses; u; u = XEXP (u, 1))
655 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
657 for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
658 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
662 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
663 free_INSN_LIST_list (&deps->reg_last[regno].uses);
664 for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
665 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
666 SET_REGNO_REG_SET (reg_pending_sets, regno);
669 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
671 /* Pseudos that are REG_EQUIV to something may be replaced
672 by that during reloading. We need only add dependencies for
673 the address in the REG_EQUIV note. */
674 if (!reload_completed
675 && reg_known_equiv_p[regno]
676 && GET_CODE (reg_known_value[regno]) == MEM)
677 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
679 /* Don't let it cross a call after scheduling if it doesn't
680 already cross one. */
682 if (REG_N_CALLS_CROSSED (regno) == 0)
683 for (u = deps->last_function_call; u; u = XEXP (u, 1))
684 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
687 else if (GET_CODE (dest) == MEM)
689 /* Writing memory. */
692 if (current_sched_info->use_cselib)
694 t = shallow_copy_rtx (dest);
695 cselib_lookup (XEXP (t, 0), Pmode, 1);
696 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
699 if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
701 /* Flush all pending reads and writes to prevent the pending lists
702 from getting any larger. Insn scheduling runs too slowly when
703 these lists get long. When compiling GCC with itself,
704 this flush occurs 8 times for sparc, and 10 times for m88k using
705 the default value of 32. */
706 flush_pending_lists (deps, insn, 0);
711 rtx pending, pending_mem;
713 pending = deps->pending_read_insns;
714 pending_mem = deps->pending_read_mems;
717 if (anti_dependence (XEXP (pending_mem, 0), t))
718 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
720 pending = XEXP (pending, 1);
721 pending_mem = XEXP (pending_mem, 1);
724 pending = deps->pending_write_insns;
725 pending_mem = deps->pending_write_mems;
728 if (output_dependence (XEXP (pending_mem, 0), t))
729 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
731 pending = XEXP (pending, 1);
732 pending_mem = XEXP (pending_mem, 1);
735 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
736 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
738 add_insn_mem_dependence (deps, &deps->pending_write_insns,
739 &deps->pending_write_mems, insn, dest);
741 sched_analyze_2 (deps, XEXP (dest, 0), insn);
745 if (GET_CODE (x) == SET)
746 sched_analyze_2 (deps, SET_SRC (x), insn);
749 /* Analyze the uses of memory and registers in rtx X in INSN. */
/* NOTE(review): line-sampled extract -- the big `switch (code)' skeleton,
   its case labels for constants/REG/MEM/TRAP_IF/ASM_OPERANDS and the
   pre/post inc-dec cases, plus braces and `return's, are largely on
   missing lines; what remains is the interior of those cases.  Also
   beware: `®_last' below is a mojibake of `&reg_last' from a bad
   character decode -- bytes kept as found.  Visible structure:
   - constants are ignored; a cc0 use glues INSN to its setter via
     set_sched_group_p;
   - REG: record the use in deps->reg_last[].uses, add true deps (kind
     0) on prior sets/clobbers, fence call-clobbered hard regs and
     never-call-crossing pseudos against calls, and analyze a MEM
     REG_EQUIV address for pseudos;
   - MEM: read deps against pending reads, true deps against pending
     writes (via read_dependence/true_dependence, with cselib value
     substitution when enabled), anti deps on memory flushes (skipping
     jumps unless the load may trap), then record the read and
     tail-recurse on the address;
   - TRAP_IF (per the comment at 906) flushes pending stores;
   - volatile asm / UNSPEC_VOLATILE conservatively depends on every
     register's uses/sets/clobbers and flushes memory; ASM_OPERANDS
     inputs are each analyzed;
   - pre/post modify cases go through both sched_analyze_2 (read) and
     sched_analyze_1 (write), reads first;
   - default: generic walk over the rtx format string.  */
752 sched_analyze_2 (deps, x, insn)
759 register enum rtx_code code;
760 register const char *fmt;
774 /* Ignore constants. Note that we must handle CONST_DOUBLE here
775 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
776 this does not mean that this insn is using cc0. */
781 /* User of CC0 depends on immediately preceding insn. */
782 set_sched_group_p (insn);
789 int regno = REGNO (x);
790 if (regno < FIRST_PSEUDO_REGISTER)
794 i = HARD_REGNO_NREGS (regno, GET_MODE (x));
798 deps->reg_last[r].uses
799 = alloc_INSN_LIST (insn, deps->reg_last[r].uses);
800 SET_REGNO_REG_SET (&deps->reg_last_in_use, r);
802 for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
803 add_dependence (insn, XEXP (u, 0), 0);
805 /* ??? This should never happen. */
806 for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
807 add_dependence (insn, XEXP (u, 0), 0);
809 if (call_used_regs[r] || global_regs[r])
810 /* Function calls clobber all call_used regs. */
811 for (u = deps->last_function_call; u; u = XEXP (u, 1))
812 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
815 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
816 it does not reload. Ignore these as they have served their
818 else if (regno >= deps->max_reg)
820 if (GET_CODE (PATTERN (insn)) != USE
821 && GET_CODE (PATTERN (insn)) != CLOBBER)
826 deps->reg_last[regno].uses
827 = alloc_INSN_LIST (insn, deps->reg_last[regno].uses);
828 SET_REGNO_REG_SET (&deps->reg_last_in_use, regno);
830 for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
831 add_dependence (insn, XEXP (u, 0), 0);
833 /* ??? This should never happen. */
834 for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
835 add_dependence (insn, XEXP (u, 0), 0);
837 /* Pseudos that are REG_EQUIV to something may be replaced
838 by that during reloading. We need only add dependencies for
839 the address in the REG_EQUIV note. */
840 if (!reload_completed
841 && reg_known_equiv_p[regno]
842 && GET_CODE (reg_known_value[regno]) == MEM)
843 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
845 /* If the register does not already cross any calls, then add this
846 insn to the sched_before_next_call list so that it will still
847 not cross calls after scheduling. */
848 if (REG_N_CALLS_CROSSED (regno) == 0)
849 add_dependence (deps->sched_before_next_call, insn,
857 /* Reading memory. */
859 rtx pending, pending_mem;
862 if (current_sched_info->use_cselib)
864 t = shallow_copy_rtx (t);
865 cselib_lookup (XEXP (t, 0), Pmode, 1);
866 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
868 pending = deps->pending_read_insns;
869 pending_mem = deps->pending_read_mems;
872 if (read_dependence (XEXP (pending_mem, 0), t))
873 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
875 pending = XEXP (pending, 1);
876 pending_mem = XEXP (pending_mem, 1);
879 pending = deps->pending_write_insns;
880 pending_mem = deps->pending_write_mems;
883 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
885 add_dependence (insn, XEXP (pending, 0), 0);
887 pending = XEXP (pending, 1);
888 pending_mem = XEXP (pending_mem, 1);
891 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
892 if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
893 || deps_may_trap_p (x))
894 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
896 /* Always add these dependencies to pending_reads, since
897 this insn may be followed by a write. */
898 add_insn_mem_dependence (deps, &deps->pending_read_insns,
899 &deps->pending_read_mems, insn, x);
901 /* Take advantage of tail recursion here. */
902 sched_analyze_2 (deps, XEXP (x, 0), insn);
906 /* Force pending stores to memory in case a trap handler needs them. */
908 flush_pending_lists (deps, insn, 1);
913 case UNSPEC_VOLATILE:
917 /* Traditional and volatile asm instructions must be considered to use
918 and clobber all hard registers, all pseudo-registers and all of
919 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
921 Consider for instance a volatile asm that changes the fpu rounding
922 mode. An insn should not be moved across this even if it only uses
923 pseudo-regs because it might give an incorrectly rounded result. */
924 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
926 for (i = 0; i < deps->max_reg; i++)
928 struct deps_reg *reg_last = &deps->reg_last[i];
930 for (u = reg_last->uses; u; u = XEXP (u, 1))
931 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
932 for (u = reg_last->sets; u; u = XEXP (u, 1))
933 add_dependence (insn, XEXP (u, 0), 0);
934 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
935 add_dependence (insn, XEXP (u, 0), 0);
937 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
938 free_INSN_LIST_list (®_last->uses);
940 reg_pending_sets_all = 1;
942 flush_pending_lists (deps, insn, 0);
945 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
946 We can not just fall through here since then we would be confused
947 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
948 traditional asms unlike their normal usage. */
950 if (code == ASM_OPERANDS)
952 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
953 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
963 /* These both read and modify the result. We must handle them as writes
964 to get proper dependencies for following instructions. We must handle
965 them as reads to get proper dependencies from this to previous
966 instructions. Thus we need to pass them to both sched_analyze_1
967 and sched_analyze_2. We must call sched_analyze_2 first in order
968 to get the proper antecedent for the read. */
969 sched_analyze_2 (deps, XEXP (x, 0), insn);
970 sched_analyze_1 (deps, x, insn);
975 /* op0 = op0 + op1 */
976 sched_analyze_2 (deps, XEXP (x, 0), insn);
977 sched_analyze_2 (deps, XEXP (x, 1), insn);
978 sched_analyze_1 (deps, x, insn);
985 /* Other cases: walk the insn. */
986 fmt = GET_RTX_FORMAT (code);
987 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
990 sched_analyze_2 (deps, XEXP (x, i), insn);
991 else if (fmt[i] == 'E')
992 for (j = 0; j < XVECLEN (x, i); j++)
993 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
997 /* Analyze an INSN with pattern X to find all dependencies. */
/* NOTE(review): line-sampled extract -- parameter declarations, braces,
   several `while (pending)' loop headers, the `if (loop_notes)' guard
   around lines 1118-1133, and the `end_call_group:' label are on
   missing lines.  `®_last' below is a mojibake of `&reg_last' -- bytes
   kept as found.  Visible structure, in order:
   1. dispatch on the pattern: COND_EXEC test analyzed first, then
      SET/CLOBBER -> sched_analyze_1, PARALLEL -> per-element dispatch,
      anything else -> sched_analyze_2;
   2. CALL_INSN: analyze CALL_INSN_FUNCTION_USAGE; REG_SETJMP makes the
      call a scheduling barrier;
   3. JUMP_INSN: a following BARRIER makes it a barrier; registers the
      target says the jump needs (compute_jump_reg_dependencies) get
      anti deps on their setters; all pending writes and volatile
      pending reads must stay before the jump;
   4. loop_notes are scanned for LOOP/EH_REGION begin/end (barriers)
      and spliced onto REG_NOTES; internally-throwing insns are
      barriers when -fnon-call-exceptions;
   5. a barrier depends on every register's uses/sets/clobbers and
      flushes memory, then marks reg_pending_sets_all;
   6. reg_pending_sets_all / reg_pending_sets / reg_pending_clobbers
      are drained into deps->reg_last[] (COND_EXEC insns keep, rather
      than replace, the prior sets/clobbers lists);
   7. an open post-call group is kept only for a simple hard-reg move
      (either side, looking through SUBREG); such moves are glued to
      the call and marked CANT_MOVE, anything else closes the group.  */
1000 sched_analyze_insn (deps, x, insn, loop_notes)
1005 register RTX_CODE code = GET_CODE (x);
1006 int schedule_barrier_found = 0;
1010 if (code == COND_EXEC)
1012 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
1014 /* ??? Should be recording conditions so we reduce the number of
1015 false dependancies. */
1016 x = COND_EXEC_CODE (x);
1017 code = GET_CODE (x);
1019 if (code == SET || code == CLOBBER)
1020 sched_analyze_1 (deps, x, insn);
1021 else if (code == PARALLEL)
1024 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1026 rtx sub = XVECEXP (x, 0, i);
1027 code = GET_CODE (sub);
1029 if (code == COND_EXEC)
1031 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
1032 sub = COND_EXEC_CODE (sub);
1033 code = GET_CODE (sub);
1035 if (code == SET || code == CLOBBER)
1036 sched_analyze_1 (deps, sub, insn);
1038 sched_analyze_2 (deps, sub, insn);
1042 sched_analyze_2 (deps, x, insn);
1044 /* Mark registers CLOBBERED or used by called function. */
1045 if (GET_CODE (insn) == CALL_INSN)
1047 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1049 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1050 sched_analyze_1 (deps, XEXP (link, 0), insn);
1052 sched_analyze_2 (deps, XEXP (link, 0), insn);
1054 if (find_reg_note (insn, REG_SETJMP, NULL))
1055 schedule_barrier_found = 1;
1058 if (GET_CODE (insn) == JUMP_INSN)
1061 next = next_nonnote_insn (insn);
1062 if (next && GET_CODE (next) == BARRIER)
1063 schedule_barrier_found = 1;
1066 rtx pending, pending_mem, u;
1068 INIT_REG_SET (&tmp);
1070 (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
1071 EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
1073 struct deps_reg *reg_last = &deps->reg_last[i];
1074 for (u = reg_last->sets; u; u = XEXP (u, 1))
1075 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1076 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1077 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1080 CLEAR_REG_SET (&tmp);
1082 /* All memory writes and volatile reads must happen before the
1083 jump. Non-volatile reads must happen before the jump iff
1084 the result is needed by the above register used mask. */
1086 pending = deps->pending_write_insns;
1087 pending_mem = deps->pending_write_mems;
1090 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1091 pending = XEXP (pending, 1);
1092 pending_mem = XEXP (pending_mem, 1);
1095 pending = deps->pending_read_insns;
1096 pending_mem = deps->pending_read_mems;
1099 if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
1100 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1101 pending = XEXP (pending, 1);
1102 pending_mem = XEXP (pending_mem, 1);
1105 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
1106 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1110 /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
1111 block, then we must be sure that no instructions are scheduled across it.
1112 Otherwise, the reg_n_refs info (which depends on loop_depth) would
1113 become incorrect. */
1118 /* Update loop_notes with any notes from this insn. Also determine
1119 if any of the notes on the list correspond to instruction scheduling
1120 barriers (loop, eh & setjmp notes, but not range notes). */
1122 while (XEXP (link, 1))
1124 if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
1125 || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
1126 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
1127 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
1128 schedule_barrier_found = 1;
1130 link = XEXP (link, 1);
1132 XEXP (link, 1) = REG_NOTES (insn);
1133 REG_NOTES (insn) = loop_notes;
1136 /* If this instruction can throw an exception, then moving it changes
1137 where block boundaries fall. This is mighty confusing elsewhere.
1138 Therefore, prevent such an instruction from being moved. */
1139 if (flag_non_call_exceptions && can_throw_internal (insn))
1140 schedule_barrier_found = 1;
1142 /* Add dependencies if a scheduling barrier was found. */
1143 if (schedule_barrier_found)
1147 for (i = 0; i < deps->max_reg; i++)
1149 struct deps_reg *reg_last = &deps->reg_last[i];
1151 for (u = reg_last->uses; u; u = XEXP (u, 1))
1152 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1153 for (u = reg_last->sets; u; u = XEXP (u, 1))
1154 add_dependence (insn, XEXP (u, 0), 0);
1155 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
1156 add_dependence (insn, XEXP (u, 0), 0);
1158 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1159 free_INSN_LIST_list (®_last->uses);
1161 flush_pending_lists (deps, insn, 0);
1163 reg_pending_sets_all = 1;
1166 /* Accumulate clobbers until the next set so that it will be output
1167 dependent on all of them. At the next set we can clear the clobber
1168 list, since subsequent sets will be output dependent on it. */
1169 if (reg_pending_sets_all)
1171 reg_pending_sets_all = 0;
1172 for (i = 0; i < deps->max_reg; i++)
1174 struct deps_reg *reg_last = &deps->reg_last[i];
1175 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1177 free_INSN_LIST_list (®_last->sets);
1178 free_INSN_LIST_list (®_last->clobbers);
1180 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1181 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1186 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
1188 struct deps_reg *reg_last = &deps->reg_last[i];
1189 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1191 free_INSN_LIST_list (®_last->sets);
1192 free_INSN_LIST_list (®_last->clobbers);
1194 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1195 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1197 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
1199 struct deps_reg *reg_last = &deps->reg_last[i];
1200 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1201 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1204 CLEAR_REG_SET (reg_pending_sets);
1205 CLEAR_REG_SET (reg_pending_clobbers);
1207 /* If a post-call group is still open, see if it should remain so.
1208 This insn must be a simple move of a hard reg to a pseudo or
1211 We must avoid moving these insns for correctness on
1212 SMALL_REGISTER_CLASS machines, and for special registers like
1213 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
1214 hard regs for all targets. */
1216 if (deps->in_post_call_group_p)
1218 rtx tmp, set = single_set (insn);
1219 int src_regno, dest_regno;
1222 goto end_call_group;
1224 tmp = SET_DEST (set);
1225 if (GET_CODE (tmp) == SUBREG)
1226 tmp = SUBREG_REG (tmp);
1227 if (GET_CODE (tmp) == REG)
1228 dest_regno = REGNO (tmp);
1230 goto end_call_group;
1232 tmp = SET_SRC (set);
1233 if (GET_CODE (tmp) == SUBREG)
1234 tmp = SUBREG_REG (tmp);
1235 if (GET_CODE (tmp) == REG)
1236 src_regno = REGNO (tmp);
1238 goto end_call_group;
1240 if (src_regno < FIRST_PSEUDO_REGISTER
1241 || dest_regno < FIRST_PSEUDO_REGISTER)
1243 set_sched_group_p (insn);
1244 CANT_MOVE (insn) = 1;
1249 deps->in_post_call_group_p = 0;
1254 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1255 for every dependency. */
/* NOTE(review): interleaved lines (declarations, braces and some
   statements) are missing from this excerpt; the comments added below
   describe only what the visible statements establish.  */
1258 sched_analyze (deps, head, tail)
/* Optional cselib-based alias analysis setup; the guarded call is
   elided here.  */
1266 if (current_sched_info->use_cselib)
1269 for (insn = head;; insn = NEXT_INSN (insn))
1271 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1273 /* Clear out the stale LOG_LINKS from flow. */
1274 free_INSN_LIST_list (&LOG_LINKS (insn));
1276 /* Clear out stale SCHED_GROUP_P. */
1277 SCHED_GROUP_P (insn) = 0;
1279 /* Make each JUMP_INSN a scheduling barrier for memory
1281 if (GET_CODE (insn) == JUMP_INSN)
1283 /* Keep the list a reasonable size. */
1284 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
1285 flush_pending_lists (deps, insn, 0);
1287 deps->last_pending_memory_flush
1288 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
1290 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1293 else if (GET_CODE (insn) == CALL_INSN)
1298 /* Clear out stale SCHED_GROUP_P. */
1299 SCHED_GROUP_P (insn) = 0;
/* Call insns are pinned in place.  */
1301 CANT_MOVE (insn) = 1;
1303 /* Clear out the stale LOG_LINKS from flow. */
1304 free_INSN_LIST_list (&LOG_LINKS (insn));
1306 /* Any instruction using a hard register which may get clobbered
1307 by a call needs to be marked as dependent on this call.
1308 This prevents a use of a hard return reg from being moved
1309 past a void call (i.e. it does not explicitly set the hard
1312 /* If this call has REG_SETJMP, then assume that
1313 all registers, not just hard registers, may be clobbered by this
1316 /* Insn, being a CALL_INSN, magically depends on
1317 `last_function_call' already. */
1319 if (find_reg_note (insn, REG_SETJMP, NULL))
/* setjmp-style call: make it depend on every tracked register's
   uses, sets and clobbers.  */
1321 for (i = 0; i < deps->max_reg; i++)
1323 struct deps_reg *reg_last = &deps->reg_last[i];
1325 for (u = reg_last->uses; u; u = XEXP (u, 1))
1326 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1327 for (u = reg_last->sets; u; u = XEXP (u, 1))
1328 add_dependence (insn, XEXP (u, 0), 0);
1329 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
1330 add_dependence (insn, XEXP (u, 0), 0);
/* Fixed mojibake: "&reg_last" had been corrupted to "®_last"
   ('&' + "reg" rendered as the (R) sign).  */
1332 free_INSN_LIST_list (&reg_last->uses);
1334 reg_pending_sets_all = 1;
/* Ordinary call: only call-used and global hard registers are
   treated as clobbered.  */
1338 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1339 if (call_used_regs[i] || global_regs[i])
1341 for (u = deps->reg_last[i].uses; u; u = XEXP (u, 1))
1342 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1343 for (u = deps->reg_last[i].sets; u; u = XEXP (u, 1))
1344 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1346 SET_REGNO_REG_SET (reg_pending_clobbers, i);
1350 /* For each insn which shouldn't cross a call, add a dependence
1351 between that insn and this call insn. */
1352 x = LOG_LINKS (deps->sched_before_next_call);
1355 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
1358 free_INSN_LIST_list (&LOG_LINKS (deps->sched_before_next_call));
1360 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1363 /* In the absence of interprocedural alias analysis, we must flush
1364 all pending reads and writes, and start new dependencies starting
1365 from here. But only flush writes for constant calls (which may
1366 be passed a pointer to something we haven't written yet). */
1367 flush_pending_lists (deps, insn, CONST_OR_PURE_CALL_P (insn));
1369 /* Depend this function call (actually, the user of this
1370 function call) on all hard register clobberage. */
1372 /* last_function_call is now a list of insns. */
1373 free_INSN_LIST_list (&deps->last_function_call);
1374 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
1376 /* Before reload, begin a post-call group, so as to keep the
1377 lifetimes of hard registers correct. */
1378 if (! reload_completed)
1379 deps->in_post_call_group_p = 1;
1382 /* See comments on reemit_notes as to why we do this.
1383 ??? Actually, the reemit_notes just say what is done, not why. */
1385 else if (GET_CODE (insn) == NOTE
1386 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_BEG
1387 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
1389 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, NOTE_RANGE_INFO (insn),
1391 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1392 GEN_INT (NOTE_LINE_NUMBER (insn)),
1395 else if (GET_CODE (insn) == NOTE
1396 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
1397 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
1398 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1399 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
/* EH region notes carry their handler number; other notes get 0.  */
1403 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1404 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
1405 rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
1407 rtx_region = GEN_INT (0);
1409 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1412 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1413 GEN_INT (NOTE_LINE_NUMBER (insn)),
1415 CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
/* Keep cselib's value tracking current as each insn is scanned.  */
1418 if (current_sched_info->use_cselib)
1419 cselib_process_insn (insn);
/* Loop exit / cselib teardown — the remaining statements are elided
   in this excerpt.  */
1422 if (current_sched_info->use_cselib)
1430 /* Examine insns in the range [ HEAD, TAIL ] and Use the backward
1431 dependences from LOG_LINKS to build forward dependences in
/* NOTE(review): some interleaved lines (declarations, braces, the
   second argument of the cache macros) are missing from this excerpt;
   comments describe only what the visible statements establish.  */
1435 compute_forward_dependences (head, tail)
1440 enum reg_note dep_type;
1442 next_tail = NEXT_INSN (tail);
1443 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
/* Skip notes and anything else that is not a real insn.  */
1445 if (! INSN_P (insn))
/* group_leader presumably redirects to the head of a scheduling
   group so links attach there — confirm against the full source.  */
1448 insn = group_leader (insn);
/* Mirror each backward link (insn depends on X) as a forward
   INSN_DEPEND entry hanging off X.  */
1450 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
1452 rtx x = group_leader (XEXP (link, 0));
1455 if (x != XEXP (link, 0))
1458 #ifdef ENABLE_CHECKING
1459 /* If add_dependence is working properly there should never
1460 be notes, deleted insns or duplicates in the backward
1461 links. Thus we need not check for them here.
1463 However, if we have enabled checking we might as well go
1464 ahead and verify that add_dependence worked properly. */
1465 if (GET_CODE (x) == NOTE
1466 || INSN_DELETED_P (x)
1467 || (forward_dependency_cache != NULL
1468 && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
1470 || (forward_dependency_cache == NULL
1471 && find_insn_list (insn, INSN_DEPEND (x))))
/* Record the new forward edge in the checking cache so later
   duplicates are caught.  */
1473 if (forward_dependency_cache != NULL)
1474 SET_BIT (forward_dependency_cache[INSN_LUID (x)],
1478 new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));
/* Propagate the dependence kind (true/anti/output) from the backward
   link onto the new forward link.  */
1480 dep_type = REG_NOTE_KIND (link);
1481 PUT_REG_NOTE_KIND (new_link, dep_type);
1483 INSN_DEPEND (x) = new_link;
1484 INSN_DEP_COUNT (insn) += 1;
1489 /* Initialize variables for region data dependence analysis.
1490 n_bbs is the number of region blocks. */
/* NOTE(review): the function header line (init_deps) is elided from
   this excerpt.  */
/* After reload only hard registers are tracked; before reload every
   pseudo gets a reg_last slot.  */
1496 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
1498 deps->max_reg = max_reg;
/* xcalloc zero-fills, so every per-register list starts out empty.  */
1499 deps->reg_last = (struct deps_reg *)
1500 xcalloc (max_reg, sizeof (struct deps_reg));
1501 INIT_REG_SET (&deps->reg_last_in_use);
/* Reset all pending-memory-access bookkeeping.  */
1503 deps->pending_read_insns = 0;
1504 deps->pending_read_mems = 0;
1505 deps->pending_write_insns = 0;
1506 deps->pending_write_mems = 0;
1507 deps->pending_lists_length = 0;
1508 deps->pending_flush_length = 0;
1509 deps->last_pending_memory_flush = 0;
1510 deps->last_function_call = 0;
1511 deps->in_post_call_group_p = 0;
/* Dummy insn whose LOG_LINKS anchor the insns that must not cross the
   next call (consumed in sched_analyze's CALL_INSN handling).  */
1513 deps->sched_before_next_call
1514 = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
1515 NULL_RTX, 0, NULL_RTX, NULL_RTX);
1516 LOG_LINKS (deps->sched_before_next_call) = 0;
1519 /* Free insn lists found in DEPS. */
/* NOTE(review): the function header (free_deps) and closing lines are
   elided from this excerpt.  */
1527 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
1528 times. For a test case with 42000 regs and 8000 small basic blocks,
1529 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
1530 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
1532 struct deps_reg *reg_last = &deps->reg_last[i];
/* Fixed mojibake on the next three lines: "&reg_last" had been
   corrupted to "®_last" ('&' + "reg" rendered as the (R) sign).  */
1533 free_INSN_LIST_list (&reg_last->uses);
1534 free_INSN_LIST_list (&reg_last->sets);
1535 free_INSN_LIST_list (&reg_last->clobbers);
1537 CLEAR_REG_SET (&deps->reg_last_in_use);
/* Null the pointer after freeing to guard against reuse.  */
1539 free (deps->reg_last);
1540 deps->reg_last = NULL;
1543 /* If it is profitable to use them, initialize caches for tracking
1544 dependency information. LUID is the number of insns to be scheduled,
1545 it is used in the estimate of profitability. */
1548 init_dependency_caches (luid)
1551 /* ?!? We could save some memory by computing a per-region luid mapping
1552 which could reduce both the number of vectors in the cache and the size
1553 of each vector. Instead we just avoid the cache entirely unless the
1554 average number of instructions in a basic block is very high. See
1555 the comment before the declaration of true_dependency_cache for
1556 what we consider "very high". */
/* 100 * 5: one hundred times the ~5-insn average basic block (see the
   comment near the top of the file).  */
1557 if (luid / n_basic_blocks > 100 * 5)
/* One luid-wide bitmap per insn, for each kind of dependence.  */
1559 true_dependency_cache = sbitmap_vector_alloc (luid, luid);
1560 sbitmap_vector_zero (true_dependency_cache, luid);
1561 anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
1562 sbitmap_vector_zero (anti_dependency_cache, luid);
1563 output_dependency_cache = sbitmap_vector_alloc (luid, luid);
1564 sbitmap_vector_zero (output_dependency_cache, luid);
1565 #ifdef ENABLE_CHECKING
/* The forward cache exists only for the consistency checks in
   compute_forward_dependences.  */
1566 forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
1567 sbitmap_vector_zero (forward_dependency_cache, luid);
1572 /* Free the caches allocated in init_dependency_caches. */
1575 free_dependency_caches ()
/* true_dependency_cache acts as the guard: the caches are allocated
   together in init_dependency_caches, or not at all.  */
1577 if (true_dependency_cache)
1579 sbitmap_vector_free (true_dependency_cache);
/* Null each pointer so a subsequent call is a safe no-op.  */
1580 true_dependency_cache = NULL;
1581 sbitmap_vector_free (anti_dependency_cache);
1582 anti_dependency_cache = NULL;
1583 sbitmap_vector_free (output_dependency_cache);
1584 output_dependency_cache = NULL;
1585 #ifdef ENABLE_CHECKING
1586 sbitmap_vector_free (forward_dependency_cache);
1587 forward_dependency_cache = NULL;
1592 /* Initialize some global variables needed by the dependency analysis
/* NOTE(review): the function header line (init_deps_global) is elided
   from this excerpt.  */
/* The reg sets live in the file-scope *_head objects declared above;
   these calls initialize them and return the usable regset handles.  */
1598 reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
1599 reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
1600 reg_pending_sets_all = 0;
1603 /* Free everything used by the dependency analysis code. */
1606 finish_deps_global ()
/* Release the register sets created by the matching init routine.  */
1608 FREE_REG_SET (reg_pending_sets);
1609 FREE_REG_SET (reg_pending_clobbers);