/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"
extern char *reg_known_equiv_p;
extern rtx *reg_known_value;

static regset_head reg_pending_sets_head;
static regset_head reg_pending_clobbers_head;
static regset_head reg_pending_uses_head;

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static bool reg_pending_barrier;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap cache for true dependencies is
   allocated, the other two caches are allocated as well.  */
static sbitmap *true_dependency_cache;
static sbitmap *anti_dependency_cache;
static sbitmap *output_dependency_cache;
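
/* Each cache is a vector of per-insn bitmaps indexed by luid, so a
   recorded true dependence of INSN on ELEM is tested with

     TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem))

   as add_dependence does below.  */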
/* To speed up checking the consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   could be switching off checking duplication of insns in forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static sbitmap *forward_dependency_cache;
static int deps_may_trap_p PARAMS ((rtx));
static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
static void remove_dependence PARAMS ((rtx, rtx));
static void set_sched_group_p PARAMS ((rtx));

static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
static rtx group_leader PARAMS ((rtx));

static rtx get_condition PARAMS ((rtx));
static int conditions_mutex_p PARAMS ((rtx, rtx));
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

  rtx addr = XEXP (mem, 0);

  if (GET_CODE (addr) == REG
      && REGNO (addr) >= FIRST_PSEUDO_REGISTER
      && reg_known_value[REGNO (addr)])
    addr = reg_known_value[REGNO (addr)];
  return rtx_addr_can_trap_p (addr);
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

find_insn_list (insn, list)

      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
/* Find the condition under which INSN is executed.  */

  rtx pat = PATTERN (insn);

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;
  if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
    return 0;
  if (GET_CODE (SET_SRC (pat)) != IF_THEN_ELSE)
    return 0;
  pat = SET_SRC (pat);
  cond = XEXP (pat, 0);
  if (GET_CODE (XEXP (pat, 1)) == LABEL_REF
      && XEXP (pat, 2) == pc_rtx)
    return cond;
  else if (GET_CODE (XEXP (pat, 2)) == LABEL_REF
           && XEXP (pat, 1) == pc_rtx)
    return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
                           XEXP (cond, 0), XEXP (cond, 1));
  return 0;
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
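/* For example, (gt (reg 1) (reg 2)) and (le (reg 1) (reg 2)) are
   mutually exclusive: LE is the reverse of GT and the operands match,
   so at most one of the two conditions can hold at a time.  */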
conditions_mutex_p (cond1, cond2)

  if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
      && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
      && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */
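/* A true data dependence is represented by reg note kind 0; the
   (int) comparisons on dependence kinds below rely on numerically
   smaller kinds denoting more restrictive dependencies.  */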
add_dependence (insn, elem, dep_type)
     enum reg_note dep_type;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (GET_CODE (elem) == NOTE)
    return;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  /* ??? add_dependence is the wrong place to be eliding dependencies,
     as that forgets that the condition expressions themselves may
     be changed.  */
  if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
    {
      cond1 = get_condition (insn);
      cond2 = get_condition (elem);
      if (cond1 && cond2
          && conditions_mutex_p (cond1, cond2)
          /* Make sure first instruction doesn't affect condition of second
             instruction if switched.  */
          && !modified_in_p (cond1, elem)
          /* Make sure second instruction doesn't affect condition of first
             instruction if switched.  */
          && !modified_in_p (cond2, insn))
        return;
    }

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */
  next = next_nonnote_insn (elem);
  if (next && INSN_P (next) && SCHED_GROUP_P (next))
    {
      /* Notes will never intervene here though, so don't bother checking
         for them.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
         that has LABEL_PRESERVE_P set, which is represented by the same
         bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
         SCHED_GROUP_P.  */
      while ((nnext = next_nonnote_insn (next)) != NULL
             && INSN_P (nnext)
             && SCHED_GROUP_P (nnext))
        next = nnext;

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
        return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }
#ifdef INSN_SCHEDULING
  /* ??? No good way to tell from here whether we're doing interblock
     scheduling.  Possibly add another callback.  */

  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.  Note: the edge where
     elem is a CALL is still required.  */
  if (GET_CODE (insn) == CALL_INSN
      && (INSN_BB (elem) != INSN_BB (insn)))
    return;

  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      enum reg_note present_dep_type = 0;

      if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
        abort ();
      if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
        /* Do nothing (present_dep_type is already 0).  */
        ;
      else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_ANTI;
      else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_OUTPUT;

      if (present_p && (int) dep_type >= (int) present_dep_type)
        return;
    }
  /* Check that we don't already have this dependence.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 0) == elem)
      {
#ifdef INSN_SCHEDULING
        /* Clear corresponding cache entry because the type of the link
           may be changed.  */
        if (true_dependency_cache != NULL)
          {
            if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
              RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
            else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
                     && output_dependency_cache)
              RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
          }
#endif

        /* If this is a more restrictive type of dependence than the existing
           one, then change the existing dependence to this type.  */
        if ((int) dep_type < (int) REG_NOTE_KIND (link))
          PUT_REG_NOTE_KIND (link, dep_type);
#ifdef INSN_SCHEDULING
        /* If we are adding a dependency to INSN's LOG_LINKs, then
           note that in the bitmap caches of dependency information.  */
        if (true_dependency_cache != NULL)
          {
            if ((int) REG_NOTE_KIND (link) == 0)
              SET_BIT (true_dependency_cache[INSN_LUID (insn)],
                       INSN_LUID (elem));
            else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
              SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                       INSN_LUID (elem));
            else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
              SET_BIT (output_dependency_cache[INSN_LUID (insn)],
                       INSN_LUID (elem));
          }
#endif
        return;
      }
  /* Might want to check one level of transitivity to save conses.  */

  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if ((int) dep_type == 0)
        SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_ANTI)
        SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_OUTPUT)
        SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
    }
#endif
/* A convenience wrapper to operate on an entire list.  */

add_dependence_list (insn, list, dep_type)
     enum reg_note dep_type;

  for (; list; list = XEXP (list, 1))
    add_dependence (insn, XEXP (list, 0), dep_type);
/* Similar, but free *LISTP at the same time.  */

add_dependence_list_and_free (insn, listp, dep_type)
     enum reg_note dep_type;

  for (list = *listp, *listp = NULL; list; list = next)
    {
      next = XEXP (list, 1);
      add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */

remove_dependence (insn, elem)

  rtx prev, link, next;

  for (prev = 0, link = LOG_LINKS (insn); link; link = next)
    {
      next = XEXP (link, 1);
      if (XEXP (link, 0) == elem)
        {
          if (prev)
            XEXP (prev, 1) = next;
          else
            LOG_LINKS (insn) = next;

#ifdef INSN_SCHEDULING
          /* If we are removing a dependency from the LOG_LINKS list,
             make sure to remove it from the cache too.  */
          if (true_dependency_cache != NULL)
            {
              if (REG_NOTE_KIND (link) == 0)
                RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
                RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
            }
#endif

          free_INSN_LIST_node (link);
/* Return an insn which represents a SCHED_GROUP, which is
   the last insn in the group.  */

  do
    {
      prev = insn;
      insn = next_nonnote_insn (insn);
    }
  while (insn && INSN_P (insn) && SCHED_GROUP_P (insn));
/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
   goes along with that.  */

set_sched_group_p (insn)

  SCHED_GROUP_P (insn) = 1;

  /* There may be a note before this insn now, but all notes will
     be removed before we actually try to schedule the insns, so
     it won't cause a problem later.  We must avoid it here though.  */
  prev = prev_nonnote_insn (insn);

  /* Make a copy of all dependencies on the immediately previous insn,
     and add to this insn.  This is so that all the dependencies will
     apply to the group.  Remove an explicit dependence on this insn
     as SCHED_GROUP_P now represents it.  */

  if (find_insn_list (prev, LOG_LINKS (insn)))
    remove_dependence (insn, prev);

  for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
    add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
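
/* As an illustration, in the sequence

     (1) r1 = mem[a]
     (2) mem[a] = r2
     (3) mem[a] = r3
     (4) r4 = mem[a]

   insn (2) anti-depends on (1), insn (3) output-depends on (2), and
   insn (4) truly depends on (3).  */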
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
     rtx *insn_list, *mem_list, insn, mem;

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (current_sched_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }
  link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
  *mem_list = link;

  deps->pending_lists_length++;
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */
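/* Note the dependence kinds used below: the dependencies of the
   flushing insn on pending writes and on the last pending flush are
   REG_DEP_ANTI when flushing for a read and REG_DEP_OUTPUT when
   flushing for a write.  */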
flush_pending_lists (deps, insn, for_read, for_write)
     int for_read, for_write;

  if (for_write)
    {
      add_dependence_list_and_free (insn, &deps->pending_read_insns,
                                    REG_DEP_ANTI);
      free_EXPR_LIST_list (&deps->pending_read_mems);
    }

  add_dependence_list_and_free (insn, &deps->pending_write_insns,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  deps->pending_lists_length = 0;

  add_dependence_list_and_free (insn, &deps->last_pending_memory_flush,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
  deps->pending_flush_length = 1;
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */
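/* For instance, given (set (reg 1) (plus (reg 2) (reg 3))), the
   destination register 1 is recorded in reg_pending_sets, while the
   uses of registers 2 and 3 in the source are handed off to
   sched_analyze_2.  */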
sched_analyze_1 (deps, x, insn)

  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
          sched_analyze_1 (deps,
                           gen_rtx_CLOBBER (VOIDmode,
                                            XEXP (XVECEXP (dest, 0, i), 0)),
                           insn);

      if (GET_CODE (x) == SET)
        sched_analyze_2 (deps, SET_SRC (x), insn);
      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
        }
      dest = XEXP (dest, 0);
    }
  if (GET_CODE (dest) == REG)
    {
      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
          if (code == SET)
            {
              while (--i >= 0)
                SET_REGNO_REG_SET (reg_pending_sets, regno + i);
            }
          else
            {
              while (--i >= 0)
                SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
            }
        }
      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
         it does not reload.  Ignore these as they have served their
         purpose already.  */
      else if (regno >= deps->max_reg)
        {
          if (GET_CODE (PATTERN (insn)) != USE
              && GET_CODE (PATTERN (insn)) != CLOBBER)
            abort ();
        }
      else
        {
          if (code == SET)
            SET_REGNO_REG_SET (reg_pending_sets, regno);
          else
            SET_REGNO_REG_SET (reg_pending_clobbers, regno);

          /* Pseudos that are REG_EQUIV to something may be replaced
             by that during reloading.  We need only add dependencies for
             the address in the REG_EQUIV note.  */
          if (!reload_completed
              && reg_known_equiv_p[regno]
              && GET_CODE (reg_known_value[regno]) == MEM)
            sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */
          if (REG_N_CALLS_CROSSED (regno) == 0)
            add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
        }
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */
      rtx t = dest;

      if (current_sched_info->use_cselib)
        {
          t = shallow_copy_rtx (dest);
          cselib_lookup (XEXP (t, 0), Pmode, 1);
          XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
        }

      if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the default value of 32.  */
          flush_pending_lists (deps, insn, false, true);
        }
      else
        {
          rtx pending, pending_mem;

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (anti_dependence (XEXP (pending_mem, 0), t))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              if (output_dependence (XEXP (pending_mem, 0), t))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          add_dependence_list (insn, deps->last_pending_memory_flush,
                               REG_DEP_ANTI);

          add_insn_mem_dependence (deps, &deps->pending_write_insns,
                                   &deps->pending_write_mems, insn, dest);
        }
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
/* Analyze the uses of memory and registers in rtx X in INSN.  */

sched_analyze_2 (deps, x, insn)

      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

      /* User of CC0 depends on the immediately preceding insn.  */
      set_sched_group_p (insn);
      return;
      int regno = REGNO (x);
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int i = HARD_REGNO_NREGS (regno, GET_MODE (x));
          while (--i >= 0)
            SET_REGNO_REG_SET (reg_pending_uses, regno + i);
        }
      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
         it does not reload.  Ignore these as they have served their
         purpose already.  */
      else if (regno >= deps->max_reg)
        {
          if (GET_CODE (PATTERN (insn)) != USE
              && GET_CODE (PATTERN (insn)) != CLOBBER)
            abort ();
        }
      else
        {
          SET_REGNO_REG_SET (reg_pending_uses, regno);

          /* Pseudos that are REG_EQUIV to something may be replaced
             by that during reloading.  We need only add dependencies for
             the address in the REG_EQUIV note.  */
          if (!reload_completed
              && reg_known_equiv_p[regno]
              && GET_CODE (reg_known_value[regno]) == MEM)
            sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

          /* If the register does not already cross any calls, then add this
             insn to the sched_before_next_call list so that it will still
             not cross calls after scheduling.  */
          if (REG_N_CALLS_CROSSED (regno) == 0)
            deps->sched_before_next_call
              = alloc_INSN_LIST (insn, deps->sched_before_next_call);
        }
      /* Reading memory.  */
      rtx pending, pending_mem;

      if (current_sched_info->use_cselib)
        {
          t = shallow_copy_rtx (t);
          cselib_lookup (XEXP (t, 0), Pmode, 1);
          XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
        }
      pending = deps->pending_read_insns;
      pending_mem = deps->pending_read_mems;
      while (pending)
        {
          if (read_dependence (XEXP (pending_mem, 0), t))
            add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
          pending = XEXP (pending, 1);
          pending_mem = XEXP (pending_mem, 1);
        }

      pending = deps->pending_write_insns;
      pending_mem = deps->pending_write_mems;
      while (pending)
        {
          if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
                               t, rtx_varies_p))
            add_dependence (insn, XEXP (pending, 0), 0);
          pending = XEXP (pending, 1);
          pending_mem = XEXP (pending_mem, 1);
        }

      for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
        if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
            || deps_may_trap_p (x))
          add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

      /* Always add these dependencies to pending_reads, since
         this insn may be followed by a write.  */
      add_insn_mem_dependence (deps, &deps->pending_read_insns,
                               &deps->pending_read_mems, insn, x);

      /* Take advantage of tail recursion here.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      return;

      /* Force pending stores to memory in case a trap handler needs them.  */
      flush_pending_lists (deps, insn, true, false);
    case UNSPEC_VOLATILE:

      /* Traditional and volatile asm instructions must be considered to use
         and clobber all hard registers, all pseudo-registers and all of
         memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

         Consider for instance a volatile asm that changes the fpu rounding
         mode.  An insn should not be moved across this even if it only uses
         pseudo-regs because it might give an incorrectly rounded result.  */
      if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
        reg_pending_barrier = true;

      /* For all ASM_OPERANDS, we must traverse the vector of input operands.
         We cannot just fall through here since then we would be confused
         by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
         a traditional asm unlike its normal usage.  */

      if (code == ASM_OPERANDS)
        {
          for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
            sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
        }
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);

      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);
  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }
/* Analyze an INSN with pattern X to find all dependencies.  */

sched_analyze_insn (deps, x, insn, loop_notes)

  RTX_CODE code = GET_CODE (x);

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
         false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    {
      sched_analyze_1 (deps, x, insn);

      /* Bare clobber insns are used for letting life analysis, reg-stack
         and others know that a value is dead.  Depend on the last call
         instruction so that reg-stack won't get confused.  */
      if (code == CLOBBER)
        add_dependence_list (insn, deps->last_function_call, REG_DEP_OUTPUT);
    }
  else if (code == PARALLEL)
    {
      int i;

      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        {
          rtx sub = XVECEXP (x, 0, i);
          code = GET_CODE (sub);

          if (code == COND_EXEC)
            {
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
              sub = COND_EXEC_CODE (sub);
              code = GET_CODE (sub);
            }
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (deps, sub, insn);
          else
            sched_analyze_2 (deps, sub, insn);
        }
    }
  else
    sched_analyze_2 (deps, x, insn);
  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    {
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        {
          if (GET_CODE (XEXP (link, 0)) == CLOBBER)
            sched_analyze_1 (deps, XEXP (link, 0), insn);
          else
            sched_analyze_2 (deps, XEXP (link, 0), insn);
        }
      if (find_reg_note (insn, REG_SETJMP, NULL))
        reg_pending_barrier = true;
    }
  if (GET_CODE (insn) == JUMP_INSN)
    {
      rtx next;

      next = next_nonnote_insn (insn);
      if (next && GET_CODE (next) == BARRIER)
        reg_pending_barrier = true;
      else
        {
          rtx pending, pending_mem;
          regset_head tmp;

          INIT_REG_SET (&tmp);

          (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
          IOR_REG_SET (reg_pending_uses, &tmp);
          CLEAR_REG_SET (&tmp);

          /* All memory writes and volatile reads must happen before the
             jump.  Non-volatile reads must happen before the jump iff
             the result is needed by the above register used mask.  */

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          add_dependence_list (insn, deps->last_pending_memory_flush,
                               REG_DEP_ANTI);
        }
    }
  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */
  if (loop_notes)
    {
      rtx link;

      /* Update loop_notes with any notes from this insn.  Also determine
         if any of the notes on the list correspond to instruction scheduling
         barriers (loop, eh & setjmp notes, but not range notes).  */
      link = loop_notes;
      while (XEXP (link, 1))
        {
          if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
            reg_pending_barrier = true;

          link = XEXP (link, 1);
        }
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;
    }
  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  */
  if (can_throw_internal (insn))
    reg_pending_barrier = true;

  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
    {
      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->sets, 0);
              add_dependence_list (insn, reg_last->clobbers, 0);
            });
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (insn, &reg_last->uses,
                                            REG_DEP_ANTI);
              add_dependence_list_and_free (insn, &reg_last->sets, 0);
              add_dependence_list_and_free (insn, &reg_last->clobbers, 0);
              reg_last->uses_length = 0;
              reg_last->clobbers_length = 0;
            });
        }

      for (i = 0; i < deps->max_reg; i++)
        {
          struct deps_reg *reg_last = &deps->reg_last[i];
          reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
        }

      flush_pending_lists (deps, insn, true, true);
      reg_pending_barrier = false;
    }
  else
    {
      /* If the current insn is conditional, we can't free any
         of the lists.  */
      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0);
              add_dependence_list (insn, reg_last->clobbers, 0);
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
              reg_last->uses_length++;
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
              reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
              reg_last->clobbers_length++;
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
              reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
            });
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0);
              add_dependence_list (insn, reg_last->clobbers, 0);
              reg_last->uses_length++;
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
                  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
                {
                  add_dependence_list_and_free (insn, &reg_last->sets,
                                                REG_DEP_OUTPUT);
                  add_dependence_list_and_free (insn, &reg_last->uses,
                                                REG_DEP_ANTI);
                  add_dependence_list_and_free (insn, &reg_last->clobbers,
                                                REG_DEP_OUTPUT);
                  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
                  reg_last->clobbers_length = 0;
                  reg_last->uses_length = 0;
                }
              else
                {
                  add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
                  add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
                }
              reg_last->clobbers_length++;
              reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (insn, &reg_last->sets,
                                            REG_DEP_OUTPUT);
              add_dependence_list_and_free (insn, &reg_last->clobbers,
                                            REG_DEP_OUTPUT);
              add_dependence_list_and_free (insn, &reg_last->uses,
                                            REG_DEP_ANTI);
              reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
              reg_last->uses_length = 0;
              reg_last->clobbers_length = 0;
            });
        }

      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
    }

  CLEAR_REG_SET (reg_pending_uses);
  CLEAR_REG_SET (reg_pending_clobbers);
  CLEAR_REG_SET (reg_pending_sets);
  /* If we are currently in a libcall scheduling group, then mark the
     current insn as being in a scheduling group and that it cannot
     be moved into a different basic block.  */

  if (deps->libcall_block_tail_insn)
    {
      set_sched_group_p (insn);
      CANT_MOVE (insn) = 1;
    }

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */
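  /* For instance, a copy such as (set (reg 123) (reg:SI 0)) appearing
     right after a call, where hard register 0 holds the return value
     on some targets, keeps the group open and is itself added to the
     scheduling group.  */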
  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
        goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        dest_regno = REGNO (tmp);
      else
        goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        src_regno = REGNO (tmp);
      else
        goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
          || dest_regno < FIRST_PSEUDO_REGISTER)
        {
          set_sched_group_p (insn);
          CANT_MOVE (insn) = 1;
        }
      else
        {
        end_call_group:
          deps->in_post_call_group_p = false;
        }
    }
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

sched_analyze (deps, head, tail)

  if (current_sched_info->use_cselib)
    cselib_init ();

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      rtx link, end_seq, r0, set;

      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
        {
          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          /* Make each JUMP_INSN a scheduling barrier for memory
             references.  */
          if (GET_CODE (insn) == JUMP_INSN)
            {
              /* Keep the list a reasonable size.  */
              if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
                flush_pending_lists (deps, insn, true, true);
              else
                deps->last_pending_memory_flush
                  = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
            }
          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
        }
      else if (GET_CODE (insn) == CALL_INSN)
        {
          int i;

          CANT_MOVE (insn) = 1;

          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          if (find_reg_note (insn, REG_SETJMP, NULL))
            {
              /* This is setjmp.  Assume that all registers, not just
                 hard registers, may be clobbered by this call.  */
              reg_pending_barrier = true;
            }
          else
            {
              for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
                /* A call may read and modify global register variables.  */
                if (global_regs[i])
                  {
                    SET_REGNO_REG_SET (reg_pending_sets, i);
                    SET_REGNO_REG_SET (reg_pending_uses, i);
                  }
                /* Other call-clobbered hard regs may be clobbered.
                   Since we only have a choice between 'might be clobbered'
                   and 'definitely not clobbered', we must include all
                   partly call-clobbered registers here.  */
                else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
                         || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
                  SET_REGNO_REG_SET (reg_pending_clobbers, i);
                /* We don't know what set of fixed registers might be used
                   by the function, but it is certain that the stack pointer
                   is among them, so be conservative.  */
                else if (fixed_regs[i])
                  SET_REGNO_REG_SET (reg_pending_uses, i);
                /* The frame pointer is normally not used by the function
                   itself, but by the debugger.  */
                /* ??? MIPS o32 is an exception.  It uses the frame pointer
                   in the macro expansion of jal but does not represent this
                   fact in the call_insn rtl.  */
                else if (i == FRAME_POINTER_REGNUM
                         || (i == HARD_FRAME_POINTER_REGNUM
                             && (! reload_completed || frame_pointer_needed)))
                  SET_REGNO_REG_SET (reg_pending_uses, i);
            }

          /* For each insn which shouldn't cross a call, add a dependence
             between that insn and this call insn.  */
          add_dependence_list_and_free (insn, &deps->sched_before_next_call,
                                        REG_DEP_ANTI);

          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);

          /* In the absence of interprocedural alias analysis, we must flush
             all pending reads and writes, and start new dependencies starting
             from here.  But only flush writes for constant calls (which may
             be passed a pointer to something we haven't written yet).  */
          flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));

          /* Remember the last function call for limiting lifetimes.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = true;
        }
      /* See comments on reemit_notes as to why we do this.
         ??? Actually, the reemit_notes just say what is done, not why.  */

      if (GET_CODE (insn) == NOTE
          && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
        {
          rtx rtx_region;

          if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
            rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
          else
            rtx_region = GEN_INT (0);

          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        rtx_region,
                                        loop_notes);
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        GEN_INT (NOTE_LINE_NUMBER (insn)),
                                        loop_notes);
          CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
        }
      if (current_sched_info->use_cselib)
        cselib_process_insn (insn);

      /* Now that we have completed handling INSN, check and see if it is
         a CLOBBER beginning a libcall block.  If it is, record the
         end of the libcall sequence.

         We want to schedule libcall blocks as a unit before reload.  While
         this restricts scheduling, it preserves the meaning of a libcall
         block.

         As a side effect, we may get better code due to decreased register
         pressure as well as less chance of a foreign insn appearing in
         a libcall block.  */
      if (!reload_completed
          /* Note we may have nested libcall sequences.  We only care about
             the outermost libcall sequence.  */
          && deps->libcall_block_tail_insn == 0
          /* The sequence must start with a clobber of a register.  */
          && GET_CODE (insn) == INSN
          && GET_CODE (PATTERN (insn)) == CLOBBER
          && (r0 = XEXP (PATTERN (insn), 0), GET_CODE (r0) == REG)
          /* The CLOBBER must also have a REG_LIBCALL note attached.  */
          && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
          && (end_seq = XEXP (link, 0)) != 0
          /* The insn referenced by the REG_LIBCALL note must be a
             simple nop copy with the same destination as the register
             mentioned in the clobber.  */
          && (set = single_set (end_seq)) != 0
          && SET_DEST (set) == r0 && SET_SRC (set) == r0
          /* And finally the insn referenced by the REG_LIBCALL must
             also contain a REG_EQUAL note and a REG_RETVAL note.  */
          && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
          && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
        deps->libcall_block_tail_insn = XEXP (link, 0);

      /* If we have reached the end of a libcall block, then close the
         block.  */
      if (deps->libcall_block_tail_insn == insn)
        deps->libcall_block_tail_insn = 0;

      if (insn == tail)
        {
          if (current_sched_info->use_cselib)
            cselib_finish ();
          return;
        }
    }
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */

compute_forward_dependences (head, tail)

  enum reg_note dep_type;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
        continue;

      insn = group_leader (insn);

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
        {
          rtx x = group_leader (XEXP (link, 0));
          rtx new_link;

          if (x != XEXP (link, 0))
            continue;

#ifdef ENABLE_CHECKING
          /* If add_dependence is working properly there should never
             be notes, deleted insns or duplicates in the backward
             links.  Thus we need not check for them here.

             However, if we have enabled checking we might as well go
             ahead and verify that add_dependence worked properly.  */
          if (GET_CODE (x) == NOTE
              || INSN_DELETED_P (x)
              || (forward_dependency_cache != NULL
                  && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
                               INSN_LUID (insn)))
              || (forward_dependency_cache == NULL
                  && find_insn_list (insn, INSN_DEPEND (x))))
            abort ();
          if (forward_dependency_cache != NULL)
            SET_BIT (forward_dependency_cache[INSN_LUID (x)],
                     INSN_LUID (insn));
#endif

          new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));

          dep_type = REG_NOTE_KIND (link);
          PUT_REG_NOTE_KIND (new_link, dep_type);

          INSN_DEPEND (x) = new_link;
          INSN_DEP_COUNT (insn) += 1;
        }
    }
/* Initialize variables for region data dependence analysis.
   n_bbs is the number of region blocks.  */

  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = (struct deps_reg *)
    xcalloc (max_reg, sizeof (struct deps_reg));
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->sched_before_next_call = 0;
  deps->in_post_call_group_p = false;
  deps->libcall_block_tail_insn = 0;
/* Free insn lists found in DEPS.  */

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a test case with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
        free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
        free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->clobbers)
        free_INSN_LIST_list (&reg_last->clobbers);
    });
  CLEAR_REG_SET (&deps->reg_last_in_use);

  free (deps->reg_last);
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled;
   it is used in the estimate of profitability.  */

init_dependency_caches (luid)

  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
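  /* With the estimate of 5 insns per basic block above, this threshold
     amounts to an average of 100 * 5 = 500 insns per block.  */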
  if (luid / n_basic_blocks > 100 * 5)
    {
      true_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (true_dependency_cache, luid);
      anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (anti_dependency_cache, luid);
      output_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (output_dependency_cache, luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (forward_dependency_cache, luid);
#endif
    }
/* Free the caches allocated in init_dependency_caches.  */

free_dependency_caches ()

  if (true_dependency_cache)
    {
      sbitmap_vector_free (true_dependency_cache);
      true_dependency_cache = NULL;
      sbitmap_vector_free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      sbitmap_vector_free (output_dependency_cache);
      output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      sbitmap_vector_free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
    }
/* Initialize some global variables needed by the dependency analysis
   code.  */

  reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
  reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
  reg_pending_uses = INITIALIZE_REG_SET (reg_pending_uses_head);
  reg_pending_barrier = false;
/* Free everything used by the dependency analysis code.  */

finish_deps_global ()

  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);