/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "cfglayout.h"
#include "params.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"

#ifdef INSN_SCHEDULING
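
/* This file implements scheduling over extended basic blocks (EBBs).
   An EBB is a maximal chain of basic blocks linked by likely-taken
   fallthru edges: control may leave the chain through side exits, but
   may enter it only at its head.  schedule_ebbs () builds such chains,
   cutting them at labels and at fallthru edges whose probability falls
   below a cutoff, and schedule_ebb () then hands each chain to the
   haifa scheduler as a single scheduling region.  */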

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for ebb scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx, rtx);
static int schedule_more_p (void);
static const char *ebb_print_insn (const_rtx, int);
static int rank (rtx, rtx);
static int ebb_contributes_to_priority (rtx, rtx);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx, rtx);
static basic_block schedule_ebb (rtx, rtx);
static void debug_ebb_dependencies (rtx, rtx);

static void ebb_add_remove_insn (rtx, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx);
static void ebb_fix_recovery_cfg (int, int, int);
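
/* Most of these functions are not called directly within this file;
   they are registered as callbacks in the ebb_sched_info,
   ebb_sched_deps_info and ebb_common_sched_info structures defined
   below, through which haifa-sched.c invokes them.  */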

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}

/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx head, rtx tail)
{
  fprintf (sched_dump,
           ";;   --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;;   --- EBB Dependences --- from bb%d to bb%d \n",
           BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}

/* Add all insns that are initially ready to the ready list.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  int n = 0;
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}

/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx insn, rtx last)
{
  sched_rgn_n_insns++;

  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case: a partially dead instruction is
         scheduled after the last control flow instruction.  In this
         case we can create a new basic block.  It is always exactly
         one basic block, the last in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

#ifdef ENABLE_CHECKING
      gcc_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_assert (BLOCK_FOR_INSN (insn) == last_bb
                  && !IS_SPECULATION_CHECK_P (insn)
                  && BB_HEAD (last_bb) != insn
                  && BB_END (last_bb) == insn);

      {
        rtx x;

        x = NEXT_INSN (insn);
        if (e)
          gcc_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_assert (BARRIER_P (x));
      }
#endif

      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        /* Create an empty unreachable block after the INSN.  */
        bb = create_basic_block (NEXT_INSN (insn), NULL_RTX, last_bb);

      /* split_edge () creates BB before E->DEST.  Keep in mind that
         this operation extends the scheduling region till the end of BB.
         Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out
         of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append the new basic block to the end of the ebb.  The
         add_block hook (ebb_add_block) updates LAST_BB, which is why
         the assertion below holds.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const_rtx insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx insn1, rtx insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

  /* Prefer the insn from the more frequently executed block.  */
  if (bb1->count > bb2->count
      || bb1->frequency > bb2->frequency)
    return -1;
  if (bb1->count < bb2->count
      || bb1->frequency < bb2->frequency)
    return 1;
  return 0;
}
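
/* rank () is consulted when the haifa scheduler orders its ready list
   (see rank_for_schedule in haifa-sched.c), so an insn from a hotter
   block wins over an otherwise comparable insn from a colder one.  */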

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
ebb_contributes_to_priority (rtx next ATTRIBUTE_UNUSED,
                             rtx insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* INSN is a JUMP_INSN, COND_SET is the set of registers that are
   conditionally set before INSN.  Store the set of registers that
   must be considered as used by this jump in USED and that of
   registers that must be considered as set in SET.  */

static void
ebb_compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used,
                                   regset set)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->succs)
    if (e->flags & EDGE_FALLTHRU)
      /* The jump may be a by-product of a branch that has been merged
         into the main codepath after being conditionalized.  Therefore
         it may guard the fallthrough block from using a value that has
         been conditionally overwritten in the main codepath.  So we
         consider that it restores the value of the main codepath.  */
      bitmap_and (set, df_get_live_in (e->dest), cond_set);
    else
      bitmap_ior_into (used, df_get_live_in (e->dest));
}

/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };

static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,
  NULL, NULL,
  1, 0,

  ebb_add_remove_insn,
  begin_schedule_ready,
  advance_target_bb,
  SCHED_EBB
  /* We can create new blocks in begin_schedule_ready ().  */
  | NEW_BBS
};

/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
       the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses a list (given by LAST_BLOCK) of already processed
   blocks in EBB.  The list is formed in `add_deps_for_risky_insns'.  */

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
        /* Found a DEF-USE dependence (insn1, load_insn).  */
        {
          sd_iterator_def fore_sd_it;
          dep_t fore_dep;

          FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
            {
              rtx insn2 = DEP_CON (fore_dep);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

                  for (bb = last_block; bb; bb = (basic_block) bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}
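
/* For illustration, a hypothetical sequence matching the conditions
   checked above (register numbers and offsets are made up):

       insn1:      r1 = r2 + r3       ; sets the common base register
       load2:      r4 = [r1 + 8]      ; in an earlier block of the EBB
       load_insn:  r5 = [r1 + 12]     ; candidate for speculative motion

   Both loads are PFREE_CANDIDATEs with a true dependence on insn1, so
   their addresses differ only by a constant; if load2 did not trap in
   its block, hoisting load_insn into that block cannot trap either.  */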

/* Add dependencies between jumps and risky insns in the given ebb, so
   that a risky insn (e.g. one that may trap) will not be scheduled
   above a preceding jump.  */

static void
add_deps_for_risky_insns (rtx head, rtx tail)
{
  rtx insn, prev;
  int classification;
  rtx last_jump = NULL_RTX;
  rtx next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (control_flow_insn_p (insn))
      {
        bb = BLOCK_FOR_INSN (insn);
        bb->aux = last_block;
        last_block = bb;
        last_jump = insn;
      }
    else if (INSN_P (insn) && last_jump != NULL_RTX)
      {
        classification = haifa_classify_insn (insn);
        prev = last_jump;

        switch (classification)
          {
          case PFREE_CANDIDATE:
            if (flag_schedule_speculative_load)
              {
                bb = earliest_block_with_similiar_load (last_block, insn);
                if (bb)
                  {
                    bb = (basic_block) bb->aux;
                    if (!bb)
                      break;
                    prev = BB_END (bb);
                  }
              }
            /* Fall through.  */
          case TRAP_RISKY:
          case IRISKY:
          case PRISKY_CANDIDATE:
            /* ??? We could implement better checking of PRISKY_CANDIDATEs
               analogous to sched-rgn.c.  */
            /* We cannot change the mode of the backward
               dependency because REG_DEP_ANTI has the lowest
               rank.  */
            if (! sched_insns_conditions_mutex_p (insn, prev))
              {
                dep_def _dep, *dep = &_dep;

                init_dep (dep, prev, insn, REG_DEP_ANTI);

                if (!(current_sched_info->flags & USE_DEPS_LIST))
                  {
                    enum DEPS_ADJUST_RESULT res;

                    res = sd_add_or_update_dep (dep, false);

                    /* We can't change an existing dependency with
                       DEP_ANTI.  */
                    gcc_assert (res != DEP_CHANGED);
                  }
                else
                  {
                    if ((current_sched_info->flags & DO_SPECULATION)
                        && (spec_info->mask & BEGIN_CONTROL))
                      DEP_STATUS (dep) = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
                                                       MAX_DEP_WEAK);

                    sd_add_or_update_dep (dep, false);

                    /* DEP_STATUS could have been changed.
                       No assertion here.  */
                  }
              }
            break;

          default:
            break;
          }
      }

  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}

/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.  */

static basic_block
schedule_ebb (rtx head, rtx tail)
{
  basic_block first_bb, target_bb;
  struct deps_desc tmp_deps;

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they always are single block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  schedule_block (&target_bb);
  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks, so some of them
     may have become empty.  Can't assert (target_bb == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
              || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return last_bb;
}

/* The one entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx tail;

  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
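  /* The parameter values above are percentages; rescale into the
     REG_BR_PROB_BASE-based units used by e->probability so the cutoff
     can be compared directly against edge probabilities below.  */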
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return;

  /* Set up the scheduler hooks and info structures.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
            sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB (bb)
    {
      rtx head = BB_HEAD (bb);

      /* Extend the ebb along likely-taken fallthru edges.  */
      for (;;)
        {
          edge e;

          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          e = find_fallthru_edge (bb->succs);
          if (! e)
            break;
          if (e->probability <= probability_cutoff)
            break;
          bb = bb->next_bb;
        }

      /* The rest of this code is easily confused by leading or trailing
         notes, labels and boundary debug insns, so trim them off HEAD
         and TAIL here.  */
      while (head != tail)
        {
          if (NOTE_P (head) || BOUNDARY_DEBUG_INSN_P (head))
            head = NEXT_INSN (head);
          else if (NOTE_P (tail) || BOUNDARY_DEBUG_INSN_P (tail))
            tail = PREV_INSN (tail);
          else if (LABEL_P (head))
            head = NEXT_INSN (head);
          else
            break;
        }

      bb = schedule_ebb (head, tail);
    }
  bitmap_clear (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}

/* INSN has been added to/removed from current ebb.  */
static void
ebb_add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERs,
     therefore, they always form single block EBBs,
     therefore, we can use rec->index to identify such EBBs.  */
  if (after == EXIT_BLOCK_PTR)
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          /* We handle interblock movement of the speculation check
             or over a speculation check in
             haifa-sched.c: move_block_after_check ().  */
          && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
          && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
        {
          /* Assert that we don't move jumps across blocks.  */
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return 0;
    }
  else
    /* Return next non empty block.  */
    {
      /* A block whose basic block note is also its last insn contains
         no real insns, so skip it.  */
      do
        {
          gcc_assert (bb != last_bb);

          bb = bb->next_bb;
        }
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
                      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK (jump_bbi);
}

#endif /* INSN_SCHEDULING */