/* Instruction scheduling pass.
- Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
Used to determine if we need to fix INSN_TICKs. */
static bool added_recovery_block_p;
-/* Counters of different types of speculative isntructions. */
+/* Counters of different types of speculative instructions. */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
/* Pointers to GLAT data. See init_glat for more information. */
static void sched_remove_insn (rtx);
static void clear_priorities (rtx);
static void add_jump_dependencies (rtx, rtx);
-static rtx bb_note (basic_block);
static void calc_priorities (rtx);
#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
}
/* Add an element INSN to the ready list so that it ends up with the
- lowest/highest priority dependending on FIRST_P. */
+ lowest/highest priority depending on FIRST_P. */
HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
while (insn != tail && NOTE_NOT_BB_P (insn))
{
rtx next = NEXT_INSN (insn);
+ basic_block bb = BLOCK_FOR_INSN (insn);
+
/* Delete the note from its current position. */
if (prev)
NEXT_INSN (prev) = next;
if (next)
PREV_INSN (next) = prev;
+ if (bb)
+ {
+ /* Basic block can begin with either LABEL or
+ NOTE_INSN_BASIC_BLOCK. */
+ gcc_assert (BB_HEAD (bb) != insn);
+
+ /* Check if we are removing last insn in the BB. */
+ if (BB_END (bb) == insn)
+ BB_END (bb) = prev;
+ }
+
/* See sched_analyze to see how these are handled. */
if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
{
rtx prev = PREV_INSN (insn);
- while (insn != tail && NOTE_P (insn))
+ while (insn != tail && NOTE_NOT_BB_P (insn))
{
rtx next = NEXT_INSN (insn);
if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
{
+ basic_block bb = BLOCK_FOR_INSN (insn);
+
/* Delete the note from its current position. */
if (prev)
NEXT_INSN (prev) = next;
if (next)
PREV_INSN (next) = prev;
+ if (bb)
+ {
+ /* Basic block can begin with either LABEL or
+ NOTE_INSN_BASIC_BLOCK. */
+ gcc_assert (BB_HEAD (bb) != insn);
+
+ /* Check if we are removing last insn in the BB. */
+ if (BB_END (bb) == insn)
+ BB_END (bb) = prev;
+ }
+
/* Record line-number notes so they can be reused. */
LINE_NOTE (insn) = insn;
}
find_insn_reg_weight1 (insn);
}
-/* Calculate INSN_REG_WEIGHT for single insntruction.
+/* Calculate INSN_REG_WEIGHT for single instruction.
Separated from find_insn_reg_weight because of need
to initialize new instruction in generate_recovery_code. */
static void
fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
(*current_sched_info->print_insn) (insn, 0));
- ready_add (ready, insn, false);
- if (sched_verbose >= 2)
- fprintf (sched_dump, "moving to ready without stalls\n");
+ /* If the ready list is full, delay the insn for 1 cycle.
+ See the comment in schedule_block for the rationale. */
+ if (!reload_completed
+ && ready->n_ready > MAX_SCHED_READY_INSNS
+ && !SCHED_GROUP_P (insn))
+ {
+ if (sched_verbose >= 2)
+ fprintf (sched_dump, "requeued because ready full\n");
+ queue_insn (insn, 1);
+ }
+ else
+ {
+ ready_add (ready, insn, false);
+ if (sched_verbose >= 2)
+ fprintf (sched_dump, "moving to ready without stalls\n");
+ }
}
free_INSN_LIST_list (&insn_queue[q_ptr]);
make this function try different samples of ready insns. READY
is current queue `ready'. Global array READY_TRY reflects what
insns are already issued in this try. MAX_POINTS is the sum of points
- of all instructions in READY. The function stops immediatelly,
+ of all instructions in READY. The function stops immediately,
if it has reached a solution in which all instructions can be issued.
INDEX will contain index of the best insn in READY. The following
function is used only for first cycle multipass scheduling. */
&& spec_info->flags & (PREFER_NON_DATA_SPEC
| PREFER_NON_CONTROL_SPEC))
{
- rtx x;
- int s;
-
for (i = 0, n = ready->n_ready; i < n; i++)
{
+ rtx x;
+ ds_t s;
+
x = ready_element (ready, i);
s = TODO_SPEC (x);
|| (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
&& !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
(insn)))
+ /* Discard speculative instruction that stands first in the ready
+ list. */
{
change_queue_index (insn, 1);
return 0;
in try_ready () (which is called through init_ready_list ()). */
(*current_sched_info->init_ready_list) ();
+ /* The algorithm is O(n^2) in the number of ready insns at any given
+ time in the worst case. Before reload we are more likely to have
+ big lists so truncate them to a reasonable size. */
+ if (!reload_completed && ready.n_ready > MAX_SCHED_READY_INSNS)
+ {
+ ready_sort (&ready);
+
+ /* Find first free-standing insn past MAX_SCHED_READY_INSNS. */
+ for (i = MAX_SCHED_READY_INSNS; i < ready.n_ready; i++)
+ if (!SCHED_GROUP_P (ready_element (&ready, i)))
+ break;
+
+ if (sched_verbose >= 2)
+ {
+ fprintf (sched_dump,
+ ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
+ fprintf (sched_dump,
+ ";;\t\t before reload => truncated to %d insns\n", i);
+ }
+
+ /* Delay all insns past it for 1 cycle. */
+ while (i < ready.n_ready)
+ queue_insn (ready_remove (&ready, i), 1);
+ }
+
/* Now we can restore basic block notes and maintain precise cfg. */
restore_bb_notes (*target_bb);
continue;
}
- /* DECISSION is made. */
+ /* DECISION is made. */
if (TODO_SPEC (insn) & SPECULATIVE)
generate_recovery_code (insn);
/* This is used to switch basic blocks by request
from scheduler front-end (actually, sched-ebb.c only).
This is used to process blocks with single fallthru
- edge. If successing block has jump, it [jump] will try
+ edge. If succeeding block has jump, it [jump] will try
to move at the end of current bb, thus corrupting CFG. */
|| current_sched_info->advance_target_bb (*target_bb, insn))
{
spec_info->weakness_cutoff =
(PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
else
- /* So we won't read anything accidently. */
+ /* So we won't read anything accidentally. */
spec_info = 0;
#ifdef ENABLE_CHECKING
check_sched_flags ();
#endif
}
else
- /* So we won't read anything accidently. */
+ /* So we won't read anything accidentally. */
spec_info = 0;
/* Initialize issue_rate. */
}
/* Fix INSN_TICKs of the instructions in the current block as well as
- INSN_TICKs of their dependants.
+ INSN_TICKs of their dependents.
HEAD and TAIL are the begin and the end of the current scheduled block. */
static void
fix_inter_tick (rtx head, rtx tail)
|| !RECOVERY_BLOCK (next)
|| RECOVERY_BLOCK (next) == EXIT_BLOCK_PTR);
- if (*ts == 0 && ORIG_PAT (next) && !RECOVERY_BLOCK (next))
- /* We should change pattern of every previously speculative
- instruction - and we determine if NEXT was speculative by using
- ORIG_PAT field. Except one case - simple checks have ORIG_PAT
- pat too, hence we also check for the RECOVERY_BLOCK. */
- {
- change_pattern (next, ORIG_PAT (next));
- ORIG_PAT (next) = 0;
- }
-
if (*ts & HARD_DEP)
{
/* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
change_queue_index (next, QUEUE_NOWHERE);
return -1;
}
+ else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !RECOVERY_BLOCK (next))
+ /* We should change pattern of every previously speculative
+ instruction - and we determine if NEXT was speculative by using
+ ORIG_PAT field. Except one case - simple checks have ORIG_PAT
+ pat too, hence we also check for the RECOVERY_BLOCK. */
+ {
+ change_pattern (next, ORIG_PAT (next));
+ ORIG_PAT (next) = 0;
+ }
if (sched_verbose >= 2)
{
tick = INSN_TICK (next);
/* if tick is not equal to INVALID_TICK, then update
INSN_TICK of NEXT with the most recent resolved dependence
- cost. Overwise, recalculate from scratch. */
+ cost. Otherwise, recalculate from scratch. */
full_p = tick == INVALID_TICK;
do
{
/* We have nothing to do. */
return;
- /* Remove NEXT from whereever it is now. */
+ /* Remove NEXT from wherever it is now. */
if (i == QUEUE_READY)
ready_remove_insn (next);
else if (i >= 0)
ds = DEP_STATUS (link);
- if (fs && (ds & DEP_TYPES) == DEP_TRUE)
- ds = (ds & ~BEGIN_SPEC) | fs;
+ if (/* If we want to create speculative dep. */
+ fs
+ /* And we can do that because this is a true dep. */
+ && (ds & DEP_TYPES) == DEP_TRUE)
+ {
+ gcc_assert (!(ds & BE_IN_SPEC));
+
+ if (/* If this dep can be overcome with 'begin speculation'. */
+ ds & BEGIN_SPEC)
+ /* Then we have a choice: keep the dep 'begin speculative'
+ or transform it into 'be in speculative'. */
+ {
+ if (/* In try_ready we assert that if insn once became ready
+ it can be removed from the ready (or queue) list only
+ due to backend decision. Hence we can't let the
+ probability of the speculative dep to decrease. */
+ dep_weak (ds) <= dep_weak (fs))
+ /* Transform it to be in speculative. */
+ ds = (ds & ~BEGIN_SPEC) | fs;
+ }
+ else
+ /* Mark the dep as 'be in speculative'. */
+ ds |= fs;
+ }
add_back_forw_dep (consumer, twin, REG_NOTE_KIND (link), ds);
}
twins = alloc_INSN_LIST (twin, twins);
- /* Add dependences between TWIN and all apropriate
+ /* Add dependences between TWIN and all appropriate
instructions from REC. */
do
{
gcc_assert (ORIG_PAT (insn));
- /* Initialize TWIN (twin is a dublicate of original instruction
+ /* Initialize TWIN (twin is a duplicate of original instruction
in the recovery block). */
if (rec != EXIT_BLOCK_PTR)
{
add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
if (!mutate_p)
- /* Fix priorities. If MUTATE_P is nonzero, this is not neccessary,
+ /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
because it'll be done later in add_to_speculative_block. */
{
clear_priorities (twin);
/* Removes dependency between instructions in the recovery block REC
and usual region instructions. It keeps inner dependences so it
- won't be neccessary to recompute them. */
+ won't be necessary to recompute them. */
static void
fix_recovery_deps (basic_block rec)
{
/* Unlink basic block notes and labels and saves them, so they
can be easily restored. We unlink basic block notes in EBB to
- provide back-compatability with the previous code, as target backends
+ provide back-compatibility with the previous code, as target backends
assume that there'll be only instructions between
current_sched_info->{head and tail}. We restore these notes as soon
as we can.
/* Initialize GLAT (global_live_at_{start, end}) structures.
GLAT structures are used to substitute global_live_{start, end}
- regsets during scheduling. This is neccessary to use such functions as
- split_block (), as they assume consistancy of register live information. */
+ regsets during scheduling. This is necessary to use such functions as
+ split_block (), as they assume consistency of register live information. */
static void
init_glat (void)
{
}
/* Return the NOTE_INSN_BASIC_BLOCK of BB. */
-static rtx
+rtx
bb_note (basic_block bb)
{
rtx note;
}
/* Helper function for check_cfg.
- Return non-zero, if edge vector pointed to by EL has edge with TYPE in
+ Return nonzero, if edge vector pointed to by EL has edge with TYPE in
its flags. */
static int
has_edge_p (VEC(edge,gc) *el, int type)
gcc_assert (EDGE_COUNT (bb->succs) == 1
&& BARRIER_P (NEXT_INSN (head)));
else if (any_condjump_p (head))
- gcc_assert (EDGE_COUNT (bb->succs) > 1
- && !BARRIER_P (NEXT_INSN (head)));
+ gcc_assert (/* Usual case. */
+ (EDGE_COUNT (bb->succs) > 1
+ && !BARRIER_P (NEXT_INSN (head)))
+ /* Or jump to the next instruction. */
+ || (EDGE_COUNT (bb->succs) == 1
+ && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
+ == JUMP_LABEL (head))));
}
if (BB_END (bb) == head)
{
gcc_assert (bb == 0);
}
-/* Perform few consistancy checks of flags in different data structures. */
+/* Perform a few consistency checks of flags in different data structures. */
static void
check_sched_flags (void)
{
gcc_assert (f & USE_GLAT);
}
-/* Checks global_live_at_{start, end} regsets. */
+/* Check global_live_at_{start, end} regsets.
+ If FATAL_P is TRUE, then abort execution at the first failure.
+ Otherwise, print diagnostics to STDERR (this mode is for calling
+ from debugger). */
void
-check_reg_live (void)
+check_reg_live (bool fatal_p)
{
basic_block bb;
i = bb->index;
if (glat_start[i])
- gcc_assert (bitmap_equal_p (bb->il.rtl->global_live_at_start,
- glat_start[i]));
+ {
+ bool b = bitmap_equal_p (bb->il.rtl->global_live_at_start,
+ glat_start[i]);
+
+ if (!b)
+ {
+ gcc_assert (!fatal_p);
+
+ fprintf (stderr, ";; check_reg_live_at_start (%d) failed.\n", i);
+ }
+ }
+
if (glat_end[i])
- gcc_assert (bitmap_equal_p (bb->il.rtl->global_live_at_end,
- glat_end[i]));
+ {
+ bool b = bitmap_equal_p (bb->il.rtl->global_live_at_end,
+ glat_end[i]);
+
+ if (!b)
+ {
+ gcc_assert (!fatal_p);
+
+ fprintf (stderr, ";; check_reg_live_at_end (%d) failed.\n", i);
+ }
+ }
}
}
#endif /* ENABLE_CHECKING */