/* Instruction scheduling pass.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com). Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com).
This pass must update information that subsequent passes expect to
be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
- reg_n_calls_crossed, and reg_live_length. Also, BLOCK_HEAD,
- BLOCK_END.
+ reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
The information in the line number notes is carefully retained by
this pass. Notes that refer to the starting and ending of
exception regions are also carefully retained by this pass. */
/* fix_sched_param () is called from toplev.c upon detection
of the -fsched-verbose=N option. */
void
-fix_sched_param (param, val)
- const char *param, *val;
+fix_sched_param (const char *param, const char *val)
{
if (!strcmp (param, "verbose"))
sched_verbose_param = atoi (val);
int n_ready;
};
-static int may_trap_exp PARAMS ((rtx, int));
+static int may_trap_exp (rtx, int);
/* Nonzero iff the address is composed of at most one register. */
#define CONST_BASED_ADDRESS_P(x) \
as found by analyzing insn's expression. */
static int
-may_trap_exp (x, is_store)
- rtx x;
- int is_store;
+may_trap_exp (rtx x, int is_store)
{
enum rtx_code code;
moved speculatively, by examining its pattern, returning:
TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
TRAP_FREE: non-load insn.
- IFREE: load from a globaly safe location.
+ IFREE: load from a globally safe location.
IRISKY: volatile load.
PFREE_CANDIDATE, PRISKY_CANDIDATE: a load that needs to be checked
for being either PFREE or PRISKY. */
int
-haifa_classify_insn (insn)
- rtx insn;
+haifa_classify_insn (rtx insn)
{
rtx pat = PATTERN (insn);
int tmp_class = TRAP_FREE;
/* The scheduler using only DFA description should never use the
following five functions: */
-static unsigned int blockage_range PARAMS ((int, rtx));
-static void clear_units PARAMS ((void));
-static void schedule_unit PARAMS ((int, rtx, int));
-static int actual_hazard PARAMS ((int, rtx, int, int));
-static int potential_hazard PARAMS ((int, rtx, int));
-
-static int priority PARAMS ((rtx));
-static int rank_for_schedule PARAMS ((const PTR, const PTR));
-static void swap_sort PARAMS ((rtx *, int));
-static void queue_insn PARAMS ((rtx, int));
-static int schedule_insn PARAMS ((rtx, struct ready_list *, int));
-static int find_set_reg_weight PARAMS ((rtx));
-static void find_insn_reg_weight PARAMS ((int));
-static void adjust_priority PARAMS ((rtx));
-static void advance_one_cycle PARAMS ((void));
+static unsigned int blockage_range (int, rtx);
+static void clear_units (void);
+static void schedule_unit (int, rtx, int);
+static int actual_hazard (int, rtx, int, int);
+static int potential_hazard (int, rtx, int);
+
+static int priority (rtx);
+static int rank_for_schedule (const void *, const void *);
+static void swap_sort (rtx *, int);
+static void queue_insn (rtx, int);
+static int schedule_insn (rtx, struct ready_list *, int);
+static int find_set_reg_weight (rtx);
+static void find_insn_reg_weight (int);
+static void adjust_priority (rtx);
+static void advance_one_cycle (void);
/* Notes handling mechanism:
=========================
unlink_other_notes ()). After scheduling the block, these notes are
inserted at the beginning of the block (in schedule_block()). */
-static rtx unlink_other_notes PARAMS ((rtx, rtx));
-static rtx unlink_line_notes PARAMS ((rtx, rtx));
-static rtx reemit_notes PARAMS ((rtx, rtx));
+static rtx unlink_other_notes (rtx, rtx);
+static rtx unlink_line_notes (rtx, rtx);
+static rtx reemit_notes (rtx, rtx);
-static rtx *ready_lastpos PARAMS ((struct ready_list *));
-static void ready_sort PARAMS ((struct ready_list *));
-static rtx ready_remove_first PARAMS ((struct ready_list *));
+static rtx *ready_lastpos (struct ready_list *);
+static void ready_sort (struct ready_list *);
+static rtx ready_remove_first (struct ready_list *);
-static void queue_to_ready PARAMS ((struct ready_list *));
+static void queue_to_ready (struct ready_list *);
+static int early_queue_to_ready (state_t, struct ready_list *);
-static void debug_ready_list PARAMS ((struct ready_list *));
+static void debug_ready_list (struct ready_list *);
-static rtx move_insn1 PARAMS ((rtx, rtx));
-static rtx move_insn PARAMS ((rtx, rtx));
+static rtx move_insn1 (rtx, rtx);
+static rtx move_insn (rtx, rtx);
/* The following functions are used to implement multi-pass scheduling
on the first cycle. They are used only by the DFA-based scheduler. */
-static rtx ready_element PARAMS ((struct ready_list *, int));
-static rtx ready_remove PARAMS ((struct ready_list *, int));
-static int max_issue PARAMS ((struct ready_list *, int *));
+static rtx ready_element (struct ready_list *, int);
+static rtx ready_remove (struct ready_list *, int);
+static int max_issue (struct ready_list *, int *);
-static rtx choose_ready PARAMS ((struct ready_list *));
+static rtx choose_ready (struct ready_list *);
#endif /* INSN_SCHEDULING */
\f
\f
#ifndef INSN_SCHEDULING
void
-schedule_insns (dump_file)
- FILE *dump_file ATTRIBUTE_UNUSED;
+schedule_insns (FILE *dump_file ATTRIBUTE_UNUSED)
{
}
#else
should never use the following function. */
HAIFA_INLINE int
-insn_unit (insn)
- rtx insn;
+insn_unit (rtx insn)
{
int unit = INSN_UNIT (insn);
function. */
HAIFA_INLINE static unsigned int
-blockage_range (unit, insn)
- int unit;
- rtx insn;
+blockage_range (int unit, rtx insn)
{
unsigned int blockage = INSN_BLOCKAGE (insn);
unsigned int range;
following function. */
rtx
-get_unit_last_insn (instance)
- int instance;
+get_unit_last_insn (int instance)
{
return unit_last_insn[instance];
}
/* Reset the function unit state to the null state. */
static void
-clear_units ()
+clear_units (void)
{
- memset ((char *) unit_last_insn, 0, sizeof (unit_last_insn));
- memset ((char *) unit_tick, 0, sizeof (unit_tick));
- memset ((char *) unit_n_insns, 0, sizeof (unit_n_insns));
+ memset (unit_last_insn, 0, sizeof (unit_last_insn));
+ memset (unit_tick, 0, sizeof (unit_tick));
+ memset (unit_n_insns, 0, sizeof (unit_n_insns));
}
/* Return the issue-delay of an insn. The scheduler using only DFA
description should never use the following function. */
HAIFA_INLINE int
-insn_issue_delay (insn)
- rtx insn;
+insn_issue_delay (rtx insn)
{
int i, delay = 0;
int unit = insn_unit (insn);
use the following function. */
HAIFA_INLINE int
-actual_hazard_this_instance (unit, instance, insn, clock, cost)
- int unit, instance, clock, cost;
- rtx insn;
+actual_hazard_this_instance (int unit, int instance, rtx insn, int clock,
+ int cost)
{
int tick = unit_tick[instance]; /* Issue time of the last issued insn. */
at time CLOCK. The scheduler using only DFA description should
never use the following function. */
-HAIFA_INLINE static void
-schedule_unit (unit, insn, clock)
- int unit, clock;
- rtx insn;
+static void
+schedule_unit (int unit, rtx insn, int clock)
{
int i;
was COST. The scheduler using only DFA description should never
use the following function. */
-HAIFA_INLINE static int
-actual_hazard (unit, insn, clock, cost)
- int unit, clock, cost;
- rtx insn;
+static int
+actual_hazard (int unit, rtx insn, int clock, int cost)
{
int i;
the following function. */
HAIFA_INLINE static int
-potential_hazard (unit, insn, cost)
- int unit, cost;
- rtx insn;
+potential_hazard (int unit, rtx insn, int cost)
{
int i, ncost;
unsigned int minb, maxb;
instruction results. */
HAIFA_INLINE int
-insn_cost (insn, link, used)
- rtx insn, link, used;
+insn_cost (rtx insn, rtx link, rtx used)
{
int cost = INSN_COST (insn);
cost = insn_default_latency (insn);
else
cost = result_ready_cost (insn);
-
+
if (cost < 0)
cost = 0;
-
+
INSN_COST (insn) = cost;
}
}
if (cost < 0)
cost = 0;
}
-
+
return cost;
}
/* Compute the priority number for INSN. */
static int
-priority (insn)
- rtx insn;
+priority (rtx insn)
{
rtx link;
}
\f
/* Macros and functions for keeping the priority queue sorted, and
- dealing with queueing and dequeueing of instructions. */
+ dealing with queuing and dequeuing of instructions. */
#define SCHED_SORT(READY, N_READY) \
do { if ((N_READY) == 2) \
unstable. */
static int
-rank_for_schedule (x, y)
- const PTR x;
- const PTR y;
+rank_for_schedule (const void *x, const void *y)
{
rtx tmp = *(const rtx *) y;
rtx tmp2 = *(const rtx *) x;
/* Resort the array A in which only element at index N may be out of order. */
HAIFA_INLINE static void
-swap_sort (a, n)
- rtx *a;
- int n;
+swap_sort (rtx *a, int n)
{
rtx insn = a[n - 1];
int i = n - 2;
chain for debugging purposes. */
HAIFA_INLINE static void
-queue_insn (insn, n_cycles)
- rtx insn;
- int n_cycles;
+queue_insn (rtx insn, int n_cycles)
{
int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
with the lowest priority. */
HAIFA_INLINE static rtx *
-ready_lastpos (ready)
- struct ready_list *ready;
+ready_lastpos (struct ready_list *ready)
{
if (ready->n_ready == 0)
abort ();
priority. */
HAIFA_INLINE void
-ready_add (ready, insn)
- struct ready_list *ready;
- rtx insn;
+ready_add (struct ready_list *ready, rtx insn)
{
if (ready->first == ready->n_ready)
{
return it. */
HAIFA_INLINE static rtx
-ready_remove_first (ready)
- struct ready_list *ready;
+ready_remove_first (struct ready_list *ready)
{
rtx t;
if (ready->n_ready == 0)
N_READY - 1. */
HAIFA_INLINE static rtx
-ready_element (ready, index)
- struct ready_list *ready;
- int index;
+ready_element (struct ready_list *ready, int index)
{
#ifdef ENABLE_CHECKING
if (ready->n_ready == 0 || index >= ready->n_ready)
has N_READY - 1. */
HAIFA_INLINE static rtx
-ready_remove (ready, index)
- struct ready_list *ready;
- int index;
+ready_remove (struct ready_list *ready, int index)
{
rtx t;
int i;
macro. */
HAIFA_INLINE static void
-ready_sort (ready)
- struct ready_list *ready;
+ready_sort (struct ready_list *ready)
{
rtx *first = ready_lastpos (ready);
SCHED_SORT (first, ready->n_ready);
provide a hook for the target to tweak itself. */
HAIFA_INLINE static void
-adjust_priority (prev)
- rtx prev;
+adjust_priority (rtx prev)
{
/* ??? There used to be code here to try and estimate how an insn
affected register lifetimes, but it did it by looking at REG_DEAD
/* Advance time on one cycle. */
HAIFA_INLINE static void
-advance_one_cycle ()
+advance_one_cycle (void)
{
if (targetm.sched.use_dfa_pipeline_interface
&& (*targetm.sched.use_dfa_pipeline_interface) ())
zero for insns in a schedule group). */
static int
-schedule_insn (insn, ready, clock)
- rtx insn;
- struct ready_list *ready;
- int clock;
+schedule_insn (rtx insn, struct ready_list *ready, int clock)
{
rtx link;
int advance = 0;
int unit = 0;
+ int premature_issue = 0;
if (!targetm.sched.use_dfa_pipeline_interface
|| !(*targetm.sched.use_dfa_pipeline_interface) ())
char buf[2048];
print_insn (buf, insn, 0);
- buf[40]=0;
+ buf[40] = 0;
fprintf (sched_dump, ";;\t%3i--> %-40s:", clock, buf);
if (recog_memoized (insn) < 0)
if (MAX_BLOCKAGE > 1 || issue_rate > 1 || sched_verbose)
schedule_unit (unit, insn, clock);
-
+
if (INSN_DEPEND (insn) == 0)
return 0;
}
+ if (INSN_TICK (insn) > clock)
+ {
+ /* 'insn' has been prematurely moved from the queue to the
+ ready list. */
+ premature_issue = INSN_TICK (insn) - clock;
+ }
+
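+ /* Compensate dependents for the early issue: each successor's
+ INSN_TICK in the loop below is pushed out by PREMATURE_ISSUE, so
+ its readiness is still measured from INSN's original ready time. */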
for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
{
rtx next = XEXP (link, 0);
int cost = insn_cost (insn, link, next);
- INSN_TICK (next) = MAX (INSN_TICK (next), clock + cost);
+ INSN_TICK (next) = MAX (INSN_TICK (next), clock + cost + premature_issue);
if ((INSN_DEP_COUNT (next) -= 1) == 0)
{
Returns the insn following the notes. */
static rtx
-unlink_other_notes (insn, tail)
- rtx insn, tail;
+unlink_other_notes (rtx insn, rtx tail)
{
rtx prev = PREV_INSN (insn);
they can be reused. Returns the insn following the notes. */
static rtx
-unlink_line_notes (insn, tail)
- rtx insn, tail;
+unlink_line_notes (rtx insn, rtx tail)
{
rtx prev = PREV_INSN (insn);
/* Return the head and tail pointers of BB. */
void
-get_block_head_tail (b, headp, tailp)
- int b;
- rtx *headp;
- rtx *tailp;
+get_block_head_tail (int b, rtx *headp, rtx *tailp)
{
/* HEAD and TAIL delimit the basic block being scheduled. */
- rtx head = BLOCK_HEAD (b);
- rtx tail = BLOCK_END (b);
+ rtx head = BB_HEAD (BASIC_BLOCK (b));
+ rtx tail = BB_END (BASIC_BLOCK (b));
/* Don't include any notes or labels at the beginning of the
basic block, or notes at the ends of basic blocks. */
/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
int
-no_real_insns_p (head, tail)
- rtx head, tail;
+no_real_insns_p (rtx head, rtx tail)
{
while (head != NEXT_INSN (tail))
{
block in which notes should be processed. */
void
-rm_line_notes (head, tail)
- rtx head, tail;
+rm_line_notes (rtx head, rtx tail)
{
rtx next_tail;
rtx insn;
the boundaries of the block in which notes should be processed. */
void
-save_line_notes (b, head, tail)
- int b;
- rtx head, tail;
+save_line_notes (int b, rtx head, rtx tail)
{
rtx next_tail;
be processed. */
void
-restore_line_notes (head, tail)
- rtx head, tail;
+restore_line_notes (rtx head, rtx tail)
{
rtx line, note, prev, new;
int added_notes = 0;
insns list. */
void
-rm_redundant_line_notes ()
+rm_redundant_line_notes (void)
{
rtx line = 0;
rtx insn = get_insns ();
of notes ended by NOTE_LIST. */
void
-rm_other_notes (head, tail)
- rtx head;
- rtx tail;
+rm_other_notes (rtx head, rtx tail)
{
rtx next_tail;
rtx insn;
a new register is not needed. */
static int
-find_set_reg_weight (x)
- rtx x;
+find_set_reg_weight (rtx x)
{
if (GET_CODE (x) == CLOBBER
&& register_operand (SET_DEST (x), VOIDmode))
/* Calculate INSN_REG_WEIGHT for all insns of a block. */
static void
-find_insn_reg_weight (b)
- int b;
+find_insn_reg_weight (int b)
{
rtx insn, next_tail, head, tail;
x = PATTERN (insn);
reg_weight += find_set_reg_weight (x);
if (GET_CODE (x) == PARALLEL)
- {
- int j;
- for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
- {
- x = XVECEXP (PATTERN (insn), 0, j);
+ {
+ int j;
+ for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
+ {
+ x = XVECEXP (PATTERN (insn), 0, j);
reg_weight += find_set_reg_weight (x);
- }
- }
+ }
+ }
/* Decrement weight for each register that dies here. */
for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
{
/* Move insns that became ready to fire from queue to ready list. */
static void
-queue_to_ready (ready)
- struct ready_list *ready;
+queue_to_ready (struct ready_list *ready)
{
rtx insn;
rtx link;
}
}
+/* Used by early_queue_to_ready. Determines whether it is "ok" to
+ prematurely move INSN from the queue to the ready list. Currently,
+ if a target defines the hook 'is_costly_dependence', this function
+ uses the hook to check whether there exist any dependences which are
+ considered costly by the target, between INSN and other insns that
+ have already been scheduled. Dependences are checked up to Y cycles
+ back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
+ control of this value.
+ (Other considerations could be taken into account instead, or in
+ addition, depending on user flags and target hooks.) */
+
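+/* For example, with the default -fsched-stalled-insns-dep=1, the loop
+ below scans back from last_scheduled_insn until it reaches an insn
+ marked with TImode, i.e. roughly one dispatch group's worth of
+ already-scheduled insns is checked against the hook. */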
+static bool
+ok_for_early_queue_removal (rtx insn)
+{
+ int n_cycles;
+ rtx prev_insn = last_scheduled_insn;
+
+ if (targetm.sched.is_costly_dependence)
+ {
+ for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
+ {
+ for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
+ {
+ rtx dep_link = 0;
+ int dep_cost;
+
+ if (GET_CODE (prev_insn) != NOTE)
+ {
+ dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn));
+ if (dep_link)
+ {
+ dep_cost = insn_cost (prev_insn, dep_link, insn);
+ if (targetm.sched.is_costly_dependence (prev_insn, insn,
+ dep_link, dep_cost,
+ flag_sched_stalled_insns_dep - n_cycles))
+ return false;
+ }
+ }
+
+ if (GET_MODE (prev_insn) == TImode) /* End of dispatch group. */
+ break;
+ }
+
+ if (!prev_insn)
+ break;
+ prev_insn = PREV_INSN (prev_insn);
+ }
+ }
+
+ return true;
+}
+
+
+/* Remove insns from the queue before they become "ready" with respect
+ to function unit (FU) latency considerations. */
+
+static int
+early_queue_to_ready (state_t state, struct ready_list *ready)
+{
+ rtx insn;
+ rtx link;
+ rtx next_link;
+ rtx prev_link;
+ bool move_to_ready;
+ int cost;
+ state_t temp_state = alloca (dfa_state_size);
+ int stalls;
+ int insns_removed = 0;
+
+ /*
+ Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
+ function:
+
+ X == 0: There is no limit on how many queued insns can be removed
+ prematurely. (flag_sched_stalled_insns = -1).
+
+ X >= 1: Only X queued insns can be removed prematurely in each
+ invocation. (flag_sched_stalled_insns = X).
+
+ Otherwise: Early queue removal is disabled.
+ (flag_sched_stalled_insns = 0)
+ */
+
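+ /* For example, -fsched-stalled-insns=3 permits at most three queued
+ insns to enter the ready list prematurely per invocation, while
+ -fsched-stalled-insns=0 places no limit on the number removed. */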
+ if (! flag_sched_stalled_insns)
+ return 0;
+
+ for (stalls = 0; stalls <= MAX_INSN_QUEUE_INDEX; stalls++)
+ {
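+ /* The insn queue is a circular buffer indexed relative to q_ptr;
+ slot NEXT_Q_AFTER (q_ptr, stalls) holds the insns that become
+ ready STALLS cycles from now. */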
+ if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
+ {
+ if (sched_verbose > 6)
+ fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
+
+ prev_link = 0;
+ while (link)
+ {
+ next_link = XEXP (link, 1);
+ insn = XEXP (link, 0);
+ if (insn && sched_verbose > 6)
+ print_rtl_single (sched_dump, insn);
+
+ memcpy (temp_state, state, dfa_state_size);
+ if (recog_memoized (insn) < 0)
+ /* Use a non-negative cost to mark the insn as not ready,
+ avoiding an infinite Q->R->Q->R... cycle. */
+ cost = 0;
+ else
+ cost = state_transition (temp_state, insn);
+
+ if (sched_verbose >= 6)
+ fprintf (sched_dump, "transition cost = %d\n", cost);
+
+ move_to_ready = false;
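+ /* state_transition returns a negative value when the DFA can
+ issue the insn in the current cycle, so cost < 0 identifies a
+ candidate for early removal from the queue. */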
+ if (cost < 0)
+ {
+ move_to_ready = ok_for_early_queue_removal (insn);
+ if (move_to_ready)
+ {
+ /* Move from Q to R. */
+ q_size -= 1;
+ ready_add (ready, insn);
+
+ if (prev_link)
+ XEXP (prev_link, 1) = next_link;
+ else
+ insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
+
+ free_INSN_LIST_node (link);
+
+ if (sched_verbose >= 2)
+ fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
+ (*current_sched_info->print_insn) (insn, 0));
+
+ insns_removed++;
+ if (insns_removed == flag_sched_stalled_insns)
+ /* Remove no more than flag_sched_stalled_insns insns per invocation. */
+ return insns_removed;
+ }
+ }
+
+ if (!move_to_ready)
+ prev_link = link;
+
+ link = next_link;
+ } /* while link */
+ } /* if link */
+
+ } /* for stalls.. */
+
+ return insns_removed;
+}
+
+
/* Print the ready list for debugging purposes. Callable from debugger. */
static void
-debug_ready_list (ready)
- struct ready_list *ready;
+debug_ready_list (struct ready_list *ready)
{
rtx *p;
int i;
/* move_insn1: Remove INSN from insn chain, and link it after LAST insn. */
static rtx
-move_insn1 (insn, last)
- rtx insn, last;
+move_insn1 (rtx insn, rtx last)
{
NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
output by the instruction scheduler. Return the new value of LAST. */
static rtx
-reemit_notes (insn, last)
- rtx insn;
- rtx last;
+reemit_notes (rtx insn, rtx last)
{
rtx note, retval;
return value from the first call to reemit_notes. */
static rtx
-move_insn (insn, last)
- rtx insn, last;
+move_insn (rtx insn, rtx last)
{
rtx retval = NULL;
processors state. */
static int cycle_issued_insns;
+/* The following variable holds the maximal number of tries for
+ issuing insns in the first cycle multipass insn scheduling. We
+ define this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We
+ would not need this constraint if all real insns (with non-negative
+ codes) had reservations, because in that case the algorithm
+ complexity is O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the DFA
+ descriptions might be incomplete and such insns might occur. For
+ such descriptions, the complexity of the algorithm (without the
+ constraint) could reach DFA_LOOKAHEAD ** N, where N is the queue
+ length. */
+static int max_lookahead_tries;
+
+/* The following value is the value of the hook
+ `first_cycle_multipass_dfa_lookahead' at the last call of
+ `max_issue'. */
+static int cached_first_cycle_multipass_dfa_lookahead = 0;
+
+/* The following value is the value of `issue_rate' at the last call
+ of `sched_init'. */
+static int cached_issue_rate = 0;
+
/* The following function returns the maximal (or close to maximal)
number of insns which can be issued on the same cycle, one of which
is the insn with the best rank (the first insn in READY). To
of the best insn in READY. The following function is used only for
first cycle multipass scheduling. */
static int
-max_issue (ready, index)
- struct ready_list *ready;
- int *index;
+max_issue (struct ready_list *ready, int *index)
{
- int n, i, all, n_ready, lookahead, best, delay;
+ int n, i, all, n_ready, best, delay, tries_num;
struct choice_entry *top;
rtx insn;
- lookahead = (*targetm.sched.first_cycle_multipass_dfa_lookahead) ();
best = 0;
memcpy (choice_stack->state, curr_state, dfa_state_size);
top = choice_stack;
- top->rest = lookahead;
+ top->rest = cached_first_cycle_multipass_dfa_lookahead;
top->n = 0;
n_ready = ready->n_ready;
for (all = i = 0; i < n_ready; i++)
if (!ready_try [i])
all++;
i = 0;
+ tries_num = 0;
for (;;)
{
if (top->rest == 0 || i >= n_ready)
}
else if (!ready_try [i])
{
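+ /* Stop the search once the number of probed alternatives
+ exceeds the max_lookahead_tries bound; see the comment above
+ its declaration. */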
+ tries_num++;
+ if (tries_num > max_lookahead_tries)
+ break;
insn = ready_element (ready, i);
delay = state_transition (curr_state, insn);
if (delay < 0)
if (memcmp (top->state, curr_state, dfa_state_size) != 0)
n++;
top++;
- top->rest = lookahead;
+ top->rest = cached_first_cycle_multipass_dfa_lookahead;
top->index = i;
top->n = n;
memcpy (top->state, curr_state, dfa_state_size);
cycle multipass scheduling. */
static rtx
-choose_ready (ready)
- struct ready_list *ready;
+choose_ready (struct ready_list *ready)
{
- if (!targetm.sched.first_cycle_multipass_dfa_lookahead
- || (*targetm.sched.first_cycle_multipass_dfa_lookahead) () <= 0
- || SCHED_GROUP_P (ready_element (ready, 0)))
+ int lookahead = 0;
+
+ if (targetm.sched.first_cycle_multipass_dfa_lookahead)
+ lookahead = (*targetm.sched.first_cycle_multipass_dfa_lookahead) ();
+ if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
return ready_remove_first (ready);
else
{
/* Try to choose the better insn. */
- int index, i;
+ int index = 0, i;
rtx insn;
+ if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
+ {
+ cached_first_cycle_multipass_dfa_lookahead = lookahead;
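+ /* Recompute the bound: 100 * lookahead**issue_rate tries
+ (e.g. 1600 tries for lookahead == 4 and issue_rate == 2). */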
+ max_lookahead_tries = 100;
+ for (i = 0; i < issue_rate; i++)
+ max_lookahead_tries *= lookahead;
+ }
insn = ready_element (ready, 0);
if (INSN_CODE (insn) < 0)
return ready_remove_first (ready);
the instruction stream. */
rtx
-sched_emit_insn (pat)
- rtx pat;
+sched_emit_insn (rtx pat)
{
rtx insn = emit_insn_after (pat, last_scheduled_insn);
last_scheduled_insn = insn;
possibly bringing insns from subsequent blocks in the same region. */
void
-schedule_block (b, rgn_n_insns)
- int b;
- int rgn_n_insns;
+schedule_block (int b, int rgn_n_insns)
{
struct ready_list ready;
int i, first_cycle_insn_p;
/* Allocate the ready list. */
ready.veclen = rgn_n_insns + 1 + issue_rate;
ready.first = ready.veclen - 1;
- ready.vec = (rtx *) xmalloc (ready.veclen * sizeof (rtx));
+ ready.vec = xmalloc (ready.veclen * sizeof (rtx));
ready.n_ready = 0;
if (targetm.sched.use_dfa_pipeline_interface
{
/* It is used for first cycle multipass scheduling. */
temp_state = alloca (dfa_state_size);
- ready_try = (char *) xmalloc ((rgn_n_insns + 1) * sizeof (char));
- memset (ready_try, 0, (rgn_n_insns + 1) * sizeof (char));
- choice_stack
- = (struct choice_entry *) xmalloc ((rgn_n_insns + 1)
- * sizeof (struct choice_entry));
+ ready_try = xcalloc ((rgn_n_insns + 1), sizeof (char));
+ choice_stack = xmalloc ((rgn_n_insns + 1)
+ * sizeof (struct choice_entry));
for (i = 0; i <= rgn_n_insns; i++)
- choice_stack[i].state = (state_t) xmalloc (dfa_state_size);
+ choice_stack[i].state = xmalloc (dfa_state_size);
}
(*current_sched_info->init_ready_list) (&ready);
else
max_insn_queue_index_macro_value = max_insn_queue_index;
- insn_queue = (rtx *) alloca ((MAX_INSN_QUEUE_INDEX + 1) * sizeof (rtx));
- memset ((char *) insn_queue, 0, (MAX_INSN_QUEUE_INDEX + 1) * sizeof (rtx));
+ insn_queue = alloca ((MAX_INSN_QUEUE_INDEX + 1) * sizeof (rtx));
+ memset (insn_queue, 0, (MAX_INSN_QUEUE_INDEX + 1) * sizeof (rtx));
last_clock_var = -1;
/* Start just before the beginning of time. */
start_clock_var = clock_var;
clock_var++;
-
+
advance_one_cycle ();
-
+
/* Add to the ready list all pending insns that can be issued now.
If there are no ready insns, increment clock until one
is ready and add all pending insns at that point to the ready
list. */
queue_to_ready (&ready);
-
+
if (ready.n_ready == 0)
abort ();
-
+
if (sched_verbose >= 2)
{
fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
{
/* Sort the ready list based on priority. */
ready_sort (&ready);
-
+
if (sched_verbose >= 2)
{
fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
if (ready.n_ready == 0 || !can_issue_more
|| !(*current_sched_info->schedule_more_p) ())
break;
- insn = choose_ready (&ready);
+ insn = ready_remove_first (&ready);
cost = actual_hazard (insn_unit (insn), insn, clock_var, 0);
}
else
{
+ if (ready.n_ready == 0
+ && can_issue_more
+ && reload_completed)
+ {
+ /* Allow scheduling insns directly from the queue in case
+ there's nothing better to do (ready list is empty) but
+ there are still vacant dispatch slots in the current cycle. */
+ if (sched_verbose >= 6)
+ fprintf (sched_dump, ";;\t\tSecond chance\n");
+ memcpy (temp_state, curr_state, dfa_state_size);
+ if (early_queue_to_ready (temp_state, &ready))
+ ready_sort (&ready);
+ }
+
if (ready.n_ready == 0 || !can_issue_more
|| state_dead_lock_p (curr_state)
|| !(*current_sched_info->schedule_more_p) ())
break;
-
+
/* Select and remove the insn from the ready list. */
if (sort_p)
insn = choose_ready (&ready);
else
insn = ready_remove_first (&ready);
-
+
if (targetm.sched.dfa_new_cycle
&& (*targetm.sched.dfa_new_cycle) (sched_dump, sched_verbose,
insn, last_clock_var,
ready_add (&ready, insn);
break;
}
-
+
sort_p = TRUE;
memcpy (temp_state, curr_state, dfa_state_size);
if (recog_memoized (insn) < 0)
{
int j;
rtx bubble;
-
+
for (j = 0;
(bubble = (*targetm.sched.dfa_bubble) (j))
!= NULL_RTX;
j++)
{
memcpy (temp_state, curr_state, dfa_state_size);
-
+
if (state_transition (temp_state, bubble) < 0
&& state_transition (temp_state, insn) < 0)
break;
}
-
+
if (bubble != NULL_RTX)
{
if (insert_schedule_bubbles_p)
{
rtx copy;
-
+
copy = copy_rtx (PATTERN (bubble));
emit_insn_after (copy, last_scheduled_insn);
last_scheduled_insn
= NEXT_INSN (last_scheduled_insn);
INSN_CODE (last_scheduled_insn)
= INSN_CODE (bubble);
-
+
/* Annotate the same for the first insns
scheduling by using mode. */
PUT_MODE (last_scheduled_insn,
? clock_var - last_clock_var
: VOIDmode));
last_clock_var = clock_var;
-
+
if (sched_verbose >= 2)
{
fprintf (sched_dump,
";;\t\t--> scheduling bubble insn <<<%d>>>:reservation ",
INSN_UID (last_scheduled_insn));
-
+
if (recog_memoized (last_scheduled_insn)
< 0)
fprintf (sched_dump, "nothing");
else
print_reservation
(sched_dump, last_scheduled_insn);
-
+
fprintf (sched_dump, "\n");
}
}
cycle_issued_insns++;
memcpy (curr_state, temp_state, dfa_state_size);
}
-
+
if (targetm.sched.variable_issue)
can_issue_more =
(*targetm.sched.variable_issue) (sched_dump, sched_verbose,
if (!reload_completed)
{
rtx insn, link, next;
-
+
/* INSN_TICK (minimum clock tick at which the insn becomes
ready) may be not correct for the insn in the subsequent
blocks of the region. We should use a correct value of
/* Set_priorities: compute priority of each insn in the block. */
int
-set_priorities (head, tail)
- rtx head, tail;
+set_priorities (rtx head, rtx tail)
{
rtx insn;
int n_insn;
-
+ int sched_max_insns_priority =
+ current_sched_info->sched_max_insns_priority;
rtx prev_head;
prev_head = PREV_INSN (head);
return 0;
n_insn = 0;
+ sched_max_insns_priority = 0;
for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
{
if (GET_CODE (insn) == NOTE)
n_insn++;
(void) priority (insn);
+
+ if (INSN_PRIORITY_KNOWN (insn))
+ sched_max_insns_priority =
+ MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
}
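+ /* Record one more than the largest priority seen, so the value can
+ serve as a strict upper bound on insn priorities in the region
+ (presumably how consumers of sched_max_insns_priority use it). */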
+ sched_max_insns_priority += 1;
+ current_sched_info->sched_max_insns_priority =
+ sched_max_insns_priority;
return n_insn;
}
for debugging output. */
void
-sched_init (dump_file)
- FILE *dump_file;
+sched_init (FILE *dump_file)
{
int luid;
basic_block b;
else
issue_rate = 1;
+ if (cached_issue_rate != issue_rate)
+ {
+ cached_issue_rate = issue_rate;
+ /* To invalidate max_lookahead_tries: */
+ cached_first_cycle_multipass_dfa_lookahead = 0;
+ }
+
/* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
pseudos which do not cross calls. */
old_max_uid = get_max_uid () + 1;
- h_i_d = (struct haifa_insn_data *) xcalloc (old_max_uid, sizeof (*h_i_d));
+ h_i_d = xcalloc (old_max_uid, sizeof (*h_i_d));
for (i = 0; i < old_max_uid; i++)
h_i_d [i].cost = -1;
{
if (targetm.sched.init_dfa_pre_cycle_insn)
(*targetm.sched.init_dfa_pre_cycle_insn) ();
-
+
if (targetm.sched.init_dfa_post_cycle_insn)
(*targetm.sched.init_dfa_post_cycle_insn) ();
-
+
if (targetm.sched.first_cycle_multipass_dfa_lookahead
&& targetm.sched.init_dfa_bubbles)
(*targetm.sched.init_dfa_bubbles) ();
-
+
dfa_start ();
dfa_state_size = state_size ();
curr_state = xmalloc (dfa_state_size);
h_i_d[0].luid = 0;
luid = 1;
FOR_EACH_BB (b)
- for (insn = b->head;; insn = NEXT_INSN (insn))
+ for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
{
INSN_LUID (insn) = luid;
if (GET_CODE (insn) != NOTE)
++luid;
- if (insn == b->end)
+ if (insn == BB_END (b))
break;
}
{
rtx line;
- line_note_head = (rtx *) xcalloc (last_basic_block, sizeof (rtx));
+ line_note_head = xcalloc (last_basic_block, sizeof (rtx));
/* Save-line-note-head:
Determine the line-number at the start of each basic block.
FOR_EACH_BB (b)
{
- for (line = b->head; line; line = PREV_INSN (line))
+ for (line = BB_HEAD (b); line; line = PREV_INSN (line))
if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
{
line_note_head[b->index] = line;
}
/* Do a forward search as well, since we won't get to see the first
notes in a basic block. */
- for (line = b->head; line; line = NEXT_INSN (line))
+ for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
{
if (INSN_P (line))
break;
/* ??? Add a NOTE after the last insn of the last basic block. It is not
known why this is done. */
- insn = EXIT_BLOCK_PTR->prev_bb->end;
+ insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
if (NEXT_INSN (insn) == 0
|| (GET_CODE (insn) != NOTE
&& GET_CODE (insn) != CODE_LABEL
/* Don't emit a NOTE if it would end up before a BARRIER. */
&& GET_CODE (NEXT_INSN (insn)) != BARRIER))
{
- emit_note_after (NOTE_INSN_DELETED, EXIT_BLOCK_PTR->prev_bb->end);
+ emit_note_after (NOTE_INSN_DELETED, BB_END (EXIT_BLOCK_PTR->prev_bb));
/* Make insn to appear outside BB. */
- EXIT_BLOCK_PTR->prev_bb->end = PREV_INSN (EXIT_BLOCK_PTR->prev_bb->end);
+ BB_END (EXIT_BLOCK_PTR->prev_bb) = PREV_INSN (BB_END (EXIT_BLOCK_PTR->prev_bb));
}
/* Compute INSN_REG_WEIGHT for all blocks. We must do this before
/* Free global data used during insn scheduling. */
void
-sched_finish ()
+sched_finish (void)
{
free (h_i_d);