/* Instruction scheduling pass.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
- 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
#include "system.h"
#include "coretypes.h"
#include "tm.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
-#include "toplev.h"
#include "recog.h"
#include "sched-int.h"
#include "target.h"
+#include "common/common-target.h"
#include "output.h"
#include "params.h"
#include "vecprim.h"
#include "dbgcnt.h"
#include "cfgloop.h"
#include "ira.h"
+#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
+#include "hashtab.h"
#ifdef INSN_SCHEDULING
int issue_rate;
+/* This can be set to true by a backend if the scheduler should not
+ enable a DCE pass. */
+bool sched_no_dce;
+
/* sched-verbose controls the amount of debugging output the
scheduler prints. It is controlled by -fsched-verbose=N:
N>0 and no -DSR : the output is directed to stderr.
N=3: rtl at abort point, control-flow, regions info.
N=5: dependences info. */
-static int sched_verbose_param = 0;
int sched_verbose = 0;
/* Debugging file. All printouts are sent to dump, which is always set,
either to stderr, or to the dump listing file (-dRS). */
FILE *sched_dump = 0;
-/* fix_sched_param() is called from toplev.c upon detection
- of the -fsched-verbose=N option. */
-
-void
-fix_sched_param (const char *param, const char *val)
-{
- if (!strcmp (param, "verbose"))
- sched_verbose_param = atoi (val);
- else
- warning (0, "fix_sched_param: unknown param: %s", param);
-}
-
/* This is a placeholder for the scheduler parameters common
to all schedulers. */
struct common_sched_info_def *common_sched_info;
#define INSN_TICK(INSN) (HID (INSN)->tick)
+#define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
+#define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
#define INTER_TICK(INSN) (HID (INSN)->inter_tick)
+#define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
+#define SHADOW_P(INSN) (HID (INSN)->shadow_p)
/* If INSN_TICK of an instruction is equal to INVALID_TICK,
then it should be recalculated from scratch. */
/* The minimal value of the INSN_TICK of an instruction. */
#define MIN_TICK (-max_insn_queue_index)
-/* Issue points are used to distinguish between instructions in max_issue ().
- For now, all instructions are equally good. */
-#define ISSUE_POINTS(INSN) 1
-
/* List of important notes we must keep around. This is a pointer to the
last element in the list. */
rtx note_list;
/* Scheduling clock. */
static int clock_var;
+/* Clock at which the previous instruction was issued. */
+static int last_clock_var;
+
+/* Set to true if, when queuing a shadow insn, we discover that it would be
+ scheduled too late. */
+static bool must_backtrack;
+
+/* The following variable is the number of essential insns issued on
+ the current cycle. An insn is essential if it changes the
+ processor's state. */
+int cycle_issued_insns;
+
+/* This records the actual schedule. It is built up during the main phase
+ of schedule_block, and afterwards used to reorder the insns in the RTL. */
+static VEC(rtx, heap) *scheduled_insns;
+
static int may_trap_exp (const_rtx, int);
/* Nonzero iff the address is composed of at most 1 register. */
SCHED_PASS_UNKNOWN /* sched_pass_id */
};
-const struct sched_scan_info_def *sched_scan_info;
-
/* Mapping from instruction UID to its Logical UID. */
VEC (int, heap) *sched_luids = NULL;
return haifa_classify_rtx (PATTERN (insn));
}
+/* A structure to record a pair of insns where the first one is a real
+ insn that has delay slots, and the second is its delayed shadow.
+ I1 is scheduled normally and will emit an assembly instruction,
+ while I2 describes the side effect that takes place at the
+ transition between cycles CYCLES and (CYCLES + 1) after I1. */
+struct delay_pair
+{
+ struct delay_pair *next_same_i1;
+ rtx i1, i2;
+ int cycles;
+};
+
+/* Two hash tables to record delay_pairs, one indexed by I1 and the other
+ indexed by I2. */
+static htab_t delay_htab;
+static htab_t delay_htab_i2;
+
+/* Returns a hash value for X (which really is a delay_pair), based on
+ hashing just I1. */
+static hashval_t
+delay_hash_i1 (const void *x)
+{
+ return htab_hash_pointer (((const struct delay_pair *) x)->i1);
+}
+
+/* Returns a hash value for X (which really is a delay_pair), based on
+ hashing just I2. */
+static hashval_t
+delay_hash_i2 (const void *x)
+{
+ return htab_hash_pointer (((const struct delay_pair *) x)->i2);
+}
+
+/* Return nonzero if I1 of pair X is the same as insn Y. */
+static int
+delay_i1_eq (const void *x, const void *y)
+{
+ return ((const struct delay_pair *) x)->i1 == y;
+}
+
+/* Return nonzero if I2 of pair X is the same as insn Y. */
+static int
+delay_i2_eq (const void *x, const void *y)
+{
+ return ((const struct delay_pair *) x)->i2 == y;
+}
+
+/* This function can be called by a port just before it starts the
+ final scheduling pass. It records the fact that an instruction
+ with delay slots has been split into two insns, I1 and I2. The
+ first one will be scheduled normally and initiates the operation.
+ The second one is a shadow which must follow a specific number of
+ CYCLES after I1; its only purpose is to show the side effect that
+ occurs at that cycle in the RTL. If a JUMP_INSN or a CALL_INSN has
+ been split, I1 should be a normal INSN, while I2 retains the
+ original insn type. */
+
+void
+record_delay_slot_pair (rtx i1, rtx i2, int cycles)
+{
+ struct delay_pair *p = XNEW (struct delay_pair);
+ struct delay_pair **slot;
+
+ p->i1 = i1;
+ p->i2 = i2;
+ p->cycles = cycles;
+
+ if (!delay_htab)
+ {
+ delay_htab = htab_create (10, delay_hash_i1, delay_i1_eq, NULL);
+ delay_htab_i2 = htab_create (10, delay_hash_i2, delay_i2_eq, free);
+ }
+ slot = ((struct delay_pair **)
+ htab_find_slot_with_hash (delay_htab, i1, htab_hash_pointer (i1),
+ INSERT));
+ p->next_same_i1 = *slot;
+ *slot = p;
+ slot = ((struct delay_pair **)
+ htab_find_slot_with_hash (delay_htab_i2, i2, htab_hash_pointer (i2),
+ INSERT));
+ *slot = p;
+}
+
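+/* As an illustration, a hypothetical port whose multiply writes its
+ result two cycles after issue could split the insn in its
+ machine-dependent reorg into a real insn I1 and a shadow I2, then call
+
+ record_delay_slot_pair (i1, i2, 2);
+
+ so that the scheduler places I2 at the transition between the second
+ and third cycle after I1. */
+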
+/* For a pair P of insns, return the fixed distance in cycles from the first
+ insn after which the second must be scheduled. */
+static int
+pair_delay (struct delay_pair *p)
+{
+ return p->cycles;
+}
+
+/* Given an insn INSN, add a dependence on its delayed shadow if it
+ has one. Also try to find situations where shadows depend on each other
+ and add dependencies to the real insns to limit the amount of backtracking
+ needed. */
+void
+add_delay_dependencies (rtx insn)
+{
+ struct delay_pair *pair;
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ if (!delay_htab)
+ return;
+
+ pair
+ = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, insn,
+ htab_hash_pointer (insn));
+ if (!pair)
+ return;
+ add_dependence (insn, pair->i1, REG_DEP_ANTI);
+
+ FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
+ {
+ rtx pro = DEP_PRO (dep);
+ struct delay_pair *other_pair
+ = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, pro,
+ htab_hash_pointer (pro));
+ if (!other_pair)
+ continue;
+ if (pair_delay (other_pair) >= pair_delay (pair))
+ {
+ if (sched_verbose >= 4)
+ {
+ fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
+ INSN_UID (other_pair->i1),
+ INSN_UID (pair->i1));
+ fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
+ INSN_UID (pair->i1),
+ INSN_UID (pair->i2),
+ pair_delay (pair));
+ fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
+ INSN_UID (other_pair->i1),
+ INSN_UID (other_pair->i2),
+ pair_delay (other_pair));
+ }
+ add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
+ }
+ }
+}
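+
+/* Example of the heuristic above: if shadow S (delay D after real insn
+ R) uses a value produced by shadow S' (delay D' after real insn R'),
+ and D' >= D, then tick(R) + D > tick(R') + D' forces R' to issue
+ strictly before R. The anti-dependence added above records this
+ necessary ordering directly on the real insns, so the scheduler does
+ not have to discover the conflict later by backtracking. */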
+\f
/* Forward declarations. */
static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
-static void queue_insn (rtx, int);
+static void queue_insn (rtx, int, const char *);
static int schedule_insn (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);
static void ready_add (struct ready_list *, rtx, bool);
static rtx ready_remove_first (struct ready_list *);
+static rtx ready_remove_first_dispatch (struct ready_list *ready);
static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);
-static int choose_ready (struct ready_list *, rtx *);
-
static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);
static void clear_priorities (rtx, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx, rtx);
-#ifdef ENABLE_CHECKING
-static int has_edge_p (VEC(edge,gc) *, int);
-static void check_cfg (rtx, rtx);
-#endif
#endif /* INSN_SCHEDULING */
\f
up. */
bool sched_pressure_p;
-/* Map regno -> its cover class. The map defined only when
+/* Map regno -> its pressure class. The map is defined only when
SCHED_PRESSURE_P is true. */
-enum reg_class *sched_regno_cover_class;
+enum reg_class *sched_regno_pressure_class;
-/* The current register pressure. Only elements corresponding cover
+/* The current register pressure. Only elements corresponding to pressure
classes are defined. */
static int curr_reg_pressure[N_REG_CLASSES];
static void
mark_regno_birth_or_death (int regno, bool birth_p)
{
- enum reg_class cover_class;
+ enum reg_class pressure_class;
- cover_class = sched_regno_cover_class[regno];
+ pressure_class = sched_regno_pressure_class[regno];
if (regno >= FIRST_PSEUDO_REGISTER)
{
- if (cover_class != NO_REGS)
+ if (pressure_class != NO_REGS)
{
if (birth_p)
{
bitmap_set_bit (curr_reg_live, regno);
- curr_reg_pressure[cover_class]
- += ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
+ curr_reg_pressure[pressure_class]
+ += (ira_reg_class_max_nregs
+ [pressure_class][PSEUDO_REGNO_MODE (regno)]);
}
else
{
bitmap_clear_bit (curr_reg_live, regno);
- curr_reg_pressure[cover_class]
- -= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
+ curr_reg_pressure[pressure_class]
+ -= (ira_reg_class_max_nregs
+ [pressure_class][PSEUDO_REGNO_MODE (regno)]);
}
}
}
- else if (cover_class != NO_REGS
+ else if (pressure_class != NO_REGS
&& ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
{
if (birth_p)
{
bitmap_set_bit (curr_reg_live, regno);
- curr_reg_pressure[cover_class]++;
+ curr_reg_pressure[pressure_class]++;
}
else
{
bitmap_clear_bit (curr_reg_live, regno);
- curr_reg_pressure[cover_class]--;
+ curr_reg_pressure[pressure_class]--;
}
}
}
unsigned int j;
bitmap_iterator bi;
- for (i = 0; i < ira_reg_class_cover_size; i++)
- curr_reg_pressure[ira_reg_class_cover[i]] = 0;
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ curr_reg_pressure[ira_pressure_classes[i]] = 0;
bitmap_clear (curr_reg_live);
EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
if (REG_P (x))
{
regno = REGNO (x);
- if (regno >= FIRST_PSEUDO_REGISTER)
- bitmap_set_bit (region_ref_regs, REGNO (x));
+ if (HARD_REGISTER_NUM_P (regno))
+ bitmap_set_range (region_ref_regs, regno,
+ hard_regno_nregs[regno][GET_MODE (x)]);
else
- for (i = hard_regno_nregs[regno][GET_MODE (x)] - 1; i >= 0; i--)
- bitmap_set_bit (region_ref_regs, regno + i);
+ bitmap_set_bit (region_ref_regs, REGNO (x));
return;
}
fmt = GET_RTX_FORMAT (code);
static void
initiate_bb_reg_pressure_info (basic_block bb)
{
- unsigned int i;
+ unsigned int i ATTRIBUTE_UNUSED;
rtx insn;
if (current_nr_blocks > 1)
FOR_BB_INSNS (bb, insn)
- if (INSN_P (insn))
+ if (NONDEBUG_INSN_P (insn))
setup_ref_regs (PATTERN (insn));
initiate_reg_pressure_info (df_get_live_in (bb));
#ifdef EH_RETURN_DATA_REGNO
{
int i;
- for (i = 0; i < ira_reg_class_cover_size; i++)
- saved_reg_pressure[ira_reg_class_cover[i]]
- = curr_reg_pressure[ira_reg_class_cover[i]];
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ saved_reg_pressure[ira_pressure_classes[i]]
+ = curr_reg_pressure[ira_pressure_classes[i]];
bitmap_copy (saved_reg_live, curr_reg_live);
}
{
int i;
- for (i = 0; i < ira_reg_class_cover_size; i++)
- curr_reg_pressure[ira_reg_class_cover[i]]
- = saved_reg_pressure[ira_reg_class_cover[i]];
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ curr_reg_pressure[ira_pressure_classes[i]]
+ = saved_reg_pressure[ira_pressure_classes[i]];
bitmap_copy (curr_reg_live, saved_reg_live);
}
struct reg_use_data *next;
for (next = use->next_regno_use; next != use; next = next->next_regno_use)
- if (QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
+ if (NONDEBUG_INSN_P (next->insn)
+ && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
return false;
return true;
}
/* Print info about the current register pressure and its excess for
- each cover class. */
+ each pressure class. */
static void
print_curr_reg_pressure (void)
{
enum reg_class cl;
fprintf (sched_dump, ";;\t");
- for (i = 0; i < ira_reg_class_cover_size; i++)
+ for (i = 0; i < ira_pressure_classes_num; i++)
{
- cl = ira_reg_class_cover[i];
+ cl = ira_pressure_classes[i];
gcc_assert (curr_reg_pressure[cl] >= 0);
fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
curr_reg_pressure[cl],
fprintf (sched_dump, "\n");
}
-/* Pointer to the last instruction scheduled. Used by rank_for_schedule,
- so that insns independent of the last scheduled insn will be preferred
- over dependent instructions. */
-
+/* Pointer to the last instruction scheduled. */
static rtx last_scheduled_insn;
+/* Pointer to the last nondebug instruction scheduled within the
+ block, or the prev_head of the scheduling block. Used by
+ rank_for_schedule, so that insns independent of the last scheduled
+ insn will be preferred over dependent instructions. */
+static rtx last_nondebug_scheduled_insn;
+
+/* Pointer that iterates through the list of unscheduled insns if we
+ have a dbg_cnt enabled. It always points at an insn prior to the
+ first unscheduled one. */
+static rtx nonscheduled_insns_begin;
+
/* Cached cost of the instruction. Use below function to get cost of the
insn. -1 here means that the field is not initialized. */
#define INSN_COST(INSN) (HID (INSN)->cost)
rtx used = DEP_CON (link);
int cost;
+ if (DEP_COST (link) != UNKNOWN_DEP_COST)
+ return DEP_COST (link);
+
+ if (delay_htab)
+ {
+ struct delay_pair *delay_entry;
+ delay_entry
+ = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, used,
+ htab_hash_pointer (used));
+ if (delay_entry)
+ {
+ if (delay_entry->i1 == insn)
+ {
+ DEP_COST (link) = pair_delay (delay_entry);
+ return DEP_COST (link);
+ }
+ }
+ }
+
/* A USE insn should never require the value used to be computed.
This allows the computation of a function's result and parameter
values to overlap the return and call. We don't care about the
- the dependence cost when only decreasing register pressure. */
+ dependence cost when only decreasing register pressure. */
if (recog_memoized (used) < 0)
{
cost = 0;
cost = 0;
}
+ DEP_COST (link) = cost;
return cost;
}
struct reg_use_data *use;
static int death[N_REG_CLASSES];
+ gcc_checking_assert (!DEBUG_INSN_P (insn));
+
excess_cost_change = 0;
- for (i = 0; i < ira_reg_class_cover_size; i++)
- death[ira_reg_class_cover[i]] = 0;
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ death[ira_pressure_classes[i]] = 0;
for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
if (dying_use_p (use))
{
- cl = sched_regno_cover_class[use->regno];
+ cl = sched_regno_pressure_class[use->regno];
if (use->regno < FIRST_PSEUDO_REGISTER)
death[cl]++;
else
- death[cl] += ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
+ death[cl]
+ += ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
}
pressure_info = INSN_REG_PRESSURE (insn);
max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
- for (i = 0; i < ira_reg_class_cover_size; i++)
+ for (i = 0; i < ira_pressure_classes_num; i++)
{
- cl = ira_reg_class_cover[i];
+ cl = ira_pressure_classes[i];
gcc_assert (curr_reg_pressure[cl] >= 0);
change = (int) pressure_info[i].set_increase - death[cl];
before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
{
rtx tmp = *(const rtx *) y;
rtx tmp2 = *(const rtx *) x;
- rtx last;
int tmp_class, tmp2_class;
int val, priority_val, info_val;
else
return INSN_TICK (tmp) - INSN_TICK (tmp2);
}
+
+ /* If we are doing backtracking in this schedule, prefer insns that
+ have forward dependencies with negative cost against an insn that
+ was already scheduled. */
+ if (current_sched_info->flags & DO_BACKTRACKING)
+ {
+ priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
+ if (priority_val)
+ return priority_val;
+ }
+
/* Prefer insn with higher priority. */
priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
if (flag_sched_rank_heuristic && info_val)
return info_val;
- if (flag_sched_last_insn_heuristic)
- {
- last = last_scheduled_insn;
-
- if (DEBUG_INSN_P (last) && last != current_sched_info->prev_head)
- do
- last = PREV_INSN (last);
- while (!NONDEBUG_INSN_P (last)
- && last != current_sched_info->prev_head);
- }
-
/* Compare insns based on their relation to the last scheduled
non-debug insn. */
- if (flag_sched_last_insn_heuristic && NONDEBUG_INSN_P (last))
+ if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
{
dep_t dep1;
dep_t dep2;
+ rtx last = last_nondebug_scheduled_insn;
/* Classify the instructions into three classes:
1) Data dependent on last scheduled insn.
/* Add INSN to the insn queue so that it can be executed at least
N_CYCLES after the currently executing insn. Preserve insns
- chain for debugging purposes. */
+ chain for debugging purposes. REASON will be printed in debugging
+ output. */
HAIFA_INLINE static void
-queue_insn (rtx insn, int n_cycles)
+queue_insn (rtx insn, int n_cycles, const char *reason)
{
int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
+ int new_tick;
gcc_assert (n_cycles <= max_insn_queue_index);
gcc_assert (!DEBUG_INSN_P (insn));
fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
(*current_sched_info->print_insn) (insn, 0));
- fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
+ fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
}
QUEUE_INDEX (insn) = next_q;
+
+ if (current_sched_info->flags & DO_BACKTRACKING)
+ {
+ new_tick = clock_var + n_cycles;
+ if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
+ INSN_TICK (insn) = new_tick;
+
+ if (INSN_EXACT_TICK (insn) != INVALID_TICK
+ && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
+ {
+ must_backtrack = true;
+ if (sched_verbose >= 2)
+ fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
+ }
+ }
}
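+
+/* For instance, if a shadow insn has INSN_EXACT_TICK == 12 but
+ queue_insn would place it at clock_var + n_cycles == 14, no extension
+ of the current schedule can meet the delay-pair constraint;
+ must_backtrack is set and schedule_block unwinds to the most recent
+ backtrack point. */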
/* Remove INSN from queue. */
gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
QUEUE_INDEX (insn) = QUEUE_READY;
+
+ if (INSN_EXACT_TICK (insn) != INVALID_TICK
+ && INSN_EXACT_TICK (insn) < clock_var)
+ {
+ must_backtrack = true;
+ }
}
/* Remove the element with the highest priority from the ready list and
if (sched_pressure_p)
{
for (i = 0; i < ready->n_ready; i++)
- setup_insn_reg_pressure_info (first[i]);
+ if (!DEBUG_INSN_P (first[i]))
+ setup_insn_reg_pressure_info (first[i]);
}
SCHED_SORT (first, ready->n_ready);
}
fprintf (sched_dump, ";;\tAdvanced a state.\n");
}
-/* Clock at which the previous instruction was issued. */
-static int last_clock_var;
-
/* Update register pressure after scheduling INSN. */
static void
update_register_pressure (rtx insn)
struct reg_use_data *use;
struct reg_set_data *set;
+ gcc_checking_assert (!DEBUG_INSN_P (insn));
+
for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
mark_regno_birth_or_death (use->regno, false);
static int max_reg_pressure[N_REG_CLASSES];
save_reg_pressure ();
- for (i = 0; i < ira_reg_class_cover_size; i++)
- max_reg_pressure[ira_reg_class_cover[i]]
- = curr_reg_pressure[ira_reg_class_cover[i]];
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ max_reg_pressure[ira_pressure_classes[i]]
+ = curr_reg_pressure[ira_pressure_classes[i]];
for (insn = NEXT_INSN (after);
- insn != NULL_RTX && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
+ insn != NULL_RTX && ! BARRIER_P (insn)
+ && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
insn = NEXT_INSN (insn))
if (NONDEBUG_INSN_P (insn))
{
eq_p = true;
- for (i = 0; i < ira_reg_class_cover_size; i++)
+ for (i = 0; i < ira_pressure_classes_num; i++)
{
- p = max_reg_pressure[ira_reg_class_cover[i]];
+ p = max_reg_pressure[ira_pressure_classes[i]];
if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
{
eq_p = false;
INSN_MAX_REG_PRESSURE (insn)[i]
- = max_reg_pressure[ira_reg_class_cover[i]];
+ = max_reg_pressure[ira_pressure_classes[i]];
}
}
if (update_p && eq_p)
break;
update_register_pressure (insn);
- for (i = 0; i < ira_reg_class_cover_size; i++)
- if (max_reg_pressure[ira_reg_class_cover[i]]
- < curr_reg_pressure[ira_reg_class_cover[i]])
- max_reg_pressure[ira_reg_class_cover[i]]
- = curr_reg_pressure[ira_reg_class_cover[i]];
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ if (max_reg_pressure[ira_pressure_classes[i]]
+ < curr_reg_pressure[ira_pressure_classes[i]])
+ max_reg_pressure[ira_pressure_classes[i]]
+ = curr_reg_pressure[ira_pressure_classes[i]];
}
restore_reg_pressure ();
}
int i;
int before[N_REG_CLASSES];
- for (i = 0; i < ira_reg_class_cover_size; i++)
- before[i] = curr_reg_pressure[ira_reg_class_cover[i]];
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ before[i] = curr_reg_pressure[ira_pressure_classes[i]];
update_register_pressure (insn);
- for (i = 0; i < ira_reg_class_cover_size; i++)
- if (curr_reg_pressure[ira_reg_class_cover[i]] != before[i])
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
break;
- if (i < ira_reg_class_cover_size)
+ if (i < ira_pressure_classes_num)
setup_insn_max_reg_pressure (insn, true);
}
initiate_bb_reg_pressure_info (bb);
setup_insn_max_reg_pressure (after, false);
}
+\f
+/* A structure that holds local state for the loop in schedule_block. */
+struct sched_block_state
+{
+ /* True if no real insns have been scheduled in the current cycle. */
+ bool first_cycle_insn_p;
+ /* True if a shadow insn has been scheduled in the current cycle, which
+ means that no more normal insns can be issued. */
+ bool shadows_only_p;
+ /* Initialized with the machine's issue rate every cycle, and updated
+ by calls to the variable_issue hook. */
+ int can_issue_more;
+};
/* INSN is the "currently executing insn". Launch each insn which was
waiting on INSN. READY is the ready list which contains the insns
if (pressure_info != NULL)
{
fputc (':', sched_dump);
- for (i = 0; i < ira_reg_class_cover_size; i++)
+ for (i = 0; i < ira_pressure_classes_num; i++)
fprintf (sched_dump, "%s%+d(%d)",
- reg_class_names[ira_reg_class_cover[i]],
+ reg_class_names[ira_pressure_classes[i]],
pressure_info[i].set_increase, pressure_info[i].change);
}
fputc ('\n', sched_dump);
}
- if (sched_pressure_p)
+ if (sched_pressure_p && !DEBUG_INSN_P (insn))
update_reg_and_insn_max_reg_pressure (insn);
/* Scheduling instruction should have all its dependencies resolved and
sd_iterator_cond (&sd_it, &dep);)
{
rtx dbg = DEP_PRO (dep);
+ struct reg_use_data *use, *next;
gcc_assert (DEBUG_INSN_P (dbg));
INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
df_insn_rescan (dbg);
+ /* Unknown location doesn't use any registers. */
+ for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
+ {
+ struct reg_use_data *prev = use;
+
+ /* Remove use from the cyclic next_regno_use chain first. */
+ while (prev->next_regno_use != use)
+ prev = prev->next_regno_use;
+ prev->next_regno_use = use->next_regno_use;
+ next = use->next_insn_use;
+ free (use);
+ }
+ INSN_REG_USE_LIST (dbg) = NULL;
+
/* We delete rather than resolve these deps, otherwise we
crash in sched_free_deps(), because forward deps are
expected to be released before backward deps. */
}
}
- /* This is the place where scheduler doesn't *basically* need backward and
- forward dependencies for INSN anymore. Nevertheless they are used in
- heuristics in rank_for_schedule (), early_queue_to_ready () and in
- some targets (e.g. rs6000). Thus the earliest place where we *can*
- remove dependencies is after targetm.sched.md_finish () call in
- schedule_block (). But, on the other side, the safest place to remove
- dependencies is when we are finishing scheduling entire region. As we
- don't generate [many] dependencies during scheduling itself, we won't
- need memory until beginning of next region.
- Bottom line: Dependencies are removed for all insns in the end of
- scheduling the region. */
-
/* Annotate the instruction with issue information -- TImode
indicates that the instruction is expected not to be able
to issue on the same cycle as the previous insn. A machine
void
remove_notes (rtx head, rtx tail)
{
- rtx next_tail, prev, insn, next;
+ rtx next_tail, insn, next;
note_list = 0;
if (head == tail && !INSN_P (head))
return;
next_tail = NEXT_INSN (tail);
- prev = PREV_INSN (head);
for (insn = head; insn != next_tail; insn = next)
{
next = NEXT_INSN (insn);
if (!NOTE_P (insn))
- {
- prev = insn;
- continue;
- }
+ continue;
switch (NOTE_KIND (insn))
{
case NOTE_INSN_BASIC_BLOCK:
- prev = insn;
continue;
case NOTE_INSN_EPILOGUE_BEG:
}
}
+/* A structure to record enough data to allow us to backtrack the scheduler to
+ a previous state. */
+struct haifa_saved_data
+{
+ /* Next entry on the list. */
+ struct haifa_saved_data *next;
+
+ /* Backtracking is associated with scheduling insns that have delay slots.
+ DELAY_PAIR points to the structure that contains the insns involved, and
+ the number of cycles between them. */
+ struct delay_pair *delay_pair;
+
+ /* Data used by the frontend (e.g. sched-ebb or sched-rgn). */
+ void *fe_saved_data;
+ /* Data used by the backend. */
+ void *be_saved_data;
+
+ /* Copies of global state. */
+ int clock_var, last_clock_var;
+ struct ready_list ready;
+ state_t curr_state;
+
+ rtx last_scheduled_insn;
+ rtx last_nondebug_scheduled_insn;
+ int cycle_issued_insns;
+
+ /* Copies of state used in the inner loop of schedule_block. */
+ struct sched_block_state sched_block;
+
+ /* We don't need to save q_ptr, as its value is arbitrary and we can set it
+ to 0 when restoring. */
+ int q_size;
+ rtx *insn_queue;
+};
+
+/* A record, in reverse order, of all scheduled insns which have delay slots
+ and may require backtracking. */
+static struct haifa_saved_data *backtrack_queue;
+
+/* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
+ to SET_P. */
+static void
+mark_backtrack_feeds (rtx insn, int set_p)
+{
+ sd_iterator_def sd_it;
+ dep_t dep;
+ FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
+ {
+ FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
+ }
+}
+
+/* Make a copy of the INSN_LIST list LINK and return it. */
+static rtx
+copy_insn_list (rtx link)
+{
+ rtx new_queue;
+ rtx *pqueue = &new_queue;
+
+ for (; link; link = XEXP (link, 1))
+ {
+ rtx x = XEXP (link, 0);
+ rtx newlink = alloc_INSN_LIST (x, NULL);
+ *pqueue = newlink;
+ pqueue = &XEXP (newlink, 1);
+ }
+ *pqueue = NULL_RTX;
+ return new_queue;
+}
+
+/* Save the current scheduler state so that we can backtrack to it
+ later if necessary. PAIR gives the insns that make it necessary to
+ save this point. SCHED_BLOCK is the local state of schedule_block
+ that needs to be saved. */
+static void
+save_backtrack_point (struct delay_pair *pair,
+ struct sched_block_state sched_block)
+{
+ int i;
+ struct haifa_saved_data *save = XNEW (struct haifa_saved_data);
+
+ save->curr_state = xmalloc (dfa_state_size);
+ memcpy (save->curr_state, curr_state, dfa_state_size);
+
+ save->ready.first = ready.first;
+ save->ready.n_ready = ready.n_ready;
+ save->ready.n_debug = ready.n_debug;
+ save->ready.veclen = ready.veclen;
+ save->ready.vec = XNEWVEC (rtx, ready.veclen);
+ memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
+
+ save->insn_queue = XNEWVEC (rtx, max_insn_queue_index + 1);
+ save->q_size = q_size;
+ for (i = 0; i <= max_insn_queue_index; i++)
+ {
+ int q = NEXT_Q_AFTER (q_ptr, i);
+ save->insn_queue[i] = copy_insn_list (insn_queue[q]);
+ }
+
+ save->clock_var = clock_var;
+ save->last_clock_var = last_clock_var;
+ save->cycle_issued_insns = cycle_issued_insns;
+ save->last_scheduled_insn = last_scheduled_insn;
+ save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
+
+ save->sched_block = sched_block;
+
+ if (current_sched_info->save_state)
+ save->fe_saved_data = (*current_sched_info->save_state) ();
+
+ if (targetm.sched.alloc_sched_context)
+ {
+ save->be_saved_data = targetm.sched.alloc_sched_context ();
+ targetm.sched.init_sched_context (save->be_saved_data, false);
+ }
+ else
+ save->be_saved_data = NULL;
+
+ save->delay_pair = pair;
+
+ save->next = backtrack_queue;
+ backtrack_queue = save;
+
+ while (pair)
+ {
+ mark_backtrack_feeds (pair->i2, 1);
+ INSN_TICK (pair->i2) = INVALID_TICK;
+ INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
+ SHADOW_P (pair->i2) = true;
+ pair = pair->next_same_i1;
+ }
+}
+
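+/* To summarize the flow: schedule_block calls save_backtrack_point when
+ it issues the I1 of a delay pair; if queue_insn or ready_add later
+ finds that a shadow would miss its INSN_EXACT_TICK, must_backtrack is
+ set, unschedule_insns_until rewinds the scheduled_insns vector, and
+ restore_last_backtrack_point below reinstates the saved ready list,
+ queue, DFA state and target context. */
+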
+/* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
+ Restore their dependencies to an unresolved state, and mark them as
+ queued nowhere. */
+
+static void
+unschedule_insns_until (rtx insn)
+{
+ for (;;)
+ {
+ rtx last;
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ last = VEC_pop (rtx, scheduled_insns);
+
+ /* This will be changed by restore_backtrack_point if the insn is in
+ any queue. */
+ QUEUE_INDEX (last) = QUEUE_NOWHERE;
+ if (last != insn)
+ INSN_TICK (last) = INVALID_TICK;
+
+ for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
+ sd_iterator_cond (&sd_it, &dep);)
+ {
+ rtx con = DEP_CON (dep);
+ TODO_SPEC (con) |= HARD_DEP;
+ INSN_TICK (con) = INVALID_TICK;
+ sd_unresolve_dep (sd_it);
+ }
+
+ if (last == insn)
+ break;
+ }
+}
+
+/* Restore scheduler state from the topmost entry on the backtracking queue.
+ PSCHED_BLOCK_P points to the local data of schedule_block that we must
+ overwrite with the saved data.
+ The caller must already have called unschedule_insns_until. */
+
+static void
+restore_last_backtrack_point (struct sched_block_state *psched_block)
+{
+ rtx link;
+ int i;
+ struct haifa_saved_data *save = backtrack_queue;
+
+ backtrack_queue = save->next;
+
+ if (current_sched_info->restore_state)
+ (*current_sched_info->restore_state) (save->fe_saved_data);
+
+ if (targetm.sched.alloc_sched_context)
+ {
+ targetm.sched.set_sched_context (save->be_saved_data);
+ targetm.sched.free_sched_context (save->be_saved_data);
+ }
+
+ /* Clear the QUEUE_INDEX of everything in the ready list or one
+ of the queues. */
+ if (ready.n_ready > 0)
+ {
+ rtx *first = ready_lastpos (&ready);
+ for (i = 0; i < ready.n_ready; i++)
+ {
+ QUEUE_INDEX (first[i]) = QUEUE_NOWHERE;
+ INSN_TICK (first[i]) = INVALID_TICK;
+ }
+ }
+ for (i = 0; i <= max_insn_queue_index; i++)
+ {
+ int q = NEXT_Q_AFTER (q_ptr, i);
+
+ for (link = insn_queue[q]; link; link = XEXP (link, 1))
+ {
+ rtx x = XEXP (link, 0);
+ QUEUE_INDEX (x) = QUEUE_NOWHERE;
+ INSN_TICK (x) = INVALID_TICK;
+ }
+ free_INSN_LIST_list (&insn_queue[q]);
+ }
+
+ free (ready.vec);
+ ready = save->ready;
+
+ if (ready.n_ready > 0)
+ {
+ rtx *first = ready_lastpos (&ready);
+ for (i = 0; i < ready.n_ready; i++)
+ {
+ QUEUE_INDEX (first[i]) = QUEUE_READY;
+ INSN_TICK (first[i]) = save->clock_var;
+ }
+ }
+
+ q_ptr = 0;
+ q_size = save->q_size;
+ for (i = 0; i <= max_insn_queue_index; i++)
+ {
+ int q = NEXT_Q_AFTER (q_ptr, i);
+
+ insn_queue[q] = save->insn_queue[q];
+
+ for (link = insn_queue[q]; link; link = XEXP (link, 1))
+ {
+ rtx x = XEXP (link, 0);
+ QUEUE_INDEX (x) = i;
+ INSN_TICK (x) = save->clock_var + i;
+ }
+ }
+ free (save->insn_queue);
+
+ clock_var = save->clock_var;
+ last_clock_var = save->last_clock_var;
+ cycle_issued_insns = save->cycle_issued_insns;
+ last_scheduled_insn = save->last_scheduled_insn;
+ last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
+
+ *psched_block = save->sched_block;
+
+ memcpy (curr_state, save->curr_state, dfa_state_size);
+ free (save->curr_state);
+
+ mark_backtrack_feeds (save->delay_pair->i2, 0);
+
+ free (save);
+
+ for (save = backtrack_queue; save; save = save->next)
+ {
+ mark_backtrack_feeds (save->delay_pair->i2, 1);
+ }
+}
+
+/* Discard all data associated with the topmost entry in the backtrack
+ queue. If RESET_TICK is false, we just want to free the data. If true,
+ we are doing this because we discovered a reason to backtrack. In the
+ latter case, also reset the INSN_TICK for the shadow insn. */
+static void
+free_topmost_backtrack_point (bool reset_tick)
+{
+ struct haifa_saved_data *save = backtrack_queue;
+ int i;
+
+ backtrack_queue = save->next;
+
+ if (reset_tick)
+ {
+ struct delay_pair *pair = save->delay_pair;
+ while (pair)
+ {
+ INSN_TICK (pair->i2) = INVALID_TICK;
+ INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
+ pair = pair->next_same_i1;
+ }
+ }
+ if (targetm.sched.free_sched_context)
+ targetm.sched.free_sched_context (save->be_saved_data);
+ if (current_sched_info->restore_state)
+ free (save->fe_saved_data);
+ for (i = 0; i <= max_insn_queue_index; i++)
+ free_INSN_LIST_list (&save->insn_queue[i]);
+ free (save->insn_queue);
+ free (save->curr_state);
+ free (save->ready.vec);
+ free (save);
+}
+
+/* Free the entire backtrack queue. */
+static void
+free_backtrack_queue (void)
+{
+ while (backtrack_queue)
+ free_topmost_backtrack_point (false);
+}
+
+/* Compute INSN_TICK_ESTIMATE for INSN. PROCESSED is a bitmap of
+ instructions we've previously encountered, a set bit prevents
+ recursion. BUDGET is a limit on how far ahead we look; it is
+ reduced on recursive calls. Return true if we produced a good
+ estimate, or false if we exceeded the budget. */
+static bool
+estimate_insn_tick (bitmap processed, rtx insn, int budget)
+{
+ sd_iterator_def sd_it;
+ dep_t dep;
+ int earliest = INSN_TICK (insn);
+
+ FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
+ {
+ rtx pro = DEP_PRO (dep);
+ int t;
+
+ if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
+ gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
+ else
+ {
+ int cost = dep_cost (dep);
+ if (cost >= budget)
+ return false;
+ if (!bitmap_bit_p (processed, INSN_LUID (pro)))
+ {
+ if (!estimate_insn_tick (processed, pro, budget - cost))
+ return false;
+ }
+ gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
+ t = INSN_TICK_ESTIMATE (pro) + cost;
+ if (earliest == INVALID_TICK || t > earliest)
+ earliest = t;
+ }
+ }
+ bitmap_set_bit (processed, INSN_LUID (insn));
+ INSN_TICK_ESTIMATE (insn) = earliest;
+ return true;
+}
+
+/* Examine the pair of insns in P, and estimate (optimistically, assuming
+ infinite resources) the cycle in which the delayed shadow can be issued.
+ Return the number of cycles that must pass before the real insn can be
+ issued in order to meet this constraint. */
+static int
+estimate_shadow_tick (struct delay_pair *p)
+{
+ bitmap_head processed;
+ int t;
+ bool cutoff;
+ bitmap_initialize (&processed, 0);
+
+ cutoff = !estimate_insn_tick (&processed, p->i2,
+ max_insn_queue_index + pair_delay (p));
+ bitmap_clear (&processed);
+ if (cutoff)
+ return max_insn_queue_index;
+ t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
+ if (t > 0)
+ return t;
+ return 0;
+}
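+
+/* Worked example: with clock_var == 5, a pair delay of 3 and an
+ optimistic shadow estimate INSN_TICK_ESTIMATE (p->i2) == 10, we get
+ t = 10 - (5 + 3 + 1) = 1, so the real insn must be held back at least
+ one cycle before its shadow's constraint can be met. */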
/* Return the head and tail pointers of ebb starting at BEG and ending
at END. */
beg_head = NEXT_INSN (beg_head);
while (beg_head != beg_tail)
- if (NOTE_P (beg_head) || BOUNDARY_DEBUG_INSN_P (beg_head))
+ if (NOTE_P (beg_head))
beg_head = NEXT_INSN (beg_head);
+ else if (DEBUG_INSN_P (beg_head))
+ {
+ rtx note, next;
+
+ for (note = NEXT_INSN (beg_head);
+ note != beg_tail;
+ note = next)
+ {
+ next = NEXT_INSN (note);
+ if (NOTE_P (note))
+ {
+ if (sched_verbose >= 9)
+ fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
+
+ reorder_insns_nobb (note, note, PREV_INSN (beg_head));
+
+ if (BLOCK_FOR_INSN (note) != beg)
+ df_insn_change_bb (note, beg);
+ }
+ else if (!DEBUG_INSN_P (note))
+ break;
+ }
+
+ break;
+ }
else
break;
end_head = NEXT_INSN (end_head);
while (end_head != end_tail)
- if (NOTE_P (end_tail) || BOUNDARY_DEBUG_INSN_P (end_tail))
+ if (NOTE_P (end_tail))
end_tail = PREV_INSN (end_tail);
+ else if (DEBUG_INSN_P (end_tail))
+ {
+ rtx note, prev;
+
+ for (note = PREV_INSN (end_tail);
+ note != end_head;
+ note = prev)
+ {
+ prev = PREV_INSN (note);
+ if (NOTE_P (note))
+ {
+ if (sched_verbose >= 9)
+ fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
+
+ reorder_insns_nobb (note, note, end_tail);
+
+ if (end_tail == BB_END (end))
+ BB_END (end) = note;
+
+ if (BLOCK_FOR_INSN (note) != end)
+ df_insn_change_bb (note, end);
+ }
+ else if (!DEBUG_INSN_P (note))
+ break;
+ }
+
+ break;
+ }
else
break;
{
while (head != NEXT_INSN (tail))
{
- if (!NOTE_P (head) && !LABEL_P (head)
- && !BOUNDARY_DEBUG_INSN_P (head))
+ if (!NOTE_P (head) && !LABEL_P (head))
return 0;
head = NEXT_INSN (head);
}
if (dbg_cnt (sched_insn) == false)
{
- /* If debug counter is activated do not requeue insn next after
- last_scheduled_insn. */
- skip_insn = next_nonnote_insn (last_scheduled_insn);
- while (skip_insn && DEBUG_INSN_P (skip_insn))
- skip_insn = next_nonnote_insn (skip_insn);
+ /* If the debug counter is activated, do not requeue the first
+ nonscheduled insn. */
+ skip_insn = nonscheduled_insns_begin;
+ do
+ {
+ skip_insn = next_nonnote_nondebug_insn (skip_insn);
+ }
+ while (QUEUE_INDEX (skip_insn) == QUEUE_SCHEDULED);
}
else
skip_insn = NULL_RTX;
&& ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
&& !SCHED_GROUP_P (insn)
&& insn != skip_insn)
- {
- if (sched_verbose >= 2)
- fprintf (sched_dump, "requeued because ready full\n");
- queue_insn (insn, 1);
- }
+ queue_insn (insn, 1, "ready full");
else
{
ready_add (ready, insn, false);
static bool
ok_for_early_queue_removal (rtx insn)
{
- int n_cycles;
- rtx prev_insn = last_scheduled_insn;
-
if (targetm.sched.is_costly_dependence)
{
+ rtx prev_insn;
+ int n_cycles;
+ int i = VEC_length (rtx, scheduled_insns);
for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
{
- for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
+ while (i-- > 0)
{
int cost;
- if (prev_insn == current_sched_info->prev_head)
- {
- prev_insn = NULL;
- break;
- }
+ prev_insn = VEC_index (rtx, scheduled_insns, i);
if (!NOTE_P (prev_insn))
{
break;
}
- if (!prev_insn)
+ if (i == 0)
break;
- prev_insn = PREV_INSN (prev_insn);
}
}
return false;
}
+/* Define type for target data used in multipass scheduling. */
+#ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
+# define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
+#endif
+typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
+
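+/* A target that needs its own bookkeeping during the multipass
+ lookahead can override this in its target header, e.g. a hypothetical
+ port might use
+
+ #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T struct port_choice_data
+
+ and manage the data through the first_cycle_multipass_begin / _issue /
+ _backtrack / _end hooks invoked from max_issue below. */
+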
/* The following structure describes an entry of the stack of choices. */
struct choice_entry
{
int n;
/* State after issuing the insn. */
state_t state;
+ /* Target-specific data. */
+ first_cycle_multipass_data_t target_data;
};
/* The following array is used to implement a stack of choices used in
function max_issue. */
static struct choice_entry *choice_stack;
-/* The following variable value is number of essential insns issued on
- the current cycle. An insn is essential one if it changes the
- processors state. */
-int cycle_issued_insns;
-
/* This holds the value of the target dfa_lookahead hook. */
int dfa_lookahead;
is the insn with the best rank (the first insn in READY). To
do this, the function tries different samples of ready insns. READY
is the current queue `ready'. Global array READY_TRY reflects which
- insns are already issued in this try. MAX_POINTS is the sum of points
- of all instructions in READY. The function stops immediately,
+ insns are already issued in this try. The function stops immediately
if it reaches a solution in which all instructions can be issued.
INDEX will contain the index of the best insn in READY. The following
function is used only for first cycle multipass scheduling.
CLOBBERs, etc must be filtered elsewhere. */
int
max_issue (struct ready_list *ready, int privileged_n, state_t state,
- int *index)
+ bool first_cycle_insn_p, int *index)
{
- int n, i, all, n_ready, best, delay, tries_num, max_points;
+ int n, i, all, n_ready, best, delay, tries_num;
int more_issue;
struct choice_entry *top;
rtx insn;
}
- /* Init max_points. */
- max_points = 0;
more_issue = issue_rate - cycle_issued_insns;
-
- /* ??? We used to assert here that we never issue more insns than issue_rate.
- However, some targets (e.g. MIPS/SB1) claim lower issue rate than can be
- achieved to get better performance. Until these targets are fixed to use
- scheduler hooks to manipulate insns priority instead, the assert should
- be disabled.
-
- gcc_assert (more_issue >= 0); */
-
- for (i = 0; i < n_ready; i++)
- if (!ready_try [i])
- {
- if (more_issue-- > 0)
- max_points += ISSUE_POINTS (ready_element (ready, i));
- else
- break;
- }
+ gcc_assert (more_issue >= 0);
/* The number of the issued insns in the best solution. */
best = 0;
memcpy (top->state, state, dfa_state_size);
top->rest = dfa_lookahead;
top->n = 0;
+ if (targetm.sched.first_cycle_multipass_begin)
+ targetm.sched.first_cycle_multipass_begin (&top->target_data,
+ ready_try, n_ready,
+ first_cycle_insn_p);
/* Count the number of the insns to search among. */
for (all = i = 0; i < n_ready; i++)
if (/* If we've reached a dead end or searched enough of what we have
been asked... */
top->rest == 0
- /* Or have nothing else to try. */
- || i >= n_ready)
+ /* or have nothing else to try... */
+ || i >= n_ready
+ /* or should not issue more. */
+ || top->n >= more_issue)
{
/* ??? (... || i == n_ready). */
gcc_assert (i <= n_ready);
+ /* We should not issue more than issue_rate instructions. */
+ gcc_assert (top->n <= more_issue);
+
if (top == choice_stack)
break;
{
n = privileged_n;
/* Try to find issued privileged insn. */
- while (n && !ready_try[--n]);
+ while (n && !ready_try[--n])
+ ;
}
if (/* If all insns are equally good... */
/* This is the index of the insn issued first in this
solution. */
*index = choice_stack [1].index;
- if (top->n == max_points || best == all)
+ if (top->n == more_issue || best == all)
break;
}
}
/* Backtrack. */
ready_try [i] = 0;
+
+ if (targetm.sched.first_cycle_multipass_backtrack)
+ targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
+ ready_try, n_ready);
+
top--;
memcpy (state, top->state, dfa_state_size);
}
{
if (state_dead_lock_p (state)
|| insn_finishes_cycle_p (insn))
- /* We won't issue any more instructions in the next
- choice_state. */
+ /* We won't issue any more instructions in the next
+ choice_state. */
top->rest = 0;
else
top->rest--;
n = top->n;
if (memcmp (top->state, state, dfa_state_size) != 0)
- n += ISSUE_POINTS (insn);
+ n++;
/* Advance to the next choice_entry. */
top++;
top->index = i;
top->n = n;
memcpy (top->state, state, dfa_state_size);
-
ready_try [i] = 1;
+
+ if (targetm.sched.first_cycle_multipass_issue)
+ targetm.sched.first_cycle_multipass_issue (&top->target_data,
+ ready_try, n_ready,
+ insn,
+ &((top - 1)
+ ->target_data));
+
i = -1;
}
}
i++;
}
+ if (targetm.sched.first_cycle_multipass_end)
+ targetm.sched.first_cycle_multipass_end (best != 0
+ ? &choice_stack[1].target_data
+ : NULL);
+
/* Restore the original state of the DFA. */
memcpy (state, choice_stack->state, dfa_state_size);
0 if INSN_PTR is set to point to the desirable insn,
1 if choose_ready () should be restarted without advancing the cycle. */
static int
-choose_ready (struct ready_list *ready, rtx *insn_ptr)
+choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
+ rtx *insn_ptr)
{
int lookahead;
if (dbg_cnt (sched_insn) == false)
{
- rtx insn;
-
- insn = next_nonnote_insn (last_scheduled_insn);
+ rtx insn = nonscheduled_insns_begin;
+ do
+ {
+ insn = next_nonnote_insn (insn);
+ }
+ while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
if (QUEUE_INDEX (insn) == QUEUE_READY)
/* INSN is in the ready_list. */
{
+ nonscheduled_insns_begin = insn;
ready_remove_insn (insn);
*insn_ptr = insn;
return 0;
if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
|| DEBUG_INSN_P (ready_element (ready, 0)))
{
- *insn_ptr = ready_remove_first (ready);
+ if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
+ *insn_ptr = ready_remove_first_dispatch (ready);
+ else
+ *insn_ptr = ready_remove_first (ready);
+
return 0;
}
else
{
insn = ready_element (ready, i);
-#ifdef ENABLE_CHECKING
/* If this insn is recognizable we should have already
recognized it earlier.
??? Not very clear where this is supposed to be done.
See dep_cost_1. */
- gcc_assert (INSN_CODE (insn) >= 0
- || recog_memoized (insn) < 0);
-#endif
+ gcc_checking_assert (INSN_CODE (insn) >= 0
+ || recog_memoized (insn) < 0);
ready_try [i]
= (/* INSN_CODE check can be omitted here as it is also done later
(insn)));
}
- if (max_issue (ready, 1, curr_state, &index) == 0)
+ if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
{
*insn_ptr = ready_remove_first (ready);
if (sched_verbose >= 4)
}
}
+/* This function is called when we have successfully scheduled a
+ block. It uses the schedule stored in the scheduled_insns vector
+ to rearrange the RTL. PREV_HEAD is used as the anchor to which we
+ append the scheduled insns; TAIL is the insn after the scheduled
+ block. TARGET_BB is the argument passed to schedule_block. */
+
+static void
+commit_schedule (rtx prev_head, rtx tail, basic_block *target_bb)
+{
+ unsigned int i;
+ rtx insn;
+
+ last_scheduled_insn = prev_head;
+ for (i = 0;
+ VEC_iterate (rtx, scheduled_insns, i, insn);
+ i++)
+ {
+ if (control_flow_insn_p (last_scheduled_insn)
+ || current_sched_info->advance_target_bb (*target_bb, insn))
+ {
+ *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
+
+ if (sched_verbose)
+ {
+ rtx x;
+
+ x = next_real_insn (last_scheduled_insn);
+ gcc_assert (x);
+ dump_new_block_header (1, *target_bb, x, tail);
+ }
+
+ last_scheduled_insn = bb_note (*target_bb);
+ }
+
+ if (current_sched_info->begin_move_insn)
+ (*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
+ move_insn (insn, last_scheduled_insn,
+ current_sched_info->next_tail);
+ if (!DEBUG_INSN_P (insn))
+ reemit_notes (insn);
+ last_scheduled_insn = insn;
+ }
+
+ VEC_truncate (rtx, scheduled_insns, 0);
+}
+
+/* Examine all insns on the ready list and queue those which can't be
+ issued in this cycle. TEMP_STATE is temporary scheduler state we
+ can use as scratch space. If FIRST_CYCLE_INSN_P is true, no insns
+ have been issued for the current cycle, which means it is valid to
+ issue an asm statement.
+
+ If SHADOWS_ONLY_P is true, we eliminate all real insns and only
+ leave those for which SHADOW_P is true. */
+
+static void
+prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
+ bool shadows_only_p)
+{
+ int i;
+
+ restart:
+ for (i = 0; i < ready.n_ready; i++)
+ {
+ rtx insn = ready_element (&ready, i);
+ int cost = 0;
+ const char *reason = "resource conflict";
+
+ if (shadows_only_p && !DEBUG_INSN_P (insn) && !SHADOW_P (insn))
+ {
+ cost = 1;
+ reason = "not a shadow";
+ }
+ else if (recog_memoized (insn) < 0)
+ {
+ if (!first_cycle_insn_p
+ && (GET_CODE (PATTERN (insn)) == ASM_INPUT
+ || asm_noperands (PATTERN (insn)) >= 0))
+ cost = 1;
+ reason = "asm";
+ }
+ else if (sched_pressure_p)
+ cost = 0;
+ else
+ {
+ int delay_cost = 0;
+
+ if (delay_htab)
+ {
+ struct delay_pair *delay_entry;
+ delay_entry
+ = (struct delay_pair *)htab_find_with_hash (delay_htab, insn,
+ htab_hash_pointer (insn));
+ while (delay_entry && delay_cost == 0)
+ {
+ delay_cost = estimate_shadow_tick (delay_entry);
+ if (delay_cost > max_insn_queue_index)
+ delay_cost = max_insn_queue_index;
+ delay_entry = delay_entry->next_same_i1;
+ }
+ }
+
+ memcpy (temp_state, curr_state, dfa_state_size);
+ cost = state_transition (temp_state, insn);
+ if (cost < 0)
+ cost = 0;
+ else if (cost == 0)
+ cost = 1;
+ if (cost < delay_cost)
+ {
+ cost = delay_cost;
+ reason = "shadow tick";
+ }
+ }
+ if (cost >= 1)
+ {
+ ready_remove (&ready, i);
+ queue_insn (insn, cost, reason);
+ goto restart;
+ }
+ }
+}
+
+/* Called when we detect that the schedule is impossible. We examine the
+ backtrack queue to find the earliest insn that caused this condition. */
+
+static struct haifa_saved_data *
+verify_shadows (void)
+{
+ struct haifa_saved_data *save, *earliest_fail = NULL;
+ for (save = backtrack_queue; save; save = save->next)
+ {
+ int t;
+ struct delay_pair *pair = save->delay_pair;
+ rtx i1 = pair->i1;
+
+ for (; pair; pair = pair->next_same_i1)
+ {
+ rtx i2 = pair->i2;
+
+ if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
+ continue;
+
+ t = INSN_TICK (i1) + pair_delay (pair);
+ if (t < clock_var)
+ {
+ if (sched_verbose >= 2)
+ fprintf (sched_dump,
+ ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
+ ", not ready\n",
+ INSN_UID (pair->i1), INSN_UID (pair->i2),
+ INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
+ earliest_fail = save;
+ break;
+ }
+ if (QUEUE_INDEX (i2) >= 0)
+ {
+ int queued_for = INSN_TICK (i2);
+
+ if (t < queued_for)
+ {
+ if (sched_verbose >= 2)
+ fprintf (sched_dump,
+ ";;\t\tfailed delay requirements for %d/%d"
+ " (%d->%d), queued too late\n",
+ INSN_UID (pair->i1), INSN_UID (pair->i2),
+ INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
+ earliest_fail = save;
+ break;
+ }
+ }
+ }
+ }
+
+ return earliest_fail;
+}
+
/* Use forward list scheduling to rearrange insns of block pointed to by
TARGET_BB, possibly bringing insns from subsequent blocks in the same
region. */
void
schedule_block (basic_block *target_bb)
{
- int i, first_cycle_insn_p;
- int can_issue_more;
+ int i;
+ struct sched_block_state ls;
state_t temp_state = NULL; /* It is used for multipass scheduling. */
int sort_p, advance, start_clock_var;
haifa_recovery_bb_recently_added_p = false;
+ backtrack_queue = NULL;
+
/* Debug info. */
if (sched_verbose)
dump_new_block_header (0, *target_bb, head, tail);
/* It is used for first cycle multipass scheduling. */
temp_state = alloca (dfa_state_size);
- if (targetm.sched.md_init)
- targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
+ if (targetm.sched.init)
+ targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
/* We start inserting insns after PREV_HEAD. */
- last_scheduled_insn = prev_head;
+ last_scheduled_insn = nonscheduled_insns_begin = prev_head;
+ last_nondebug_scheduled_insn = NULL_RTX;
gcc_assert ((NOTE_P (last_scheduled_insn)
- || BOUNDARY_DEBUG_INSN_P (last_scheduled_insn))
+ || DEBUG_INSN_P (last_scheduled_insn))
&& BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
/* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
/* Delay all insns past it for 1 cycle. If debug counter is
activated, make an exception for the insn right after
- last_scheduled_insn. */
+ nonscheduled_insns_begin. */
{
rtx skip_insn;
if (dbg_cnt (sched_insn) == false)
- skip_insn = next_nonnote_insn (last_scheduled_insn);
+ skip_insn = next_nonnote_insn (nonscheduled_insns_begin);
else
skip_insn = NULL_RTX;
insn = ready_remove (&ready, i);
if (insn != skip_insn)
- queue_insn (insn, 1);
+ queue_insn (insn, 1, "list truncated");
}
+ if (skip_insn)
+ ready_add (&ready, skip_insn, true);
}
}
advance = 0;
+ gcc_assert (VEC_length (rtx, scheduled_insns) == 0);
sort_p = TRUE;
+ must_backtrack = false;
+
/* Loop until all the insns in BB are scheduled. */
while ((*current_sched_info->schedule_more_p) ())
{
}
while (advance > 0);
- if (sort_p)
- {
- /* Sort the ready list based on priority. */
- ready_sort (&ready);
-
- if (sched_verbose >= 2)
- {
- fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
- debug_ready_list (&ready);
- }
- }
+ if (ready.n_ready > 0)
+ prune_ready_list (temp_state, true, false);
+ if (ready.n_ready == 0)
+ continue;
+ if (must_backtrack)
+ goto do_backtrack;
- /* We don't want md sched reorder to even see debug isns, so put
- them out right away. */
- if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
+ ls.first_cycle_insn_p = true;
+ ls.shadows_only_p = false;
+ cycle_issued_insns = 0;
+ ls.can_issue_more = issue_rate;
+ for (;;)
{
- if (control_flow_insn_p (last_scheduled_insn))
+ rtx insn;
+ int cost;
+ bool asm_p;
+
+ if (sort_p && ready.n_ready > 0)
{
- *target_bb = current_sched_info->advance_target_bb
- (*target_bb, 0);
+ /* Sort the ready list based on priority. This must be
+ done every iteration through the loop, as schedule_insn
+ may have readied additional insns that will not be
+ sorted correctly. */
+ ready_sort (&ready);
- if (sched_verbose)
+ if (sched_verbose >= 2)
{
- rtx x;
-
- x = next_real_insn (last_scheduled_insn);
- gcc_assert (x);
- dump_new_block_header (1, *target_bb, x, tail);
+ fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
+ debug_ready_list (&ready);
}
-
- last_scheduled_insn = bb_note (*target_bb);
}
- while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
+ /* We don't want md sched reorder to even see debug insns, so put
+ them out right away. */
+ if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
+ && (*current_sched_info->schedule_more_p) ())
{
- rtx insn = ready_remove_first (&ready);
- gcc_assert (DEBUG_INSN_P (insn));
- (*current_sched_info->begin_schedule_ready) (insn,
- last_scheduled_insn);
- move_insn (insn, last_scheduled_insn,
- current_sched_info->next_tail);
- last_scheduled_insn = insn;
- advance = schedule_insn (insn);
- gcc_assert (advance == 0);
- if (ready.n_ready > 0)
- ready_sort (&ready);
+ while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
+ {
+ rtx insn = ready_remove_first (&ready);
+ gcc_assert (DEBUG_INSN_P (insn));
+ (*current_sched_info->begin_schedule_ready) (insn);
+ VEC_safe_push (rtx, heap, scheduled_insns, insn);
+ last_scheduled_insn = insn;
+ advance = schedule_insn (insn);
+ gcc_assert (advance == 0);
+ if (ready.n_ready > 0)
+ ready_sort (&ready);
+ }
}
- if (!ready.n_ready)
- continue;
- }
-
- /* Allow the target to reorder the list, typically for
- better instruction bundling. */
- if (sort_p && targetm.sched.reorder
- && (ready.n_ready == 0
- || !SCHED_GROUP_P (ready_element (&ready, 0))))
- can_issue_more =
- targetm.sched.reorder (sched_dump, sched_verbose,
- ready_lastpos (&ready),
- &ready.n_ready, clock_var);
- else
- can_issue_more = issue_rate;
+ if (ls.first_cycle_insn_p && !ready.n_ready)
+ break;
- first_cycle_insn_p = 1;
- cycle_issued_insns = 0;
- for (;;)
- {
- rtx insn;
- int cost;
- bool asm_p = false;
+ resume_after_backtrack:
+ /* Allow the target to reorder the list, typically for
+ better instruction bundling. */
+ if (sort_p
+ && (ready.n_ready == 0
+ || !SCHED_GROUP_P (ready_element (&ready, 0))))
+ {
+ if (ls.first_cycle_insn_p && targetm.sched.reorder)
+ ls.can_issue_more
+ = targetm.sched.reorder (sched_dump, sched_verbose,
+ ready_lastpos (&ready),
+ &ready.n_ready, clock_var);
+ else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
+ ls.can_issue_more
+ = targetm.sched.reorder2 (sched_dump, sched_verbose,
+ ready.n_ready
+ ? ready_lastpos (&ready) : NULL,
+ &ready.n_ready, clock_var);
+ }
+ restart_choose_ready:
if (sched_verbose >= 2)
{
fprintf (sched_dump, ";;\tReady list (t = %3d): ",
}
if (ready.n_ready == 0
- && can_issue_more
+ && ls.can_issue_more
&& reload_completed)
{
/* Allow scheduling insns directly from the queue in case
}
if (ready.n_ready == 0
- || !can_issue_more
+ || !ls.can_issue_more
|| state_dead_lock_p (curr_state)
|| !(*current_sched_info->schedule_more_p) ())
break;
int res;
insn = NULL_RTX;
- res = choose_ready (&ready, &insn);
+ res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
if (res < 0)
/* Finish cycle. */
break;
if (res > 0)
- /* Restart choose_ready (). */
- continue;
+ goto restart_choose_ready;
gcc_assert (insn != NULL_RTX);
}
}
sort_p = TRUE;
- memcpy (temp_state, curr_state, dfa_state_size);
- if (recog_memoized (insn) < 0)
- {
- asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
- || asm_noperands (PATTERN (insn)) >= 0);
- if (!first_cycle_insn_p && asm_p)
- /* This is asm insn which is tried to be issued on the
- cycle not first. Issue it on the next cycle. */
- cost = 1;
- else
- /* A USE insn, or something else we don't need to
- understand. We can't pass these directly to
- state_transition because it will trigger a
- fatal error for unrecognizable insns. */
- cost = 0;
- }
- else if (sched_pressure_p)
- cost = 0;
- else
- {
- cost = state_transition (temp_state, insn);
- if (cost < 0)
- cost = 0;
- else if (cost == 0)
- cost = 1;
- }
-
- if (cost >= 1)
- {
- queue_insn (insn, cost);
- if (SCHED_GROUP_P (insn))
- {
- advance = cost;
- break;
- }
-
- continue;
- }
if (current_sched_info->can_schedule_ready_p
&& ! (*current_sched_info->can_schedule_ready_p) (insn))
/* We normally get here only if we don't want to move
insn from the split block. */
{
- TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
- continue;
+ TODO_SPEC (insn) = HARD_DEP;
+ goto restart_choose_ready;
+ }
+
+ if (delay_htab)
+ {
+ /* If this insn is the first part of a delay-slot pair, record a
+ backtrack point. */
+ struct delay_pair *delay_entry;
+ delay_entry
+ = (struct delay_pair *)htab_find_with_hash (delay_htab, insn,
+ htab_hash_pointer (insn));
+ if (delay_entry)
+ {
+ save_backtrack_point (delay_entry, ls);
+ if (sched_verbose >= 2)
+ fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
+ }
}
/* DECISION is made. */
if (TODO_SPEC (insn) & SPECULATIVE)
generate_recovery_code (insn);
- if (control_flow_insn_p (last_scheduled_insn)
- /* This is used to switch basic blocks by request
- from scheduler front-end (actually, sched-ebb.c only).
- This is used to process blocks with single fallthru
- edge. If succeeding block has jump, it [jump] will try
- move at the end of current bb, thus corrupting CFG. */
- || current_sched_info->advance_target_bb (*target_bb, insn))
- {
- *target_bb = current_sched_info->advance_target_bb
- (*target_bb, 0);
-
- if (sched_verbose)
- {
- rtx x;
-
- x = next_real_insn (last_scheduled_insn);
- gcc_assert (x);
- dump_new_block_header (1, *target_bb, x, tail);
- }
-
- last_scheduled_insn = bb_note (*target_bb);
- }
+ if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
+ targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
/* Update counters, etc in the scheduler's front end. */
- (*current_sched_info->begin_schedule_ready) (insn,
- last_scheduled_insn);
-
- move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
- reemit_notes (insn);
- last_scheduled_insn = insn;
+ (*current_sched_info->begin_schedule_ready) (insn);
+ VEC_safe_push (rtx, heap, scheduled_insns, insn);
+ gcc_assert (NONDEBUG_INSN_P (insn));
+ last_nondebug_scheduled_insn = last_scheduled_insn = insn;
- if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
- {
- cycle_issued_insns++;
- memcpy (curr_state, temp_state, dfa_state_size);
- }
+ if (recog_memoized (insn) >= 0)
+ {
+ memcpy (temp_state, curr_state, dfa_state_size);
+ cost = state_transition (curr_state, insn);
+ if (!sched_pressure_p)
+ gcc_assert (cost < 0);
+ if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
+ cycle_issued_insns++;
+ asm_p = false;
+ }
+ else
+ asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
+ || asm_noperands (PATTERN (insn)) >= 0);
if (targetm.sched.variable_issue)
- can_issue_more =
+ ls.can_issue_more =
targetm.sched.variable_issue (sched_dump, sched_verbose,
- insn, can_issue_more);
+ insn, ls.can_issue_more);
/* A naked CLOBBER or USE generates no instruction, so do
not count them against the issue rate. */
else if (GET_CODE (PATTERN (insn)) != USE
&& GET_CODE (PATTERN (insn)) != CLOBBER)
- can_issue_more--;
+ ls.can_issue_more--;
advance = schedule_insn (insn);
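+ /* If we just issued a shadow insn, only other shadows may be issued
+ for the rest of this cycle; the prune_ready_list call below uses
+ this flag to enforce that.  */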
+ if (SHADOW_P (insn))
+ ls.shadows_only_p = true;
+
/* After issuing an asm insn we should start a new cycle. */
if (advance == 0 && asm_p)
advance = 1;
- if (advance != 0)
+
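+ /* Scheduling the insn may have found that a shadow would be
+ scheduled too late; if so, stop issuing and backtrack.  */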
+ if (must_backtrack)
break;
- first_cycle_insn_p = 0;
+ if (advance != 0)
+ break;
- /* Sort the ready list based on priority. This must be
- redone here, as schedule_insn may have readied additional
- insns that will not be sorted correctly. */
+ ls.first_cycle_insn_p = false;
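+ /* Examine the remaining ready insns and queue those that can no
+ longer be issued in this cycle.  */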
if (ready.n_ready > 0)
- ready_sort (&ready);
-
- /* Quickly go through debug insns such that md sched
- reorder2 doesn't have to deal with debug insns. */
- if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
- && (*current_sched_info->schedule_more_p) ())
- {
- if (control_flow_insn_p (last_scheduled_insn))
- {
- *target_bb = current_sched_info->advance_target_bb
- (*target_bb, 0);
-
- if (sched_verbose)
- {
- rtx x;
-
- x = next_real_insn (last_scheduled_insn);
- gcc_assert (x);
- dump_new_block_header (1, *target_bb, x, tail);
- }
-
- last_scheduled_insn = bb_note (*target_bb);
- }
-
- while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
- {
- insn = ready_remove_first (&ready);
- gcc_assert (DEBUG_INSN_P (insn));
- (*current_sched_info->begin_schedule_ready)
- (insn, last_scheduled_insn);
- move_insn (insn, last_scheduled_insn,
- current_sched_info->next_tail);
- advance = schedule_insn (insn);
- last_scheduled_insn = insn;
- gcc_assert (advance == 0);
- if (ready.n_ready > 0)
- ready_sort (&ready);
- }
- }
+ prune_ready_list (temp_state, false, ls.shadows_only_p);
+ }
- if (targetm.sched.reorder2
- && (ready.n_ready == 0
- || !SCHED_GROUP_P (ready_element (&ready, 0))))
+ do_backtrack:
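+ /* Even if no shadow insn failed, an insn left on the ready list
+ whose exact tick is the current cycle has missed its deadline,
+ and we must backtrack for it as well.  */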
+ if (!must_backtrack)
+ for (i = 0; i < ready.n_ready; i++)
+ {
+ rtx insn = ready_element (&ready, i);
+ if (INSN_EXACT_TICK (insn) == clock_var)
+ {
+ must_backtrack = true;
+ clock_var++;
+ break;
+ }
+ }
+ while (must_backtrack)
+ {
+ struct haifa_saved_data *failed;
+ rtx failed_insn;
+
+ must_backtrack = false;
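+ /* Find a saved backtrack point whose delay pair can no longer be
+ satisfied on time.  */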
+ failed = verify_shadows ();
+ gcc_assert (failed);
+
+ failed_insn = failed->delay_pair->i1;
+ unschedule_insns_until (failed_insn);
+ while (failed != backtrack_queue)
+ free_topmost_backtrack_point (true);
+ restore_last_backtrack_point (&ls);
+ if (sched_verbose >= 2)
+ fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
+ /* Delay by at least a cycle. This could cause additional
+ backtracking. */
+ queue_insn (failed_insn, 1, "backtracked");
+ advance = 0;
+ if (must_backtrack)
+ continue;
+ if (ready.n_ready > 0)
+ goto resume_after_backtrack;
+ else
{
- can_issue_more =
- targetm.sched.reorder2 (sched_dump, sched_verbose,
- ready.n_ready
- ? ready_lastpos (&ready) : NULL,
- &ready.n_ready, clock_var);
+ if (clock_var == 0 && ls.first_cycle_insn_p)
+ goto end_schedule;
+ advance = 1;
+ break;
}
}
}
-
+ end_schedule:
/* Debug info. */
if (sched_verbose)
{
x = ready_element (&ready, i);
QUEUE_INDEX (x) = QUEUE_NOWHERE;
- TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
+ TODO_SPEC (x) = HARD_DEP;
}
if (q_size)
x = XEXP (link, 0);
QUEUE_INDEX (x) = QUEUE_NOWHERE;
- TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
+ TODO_SPEC (x) = HARD_DEP;
}
free_INSN_LIST_list (&insn_queue[i]);
}
}
+ commit_schedule (prev_head, tail, target_bb);
if (sched_verbose)
fprintf (sched_dump, ";; total time = %d\n", clock_var);
fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
}
- if (targetm.sched.md_finish)
+ if (targetm.sched.finish)
{
- targetm.sched.md_finish (sched_dump, sched_verbose);
+ targetm.sched.finish (sched_dump, sched_verbose);
/* The target might have added some instructions to the scheduled block
in its finish () hook.  These new insns don't have any data
initialized, so to identify them we extend h_i_d so that they'll
get zero luids. */
- sched_init_luids (NULL, NULL, NULL, NULL);
+ sched_extend_luids ();
}
if (sched_verbose)
current_sched_info->head = head;
current_sched_info->tail = tail;
+
+ free_backtrack_queue ();
}
\f
/* Set_priorities: compute priority of each insn in the block. */
current_sched_info->sched_max_insns_priority;
rtx prev_head;
- if (head == tail && (! INSN_P (head) || BOUNDARY_DEBUG_INSN_P (head)))
+ if (head == tail && ! INSN_P (head))
gcc_unreachable ();
n_insn = 0;
flag_schedule_speculative_load = 0;
#endif
+ if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
+ targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);
+
sched_pressure_p = (flag_sched_pressure && ! reload_completed
&& common_sched_info->sched_pass_id == SCHED_RGN_PASS);
+
if (sched_pressure_p)
ira_setup_eliminable_regset ();
init_alias_analysis ();
- df_set_flags (DF_LR_RUN_DCE);
+ if (!sched_no_dce)
+ df_set_flags (DF_LR_RUN_DCE);
df_note_add_problem ();
/* More problems needed for interloop dep calculation in SMS. */
regstat_compute_calls_crossed ();
- if (targetm.sched.md_init_global)
- targetm.sched.md_init_global (sched_dump, sched_verbose,
- get_max_uid () + 1);
+ if (targetm.sched.init_global)
+ targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
if (sched_pressure_p)
{
int i, max_regno = max_reg_num ();
ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
- sched_regno_cover_class
+ sched_regno_pressure_class
= (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
for (i = 0; i < max_regno; i++)
- sched_regno_cover_class[i]
+ sched_regno_pressure_class[i]
= (i < FIRST_PSEUDO_REGISTER
- ? ira_class_translate[REGNO_REG_CLASS (i)]
- : reg_cover_class (i));
+ ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
+ : ira_pressure_class_translate[reg_allocno_class (i)]);
curr_reg_live = BITMAP_ALLOC (NULL);
saved_reg_live = BITMAP_ALLOC (NULL);
region_ref_regs = BITMAP_ALLOC (NULL);
setup_sched_dump ();
sched_init ();
+ scheduled_insns = VEC_alloc (rtx, heap, 0);
+
if (spec_info != NULL)
{
sched_deps_info->use_deps_list = 1;
FOR_EACH_BB (bb)
VEC_quick_push (basic_block, bbs, bb);
- sched_init_luids (bbs, NULL, NULL, NULL);
+ sched_init_luids (bbs);
sched_deps_init (true);
sched_extend_target ();
- haifa_init_h_i_d (bbs, NULL, NULL, NULL);
+ haifa_init_h_i_d (bbs);
VEC_free (basic_block, heap, bbs);
}
sched_create_empty_bb = sched_create_empty_bb_1;
haifa_recovery_bb_ever_added_p = false;
-#ifdef ENABLE_CHECKING
- /* This is used preferably for finding bugs in check_cfg () itself.
- We must call sched_bbs_init () before check_cfg () because check_cfg ()
- assumes that the last insn in the last bb has a non-null successor. */
- check_cfg (0, 0);
-#endif
-
nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
before_recovery = 0;
after_recovery = 0;
c, nr_be_in_control);
}
+ VEC_free (rtx, heap, scheduled_insns);
+
/* Finalize h_i_d, dependency caches, and luids for the whole
function.  The target will be finalized in the finish_global () hook.  */
sched_deps_finish ();
haifa_finish_h_i_d ();
if (sched_pressure_p)
{
- free (sched_regno_cover_class);
+ free (sched_regno_pressure_class);
BITMAP_FREE (region_ref_regs);
BITMAP_FREE (saved_reg_live);
BITMAP_FREE (curr_reg_live);
}
free (curr_state);
- if (targetm.sched.md_finish_global)
- targetm.sched.md_finish_global (sched_dump, sched_verbose);
+ if (targetm.sched.finish_global)
+ targetm.sched.finish_global (sched_dump, sched_verbose);
end_alias_analysis ();
regstat_free_calls_crossed ();
dfa_finish ();
+}
-#ifdef ENABLE_CHECKING
- /* After reload ia64 backend clobbers CFG, so can't check anything. */
- if (!reload_completed)
- check_cfg (0, 0);
-#endif
+/* Free all delay_pair structures that were recorded. */
+void
+free_delay_pairs (void)
+{
+ if (delay_htab)
+ {
+ htab_empty (delay_htab);
+ htab_empty (delay_htab_i2);
+ }
}
/* Fix INSN_TICKs of the instructions in the current block as well as
gcc_assert (tick >= MIN_TICK);
/* Fix INSN_TICK of instruction from just scheduled block. */
- if (!bitmap_bit_p (&processed, INSN_LUID (head)))
+ if (bitmap_set_bit (&processed, INSN_LUID (head)))
{
- bitmap_set_bit (&processed, INSN_LUID (head));
tick -= next_clock;
if (tick < MIN_TICK)
/* If NEXT has its INSN_TICK calculated, fix it.
If not - it will be properly calculated from
scratch later in fix_tick_ready. */
- && !bitmap_bit_p (&processed, INSN_LUID (next)))
+ && bitmap_set_bit (&processed, INSN_LUID (next)))
{
- bitmap_set_bit (&processed, INSN_LUID (next));
tick -= next_clock;
if (tick < MIN_TICK)
int
try_ready (rtx next)
{
- ds_t old_ts, *ts;
+ ds_t old_ts, new_ts;
- ts = &TODO_SPEC (next);
- old_ts = *ts;
+ old_ts = TODO_SPEC (next);
gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
&& ((old_ts & HARD_DEP)
if (sd_lists_empty_p (next, SD_LIST_BACK))
/* NEXT has all its dependencies resolved. */
- {
- /* Remove HARD_DEP bit from NEXT's status. */
- *ts &= ~HARD_DEP;
-
- if (current_sched_info->flags & DO_SPECULATION)
- /* Remove all speculative bits from NEXT's status. */
- *ts &= ~SPECULATIVE;
- }
+ new_ts = 0;
else
{
/* One of the NEXT's dependencies has been resolved.
Recalculate NEXT's status. */
- *ts &= ~SPECULATIVE & ~HARD_DEP;
-
- if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
+ if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
+ new_ts = HARD_DEP;
+ else
/* Now we've got NEXT with speculative deps only.
1. Look at the deps to see what we have to do.
2. Check if we can do 'todo'. */
dep_t dep;
bool first_p = true;
+ new_ts = 0;
+
FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
{
ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
{
first_p = false;
- *ts = ds;
+ new_ts = ds;
}
else
- *ts = ds_merge (*ts, ds);
+ new_ts = ds_merge (new_ts, ds);
}
- if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
+ if (ds_weak (new_ts) < spec_info->data_weakness_cutoff)
/* Too few points. */
- *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
+ new_ts = HARD_DEP;
}
- else
- *ts |= HARD_DEP;
}
- if (*ts & HARD_DEP)
- gcc_assert (*ts == old_ts
+ if (new_ts & HARD_DEP)
+ gcc_assert (new_ts == HARD_DEP && new_ts == old_ts
&& QUEUE_INDEX (next) == QUEUE_NOWHERE);
else if (current_sched_info->new_ready)
- *ts = current_sched_info->new_ready (next, *ts);
+ new_ts = current_sched_info->new_ready (next, new_ts);
/* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
have its original pattern or changed (speculative) one. This is due
* But if (old_ts & SPECULATIVE), then we are pretty sure that insn
has speculative pattern.
- We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
+ We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
control-speculative NEXT could have been discarded by sched-rgn.c
(the same case as when discarded by can_schedule_ready_p ()). */
- if ((*ts & SPECULATIVE)
- /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
+ if ((new_ts & SPECULATIVE)
+ /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
need to change anything. */
- && *ts != old_ts)
+ && new_ts != old_ts)
{
int res;
rtx new_pat;
- gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
+ gcc_assert (!(new_ts & ~SPECULATIVE));
- res = haifa_speculate_insn (next, *ts, &new_pat);
+ res = haifa_speculate_insn (next, new_ts, &new_pat);
switch (res)
{
case -1:
/* It would be nice to change DEP_STATUS of all dependences,
- which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
+ which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
so we won't reanalyze anything. */
- *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
+ new_ts = HARD_DEP;
break;
case 0:
}
}
- /* We need to restore pattern only if (*ts == 0), because otherwise it is
- either correct (*ts & SPECULATIVE),
- or we simply don't care (*ts & HARD_DEP). */
+ /* We need to restore pattern only if (new_ts == 0), because otherwise it is
+ either correct (new_ts & SPECULATIVE),
+ or we simply don't care (new_ts & HARD_DEP). */
gcc_assert (!ORIG_PAT (next)
|| !IS_SPECULATION_BRANCHY_CHECK_P (next));
- if (*ts & HARD_DEP)
+ TODO_SPEC (next) = new_ts;
+
+ if (new_ts & HARD_DEP)
{
/* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
control-speculative NEXT could have been discarded by sched-rgn.c
change_queue_index (next, QUEUE_NOWHERE);
return -1;
}
- else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
+ else if (!(new_ts & BEGIN_SPEC)
+ && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
/* We should change pattern of every previously speculative
instruction - and we determine if NEXT was speculative by using
ORIG_PAT field. Except one case - speculation checks have ORIG_PAT
if (sched_verbose >= 2)
{
- int s = TODO_SPEC (next);
-
fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
(*current_sched_info->print_insn) (next, 0));
if (spec_info && spec_info->dump)
{
- if (s & BEGIN_DATA)
+ if (new_ts & BEGIN_DATA)
fprintf (spec_info->dump, "; data-spec;");
- if (s & BEGIN_CONTROL)
+ if (new_ts & BEGIN_CONTROL)
fprintf (spec_info->dump, "; control-spec;");
- if (s & BE_IN_CONTROL)
+ if (new_ts & BE_IN_CONTROL)
fprintf (spec_info->dump, "; in-control-spec;");
}
{
int tick, delay;
- if (!sd_lists_empty_p (next, SD_LIST_RES_BACK))
+ if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
{
int full_p;
sd_iterator_def sd_it;
if (delay == QUEUE_READY)
ready_add (readyp, next, false);
else if (delay >= 1)
- queue_insn (next, delay);
+ queue_insn (next, delay, "change queue index");
if (sched_verbose >= 2)
{
{
i = 0;
sched_ready_n_insns = 0;
+ VEC_reserve (rtx, heap, scheduled_insns, new_sched_ready_n_insns);
}
else
i = sched_ready_n_insns + 1;
new_sched_ready_n_insns + 1);
for (; i <= new_sched_ready_n_insns; i++)
- choice_stack[i].state = xmalloc (dfa_state_size);
+ {
+ choice_stack[i].state = xmalloc (dfa_state_size);
+
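+ /* Let the target initialize the per-entry data it keeps for
+ first-cycle multipass lookahead.  */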
+ if (targetm.sched.first_cycle_multipass_init)
+ targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
+ .target_data));
+ }
sched_ready_n_insns = new_sched_ready_n_insns;
}
ready_try = NULL;
for (i = 0; i <= sched_ready_n_insns; i++)
- free (choice_stack [i].state);
+ {
+ if (targetm.sched.first_cycle_multipass_fini)
+ targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
+ .target_data));
+
+ free (choice_stack [i].state);
+ }
free (choice_stack);
choice_stack = NULL;
/* Helper function.
Find fallthru edge from PRED. */
edge
-find_fallthru_edge (basic_block pred)
+find_fallthru_edge_from (basic_block pred)
{
edge e;
- edge_iterator ei;
basic_block succ;
succ = pred->next_bb;
if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
{
- FOR_EACH_EDGE (e, ei, pred->succs)
- if (e->flags & EDGE_FALLTHRU)
- {
- gcc_assert (e->dest == succ);
- return e;
- }
+ e = find_fallthru_edge (pred->succs);
+
+ if (e)
+ {
+ gcc_assert (e->dest == succ);
+ return e;
+ }
}
else
{
- FOR_EACH_EDGE (e, ei, succ->preds)
- if (e->flags & EDGE_FALLTHRU)
- {
- gcc_assert (e->src == pred);
- return e;
- }
+ e = find_fallthru_edge (succ->preds);
+
+ if (e)
+ {
+ gcc_assert (e->src == pred);
+ return e;
+ }
}
return NULL;
edge e;
last = EXIT_BLOCK_PTR->prev_bb;
- e = find_fallthru_edge (last);
+ e = find_fallthru_edge_from (last);
if (e)
{
{
/* Rewritten from cfgrtl.c. */
if (flag_reorder_blocks_and_partition
- && targetm.have_named_sections)
+ && targetm_common.have_named_sections)
{
/* We don't need the same note for the check because
any_condjump_p (check) == true. */
edge_flags = 0;
make_single_succ_edge (rec, second_bb, edge_flags);
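+ /* If dominator information is available, keep it consistent for the
+ newly created recovery block.  */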
+ if (dom_info_available_p (CDI_DOMINATORS))
+ set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
}
/* This function creates recovery code for INSN. If MUTATE_P is nonzero,
{
sd_delete_dep (sd_it);
- if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
- {
- ready_list = alloc_INSN_LIST (consumer, ready_list);
- bitmap_set_bit (&in_ready, INSN_LUID (consumer));
- }
+ if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
+ ready_list = alloc_INSN_LIST (consumer, ready_list);
}
else
{
void
sched_change_pattern (rtx insn, rtx new_pat)
{
+ sd_iterator_def sd_it;
+ dep_t dep;
int t;
t = validate_change (insn, &PATTERN (insn), new_pat, 0);
gcc_assert (t);
dfa_clear_single_insn_cache (insn);
+
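+ /* The new pattern may change the cost of dependences involving this
+ insn, so invalidate any cached values.  */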
+ for (sd_it = sd_iterator_start (insn, (SD_LIST_FORW | SD_LIST_BACK
+ | SD_LIST_RES_BACK));
+ sd_iterator_cond (&sd_it, &dep);
+ sd_iterator_next (&sd_it))
+ DEP_COST (dep) = UNKNOWN_DEP_COST;
}
/* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
int i;
rtx insn;
- for (i = 0; VEC_iterate (rtx, roots, i, insn); i++)
+ FOR_EACH_VEC_ELT (rtx, roots, i, insn)
priority (insn);
}
return note;
}
-#ifdef ENABLE_CHECKING
-/* Helper function for check_cfg.
- Return nonzero, if edge vector pointed to by EL has edge with TYPE in
- its flags. */
-static int
-has_edge_p (VEC(edge,gc) *el, int type)
-{
- edge e;
- edge_iterator ei;
-
- FOR_EACH_EDGE (e, ei, el)
- if (e->flags & type)
- return 1;
- return 0;
-}
-
-/* Search back, starting at INSN, for an insn that is not a
- NOTE_INSN_VAR_LOCATION. Don't search beyond HEAD, and return it if
- no such insn can be found. */
-static inline rtx
-prev_non_location_insn (rtx insn, rtx head)
-{
- while (insn != head && NOTE_P (insn)
- && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
- insn = PREV_INSN (insn);
-
- return insn;
-}
-
-/* Check few properties of CFG between HEAD and TAIL.
- If HEAD (TAIL) is NULL check from the beginning (till the end) of the
- instruction stream. */
-static void
-check_cfg (rtx head, rtx tail)
-{
- rtx next_tail;
- basic_block bb = 0;
- int not_first = 0, not_last;
-
- if (head == NULL)
- head = get_insns ();
- if (tail == NULL)
- tail = get_last_insn ();
- next_tail = NEXT_INSN (tail);
-
- do
- {
- not_last = head != tail;
-
- if (not_first)
- gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
- if (not_last)
- gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
-
- if (LABEL_P (head)
- || (NOTE_INSN_BASIC_BLOCK_P (head)
- && (!not_first
- || (not_first && !LABEL_P (PREV_INSN (head))))))
- {
- gcc_assert (bb == 0);
- bb = BLOCK_FOR_INSN (head);
- if (bb != 0)
- gcc_assert (BB_HEAD (bb) == head);
- else
- /* This is the case of jump table. See inside_basic_block_p (). */
- gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
- }
-
- if (bb == 0)
- {
- gcc_assert (!inside_basic_block_p (head));
- head = NEXT_INSN (head);
- }
- else
- {
- gcc_assert (inside_basic_block_p (head)
- || NOTE_P (head));
- gcc_assert (BLOCK_FOR_INSN (head) == bb);
-
- if (LABEL_P (head))
- {
- head = NEXT_INSN (head);
- gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
- }
- else
- {
- if (control_flow_insn_p (head))
- {
- gcc_assert (prev_non_location_insn (BB_END (bb), head)
- == head);
-
- if (any_uncondjump_p (head))
- gcc_assert (EDGE_COUNT (bb->succs) == 1
- && BARRIER_P (NEXT_INSN (head)));
- else if (any_condjump_p (head))
- gcc_assert (/* Usual case. */
- (EDGE_COUNT (bb->succs) > 1
- && !BARRIER_P (NEXT_INSN (head)))
- /* Or jump to the next instruction. */
- || (EDGE_COUNT (bb->succs) == 1
- && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
- == JUMP_LABEL (head))));
- }
- if (BB_END (bb) == head)
- {
- if (EDGE_COUNT (bb->succs) > 1)
- gcc_assert (control_flow_insn_p (prev_non_location_insn
- (head, BB_HEAD (bb)))
- || has_edge_p (bb->succs, EDGE_COMPLEX));
- bb = 0;
- }
-
- head = NEXT_INSN (head);
- }
- }
-
- not_first = 1;
- }
- while (head != next_tail);
-
- gcc_assert (bb == 0);
-}
-
-#endif /* ENABLE_CHECKING */
-
-/* Extend per basic block data structures. */
-static void
-extend_bb (void)
-{
- if (sched_scan_info->extend_bb)
- sched_scan_info->extend_bb ();
-}
-
-/* Init data for BB. */
-static void
-init_bb (basic_block bb)
-{
- if (sched_scan_info->init_bb)
- sched_scan_info->init_bb (bb);
-}
-
-/* Extend per insn data structures. */
-static void
-extend_insn (void)
-{
- if (sched_scan_info->extend_insn)
- sched_scan_info->extend_insn ();
-}
-
-/* Init data structures for INSN. */
-static void
-init_insn (rtx insn)
-{
- if (sched_scan_info->init_insn)
- sched_scan_info->init_insn (insn);
-}
-
-/* Init all insns in BB. */
-static void
-init_insns_in_bb (basic_block bb)
-{
- rtx insn;
-
- FOR_BB_INSNS (bb, insn)
- init_insn (insn);
-}
-
-/* A driver function to add a set of basic blocks (BBS),
- a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
- to the scheduling region. */
-void
-sched_scan (const struct sched_scan_info_def *ssi,
- bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
-{
- sched_scan_info = ssi;
-
- if (bbs != NULL || bb != NULL)
- {
- extend_bb ();
-
- if (bbs != NULL)
- {
- unsigned i;
- basic_block x;
-
- for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
- init_bb (x);
- }
-
- if (bb != NULL)
- init_bb (bb);
- }
-
- extend_insn ();
-
- if (bbs != NULL)
- {
- unsigned i;
- basic_block x;
-
- for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
- init_insns_in_bb (x);
- }
-
- if (bb != NULL)
- init_insns_in_bb (bb);
-
- if (insns != NULL)
- {
- unsigned i;
- rtx x;
-
- for (i = 0; VEC_iterate (rtx, insns, i, x); i++)
- init_insn (x);
- }
-
- if (insn != NULL)
- init_insn (insn);
-}
-
-
/* Extend data structures for logical insn UID. */
-static void
-luids_extend_insn (void)
+void
+sched_extend_luids (void)
{
int new_luids_max_uid = get_max_uid () + 1;
}
/* Initialize LUID for INSN. */
-static void
-luids_init_insn (rtx insn)
+void
+sched_init_insn_luid (rtx insn)
{
int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
int luid;
SET_INSN_LUID (insn, luid);
}
-/* Initialize luids for BBS, BB, INSNS and INSN.
+/* Initialize luids for BBS.
The hook common_sched_info->luid_for_non_insn () is used to determine
if notes, labels, etc. need luids. */
void
-sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
+sched_init_luids (bb_vec_t bbs)
{
- const struct sched_scan_info_def ssi =
+ int i;
+ basic_block bb;
+
+ sched_extend_luids ();
+ FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
{
- NULL, /* extend_bb */
- NULL, /* init_bb */
- luids_extend_insn, /* extend_insn */
- luids_init_insn /* init_insn */
- };
+ rtx insn;
- sched_scan (&ssi, bbs, bb, insns, insn);
+ FOR_BB_INSNS (bb, insn)
+ sched_init_insn_luid (insn);
+ }
}
/* Free LUIDs. */
INSN_COST (insn) = -1;
QUEUE_INDEX (insn) = QUEUE_NOWHERE;
INSN_TICK (insn) = INVALID_TICK;
+ INSN_EXACT_TICK (insn) = INVALID_TICK;
INTER_TICK (insn) = INVALID_TICK;
TODO_SPEC (insn) = HARD_DEP;
}
}
-/* Initialize haifa_insn_data for BBS, BB, INSNS and INSN. */
+/* Initialize haifa_insn_data for BBS. */
void
-haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
+haifa_init_h_i_d (bb_vec_t bbs)
{
- const struct sched_scan_info_def ssi =
+ int i;
+ basic_block bb;
+
+ extend_h_i_d ();
+ FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
{
- NULL, /* extend_bb */
- NULL, /* init_bb */
- extend_h_i_d, /* extend_insn */
- init_h_i_d /* init_insn */
- };
+ rtx insn;
- sched_scan (&ssi, bbs, bb, insns, insn);
+ FOR_BB_INSNS (bb, insn)
+ init_h_i_d (insn);
+ }
}
/* Finalize haifa_insn_data. */
haifa_insn_data_t data;
struct reg_use_data *use, *next;
- for (i = 0; VEC_iterate (haifa_insn_data_def, h_i_d, i, data); i++)
+ FOR_EACH_VEC_ELT (haifa_insn_data_def, h_i_d, i, data)
{
- if (data->reg_pressure != NULL)
- free (data->reg_pressure);
+ free (data->reg_pressure);
for (use = data->reg_use_list; use != NULL; use = next)
{
next = use->next_insn_use;
{
gcc_assert (insn != NULL);
- sched_init_luids (NULL, NULL, NULL, insn);
+ sched_extend_luids ();
+ sched_init_insn_luid (insn);
sched_extend_target ();
sched_deps_init (false);
- haifa_init_h_i_d (NULL, NULL, NULL, insn);
+ extend_h_i_d ();
+ init_h_i_d (insn);
if (adding_bb_to_current_region_p)
{
/* Extend dependency caches by one element. */
extend_dependency_caches (1, false);
}
+ if (sched_pressure_p)
+ init_insn_reg_pressure_info (insn);
}
/* Init data for the new basic block BB which comes after AFTER. */
rtx
sched_emit_insn (rtx pat)
{
- rtx insn = emit_insn_after (pat, last_scheduled_insn);
- last_scheduled_insn = insn;
+ rtx insn = emit_insn_before (pat, nonscheduled_insns_begin);
haifa_init_insn (insn);
+
+ if (current_sched_info->add_remove_insn)
+ current_sched_info->add_remove_insn (insn, 0);
+
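+ /* The new insn is emitted as already scheduled: tell the frontend
+ about it and record it in the schedule.  */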
+ (*current_sched_info->begin_schedule_ready) (insn);
+ VEC_safe_push (rtx, heap, scheduled_insns, insn);
+
+ last_scheduled_insn = insn;
return insn;
}
+/* Return a candidate insn from the ready list that satisfies the
+ target's dispatch constraints.  */
+
+static rtx
+ready_remove_first_dispatch (struct ready_list *ready)
+{
+ int i;
+ rtx insn = ready_element (ready, 0);
+
+ if (ready->n_ready == 1
+ || INSN_CODE (insn) < 0
+ || !INSN_P (insn)
+ || !active_insn_p (insn)
+ || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
+ return ready_remove_first (ready);
+
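+ /* Scan the rest of the ready list for an insn that fits the current
+ dispatch window.  */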
+ for (i = 1; i < ready->n_ready; i++)
+ {
+ insn = ready_element (ready, i);
+
+ if (INSN_CODE (insn) < 0
+ || !INSN_P (insn)
+ || !active_insn_p (insn))
+ continue;
+
+ if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
+ {
+ /* Return the I-th element of READY.  */
+ insn = ready_remove (ready, i);
+ return insn;
+ }
+ }
+
+ if (targetm.sched.dispatch (NULL_RTX, DISPATCH_VIOLATION))
+ return ready_remove_first (ready);
+
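+ /* No dispatch violation so far; fall back to a compare insn if one
+ is ready.  */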
+ for (i = 1; i < ready->n_ready; i++)
+ {
+ insn = ready_element (ready, i);
+
+ if (INSN_CODE (insn) < 0
+ || !INSN_P (insn)
+ || !active_insn_p (insn))
+ continue;
+
+ /* If this insn is a compare, return it.  */
+ if (targetm.sched.dispatch (insn, IS_CMP))
+ return ready_remove (ready, i);
+ }
+
+ return ready_remove_first (ready);
+}
+
+/* Return the number of insns in the ready list.  */
+
+int
+number_in_ready (void)
+{
+ return ready.n_ready;
+}
+
+/* Return the I-th insn in the ready list.  */
+
+rtx
+get_ready_element (int i)
+{
+ return ready_element (&ready, i);
+}
+
#endif /* INSN_SCHEDULING */