/* Instruction scheduling pass.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
- 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
#include "vecprim.h"
#include "dbgcnt.h"
#include "cfgloop.h"
+#include "ira.h"
#ifdef INSN_SCHEDULING
warning (0, "fix_sched_param: unknown param: %s", param);
}
-/* This is a placeholder for the scheduler parameters common
+/* This is a placeholder for the scheduler parameters common
to all schedulers. */
struct common_sched_info_def *common_sched_info;
queue or ready list.
QUEUE_READY - INSN is in ready list.
N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
-
+
#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
/* The following variable value refers for all current and future
static int haifa_luid_for_non_insn (rtx x);
/* Haifa version of sched_info hooks common to all headers. */
-const struct common_sched_info_def haifa_common_sched_info =
+const struct common_sched_info_def haifa_common_sched_info =
{
NULL, /* fix_recovery_cfg */
NULL, /* add_block */
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
-static int find_set_reg_weight (const_rtx);
-static void find_insn_reg_weight (const_rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);
static void extend_h_i_d (void);
}
#else
+/* Do register pressure sensitive insn scheduling if this flag is
+   set.  */
+bool sched_pressure_p;
+
+/* Map regno -> its cover class.  The map is defined only when
+   SCHED_PRESSURE_P is true.  */
+enum reg_class *sched_regno_cover_class;
+
+/* The current register pressure.  Only elements corresponding to
+   cover classes are defined.  */
+static int curr_reg_pressure[N_REG_CLASSES];
+
+/* Saved value of the previous array. */
+static int saved_reg_pressure[N_REG_CLASSES];
+
+/* Registers live at the given scheduling point.  */
+static bitmap curr_reg_live;
+
+/* Saved value of the previous bitmap.  */
+static bitmap saved_reg_live;
+
+/* Registers mentioned in the current region. */
+static bitmap region_ref_regs;
+
+/* Initialize register pressure related info for scheduling the
+   current region.  Currently this only clears the set of registers
+   mentioned in the current region.  */
+void
+sched_init_region_reg_pressure_info (void)
+{
+ bitmap_clear (region_ref_regs);
+}
+
+/* Update current register pressure related info after birth (if
+ BIRTH_P) or death of register REGNO. */
+static void
+mark_regno_birth_or_death (int regno, bool birth_p)
+{
+ enum reg_class cover_class;
+
+ cover_class = sched_regno_cover_class[regno];
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ if (cover_class != NO_REGS)
+ {
+ if (birth_p)
+ {
+ bitmap_set_bit (curr_reg_live, regno);
+ curr_reg_pressure[cover_class]
+ += ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
+ }
+ else
+ {
+ bitmap_clear_bit (curr_reg_live, regno);
+ curr_reg_pressure[cover_class]
+ -= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
+ }
+ }
+ }
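+  /* A hard register contributes one unit of pressure; registers
+     that can never be allocated do not affect pressure at all.  */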
+ else if (cover_class != NO_REGS
+ && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
+ {
+ if (birth_p)
+ {
+ bitmap_set_bit (curr_reg_live, regno);
+ curr_reg_pressure[cover_class]++;
+ }
+ else
+ {
+ bitmap_clear_bit (curr_reg_live, regno);
+ curr_reg_pressure[cover_class]--;
+ }
+ }
+}
+
+/* Initialize current register pressure related info from the live
+   registers given by LIVE.  */
+static void
+initiate_reg_pressure_info (bitmap live)
+{
+ int i;
+ unsigned int j;
+ bitmap_iterator bi;
+
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ curr_reg_pressure[ira_reg_class_cover[i]] = 0;
+ bitmap_clear (curr_reg_live);
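+  /* REGION_REF_REGS is only computed for multi-block regions; for a
+     single-block region every live register is of interest.  */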
+ EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
+ if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
+ mark_regno_birth_or_death (j, true);
+}
+
+/* Mark registers in X as mentioned in the current region. */
+static void
+setup_ref_regs (rtx x)
+{
+ int i, j, regno;
+ const RTX_CODE code = GET_CODE (x);
+ const char *fmt;
+
+ if (REG_P (x))
+ {
+ regno = REGNO (x);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ bitmap_set_bit (region_ref_regs, REGNO (x));
+ else
+ for (i = hard_regno_nregs[regno][GET_MODE (x)] - 1; i >= 0; i--)
+ bitmap_set_bit (region_ref_regs, regno + i);
+ return;
+ }
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ setup_ref_regs (XEXP (x, i));
+ else if (fmt[i] == 'E')
+ {
+ for (j = 0; j < XVECLEN (x, i); j++)
+ setup_ref_regs (XVECEXP (x, i, j));
+ }
+}
+
+/* Initialize current register pressure related info at the start of
+   basic block BB.  */
+static void
+initiate_bb_reg_pressure_info (basic_block bb)
+{
+ unsigned int i;
+ rtx insn;
+
+ if (current_nr_blocks > 1)
+ FOR_BB_INSNS (bb, insn)
+ if (NONDEBUG_INSN_P (insn))
+ setup_ref_regs (PATTERN (insn));
+ initiate_reg_pressure_info (df_get_live_in (bb));
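+  /* The EH return data registers are live on entry to an EH landing
+     pad even when the DF live-in set does not include them.  */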
+#ifdef EH_RETURN_DATA_REGNO
+ if (bb_has_eh_pred (bb))
+ for (i = 0; ; ++i)
+ {
+ unsigned int regno = EH_RETURN_DATA_REGNO (i);
+
+ if (regno == INVALID_REGNUM)
+ break;
+ if (! bitmap_bit_p (df_get_live_in (bb), regno))
+ mark_regno_birth_or_death (regno, true);
+ }
+#endif
+}
+
+/* Save current register pressure related info. */
+static void
+save_reg_pressure (void)
+{
+ int i;
+
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ saved_reg_pressure[ira_reg_class_cover[i]]
+ = curr_reg_pressure[ira_reg_class_cover[i]];
+ bitmap_copy (saved_reg_live, curr_reg_live);
+}
+
+/* Restore saved register pressure related info. */
+static void
+restore_reg_pressure (void)
+{
+ int i;
+
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ curr_reg_pressure[ira_reg_class_cover[i]]
+ = saved_reg_pressure[ira_reg_class_cover[i]];
+ bitmap_copy (curr_reg_live, saved_reg_live);
+}
+
+/* Return TRUE if the register is dying after its USE, i.e. no other
+   unscheduled non-debug insn uses the register.  */
+static bool
+dying_use_p (struct reg_use_data *use)
+{
+ struct reg_use_data *next;
+
+ for (next = use->next_regno_use; next != use; next = next->next_regno_use)
+ if (NONDEBUG_INSN_P (next->insn)
+ && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
+ return false;
+ return true;
+}
+
+/* Print info about the current register pressure and its excess for
+ each cover class. */
+static void
+print_curr_reg_pressure (void)
+{
+ int i;
+ enum reg_class cl;
+
+ fprintf (sched_dump, ";;\t");
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ {
+ cl = ira_reg_class_cover[i];
+ gcc_assert (curr_reg_pressure[cl] >= 0);
+ fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
+ curr_reg_pressure[cl],
+ curr_reg_pressure[cl] - ira_available_class_regs[cl]);
+ }
+ fprintf (sched_dump, "\n");
+}
+
/* Pointer to the last instruction scheduled. Used by rank_for_schedule,
so that insns independent of the last scheduled insn will be preferred
over dependent instructions. */
/* A USE insn should never require the value used to be computed.
This allows the computation of a function's result and parameter
- values to overlap the return and call. */
+   values to overlap the return and call.  We don't care about
+   the dependence cost when only decreasing register pressure.  */
if (recog_memoized (used) < 0)
{
cost = 0;
else if (bypass_p (insn))
cost = insn_latency (insn, used);
}
-
+
if (targetm.sched.adjust_cost_2)
- {
- cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
- dw);
- }
+ cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
+ dw);
else if (targetm.sched.adjust_cost != NULL)
{
/* This variable is used for backward compatibility with the
}
else
{
- /* In sel-sched.c INSN_PRIORITY is not kept up to date.
+ /* In sel-sched.c INSN_PRIORITY is not kept up to date.
Use EXPR_PRIORITY instead. */
sel_add_to_insn_priority (insn, amount);
}
{
if (DEBUG_INSN_P (DEP_CON (dep)))
dbgcount++;
- else
+ else if (!DEBUG_INSN_P (DEP_PRO (dep)))
nodbgcount++;
}
different than that of normal instructions. Instead of walking
through INSN_FORW_DEPS (check) list, we walk through
INSN_FORW_DEPS list of each instruction in the corresponding
- recovery block. */
+ recovery block. */
/* Selective scheduling does not define RECOVERY_BLOCK macro. */
rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
this_priority = next_priority;
}
}
-
+
twin = PREV_INSN (twin);
}
while (twin != prev_first);
qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
while (0)
+/* Set up info about the register pressure impact of scheduling
+   INSN at the current scheduling point.  */
+static void
+setup_insn_reg_pressure_info (rtx insn)
+{
+ int i, change, before, after, hard_regno;
+ int excess_cost_change;
+ enum machine_mode mode;
+ enum reg_class cl;
+ struct reg_pressure_data *pressure_info;
+ int *max_reg_pressure;
+ struct reg_use_data *use;
+ static int death[N_REG_CLASSES];
+
+ excess_cost_change = 0;
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ death[ira_reg_class_cover[i]] = 0;
+ for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
+ if (dying_use_p (use))
+ {
+ cl = sched_regno_cover_class[use->regno];
+ if (use->regno < FIRST_PSEUDO_REGISTER)
+ death[cl]++;
+ else
+ death[cl] += ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
+ }
+ pressure_info = INSN_REG_PRESSURE (insn);
+ max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
+ gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ {
+ cl = ira_reg_class_cover[i];
+ gcc_assert (curr_reg_pressure[cl] >= 0);
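+      /* BEFORE and AFTER measure how far the maximal pressure of
+	 class CL exceeds the available registers before and after
+	 scheduling INSN; the increase in excess is charged at the
+	 cost of a memory store plus a load, as an estimate of the
+	 spill code it would cause.  */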
+ change = (int) pressure_info[i].set_increase - death[cl];
+ before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
+ after = MAX (0, max_reg_pressure[i] + change
+ - ira_available_class_regs[cl]);
+ hard_regno = ira_class_hard_regs[cl][0];
+ gcc_assert (hard_regno >= 0);
+ mode = reg_raw_mode[hard_regno];
+ excess_cost_change += ((after - before)
+ * (ira_memory_move_cost[mode][cl][0]
+ + ira_memory_move_cost[mode][cl][1]));
+ }
+ INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
+}
+
/* Returns a positive value if x is preferred; returns a negative value if
y is preferred. Should never return 0, since that will make the sort
unstable. */
rtx tmp2 = *(const rtx *) x;
rtx last;
int tmp_class, tmp2_class;
- int val, priority_val, weight_val, info_val;
+ int val, priority_val, info_val;
if (MAY_HAVE_DEBUG_INSNS)
{
}
/* The insn in a schedule group should be issued the first. */
- if (flag_sched_group_heuristic &&
+ if (flag_sched_group_heuristic &&
SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
return SCHED_GROUP_P (tmp2) ? 1 : -1;
/* Make sure that priority of TMP and TMP2 are initialized. */
gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
+ if (sched_pressure_p)
+ {
+ int diff;
+
+      /* Prefer the insn whose scheduling results in the smallest
+	 register pressure excess cost, counting the cycles it would
+	 have to stall as part of that cost.  */
+ if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
+ + (INSN_TICK (tmp) > clock_var
+ ? INSN_TICK (tmp) - clock_var : 0)
+ - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
+ - (INSN_TICK (tmp2) > clock_var
+ ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
+ return diff;
+ }
+
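+  /* With register pressure scheduling, an insn that is ready on the
+     current cycle wins over one that would stall; between two
+     stalled insns, the one that becomes ready sooner wins.  */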
+ if (sched_pressure_p
+ && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
+ {
+ if (INSN_TICK (tmp) <= clock_var)
+ return -1;
+ else if (INSN_TICK (tmp2) <= clock_var)
+ return 1;
+ else
+ return INSN_TICK (tmp) - INSN_TICK (tmp2);
+ }
/* Prefer insn with higher priority. */
priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
dw1 = ds_weak (ds1);
else
dw1 = NO_DEP_WEAK;
-
+
ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
if (ds2)
dw2 = ds_weak (ds2);
return dw;
}
- /* Prefer an insn with smaller contribution to registers-pressure. */
- if (flag_sched_reg_pressure_heuristic && !reload_completed &&
- (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
- return weight_val;
-
info_val = (*current_sched_info->rank) (tmp, tmp2);
if (flag_sched_rank_heuristic && info_val)
return info_val;
ready_remove_first (struct ready_list *ready)
{
rtx t;
-
+
gcc_assert (ready->n_ready);
t = ready->vec[ready->first--];
ready->n_ready--;
ready_element (struct ready_list *ready, int index)
{
gcc_assert (ready->n_ready && index < ready->n_ready);
-
+
return ready->vec[ready->first - index];
}
void
ready_sort (struct ready_list *ready)
{
+ int i;
rtx *first = ready_lastpos (ready);
+
+ if (sched_pressure_p)
+ {
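+      /* Recompute each ready insn's pressure cost: earlier
+	 scheduling decisions may have changed which of its uses are
+	 dying.  */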
+ for (i = 0; i < ready->n_ready; i++)
+ setup_insn_reg_pressure_info (first[i]);
+ }
SCHED_SORT (first, ready->n_ready);
}
targetm.sched.dfa_pre_cycle_insn ());
state_transition (state, NULL);
-
+
if (targetm.sched.dfa_post_cycle_insn)
state_transition (state,
targetm.sched.dfa_post_cycle_insn ());
/* Clock at which the previous instruction was issued. */
static int last_clock_var;
+/* Update register pressure after scheduling INSN. */
+static void
+update_register_pressure (rtx insn)
+{
+ struct reg_use_data *use;
+ struct reg_set_data *set;
+
+ for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
+ if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
+ mark_regno_birth_or_death (use->regno, false);
+ for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
+ mark_regno_birth_or_death (set->regno, true);
+}
+
+/* Set up or update (if UPDATE_P) max register pressure (see its
+ meaning in sched-int.h::_haifa_insn_data) for all current BB insns
+ after insn AFTER. */
+static void
+setup_insn_max_reg_pressure (rtx after, bool update_p)
+{
+ int i, p;
+ bool eq_p;
+ rtx insn;
+ static int max_reg_pressure[N_REG_CLASSES];
+
+ save_reg_pressure ();
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ max_reg_pressure[ira_reg_class_cover[i]]
+ = curr_reg_pressure[ira_reg_class_cover[i]];
+ for (insn = NEXT_INSN (after);
+ insn != NULL_RTX && ! BARRIER_P (insn)
+ && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
+ insn = NEXT_INSN (insn))
+ if (NONDEBUG_INSN_P (insn))
+ {
+ eq_p = true;
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ {
+ p = max_reg_pressure[ira_reg_class_cover[i]];
+ if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
+ {
+ eq_p = false;
+ INSN_MAX_REG_PRESSURE (insn)[i]
+ = max_reg_pressure[ira_reg_class_cover[i]];
+ }
+ }
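+	/* When updating, stop as soon as the recorded max pressures
+	   already match the running maximum: the values of the
+	   remaining insns are unchanged.  */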
+ if (update_p && eq_p)
+ break;
+ update_register_pressure (insn);
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ if (max_reg_pressure[ira_reg_class_cover[i]]
+ < curr_reg_pressure[ira_reg_class_cover[i]])
+ max_reg_pressure[ira_reg_class_cover[i]]
+ = curr_reg_pressure[ira_reg_class_cover[i]];
+ }
+ restore_reg_pressure ();
+}
+
+/* Update the current register pressure after scheduling INSN.  Also
+   update the max register pressure for the unscheduled insns of the
+   current BB.  */
+static void
+update_reg_and_insn_max_reg_pressure (rtx insn)
+{
+ int i;
+ int before[N_REG_CLASSES];
+
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ before[i] = curr_reg_pressure[ira_reg_class_cover[i]];
+ update_register_pressure (insn);
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ if (curr_reg_pressure[ira_reg_class_cover[i]] != before[i])
+ break;
+ if (i < ira_reg_class_cover_size)
+ setup_insn_max_reg_pressure (insn, true);
+}
+
+/* Set up register pressure at the beginning of basic block BB whose
+   insns start after insn AFTER.  Also set up max register pressure
+   for all insns of the basic block.  */
+void
+sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
+{
+ gcc_assert (sched_pressure_p);
+ initiate_bb_reg_pressure_info (bb);
+ setup_insn_max_reg_pressure (after, false);
+}
+
/* INSN is the "currently executing insn". Launch each insn which was
waiting on INSN. READY is the ready list which contains the insns
that are ready to fire. CLOCK is the current cycle. The function
{
sd_iterator_def sd_it;
dep_t dep;
+ int i;
int advance = 0;
if (sched_verbose >= 1)
{
+ struct reg_pressure_data *pressure_info;
char buf[2048];
print_insn (buf, insn, 0);
fprintf (sched_dump, "nothing");
else
print_reservation (sched_dump, insn);
+ pressure_info = INSN_REG_PRESSURE (insn);
+ if (pressure_info != NULL)
+ {
+ fputc (':', sched_dump);
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ fprintf (sched_dump, "%s%+d(%d)",
+ reg_class_names[ira_reg_class_cover[i]],
+ pressure_info[i].set_increase, pressure_info[i].change);
+ }
fputc ('\n', sched_dump);
}
+ if (sched_pressure_p)
+ update_reg_and_insn_max_reg_pressure (insn);
+
/* Scheduling instruction should have all its dependencies resolved and
should have been removed from the ready list. */
gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
+ /* Reset debug insns invalidated by moving this insn. */
+ if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
+ for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
+ sd_iterator_cond (&sd_it, &dep);)
+ {
+ rtx dbg = DEP_PRO (dep);
+ struct reg_use_data *use, *next;
+
+ gcc_assert (DEBUG_INSN_P (dbg));
+
+ if (sched_verbose >= 6)
+ fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
+ INSN_UID (dbg));
+
+ /* ??? Rather than resetting the debug insn, we might be able
+ to emit a debug temp before the just-scheduled insn, but
+ this would involve checking that the expression at the
+ point of the debug insn is equivalent to the expression
+ before the just-scheduled insn. They might not be: the
+ expression in the debug insn may depend on other insns not
+ yet scheduled that set MEMs, REGs or even other debug
+ insns. It's not clear that attempting to preserve debug
+ information in these cases is worth the effort, given how
+ uncommon these resets are and the likelihood that the debug
+ temps introduced won't survive the schedule change. */
+ INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
+ df_insn_rescan (dbg);
+
+ /* Unknown location doesn't use any registers. */
+ for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
+ {
+ struct reg_use_data *prev = use;
+
+ /* Remove use from the cyclic next_regno_use chain first. */
+ while (prev->next_regno_use != use)
+ prev = prev->next_regno_use;
+ prev->next_regno_use = use->next_regno_use;
+ next = use->next_insn_use;
+ free (use);
+ }
+ INSN_REG_USE_LIST (dbg) = NULL;
+
+ /* We delete rather than resolve these deps, otherwise we
+ crash in sched_free_deps(), because forward deps are
+ expected to be released before backward deps. */
+ sd_delete_dep (sd_it);
+ }
+
gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
if (INSN_TICK (insn) > clock_var)
/* INSN has been prematurely moved from the queue to the ready list.
This is possible only if following flag is set. */
- gcc_assert (flag_sched_stalled_insns);
+ gcc_assert (flag_sched_stalled_insns);
/* ??? Probably, if INSN is scheduled prematurely, we should leave
INSN_TICK untouched. This is a machine-dependent issue, actually. */
advancing the iterator. */
sd_resolve_dep (sd_it);
+ /* Don't bother trying to mark next as ready if insn is a debug
+ insn. If insn is the last hard dependency, it will have
+ already been discounted. */
+ if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
+ continue;
+
if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
{
- int effective_cost;
-
+ int effective_cost;
+
effective_cost = try_ready (next);
-
+
if (effective_cost >= 0
&& SCHED_GROUP_P (next)
&& advance < effective_cost)
/* Functions for handling of notes. */
-/* Insert the INSN note at the end of the notes list. */
-static void
-add_to_note_list (rtx insn, rtx *note_list_end_p)
-{
- PREV_INSN (insn) = *note_list_end_p;
- if (*note_list_end_p)
- NEXT_INSN (*note_list_end_p) = insn;
- *note_list_end_p = insn;
-}
-
/* Add note list that ends on FROM_END to the end of TO_ENDP. */
void
concat_note_lists (rtx from_end, rtx *to_endp)
{
rtx from_start;
+  /* It's easy when we have nothing to concat.  */
if (from_end == NULL)
- /* It's easy when have nothing to concat. */
return;
+  /* It's also easy when the destination is empty.  */
if (*to_endp == NULL)
- /* It's also easy when destination is empty. */
{
*to_endp = from_end;
return;
}
from_start = from_end;
- /* A note list should be traversed via PREV_INSN. */
- while (PREV_INSN (from_start) != NULL)
+ while (PREV_INSN (from_start) != NULL)
from_start = PREV_INSN (from_start);
- add_to_note_list (from_start, to_endp);
+ PREV_INSN (from_start) = *to_endp;
+ NEXT_INSN (*to_endp) = from_start;
*to_endp = from_end;
}
-/* Delete notes beginning with INSN and put them in the chain
- of notes ended by NOTE_LIST.
- Returns the insn following the notes. */
-static rtx
-unlink_other_notes (rtx insn, rtx tail)
+/* Delete notes between HEAD and TAIL and put them in the chain
+ of notes ended by NOTE_LIST. */
+void
+remove_notes (rtx head, rtx tail)
{
- rtx prev = PREV_INSN (insn);
+ rtx next_tail, insn, next;
- while (insn != tail && NOTE_NOT_BB_P (insn))
- {
- rtx next = NEXT_INSN (insn);
- basic_block bb = BLOCK_FOR_INSN (insn);
-
- /* Delete the note from its current position. */
- if (prev)
- NEXT_INSN (prev) = next;
- if (next)
- PREV_INSN (next) = prev;
+ note_list = 0;
+ if (head == tail && !INSN_P (head))
+ return;
- if (bb)
- {
- /* Basic block can begin with either LABEL or
- NOTE_INSN_BASIC_BLOCK. */
- gcc_assert (BB_HEAD (bb) != insn);
+ next_tail = NEXT_INSN (tail);
+ for (insn = head; insn != next_tail; insn = next)
+ {
+ next = NEXT_INSN (insn);
+ if (!NOTE_P (insn))
+ continue;
- /* Check if we are removing last insn in the BB. */
- if (BB_END (bb) == insn)
- BB_END (bb) = prev;
- }
+ switch (NOTE_KIND (insn))
+ {
+ case NOTE_INSN_BASIC_BLOCK:
+ continue;
- /* See sched_analyze to see how these are handled. */
- if (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
- && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END)
-	add_to_note_list (insn, &note_list);
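+	  /* An epilogue-begin note in the middle of the range cannot
+	     simply be saved on NOTE_LIST: turn it into a REG_SAVE_NOTE
+	     on the next insn so that reemit_notes can restore it in
+	     the right place.  */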
+ case NOTE_INSN_EPILOGUE_BEG:
+ if (insn != tail)
+ {
+ remove_insn (insn);
+ add_reg_note (next, REG_SAVE_NOTE,
+ GEN_INT (NOTE_INSN_EPILOGUE_BEG));
+ break;
+ }
+ /* FALLTHRU */
- insn = next;
- }
+ default:
+ remove_insn (insn);
+
+	  /* Add the note to the list that ends at NOTE_LIST.  */
+ PREV_INSN (insn) = note_list;
+ NEXT_INSN (insn) = NULL_RTX;
+ if (note_list)
+ NEXT_INSN (note_list) = insn;
+ note_list = insn;
+ break;
+ }
- if (insn == tail)
- {
- gcc_assert (sel_sched_p ());
- return prev;
+ gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
}
-
- return insn;
}
+
/* Return the head and tail pointers of ebb starting at BEG and ending
at END. */
void
return 1;
}
-/* Delete notes between HEAD and TAIL and put them in the chain
- of notes ended by NOTE_LIST. */
-static void
-rm_other_notes (rtx head, rtx tail)
-{
- rtx next_tail;
- rtx insn;
-
- note_list = 0;
- if (head == tail && (! INSN_P (head)))
- return;
-
- next_tail = NEXT_INSN (tail);
- for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
- {
- rtx prev;
-
- /* Farm out notes, and maybe save them in NOTE_LIST.
- This is needed to keep the debugger from
- getting completely deranged. */
- if (NOTE_NOT_BB_P (insn))
- {
- prev = insn;
- insn = unlink_other_notes (insn, next_tail);
-
- gcc_assert ((sel_sched_p ()
- || prev != tail) && prev != head && insn != next_tail);
- }
- }
-}
-
-/* Same as above, but also process REG_SAVE_NOTEs of HEAD. */
-void
-remove_notes (rtx head, rtx tail)
-{
- /* rm_other_notes only removes notes which are _inside_ the
- block---that is, it won't remove notes before the first real insn
- or after the last real insn of the block. So if the first insn
- has a REG_SAVE_NOTE which would otherwise be emitted before the
- insn, it is redundant with the note before the start of the
- block, and so we have to take it out. */
- if (INSN_P (head))
- {
- rtx note;
-
- for (note = REG_NOTES (head); note; note = XEXP (note, 1))
- if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
- remove_note (head, note);
- }
-
- /* Remove remaining note insns from the block, save them in
- note_list. These notes are restored at the end of
- schedule_block (). */
- rm_other_notes (head, tail);
-}
-
/* Restore-other-notes: NOTE_LIST is the end of a chain of notes
previously found among the insns. Insert them just before HEAD. */
rtx
return head;
}
-/* Functions for computation of registers live/usage info. */
-
-/* This function looks for a new register being defined.
- If the destination register is already used by the source,
- a new register is not needed. */
-static int
-find_set_reg_weight (const_rtx x)
-{
- if (GET_CODE (x) == CLOBBER
- && register_operand (SET_DEST (x), VOIDmode))
- return 1;
- if (GET_CODE (x) == SET
- && register_operand (SET_DEST (x), VOIDmode))
- {
- if (REG_P (SET_DEST (x)))
- {
- if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
- return 1;
- else
- return 0;
- }
- return 1;
- }
- return 0;
-}
-
-/* Calculate INSN_REG_WEIGHT for INSN. */
-static void
-find_insn_reg_weight (const_rtx insn)
-{
- int reg_weight = 0;
- rtx x;
-
- /* Handle register life information. */
- if (! INSN_P (insn))
- return;
-
- /* Increment weight for each register born here. */
- x = PATTERN (insn);
- reg_weight += find_set_reg_weight (x);
- if (GET_CODE (x) == PARALLEL)
- {
- int j;
- for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
- {
- x = XVECEXP (PATTERN (insn), 0, j);
- reg_weight += find_set_reg_weight (x);
- }
- }
- /* Decrement weight for each register that dies here. */
- for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
- {
- if (REG_NOTE_KIND (x) == REG_DEAD
- || REG_NOTE_KIND (x) == REG_UNUSED)
- reg_weight--;
- }
-
- INSN_REG_WEIGHT (insn) = reg_weight;
-}
-
/* Move insns that became ready to fire from queue to ready list. */
static void
}
/* Used by early_queue_to_ready. Determines whether it is "ok" to
- prematurely move INSN from the queue to the ready list. Currently,
- if a target defines the hook 'is_costly_dependence', this function
+ prematurely move INSN from the queue to the ready list. Currently,
+ if a target defines the hook 'is_costly_dependence', this function
uses the hook to check whether there exist any dependences which are
- considered costly by the target, between INSN and other insns that
+ considered costly by the target, between INSN and other insns that
have already been scheduled. Dependences are checked up to Y cycles
back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
- controlling this value.
- (Other considerations could be taken into account instead (or in
+ controlling this value.
+ (Other considerations could be taken into account instead (or in
addition) depending on user flags and target hooks. */
-static bool
+static bool
ok_for_early_queue_removal (rtx insn)
{
int n_cycles;
break;
}
- if (!prev_insn)
+ if (!prev_insn)
break;
- prev_insn = PREV_INSN (prev_insn);
+ prev_insn = PREV_INSN (prev_insn);
}
}
/* Remove insns from the queue, before they become "ready" with respect
to FU latency considerations. */
-static int
+static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
rtx insn;
int insns_removed = 0;
/*
- Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
- function:
+ Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
+ function:
- X == 0: There is no limit on how many queued insns can be removed
+ X == 0: There is no limit on how many queued insns can be removed
prematurely. (flag_sched_stalled_insns = -1).
- X >= 1: Only X queued insns can be removed prematurely in each
+ X >= 1: Only X queued insns can be removed prematurely in each
invocation. (flag_sched_stalled_insns = X).
Otherwise: Early queue removal is disabled.
(flag_sched_stalled_insns = 0)
*/
- if (! flag_sched_stalled_insns)
+ if (! flag_sched_stalled_insns)
return 0;
for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
print_rtl_single (sched_dump, insn);
memcpy (temp_state, state, dfa_state_size);
- if (recog_memoized (insn) < 0)
+ if (recog_memoized (insn) < 0)
/* non-negative to indicate that it's not ready
to avoid infinite Q->R->Q->R... */
cost = 0;
fprintf (sched_dump, "transition cost = %d\n", cost);
move_to_ready = false;
- if (cost < 0)
+ if (cost < 0)
{
move_to_ready = ok_for_early_queue_removal (insn);
if (move_to_ready == true)
q_size -= 1;
ready_add (ready, insn, false);
- if (prev_link)
+ if (prev_link)
XEXP (prev_link, 1) = next_link;
else
insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
link = next_link;
} /* while link */
- } /* if link */
+ } /* if link */
} /* for stalls.. */
- return insns_removed;
+ return insns_removed;
}
p = ready_lastpos (ready);
for (i = 0; i < ready->n_ready; i++)
- fprintf (sched_dump, " %s", (*current_sched_info->print_insn) (p[i], 0));
+ {
+ fprintf (sched_dump, " %s:%d",
+ (*current_sched_info->print_insn) (p[i], 0),
+ INSN_LUID (p[i]));
+ if (sched_pressure_p)
+ fprintf (sched_dump, "(cost=%d",
+ INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
+ if (INSN_TICK (p[i]) > clock_var)
+ fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
+ if (sched_pressure_p)
+ fprintf (sched_dump, ")");
+ }
fprintf (sched_dump, "\n");
}
-/* Search INSN for REG_SAVE_NOTE note pairs for
- NOTE_INSN_EHREGION_{BEG,END}; and convert them back into
- NOTEs. The REG_SAVE_NOTE note following first one is contains the
- saved value for NOTE_BLOCK_NUMBER which is useful for
- NOTE_INSN_EH_REGION_{BEG,END} NOTEs. */
+/* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
+ NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
+ replaces the epilogue note in the correct basic block. */
void
reemit_notes (rtx insn)
{
int jump_p = 0;
bb = BLOCK_FOR_INSN (insn);
-
+
/* BB_HEAD is either LABEL or NOTE. */
- gcc_assert (BB_HEAD (bb) != insn);
+ gcc_assert (BB_HEAD (bb) != insn);
if (BB_END (bb) == insn)
/* If this is last instruction in BB, move end marker one
&& IS_SPECULATION_BRANCHY_CHECK_P (insn))
|| (common_sched_info->sched_pass_id
== SCHED_EBB_PASS));
-
+
gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
BB_END (bb) = PREV_INSN (insn);
&& (LABEL_P (note)
|| BARRIER_P (note)))
note = NEXT_INSN (note);
-
+
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
}
else
}
df_insn_change_bb (insn, bb);
-
+
/* Update BB_END, if needed. */
if (BB_END (bb) == last)
- BB_END (bb) = insn;
+ BB_END (bb) = insn;
}
- SCHED_GROUP_P (insn) = 0;
+ SCHED_GROUP_P (insn) = 0;
}
/* Return true if scheduling INSN will finish current clock cycle. */
max_issue (struct ready_list *ready, int privileged_n, state_t state,
int *index)
{
- int n, i, all, n_ready, best, delay, tries_num, points = -1, max_points;
+ int n, i, all, n_ready, best, delay, tries_num, max_points;
int more_issue;
struct choice_entry *top;
rtx insn;
/* ??? We used to assert here that we never issue more insns than issue_rate.
However, some targets (e.g. MIPS/SB1) claim lower issue rate than can be
achieved to get better performance. Until these targets are fixed to use
- scheduler hooks to manipulate insns priority instead, the assert should
- be disabled.
+ scheduler hooks to manipulate insns priority instead, the assert should
+ be disabled.
gcc_assert (more_issue >= 0); */
/* This is the index of the insn issued first in this
solution. */
*index = choice_stack [1].index;
- points = top->n;
if (top->n == max_points || best == all)
break;
}
}
/* Restore the original state of the DFA. */
- memcpy (state, choice_stack->state, dfa_state_size);
+ memcpy (state, choice_stack->state, dfa_state_size);
return best;
}
rtx insn;
int try_data = 1, try_control = 1;
ds_t ts;
-
+
insn = ready_element (ready, 0);
if (INSN_CODE (insn) < 0)
{
x = ready_element (ready, i);
s = TODO_SPEC (x);
-
+
if (spec_info->flags & PREFER_NON_DATA_SPEC
&& !(s & DATA_SPEC))
- {
+ {
try_data = 0;
if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
|| !try_control)
break;
}
-
+
if (spec_info->flags & PREFER_NON_CONTROL_SPEC
&& !(s & CONTROL_SPEC))
{
{
*insn_ptr = ready_remove_first (ready);
if (sched_verbose >= 4)
- fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
+ fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
(*current_sched_info->print_insn) (*insn_ptr, 0));
return 0;
}
else
{
- if (sched_verbose >= 4)
+ if (sched_verbose >= 4)
fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
(*current_sched_info->print_insn)
(ready_element (ready, index), 0));
-
+
*insn_ptr = ready_remove (ready, index);
return 0;
}
/* Start just before the beginning of time. */
clock_var = -1;
- /* We need queue and ready lists and clock_var be initialized
+  /* We need queue and ready lists and clock_var to be initialized
in try_ready () (which is called through init_ready_list ()). */
(*current_sched_info->init_ready_list) ();
fprintf (sched_dump, ";;\tReady list (t = %3d): ",
clock_var);
debug_ready_list (&ready);
+ if (sched_pressure_p)
+ print_curr_reg_pressure ();
}
- if (ready.n_ready == 0
- && can_issue_more
- && reload_completed)
+ if (ready.n_ready == 0
+ && can_issue_more
+ && reload_completed)
{
/* Allow scheduling insns directly from the queue in case
there's nothing better to do (ready list is empty) but
else
insn = ready_remove_first (&ready);
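+	  /* Under register pressure scheduling stalled insns stay in
+	     the ready list; if the chosen insn is still not ready on
+	     this cycle, put it back and start a new cycle.  */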
+ if (sched_pressure_p && INSN_TICK (insn) > clock_var)
+ {
+ ready_add (&ready, insn, true);
+ advance = 1;
+ break;
+ }
+
if (targetm.sched.dfa_new_cycle
&& targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
insn, last_clock_var,
to have the highest priority (so it will be returned by
the ready_remove_first call above), we invoke
ready_add (&ready, insn, true).
- But, still, there is one issue: INSN can be later
- discarded by scheduler's front end through
+ But, still, there is one issue: INSN can be later
+ discarded by scheduler's front end through
current_sched_info->can_schedule_ready_p, hence, won't
- be issued next. */
+ be issued next. */
{
ready_add (&ready, insn, true);
break;
fatal error for unrecognizable insns. */
cost = 0;
}
+ else if (sched_pressure_p)
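+	    /* Stalled insns were already re-queued above, so the DFA
+	       lookahead cost is not consulted here.  */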
+ cost = 0;
else
{
cost = state_transition (temp_state, insn);
advance = cost;
break;
}
-
+
continue;
}
continue;
}
- /* DECISION is made. */
-
+ /* DECISION is made. */
+
if (TODO_SPEC (insn) & SPECULATIVE)
generate_recovery_code (insn);
- if (control_flow_insn_p (last_scheduled_insn)
+ if (control_flow_insn_p (last_scheduled_insn)
/* This is used to switch basic blocks by request
from scheduler front-end (actually, sched-ebb.c only).
This is used to process blocks with single fallthru
{
*target_bb = current_sched_info->advance_target_bb
(*target_bb, 0);
-
+
if (sched_verbose)
{
rtx x;
last_scheduled_insn = bb_note (*target_bb);
}
-
+
/* Update counters, etc in the scheduler's front end. */
(*current_sched_info->begin_schedule_ready) (insn,
last_scheduled_insn);
-
+
move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
reemit_notes (insn);
last_scheduled_insn = insn;
-
+
if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
{
cycle_issued_insns++;
else if (GET_CODE (PATTERN (insn)) != USE
&& GET_CODE (PATTERN (insn)) != CLOBBER)
can_issue_more--;
-
advance = schedule_insn (insn);
/* After issuing an asm insn we should start a new cycle. */
/* Sanity check -- queue must be empty now. Meaningless if region has
multiple bbs. */
gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
- else
+ else
{
/* We must maintain QUEUE_INDEX between blocks in region. */
for (i = ready.n_ready - 1; i >= 0; i--)
{
rtx x;
-
+
x = ready_element (&ready, i);
QUEUE_INDEX (x) = QUEUE_NOWHERE;
TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
}
- if (q_size)
+ if (q_size)
for (i = 0; i <= max_insn_queue_index; i++)
{
rtx link;
{
rtx insn;
int n_insn;
- int sched_max_insns_priority =
+ int sched_max_insns_priority =
current_sched_info->sched_max_insns_priority;
rtx prev_head;
? stderr : dump_file);
}
-/* Initialize some global state for the scheduler. This function works
+/* Initialize some global state for the scheduler. This function works
with the common data shared between all the schedulers. It is called
from the scheduler specific initialization routine. */
flag_schedule_speculative_load = 0;
#endif
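+  /* Pressure-sensitive scheduling is done only by the first
+     scheduling pass, before register allocation, and only for the
+     region scheduler.  */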
+ sched_pressure_p = (flag_sched_pressure && ! reload_completed
+ && common_sched_info->sched_pass_id == SCHED_RGN_PASS);
+ if (sched_pressure_p)
+ ira_setup_eliminable_regset ();
+
/* Initialize SPEC_INFO. */
if (targetm.sched.set_sched_flags)
{
}
df_analyze ();
-
- /* Do not run DCE after reload, as this can kill nops inserted
+
+ /* Do not run DCE after reload, as this can kill nops inserted
by bundling. */
if (reload_completed)
df_clear_flags (DF_LR_RUN_DCE);
targetm.sched.md_init_global (sched_dump, sched_verbose,
get_max_uid () + 1);
+ if (sched_pressure_p)
+ {
+ int i, max_regno = max_reg_num ();
+
+ ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
+ sched_regno_cover_class
+ = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
+ for (i = 0; i < max_regno; i++)
+ sched_regno_cover_class[i]
+ = (i < FIRST_PSEUDO_REGISTER
+ ? ira_class_translate[REGNO_REG_CLASS (i)]
+ : reg_cover_class (i));
+ curr_reg_live = BITMAP_ALLOC (NULL);
+ saved_reg_live = BITMAP_ALLOC (NULL);
+ region_ref_regs = BITMAP_ALLOC (NULL);
+ }
+
curr_state = xmalloc (dfa_state_size);
}
sched_finish ();
}
-/* Free global data used during insn scheduling. This function works with
+/* Free global data used during insn scheduling. This function works with
the common data shared between the schedulers. */
void
sched_finish (void)
{
haifa_finish_h_i_d ();
+ if (sched_pressure_p)
+ {
+ free (sched_regno_cover_class);
+ BITMAP_FREE (region_ref_regs);
+ BITMAP_FREE (saved_reg_live);
+ BITMAP_FREE (curr_reg_live);
+ }
free (curr_state);
if (targetm.sched.md_finish_global)
int next_clock = clock_var + 1;
bitmap_initialize (&processed, 0);
-
+
/* Iterates over scheduled instructions and fix their INSN_TICKs and
INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
across different blocks. */
int tick;
sd_iterator_def sd_it;
dep_t dep;
-
+
tick = INSN_TICK (head);
gcc_assert (tick >= MIN_TICK);
-
+
/* Fix INSN_TICK of instruction from just scheduled block. */
if (!bitmap_bit_p (&processed, INSN_LUID (head)))
{
bitmap_set_bit (&processed, INSN_LUID (head));
tick -= next_clock;
-
+
if (tick < MIN_TICK)
tick = MIN_TICK;
-
- INSN_TICK (head) = tick;
+
+ INSN_TICK (head) = tick;
}
-
+
FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
{
rtx next;
-
+
next = DEP_CON (dep);
tick = INSN_TICK (next);
{
bitmap_set_bit (&processed, INSN_LUID (next));
tick -= next_clock;
-
+
if (tick < MIN_TICK)
tick = MIN_TICK;
-
+
if (tick > INTER_TICK (next))
INTER_TICK (next) = tick;
else
}
static int haifa_speculate_insn (rtx, ds_t, rtx *);
-
+
/* Check if NEXT is ready to be added to the ready or queue list.
If "yes", add it to the proper list.
Returns:
0 < N - queued for N cycles. */
int
try_ready (rtx next)
-{
+{
ds_t old_ts, *ts;
ts = &TODO_SPEC (next);
gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
&& ((old_ts & HARD_DEP)
|| (old_ts & SPECULATIVE)));
-
+
if (sd_lists_empty_p (next, SD_LIST_BACK))
/* NEXT has all its dependencies resolved. */
{
{
ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
+ if (DEBUG_INSN_P (DEP_PRO (dep))
+ && !DEBUG_INSN_P (next))
+ continue;
+
if (first_p)
{
first_p = false;
{
int res;
rtx new_pat;
-
+
gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
-
+
res = haifa_speculate_insn (next, *ts, &new_pat);
-
+
switch (res)
{
case -1:
so we won't reanalyze anything. */
*ts = (*ts & ~SPECULATIVE) | HARD_DEP;
break;
-
+
case 0:
/* We follow the rule, that every speculative insn
has non-null ORIG_PAT. */
if (!ORIG_PAT (next))
ORIG_PAT (next) = PATTERN (next);
break;
-
- case 1:
+
+ case 1:
if (!ORIG_PAT (next))
	/* If we are going to overwrite the original pattern of insn,
	   save it.  */
ORIG_PAT (next) = PATTERN (next);
-
+
haifa_change_pattern (next, new_pat);
break;
-
+
default:
gcc_unreachable ();
}
}
-
+
/* We need to restore pattern only if (*ts == 0), because otherwise it is
either correct (*ts & SPECULATIVE),
or we simply don't care (*ts & HARD_DEP). */
-
+
gcc_assert (!ORIG_PAT (next)
|| !IS_SPECULATION_BRANCHY_CHECK_P (next));
-
+
if (*ts & HARD_DEP)
{
/* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
control-speculative NEXT could have been discarded by sched-rgn.c
(the same case as when discarded by can_schedule_ready_p ()). */
/*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
-
+
change_queue_index (next, QUEUE_NOWHERE);
return -1;
}
else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
- /* We should change pattern of every previously speculative
+ /* We should change pattern of every previously speculative
instruction - and we determine if NEXT was speculative by using
ORIG_PAT field. Except one case - speculation checks have ORIG_PAT
pat too, so skip them. */
}
if (sched_verbose >= 2)
- {
+ {
int s = TODO_SPEC (next);
-
+
fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
(*current_sched_info->print_insn) (next, 0));
-
+
if (spec_info && spec_info->dump)
{
if (s & BEGIN_DATA)
}
fprintf (sched_dump, "\n");
- }
-
+ }
+
adjust_priority (next);
-
+
return fix_tick_ready (next);
}
full_p = (tick == INVALID_TICK);
FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
- {
+ {
rtx pro = DEP_PRO (dep);
int tick1;
-
+
gcc_assert (INSN_TICK (pro) >= MIN_TICK);
tick1 = INSN_TICK (pro) + dep_cost (dep);
INSN_TICK (next) = tick;
delay = tick - clock_var;
- if (delay <= 0)
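+  /* With register pressure scheduling every insn goes straight to
+     the ready list; stalls are handled at the point the insn is
+     chosen (see schedule_block).  */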
+ if (delay <= 0 || sched_pressure_p)
delay = QUEUE_READY;
change_queue_index (next, delay);
{
int i = QUEUE_INDEX (next);
- gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
+ gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
&& delay != 0);
gcc_assert (i != QUEUE_SCHEDULED);
-
+
if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
|| (delay < 0 && delay == i))
/* We have nothing to do. */
ready_remove_insn (next);
else if (i >= 0)
queue_remove (next);
-
+
/* Add it to the proper place. */
if (delay == QUEUE_READY)
ready_add (readyp, next, false);
else if (delay >= 1)
queue_insn (next, delay);
-
+
if (sched_verbose >= 2)
- {
+ {
fprintf (sched_dump, ";;\t\ttick updated: insn %s",
(*current_sched_info->print_insn) (next, 0));
-
+
if (delay == QUEUE_READY)
fprintf (sched_dump, " into ready\n");
else if (delay >= 1)
{
if (TODO_SPEC (insn) & BEGIN_SPEC)
begin_speculative_block (insn);
-
+
/* Here we have insn with no dependencies to
instructions other then CHECK_SPEC ones. */
-
+
if (TODO_SPEC (insn) & BE_IN_SPEC)
add_to_speculative_block (insn);
}
ds_t new_ds;
new_ds = (ds & ~BEGIN_SPEC) | fs;
-
+
if (/* consumer can 'be in speculative'. */
sched_insn_is_legitimate_for_speculation_p (consumer,
new_ds))
begin_speculative_block (rtx insn)
{
if (TODO_SPEC (insn) & BEGIN_DATA)
- nr_begin_data++;
+ nr_begin_data++;
if (TODO_SPEC (insn) & BEGIN_CONTROL)
nr_begin_control++;
TODO_SPEC (insn) &= ~BE_IN_SPEC;
gcc_assert (!TODO_SPEC (insn));
-
+
DONE_SPEC (insn) |= ts;
/* First we convert all simple checks to branchy. */
twin = XEXP (twins, 1);
free_INSN_LIST_node (twins);
- twins = twin;
+ twins = twin;
}
calc_priorities (priorities_roots);
if (e)
{
- /* We create two basic blocks:
+ /* We create two basic blocks:
1. Single instruction block is inserted right after E->SRC
- and has jump to
+ and has jump to
2. Empty block right before EXIT_BLOCK.
Between these two blocks recovery blocks will be emitted. */
basic_block single, empty;
rtx x, label;
- /* If the fallthrough edge to exit we've found is from the block we've
+ /* If the fallthrough edge to exit we've found is from the block we've
created before, don't do anything more. */
if (last == after_recovery)
return;
JUMP_LABEL (x) = label;
LABEL_NUSES (label)++;
haifa_init_insn (x);
-
+
emit_barrier_after (x);
sched_init_only_bb (empty, NULL);
if (sched_verbose >= 2 && spec_info->dump)
fprintf (spec_info->dump,
- ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
- last->index, single->index, empty->index);
+ ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
+ last->index, single->index, empty->index);
}
else
before_recovery = last;
rtx label;
rtx barrier;
basic_block rec;
-
+
haifa_recovery_bb_recently_added_p = true;
haifa_recovery_bb_ever_added_p = true;
if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
BB_SET_PARTITION (rec, BB_COLD_PARTITION);
-
- if (sched_verbose && spec_info->dump)
+
+ if (sched_verbose && spec_info->dump)
fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
rec->index);
{
rtx label;
rtx jump;
- edge e;
int edge_flags;
/* This is fixing of incoming edge. */
- /* ??? Which other flags should be specified? */
+ /* ??? Which other flags should be specified? */
if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
/* Partition type is the same, if it is "unpartitioned". */
edge_flags = EDGE_CROSSING;
else
edge_flags = 0;
-
- e = make_edge (first_bb, rec, edge_flags);
+
+ make_edge (first_bb, rec, edge_flags);
label = block_label (second_bb);
jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
JUMP_LABEL (jump) = label;
edge_flags = EDGE_CROSSING;
}
else
- edge_flags = 0;
+ edge_flags = 0;
- make_single_succ_edge (rec, second_bb, edge_flags);
+ make_single_succ_edge (rec, second_bb, edge_flags);
}
/* This function creates recovery code for INSN. If MUTATE_P is nonzero,
if (rec != EXIT_BLOCK_PTR)
{
/* To have mem_reg alive at the beginning of second_bb,
- we emit check BEFORE insn, so insn after splitting
+ we emit check BEFORE insn, so insn after splitting
insn will be at the beginning of second_bb, which will
provide us with the correct life information. */
check = emit_jump_insn_before (check, insn);
sched_create_recovery_edges (first_bb, rec, second_bb);
- sched_init_only_bb (second_bb, first_bb);
+ sched_init_only_bb (second_bb, first_bb);
sched_init_only_bb (rec, EXIT_BLOCK_PTR);
jump = BB_END (rec);
haifa_init_insn (jump);
}
- /* Move backward dependences from INSN to CHECK and
+ /* Move backward dependences from INSN to CHECK and
move forward dependences from INSN to TWIN. */
/* First, create dependencies between INSN's producers and CHECK & TWIN. */
check --TRUE--> producer ??? or ANTI ???
twin --TRUE--> producer
twin --ANTI--> check
-
+
If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
check --ANTI--> producer
twin --ANTI--> producer
If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
check ~~TRUE~~> producer
twin ~~TRUE~~> producer
- twin --ANTI--> check */
+ twin --ANTI--> check */
ds = DEP_STATUS (dep);
{
DEP_CON (new_dep) = twin;
sd_add_dep (new_dep, false);
- }
+ }
}
/* Second, remove backward dependencies of INSN. */
/* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
here. */
-
+
gcc_assert (!DONE_SPEC (insn));
-
+
if (!mutate_p)
- {
+ {
ds_t ts = TODO_SPEC (insn);
DONE_SPEC (insn) = ts & BEGIN_SPEC;
}
else
{
- if (spec_info->dump)
+ if (spec_info->dump)
fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
(*current_sched_info->print_insn) (insn, 0));
rtx link;
bitmap_initialize (&in_ready, 0);
-
+
/* NOTE - a basic block note. */
note = NEXT_INSN (BB_HEAD (rec));
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
sd_iterator_next (&sd_it);
}
}
-
+
insn = PREV_INSN (insn);
}
while (insn != note);
/* Fixing jump's dependences. */
insn = BB_HEAD (rec);
jump = BB_END (rec);
-
+
gcc_assert (LABEL_P (insn));
insn = NEXT_INSN (insn);
-
+
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
add_jump_dependencies (insn, jump);
}
if (LABEL_P (label))
note = NEXT_INSN (label);
else
- note = label;
+ note = label;
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
prev = PREV_INSN (label);
if (last == first)
break;
-
+
last = last->prev_bb;
}
while (1);
return;
/* We DON'T unlink basic block notes of the first block in the ebb. */
- first = first->next_bb;
+ first = first->next_bb;
/* Remember: FIRST is actually a second basic block in the ebb. */
while (first != EXIT_BLOCK_PTR
&& bb_header[first->index])
{
rtx prev, label, note, next;
-
+
label = bb_header[first->index];
prev = PREV_INSN (label);
next = NEXT_INSN (prev);
if (LABEL_P (label))
note = NEXT_INSN (label);
else
- note = label;
+ note = label;
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
bb_header[first->index] = 0;
NEXT_INSN (prev) = label;
NEXT_INSN (note) = next;
PREV_INSN (next) = note;
-
+
first = first->next_bb;
}
gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
|| IS_SPECULATION_BRANCHY_CHECK_P (jump));
-
+
if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
/* if jump_bb_next is not empty. */
BB_END (jump_bb) = BB_END (jump_bb_next);
bb = BLOCK_FOR_INSN (PREV_INSN (jump));
jump_bb = BLOCK_FOR_INSN (jump);
jump_bb_next = jump_bb->next_bb;
-
+
update_bb_for_insn (jump_bb);
-
+
gcc_assert (IS_SPECULATION_CHECK_P (jump)
|| IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
move_succs (&t, jump_bb_next);
df_mark_solutions_dirty ();
-
+
common_sched_info->fix_recovery_cfg
(bb->index, jump_bb->index, jump_bb_next->index);
}
insn = NEXT_INSN (insn);
if (insn == jump)
break;
-
+
if (dep_list_size (insn) == 0)
{
dep_def _new_dep, *new_dep = &_new_dep;
next_tail = NEXT_INSN (tail);
do
- {
- not_last = head != tail;
+ {
+ not_last = head != tail;
if (not_first)
gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
if (not_last)
gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
- if (LABEL_P (head)
+ if (LABEL_P (head)
|| (NOTE_INSN_BASIC_BLOCK_P (head)
&& (!not_first
|| (not_first && !LABEL_P (PREV_INSN (head))))))
{
- gcc_assert (bb == 0);
+ gcc_assert (bb == 0);
bb = BLOCK_FOR_INSN (head);
if (bb != 0)
- gcc_assert (BB_HEAD (bb) == head);
+ gcc_assert (BB_HEAD (bb) == head);
else
/* This is the case of jump table. See inside_basic_block_p (). */
gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
gcc_assert (inside_basic_block_p (head)
|| NOTE_P (head));
gcc_assert (BLOCK_FOR_INSN (head) == bb);
-
+
if (LABEL_P (head))
{
head = NEXT_INSN (head);
extend_insn ();
if (bbs != NULL)
- {
+ {
unsigned i;
basic_block x;
static void
extend_h_i_d (void)
{
- int reserve = (get_max_uid () + 1
+ int reserve = (get_max_uid () + 1
- VEC_length (haifa_insn_data_def, h_i_d));
- if (reserve > 0
+ if (reserve > 0
&& ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
{
- VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
+ VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
3 * get_max_uid () / 2);
sched_extend_target ();
}
if (INSN_LUID (insn) > 0)
{
INSN_COST (insn) = -1;
- find_insn_reg_weight (insn);
QUEUE_INDEX (insn) = QUEUE_NOWHERE;
INSN_TICK (insn) = INVALID_TICK;
INTER_TICK (insn) = INVALID_TICK;
void
haifa_finish_h_i_d (void)
{
+ int i;
+ haifa_insn_data_t data;
+ struct reg_use_data *use, *next;
+
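+  /* Free the register pressure records and register use lists
+     attached to the insns.  */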
+ for (i = 0; VEC_iterate (haifa_insn_data_def, h_i_d, i, data); i++)
+ {
+ if (data->reg_pressure != NULL)
+ free (data->reg_pressure);
+ for (use = data->reg_use_list; use != NULL; use = next)
+ {
+ next = use->next_insn_use;
+ free (use);
+ }
+ }
VEC_free (haifa_insn_data_def, heap, h_i_d);
}
e = split_block (first_bb, after);
gcc_assert (e->src == first_bb);
- /* sched_split_block emits note if *check == BB_END. Probably it
+ /* sched_split_block emits note if *check == BB_END. Probably it
is better to rip that note off. */
return e->dest;