/* Instruction scheduling pass.
- Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
/* Implementations of the sched_info functions for region scheduling. */
static void init_ready_list (void);
-static void begin_schedule_ready (rtx, rtx);
+static void begin_schedule_ready (rtx);
static int schedule_more_p (void);
static const char *ebb_print_insn (const_rtx, int);
static int rank (rtx, rtx);
static int ebb_contributes_to_priority (rtx, rtx);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx, rtx);
-static basic_block schedule_ebb (rtx, rtx);
static void debug_ebb_dependencies (rtx, rtx);
static void ebb_add_remove_insn (rtx, int);
static basic_block advance_target_bb (basic_block, rtx);
static void ebb_fix_recovery_cfg (int, int, int);
+/* Allocate memory and store the state of the frontend. Return the allocated
+ memory. */
+static void *
+save_ebb_state (void)
+{
+ int *p = XNEW (int);
+ *p = sched_rgn_n_insns;
+ return p;
+}
+
+/* Restore the state of the frontend from P_, then free it. */
+static void
+restore_ebb_state (void *p_)
+{
+ int *p = (int *)p_;
+ sched_rgn_n_insns = *p;
+ free (p_);
+}
+
/* Return nonzero if there are more insns that should be scheduled. */
static int
/* INSN is being scheduled after LAST. Update counters. */
static void
-begin_schedule_ready (rtx insn, rtx last)
+begin_schedule_ready (rtx insn ATTRIBUTE_UNUSED)
{
sched_rgn_n_insns++;
+}
+/* INSN is being moved to its place in the schedule, after LAST. */
+static void
+begin_move_insn (rtx insn, rtx last)
+{
if (BLOCK_FOR_INSN (insn) == last_bb
/* INSN is a jump in the last block, ... */
&& control_flow_insn_p (insn)
return 1;
}
- /* INSN is a JUMP_INSN, COND_SET is the set of registers that are
- conditionally set before INSN. Store the set of registers that
- must be considered as used by this jump in USED and that of
- registers that must be considered as set in SET. */
+ /* INSN is a JUMP_INSN. Store the set of registers that
+ must be considered as used by this jump in USED. */
void
-ebb_compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used,
- regset set)
+ebb_compute_jump_reg_dependencies (rtx insn, regset used)
{
basic_block b = BLOCK_FOR_INSN (insn);
edge e;
edge_iterator ei;
FOR_EACH_EDGE (e, ei, b->succs)
- if (e->flags & EDGE_FALLTHRU)
- /* The jump may be a by-product of a branch that has been merged
- in the main codepath after being conditionalized. Therefore
- it may guard the fallthrough block from using a value that has
- conditionally overwritten that of the main codepath. So we
- consider that it restores the value of the main codepath. */
- bitmap_and (set, df_get_live_in (e->dest), cond_set);
- else
+ if ((e->flags & EDGE_FALLTHRU) == 0)
bitmap_ior_into (used, df_get_live_in (e->dest));
}
ebb_add_remove_insn,
begin_schedule_ready,
+ begin_move_insn,
advance_target_bb,
+
+ save_ebb_state,
+ restore_ebb_state,
+
SCHED_EBB
/* We can create new blocks in begin_schedule_ready (). */
| NEW_BBS
basic_block last_block = NULL, bb;
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
- if (control_flow_insn_p (insn))
- {
- bb = BLOCK_FOR_INSN (insn);
- bb->aux = last_block;
- last_block = bb;
- last_jump = insn;
- }
- else if (INSN_P (insn) && last_jump != NULL_RTX)
- {
- classification = haifa_classify_insn (insn);
- prev = last_jump;
- switch (classification)
- {
- case PFREE_CANDIDATE:
- if (flag_schedule_speculative_load)
- {
- bb = earliest_block_with_similiar_load (last_block, insn);
- if (bb)
- {
- bb = (basic_block) bb->aux;
- if (!bb)
- break;
- prev = BB_END (bb);
- }
- }
- /* Fall through. */
- case TRAP_RISKY:
- case IRISKY:
- case PRISKY_CANDIDATE:
- /* ??? We could implement better checking PRISKY_CANDIDATEs
- analogous to sched-rgn.c. */
- /* We can not change the mode of the backward
- dependency because REG_DEP_ANTI has the lowest
- rank. */
- if (! sched_insns_conditions_mutex_p (insn, prev))
- {
- dep_def _dep, *dep = &_dep;
-
- init_dep (dep, prev, insn, REG_DEP_ANTI);
-
- if (!(current_sched_info->flags & USE_DEPS_LIST))
- {
- enum DEPS_ADJUST_RESULT res;
-
- res = sd_add_or_update_dep (dep, false);
-
- /* We can't change an existing dependency with
- DEP_ANTI. */
- gcc_assert (res != DEP_CHANGED);
- }
- else
- {
- if ((current_sched_info->flags & DO_SPECULATION)
- && (spec_info->mask & BEGIN_CONTROL))
- DEP_STATUS (dep) = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
- MAX_DEP_WEAK);
-
- sd_add_or_update_dep (dep, false);
-
- /* Dep_status could have been changed.
- No assertion here. */
- }
- }
-
- break;
-
- default:
- break;
- }
- }
+ {
+ add_delay_dependencies (insn);
+ if (control_flow_insn_p (insn))
+ {
+ bb = BLOCK_FOR_INSN (insn);
+ bb->aux = last_block;
+ last_block = bb;
+ /* Ensure blocks stay in the same order. */
+ if (last_jump)
+ add_dependence (insn, last_jump, REG_DEP_ANTI);
+ last_jump = insn;
+ }
+ else if (INSN_P (insn) && last_jump != NULL_RTX)
+ {
+ classification = haifa_classify_insn (insn);
+ prev = last_jump;
+
+ switch (classification)
+ {
+ case PFREE_CANDIDATE:
+ if (flag_schedule_speculative_load)
+ {
+ bb = earliest_block_with_similiar_load (last_block, insn);
+ if (bb)
+ {
+ bb = (basic_block) bb->aux;
+ if (!bb)
+ break;
+ prev = BB_END (bb);
+ }
+ }
+ /* Fall through. */
+ case TRAP_RISKY:
+ case IRISKY:
+ case PRISKY_CANDIDATE:
+ /* ??? We could implement better checking PRISKY_CANDIDATEs
+ analogous to sched-rgn.c. */
+ /* We can not change the mode of the backward
+ dependency because REG_DEP_ANTI has the lowest
+ rank. */
+ if (! sched_insns_conditions_mutex_p (insn, prev))
+ {
+ dep_def _dep, *dep = &_dep;
+
+ init_dep (dep, prev, insn, REG_DEP_ANTI);
+
+ if (!(current_sched_info->flags & USE_DEPS_LIST))
+ {
+ enum DEPS_ADJUST_RESULT res;
+
+ res = sd_add_or_update_dep (dep, false);
+
+ /* We can't change an existing dependency with
+ DEP_ANTI. */
+ gcc_assert (res != DEP_CHANGED);
+ }
+ else
+ {
+ if ((current_sched_info->flags & DO_SPECULATION)
+ && (spec_info->mask & BEGIN_CONTROL))
+ DEP_STATUS (dep) = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
+ MAX_DEP_WEAK);
+
+ sd_add_or_update_dep (dep, false);
+
+ /* Dep_status could have been changed.
+ No assertion here. */
+ }
+ }
+
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
/* Maintain the invariant that bb->aux is clear after use. */
while (last_block)
{
}
}
-/* Schedule a single extended basic block, defined by the boundaries HEAD
- and TAIL. */
+/* Schedule a single extended basic block, defined by the boundaries
+ HEAD and TAIL.
-static basic_block
-schedule_ebb (rtx head, rtx tail)
+ We change our expectations about scheduler behaviour depending on
+ whether MODULO_SCHEDULING is true. If it is, we expect that the
+ caller has already called set_modulo_params and created delay pairs
+   as appropriate.  If the modulo schedule failed, we return
+   NULL.  */
+
+basic_block
+schedule_ebb (rtx head, rtx tail, bool modulo_scheduling)
{
basic_block first_bb, target_bb;
struct deps_desc tmp_deps;
+ bool success;
+
+ /* Blah. We should fix the rest of the code not to get confused by
+ a note or two. */
+ while (head != tail)
+ {
+ if (NOTE_P (head) || DEBUG_INSN_P (head))
+ head = NEXT_INSN (head);
+ else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
+ tail = PREV_INSN (tail);
+ else if (LABEL_P (head))
+ head = NEXT_INSN (head);
+ else
+ break;
+ }
first_bb = BLOCK_FOR_INSN (head);
last_bb = BLOCK_FOR_INSN (tail);
/* Make ready list big enough to hold all the instructions from the ebb. */
sched_extend_ready_list (rgn_n_insns);
- schedule_block (&target_bb);
+ success = schedule_block (&target_bb);
+ gcc_assert (success || modulo_scheduling);
+
/* Free ready list. */
sched_finish_ready_list ();
     so we may have made some of them empty.  Can't assert (b == last_bb). */
/* Sanity check: verify that all region insns were scheduled. */
- gcc_assert (sched_rgn_n_insns == rgn_n_insns);
+ gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);
/* Free dependencies. */
sched_free_deps (current_sched_info->head, current_sched_info->tail, true);
delete_basic_block (last_bb->next_bb);
}
- return last_bb;
+ return success ? last_bb : NULL;
}
-/* The one entry point in this file. */
-
+/* Perform initializations before running schedule_ebbs or a single
+ schedule_ebb. */
void
-schedule_ebbs (void)
+schedule_ebbs_init (void)
{
- basic_block bb;
- int probability_cutoff;
- rtx tail;
-
- if (profile_info && flag_branch_probabilities)
- probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
- else
- probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
- probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
-
- /* Taking care of this degenerate case makes the rest of
- this code simpler. */
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
- return;
-
/* Setup infos. */
{
memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
/* Initialize DONT_CALC_DEPS and ebb-{start, end} markers. */
bitmap_initialize (&dont_calc_deps, 0);
bitmap_clear (&dont_calc_deps);
+}
+
+/* Perform cleanups after scheduling using schedules_ebbs or schedule_ebb. */
+void
+schedule_ebbs_finish (void)
+{
+ bitmap_clear (&dont_calc_deps);
+
+ /* Reposition the prologue and epilogue notes in case we moved the
+ prologue/epilogue insns. */
+ if (reload_completed)
+ reposition_prologue_and_epilogue_notes ();
+
+ haifa_sched_finish ();
+}
+
+/* The main entry point in this file. */
+
+void
+schedule_ebbs (void)
+{
+ basic_block bb;
+ int probability_cutoff;
+ rtx tail;
+
+ /* Taking care of this degenerate case makes the rest of
+ this code simpler. */
+ if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ return;
+
+ if (profile_info && flag_branch_probabilities)
+ probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
+ else
+ probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
+ probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
+
+ schedule_ebbs_init ();
/* Schedule every region in the subroutine. */
FOR_EACH_BB (bb)
{
rtx head = BB_HEAD (bb);
+ if (bb->flags & BB_DISABLE_SCHEDULE)
+ continue;
+
for (;;)
{
edge e;
break;
if (e->probability <= probability_cutoff)
break;
+ if (e->dest->flags & BB_DISABLE_SCHEDULE)
+ break;
bb = bb->next_bb;
}
- /* Blah. We should fix the rest of the code not to get confused by
- a note or two. */
- while (head != tail)
- {
- if (NOTE_P (head) || DEBUG_INSN_P (head))
- head = NEXT_INSN (head);
- else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
- tail = PREV_INSN (tail);
- else if (LABEL_P (head))
- head = NEXT_INSN (head);
- else
- break;
- }
-
- bb = schedule_ebb (head, tail);
+ bb = schedule_ebb (head, tail, false);
}
- bitmap_clear (&dont_calc_deps);
-
- /* Reposition the prologue and epilogue notes in case we moved the
- prologue/epilogue insns. */
- if (reload_completed)
- reposition_prologue_and_epilogue_notes ();
-
- haifa_sched_finish ();
+ schedule_ebbs_finish ();
}
/* INSN has been added to/removed from current ebb. */