#include "target.h"
#include "output.h"
#include "params.h"
+#include "dbgcnt.h"
#ifdef INSN_SCHEDULING
/* Counters of different types of speculative instructions. */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
-/* Pointers to GLAT data. See init_glat for more information. */
-regset *glat_start, *glat_end;
-
/* Array used in {unlink, restore}_bb_notes. */
static rtx *bb_header = 0;
return insn_class;
}
+/* A typedef for rtx vector. */
+typedef VEC(rtx, heap) *rtx_vec_t;
+
/* Forward declarations. */
static int priority (rtx);
static void ready_remove_insn (rtx);
static int max_issue (struct ready_list *, int *, int);
-static rtx choose_ready (struct ready_list *);
+static int choose_ready (struct ready_list *, rtx *);
static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
-static void init_glat (void);
-static void init_glat1 (basic_block);
-static void attach_life_info1 (basic_block);
-static void free_glat (void);
static void sched_remove_insn (rtx);
-static void clear_priorities (rtx);
+static void clear_priorities (rtx, rtx_vec_t *);
+static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx, rtx);
-static void calc_priorities (rtx);
#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
return cost;
}
-/* Compute the priority number for INSN. */
+/* Return 'true' if DEP should be included in priority calculations. */
+static bool
+contributes_to_priority_p (dep_t dep)
+{
+ /* Critical path is meaningful in block boundaries only. */
+ if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
+ DEP_PRO (dep)))
+ return false;
+
+ /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
+ then speculative instructions will less likely be
+ scheduled. That is because the priority of
+ their producers will increase, and, thus, the
+ producers will more likely be scheduled, thus,
+ resolving the dependence. */
+ if ((current_sched_info->flags & DO_SPECULATION)
+ && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
+ && (DEP_STATUS (dep) & SPECULATIVE))
+ return false;
+
+ return true;
+}
+/* Compute the priority number for INSN. */
static int
priority (rtx insn)
{
if (! INSN_P (insn))
return 0;
- if (! INSN_PRIORITY_KNOWN (insn))
+ /* We should not be interested in priority of an already scheduled insn. */
+ gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
+
+ if (!INSN_PRIORITY_KNOWN (insn))
{
int this_priority = 0;
{
int cost;
- /* Critical path is meaningful in block boundaries
- only. */
- if (! (*current_sched_info->contributes_to_priority)
- (next, insn)
- /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
- then speculative instructions will less likely be
- scheduled. That is because the priority of
- their producers will increase, and, thus, the
- producers will more likely be scheduled, thus,
- resolving the dependence. */
- || ((current_sched_info->flags & DO_SPECULATION)
- && (DEP_STATUS (dep) & SPECULATIVE)
- && !(spec_info->flags
- & COUNT_SPEC_IN_CRITICAL_PATH)))
+ if (!contributes_to_priority_p (dep))
continue;
if (twin == insn)
while (twin != prev_first);
}
INSN_PRIORITY (insn) = this_priority;
- INSN_PRIORITY_KNOWN (insn) = 1;
+ INSN_PRIORITY_STATUS (insn) = 1;
}
return INSN_PRIORITY (insn);
if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
return SCHED_GROUP_P (tmp2) ? 1 : -1;
+ /* Make sure that priority of TMP and TMP2 are initialized. */
+ gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
+
/* Prefer insn with higher priority. */
priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
}
/* See sched_analyze to see how these are handled. */
- if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
- && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
+ if (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
+ && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END)
{
/* Insert the note at the end of the notes list. */
PREV_INSN (insn) = note_list;
{
rtx insn;
rtx link;
+ rtx skip_insn;
q_ptr = NEXT_Q (q_ptr);
+ if (dbg_cnt (sched_insn) == false)
+ /* If debug counter is activated do not requeue insn next after
+ last_scheduled_insn. */
+ skip_insn = next_nonnote_insn (last_scheduled_insn);
+ else
+ skip_insn = NULL_RTX;
+
/* Add all pending insns that can be scheduled without stalls to the
ready list. */
for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
See the comment in schedule_block for the rationale. */
if (!reload_completed
&& ready->n_ready > MAX_SCHED_READY_INSNS
- && !SCHED_GROUP_P (insn))
+ && !SCHED_GROUP_P (insn)
+ && insn != skip_insn)
{
if (sched_verbose >= 2)
fprintf (sched_dump, "requeued because ready full\n");
}
set_block_for_insn (insn, bb);
+ df_insn_change_bb (insn);
/* Update BB_END, if needed. */
if (BB_END (bb) == last)
/* The following function chooses insn from READY and modifies
*N_READY and READY. The following function is used only for first
- cycle multipass scheduling. */
-
-static rtx
-choose_ready (struct ready_list *ready)
+ cycle multipass scheduling.
+ Return:
+ -1 if cycle should be advanced,
+ 0 if INSN_PTR is set to point to the desirable insn,
+ 1 if choose_ready () should be restarted without advancing the cycle. */
+static int
+choose_ready (struct ready_list *ready, rtx *insn_ptr)
{
- int lookahead = 0;
+ int lookahead;
+
+ if (dbg_cnt (sched_insn) == false)
+ {
+ rtx insn;
+
+ insn = next_nonnote_insn (last_scheduled_insn);
+
+ if (QUEUE_INDEX (insn) == QUEUE_READY)
+ /* INSN is in the ready_list. */
+ {
+ ready_remove_insn (insn);
+ *insn_ptr = insn;
+ return 0;
+ }
+
+ /* INSN is in the queue. Advance cycle to move it to the ready list. */
+ return -1;
+ }
+
+ lookahead = 0;
if (targetm.sched.first_cycle_multipass_dfa_lookahead)
lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
- return ready_remove_first (ready);
+ {
+ *insn_ptr = ready_remove_first (ready);
+ return 0;
+ }
else
{
/* Try to choose the better insn. */
}
insn = ready_element (ready, 0);
if (INSN_CODE (insn) < 0)
- return ready_remove_first (ready);
+ {
+ *insn_ptr = ready_remove_first (ready);
+ return 0;
+ }
if (spec_info
&& spec_info->flags & (PREFER_NON_DATA_SPEC
list. */
{
change_queue_index (insn, 1);
- return 0;
+ return 1;
}
max_points = ISSUE_POINTS (insn);
}
if (max_issue (ready, &index, max_points) == 0)
- return ready_remove_first (ready);
+ {
+ *insn_ptr = ready_remove_first (ready);
+ return 0;
+ }
else
- return ready_remove (ready, index);
+ {
+ *insn_ptr = ready_remove (ready, index);
+ return 0;
+ }
}
}
";;\t\t before reload => truncated to %d insns\n", i);
}
- /* Delay all insns past it for 1 cycle. */
- while (i < ready.n_ready)
- queue_insn (ready_remove (&ready, i), 1);
+ /* Delay all insns past it for 1 cycle. If debug counter is
+ activated make an exception for the insn right after
+ last_scheduled_insn. */
+ {
+ rtx skip_insn;
+
+ if (dbg_cnt (sched_insn) == false)
+ skip_insn = next_nonnote_insn (last_scheduled_insn);
+ else
+ skip_insn = NULL_RTX;
+
+ while (i < ready.n_ready)
+ {
+ rtx insn;
+
+ insn = ready_remove (&ready, i);
+
+ if (insn != skip_insn)
+ queue_insn (insn, 1);
+ }
+ }
}
/* Now we can restore basic block notes and maintain precise cfg. */
/* Select and remove the insn from the ready list. */
if (sort_p)
{
- insn = choose_ready (&ready);
- if (!insn)
+ int res;
+
+ insn = NULL_RTX;
+ res = choose_ready (&ready, &insn);
+
+ if (res < 0)
+ /* Finish cycle. */
+ break;
+ if (res > 0)
+ /* Restart choose_ready (). */
continue;
+
+ gcc_assert (insn != NULL_RTX);
}
else
insn = ready_remove_first (&ready);
generate_recovery_code (insn);
if (control_flow_insn_p (last_scheduled_insn)
- /* This is used to to switch basic blocks by request
+ /* This is used to switch basic blocks by request
from scheduler front-end (actually, sched-ebb.c only).
This is used to process blocks with single fallthru
edge. If succeeding block has jump, it [jump] will try
n_insn++;
(void) priority (insn);
- if (INSN_PRIORITY_KNOWN (insn))
- sched_max_insns_priority =
- MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
+ gcc_assert (INSN_PRIORITY_KNOWN (insn));
+
+ sched_max_insns_priority = MAX (sched_max_insns_priority,
+ INSN_PRIORITY (insn));
}
current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
init_alias_analysis ();
old_last_basic_block = 0;
- glat_start = 0;
- glat_end = 0;
extend_bb ();
- if (current_sched_info->flags & USE_GLAT)
- init_glat ();
-
/* Compute INSN_REG_WEIGHT for all blocks. We must do this before
removing death notes. */
FOR_EACH_BB_REVERSE (b)
dfa_finish ();
free_dependency_caches ();
end_alias_analysis ();
- free_glat ();
if (targetm.sched.md_finish_global)
targetm.sched.md_finish_global (sched_dump, sched_verbose);
ds_t ts;
dep_link_t link;
rtx twins = NULL;
+ rtx_vec_t priorities_roots;
ts = TODO_SPEC (insn);
gcc_assert (!(ts & ~BE_IN_SPEC));
link = DEP_LINK_NEXT (link);
}
- clear_priorities (insn);
+ priorities_roots = NULL;
+ clear_priorities (insn, &priorities_roots);
do
{
rec = BLOCK_FOR_INSN (check);
- twin = emit_insn_before (copy_rtx (PATTERN (insn)), BB_END (rec));
+ twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
extend_global (twin);
copy_deps_list_change_con (INSN_RESOLVED_BACK_DEPS (twin),
rtx twin;
twin = XEXP (twins, 0);
- calc_priorities (twin);
add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
twin = XEXP (twins, 1);
free_INSN_LIST_node (twins);
twins = twin;
}
+
+ calc_priorities (priorities_roots);
+ VEC_free (rtx, heap, priorities_roots);
}
/* Extends and fills with zeros (only the new part) array pointed to by P. */
/* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
because it'll be done later in add_to_speculative_block. */
{
- clear_priorities (twin);
- calc_priorities (twin);
+ rtx_vec_t priorities_roots = NULL;
+
+ clear_priorities (twin, &priorities_roots);
+ calc_priorities (priorities_roots);
+ VEC_free (rtx, heap, priorities_roots);
}
}
old_last_basic_block = last_basic_block;
- if (current_sched_info->flags & USE_GLAT)
- {
- glat_start = xrealloc (glat_start,
- last_basic_block * sizeof (*glat_start));
- glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
- }
-
/* The following is done to keep current_sched_info->next_tail non null. */
insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
/* Don't emit a NOTE if it would end up before a BARRIER. */
&& !BARRIER_P (NEXT_INSN (insn))))
{
- emit_note_after (NOTE_INSN_DELETED, insn);
- /* Make insn to appear outside BB. */
+ rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
+ /* Make insn appear outside BB. */
+ set_block_for_insn (note, NULL);
BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
}
}
void
add_block (basic_block bb, basic_block ebb)
{
- gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO
- && bb->il.rtl->global_live_at_start == 0
- && bb->il.rtl->global_live_at_end == 0);
+ gcc_assert (current_sched_info->flags & NEW_BBS);
extend_bb ();
- glat_start[bb->index] = 0;
- glat_end[bb->index] = 0;
-
if (current_sched_info->add_block)
/* This changes only data structures of the front-end. */
current_sched_info->add_block (bb, ebb);
move_succs (&(jump_bb->succs), bb);
move_succs (&(jump_bb_next->succs), jump_bb);
move_succs (&t, jump_bb_next);
+
+ df_mark_solutions_dirty ();
if (current_sched_info->fix_recovery_cfg)
current_sched_info->fix_recovery_cfg
*succsp = 0;
}
-/* Initialize GLAT (global_live_at_{start, end}) structures.
- GLAT structures are used to substitute global_live_{start, end}
- regsets during scheduling. This is necessary to use such functions as
- split_block (), as they assume consistency of register live information. */
-static void
-init_glat (void)
-{
- basic_block bb;
-
- FOR_ALL_BB (bb)
- init_glat1 (bb);
-}
-
-/* Helper function for init_glat. */
-static void
-init_glat1 (basic_block bb)
-{
- gcc_assert (bb->il.rtl->global_live_at_start != 0
- && bb->il.rtl->global_live_at_end != 0);
-
- glat_start[bb->index] = bb->il.rtl->global_live_at_start;
- glat_end[bb->index] = bb->il.rtl->global_live_at_end;
-
- if (current_sched_info->flags & DETACH_LIFE_INFO)
- {
- bb->il.rtl->global_live_at_start = 0;
- bb->il.rtl->global_live_at_end = 0;
- }
-}
-
-/* Attach reg_live_info back to basic blocks.
- Also save regsets, that should not have been changed during scheduling,
- for checking purposes (see check_reg_live). */
-void
-attach_life_info (void)
-{
- basic_block bb;
-
- FOR_ALL_BB (bb)
- attach_life_info1 (bb);
-}
-
-/* Helper function for attach_life_info. */
-static void
-attach_life_info1 (basic_block bb)
-{
- gcc_assert (bb->il.rtl->global_live_at_start == 0
- && bb->il.rtl->global_live_at_end == 0);
-
- if (glat_start[bb->index])
- {
- gcc_assert (glat_end[bb->index]);
-
- bb->il.rtl->global_live_at_start = glat_start[bb->index];
- bb->il.rtl->global_live_at_end = glat_end[bb->index];
-
- /* Make them NULL, so they won't be freed in free_glat. */
- glat_start[bb->index] = 0;
- glat_end[bb->index] = 0;
-
-#ifdef ENABLE_CHECKING
- if (bb->index < NUM_FIXED_BLOCKS
- || current_sched_info->region_head_or_leaf_p (bb, 0))
- {
- glat_start[bb->index] = ALLOC_REG_SET (®_obstack);
- COPY_REG_SET (glat_start[bb->index],
- bb->il.rtl->global_live_at_start);
- }
-
- if (bb->index < NUM_FIXED_BLOCKS
- || current_sched_info->region_head_or_leaf_p (bb, 1))
- {
- glat_end[bb->index] = ALLOC_REG_SET (®_obstack);
- COPY_REG_SET (glat_end[bb->index], bb->il.rtl->global_live_at_end);
- }
-#endif
- }
- else
- {
- gcc_assert (!glat_end[bb->index]);
-
- bb->il.rtl->global_live_at_start = ALLOC_REG_SET (®_obstack);
- bb->il.rtl->global_live_at_end = ALLOC_REG_SET (®_obstack);
- }
-}
-
-/* Free GLAT information. */
-static void
-free_glat (void)
-{
-#ifdef ENABLE_CHECKING
- if (current_sched_info->flags & DETACH_LIFE_INFO)
- {
- basic_block bb;
-
- FOR_ALL_BB (bb)
- {
- if (glat_start[bb->index])
- FREE_REG_SET (glat_start[bb->index]);
- if (glat_end[bb->index])
- FREE_REG_SET (glat_end[bb->index]);
- }
- }
-#endif
-
- free (glat_start);
- free (glat_end);
-}
-
/* Remove INSN from the instruction stream.
   INSN should not have any dependencies.  */
static void
remove_insn (insn);
}
-/* Clear priorities of all instructions, that are
- forward dependent on INSN. */
+/* Clear priorities of all instructions that are forward dependent on INSN.
+ Store in vector pointed to by ROOTS_PTR insns on which priority () should
+ be invoked to initialize all cleared priorities. */
static void
-clear_priorities (rtx insn)
+clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
{
dep_link_t link;
+ bool insn_is_root_p = true;
+
+ gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
{
- rtx pro = DEP_LINK_PRO (link);
+ dep_t dep = DEP_LINK_DEP (link);
+ rtx pro = DEP_PRO (dep);
- if (INSN_PRIORITY_KNOWN (pro))
+ if (INSN_PRIORITY_STATUS (pro) >= 0
+ && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
{
- INSN_PRIORITY_KNOWN (pro) = 0;
- clear_priorities (pro);
+ /* If DEP doesn't contribute to priority then INSN itself should
+ be added to priority roots. */
+ if (contributes_to_priority_p (dep))
+ insn_is_root_p = false;
+
+ INSN_PRIORITY_STATUS (pro) = -1;
+ clear_priorities (pro, roots_ptr);
}
}
+
+ if (insn_is_root_p)
+ VEC_safe_push (rtx, heap, *roots_ptr, insn);
}
/* Recompute priorities of instructions whose priorities might have been
- changed due to changes in INSN. */
+ changed. ROOTS is a vector of instructions whose priority computation will
+ trigger initialization of all cleared priorities. */
static void
-calc_priorities (rtx insn)
+calc_priorities (rtx_vec_t roots)
{
- dep_link_t link;
-
- FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
- {
- rtx pro = DEP_LINK_PRO (link);
+ int i;
+ rtx insn;
- if (!INSN_PRIORITY_KNOWN (pro))
- {
- priority (pro);
- calc_priorities (pro);
- }
- }
+ for (i = 0; VEC_iterate (rtx, roots, i, insn); i++)
+ priority (insn);
}
gcc_assert (!(f & DO_SPECULATION));
if (f & DO_SPECULATION)
gcc_assert (!flag_sched_stalled_insns
- && (f & DETACH_LIFE_INFO)
&& spec_info
&& spec_info->mask);
- if (f & DETACH_LIFE_INFO)
- gcc_assert (f & USE_GLAT);
-}
-
-/* Check global_live_at_{start, end} regsets.
- If FATAL_P is TRUE, then abort execution at the first failure.
- Otherwise, print diagnostics to STDERR (this mode is for calling
- from debugger). */
-void
-check_reg_live (bool fatal_p)
-{
- basic_block bb;
-
- FOR_ALL_BB (bb)
- {
- int i;
-
- i = bb->index;
-
- if (glat_start[i])
- {
- bool b = bitmap_equal_p (bb->il.rtl->global_live_at_start,
- glat_start[i]);
-
- if (!b)
- {
- gcc_assert (!fatal_p);
-
- fprintf (stderr, ";; check_reg_live_at_start (%d) failed.\n", i);
- }
- }
-
- if (glat_end[i])
- {
- bool b = bitmap_equal_p (bb->il.rtl->global_live_at_end,
- glat_end[i]);
-
- if (!b)
- {
- gcc_assert (!fatal_p);
-
- fprintf (stderr, ";; check_reg_live_at_end (%d) failed.\n", i);
- }
- }
- }
}
#endif /* ENABLE_CHECKING */