/* Instruction scheduling pass.
- Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+ 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
/* This pass implements list scheduling within basic blocks. It is
run twice: (1) after flow analysis, but before register allocation,
#include "target.h"
#include "timevar.h"
#include "tree-pass.h"
-
-/* Define when we want to do count REG_DEAD notes before and after scheduling
- for sanity checking. We can't do that when conditional execution is used,
- as REG_DEAD exist only for unconditional deaths. */
-
-#if !defined (HAVE_conditional_execution) && defined (ENABLE_CHECKING)
-#define CHECK_DEAD_NOTES 1
-#else
-#define CHECK_DEAD_NOTES 0
-#endif
-
+#include "dbgcnt.h"
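The new dbgcnt.h include backs the dbg_cnt () gates added further down
(sched_func, sched_region, sched_block, sched2_func), which let
individual scheduling decisions be switched off from the command line
for bisection.  A self-contained sketch of the debug-counter idea; it
assumes nothing about the real table-driven implementation in dbgcnt.c:

  #include <stdbool.h>

  /* Each counter counts up toward a limit (settable via -fdbg-cnt=
     in the real compiler); a transformation runs only while its
     counter is below that limit, so a miscompile can be bisected to
     a single decision.  */
  static unsigned sched_block_count;
  static unsigned sched_block_limit = ~0u;  /* default: unlimited */

  static bool
  dbg_cnt_sched_block (void)
  {
    return sched_block_count++ < sched_block_limit;
  }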
#ifdef INSN_SCHEDULING
/* Some accessor macros for h_i_d members only used within this file. */
static int is_exception_free (rtx, int, int);
static bool sets_likely_spilled (rtx);
-static void sets_likely_spilled_1 (rtx, rtx, void *);
+static void sets_likely_spilled_1 (rtx, const_rtx, void *);
static void add_branch_dependences (rtx, rtx);
-static void compute_block_backward_dependences (int);
+static void compute_block_dependences (int);
static void init_regions (void);
static void schedule_region (int);
return 1;
/* If we have exception handlers, then we consider the cfg not well
- structured. ?!? We should be able to handle this now that flow.c
- computes an accurate cfg for EH. */
+ structured. ?!? We should be able to handle this now that we
+ compute an accurate cfg for EH. */
if (current_function_has_exception_handlers ())
return 1;
- /* If we have non-jumping insns which refer to labels, then we consider
- the cfg not well structured. */
+ /* If we have insns which refer to labels as non-jumped-to operands,
+ then we consider the cfg not well structured. */
FOR_EACH_BB (b)
FOR_BB_INSNS (b, insn)
{
- /* Check for labels referred by non-jump insns. */
- if (NONJUMP_INSN_P (insn) || CALL_P (insn))
- {
- rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
- if (note
- && ! (JUMP_P (NEXT_INSN (insn))
- && find_reg_note (NEXT_INSN (insn), REG_LABEL,
- XEXP (note, 0))))
- return 1;
- }
+ /* Check for labels referred to other than as direct jump
+    targets.  */
+ if (INSN_P (insn)
+ && find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX))
+ return 1;
+
/* If this function has a computed jump, then we consider the cfg
not well structured. */
- else if (JUMP_P (insn) && computed_jump_p (insn))
+ if (JUMP_P (insn) && computed_jump_p (insn))
return 1;
}
max_hdr = xmalloc (last_basic_block * sizeof (*max_hdr));
order = xmalloc (last_basic_block * sizeof (*order));
- post_order_compute (order, false);
+ post_order_compute (order, false, false);
for (i = nblocks - 1; i >= 0; i--)
{
/* Functions for speculative scheduling. */
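+/* Blocks added to the region after DF liveness was last computed
+   have no live-in sets yet; this bitmap records them so the
+   liveness checks below can fall back to a conservative answer.  */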
+static bitmap_head not_in_df;
+
/* Return 0 if x is a set of a register alive in the beginning of one
of the split-blocks of src, otherwise return 1. */
for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
{
basic_block b = candidate_table[src].split_bbs.first_member[i];
+ int t = bitmap_bit_p (&not_in_df, b->index);
/* We can have split blocks that were recently generated.
   Such blocks are always outside the current region.  */
- gcc_assert (glat_start[b->index]
- || CONTAINING_RGN (b->index)
- != CONTAINING_RGN (BB_TO_BLOCK (src)));
- if (!glat_start[b->index]
- || REGNO_REG_SET_P (glat_start[b->index],
- regno + j))
- {
- return 0;
- }
+ gcc_assert (!t || (CONTAINING_RGN (b->index)
+ != CONTAINING_RGN (BB_TO_BLOCK (src))));
+
+ if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j))
+ return 0;
}
}
}
for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
{
basic_block b = candidate_table[src].split_bbs.first_member[i];
+ int t = bitmap_bit_p (&not_in_df, b->index);
- gcc_assert (glat_start[b->index]
- || CONTAINING_RGN (b->index)
- != CONTAINING_RGN (BB_TO_BLOCK (src)));
- if (!glat_start[b->index]
- || REGNO_REG_SET_P (glat_start[b->index], regno))
- {
- return 0;
- }
+ gcc_assert (!t || (CONTAINING_RGN (b->index)
+ != CONTAINING_RGN (BB_TO_BLOCK (src))));
+
+ if (t || REGNO_REG_SET_P (df_get_live_in (b), regno))
+ return 0;
}
}
}
{
basic_block b = candidate_table[src].update_bbs.first_member[i];
- SET_REGNO_REG_SET (glat_start[b->index], regno + j);
+ SET_REGNO_REG_SET (df_get_live_in (b), regno + j);
}
}
}
{
basic_block b = candidate_table[src].update_bbs.first_member[i];
- SET_REGNO_REG_SET (glat_start[b->index], regno);
+ SET_REGNO_REG_SET (df_get_live_in (b), regno);
}
}
}
static void
set_spec_fed (rtx load_insn)
{
- dep_link_t link;
+ sd_iterator_def sd_it;
+ dep_t dep;
- FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (load_insn))
- if (DEP_LINK_KIND (link) == REG_DEP_TRUE)
- FED_BY_SPEC_LOAD (DEP_LINK_CON (link)) = 1;
+ FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep)
+ if (DEP_TYPE (dep) == REG_DEP_TRUE)
+ FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1;
}
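The hunks in this area all apply the same mechanical conversion: a
dep_link_t walk over INSN_FORW_DEPS/INSN_BACK_DEPS becomes an
sd_iterator_def/dep_t pair driven by FOR_EACH_DEP with an SD_LIST_*
selector.  A minimal sketch of the new idiom, with handle_true_dep as
a hypothetical consumer:

  sd_iterator_def sd_it;
  dep_t dep;

  /* Visit every forward dependence of INSN.  */
  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
    if (DEP_TYPE (dep) == REG_DEP_TRUE)
      handle_true_dep (DEP_PRO (dep), DEP_CON (dep));

DEP_PRO and DEP_CON yield the producer and consumer insns of the
dependence, as used in the converted code above and below.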
/* On the path from the insn to load_insn_bb, find a conditional
static int
find_conditional_protection (rtx insn, int load_insn_bb)
{
- dep_link_t link;
+ sd_iterator_def sd_it;
+ dep_t dep;
/* Iterate through DEF-USE forward dependences. */
- FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (insn))
+ FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
{
- rtx next = DEP_LINK_CON (link);
+ rtx next = DEP_CON (dep);
if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
&& IS_REACHABLE (INSN_BB (next), load_insn_bb)
&& load_insn_bb != INSN_BB (next)
- && DEP_LINK_KIND (link) == REG_DEP_TRUE
+ && DEP_TYPE (dep) == REG_DEP_TRUE
&& (JUMP_P (next)
|| find_conditional_protection (next, load_insn_bb)))
return 1;
static int
is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
{
- dep_link_t link;
+ sd_iterator_def sd_it;
+ dep_t dep;
- FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (load_insn))
+ FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep)
{
- rtx insn1 = DEP_LINK_PRO (link);
+ rtx insn1 = DEP_PRO (dep);
/* Must be a DEF-USE dependence upon non-branch. */
- if (DEP_LINK_KIND (link) != REG_DEP_TRUE
+ if (DEP_TYPE (dep) != REG_DEP_TRUE
|| JUMP_P (insn1))
continue;
static int
is_pfree (rtx load_insn, int bb_src, int bb_trg)
{
- dep_link_t back_link;
+ sd_iterator_def back_sd_it;
+ dep_t back_dep;
candidate *candp = candidate_table + bb_src;
if (candp->split_bbs.nr_members != 1)
/* Must have exactly one escape block. */
return 0;
- FOR_EACH_DEP_LINK (back_link, INSN_BACK_DEPS (load_insn))
+ FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
{
- rtx insn1 = DEP_LINK_PRO (back_link);
+ rtx insn1 = DEP_PRO (back_dep);
- if (DEP_LINK_KIND (back_link) == REG_DEP_TRUE)
+ if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
+ /* Found a DEF-USE dependence (insn1, load_insn). */
{
- /* Found a DEF-USE dependence (insn1, load_insn). */
- dep_link_t fore_link;
+ sd_iterator_def fore_sd_it;
+ dep_t fore_dep;
- FOR_EACH_DEP_LINK (fore_link, INSN_FORW_DEPS (insn1))
+ FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
{
- rtx insn2 = DEP_LINK_CON (fore_link);
+ rtx insn2 = DEP_CON (fore_dep);
- if (DEP_LINK_KIND (fore_link) == REG_DEP_TRUE)
+ if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
{
/* Found a DEF-USE dependence (insn1, insn2). */
if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
if (FED_BY_SPEC_LOAD (load_insn))
return 1;
- if (deps_list_empty_p (INSN_BACK_DEPS (load_insn)))
+ if (sd_lists_empty_p (load_insn, SD_LIST_BACK))
/* Dependence may 'hide' out of the region. */
return 1;
static void add_block1 (basic_block, basic_block);
static void fix_recovery_cfg (int, int, int);
static basic_block advance_target_bb (basic_block, rtx);
-static void check_dead_notes1 (int, sbitmap);
-#ifdef ENABLE_CHECKING
-static int region_head_or_leaf_p (basic_block, int);
-#endif
static void debug_rgn_dependencies (int);
if (not_ex_free
/* We are here because is_exception_free () == false.
But we possibly can handle that with control speculation. */
- && current_sched_info->flags & DO_SPECULATION)
+ && (current_sched_info->flags & DO_SPECULATION)
+ && (spec_info->mask & BEGIN_CONTROL))
/* Here we got new control-speculative instruction. */
ts = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK);
else
add_block1,
advance_target_bb,
fix_recovery_cfg,
-#ifdef ENABLE_CHECKING
- region_head_or_leaf_p,
-#endif
- SCHED_RGN | USE_GLAT
-#ifdef ENABLE_CHECKING
- | DETACH_LIFE_INFO
-#endif
+ SCHED_RGN
};
/* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register. */
}
static void
-sets_likely_spilled_1 (rtx x, rtx pat, void *data)
+sets_likely_spilled_1 (rtx x, const_rtx pat, void *data)
{
bool *ret = (bool *) data;
if (!NOTE_P (insn))
{
if (last != 0
- && (find_link_by_pro_in_deps_list (INSN_BACK_DEPS (last), insn)
- == NULL))
+ && sd_find_dep_between (insn, last, false) == NULL)
{
if (! sched_insns_conditions_mutex_p (last, insn))
add_dependence (last, insn, REG_DEP_ANTI);
pred_deps->pending_write_mems = 0;
}
-/* Compute backward dependences inside bb. In a multiple blocks region:
+/* Compute dependences inside bb. In a multiple blocks region:
(1) a bb is analyzed after its predecessors, and (2) the lists in
effect at the end of bb (after analyzing for bb) are inherited by
bb's successors.
similar, and the result is interblock dependences in the region. */
static void
-compute_block_backward_dependences (int bb)
+compute_block_dependences (int bb)
{
rtx head, tail;
struct deps tmp_deps;
/* Do the analysis for this block. */
gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
+
sched_analyze (&tmp_deps, head, tail);
add_branch_dependences (head, tail);
/* Free up the INSN_LISTs. */
free_deps (&tmp_deps);
+
+ if (targetm.sched.dependencies_evaluation_hook)
+ targetm.sched.dependencies_evaluation_hook (head, tail);
+}
+
+/* Free dependencies of instructions inside BB. */
+static void
+free_block_dependencies (int bb)
+{
+ rtx head;
+ rtx tail;
+
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
+
+ sched_free_deps (head, tail, true);
}
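Together with the driver changes further down, the per-region
dependence lifecycle now reads, in condensed form using this file's
own names:

  /* Build dependences, predecessors before successors.  */
  for (bb = 0; bb < current_nr_blocks; bb++)
    compute_block_dependences (bb);

  /* ... schedule every block in the region ... */

  /* Release the per-insn dependence lists once the region is done.  */
  for (bb = 0; bb < current_nr_blocks; bb++)
    free_block_dependencies (bb);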
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
}
}
\f
-
/* Print dependences for debugging starting from FROM_BB.
Callable from debugger. */
void
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
{
- dep_link_t link;
-
if (! INSN_P (insn))
{
int n;
fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
if (NOTE_P (insn))
{
- n = NOTE_LINE_NUMBER (insn);
- if (n < 0)
- fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
+ n = NOTE_KIND (insn);
+ fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
}
else
fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
INSN_UID (insn),
INSN_CODE (insn),
BLOCK_NUM (insn),
- INSN_DEP_COUNT (insn),
+ sd_lists_size (insn, SD_LIST_BACK),
INSN_PRIORITY (insn),
insn_cost (insn));
print_reservation (sched_dump, insn);
fprintf (sched_dump, "\t: ");
- FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (insn))
- fprintf (sched_dump, "%d ", INSN_UID (DEP_LINK_CON (link)));
+ {
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
+ fprintf (sched_dump, "%d ", INSN_UID (DEP_CON (dep)));
+ }
fprintf (sched_dump, "\n");
}
for (bb = 0; bb < current_nr_blocks; bb++)
init_deps (bb_deps + bb);
- /* Compute backward dependencies. */
+ /* Compute dependencies. */
for (bb = 0; bb < current_nr_blocks; bb++)
- compute_block_backward_dependences (bb);
-
- /* Compute forward dependencies. */
- for (bb = current_nr_blocks - 1; bb >= 0; bb--)
- {
- rtx head, tail;
-
- gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
- get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
-
- compute_forward_dependences (head, tail);
-
- if (targetm.sched.dependencies_evaluation_hook)
- targetm.sched.dependencies_evaluation_hook (head, tail);
- }
+ compute_block_dependences (bb);
free_pending_lists ();
current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;
curr_bb = first_bb;
- schedule_block (&curr_bb, rgn_n_insns);
- gcc_assert (EBB_FIRST_BB (bb) == first_bb);
- sched_rgn_n_insns += sched_n_insns;
+ if (dbg_cnt (sched_block))
+ {
+ schedule_block (&curr_bb, rgn_n_insns);
+ gcc_assert (EBB_FIRST_BB (bb) == first_bb);
+ sched_rgn_n_insns += sched_n_insns;
+ }
+ else
+ {
+ sched_rgn_n_insns += rgn_n_insns;
+ }
/* Clean up. */
if (current_nr_blocks > 1)
/* Sanity check: verify that all region insns were scheduled. */
gcc_assert (sched_rgn_n_insns == rgn_n_insns);
-
/* Done with this region. */
if (current_nr_blocks > 1)
sbitmap_vector_free (ancestor_edges);
free (rgn_edges);
}
-}
-/* Indexed by region, holds the number of death notes found in that region.
- Used for consistency checks. */
-static int *deaths_in_region;
+ /* Free dependencies. */
+ for (bb = 0; bb < current_nr_blocks; ++bb)
+ free_block_dependencies (bb);
+
+ gcc_assert (haifa_recovery_bb_ever_added_p
+ || deps_pools_are_empty_p ());
+}
/* Initialize data structures for region scheduling. */
static void
init_regions (void)
{
- sbitmap blocks;
- int rgn;
-
nr_regions = 0;
rgn_table = 0;
rgn_bb_table = 0;
debug_regions ();
/* For now. This will move as more and more of haifa is converted
- to using the cfg code in flow.c. */
+ to using the cfg code. */
free_dominance_info (CDI_DOMINATORS);
}
RGN_BLOCKS (nr_regions) = RGN_BLOCKS (nr_regions - 1) +
RGN_NR_BLOCKS (nr_regions - 1);
-
-
- if (CHECK_DEAD_NOTES)
- {
- blocks = sbitmap_alloc (last_basic_block);
- deaths_in_region = XNEWVEC (int, nr_regions);
- /* Remove all death notes from the subroutine. */
- for (rgn = 0; rgn < nr_regions; rgn++)
- check_dead_notes1 (rgn, blocks);
-
- sbitmap_free (blocks);
- }
- else
- count_or_remove_death_notes (NULL, 1);
}
/* The one entry point in this file. */
void
schedule_insns (void)
{
- sbitmap large_region_blocks, blocks;
int rgn;
- int any_large_regions;
- basic_block bb;
/* Taking care of this degenerate case makes the rest of
this code simpler. */
invoked via sched_init. */
current_sched_info = &region_sched_info;
+ df_set_flags (DF_LR_RUN_DCE);
+ df_note_add_problem ();
+ df_analyze ();
+ regstat_compute_calls_crossed ();
+
sched_init ();
+ bitmap_initialize (&not_in_df, 0);
+ bitmap_clear (&not_in_df);
+
min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
/ 100);
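As a rough check of the arithmetic: assuming the defaults of this
period (a min-spec-prob parameter of 40 and REG_BR_PROB_BASE of
10000), min_spec_prob = 40 * 10000 / 100 = 4000; the percentage
parameter is rescaled onto the REG_BR_PROB_BASE scale used for edge
probabilities.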
/* Schedule every region in the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
- schedule_region (rgn);
+ if (dbg_cnt (sched_region))
+ schedule_region (rgn);
free (ebb_head);
-
- /* Update life analysis for the subroutine. Do single block regions
- first so that we can verify that live_at_start didn't change. Then
- do all other blocks. */
- /* ??? There is an outside possibility that update_life_info, or more
- to the point propagate_block, could get called with nonzero flags
- more than once for one basic block. This would be kinda bad if it
- were to happen, since REG_INFO would be accumulated twice for the
- block, and we'd have twice the REG_DEAD notes.
-
- I'm fairly certain that this _shouldn't_ happen, since I don't think
- that live_at_start should change at region heads. Not sure what the
- best way to test for this kind of thing... */
-
- if (current_sched_info->flags & DETACH_LIFE_INFO)
- /* this flag can be set either by the target or by ENABLE_CHECKING. */
- attach_life_info ();
-
- allocate_reg_life_data ();
-
- any_large_regions = 0;
- large_region_blocks = sbitmap_alloc (last_basic_block);
- sbitmap_zero (large_region_blocks);
- FOR_EACH_BB (bb)
- SET_BIT (large_region_blocks, bb->index);
-
- blocks = sbitmap_alloc (last_basic_block);
- sbitmap_zero (blocks);
-
- /* Update life information. For regions consisting of multiple blocks
- we've possibly done interblock scheduling that affects global liveness.
- For regions consisting of single blocks we need to do only local
- liveness. */
- for (rgn = 0; rgn < nr_regions; rgn++)
- if (RGN_NR_BLOCKS (rgn) > 1
- /* Or the only block of this region has been split. */
- || RGN_HAS_REAL_EBB (rgn)
- /* New blocks (e.g. recovery blocks) should be processed
- as parts of large regions. */
- || !glat_start[rgn_bb_table[RGN_BLOCKS (rgn)]])
- any_large_regions = 1;
- else
- {
- SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
- RESET_BIT (large_region_blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
- }
-
- /* Don't update reg info after reload, since that affects
- regs_ever_live, which should not change after reload. */
- update_life_info (blocks, UPDATE_LIFE_LOCAL,
- (reload_completed ? PROP_DEATH_NOTES
- : (PROP_DEATH_NOTES | PROP_REG_INFO)));
- if (any_large_regions)
- {
- update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL,
- (reload_completed ? PROP_DEATH_NOTES
- : (PROP_DEATH_NOTES | PROP_REG_INFO)));
-
-#ifdef ENABLE_CHECKING
- check_reg_live (true);
-#endif
- }
-
- if (CHECK_DEAD_NOTES)
- {
- /* Verify the counts of basic block notes in single basic block
- regions. */
- for (rgn = 0; rgn < nr_regions; rgn++)
- if (RGN_NR_BLOCKS (rgn) == 1)
- {
- sbitmap_zero (blocks);
- SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
-
- gcc_assert (deaths_in_region[rgn]
- == count_or_remove_death_notes (blocks, 0));
- }
- free (deaths_in_region);
- }
-
/* Reposition the prologue and epilogue notes in case we moved the
prologue/epilogue insns. */
if (reload_completed)
- reposition_prologue_and_epilogue_notes (get_insns ());
+ reposition_prologue_and_epilogue_notes ();
if (sched_verbose)
{
free (block_to_bb);
free (containing_rgn);
- sched_finish ();
+ regstat_free_calls_crossed ();
+
+ bitmap_clear (&not_in_df);
- sbitmap_free (blocks);
- sbitmap_free (large_region_blocks);
+ sched_finish ();
}
/* INSN has been added to/removed from current region. */
{
extend_regions ();
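+ /* The freshly added block is not in the DF liveness information
+    yet; record that, so the liveness checks that consult not_in_df
+    stay conservative.  */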
+ bitmap_set_bit (&not_in_df, bb->index);
+
if (after == 0 || after == EXIT_BLOCK_PTR)
{
int i;
nr_regions++;
RGN_BLOCKS (nr_regions) = i + 1;
-
- if (CHECK_DEAD_NOTES)
- {
- sbitmap blocks = sbitmap_alloc (last_basic_block);
- deaths_in_region = xrealloc (deaths_in_region, nr_regions *
- sizeof (*deaths_in_region));
-
- check_dead_notes1 (nr_regions - 1, blocks);
-
- sbitmap_free (blocks);
- }
}
else
{
for (++i; i <= nr_regions; i++)
RGN_BLOCKS (i)++;
-
- /* We don't need to call check_dead_notes1 () because this new block
- is just a split of the old. We don't want to count anything twice. */
}
}
return bb->next_bb;
}
-/* Count and remove death notes in region RGN, which consists of blocks
- with indecies in BLOCKS. */
-static void
-check_dead_notes1 (int rgn, sbitmap blocks)
-{
- int b;
-
- sbitmap_zero (blocks);
- for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b)
- SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn) + b]);
-
- deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
-}
-
-#ifdef ENABLE_CHECKING
-/* Return non zero, if BB is head or leaf (depending of LEAF_P) block in
- current region. For more information please refer to
- sched-int.h: struct sched_info: region_head_or_leaf_p. */
-static int
-region_head_or_leaf_p (basic_block bb, int leaf_p)
-{
- if (!leaf_p)
- return bb->index == rgn_bb_table[RGN_BLOCKS (CONTAINING_RGN (bb->index))];
- else
- {
- int i;
- edge e;
- edge_iterator ei;
-
- i = CONTAINING_RGN (bb->index);
-
- FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR
- && CONTAINING_RGN (e->dest->index) == i
- /* except self-loop. */
- && e->dest != bb)
- return 0;
-
- return 1;
- }
-}
-#endif /* ENABLE_CHECKING */
-
#endif
\f
static bool
gate_handle_sched (void)
{
#ifdef INSN_SCHEDULING
- return flag_schedule_insns;
+ return flag_schedule_insns && dbg_cnt (sched_func);
#else
return 0;
#endif
rest_of_handle_sched (void)
{
#ifdef INSN_SCHEDULING
- /* Do control and data sched analysis,
- and write some of the results to dump file. */
-
schedule_insns ();
#endif
return 0;
gate_handle_sched2 (void)
{
#ifdef INSN_SCHEDULING
- return optimize > 0 && flag_schedule_insns_after_reload;
+ return optimize > 0 && flag_schedule_insns_after_reload
+ && dbg_cnt (sched2_func);
#else
return 0;
#endif
#ifdef INSN_SCHEDULING
/* Do control and data sched analysis again,
and write some more of the results to dump file. */
-
- split_all_insns (1);
-
if (flag_sched2_use_superblocks || flag_sched2_use_traces)
- {
- schedule_ebbs ();
- /* No liveness updating code yet, but it should be easy to do.
- reg-stack recomputes the liveness when needed for now. */
- count_or_remove_death_notes (NULL, 1);
- cleanup_cfg (CLEANUP_EXPENSIVE);
- }
+ schedule_ebbs ();
else
schedule_insns ();
#endif
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
+ TODO_df_finish | TODO_verify_rtl_sharing |
TODO_dump_func |
+ TODO_verify_flow |
TODO_ggc_collect, /* todo_flags_finish */
'S' /* letter */
};
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
+ TODO_df_finish | TODO_verify_rtl_sharing |
TODO_dump_func |
+ TODO_verify_flow |
TODO_ggc_collect, /* todo_flags_finish */
'R' /* letter */
};