/* Instruction scheduling pass.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
#include "toplev.h"
#include "recog.h"
#include "cfglayout.h"
+#include "params.h"
#include "sched-int.h"
#include "target.h"
#define FED_BY_SPEC_LOAD(insn) (h_i_d[INSN_UID (insn)].fed_by_spec_load)
#define IS_LOAD_INSN(insn) (h_i_d[INSN_UID (insn)].is_load_insn)
-#define MAX_RGN_BLOCKS 10
-#define MAX_RGN_INSNS 100
-
/* nr_inter/spec counts interblock/speculative motion for the function. */
static int nr_inter, nr_spec;
static int is_cfg_nonregular (void);
static int build_control_flow (struct edge_list *);
static void new_edge (int, int);
+static bool sched_is_disabled_for_current_region_p (void);
/* A region is the main entity for interblock scheduling: insns
are allowed to move between blocks in the same region, along
void debug_regions (void);
static void find_single_block_region (void);
-static void find_rgns (struct edge_list *, dominance_info);
-static int too_large (int, int *, int *);
+static void find_rgns (struct edge_list *);
+static bool too_large (int, int *, int *);
extern void debug_live (int, int);
the cfg not well structured. */
/* Check for labels referred to by something other than jumps.  */
FOR_EACH_BB (b)
- for (insn = b->head;; insn = NEXT_INSN (insn))
+ for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
{
code = GET_CODE (insn);
- if (GET_RTX_CLASS (code) == 'i' && code != JUMP_INSN)
+ if (INSN_P (insn) && code != JUMP_INSN)
{
rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
if (note
- && ! (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
+ && ! (JUMP_P (NEXT_INSN (insn))
&& find_reg_note (NEXT_INSN (insn), REG_LABEL,
XEXP (note, 0))))
return 1;
}
- if (insn == b->end)
+ if (insn == BB_END (b))
break;
}
}
/* ??? We can kill these soon. */
- in_edges = (int *) xcalloc (last_basic_block, sizeof (int));
- out_edges = (int *) xcalloc (last_basic_block, sizeof (int));
- edge_table = (haifa_edge *) xcalloc (num_edges, sizeof (haifa_edge));
+ in_edges = xcalloc (last_basic_block, sizeof (int));
+ out_edges = xcalloc (last_basic_block, sizeof (int));
+ edge_table = xcalloc (num_edges, sizeof (haifa_edge));
nr_edges = 0;
for (i = 0; i < num_edges; i++)
}
/* Update number of blocks and the estimate for number of insns
- in the region. Return 1 if the region is "too large" for interblock
- scheduling (compile time considerations), otherwise return 0. */
+ in the region. Return true if the region is "too large" for interblock
+ scheduling (compile time considerations). */
-static int
+static bool
too_large (int block, int *num_bbs, int *num_insns)
{
(*num_bbs)++;
- (*num_insns) += (INSN_LUID (BLOCK_END (block)) -
- INSN_LUID (BLOCK_HEAD (block)));
- if ((*num_bbs > MAX_RGN_BLOCKS) || (*num_insns > MAX_RGN_INSNS))
- return 1;
- else
- return 0;
+ (*num_insns) += (INSN_LUID (BB_END (BASIC_BLOCK (block)))
+ - INSN_LUID (BB_HEAD (BASIC_BLOCK (block))));
+
+ return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
+ || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
}
/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
of edge tables. That would simplify it somewhat. */
static void
-find_rgns (struct edge_list *edge_list, dominance_info dom)
+find_rgns (struct edge_list *edge_list)
{
int *max_hdr, *dfs_nr, *stack, *degree;
char no_loops = 1;
int node, child, loop_head, i, head, tail;
- int count = 0, sp, idx = 0, current_edge = out_edges[0];
+ int count = 0, sp, idx = 0;
+ int current_edge = out_edges[ENTRY_BLOCK_PTR->succ->dest->index];
int num_bbs, num_insns, unreachable;
int too_large_failure;
basic_block bb;
STACK, SP and DFS_NR are only used during the first traversal. */
/* Allocate and initialize variables for the first traversal. */
- max_hdr = (int *) xmalloc (last_basic_block * sizeof (int));
- dfs_nr = (int *) xcalloc (last_basic_block, sizeof (int));
- stack = (int *) xmalloc (nr_edges * sizeof (int));
+ max_hdr = xmalloc (last_basic_block * sizeof (int));
+ dfs_nr = xcalloc (last_basic_block, sizeof (int));
+ stack = xmalloc (nr_edges * sizeof (int));
inner = sbitmap_alloc (last_basic_block);
sbitmap_ones (inner);
/* Second traversal: find reducible inner loops and topologically sort
block of each region. */
- queue = (int *) xmalloc (n_basic_blocks * sizeof (int));
+ queue = xmalloc (n_basic_blocks * sizeof (int));
/* Find blocks which are inner loop headers. We still have non-reducible
loops to consider at this point. */
{
/* Now verify that the block is dominated by the loop
header. */
- if (!dominated_by_p (dom, jbb, bb))
+ if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
break;
}
}
/* Estimate # insns, and count # blocks in the region. */
num_bbs = 1;
- num_insns = (INSN_LUID (bb->end)
- - INSN_LUID (bb->head));
+ num_insns = (INSN_LUID (BB_END (bb))
+ - INSN_LUID (BB_HEAD (bb)));
/* Find all loop latches (blocks with back edges to the loop
header), or all the leaf blocks if the cfg has no loops.
static void
split_edges (int bb_src, int bb_trg, edgelst *bl)
{
- sbitmap src = (edgeset) sbitmap_alloc (pot_split[bb_src]->n_bits);
+ sbitmap src = sbitmap_alloc (pot_split[bb_src]->n_bits);
sbitmap_copy (src, pot_split[bb_src]);
sbitmap_difference (src, src, pot_split[bb_trg]);
add the TO block to the update block list. This list can end
up with a lot of duplicates. We need to weed them out to avoid
overrunning the end of the bblst_table. */
- update_blocks = (char *) alloca (last_basic_block);
+ update_blocks = alloca (last_basic_block);
memset (update_blocks, 0, last_basic_block);
update_idx = 0;
return 0;
}
- if (GET_CODE (reg) != REG)
+ if (!REG_P (reg))
return 1;
regno = REGNO (reg);
if (regno < FIRST_PSEUDO_REGISTER)
{
/* Check for hard registers. */
- int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ int j = hard_regno_nregs[regno][GET_MODE (reg)];
while (--j >= 0)
{
for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
}
else
{
- /* Check for psuedo registers. */
+ /* Check for pseudo registers. */
for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
{
int b = candidate_table[src].split_bbs.first_member[i];
return;
}
- if (GET_CODE (reg) != REG)
+ if (!REG_P (reg))
return;
/* Global registers are always live, so the code below does not apply
{
if (regno < FIRST_PSEUDO_REGISTER)
{
- int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ int j = hard_regno_nregs[regno][GET_MODE (reg)];
while (--j >= 0)
{
for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
&& IS_REACHABLE (INSN_BB (next), load_insn_bb)
&& load_insn_bb != INSN_BB (next)
&& GET_MODE (link) == VOIDmode
- && (GET_CODE (next) == JUMP_INSN
+ && (JUMP_P (next)
|| find_conditional_protection (next, load_insn_bb)))
return 1;
}
/* Must be a DEF-USE dependence upon non-branch. */
if (GET_MODE (link) != VOIDmode
- || GET_CODE (insn1) == JUMP_INSN)
+ || JUMP_P (insn1))
continue;
/* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn. */
static const char *rgn_print_insn (rtx, int);
static int rgn_rank (rtx, rtx);
static int contributes_to_priority (rtx, rtx);
-static void compute_jump_reg_dependencies (rtx, regset);
+static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
/* Return nonzero if there are more insns that should be scheduled. */
/* Prepare current target block info. */
if (current_nr_blocks > 1)
{
- candidate_table = (candidate *) xmalloc (current_nr_blocks
- * sizeof (candidate));
+ candidate_table = xmalloc (current_nr_blocks * sizeof (candidate));
bblst_last = 0;
/* bblst_table holds split blocks and update blocks for each block after
the TO blocks of region edges, so there can be at most rgn_nr_edges
of them. */
bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
- bblst_table = (int *) xmalloc (bblst_size * sizeof (int));
+ bblst_table = xmalloc (bblst_size * sizeof (int));
bitlst_table_last = 0;
- bitlst_table = (int *) xmalloc (rgn_nr_edges * sizeof (int));
+ bitlst_table = xmalloc (rgn_nr_edges * sizeof (int));
compute_trg_info (target_bb);
}
for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
{
if (INSN_DEP_COUNT (insn) == 0)
- ready_add (ready, insn);
+ {
+ ready_add (ready, insn);
+
+ if (targetm.sched.adjust_priority)
+ INSN_PRIORITY (insn) =
+ targetm.sched.adjust_priority (insn, INSN_PRIORITY (insn));
+ }
target_n_insns++;
}
if (!CANT_MOVE (insn)
&& (!IS_SPECULATIVE_INSN (insn)
- || ((((!targetm.sched.use_dfa_pipeline_interface
- || !(*targetm.sched.use_dfa_pipeline_interface) ())
- && insn_issue_delay (insn) <= 3)
- || (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ()
- && (recog_memoized (insn) < 0
- || min_insn_conflict_delay (curr_state,
- insn, insn) <= 3)))
+ || ((recog_memoized (insn) < 0
+ || min_insn_conflict_delay (curr_state,
+ insn, insn) <= 3)
&& check_live (insn, bb_src)
&& is_exception_free (insn, bb_src, target_bb))))
if (INSN_DEP_COUNT (insn) == 0)
- ready_add (ready, insn);
+ {
+ ready_add (ready, insn);
+
+ if (targetm.sched.adjust_priority)
+ INSN_PRIORITY (insn) =
+ targetm.sched.adjust_priority (insn, INSN_PRIORITY (insn));
+ }
}
}
}
static int
can_schedule_ready_p (rtx insn)
{
- if (GET_CODE (insn) == JUMP_INSN)
+ if (JUMP_P (insn))
last_was_jump = 1;
/* An interblock motion? */
/* Update source block boundaries. */
b1 = BLOCK_FOR_INSN (insn);
- if (insn == b1->head && insn == b1->end)
+ if (insn == BB_HEAD (b1) && insn == BB_END (b1))
{
/* We moved all the insns in the basic block.
Emit a note after the last insn and update the
begin/end boundaries to point to the note. */
rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
- b1->head = note;
- b1->end = note;
+ BB_HEAD (b1) = note;
+ BB_END (b1) = note;
}
- else if (insn == b1->end)
+ else if (insn == BB_END (b1))
{
/* We took insns from the end of the basic block,
so update the end of block boundary so that it
points to the first insn we did not move. */
- b1->end = PREV_INSN (insn);
+ BB_END (b1) = PREV_INSN (insn);
}
- else if (insn == b1->head)
+ else if (insn == BB_HEAD (b1))
{
/* We took insns from the start of the basic block,
so update the start of block boundary so that
it points to the first insn we did not move. */
- b1->head = NEXT_INSN (insn);
+ BB_HEAD (b1) = NEXT_INSN (insn);
}
}
else
&& (!IS_VALID (INSN_BB (next))
|| CANT_MOVE (next)
|| (IS_SPECULATIVE_INSN (next)
- && (0
- || (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ()
- && recog_memoized (next) >= 0
- && min_insn_conflict_delay (curr_state, next,
- next) > 3)
- || ((!targetm.sched.use_dfa_pipeline_interface
- || !(*targetm.sched.use_dfa_pipeline_interface) ())
- && insn_issue_delay (next) > 3)
+ && ((recog_memoized (next) >= 0
+ && min_insn_conflict_delay (curr_state, next, next) > 3)
|| !check_live (next, INSN_BB (next))
|| !is_exception_free (next, INSN_BB (next), target_bb)))))
return 0;
return BLOCK_NUM (next) == BLOCK_NUM (insn);
}
-/* INSN is a JUMP_INSN. Store the set of registers that must be considered
- to be set by this jump in SET. */
+/* INSN is a JUMP_INSN, COND_SET is the set of registers that are
+ conditionally set before INSN. Store the set of registers that
+ must be considered as used by this jump in USED and that of
+ registers that must be considered as set in SET. */
static void
compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
+ regset cond_exec ATTRIBUTE_UNUSED,
+ regset used ATTRIBUTE_UNUSED,
regset set ATTRIBUTE_UNUSED)
{
/* Nothing to do here, since we postprocess jumps in
NULL, NULL,
NULL, NULL,
- 0, 0
+ 0, 0, 0
};
/* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register. */
end since moving them results in worse register allocation. Uses remain
at the end to ensure proper register allocation.
- cc0 setters remaim at the end because they can't be moved away from
+ cc0 setters remain at the end because they can't be moved away from
their cc0 user.
Insns setting CLASS_LIKELY_SPILLED_P registers (usually return values)
insn = tail;
last = 0;
- while (GET_CODE (insn) == CALL_INSN
- || GET_CODE (insn) == JUMP_INSN
- || (GET_CODE (insn) == INSN
+ while (CALL_P (insn)
+ || JUMP_P (insn)
+ || (NONJUMP_INSN_P (insn)
&& (GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER
|| can_throw_internal (insn)
#endif
|| (!reload_completed
&& sets_likely_spilled (PATTERN (insn)))))
- || GET_CODE (insn) == NOTE)
+ || NOTE_P (insn))
{
- if (GET_CODE (insn) != NOTE)
+ if (!NOTE_P (insn))
{
if (last != 0 && !find_insn_list (insn, LOG_LINKS (last)))
{
fprintf (sched_dump, ";; --------------- forward dependences: ------------ \n");
for (bb = 0; bb < current_nr_blocks; bb++)
{
- if (1)
- {
- rtx head, tail;
- rtx next_tail;
- rtx insn;
-
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
- next_tail = NEXT_INSN (tail);
- fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
- BB_TO_BLOCK (bb), bb);
+ rtx head, tail;
+ rtx next_tail;
+ rtx insn;
- if (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ())
- {
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
- "insn", "code", "bb", "dep", "prio", "cost",
- "reservation");
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
- "----", "----", "--", "---", "----", "----",
- "-----------");
- }
- else
- {
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
- "insn", "code", "bb", "dep", "prio", "cost", "blockage", "units");
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
- "----", "----", "--", "---", "----", "----", "--------", "-----");
- }
+ get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
+ next_tail = NEXT_INSN (tail);
+ fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
+ BB_TO_BLOCK (bb), bb);
+
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
+ "insn", "code", "bb", "dep", "prio", "cost",
+ "reservation");
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
+ "----", "----", "--", "---", "----", "----",
+ "-----------");
+
+ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+ {
+ rtx link;
- for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+ if (! INSN_P (insn))
{
- rtx link;
-
- if (! INSN_P (insn))
+ int n;
+ fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
+ if (NOTE_P (insn))
{
- int n;
- fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
- if (GET_CODE (insn) == NOTE)
+ n = NOTE_LINE_NUMBER (insn);
+ if (n < 0)
+ fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
+ else
{
- n = NOTE_LINE_NUMBER (insn);
- if (n < 0)
- fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
- else
- fprintf (sched_dump, "line %d, file %s\n", n,
- NOTE_SOURCE_FILE (insn));
+ expanded_location xloc;
+ NOTE_EXPANDED_LOCATION (xloc, insn);
+ fprintf (sched_dump, "line %d, file %s\n",
+ xloc.line, xloc.file);
}
- else
- fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
- continue;
- }
-
- if (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ())
- {
- fprintf (sched_dump,
- ";; %s%5d%6d%6d%6d%6d%6d ",
- (SCHED_GROUP_P (insn) ? "+" : " "),
- INSN_UID (insn),
- INSN_CODE (insn),
- INSN_BB (insn),
- INSN_DEP_COUNT (insn),
- INSN_PRIORITY (insn),
- insn_cost (insn, 0, 0));
-
- if (recog_memoized (insn) < 0)
- fprintf (sched_dump, "nothing");
- else
- print_reservation (sched_dump, insn);
}
else
- {
- int unit = insn_unit (insn);
- int range
- = (unit < 0
- || function_units[unit].blockage_range_function == 0
- ? 0
- : function_units[unit].blockage_range_function (insn));
- fprintf (sched_dump,
- ";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ",
- (SCHED_GROUP_P (insn) ? "+" : " "),
- INSN_UID (insn),
- INSN_CODE (insn),
- INSN_BB (insn),
- INSN_DEP_COUNT (insn),
- INSN_PRIORITY (insn),
- insn_cost (insn, 0, 0),
- (int) MIN_BLOCKAGE_COST (range),
- (int) MAX_BLOCKAGE_COST (range));
- insn_print_units (insn);
- }
-
- fprintf (sched_dump, "\t: ");
- for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
- fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0)));
- fprintf (sched_dump, "\n");
+ fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
+ continue;
}
+
+ fprintf (sched_dump,
+ ";; %s%5d%6d%6d%6d%6d%6d ",
+ (SCHED_GROUP_P (insn) ? "+" : " "),
+ INSN_UID (insn),
+ INSN_CODE (insn),
+ INSN_BB (insn),
+ INSN_DEP_COUNT (insn),
+ INSN_PRIORITY (insn),
+ insn_cost (insn, 0, 0));
+
+ if (recog_memoized (insn) < 0)
+ fprintf (sched_dump, "nothing");
+ else
+ print_reservation (sched_dump, insn);
+
+ fprintf (sched_dump, "\t: ");
+ for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
+ fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0)));
+ fprintf (sched_dump, "\n");
}
}
fprintf (sched_dump, "\n");
}
\f
+/* Return true if every basic block of the current region contains a
+   NOTE_INSN_DISABLE_SCHED_OF_BLOCK note, meaning the whole region
+   must be left unscheduled.  A single block without the note is
+   enough to allow scheduling of the region.  */
+static bool
+sched_is_disabled_for_current_region_p (void)
+{
+  rtx first_bb_insn, last_bb_insn, insn;
+  int bb;
+
+  for (bb = 0; bb < current_nr_blocks; bb++)
+    {
+      bool disable_sched = false;
+      /* Search backwards for a NOTE_DISABLE_SCHED_OF_BLOCK note
+	 between the start and end of the basic block.  */
+      get_block_head_tail (BB_TO_BLOCK (bb), &first_bb_insn,
+			   &last_bb_insn);
+      for (insn = last_bb_insn; insn != NULL && insn != first_bb_insn;
+	   insn = PREV_INSN (insn))
+	if (NOTE_P (insn)
+	    && (NOTE_LINE_NUMBER (insn)
+		== NOTE_INSN_DISABLE_SCHED_OF_BLOCK))
+	  {
+	    disable_sched = true;
+	    break;
+	  }
+      /* This block may be scheduled, so the region may be too.  */
+      if (! disable_sched)
+	return false;
+    }
+
+  return true;
+}
+
/* Schedule a region. A region is either an inner loop, a loop-free
subroutine, or a single basic block. Each bb in the region is
scheduled after its flow predecessors. */
current_nr_blocks = RGN_NR_BLOCKS (rgn);
current_blocks = RGN_BLOCKS (rgn);
+ /* Don't schedule region that is marked by
+ NOTE_DISABLE_SCHED_OF_BLOCK. */
+ if (sched_is_disabled_for_current_region_p ())
+ return;
+
init_deps_global ();
/* Initializations for region data dependence analysis. */
- bb_deps = (struct deps *) xmalloc (sizeof (struct deps) * current_nr_blocks);
+ bb_deps = xmalloc (sizeof (struct deps) * current_nr_blocks);
for (bb = 0; bb < current_nr_blocks; bb++)
init_deps (bb_deps + bb);
{
int i;
- prob = (float *) xmalloc ((current_nr_blocks) * sizeof (float));
+ prob = xmalloc ((current_nr_blocks) * sizeof (float));
dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
sbitmap_vector_zero (dom, current_nr_blocks);
/* Edge to bit. */
rgn_nr_edges = 0;
- edge_to_bit = (int *) xmalloc (nr_edges * sizeof (int));
+ edge_to_bit = xmalloc (nr_edges * sizeof (int));
for (i = 1; i < nr_edges; i++)
if (CONTAINING_RGN (FROM_BLOCK (i)) == rgn)
EDGE_TO_BIT (i) = rgn_nr_edges++;
- rgn_edges = (int *) xmalloc (rgn_nr_edges * sizeof (int));
+ rgn_edges = xmalloc (rgn_nr_edges * sizeof (int));
rgn_nr_edges = 0;
for (i = 1; i < nr_edges; i++)
sched_rgn_n_insns += sched_n_insns;
/* Update target block boundaries. */
- if (head == BLOCK_HEAD (b))
- BLOCK_HEAD (b) = current_sched_info->head;
- if (tail == BLOCK_END (b))
- BLOCK_END (b) = current_sched_info->tail;
+ if (head == BB_HEAD (BASIC_BLOCK (b)))
+ BB_HEAD (BASIC_BLOCK (b)) = current_sched_info->head;
+ if (tail == BB_END (BASIC_BLOCK (b)))
+ BB_END (BASIC_BLOCK (b)) = current_sched_info->tail;
/* Clean up. */
if (current_nr_blocks > 1)
int rgn;
nr_regions = 0;
- rgn_table = (region *) xmalloc ((n_basic_blocks) * sizeof (region));
- rgn_bb_table = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
- block_to_bb = (int *) xmalloc ((last_basic_block) * sizeof (int));
- containing_rgn = (int *) xmalloc ((last_basic_block) * sizeof (int));
+ rgn_table = xmalloc ((n_basic_blocks) * sizeof (region));
+ rgn_bb_table = xmalloc ((n_basic_blocks) * sizeof (int));
+ block_to_bb = xmalloc ((last_basic_block) * sizeof (int));
+ containing_rgn = xmalloc ((last_basic_block) * sizeof (int));
/* Compute regions for scheduling. */
if (reload_completed
}
else
{
- dominance_info dom;
struct edge_list *edge_list;
/* The scheduler runs after estimate_probabilities; therefore, we
edge_list = create_edge_list ();
/* Compute the dominators and post dominators. */
- dom = calculate_dominance_info (CDI_DOMINATORS);
+ calculate_dominance_info (CDI_DOMINATORS);
/* build_control_flow will return nonzero if it detects unreachable
blocks or any other irregularity with the cfg which prevents
if (build_control_flow (edge_list) != 0)
find_single_block_region ();
else
- find_rgns (edge_list, dom);
+ find_rgns (edge_list);
if (sched_verbose >= 3)
debug_regions ();
/* For now. This will move as more and more of haifa is converted
to using the cfg code in flow.c. */
- free_dominance_info (dom);
+ free_dominance_info (CDI_DOMINATORS);
}
}
if (CHECK_DEAD_NOTES)
{
blocks = sbitmap_alloc (last_basic_block);
- deaths_in_region = (int *) xmalloc (sizeof (int) * nr_regions);
+ deaths_in_region = xmalloc (sizeof (int) * nr_regions);
/* Remove all death notes from the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
{