/* Instruction scheduling pass.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
#include "toplev.h"
#include "recog.h"
#include "cfglayout.h"
+#include "params.h"
#include "sched-int.h"
#include "target.h"
#define FED_BY_SPEC_LOAD(insn) (h_i_d[INSN_UID (insn)].fed_by_spec_load)
#define IS_LOAD_INSN(insn) (h_i_d[INSN_UID (insn)].is_load_insn)
-#define MAX_RGN_BLOCKS 10
-#define MAX_RGN_INSNS 100
-
/* nr_inter/spec counts interblock/speculative motion for the function. */
static int nr_inter, nr_spec;
void debug_regions (void);
static void find_single_block_region (void);
-static void find_rgns (struct edge_list *, dominance_info);
-static int too_large (int, int *, int *);
+static void find_rgns (struct edge_list *);
+static bool too_large (int, int *, int *);
extern void debug_live (int, int);
the cfg not well structured. */
/* Check for labels referred to by insns other than jumps. */
FOR_EACH_BB (b)
- for (insn = b->head;; insn = NEXT_INSN (insn))
+ for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
{
code = GET_CODE (insn);
- if (GET_RTX_CLASS (code) == 'i' && code != JUMP_INSN)
+ if (INSN_P (insn) && code != JUMP_INSN)
{
rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
return 1;
}
- if (insn == b->end)
+ if (insn == BB_END (b))
break;
}
}
/* ??? We can kill these soon. */
- in_edges = (int *) xcalloc (last_basic_block, sizeof (int));
- out_edges = (int *) xcalloc (last_basic_block, sizeof (int));
- edge_table = (haifa_edge *) xcalloc (num_edges, sizeof (haifa_edge));
+ in_edges = xcalloc (last_basic_block, sizeof (int));
+ out_edges = xcalloc (last_basic_block, sizeof (int));
+ edge_table = xcalloc (num_edges, sizeof (haifa_edge));
nr_edges = 0;
for (i = 0; i < num_edges; i++)
}
/* Update number of blocks and the estimate for number of insns
- in the region. Return 1 if the region is "too large" for interblock
- scheduling (compile time considerations), otherwise return 0. */
+ in the region. Return true if the region is "too large" for interblock
+ scheduling (compile time considerations). */
-static int
+static bool
too_large (int block, int *num_bbs, int *num_insns)
{
(*num_bbs)++;
- (*num_insns) += (INSN_LUID (BLOCK_END (block)) -
- INSN_LUID (BLOCK_HEAD (block)));
- if ((*num_bbs > MAX_RGN_BLOCKS) || (*num_insns > MAX_RGN_INSNS))
- return 1;
- else
- return 0;
+ (*num_insns) += (INSN_LUID (BB_END (BASIC_BLOCK (block)))
+ - INSN_LUID (BB_HEAD (BASIC_BLOCK (block))));
+
+ return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
+ || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
}
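The two limits checked above were previously the hard-coded MAX_RGN_BLOCKS (10) and MAX_RGN_INSNS (100); reading them through PARAM_VALUE lets users tune them with --param max-sched-region-blocks=N and --param max-sched-region-insns=N. A sketch of the params.def entries this assumes, with defaults mirroring the removed macros (the help strings are illustrative, and the exact DEFPARAM arity varies between releases):

    DEFPARAM (PARAM_MAX_SCHED_REGION_BLOCKS,
              "max-sched-region-blocks",
              "The maximum number of blocks in a region to be considered for interblock scheduling",
              10)
    DEFPARAM (PARAM_MAX_SCHED_REGION_INSNS,
              "max-sched-region-insns",
              "The maximum number of insns in a region to be considered for interblock scheduling",
              100)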
/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
of edge tables. That would simplify it somewhat. */
static void
-find_rgns (struct edge_list *edge_list, dominance_info dom)
+find_rgns (struct edge_list *edge_list)
{
int *max_hdr, *dfs_nr, *stack, *degree;
char no_loops = 1;
int node, child, loop_head, i, head, tail;
- int count = 0, sp, idx = 0, current_edge = out_edges[0];
+ int count = 0, sp, idx = 0;
+ int current_edge = out_edges[ENTRY_BLOCK_PTR->succ->dest->index];
int num_bbs, num_insns, unreachable;
int too_large_failure;
basic_block bb;
STACK, SP and DFS_NR are only used during the first traversal. */
/* Allocate and initialize variables for the first traversal. */
- max_hdr = (int *) xmalloc (last_basic_block * sizeof (int));
- dfs_nr = (int *) xcalloc (last_basic_block, sizeof (int));
- stack = (int *) xmalloc (nr_edges * sizeof (int));
+ max_hdr = xmalloc (last_basic_block * sizeof (int));
+ dfs_nr = xcalloc (last_basic_block, sizeof (int));
+ stack = xmalloc (nr_edges * sizeof (int));
inner = sbitmap_alloc (last_basic_block);
sbitmap_ones (inner);
/* Second traversal: find reducible inner loops and topologically sort
the blocks of each region. */
- queue = (int *) xmalloc (n_basic_blocks * sizeof (int));
+ queue = xmalloc (n_basic_blocks * sizeof (int));
/* Find blocks which are inner loop headers. We still have non-reducible
loops to consider at this point. */
{
/* Now verify that the block is dominated by the loop
header. */
- if (!dominated_by_p (dom, jbb, bb))
+ if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
break;
}
}
/* Estimate # insns, and count # blocks in the region. */
num_bbs = 1;
- num_insns = (INSN_LUID (bb->end)
- - INSN_LUID (bb->head));
+ num_insns = (INSN_LUID (BB_END (bb))
+ - INSN_LUID (BB_HEAD (bb)));
/* Find all loop latches (blocks with back edges to the loop
header), or all the leaf blocks if the cfg has no loops.
static void
split_edges (int bb_src, int bb_trg, edgelst *bl)
{
- sbitmap src = (edgeset) sbitmap_alloc (pot_split[bb_src]->n_bits);
+ sbitmap src = sbitmap_alloc (pot_split[bb_src]->n_bits);
sbitmap_copy (src, pot_split[bb_src]);
sbitmap_difference (src, src, pot_split[bb_trg]);
add the TO block to the update block list. This list can end
up with a lot of duplicates. We need to weed them out to avoid
overrunning the end of the bblst_table. */
- update_blocks = (char *) alloca (last_basic_block);
+ update_blocks = alloca (last_basic_block);
memset (update_blocks, 0, last_basic_block);
update_idx = 0;
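The weeding is the usual flag-array pattern: one byte per basic block, cleared once up front, then set the first time a block is queued, so each block enters bblst_table at most once. A minimal standalone sketch of the idea (the helper name and buffers are illustrative, not part of this file):

    /* Append block B to LIST only the first time it is seen.  SEEN is a
       caller-cleared array with one byte per block.  Returns the new list
       length, which can never exceed the number of distinct blocks.  */
    static int
    uniq_append (int b, char *seen, int *list, int idx)
    {
      if (!seen[b])
        {
          seen[b] = 1;
          list[idx++] = b;
        }
      return idx;
    }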
if (regno < FIRST_PSEUDO_REGISTER)
{
/* Check for hard registers. */
- int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ int j = hard_regno_nregs[regno][GET_MODE (reg)];
while (--j >= 0)
{
for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
}
else
{
- /* Check for psuedo registers. */
+ /* Check for pseudo registers. */
for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
{
int b = candidate_table[src].split_bbs.first_member[i];
{
if (regno < FIRST_PSEUDO_REGISTER)
{
- int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ int j = hard_regno_nregs[regno][GET_MODE (reg)];
while (--j >= 0)
{
for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
/* Prepare current target block info. */
if (current_nr_blocks > 1)
{
- candidate_table = (candidate *) xmalloc (current_nr_blocks
- * sizeof (candidate));
+ candidate_table = xmalloc (current_nr_blocks * sizeof (candidate));
bblst_last = 0;
/* bblst_table holds split blocks and update blocks for each block after
the TO blocks of region edges, so there can be at most rgn_nr_edges
of them. */
bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
- bblst_table = (int *) xmalloc (bblst_size * sizeof (int));
+ bblst_table = xmalloc (bblst_size * sizeof (int));
bitlst_table_last = 0;
- bitlst_table = (int *) xmalloc (rgn_nr_edges * sizeof (int));
+ bitlst_table = xmalloc (rgn_nr_edges * sizeof (int));
compute_trg_info (target_bb);
}
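As a concrete sizing example for the bound above: with current_nr_blocks = 4, target_bb = 1, and rgn_nr_edges = 6, bblst_size comes to (4 - 1) * 6 = 18 entries, an upper bound on the split plus update blocks recorded for the three candidate blocks after the target.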
for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
{
if (INSN_DEP_COUNT (insn) == 0)
- ready_add (ready, insn);
+ {
+ ready_add (ready, insn);
+
+ if (targetm.sched.adjust_priority)
+ INSN_PRIORITY (insn) =
+ targetm.sched.adjust_priority (insn, INSN_PRIORITY (insn));
+ }
target_n_insns++;
}
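Applying targetm.sched.adjust_priority as insns enter the ready list lets the back end bias dispatch order before scheduling begins. A hypothetical hook, just to show the shape (the heuristic and function name are made up; real targets register theirs via TARGET_SCHED_ADJUST_PRIORITY):

    /* Hypothetical TARGET_SCHED_ADJUST_PRIORITY hook: nudge simple loads
       ahead so their latency is hidden earlier.  INSN's current priority
       comes in; the returned value replaces it.  */
    static int
    example_adjust_priority (rtx insn, int priority)
    {
      if (INSN_P (insn)
          && GET_CODE (PATTERN (insn)) == SET
          && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
        return priority + 1;
      return priority;
    }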
if (!CANT_MOVE (insn)
&& (!IS_SPECULATIVE_INSN (insn)
|| ((((!targetm.sched.use_dfa_pipeline_interface
- || !(*targetm.sched.use_dfa_pipeline_interface) ())
+ || !targetm.sched.use_dfa_pipeline_interface ())
&& insn_issue_delay (insn) <= 3)
|| (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ()
+ && targetm.sched.use_dfa_pipeline_interface ()
&& (recog_memoized (insn) < 0
|| min_insn_conflict_delay (curr_state,
insn, insn) <= 3)))
&& check_live (insn, bb_src)
&& is_exception_free (insn, bb_src, target_bb))))
if (INSN_DEP_COUNT (insn) == 0)
- ready_add (ready, insn);
+ {
+ ready_add (ready, insn);
+
+ if (targetm.sched.adjust_priority)
+ INSN_PRIORITY (insn) =
+ targetm.sched.adjust_priority (insn, INSN_PRIORITY (insn));
+ }
}
}
}
/* Update source block boundaries. */
b1 = BLOCK_FOR_INSN (insn);
- if (insn == b1->head && insn == b1->end)
+ if (insn == BB_HEAD (b1) && insn == BB_END (b1))
{
/* We moved all the insns in the basic block.
Emit a note after the last insn and update the
begin/end boundaries to point to the note. */
rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
- b1->head = note;
- b1->end = note;
+ BB_HEAD (b1) = note;
+ BB_END (b1) = note;
}
- else if (insn == b1->end)
+ else if (insn == BB_END (b1))
{
/* We took insns from the end of the basic block,
so update the end of block boundary so that it
points to the first insn we did not move. */
- b1->end = PREV_INSN (insn);
+ BB_END (b1) = PREV_INSN (insn);
}
- else if (insn == b1->head)
+ else if (insn == BB_HEAD (b1))
{
/* We took insns from the start of the basic block,
so update the start of block boundary so that
it points to the first insn we did not move. */
- b1->head = NEXT_INSN (insn);
+ BB_HEAD (b1) = NEXT_INSN (insn);
}
}
else
|| (IS_SPECULATIVE_INSN (next)
&& (0
|| (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ()
+ && targetm.sched.use_dfa_pipeline_interface ()
&& recog_memoized (next) >= 0
&& min_insn_conflict_delay (curr_state, next,
next) > 3)
|| ((!targetm.sched.use_dfa_pipeline_interface
- || !(*targetm.sched.use_dfa_pipeline_interface) ())
+ || !targetm.sched.use_dfa_pipeline_interface ())
&& insn_issue_delay (next) > 3)
|| !check_live (next, INSN_BB (next))
|| !is_exception_free (next, INSN_BB (next), target_bb)))))
NULL, NULL,
NULL, NULL,
- 0, 0
+ 0, 0, 0
};
/* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register. */
end since moving them results in worse register allocation. Uses remain
at the end to ensure proper register allocation.
- cc0 setters remaim at the end because they can't be moved away from
+ cc0 setters remain at the end because they can't be moved away from
their cc0 user.
Insns setting CLASS_LIKELY_SPILLED_P registers (usually return values)
BB_TO_BLOCK (bb), bb);
if (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ())
+ && targetm.sched.use_dfa_pipeline_interface ())
{
fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
"insn", "code", "bb", "dep", "prio", "cost",
}
if (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ())
+ && targetm.sched.use_dfa_pipeline_interface ())
{
fprintf (sched_dump,
";; %s%5d%6d%6d%6d%6d%6d ",
init_deps_global ();
/* Initializations for region data dependence analysis. */
- bb_deps = (struct deps *) xmalloc (sizeof (struct deps) * current_nr_blocks);
+ bb_deps = xmalloc (sizeof (struct deps) * current_nr_blocks);
for (bb = 0; bb < current_nr_blocks; bb++)
init_deps (bb_deps + bb);
{
int i;
- prob = (float *) xmalloc ((current_nr_blocks) * sizeof (float));
+ prob = xmalloc ((current_nr_blocks) * sizeof (float));
dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
sbitmap_vector_zero (dom, current_nr_blocks);
/* Edge to bit. */
rgn_nr_edges = 0;
- edge_to_bit = (int *) xmalloc (nr_edges * sizeof (int));
+ edge_to_bit = xmalloc (nr_edges * sizeof (int));
for (i = 1; i < nr_edges; i++)
if (CONTAINING_RGN (FROM_BLOCK (i)) == rgn)
EDGE_TO_BIT (i) = rgn_nr_edges++;
- rgn_edges = (int *) xmalloc (rgn_nr_edges * sizeof (int));
+ rgn_edges = xmalloc (rgn_nr_edges * sizeof (int));
rgn_nr_edges = 0;
for (i = 1; i < nr_edges; i++)
sched_rgn_n_insns += sched_n_insns;
/* Update target block boundaries. */
- if (head == BLOCK_HEAD (b))
- BLOCK_HEAD (b) = current_sched_info->head;
- if (tail == BLOCK_END (b))
- BLOCK_END (b) = current_sched_info->tail;
+ if (head == BB_HEAD (BASIC_BLOCK (b)))
+ BB_HEAD (BASIC_BLOCK (b)) = current_sched_info->head;
+ if (tail == BB_END (BASIC_BLOCK (b)))
+ BB_END (BASIC_BLOCK (b)) = current_sched_info->tail;
/* Clean up. */
if (current_nr_blocks > 1)
int rgn;
nr_regions = 0;
- rgn_table = (region *) xmalloc ((n_basic_blocks) * sizeof (region));
- rgn_bb_table = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
- block_to_bb = (int *) xmalloc ((last_basic_block) * sizeof (int));
- containing_rgn = (int *) xmalloc ((last_basic_block) * sizeof (int));
+ rgn_table = xmalloc ((n_basic_blocks) * sizeof (region));
+ rgn_bb_table = xmalloc ((n_basic_blocks) * sizeof (int));
+ block_to_bb = xmalloc ((last_basic_block) * sizeof (int));
+ containing_rgn = xmalloc ((last_basic_block) * sizeof (int));
/* Compute regions for scheduling. */
if (reload_completed
}
else
{
- dominance_info dom;
struct edge_list *edge_list;
/* The scheduler runs after estimate_probabilities; therefore, we
edge_list = create_edge_list ();
/* Compute the dominators and post dominators. */
- dom = calculate_dominance_info (CDI_DOMINATORS);
+ calculate_dominance_info (CDI_DOMINATORS);
/* build_control_flow will return nonzero if it detects unreachable
blocks or any other irregularity with the cfg which prevents
if (build_control_flow (edge_list) != 0)
find_single_block_region ();
else
- find_rgns (edge_list, dom);
+ find_rgns (edge_list);
if (sched_verbose >= 3)
debug_regions ();
/* For now. This will move as more and more of haifa is converted
to using the cfg code in flow.c. */
- free_dominance_info (dom);
+ free_dominance_info (CDI_DOMINATORS);
}
}
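The dominance changes above all follow the same API shift: dominator data is no longer threaded through callers in an explicit dominance_info handle, but kept in a per-direction global store keyed by CDI_DOMINATORS. The calling convention this patch assumes, collected in one place (bb and other_bb are illustrative):

    /* Compute, query, and release forward-dominance data; no handle
       is passed between the callers anymore.  */
    calculate_dominance_info (CDI_DOMINATORS);
    if (dominated_by_p (CDI_DOMINATORS, bb, other_bb))
      /* bb is dominated by other_bb.  */;
    free_dominance_info (CDI_DOMINATORS);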
if (CHECK_DEAD_NOTES)
{
blocks = sbitmap_alloc (last_basic_block);
- deaths_in_region = (int *) xmalloc (sizeof (int) * nr_regions);
+ deaths_in_region = xmalloc (sizeof (int) * nr_regions);
/* Remove all death notes from the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
{