/* Instruction scheduling pass.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
#include "toplev.h"
#include "recog.h"
#include "cfglayout.h"
+#include "params.h"
#include "sched-int.h"
#include "target.h"
#define FED_BY_SPEC_LOAD(insn) (h_i_d[INSN_UID (insn)].fed_by_spec_load)
#define IS_LOAD_INSN(insn) (h_i_d[INSN_UID (insn)].is_load_insn)
-#define MAX_RGN_BLOCKS 10
-#define MAX_RGN_INSNS 100
-
/* nr_inter/spec counts interblock/speculative motion for the function. */
static int nr_inter, nr_spec;
void debug_regions (void);
static void find_single_block_region (void);
-static void find_rgns (struct edge_list *, dominance_info);
-static int too_large (int, int *, int *);
+static void find_rgns (struct edge_list *);
+static bool too_large (int, int *, int *);
extern void debug_live (int, int);
the cfg not well structured. */
/* Check for labels referred to other than by jumps. */
FOR_EACH_BB (b)
- for (insn = b->head;; insn = NEXT_INSN (insn))
+ for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
{
code = GET_CODE (insn);
- if (GET_RTX_CLASS (code) == 'i' && code != JUMP_INSN)
+ if (INSN_P (insn) && code != JUMP_INSN)
{
rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
if (note
- && ! (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
+ && ! (JUMP_P (NEXT_INSN (insn))
&& find_reg_note (NEXT_INSN (insn), REG_LABEL,
XEXP (note, 0))))
return 1;
}
- if (insn == b->end)
+ if (insn == BB_END (b))
break;
}
}
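This hunk converts direct field accesses (b->head, b->end) to the BB_HEAD/BB_END accessors. Assuming the accessors are plain lvalue macros over renamed basic_block fields (a guess at the basic-block.h side of the change, which is not shown in this patch), both reads and assignments go through them:

   /* Assumed definitions, not part of this patch:  */
   #define BB_HEAD(B)  (B)->head_
   #define BB_END(B)   (B)->end_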
/* Update number of blocks and the estimate for number of insns
- in the region. Return 1 if the region is "too large" for interblock
- scheduling (compile time considerations), otherwise return 0. */
+ in the region. Return true if the region is "too large" for interblock
+ scheduling (compile time considerations). */
-static int
+static bool
too_large (int block, int *num_bbs, int *num_insns)
{
(*num_bbs)++;
- (*num_insns) += (INSN_LUID (BLOCK_END (block)) -
- INSN_LUID (BLOCK_HEAD (block)));
- if ((*num_bbs > MAX_RGN_BLOCKS) || (*num_insns > MAX_RGN_INSNS))
- return 1;
- else
- return 0;
+ (*num_insns) += (INSN_LUID (BB_END (BASIC_BLOCK (block)))
+ - INSN_LUID (BB_HEAD (BASIC_BLOCK (block))));
+
+ return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
+ || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
}
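The two removed constants become user-tunable --param knobs. A minimal sketch of the corresponding params.def entries, assuming the defaults keep the old hard-coded limits (10 blocks, 100 insns); the help strings and exact DEFPARAM arity are illustrative, not taken from this patch:

   DEFPARAM (PARAM_MAX_SCHED_REGION_BLOCKS,
	     "max-sched-region-blocks",
	     "The maximum number of blocks in a region to be considered for interblock scheduling",
	     10)
   DEFPARAM (PARAM_MAX_SCHED_REGION_INSNS,
	     "max-sched-region-insns",
	     "The maximum number of insns in a region to be considered for interblock scheduling",
	     100)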
/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
of edge tables. That would simplify it somewhat. */
static void
-find_rgns (struct edge_list *edge_list, dominance_info dom)
+find_rgns (struct edge_list *edge_list)
{
int *max_hdr, *dfs_nr, *stack, *degree;
char no_loops = 1;
{
/* Now verify that the block is dominated by the loop
header. */
- if (!dominated_by_p (dom, jbb, bb))
+ if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
break;
}
}
/* Estimate # insns, and count # blocks in the region. */
num_bbs = 1;
- num_insns = (INSN_LUID (bb->end)
- - INSN_LUID (bb->head));
+ num_insns = (INSN_LUID (BB_END (bb))
+ - INSN_LUID (BB_HEAD (bb)));
/* Find all loop latches (blocks with back edges to the loop
header) or all the leaf blocks if the cfg has no loops.
return 0;
}
- if (GET_CODE (reg) != REG)
+ if (!REG_P (reg))
return 1;
regno = REGNO (reg);
if (regno < FIRST_PSEUDO_REGISTER)
{
/* Check for hard registers. */
- int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ int j = hard_regno_nregs[regno][GET_MODE (reg)];
while (--j >= 0)
{
for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
return;
}
- if (GET_CODE (reg) != REG)
+ if (!REG_P (reg))
return;
/* Global registers are always live, so the code below does not apply
{
if (regno < FIRST_PSEUDO_REGISTER)
{
- int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ int j = hard_regno_nregs[regno][GET_MODE (reg)];
while (--j >= 0)
{
for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
&& IS_REACHABLE (INSN_BB (next), load_insn_bb)
&& load_insn_bb != INSN_BB (next)
&& GET_MODE (link) == VOIDmode
- && (GET_CODE (next) == JUMP_INSN
+ && (JUMP_P (next)
|| find_conditional_protection (next, load_insn_bb)))
return 1;
}
/* Must be a DEF-USE dependence upon non-branch. */
if (GET_MODE (link) != VOIDmode
- || GET_CODE (insn1) == JUMP_INSN)
+ || JUMP_P (insn1))
continue;
/* A path must exist: region-entry -> ... -> bb_trg -> ... load_insn. */
if (targetm.sched.adjust_priority)
INSN_PRIORITY (insn) =
- (*targetm.sched.adjust_priority) (insn, INSN_PRIORITY (insn));
+ targetm.sched.adjust_priority (insn, INSN_PRIORITY (insn));
}
target_n_insns++;
}
if (!CANT_MOVE (insn)
&& (!IS_SPECULATIVE_INSN (insn)
- || ((((!targetm.sched.use_dfa_pipeline_interface
- || !(*targetm.sched.use_dfa_pipeline_interface) ())
- && insn_issue_delay (insn) <= 3)
- || (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ()
- && (recog_memoized (insn) < 0
- || min_insn_conflict_delay (curr_state,
- insn, insn) <= 3)))
+ || ((recog_memoized (insn) < 0
+ || min_insn_conflict_delay (curr_state,
+ insn, insn) <= 3)
&& check_live (insn, bb_src)
&& is_exception_free (insn, bb_src, target_bb))))
if (INSN_DEP_COUNT (insn) == 0)
if (targetm.sched.adjust_priority)
INSN_PRIORITY (insn) =
- (*targetm.sched.adjust_priority) (insn, INSN_PRIORITY (insn));
+ targetm.sched.adjust_priority (insn, INSN_PRIORITY (insn));
}
}
}
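The indirect (*targetm.sched.adjust_priority) (...) spelling is simplified to a plain call through the hook vector. For reference, a hypothetical back-end implementation of this hook; the function name and heuristic are invented for illustration, only the hook signature and the TARGET_SCHED_ADJUST_PRIORITY registration macro are assumed from the target hook interface:

   /* Hypothetical target hook: adjust the priority the generic
      scheduler computed for INSN.  */
   static int
   example_sched_adjust_priority (rtx insn, int priority)
   {
     /* Illustrative heuristic only: leave unrecognized insns alone,
	nudge everything else up by one.  */
     if (recog_memoized (insn) < 0)
       return priority;
     return priority + 1;
   }

   #undef TARGET_SCHED_ADJUST_PRIORITY
   #define TARGET_SCHED_ADJUST_PRIORITY example_sched_adjust_priority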
static int
can_schedule_ready_p (rtx insn)
{
- if (GET_CODE (insn) == JUMP_INSN)
+ if (JUMP_P (insn))
last_was_jump = 1;
/* An interblock motion? */
/* Update source block boundaries. */
b1 = BLOCK_FOR_INSN (insn);
- if (insn == b1->head && insn == b1->end)
+ if (insn == BB_HEAD (b1) && insn == BB_END (b1))
{
/* We moved all the insns in the basic block.
Emit a note after the last insn and update the
begin/end boundaries to point to the note. */
rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
- b1->head = note;
- b1->end = note;
+ BB_HEAD (b1) = note;
+ BB_END (b1) = note;
}
- else if (insn == b1->end)
+ else if (insn == BB_END (b1))
{
/* We took insns from the end of the basic block,
so update the end of block boundary so that it
points to the first insn we did not move. */
- b1->end = PREV_INSN (insn);
+ BB_END (b1) = PREV_INSN (insn);
}
- else if (insn == b1->head)
+ else if (insn == BB_HEAD (b1))
{
/* We took insns from the start of the basic block,
so update the start of block boundary so that
it points to the first insn we did not move. */
- b1->head = NEXT_INSN (insn);
+ BB_HEAD (b1) = NEXT_INSN (insn);
}
}
else
&& (!IS_VALID (INSN_BB (next))
|| CANT_MOVE (next)
|| (IS_SPECULATIVE_INSN (next)
- && (0
- || (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ()
- && recog_memoized (next) >= 0
- && min_insn_conflict_delay (curr_state, next,
- next) > 3)
- || ((!targetm.sched.use_dfa_pipeline_interface
- || !(*targetm.sched.use_dfa_pipeline_interface) ())
- && insn_issue_delay (next) > 3)
+ && ((recog_memoized (next) >= 0
+ && min_insn_conflict_delay (curr_state, next, next) > 3)
|| !check_live (next, INSN_BB (next))
|| !is_exception_free (next, INSN_BB (next), target_bb)))))
return 0;
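With the non-DFA pipeline interface removed, the speculation gate reduces to a single conflict-delay test. The condition this patch keeps could be read as the following predicate; a sketch only, the helper name is made up and is not introduced by the patch:

   /* Hypothetical helper: true if INSN is cheap enough to move
      speculatively, i.e. it has no recognized pattern or would
      conflict with itself for at most 3 cycles in STATE.  */
   static bool
   cheap_enough_to_speculate (state_t state, rtx insn)
   {
     return (recog_memoized (insn) < 0
	     || min_insn_conflict_delay (state, insn, insn) <= 3);
   }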
end since moving them results in worse register allocation. Uses remain
at the end to ensure proper register allocation.
- cc0 setters remaim at the end because they can't be moved away from
+ cc0 setters remain at the end because they can't be moved away from
their cc0 user.
Insns setting CLASS_LIKELY_SPILLED_P registers (usually return values)
insn = tail;
last = 0;
- while (GET_CODE (insn) == CALL_INSN
- || GET_CODE (insn) == JUMP_INSN
- || (GET_CODE (insn) == INSN
+ while (CALL_P (insn)
+ || JUMP_P (insn)
+ || (NONJUMP_INSN_P (insn)
&& (GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER
|| can_throw_internal (insn)
#endif
|| (!reload_completed
&& sets_likely_spilled (PATTERN (insn)))))
- || GET_CODE (insn) == NOTE)
+ || NOTE_P (insn))
{
- if (GET_CODE (insn) != NOTE)
+ if (!NOTE_P (insn))
{
if (last != 0 && !find_insn_list (insn, LOG_LINKS (last)))
{
fprintf (sched_dump, ";; --------------- forward dependences: ------------ \n");
for (bb = 0; bb < current_nr_blocks; bb++)
{
- if (1)
- {
- rtx head, tail;
- rtx next_tail;
- rtx insn;
-
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
- next_tail = NEXT_INSN (tail);
- fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
- BB_TO_BLOCK (bb), bb);
+ rtx head, tail;
+ rtx next_tail;
+ rtx insn;
- if (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ())
- {
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
- "insn", "code", "bb", "dep", "prio", "cost",
- "reservation");
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
- "----", "----", "--", "---", "----", "----",
- "-----------");
- }
- else
- {
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
- "insn", "code", "bb", "dep", "prio", "cost", "blockage", "units");
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
- "----", "----", "--", "---", "----", "----", "--------", "-----");
- }
+ get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
+ next_tail = NEXT_INSN (tail);
+ fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
+ BB_TO_BLOCK (bb), bb);
+
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
+ "insn", "code", "bb", "dep", "prio", "cost",
+ "reservation");
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
+ "----", "----", "--", "---", "----", "----",
+ "-----------");
+
+ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+ {
+ rtx link;
- for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+ if (! INSN_P (insn))
{
- rtx link;
-
- if (! INSN_P (insn))
+ int n;
+ fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
+ if (NOTE_P (insn))
{
- int n;
- fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
- if (GET_CODE (insn) == NOTE)
+ n = NOTE_LINE_NUMBER (insn);
+ if (n < 0)
+ fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
+ else
{
- n = NOTE_LINE_NUMBER (insn);
- if (n < 0)
- fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
- else
- fprintf (sched_dump, "line %d, file %s\n", n,
- NOTE_SOURCE_FILE (insn));
+ expanded_location xloc;
+ NOTE_EXPANDED_LOCATION (xloc, insn);
+ fprintf (sched_dump, "line %d, file %s\n",
+ xloc.line, xloc.file);
}
- else
- fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
- continue;
- }
-
- if (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ())
- {
- fprintf (sched_dump,
- ";; %s%5d%6d%6d%6d%6d%6d ",
- (SCHED_GROUP_P (insn) ? "+" : " "),
- INSN_UID (insn),
- INSN_CODE (insn),
- INSN_BB (insn),
- INSN_DEP_COUNT (insn),
- INSN_PRIORITY (insn),
- insn_cost (insn, 0, 0));
-
- if (recog_memoized (insn) < 0)
- fprintf (sched_dump, "nothing");
- else
- print_reservation (sched_dump, insn);
}
else
- {
- int unit = insn_unit (insn);
- int range
- = (unit < 0
- || function_units[unit].blockage_range_function == 0
- ? 0
- : function_units[unit].blockage_range_function (insn));
- fprintf (sched_dump,
- ";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ",
- (SCHED_GROUP_P (insn) ? "+" : " "),
- INSN_UID (insn),
- INSN_CODE (insn),
- INSN_BB (insn),
- INSN_DEP_COUNT (insn),
- INSN_PRIORITY (insn),
- insn_cost (insn, 0, 0),
- (int) MIN_BLOCKAGE_COST (range),
- (int) MAX_BLOCKAGE_COST (range));
- insn_print_units (insn);
- }
-
- fprintf (sched_dump, "\t: ");
- for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
- fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0)));
- fprintf (sched_dump, "\n");
+ fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
+ continue;
}
+
+ fprintf (sched_dump,
+ ";; %s%5d%6d%6d%6d%6d%6d ",
+ (SCHED_GROUP_P (insn) ? "+" : " "),
+ INSN_UID (insn),
+ INSN_CODE (insn),
+ INSN_BB (insn),
+ INSN_DEP_COUNT (insn),
+ INSN_PRIORITY (insn),
+ insn_cost (insn, 0, 0));
+
+ if (recog_memoized (insn) < 0)
+ fprintf (sched_dump, "nothing");
+ else
+ print_reservation (sched_dump, insn);
+
+ fprintf (sched_dump, "\t: ");
+ for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
+ fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0)));
+ fprintf (sched_dump, "\n");
}
}
fprintf (sched_dump, "\n");
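Since the DFA interface is now the only code path, the region dependence dump always prints the reservation column. An illustrative excerpt, with the header lines taken from the fprintf formats above and the data line entirely invented (insn UIDs, costs, and reservation names are placeholders, not real output):

   ;; --- Region Dependences --- b 3 bb 0
   ;;      insn  code    bb   dep  prio  cost   reservation
   ;;      ----  ----    --   ---  ----  ----   -----------
   ;;        24    12     0     0     2     1   nothing	: 27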
sched_rgn_n_insns += sched_n_insns;
/* Update target block boundaries. */
- if (head == BLOCK_HEAD (b))
- BLOCK_HEAD (b) = current_sched_info->head;
- if (tail == BLOCK_END (b))
- BLOCK_END (b) = current_sched_info->tail;
+ if (head == BB_HEAD (BASIC_BLOCK (b)))
+ BB_HEAD (BASIC_BLOCK (b)) = current_sched_info->head;
+ if (tail == BB_END (BASIC_BLOCK (b)))
+ BB_END (BASIC_BLOCK (b)) = current_sched_info->tail;
/* Clean up. */
if (current_nr_blocks > 1)
}
else
{
- dominance_info dom;
struct edge_list *edge_list;
/* The scheduler runs after estimate_probabilities; therefore, we
edge_list = create_edge_list ();
/* Compute the dominators and post dominators. */
- dom = calculate_dominance_info (CDI_DOMINATORS);
+ calculate_dominance_info (CDI_DOMINATORS);
/* build_control_flow will return nonzero if it detects unreachable
blocks or any other irregularity with the cfg which prevents
if (build_control_flow (edge_list) != 0)
find_single_block_region ();
else
- find_rgns (edge_list, dom);
+ find_rgns (edge_list);
if (sched_verbose >= 3)
debug_regions ();
/* For now. This will move as more and more of haifa is converted
to using the cfg code in flow.c. */
- free_dominance_info (dom);
+ free_dominance_info (CDI_DOMINATORS);
}
}
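The dominance computation no longer hands back a dominance_info handle; the information is recorded globally and queried or freed per direction. The usage pattern this patch converts to, in sketch form (variable names are illustrative):

   /* New-style dominance API usage.  */
   calculate_dominance_info (CDI_DOMINATORS);
   if (dominated_by_p (CDI_DOMINATORS, bb, header))
     {
       /* bb is dominated by the loop header.  */
     }
   free_dominance_info (CDI_DOMINATORS);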