/* Instruction scheduling pass.
- Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+ 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011
+ Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com). Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com).
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
/* This pass implements list scheduling within basic blocks. It is
run twice: (1) after flow analysis, but before register allocation,
#include "system.h"
#include "coretypes.h"
#include "tm.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
-#include "basic-block.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
-#include "toplev.h"
#include "recog.h"
#include "cfglayout.h"
+#include "params.h"
#include "sched-int.h"
+#include "sel-sched.h"
#include "target.h"
-
-/* Define when we want to do count REG_DEAD notes before and after scheduling
- for sanity checking. We can't do that when conditional execution is used,
- as REG_DEAD exist only for unconditional deaths. */
-
-#if !defined (HAVE_conditional_execution) && defined (ENABLE_CHECKING)
-#define CHECK_DEAD_NOTES 1
-#else
-#define CHECK_DEAD_NOTES 0
-#endif
-
+#include "timevar.h"
+#include "tree-pass.h"
+#include "dbgcnt.h"
#ifdef INSN_SCHEDULING
-/* Some accessor macros for h_i_d members only used within this file. */
-#define INSN_REF_COUNT(INSN) (h_i_d[INSN_UID (INSN)].ref_count)
-#define FED_BY_SPEC_LOAD(insn) (h_i_d[INSN_UID (insn)].fed_by_spec_load)
-#define IS_LOAD_INSN(insn) (h_i_d[INSN_UID (insn)].is_load_insn)
-#define MAX_RGN_BLOCKS 10
-#define MAX_RGN_INSNS 100
+/* Some accessor macros for h_i_d members only used within this file. */
+#define FED_BY_SPEC_LOAD(INSN) (HID (INSN)->fed_by_spec_load)
+#define IS_LOAD_INSN(INSN) (HID (INSN)->is_load_insn)
/* nr_inter/spec counts interblock/speculative motion for the function. */
static int nr_inter, nr_spec;
-/* Control flow graph edges are kept in circular lists. */
-typedef struct
-{
- int from_block;
- int to_block;
- int next_in;
- int next_out;
-}
-haifa_edge;
-static haifa_edge *edge_table;
-
-#define NEXT_IN(edge) (edge_table[edge].next_in)
-#define NEXT_OUT(edge) (edge_table[edge].next_out)
-#define FROM_BLOCK(edge) (edge_table[edge].from_block)
-#define TO_BLOCK(edge) (edge_table[edge].to_block)
-
-/* Number of edges in the control flow graph. (In fact, larger than
- that by 1, since edge 0 is unused.) */
-static int nr_edges;
-
-/* Circular list of incoming/outgoing edges of a block. */
-static int *in_edges;
-static int *out_edges;
-
-#define IN_EDGES(block) (in_edges[block])
-#define OUT_EDGES(block) (out_edges[block])
-
static int is_cfg_nonregular (void);
-static int build_control_flow (struct edge_list *);
-static void new_edge (int, int);
-
-/* A region is the main entity for interblock scheduling: insns
- are allowed to move between blocks in the same region, along
- control flow graph edges, in the 'up' direction. */
-typedef struct
-{
- int rgn_nr_blocks; /* Number of blocks in region. */
- int rgn_blocks; /* cblocks in the region (actually index in rgn_bb_table). */
-}
-region;
/* Number of regions in the procedure. */
-static int nr_regions;
+int nr_regions = 0;
/* Table of region descriptions. */
-static region *rgn_table;
+region *rgn_table = NULL;
/* Array of lists of regions' blocks. */
-static int *rgn_bb_table;
+int *rgn_bb_table = NULL;
/* Topological order of blocks in the region (if b2 is reachable from
b1, block_to_bb[b2] > block_to_bb[b1]). Note: A basic block is
always referred to by either block or b, while its topological
order name (in the region) is referred to by bb. */
-static int *block_to_bb;
+int *block_to_bb = NULL;
/* The number of the region containing a block. */
-static int *containing_rgn;
+int *containing_rgn = NULL;
-#define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks)
-#define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks)
-#define BLOCK_TO_BB(block) (block_to_bb[block])
-#define CONTAINING_RGN(block) (containing_rgn[block])
+/* ebb_head [i] is the index in rgn_bb_table of the head basic block of the
+ i'th ebb. Currently we can get an ebb only through splitting the currently
+ scheduled block, therefore we don't need an ebb_head array for every region;
+ it is sufficient to hold it for the current one only. */
+int *ebb_head = NULL;
-void debug_regions (void);
-static void find_single_block_region (void);
-static void find_rgns (struct edge_list *);
-static int too_large (int, int *, int *);
+/* The minimum probability of reaching a source block so that it will be
+ considered for speculative scheduling. */
+static int min_spec_prob;
-extern void debug_live (int, int);
+static void find_single_block_region (bool);
+static void find_rgns (void);
+static bool too_large (int, int *, int *);
/* Blocks of the current region being scheduled. */
-static int current_nr_blocks;
-static int current_blocks;
-
-/* The mapping from bb to block. */
-#define BB_TO_BLOCK(bb) (rgn_bb_table[current_blocks + (bb)])
-
-typedef struct
-{
- int *first_member; /* Pointer to the list start in bitlst_table. */
- int nr_members; /* The number of members of the bit list. */
-}
-bitlst;
+int current_nr_blocks;
+int current_blocks;
-static int bitlst_table_last;
-static int *bitlst_table;
+/* A speculative motion requires checking live information on the path
+ from 'source' to 'target'. The split blocks are those to be checked.
+ After a speculative motion, live information should be modified in
+ the 'update' blocks.
-static void extract_bitlst (sbitmap, bitlst *);
+ Lists of split and update blocks for each candidate of the current
+ target are in array bblst_table. */
+static basic_block *bblst_table;
+static int bblst_size, bblst_last;
/* Target info declarations.
The block currently being scheduled is referred to as the "target" block,
while other blocks in the region from which insns can be moved to the
target are called "source" blocks. The candidate structure holds info
about such sources: are they valid? Speculative? Etc. */
-typedef bitlst bblst;
+typedef struct
+{
+ basic_block *first_member;
+ int nr_members;
+}
+bblst;
+
typedef struct
{
char is_valid;
candidate;
static candidate *candidate_table;
-
-/* A speculative motion requires checking live information on the path
- from 'source' to 'target'. The split blocks are those to be checked.
- After a speculative motion, live information should be modified in
- the 'update' blocks.
-
- Lists of split and update blocks for each candidate of the current
- target are in array bblst_table. */
-static int *bblst_table, bblst_size, bblst_last;
-
-#define IS_VALID(src) ( candidate_table[src].is_valid )
-#define IS_SPECULATIVE(src) ( candidate_table[src].is_speculative )
+#define IS_VALID(src) (candidate_table[src].is_valid)
+#define IS_SPECULATIVE(src) (candidate_table[src].is_speculative)
+#define IS_SPECULATIVE_INSN(INSN) \
+ (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define SRC_PROB(src) ( candidate_table[src].src_prob )
/* The bb being currently scheduled. */
-static int target_bb;
+int target_bb;
/* List of edges. */
-typedef bitlst edgelst;
+typedef struct
+{
+ edge *first_member;
+ int nr_members;
+}
+edgelst;
+
+static edge *edgelst_table;
+static int edgelst_last;
+
+static void extract_edgelst (sbitmap, edgelst *);
/* Target info functions. */
static void split_edges (int, int, edgelst *);
#define IS_DOMINATED(bb_src, bb_trg) \
( TEST_BIT (dom[bb_src], bb_trg) )
-/* Probability: Prob[i] is a float in [0, 1] which is the probability
- of bb i relative to the region entry. */
-static float *prob;
-
-/* The probability of bb_src, relative to bb_trg. Note, that while the
- 'prob[bb]' is a float in [0, 1], this macro returns an integer
- in [0, 100]. */
-#define GET_SRC_PROB(bb_src, bb_trg) ((int) (100.0 * (prob[bb_src] / \
- prob[bb_trg])))
+/* Probability: Prob[i] is an int in [0, REG_BR_PROB_BASE] which is
+ the probability of bb i relative to the region entry. */
+static int *prob;
/* Bit-set of edges, where bit i stands for edge i. */
typedef sbitmap edgeset;
static int rgn_nr_edges;
/* Array of size rgn_nr_edges. */
-static int *rgn_edges;
-
+static edge *rgn_edges;
/* Mapping from each edge in the graph to its number in the rgn. */
-static int *edge_to_bit;
-#define EDGE_TO_BIT(edge) (edge_to_bit[edge])
+#define EDGE_TO_BIT(edge) ((int)(size_t)(edge)->aux)
+#define SET_EDGE_TO_BIT(edge,nr) ((edge)->aux = (void *)(size_t)(nr))
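+
+/* An illustrative sketch (not part of the surrounding code) of how the
+   aux-field encoding above is meant to be used: once rgn_edges[] has
+   been filled for the current region, each edge is tagged with its
+   region-local number so that EDGE_TO_BIT can later recover the bit
+   index in an edgeset:
+
+       for (i = 0; i < rgn_nr_edges; i++)
+         SET_EDGE_TO_BIT (rgn_edges[i], i);
+
+   The mapping is only valid while the region is being scheduled, and
+   the aux fields must be cleared before they are reused for anything
+   else (compare the EDGE_PASSED markers in haifa_find_rgns below). */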
/* The split edges of a source bb are different for each target
bb. In order to compute this efficiently, the 'potential-split edges'
/* For every bb, a set of its ancestor edges. */
static edgeset *ancestor_edges;
-static void compute_dom_prob_ps (int);
-
#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
-#define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
-#define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
-
-/* Parameters affecting the decision of rank_for_schedule().
- ??? Nope. But MIN_PROBABILITY is used in compute_trg_info. */
-#define MIN_PROBABILITY 40
/* Speculative scheduling functions. */
static int check_live_1 (int, rtx);
static void update_live_1 (int, rtx);
-static int check_live (rtx, int);
-static void update_live (rtx, int);
-static void set_spec_fed (rtx);
static int is_pfree (rtx, int, int);
static int find_conditional_protection (rtx, int);
static int is_conditionally_protected (rtx, int, int);
static int is_exception_free (rtx, int, int);
static bool sets_likely_spilled (rtx);
-static void sets_likely_spilled_1 (rtx, rtx, void *);
+static void sets_likely_spilled_1 (rtx, const_rtx, void *);
static void add_branch_dependences (rtx, rtx);
-static void compute_block_backward_dependences (int);
-void debug_dependencies (void);
+static void compute_block_dependences (int);
-static void init_regions (void);
static void schedule_region (int);
static rtx concat_INSN_LIST (rtx, rtx);
static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
-static void propagate_deps (int, struct deps *);
+static void propagate_deps (int, struct deps_desc *);
static void free_pending_lists (void);
/* Functions for construction of the control flow graph. */
/* Return 1 if control flow graph should not be constructed, 0 otherwise.
We decide not to build the control flow graph if there is possibly more
- than one entry to the function, if computed branches exist, of if we
- have nonlocal gotos. */
+ than one entry to the function, if computed branches exist, if we
+ have nonlocal gotos, or if we have an unreachable loop. */
static int
is_cfg_nonregular (void)
{
basic_block b;
rtx insn;
- RTX_CODE code;
/* If we have a label that could be the target of a nonlocal goto, then
the cfg is not well structured. */
if (forced_labels)
return 1;
- /* If this function has a computed jump, then we consider the cfg
- not well structured. */
- if (current_function_has_computed_jump)
- return 1;
-
/* If we have exception handlers, then we consider the cfg not well
- structured. ?!? We should be able to handle this now that flow.c
- computes an accurate cfg for EH. */
+ structured. ?!? We should be able to handle this now that we
+ compute an accurate cfg for EH. */
if (current_function_has_exception_handlers ())
return 1;
- /* If we have non-jumping insns which refer to labels, then we consider
- the cfg not well structured. */
- /* Check for labels referred to other thn by jumps. */
+ /* If we have insns which refer to labels as non-jumped-to operands,
+ then we consider the cfg not well structured. */
FOR_EACH_BB (b)
- for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
+ FOR_BB_INSNS (b, insn)
{
- code = GET_CODE (insn);
- if (INSN_P (insn) && code != JUMP_INSN)
- {
- rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
-
- if (note
- && ! (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
- && find_reg_note (NEXT_INSN (insn), REG_LABEL,
- XEXP (note, 0))))
- return 1;
- }
-
- if (insn == BB_END (b))
- break;
- }
-
- /* All the tests passed. Consider the cfg well structured. */
- return 0;
-}
-
-/* Build the control flow graph and set nr_edges.
+ rtx note, next, set, dest;
- Instead of trying to build a cfg ourselves, we rely on flow to
- do it for us. Stamp out useless code (and bug) duplication.
+ /* If this function has a computed jump, then we consider the cfg
+ not well structured. */
+ if (JUMP_P (insn) && computed_jump_p (insn))
+ return 1;
- Return nonzero if an irregularity in the cfg is found which would
- prevent cross block scheduling. */
+ if (!INSN_P (insn))
+ continue;
-static int
-build_control_flow (struct edge_list *edge_list)
-{
- int i, unreachable, num_edges;
- basic_block b;
+ note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
+ if (note == NULL_RTX)
+ continue;
- /* This already accounts for entry/exit edges. */
- num_edges = NUM_EDGES (edge_list);
+ /* For that label not to be seen as a referred-to label, this
+ must be a single-set which is feeding a jump *only*. This
+ could be a conditional jump with the label split off for
+ machine-specific reasons or a casesi/tablejump. */
+ next = next_nonnote_insn (insn);
+ if (next == NULL_RTX
+ || !JUMP_P (next)
+ || (JUMP_LABEL (next) != XEXP (note, 0)
+ && find_reg_note (next, REG_LABEL_TARGET,
+ XEXP (note, 0)) == NULL_RTX)
+ || BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (next))
+ return 1;
+
+ set = single_set (insn);
+ if (set == NULL_RTX)
+ return 1;
+
+ dest = SET_DEST (set);
+ if (!REG_P (dest) || !dead_or_set_p (next, dest))
+ return 1;
+ }
/* Unreachable loops with more than one basic block are detected
during the DFS traversal in find_rgns.
Unreachable loops with a single block are detected here. This
test is redundant with the one in find_rgns, but it's much
- cheaper to go ahead and catch the trivial case here. */
- unreachable = 0;
+ cheaper to go ahead and catch the trivial case here. */
FOR_EACH_BB (b)
{
- if (b->pred == NULL
- || (b->pred->src == b
- && b->pred->pred_next == NULL))
- unreachable = 1;
+ if (EDGE_COUNT (b->preds) == 0
+ || (single_pred_p (b)
+ && single_pred (b) == b))
+ return 1;
}
- /* ??? We can kill these soon. */
- in_edges = xcalloc (last_basic_block, sizeof (int));
- out_edges = xcalloc (last_basic_block, sizeof (int));
- edge_table = xcalloc (num_edges, sizeof (haifa_edge));
+ /* All the tests passed. Consider the cfg well structured. */
+ return 0;
+}
+
+/* Extract list of edges from a bitmap containing EDGE_TO_BIT bits. */
- nr_edges = 0;
- for (i = 0; i < num_edges; i++)
- {
- edge e = INDEX_EDGE (edge_list, i);
+static void
+extract_edgelst (sbitmap set, edgelst *el)
+{
+ unsigned int i = 0;
+ sbitmap_iterator sbi;
- if (e->dest != EXIT_BLOCK_PTR
- && e->src != ENTRY_BLOCK_PTR)
- new_edge (e->src->index, e->dest->index);
- }
+ /* edgelst table space is reused in each call to extract_edgelst. */
+ edgelst_last = 0;
- /* Increment by 1, since edge 0 is unused. */
- nr_edges++;
+ el->first_member = &edgelst_table[edgelst_last];
+ el->nr_members = 0;
- return unreachable;
+ /* Iterate over each set bit in the bitset. */
+ EXECUTE_IF_SET_IN_SBITMAP (set, 0, i, sbi)
+ {
+ edgelst_table[edgelst_last++] = rgn_edges[i];
+ el->nr_members++;
+ }
}
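+
+/* A hypothetical caller of extract_edgelst, for illustration only; it
+   assumes rgn_edges[] and the EDGE_TO_BIT numbering are already set up
+   for the current region, and that E is one of the region's edges:
+
+       edgeset s = sbitmap_alloc (rgn_nr_edges);
+       edgelst el;
+
+       sbitmap_zero (s);
+       SET_BIT (s, EDGE_TO_BIT (e));
+       extract_edgelst (s, &el);
+       sbitmap_free (s);
+
+   Afterwards el.nr_members == 1 and el.first_member[0] == e.  Note that
+   the result points into edgelst_table, so it is invalidated by the
+   next call to extract_edgelst. */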
-/* Record an edge in the control flow graph from SOURCE to TARGET.
+/* Functions for the construction of regions. */
- In theory, this is redundant with the s_succs computed above, but
- we have not converted all of haifa to use information from the
- integer lists. */
+/* Print the regions, for debugging purposes. Callable from debugger. */
-static void
-new_edge (int source, int target)
+DEBUG_FUNCTION void
+debug_regions (void)
{
- int e, next_edge;
- int curr_edge, fst_edge;
+ int rgn, bb;
- /* Check for duplicates. */
- fst_edge = curr_edge = OUT_EDGES (source);
- while (curr_edge)
+ fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n");
+ for (rgn = 0; rgn < nr_regions; rgn++)
{
- if (FROM_BLOCK (curr_edge) == source
- && TO_BLOCK (curr_edge) == target)
- {
- return;
- }
+ fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
+ rgn_table[rgn].rgn_nr_blocks);
+ fprintf (sched_dump, ";;\tbb/block: ");
- curr_edge = NEXT_OUT (curr_edge);
+ /* We don't have ebb_head initialized yet, so we can't use
+ BB_TO_BLOCK (). */
+ current_blocks = RGN_BLOCKS (rgn);
- if (fst_edge == curr_edge)
- break;
+ for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
+ fprintf (sched_dump, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);
+
+ fprintf (sched_dump, "\n\n");
}
+}
- e = ++nr_edges;
+/* Print the region's basic blocks. */
- FROM_BLOCK (e) = source;
- TO_BLOCK (e) = target;
+DEBUG_FUNCTION void
+debug_region (int rgn)
+{
+ int bb;
- if (OUT_EDGES (source))
- {
- next_edge = NEXT_OUT (OUT_EDGES (source));
- NEXT_OUT (OUT_EDGES (source)) = e;
- NEXT_OUT (e) = next_edge;
- }
- else
- {
- OUT_EDGES (source) = e;
- NEXT_OUT (e) = e;
- }
+ fprintf (stderr, "\n;; ------------ REGION %d ----------\n\n", rgn);
+ fprintf (stderr, ";;\trgn %d nr_blocks %d:\n", rgn,
+ rgn_table[rgn].rgn_nr_blocks);
+ fprintf (stderr, ";;\tbb/block: ");
- if (IN_EDGES (target))
- {
- next_edge = NEXT_IN (IN_EDGES (target));
- NEXT_IN (IN_EDGES (target)) = e;
- NEXT_IN (e) = next_edge;
- }
- else
+ /* We don't have ebb_head initialized yet, so we can't use
+ BB_TO_BLOCK (). */
+ current_blocks = RGN_BLOCKS (rgn);
+
+ for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
+ fprintf (stderr, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);
+
+ fprintf (stderr, "\n\n");
+
+ for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
{
- IN_EDGES (target) = e;
- NEXT_IN (e) = e;
+ debug_bb_n_slim (rgn_bb_table[current_blocks + bb]);
+ fprintf (stderr, "\n");
}
-}
-/* Translate a bit-set SET to a list BL of the bit-set members. */
+ fprintf (stderr, "\n");
-static void
-extract_bitlst (sbitmap set, bitlst *bl)
+}
+
+/* True when a bb with index BB_INDEX is contained in region RGN. */
+static bool
+bb_in_region_p (int bb_index, int rgn)
{
int i;
- /* bblst table space is reused in each call to extract_bitlst. */
- bitlst_table_last = 0;
-
- bl->first_member = &bitlst_table[bitlst_table_last];
- bl->nr_members = 0;
-
- /* Iterate over each word in the bitset. */
- EXECUTE_IF_SET_IN_SBITMAP (set, 0, i,
- {
- bitlst_table[bitlst_table_last++] = i;
- (bl->nr_members)++;
- });
+ for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
+ if (rgn_bb_table[current_blocks + i] == bb_index)
+ return true;
+ return false;
}
-/* Functions for the construction of regions. */
-
-/* Print the regions, for debugging purposes. Callable from debugger. */
-
+/* Dump region RGN to file F using dot syntax. */
void
-debug_regions (void)
+dump_region_dot (FILE *f, int rgn)
{
- int rgn, bb;
-
- fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n");
- for (rgn = 0; rgn < nr_regions; rgn++)
- {
- fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
- rgn_table[rgn].rgn_nr_blocks);
- fprintf (sched_dump, ";;\tbb/block: ");
-
- for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
- {
- current_blocks = RGN_BLOCKS (rgn);
+ int i;
- if (bb != BLOCK_TO_BB (BB_TO_BLOCK (bb)))
- abort ();
+ fprintf (f, "digraph Region_%d {\n", rgn);
- fprintf (sched_dump, " %d/%d ", bb, BB_TO_BLOCK (bb));
- }
+ /* We don't have ebb_head initialized yet, so we can't use
+ BB_TO_BLOCK (). */
+ current_blocks = RGN_BLOCKS (rgn);
- fprintf (sched_dump, "\n\n");
+ for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
+ {
+ edge e;
+ edge_iterator ei;
+ int src_bb_num = rgn_bb_table[current_blocks + i];
+ struct basic_block_def *bb = BASIC_BLOCK (src_bb_num);
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (bb_in_region_p (e->dest->index, rgn))
+ fprintf (f, "\t%d -> %d\n", src_bb_num, e->dest->index);
}
+ fprintf (f, "}\n");
+}
+
+/* The same, but first open a file specified by FNAME. */
+void
+dump_region_dot_file (const char *fname, int rgn)
+{
+ FILE *f = fopen (fname, "wt");
+ dump_region_dot (f, rgn);
+ fclose (f);
}
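+
+/* Usage sketch: the dumpers above are meant to be called from a
+   debugger, e.g. under gdb
+
+       (gdb) call dump_region_dot_file ("rgn3.dot", 3)
+
+   after which the file can be rendered with graphviz, for instance
+   "dot -Tpng rgn3.dot -o rgn3.png". */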
/* Build a single block region for each basic block in the function.
scheduling. */
static void
-find_single_block_region (void)
+find_single_block_region (bool ebbs_p)
{
- basic_block bb;
+ basic_block bb, ebb_start;
+ int i = 0;
nr_regions = 0;
- FOR_EACH_BB (bb)
+ if (ebbs_p) {
+ int probability_cutoff;
+ if (profile_info && flag_branch_probabilities)
+ probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
+ else
+ probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
+ probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
+
+ FOR_EACH_BB (ebb_start)
+ {
+ RGN_NR_BLOCKS (nr_regions) = 0;
+ RGN_BLOCKS (nr_regions) = i;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
+
+ for (bb = ebb_start; ; bb = bb->next_bb)
+ {
+ edge e;
+
+ rgn_bb_table[i] = bb->index;
+ RGN_NR_BLOCKS (nr_regions)++;
+ CONTAINING_RGN (bb->index) = nr_regions;
+ BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions);
+ i++;
+
+ if (bb->next_bb == EXIT_BLOCK_PTR
+ || LABEL_P (BB_HEAD (bb->next_bb)))
+ break;
+
+ e = find_fallthru_edge (bb->succs);
+ if (! e)
+ break;
+ if (e->probability <= probability_cutoff)
+ break;
+ }
+
+ ebb_start = bb;
+ nr_regions++;
+ }
+ }
+ else
+ FOR_EACH_BB (bb)
+ {
+ rgn_bb_table[nr_regions] = bb->index;
+ RGN_NR_BLOCKS (nr_regions) = 1;
+ RGN_BLOCKS (nr_regions) = nr_regions;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
+
+ CONTAINING_RGN (bb->index) = nr_regions;
+ BLOCK_TO_BB (bb->index) = 0;
+ nr_regions++;
+ }
+}
+
+/* Estimate the number of insns in BB. */
+static int
+rgn_estimate_number_of_insns (basic_block bb)
+{
+ int count;
+
+ count = INSN_LUID (BB_END (bb)) - INSN_LUID (BB_HEAD (bb));
+
+ if (MAY_HAVE_DEBUG_INSNS)
{
- rgn_bb_table[nr_regions] = bb->index;
- RGN_NR_BLOCKS (nr_regions) = 1;
- RGN_BLOCKS (nr_regions) = nr_regions;
- CONTAINING_RGN (bb->index) = nr_regions;
- BLOCK_TO_BB (bb->index) = 0;
- nr_regions++;
+ rtx insn;
+
+ FOR_BB_INSNS (bb, insn)
+ if (DEBUG_INSN_P (insn))
+ count--;
}
+
+ return count;
}
/* Update number of blocks and the estimate for number of insns
- in the region. Return 1 if the region is "too large" for interblock
- scheduling (compile time considerations), otherwise return 0. */
+ in the region. Return true if the region is "too large" for interblock
+ scheduling (compile time considerations). */
-static int
+static bool
too_large (int block, int *num_bbs, int *num_insns)
{
(*num_bbs)++;
- (*num_insns) += (INSN_LUID (BB_END (BASIC_BLOCK (block))) -
- INSN_LUID (BB_HEAD (BASIC_BLOCK (block))));
- if ((*num_bbs > MAX_RGN_BLOCKS) || (*num_insns > MAX_RGN_INSNS))
- return 1;
- else
- return 0;
+ (*num_insns) += (common_sched_info->estimate_number_of_insns
+ (BASIC_BLOCK (block)));
+
+ return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
+ || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
}
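+
+/* The two cutoffs above are --param values, so the region size limits
+   that used to be the hard-wired MAX_RGN_BLOCKS (10) and MAX_RGN_INSNS
+   (100) can now be tuned from the command line, e.g. (sketch):
+
+       gcc -O2 -fschedule-insns2 --param max-sched-region-blocks=20 \
+           --param max-sched-region-insns=200 foo.c
+
+   The param defaults match the old hard-wired limits. */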
/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
of edge tables. That would simplify it somewhat. */
static void
-find_rgns (struct edge_list *edge_list)
+haifa_find_rgns (void)
{
- int *max_hdr, *dfs_nr, *stack, *degree;
+ int *max_hdr, *dfs_nr, *degree;
char no_loops = 1;
int node, child, loop_head, i, head, tail;
int count = 0, sp, idx = 0;
- int current_edge = out_edges[ENTRY_BLOCK_PTR->succ->dest->index];
+ edge_iterator current_edge;
+ edge_iterator *stack;
int num_bbs, num_insns, unreachable;
int too_large_failure;
basic_block bb;
- /* Note if an edge has been passed. */
- sbitmap passed;
-
/* Note if a block is a natural loop header. */
sbitmap header;
/* Note if a block is in the block queue. */
sbitmap in_stack;
- int num_edges = NUM_EDGES (edge_list);
-
/* Perform a DFS traversal of the cfg. Identify loop headers, inner loops
and a mapping from block to its loop header (if the block is contained
in a loop, else -1).
STACK, SP and DFS_NR are only used during the first traversal. */
/* Allocate and initialize variables for the first traversal. */
- max_hdr = xmalloc (last_basic_block * sizeof (int));
- dfs_nr = xcalloc (last_basic_block, sizeof (int));
- stack = xmalloc (nr_edges * sizeof (int));
+ max_hdr = XNEWVEC (int, last_basic_block);
+ dfs_nr = XCNEWVEC (int, last_basic_block);
+ stack = XNEWVEC (edge_iterator, n_edges);
inner = sbitmap_alloc (last_basic_block);
sbitmap_ones (inner);
header = sbitmap_alloc (last_basic_block);
sbitmap_zero (header);
- passed = sbitmap_alloc (nr_edges);
- sbitmap_zero (passed);
-
in_queue = sbitmap_alloc (last_basic_block);
sbitmap_zero (in_queue);
for (i = 0; i < last_basic_block; i++)
max_hdr[i] = -1;
+ #define EDGE_PASSED(E) (ei_end_p ((E)) || ei_edge ((E))->aux)
+ #define SET_EDGE_PASSED(E) (ei_edge ((E))->aux = ei_edge ((E)))
+
/* DFS traversal to find inner loops in the cfg. */
+ current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR)->succs);
sp = -1;
+
while (1)
{
- if (current_edge == 0 || TEST_BIT (passed, current_edge))
+ if (EDGE_PASSED (current_edge))
{
/* We have reached a leaf node or a node that was already
processed. Pop edges off the stack until we find
an edge that has not yet been processed. */
- while (sp >= 0
- && (current_edge == 0 || TEST_BIT (passed, current_edge)))
+ while (sp >= 0 && EDGE_PASSED (current_edge))
{
/* Pop entry off the stack. */
current_edge = stack[sp--];
- node = FROM_BLOCK (current_edge);
- child = TO_BLOCK (current_edge);
+ node = ei_edge (current_edge)->src->index;
+ gcc_assert (node != ENTRY_BLOCK);
+ child = ei_edge (current_edge)->dest->index;
+ gcc_assert (child != EXIT_BLOCK);
RESET_BIT (in_stack, child);
if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
- current_edge = NEXT_OUT (current_edge);
+ ei_next (&current_edge);
}
/* See if have finished the DFS tree traversal. */
- if (sp < 0 && TEST_BIT (passed, current_edge))
+ if (sp < 0 && EDGE_PASSED (current_edge))
break;
/* Nope, continue the traversal with the popped node. */
}
/* Process a node. */
- node = FROM_BLOCK (current_edge);
- child = TO_BLOCK (current_edge);
+ node = ei_edge (current_edge)->src->index;
+ gcc_assert (node != ENTRY_BLOCK);
SET_BIT (in_stack, node);
dfs_nr[node] = ++count;
+ /* We don't traverse to the exit block. */
+ child = ei_edge (current_edge)->dest->index;
+ if (child == EXIT_BLOCK)
+ {
+ SET_EDGE_PASSED (current_edge);
+ ei_next (&current_edge);
+ continue;
+ }
+
/* If the successor is in the stack, then we've found a loop.
Mark the loop, if it is not a natural loop, then it will
be rejected during the second traversal. */
no_loops = 0;
SET_BIT (header, child);
UPDATE_LOOP_RELATIONS (node, child);
- SET_BIT (passed, current_edge);
- current_edge = NEXT_OUT (current_edge);
+ SET_EDGE_PASSED (current_edge);
+ ei_next (&current_edge);
continue;
}
{
if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
- SET_BIT (passed, current_edge);
- current_edge = NEXT_OUT (current_edge);
+ SET_EDGE_PASSED (current_edge);
+ ei_next (&current_edge);
continue;
}
/* Push an entry on the stack and continue DFS traversal. */
stack[++sp] = current_edge;
- SET_BIT (passed, current_edge);
- current_edge = OUT_EDGES (child);
-
- /* This is temporary until haifa is converted to use rth's new
- cfg routines which have true entry/exit blocks and the
- appropriate edges from/to those blocks.
-
- Generally we update dfs_nr for a node when we process its
- out edge. However, if the node has no out edge then we will
- not set dfs_nr for that node. This can confuse the scheduler
- into thinking that we have unreachable blocks, which in turn
- disables cross block scheduling.
-
- So, if we have a node with no out edges, go ahead and mark it
- as reachable now. */
- if (current_edge == 0)
- dfs_nr[child] = ++count;
+ SET_EDGE_PASSED (current_edge);
+ current_edge = ei_start (ei_edge (current_edge)->dest->succs);
+ }
+
+ /* Reset ->aux field used by EDGE_PASSED. */
+ FOR_ALL_BB (bb)
+ {
+ edge_iterator ei;
+ edge e;
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ e->aux = NULL;
}
+
/* Another check for unreachable blocks. The earlier test in
is_cfg_nonregular only finds unreachable blocks that do not
form a loop.
degree = dfs_nr;
FOR_EACH_BB (bb)
- degree[bb->index] = 0;
- for (i = 0; i < num_edges; i++)
- {
- edge e = INDEX_EDGE (edge_list, i);
-
- if (e->dest != EXIT_BLOCK_PTR)
- degree[e->dest->index]++;
- }
+ degree[bb->index] = EDGE_COUNT (bb->preds);
/* Do not perform region scheduling if there are any unreachable
blocks. */
if (!unreachable)
{
- int *queue;
+ int *queue, *degree1 = NULL;
+ /* We use EXTENDED_RGN_HEADER as an addition to HEADER; basic blocks
+ put there are forced to be region heads. This is done to try to
+ assemble a few smaller regions from a too_large region. */
+ sbitmap extended_rgn_header = NULL;
+ bool extend_regions_p;
if (no_loops)
SET_BIT (header, 0);
/* Second traversal: find reducible inner loops and topologically sort
blocks of each region. */
- queue = xmalloc (n_basic_blocks * sizeof (int));
+ queue = XNEWVEC (int, n_basic_blocks);
+
+ extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
+ if (extend_regions_p)
+ {
+ degree1 = XNEWVEC (int, last_basic_block);
+ extended_rgn_header = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (extended_rgn_header);
+ }
/* Find blocks which are inner loop headers. We still have non-reducible
loops to consider at this point. */
if (TEST_BIT (header, bb->index) && TEST_BIT (inner, bb->index))
{
edge e;
+ edge_iterator ei;
basic_block jbb;
/* Now check that the loop is reducible. We do this separate
too_large_failure = 0;
loop_head = max_hdr[bb->index];
+ if (extend_regions_p)
+ /* We save degree in case we meet a too_large region
+ and cancel it. We need a correct degree later when
+ calling extend_rgns. */
+ memcpy (degree1, degree, last_basic_block * sizeof (int));
+
/* Decrease degree of all I's successors for topological
ordering. */
- for (e = bb->succ; e; e = e->succ_next)
+ FOR_EACH_EDGE (e, ei, bb->succs)
if (e->dest != EXIT_BLOCK_PTR)
--degree[e->dest->index];
/* Estimate # insns, and count # blocks in the region. */
num_bbs = 1;
- num_insns = (INSN_LUID (BB_END (bb))
- - INSN_LUID (BB_HEAD (bb)));
+ num_insns = common_sched_info->estimate_number_of_insns (bb);
/* Find all loop latches (blocks with back edges to the loop
header), or all the leaf blocks if the cfg has no loops.
FOR_EACH_BB (jbb)
/* Leaf nodes have only a single successor which must
be EXIT_BLOCK. */
- if (jbb->succ
- && jbb->succ->dest == EXIT_BLOCK_PTR
- && jbb->succ->succ_next == NULL)
+ if (single_succ_p (jbb)
+ && single_succ (jbb) == EXIT_BLOCK_PTR)
{
queue[++tail] = jbb->index;
SET_BIT (in_queue, jbb->index);
{
edge e;
- for (e = bb->pred; e; e = e->pred_next)
+ FOR_EACH_EDGE (e, ei, bb->preds)
{
if (e->src == ENTRY_BLOCK_PTR)
continue;
d b
The algorithm in the DFS traversal may not mark B & D as part
- of the loop (ie they will not have max_hdr set to A).
+ of the loop (i.e. they will not have max_hdr set to A).
We know they can not be loop latches (else they would have
had max_hdr set since they'd have a backedge to a dominator
edge e;
child = queue[++head];
- for (e = BASIC_BLOCK (child)->pred; e; e = e->pred_next)
+ FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->preds)
{
node = e->src->index;
rgn_bb_table[idx] = bb->index;
RGN_NR_BLOCKS (nr_regions) = num_bbs;
RGN_BLOCKS (nr_regions) = idx++;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
CONTAINING_RGN (bb->index) = nr_regions;
BLOCK_TO_BB (bb->index) = count = 0;
CONTAINING_RGN (child) = nr_regions;
queue[head] = queue[tail--];
- for (e = BASIC_BLOCK (child)->succ;
- e;
- e = e->succ_next)
+ FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->succs)
if (e->dest != EXIT_BLOCK_PTR)
--degree[e->dest->index];
}
}
++nr_regions;
}
+ else if (extend_regions_p)
+ {
+ /* Restore DEGREE. */
+ int *t = degree;
+
+ degree = degree1;
+ degree1 = t;
+
+ /* And force successors of BB to be region heads.
+ This may provide several smaller regions instead
+ of one too_large region. */
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (e->dest != EXIT_BLOCK_PTR)
+ SET_BIT (extended_rgn_header, e->dest->index);
+ }
}
}
free (queue);
+
+ if (extend_regions_p)
+ {
+ free (degree1);
+
+ sbitmap_a_or_b (header, header, extended_rgn_header);
+ sbitmap_free (extended_rgn_header);
+
+ extend_rgns (degree, &idx, header, max_hdr);
+ }
}
/* Any block that did not end up in a region is placed into a region
rgn_bb_table[idx] = bb->index;
RGN_NR_BLOCKS (nr_regions) = 1;
RGN_BLOCKS (nr_regions) = idx++;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
CONTAINING_RGN (bb->index) = nr_regions++;
BLOCK_TO_BB (bb->index) = 0;
}
free (max_hdr);
- free (dfs_nr);
+ free (degree);
free (stack);
- sbitmap_free (passed);
sbitmap_free (header);
sbitmap_free (inner);
sbitmap_free (in_queue);
sbitmap_free (in_stack);
}
-/* Functions for regions scheduling information. */
-
-/* Compute dominators, probability, and potential-split-edges of bb.
- Assume that these values were already computed for bb's predecessors. */
+/* Wrapper function.
+ If FLAG_SEL_SCHED_PIPELINING is set, then use the custom function to form
+ regions. Otherwise just call haifa_find_rgns. */
static void
-compute_dom_prob_ps (int bb)
+find_rgns (void)
{
- int nxt_in_edge, fst_in_edge, pred;
- int fst_out_edge, nxt_out_edge, nr_out_edges, nr_rgn_out_edges;
-
- prob[bb] = 0.0;
- if (IS_RGN_ENTRY (bb))
- {
- SET_BIT (dom[bb], 0);
- prob[bb] = 1.0;
- return;
- }
+ if (sel_sched_p () && flag_sel_sched_pipelining)
+ sel_find_rgns ();
+ else
+ haifa_find_rgns ();
+}
- fst_in_edge = nxt_in_edge = IN_EDGES (BB_TO_BLOCK (bb));
+static int gather_region_statistics (int **);
+static void print_region_statistics (int *, int, int *, int);
- /* Initialize dom[bb] to '111..1'. */
- sbitmap_ones (dom[bb]);
+/* Calculate the histogram that shows the number of regions having the
+ given number of basic blocks, and store it in the RSP array. Return
+ the size of this array. */
+static int
+gather_region_statistics (int **rsp)
+{
+ int i, *a = 0, a_sz = 0;
- do
+ /* a[i] is the number of regions that have (i + 1) basic blocks. */
+ for (i = 0; i < nr_regions; i++)
{
- pred = FROM_BLOCK (nxt_in_edge);
- sbitmap_a_and_b (dom[bb], dom[bb], dom[BLOCK_TO_BB (pred)]);
- sbitmap_a_or_b (ancestor_edges[bb], ancestor_edges[bb], ancestor_edges[BLOCK_TO_BB (pred)]);
-
- SET_BIT (ancestor_edges[bb], EDGE_TO_BIT (nxt_in_edge));
+ int nr_blocks = RGN_NR_BLOCKS (i);
- nr_out_edges = 1;
- nr_rgn_out_edges = 0;
- fst_out_edge = OUT_EDGES (pred);
- nxt_out_edge = NEXT_OUT (fst_out_edge);
+ gcc_assert (nr_blocks >= 1);
- sbitmap_a_or_b (pot_split[bb], pot_split[bb], pot_split[BLOCK_TO_BB (pred)]);
-
- SET_BIT (pot_split[bb], EDGE_TO_BIT (fst_out_edge));
-
- /* The successor doesn't belong in the region? */
- if (CONTAINING_RGN (TO_BLOCK (fst_out_edge)) !=
- CONTAINING_RGN (BB_TO_BLOCK (bb)))
- ++nr_rgn_out_edges;
-
- while (fst_out_edge != nxt_out_edge)
+ if (nr_blocks > a_sz)
{
- ++nr_out_edges;
- /* The successor doesn't belong in the region? */
- if (CONTAINING_RGN (TO_BLOCK (nxt_out_edge)) !=
- CONTAINING_RGN (BB_TO_BLOCK (bb)))
- ++nr_rgn_out_edges;
- SET_BIT (pot_split[bb], EDGE_TO_BIT (nxt_out_edge));
- nxt_out_edge = NEXT_OUT (nxt_out_edge);
-
+ a = XRESIZEVEC (int, a, nr_blocks);
+ do
+ a[a_sz++] = 0;
+ while (a_sz != nr_blocks);
}
- /* Now nr_rgn_out_edges is the number of region-exit edges from
- pred, and nr_out_edges will be the number of pred out edges
- not leaving the region. */
- nr_out_edges -= nr_rgn_out_edges;
- if (nr_rgn_out_edges > 0)
- prob[bb] += 0.9 * prob[BLOCK_TO_BB (pred)] / nr_out_edges;
- else
- prob[bb] += prob[BLOCK_TO_BB (pred)] / nr_out_edges;
- nxt_in_edge = NEXT_IN (nxt_in_edge);
+ a[nr_blocks - 1]++;
}
- while (fst_in_edge != nxt_in_edge);
-
- SET_BIT (dom[bb], bb);
- sbitmap_difference (pot_split[bb], pot_split[bb], ancestor_edges[bb]);
- if (sched_verbose >= 2)
- fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
- (int) (100.0 * prob[bb]));
+ *rsp = a;
+ return a_sz;
}
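+
+/* For example, a function split into five single-block regions and two
+   three-block regions yields a_sz == 3 and a == {5, 0, 2}: a[0] counts
+   the 1-block regions, a[2] the 3-block ones. */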
-/* Functions for target info. */
-
-/* Compute in BL the list of split-edges of bb_src relatively to bb_trg.
- Note that bb_trg dominates bb_src. */
-
+/* Print regions statistics. S1 and S2 denote the data before and after
+ calling extend_rgns, respectively. */
static void
-split_edges (int bb_src, int bb_trg, edgelst *bl)
+print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz)
{
- sbitmap src = sbitmap_alloc (pot_split[bb_src]->n_bits);
- sbitmap_copy (src, pot_split[bb_src]);
+ int i;
- sbitmap_difference (src, src, pot_split[bb_trg]);
- extract_bitlst (src, bl);
- sbitmap_free (src);
-}
+ /* We iterate until s2_sz because extend_rgns does not decrease
+ the maximal region size. */
+ for (i = 1; i < s2_sz; i++)
+ {
+ int n1, n2;
-/* Find the valid candidate-source-blocks for the target block TRG, compute
- their probability, and check if they are speculative or not.
- For speculative sources, compute their update-blocks and split-blocks. */
+ n2 = s2[i];
-static void
-compute_trg_info (int trg)
+ if (n2 == 0)
+ continue;
+
+ if (i >= s1_sz)
+ n1 = 0;
+ else
+ n1 = s1[i];
+
+ fprintf (sched_dump, ";; Region extension statistics: size %d: " \
+ "was %d + %d more\n", i + 1, n1, n2 - n1);
+ }
+}
+
+/* Extend regions.
+ DEGREE - Array of incoming edge counts, considering only
+ those edges that don't have their sources in formed regions yet.
+ IDXP - pointer to the next available index in rgn_bb_table.
+ HEADER - set of all region heads.
+ LOOP_HDR - mapping from block to the containing loop
+ (two blocks can reside within one region if they have
+ the same loop header). */
+void
+extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
{
- candidate *sp;
- edgelst el;
- int check_block, update_idx;
- int i, j, k, fst_edge, nxt_edge;
+ int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
+ int nblocks = n_basic_blocks - NUM_FIXED_BLOCKS;
- /* Define some of the fields for the target bb as well. */
- sp = candidate_table + trg;
- sp->is_valid = 1;
- sp->is_speculative = 0;
- sp->src_prob = 100;
+ max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);
- for (i = trg + 1; i < current_nr_blocks; i++)
- {
- sp = candidate_table + i;
+ max_hdr = XNEWVEC (int, last_basic_block);
- sp->is_valid = IS_DOMINATED (i, trg);
- if (sp->is_valid)
+ order = XNEWVEC (int, last_basic_block);
+ post_order_compute (order, false, false);
+
+ for (i = nblocks - 1; i >= 0; i--)
+ {
+ int bbn = order[i];
+ if (degree[bbn] >= 0)
{
- sp->src_prob = GET_SRC_PROB (i, trg);
- sp->is_valid = (sp->src_prob >= MIN_PROBABILITY);
+ max_hdr[bbn] = bbn;
+ rescan = 1;
+ }
+ else
+ /* This block already was processed in find_rgns. */
+ max_hdr[bbn] = -1;
+ }
+
+ /* The idea is to topologically walk through the CFG in top-down order.
+ During the traversal, if all the predecessors of a node are
+ marked to be in the same region (they all have the same max_hdr),
+ then the current node is also marked to be a part of that region.
+ Otherwise the node starts its own region.
+ The CFG is traversed until no further changes are made. On each
+ iteration the set of region heads is extended (the set of those
+ blocks that have max_hdr[bbi] == bbi). This set is upper bounded by the
+ set of all basic blocks, thus the algorithm is guaranteed to
+ terminate. */
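+
+ /* For illustration (a sketch, not from the original patch): consider
+ a diamond a -> b, a -> c, b -> d, c -> d, where all four blocks share
+ a loop header and a has been marked a region head (max_hdr[a] == a).
+ The first pass sets max_hdr[b] = max_hdr[c] = a, since a is the only
+ predecessor of each, and then max_hdr[d] = a as well, because both of
+ d's predecessors agree on the header. Had b and c been assigned
+ different headers, d would have started its own region instead. */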
+
+ while (rescan && iter < max_iter)
+ {
+ rescan = 0;
+
+ for (i = nblocks - 1; i >= 0; i--)
+ {
+ edge e;
+ edge_iterator ei;
+ int bbn = order[i];
+
+ if (max_hdr[bbn] != -1 && !TEST_BIT (header, bbn))
+ {
+ int hdr = -1;
+
+ FOR_EACH_EDGE (e, ei, BASIC_BLOCK (bbn)->preds)
+ {
+ int predn = e->src->index;
+
+ if (predn != ENTRY_BLOCK
+ /* If pred wasn't processed in find_rgns. */
+ && max_hdr[predn] != -1
+ /* And pred and bb reside in the same loop.
+ (Or out of any loop). */
+ && loop_hdr[bbn] == loop_hdr[predn])
+ {
+ if (hdr == -1)
+ /* Then bb extends the containing region of pred. */
+ hdr = max_hdr[predn];
+ else if (hdr != max_hdr[predn])
+ /* Too bad, there are at least two predecessors
+ that reside in different regions. Thus, BB should
+ begin its own region. */
+ {
+ hdr = bbn;
+ break;
+ }
+ }
+ else
+ /* BB starts its own region. */
+ {
+ hdr = bbn;
+ break;
+ }
+ }
+
+ if (hdr == bbn)
+ {
+ /* If BB starts its own region,
+ update the set of headers with BB. */
+ SET_BIT (header, bbn);
+ rescan = 1;
+ }
+ else
+ gcc_assert (hdr != -1);
+
+ max_hdr[bbn] = hdr;
+ }
+ }
+
+ iter++;
+ }
+
+ /* Statistics were gathered on the SPEC2000 package of tests with
+ mainline weekly snapshot gcc-4.1-20051015 on ia64.
+
+ Statistics for SPECint:
+ 1 iteration : 1751 cases (38.7%)
+ 2 iterations: 2770 cases (61.3%)
+ Blocks wrapped in regions by find_rgns without extension: 18295 blocks
+ Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks
+ (We don't count single block regions here).
+
+ Statistics for SPECfp:
+ 1 iteration : 621 cases (35.9%)
+ 2 iterations: 1110 cases (64.1%)
+ Blocks wrapped in regions by find_rgns without extension: 6476 blocks
+ Blocks wrapped in regions by 2 iterations in extend_rgns: 11155 blocks
+ (We don't count single block regions here).
+
+ By default we do at most 2 iterations.
+ This can be overridden with max-sched-extend-regions-iters parameter:
+ 0 - disable region extension,
+ N > 0 - do at most N iterations. */
+
+ if (sched_verbose && iter != 0)
+ fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter,
+ rescan ? "... failed" : "");
+
+ if (!rescan && iter != 0)
+ {
+ int *s1 = NULL, s1_sz = 0;
+
+ /* Save the old statistics for later printout. */
+ if (sched_verbose >= 6)
+ s1_sz = gather_region_statistics (&s1);
+
+ /* We have succeeded. Now assemble the regions. */
+ for (i = nblocks - 1; i >= 0; i--)
+ {
+ int bbn = order[i];
+
+ if (max_hdr[bbn] == bbn)
+ /* BBN is a region head. */
+ {
+ edge e;
+ edge_iterator ei;
+ int num_bbs = 0, j, num_insns = 0, large;
+
+ large = too_large (bbn, &num_bbs, &num_insns);
+
+ degree[bbn] = -1;
+ rgn_bb_table[idx] = bbn;
+ RGN_BLOCKS (nr_regions) = idx++;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
+ CONTAINING_RGN (bbn) = nr_regions;
+ BLOCK_TO_BB (bbn) = 0;
+
+ FOR_EACH_EDGE (e, ei, BASIC_BLOCK (bbn)->succs)
+ if (e->dest != EXIT_BLOCK_PTR)
+ degree[e->dest->index]--;
+
+ if (!large)
+ /* Here we check whether the region is too_large. */
+ for (j = i - 1; j >= 0; j--)
+ {
+ int succn = order[j];
+ if (max_hdr[succn] == bbn)
+ {
+ if ((large = too_large (succn, &num_bbs, &num_insns)))
+ break;
+ }
+ }
+
+ if (large)
+ /* If the region is too_large, then wrap every block of
+ the region into a single block region.
+ Here we wrap the region head only. Other blocks are
+ processed in the loop below. */
+ {
+ RGN_NR_BLOCKS (nr_regions) = 1;
+ nr_regions++;
+ }
+
+ num_bbs = 1;
+
+ for (j = i - 1; j >= 0; j--)
+ {
+ int succn = order[j];
+
+ if (max_hdr[succn] == bbn)
+ /* This loop iterates over all basic blocks that
+ are supposed to be in the region with head BBN,
+ and wraps them into that region (or into a single
+ block region). */
+ {
+ gcc_assert (degree[succn] == 0);
+
+ degree[succn] = -1;
+ rgn_bb_table[idx] = succn;
+ BLOCK_TO_BB (succn) = large ? 0 : num_bbs++;
+ CONTAINING_RGN (succn) = nr_regions;
+
+ if (large)
+ /* Wrap SUCCN into a single block region. */
+ {
+ RGN_BLOCKS (nr_regions) = idx;
+ RGN_NR_BLOCKS (nr_regions) = 1;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
+ nr_regions++;
+ }
+
+ idx++;
+
+ FOR_EACH_EDGE (e, ei, BASIC_BLOCK (succn)->succs)
+ if (e->dest != EXIT_BLOCK_PTR)
+ degree[e->dest->index]--;
+ }
+ }
+
+ if (!large)
+ {
+ RGN_NR_BLOCKS (nr_regions) = num_bbs;
+ nr_regions++;
+ }
+ }
+ }
+
+ if (sched_verbose >= 6)
+ {
+ int *s2, s2_sz;
+
+ /* Get the new statistics and print the comparison with the
+ one before calling this function. */
+ s2_sz = gather_region_statistics (&s2);
+ print_region_statistics (s1, s1_sz, s2, s2_sz);
+ free (s1);
+ free (s2);
+ }
+ }
+
+ free (order);
+ free (max_hdr);
+
+ *idxp = idx;
+}
+
+/* Functions for regions scheduling information. */
+
+/* Compute dominators, probability, and potential-split-edges of bb.
+ Assume that these values were already computed for bb's predecessors. */
+
+static void
+compute_dom_prob_ps (int bb)
+{
+ edge_iterator in_ei;
+ edge in_edge;
+
+ /* We shouldn't have any real ebbs yet. */
+ gcc_assert (ebb_head [bb] == bb + current_blocks);
+
+ if (IS_RGN_ENTRY (bb))
+ {
+ SET_BIT (dom[bb], 0);
+ prob[bb] = REG_BR_PROB_BASE;
+ return;
+ }
+
+ prob[bb] = 0;
+
+ /* Initialize dom[bb] to '111..1'. */
+ sbitmap_ones (dom[bb]);
+
+ FOR_EACH_EDGE (in_edge, in_ei, BASIC_BLOCK (BB_TO_BLOCK (bb))->preds)
+ {
+ int pred_bb;
+ edge out_edge;
+ edge_iterator out_ei;
+
+ if (in_edge->src == ENTRY_BLOCK_PTR)
+ continue;
+
+ pred_bb = BLOCK_TO_BB (in_edge->src->index);
+ sbitmap_a_and_b (dom[bb], dom[bb], dom[pred_bb]);
+ sbitmap_a_or_b (ancestor_edges[bb],
+ ancestor_edges[bb], ancestor_edges[pred_bb]);
+
+ SET_BIT (ancestor_edges[bb], EDGE_TO_BIT (in_edge));
+
+ sbitmap_a_or_b (pot_split[bb], pot_split[bb], pot_split[pred_bb]);
+
+ FOR_EACH_EDGE (out_edge, out_ei, in_edge->src->succs)
+ SET_BIT (pot_split[bb], EDGE_TO_BIT (out_edge));
+
+ prob[bb] += ((prob[pred_bb] * in_edge->probability) / REG_BR_PROB_BASE);
+ }
+
+ SET_BIT (dom[bb], bb);
+ sbitmap_difference (pot_split[bb], pot_split[bb], ancestor_edges[bb]);
+
+ if (sched_verbose >= 2)
+ fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
+ (100 * prob[bb]) / REG_BR_PROB_BASE);
+}
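+
+/* A worked example of the probability update above, using illustrative
+   numbers (REG_BR_PROB_BASE is 10000): if bb has two in-region
+   predecessors p1 and p2 with prob[p1] == 10000 and prob[p2] == 5000,
+   and the corresponding incoming edges have probability 4000 and 10000
+   respectively, then
+
+       prob[bb] = (10000 * 4000) / 10000 + (5000 * 10000) / 10000
+                = 4000 + 5000 = 9000,
+
+   i.e. bb is reached on 90% of the paths from the region entry. */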
+
+/* Functions for target info. */
+
+/* Compute in BL the list of split-edges of bb_src relatively to bb_trg.
+ Note that bb_trg dominates bb_src. */
+
+static void
+split_edges (int bb_src, int bb_trg, edgelst *bl)
+{
+ sbitmap src = sbitmap_alloc (pot_split[bb_src]->n_bits);
+ sbitmap_copy (src, pot_split[bb_src]);
+
+ sbitmap_difference (src, src, pot_split[bb_trg]);
+ extract_edgelst (src, bl);
+ sbitmap_free (src);
+}
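+
+/* Put differently: since bb_trg dominates bb_src, the split edges of
+   (bb_src, bb_trg) are the potential-split edges accumulated on the
+   way to bb_src that are not already accounted for at bb_trg, i.e.
+   roughly the edges through which control can escape between bb_trg
+   and bb_src.  Per the comment near bblst_table above, their TO blocks
+   are the blocks a speculative motion has to check liveness in. */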
+
+/* Find the valid candidate-source-blocks for the target block TRG, compute
+ their probability, and check if they are speculative or not.
+ For speculative sources, compute their update-blocks and split-blocks. */
+
+static void
+compute_trg_info (int trg)
+{
+ candidate *sp;
+ edgelst el = { NULL, 0 };
+ int i, j, k, update_idx;
+ basic_block block;
+ sbitmap visited;
+ edge_iterator ei;
+ edge e;
+
+ candidate_table = XNEWVEC (candidate, current_nr_blocks);
+
+ bblst_last = 0;
+ /* bblst_table holds split blocks and update blocks for each block after
+ the current one in the region. Split blocks and update blocks are
+ the TO blocks of region edges, so there can be at most rgn_nr_edges
+ of them. */
+ bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
+ bblst_table = XNEWVEC (basic_block, bblst_size);
+
+ edgelst_last = 0;
+ edgelst_table = XNEWVEC (edge, rgn_nr_edges);
+
+ /* Define some of the fields for the target bb as well. */
+ sp = candidate_table + trg;
+ sp->is_valid = 1;
+ sp->is_speculative = 0;
+ sp->src_prob = REG_BR_PROB_BASE;
+
+ visited = sbitmap_alloc (last_basic_block);
+
+ for (i = trg + 1; i < current_nr_blocks; i++)
+ {
+ sp = candidate_table + i;
+
+ sp->is_valid = IS_DOMINATED (i, trg);
+ if (sp->is_valid)
+ {
+ int tf = prob[trg], cf = prob[i];
+
+ /* In CFGs with low probability edges TF can possibly be zero. */
+ sp->src_prob = (tf ? ((cf * REG_BR_PROB_BASE) / tf) : 0);
+ sp->is_valid = (sp->src_prob >= min_spec_prob);
}
if (sp->is_valid)
if (sp->is_valid)
{
- char *update_blocks;
-
/* Compute split blocks and store them in bblst_table.
The TO block of every split edge is a split block. */
sp->split_bbs.first_member = &bblst_table[bblst_last];
sp->split_bbs.nr_members = el.nr_members;
for (j = 0; j < el.nr_members; bblst_last++, j++)
- bblst_table[bblst_last] =
- TO_BLOCK (rgn_edges[el.first_member[j]]);
+ bblst_table[bblst_last] = el.first_member[j]->dest;
sp->update_bbs.first_member = &bblst_table[bblst_last];
/* Compute update blocks and store them in bblst_table.
add the TO block to the update block list. This list can end
up with a lot of duplicates. We need to weed them out to avoid
overrunning the end of the bblst_table. */
- update_blocks = alloca (last_basic_block);
- memset (update_blocks, 0, last_basic_block);
update_idx = 0;
+ sbitmap_zero (visited);
for (j = 0; j < el.nr_members; j++)
{
- check_block = FROM_BLOCK (rgn_edges[el.first_member[j]]);
- fst_edge = nxt_edge = OUT_EDGES (check_block);
- do
+ block = el.first_member[j]->src;
+ FOR_EACH_EDGE (e, ei, block->succs)
{
- if (! update_blocks[TO_BLOCK (nxt_edge)])
+ if (!TEST_BIT (visited, e->dest->index))
{
for (k = 0; k < el.nr_members; k++)
- if (EDGE_TO_BIT (nxt_edge) == el.first_member[k])
+ if (e == el.first_member[k])
break;
if (k >= el.nr_members)
{
- bblst_table[bblst_last++] = TO_BLOCK (nxt_edge);
- update_blocks[TO_BLOCK (nxt_edge)] = 1;
+ bblst_table[bblst_last++] = e->dest;
+ SET_BIT (visited, e->dest->index);
update_idx++;
}
}
-
- nxt_edge = NEXT_OUT (nxt_edge);
}
- while (fst_edge != nxt_edge);
}
sp->update_bbs.nr_members = update_idx;
/* Make sure we didn't overrun the end of bblst_table. */
- if (bblst_last > bblst_size)
- abort ();
+ gcc_assert (bblst_last <= bblst_size);
}
else
{
sp->src_prob = 0;
}
}
+
+ sbitmap_free (visited);
+}
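+
+/* A worked example of the validity test above (REG_BR_PROB_BASE is
+   10000, numbers illustrative): with prob[trg] == 8000 and prob[i] ==
+   3000, the relative probability of the candidate source is
+
+       sp->src_prob = (3000 * 10000) / 8000 = 3750,
+
+   i.e. 37.5% of the executions reaching TRG also reach block i.  The
+   block stays valid only if this is at least min_spec_prob, which is
+   derived elsewhere in this file from the min-spec-prob --param. */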
+
+/* Free the computed target info. */
+static void
+free_trg_info (void)
+{
+ free (candidate_table);
+ free (bblst_table);
+ free (edgelst_table);
}
/* Print candidates info, for debugging purposes. Callable from debugger. */
-void
+DEBUG_FUNCTION void
debug_candidate (int i)
{
if (!candidate_table[i].is_valid)
fprintf (sched_dump, "split path: ");
for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
{
- int b = candidate_table[i].split_bbs.first_member[j];
+ int b = candidate_table[i].split_bbs.first_member[j]->index;
fprintf (sched_dump, " %d ", b);
}
fprintf (sched_dump, "update path: ");
for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
{
- int b = candidate_table[i].update_bbs.first_member[j];
+ int b = candidate_table[i].update_bbs.first_member[j]->index;
fprintf (sched_dump, " %d ", b);
}
/* Print candidates info, for debugging purposes. Callable from debugger. */
-void
+DEBUG_FUNCTION void
debug_candidates (int trg)
{
int i;
/* Functions for speculative scheduling. */
+static bitmap_head not_in_df;
+
/* Return 0 if x is a set of a register live at the beginning of one
of the split-blocks of src, otherwise return 1. */
if (reg == 0)
return 1;
- while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
- || GET_CODE (reg) == SIGN_EXTRACT
+ while (GET_CODE (reg) == SUBREG
+ || GET_CODE (reg) == ZERO_EXTRACT
|| GET_CODE (reg) == STRICT_LOW_PART)
reg = XEXP (reg, 0);
return 0;
}
- if (GET_CODE (reg) != REG)
+ if (!REG_P (reg))
return 1;
regno = REGNO (reg);
{
for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
{
- int b = candidate_table[src].split_bbs.first_member[i];
+ basic_block b = candidate_table[src].split_bbs.first_member[i];
+ int t = bitmap_bit_p (&not_in_df, b->index);
- if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start,
- regno + j))
- {
- return 0;
- }
+ /* We can have split blocks that were recently generated.
+ Such blocks are always outside the current region. */
+ gcc_assert (!t || (CONTAINING_RGN (b->index)
+ != CONTAINING_RGN (BB_TO_BLOCK (src))));
+
+ if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j))
+ return 0;
}
}
}
/* Check for pseudo registers. */
for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
{
- int b = candidate_table[src].split_bbs.first_member[i];
+ basic_block b = candidate_table[src].split_bbs.first_member[i];
+ int t = bitmap_bit_p (&not_in_df, b->index);
- if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start, regno))
- {
- return 0;
- }
+ gcc_assert (!t || (CONTAINING_RGN (b->index)
+ != CONTAINING_RGN (BB_TO_BLOCK (src))));
+
+ if (t || REGNO_REG_SET_P (df_get_live_in (b), regno))
+ return 0;
}
}
}
if (reg == 0)
return;
- while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
- || GET_CODE (reg) == SIGN_EXTRACT
+ while (GET_CODE (reg) == SUBREG
+ || GET_CODE (reg) == ZERO_EXTRACT
|| GET_CODE (reg) == STRICT_LOW_PART)
reg = XEXP (reg, 0);
return;
}
- if (GET_CODE (reg) != REG)
+ if (!REG_P (reg))
return;
/* Global registers are always live, so the code below does not apply
to them. */
regno = REGNO (reg);
- if (regno >= FIRST_PSEUDO_REGISTER || !global_regs[regno])
+ if (! HARD_REGISTER_NUM_P (regno)
+ || !global_regs[regno])
{
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- int j = hard_regno_nregs[regno][GET_MODE (reg)];
- while (--j >= 0)
- {
- for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
- {
- int b = candidate_table[src].update_bbs.first_member[i];
-
- SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start,
- regno + j);
- }
- }
- }
- else
+ for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
{
- for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
- {
- int b = candidate_table[src].update_bbs.first_member[i];
+ basic_block b = candidate_table[src].update_bbs.first_member[i];
- SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start, regno);
- }
+ if (HARD_REGISTER_NUM_P (regno))
+ bitmap_set_range (df_get_live_in (b), regno,
+ hard_regno_nregs[regno][GET_MODE (reg)]);
+ else
+ bitmap_set_bit (df_get_live_in (b), regno);
}
}
}
(bb_from == bb_to \
|| IS_RGN_ENTRY (bb_from) \
|| (TEST_BIT (ancestor_edges[bb_to], \
- EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from))))))
+ EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK (BB_TO_BLOCK (bb_from)))))))
/* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
static void
set_spec_fed (rtx load_insn)
{
- rtx link;
+ sd_iterator_def sd_it;
+ dep_t dep;
- for (link = INSN_DEPEND (load_insn); link; link = XEXP (link, 1))
- if (GET_MODE (link) == VOIDmode)
- FED_BY_SPEC_LOAD (XEXP (link, 0)) = 1;
-} /* set_spec_fed */
+ FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep)
+ if (DEP_TYPE (dep) == REG_DEP_TRUE)
+ FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1;
+}
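+
+/* The loop above shows the dependence-list idiom this patch migrates
+   to: the sd_ interface replaces the old LOG_LINKS / INSN_DEPEND
+   lists, so walking the forward true dependences of an insn becomes
+
+       sd_iterator_def sd_it;
+       dep_t dep;
+
+       FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
+         if (DEP_TYPE (dep) == REG_DEP_TRUE)
+           ... use DEP_CON (dep), the consumer ...
+
+   with SD_LIST_BACK and DEP_PRO (the producer) for the backward
+   direction, as in is_conditionally_protected below. */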
/* On the path from the insn to load_insn_bb, find a conditional
branch depending on insn that guards the speculative load. */
static int
find_conditional_protection (rtx insn, int load_insn_bb)
{
- rtx link;
+ sd_iterator_def sd_it;
+ dep_t dep;
/* Iterate through DEF-USE forward dependences. */
- for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
+ FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
{
- rtx next = XEXP (link, 0);
+ rtx next = DEP_CON (dep);
+
if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
&& IS_REACHABLE (INSN_BB (next), load_insn_bb)
&& load_insn_bb != INSN_BB (next)
- && GET_MODE (link) == VOIDmode
- && (GET_CODE (next) == JUMP_INSN
+ && DEP_TYPE (dep) == REG_DEP_TRUE
+ && (JUMP_P (next)
|| find_conditional_protection (next, load_insn_bb)))
return 1;
}
/* Returns 1 if the same insn1 that participates in the computation
of load_insn's address is feeding a conditional branch that is
- guarding on load_insn. This is true if we find a the two DEF-USE
+ guarding on load_insn. This is true if we find two DEF-USE
chains:
insn1 -> ... -> conditional-branch
insn1 -> ... -> load_insn,
- and if a flow path exist:
+ and if a flow path exists:
insn1 -> ... -> conditional-branch -> ... -> load_insn,
and if insn1 is on the path
region-entry -> ... -> bb_trg -> ... load_insn.
- Locate insn1 by climbing on LOG_LINKS from load_insn.
- Locate the branch by following INSN_DEPEND from insn1. */
+ Locate insn1 by climbing on INSN_BACK_DEPS from load_insn.
+ Locate the branch by following INSN_FORW_DEPS from insn1. */
static int
is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
{
- rtx link;
+ sd_iterator_def sd_it;
+ dep_t dep;
- for (link = LOG_LINKS (load_insn); link; link = XEXP (link, 1))
+ FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep)
{
- rtx insn1 = XEXP (link, 0);
+ rtx insn1 = DEP_PRO (dep);
/* Must be a DEF-USE dependence upon non-branch. */
- if (GET_MODE (link) != VOIDmode
- || GET_CODE (insn1) == JUMP_INSN)
+ if (DEP_TYPE (dep) != REG_DEP_TRUE
+ || JUMP_P (insn1))
continue;
/* A path must exist: region-entry -> ... -> bb_trg -> ... load_insn. */
static int
is_pfree (rtx load_insn, int bb_src, int bb_trg)
{
- rtx back_link;
+ sd_iterator_def back_sd_it;
+ dep_t back_dep;
candidate *candp = candidate_table + bb_src;
if (candp->split_bbs.nr_members != 1)
/* Must have exactly one escape block. */
return 0;
- for (back_link = LOG_LINKS (load_insn);
- back_link; back_link = XEXP (back_link, 1))
+ FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
{
- rtx insn1 = XEXP (back_link, 0);
+ rtx insn1 = DEP_PRO (back_dep);
- if (GET_MODE (back_link) == VOIDmode)
+ if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
+ /* Found a DEF-USE dependence (insn1, load_insn). */
{
- /* Found a DEF-USE dependence (insn1, load_insn). */
- rtx fore_link;
+ sd_iterator_def fore_sd_it;
+ dep_t fore_dep;
- for (fore_link = INSN_DEPEND (insn1);
- fore_link; fore_link = XEXP (fore_link, 1))
+ FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
{
- rtx insn2 = XEXP (fore_link, 0);
- if (GET_MODE (fore_link) == VOIDmode)
+ rtx insn2 = DEP_CON (fore_dep);
+
+ if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
{
/* Found a DEF-USE dependence (insn1, insn2). */
if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
/* insn2 is the similar load, in the target block. */
return 1;
- if (*(candp->split_bbs.first_member) == BLOCK_NUM (insn2))
+ if (*(candp->split_bbs.first_member) == BLOCK_FOR_INSN (insn2))
/* insn2 is a similar load, in a split-block. */
return 1;
}
if (FED_BY_SPEC_LOAD (load_insn))
return 1;
- if (LOG_LINKS (load_insn) == NULL)
+ if (sd_lists_empty_p (load_insn, SD_LIST_BACK))
/* Dependence may 'hide' out of the region. */
return 1;
static int target_n_insns;
/* The number of insns from the entire region scheduled so far. */
static int sched_n_insns;
-/* Nonzero if the last scheduled insn was a jump. */
-static int last_was_jump;
/* Implementations of the sched_info functions for region scheduling. */
-static void init_ready_list (struct ready_list *);
+static void init_ready_list (void);
static int can_schedule_ready_p (rtx);
-static int new_ready (rtx);
+static void begin_schedule_ready (rtx);
+static ds_t new_ready (rtx, ds_t);
static int schedule_more_p (void);
-static const char *rgn_print_insn (rtx, int);
+static const char *rgn_print_insn (const_rtx, int);
static int rgn_rank (rtx, rtx);
-static int contributes_to_priority (rtx, rtx);
static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
+/* Functions for speculative scheduling. */
+static void rgn_add_remove_insn (rtx, int);
+static void rgn_add_block (basic_block, basic_block);
+static void rgn_fix_recovery_cfg (int, int, int);
+static basic_block advance_target_bb (basic_block, rtx);
+
/* Return nonzero if there are more insns that should be scheduled. */
static int
schedule_more_p (void)
{
- return ! last_was_jump && sched_target_n_insns < target_n_insns;
+ return sched_target_n_insns < target_n_insns;
}
/* Add all insns that are initially ready to the ready list. Called
once before scheduling a set of insns. */
static void
-init_ready_list (struct ready_list *ready)
+init_ready_list (void)
{
rtx prev_head = current_sched_info->prev_head;
rtx next_tail = current_sched_info->next_tail;
target_n_insns = 0;
sched_target_n_insns = 0;
sched_n_insns = 0;
- last_was_jump = 0;
/* Print debugging information. */
if (sched_verbose >= 5)
- debug_dependencies ();
+ debug_rgn_dependencies (target_bb);
/* Prepare current target block info. */
if (current_nr_blocks > 1)
- {
- candidate_table = xmalloc (current_nr_blocks * sizeof (candidate));
-
- bblst_last = 0;
- /* bblst_table holds split blocks and update blocks for each block after
- the current one in the region. split blocks and update blocks are
- the TO blocks of region edges, so there can be at most rgn_nr_edges
- of them. */
- bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
- bblst_table = xmalloc (bblst_size * sizeof (int));
-
- bitlst_table_last = 0;
- bitlst_table = xmalloc (rgn_nr_edges * sizeof (int));
-
- compute_trg_info (target_bb);
- }
+ compute_trg_info (target_bb);
/* Initialize ready list with all 'ready' insns in target block.
Count number of insns in the target block being scheduled. */
for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
{
- if (INSN_DEP_COUNT (insn) == 0)
- {
- ready_add (ready, insn);
-
- if (targetm.sched.adjust_priority)
- INSN_PRIORITY (insn) =
- (*targetm.sched.adjust_priority) (insn, INSN_PRIORITY (insn));
- }
+ try_ready (insn);
target_n_insns++;
+
+ gcc_assert (!(TODO_SPEC (insn) & BEGIN_CONTROL));
}
/* Add to ready list all 'ready' insns in valid source blocks.
rtx src_next_tail;
rtx tail, head;
- get_block_head_tail (BB_TO_BLOCK (bb_src), &head, &tail);
+ get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src),
+ &head, &tail);
src_next_tail = NEXT_INSN (tail);
src_head = head;
for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
- {
- if (! INSN_P (insn))
- continue;
-
- if (!CANT_MOVE (insn)
- && (!IS_SPECULATIVE_INSN (insn)
- || ((((!targetm.sched.use_dfa_pipeline_interface
- || !(*targetm.sched.use_dfa_pipeline_interface) ())
- && insn_issue_delay (insn) <= 3)
- || (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ()
- && (recog_memoized (insn) < 0
- || min_insn_conflict_delay (curr_state,
- insn, insn) <= 3)))
- && check_live (insn, bb_src)
- && is_exception_free (insn, bb_src, target_bb))))
- if (INSN_DEP_COUNT (insn) == 0)
- {
- ready_add (ready, insn);
-
- if (targetm.sched.adjust_priority)
- INSN_PRIORITY (insn) =
- (*targetm.sched.adjust_priority) (insn, INSN_PRIORITY (insn));
- }
- }
+ if (INSN_P (insn))
+ try_ready (insn);
}
}
static int
can_schedule_ready_p (rtx insn)
{
- if (GET_CODE (insn) == JUMP_INSN)
- last_was_jump = 1;
+ /* An interblock motion? */
+ if (INSN_BB (insn) != target_bb
+ && IS_SPECULATIVE_INSN (insn)
+ && !check_live (insn, INSN_BB (insn)))
+ return 0;
+ else
+ return 1;
+}
+/* Update counters and other bookkeeping. Split from can_schedule_ready_p ()
+ because when an insn is scheduled speculatively, the insn passed to
+ can_schedule_ready_p () differs from the one passed to
+ begin_schedule_ready (). */
+static void
+begin_schedule_ready (rtx insn)
+{
/* An interblock motion? */
if (INSN_BB (insn) != target_bb)
{
- basic_block b1;
-
if (IS_SPECULATIVE_INSN (insn))
{
- if (!check_live (insn, INSN_BB (insn)))
- return 0;
+ gcc_assert (check_live (insn, INSN_BB (insn)));
+
update_live (insn, INSN_BB (insn));
/* For speculative load, mark insns fed by it. */
nr_spec++;
}
nr_inter++;
-
- /* Update source block boundaries. */
- b1 = BLOCK_FOR_INSN (insn);
- if (insn == BB_HEAD (b1) && insn == BB_END (b1))
- {
- /* We moved all the insns in the basic block.
- Emit a note after the last insn and update the
- begin/end boundaries to point to the note. */
- rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
- BB_HEAD (b1) = note;
- BB_END (b1) = note;
- }
- else if (insn == BB_END (b1))
- {
- /* We took insns from the end of the basic block,
- so update the end of block boundary so that it
- points to the first insn we did not move. */
- BB_END (b1) = PREV_INSN (insn);
- }
- else if (insn == BB_HEAD (b1))
- {
- /* We took insns from the start of the basic block,
- so update the start of block boundary so that
- it points to the first insn we did not move. */
- BB_HEAD (b1) = NEXT_INSN (insn);
- }
}
else
{
sched_target_n_insns++;
}
sched_n_insns++;
-
- return 1;
}
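The check/commit split described above can be sketched in isolation (all
names below are hypothetical): the predicate stays free of side effects,
so the caller may hand the commit step a different insn, e.g. a
speculation check, than the one it tested.

#include <assert.h>
#include <stdio.h>

static int live_ok = 1;         /* stand-in for check_live () */
static int nr_moves;

static int
can_move_p (int insn)           /* pure check, no side effects */
{
  (void) insn;
  return live_ok;
}

static void
commit_move (int insn)          /* bookkeeping happens only here */
{
  assert (can_move_p (insn));
  nr_moves++;
}

int
main (void)
{
  int orig = 1, check = 2;      /* speculation may substitute insns */
  if (can_move_p (orig))
    commit_move (check);
  printf ("moves committed: %d\n", nr_moves);
  return 0;
}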
-/* Called after INSN has all its dependencies resolved. Return nonzero
- if it should be moved to the ready list or the queue, or zero if we
- should silently discard it. */
-static int
-new_ready (rtx next)
+/* Called after INSN has all its hard dependencies resolved and the speculation
+ of type TS is enough to overcome them all.
+ Return nonzero if it should be moved to the ready list or the queue, or zero
+ if we should silently discard it. */
+static ds_t
+new_ready (rtx next, ds_t ts)
{
- /* For speculative insns, before inserting to ready/queue,
- check live, exception-free, and issue-delay. */
- if (INSN_BB (next) != target_bb
- && (!IS_VALID (INSN_BB (next))
+ if (INSN_BB (next) != target_bb)
+ {
+ int not_ex_free = 0;
+
+ /* For speculative insns, before inserting to ready/queue,
+ check live, exception-free, and issue-delay. */
+ if (!IS_VALID (INSN_BB (next))
|| CANT_MOVE (next)
|| (IS_SPECULATIVE_INSN (next)
- && (0
- || (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ()
- && recog_memoized (next) >= 0
- && min_insn_conflict_delay (curr_state, next,
- next) > 3)
- || ((!targetm.sched.use_dfa_pipeline_interface
- || !(*targetm.sched.use_dfa_pipeline_interface) ())
- && insn_issue_delay (next) > 3)
+ && ((recog_memoized (next) >= 0
+ && min_insn_conflict_delay (curr_state, next, next)
+ > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
+ || IS_SPECULATION_CHECK_P (next)
|| !check_live (next, INSN_BB (next))
- || !is_exception_free (next, INSN_BB (next), target_bb)))))
- return 0;
- return 1;
+ || (not_ex_free = !is_exception_free (next, INSN_BB (next),
+ target_bb)))))
+ {
+ if (not_ex_free
+ /* We are here because is_exception_free () == false.
+ But we possibly can handle that with control speculation. */
+ && sched_deps_info->generate_spec_deps
+ && spec_info->mask & BEGIN_CONTROL)
+ {
+ ds_t new_ds;
+
+ /* Add control speculation to NEXT's dependency type. */
+ new_ds = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK);
+
+ /* Check if NEXT can be speculated with new dependency type. */
+ if (sched_insn_is_legitimate_for_speculation_p (next, new_ds))
+ /* Here we got new control-speculative instruction. */
+ ts = new_ds;
+ else
+ /* NEXT isn't ready yet. */
+ ts = (ts & ~SPECULATIVE) | HARD_DEP;
+ }
+ else
+ /* NEXT isn't ready yet. */
+ ts = (ts & ~SPECULATIVE) | HARD_DEP;
+ }
+ }
+
+ return ts;
}
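The status updates above are plain bit-set manipulation on ds_t. A
self-contained sketch with invented bit values (GCC's real ds_t encoding
also carries weights) shows the two outcomes: either the
control-speculation bit is added, or the speculative bits collapse into
a hard dependence.

#include <stdio.h>

typedef unsigned int status_t;

#define BEGIN_CONTROL 0x1       /* hypothetical bit assignments */
#define BEGIN_DATA    0x2
#define SPECULATIVE   (BEGIN_CONTROL | BEGIN_DATA)
#define HARD_DEP      0x4

int
main (void)
{
  status_t ts = BEGIN_DATA;     /* data speculation pending */

  /* Try to overcome an exception problem with control speculation.  */
  status_t new_ds = ts | BEGIN_CONTROL;

  int legitimate = 0;           /* pretend the legitimacy check failed */
  if (legitimate)
    ts = new_ds;
  else
    ts = (ts & ~SPECULATIVE) | HARD_DEP;  /* NEXT isn't ready yet */

  printf ("final status: %#x (hard dep: %s)\n",
          ts, (ts & HARD_DEP) ? "yes" : "no");
  return 0;
}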
/* Return a string that contains the insn uid and optionally anything else
to be formatted so that multiple output lines will line up nicely. */
static const char *
-rgn_print_insn (rtx insn, int aligned)
+rgn_print_insn (const_rtx insn, int aligned)
{
static char tmp[80];
return nonzero if we should include this dependence in priority
calculations. */
-static int
+int
contributes_to_priority (rtx next, rtx insn)
{
- return BLOCK_NUM (next) == BLOCK_NUM (insn);
+ /* NEXT and INSN reside in one ebb. */
+ return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn));
}
/* INSN is a JUMP_INSN, COND_SET is the set of registers that are
add_branch_dependences. */
}
+/* This variable holds common_sched_info hooks and data relevant to
+ the interblock scheduler. */
+static struct common_sched_info_def rgn_common_sched_info;
+
+
+/* This holds data for the dependence analysis relevant to
+ the interblock scheduler. */
+static struct sched_deps_info_def rgn_sched_deps_info;
+
+/* This holds constant data used for initializing the above structure
+ for the Haifa scheduler. */
+static const struct sched_deps_info_def rgn_const_sched_deps_info =
+ {
+ compute_jump_reg_dependencies,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ 0, 0, 0
+ };
+
+/* Same as above, but for the selective scheduler. */
+static const struct sched_deps_info_def rgn_const_sel_sched_deps_info =
+ {
+ compute_jump_reg_dependencies,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ 0, 0, 0
+ };
+
+/* Return true if scheduling INSN will trigger finish of scheduling
+ current block. */
+static bool
+rgn_insn_finishes_block_p (rtx insn)
+{
+ if (INSN_BB (insn) == target_bb
+ && sched_target_n_insns + 1 == target_n_insns)
+ /* INSN is the last not-scheduled instruction in the current block. */
+ return true;
+
+ return false;
+}
+
/* Used in schedule_insns to initialize current_sched_info for scheduling
regions (or single basic blocks). */
-static struct sched_info region_sched_info =
+static const struct haifa_sched_info rgn_const_sched_info =
{
init_ready_list,
can_schedule_ready_p,
rgn_rank,
rgn_print_insn,
contributes_to_priority,
- compute_jump_reg_dependencies,
+ rgn_insn_finishes_block_p,
NULL, NULL,
NULL, NULL,
- 0, 0, 0
+ 0, 0,
+
+ rgn_add_remove_insn,
+ begin_schedule_ready,
+ NULL,
+ advance_target_bb,
+ SCHED_RGN
};
-/* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register. */
+/* This variable holds the data and hooks needed to the Haifa scheduler backend
+ for the interblock scheduler frontend. */
+static struct haifa_sched_info rgn_sched_info;
+
+/* Returns maximum priority that an insn was assigned to. */
+
+int
+get_rgn_sched_max_insns_priority (void)
+{
+ return rgn_sched_info.sched_max_insns_priority;
+}
+
+/* Determine if PAT sets a TARGET_CLASS_LIKELY_SPILLED_P register. */
static bool
sets_likely_spilled (rtx pat)
}
static void
-sets_likely_spilled_1 (rtx x, rtx pat, void *data)
+sets_likely_spilled_1 (rtx x, const_rtx pat, void *data)
{
bool *ret = (bool *) data;
if (GET_CODE (pat) == SET
&& REG_P (x)
- && REGNO (x) < FIRST_PSEUDO_REGISTER
- && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (REGNO (x))))
+ && HARD_REGISTER_P (x)
+ && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (x))))
*ret = true;
}
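sets_likely_spilled_1 is a callback that threads its result through the
void * DATA argument. A minimal standalone sketch of that pattern (the
walker below is a stand-in, not the real note_stores):

#include <stdbool.h>
#include <stdio.h>

typedef void (*store_fn) (int reg, void *data);

static void
walk_stores (const int *regs_set, int n, store_fn fn, void *data)
{
  for (int i = 0; i < n; i++)   /* call FN once per stored register */
    fn (regs_set[i], data);
}

static void
flag_if_small_regno (int reg, void *data)
{
  bool *ret = (bool *) data;
  if (reg < 8)                  /* stand-in for class_likely_spilled_p */
    *ret = true;
}

int
main (void)
{
  int regs_set[] = { 12, 3, 40 };
  bool ret = false;
  walk_stores (regs_set, 3, flag_if_small_regno, &ret);
  printf ("likely-spilled register set: %s\n", ret ? "yes" : "no");
  return 0;
}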
+/* A bitmap to note insns that participate in any dependency. Used in
+ add_branch_dependences. */
+static sbitmap insn_referenced;
+
/* Add dependences so that branches are scheduled to run last in their
block. */
-
static void
add_branch_dependences (rtx head, rtx tail)
{
cc0 setters remain at the end because they can't be moved away from
their cc0 user.
- Insns setting CLASS_LIKELY_SPILLED_P registers (usually return values)
- are not moved before reload because we can wind up with register
+ COND_EXEC insns cannot be moved past a branch (see e.g. PR17808).
+
+ Insns setting TARGET_CLASS_LIKELY_SPILLED_P registers (usually return
+ values) are not moved before reload because we can wind up with register
allocation failures. */
+ while (tail != head && DEBUG_INSN_P (tail))
+ tail = PREV_INSN (tail);
+
insn = tail;
last = 0;
- while (GET_CODE (insn) == CALL_INSN
- || GET_CODE (insn) == JUMP_INSN
- || (GET_CODE (insn) == INSN
+ while (CALL_P (insn)
+ || JUMP_P (insn)
+ || (NONJUMP_INSN_P (insn)
&& (GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER
|| can_throw_internal (insn)
#endif
|| (!reload_completed
&& sets_likely_spilled (PATTERN (insn)))))
- || GET_CODE (insn) == NOTE)
+ || NOTE_P (insn))
{
- if (GET_CODE (insn) != NOTE)
+ if (!NOTE_P (insn))
{
- if (last != 0 && !find_insn_list (insn, LOG_LINKS (last)))
+ if (last != 0
+ && sd_find_dep_between (insn, last, false) == NULL)
{
- add_dependence (last, insn, REG_DEP_ANTI);
- INSN_REF_COUNT (insn)++;
+ if (! sched_insns_conditions_mutex_p (last, insn))
+ add_dependence (last, insn, REG_DEP_ANTI);
+ SET_BIT (insn_referenced, INSN_LUID (insn));
}
CANT_MOVE (insn) = 1;
if (insn == head)
break;
- insn = PREV_INSN (insn);
+ do
+ insn = PREV_INSN (insn);
+ while (insn != head && DEBUG_INSN_P (insn));
}
/* Make sure these insns are scheduled last in their block. */
{
insn = prev_nonnote_insn (insn);
- if (INSN_REF_COUNT (insn) != 0)
+ if (TEST_BIT (insn_referenced, INSN_LUID (insn))
+ || DEBUG_INSN_P (insn))
continue;
- add_dependence (last, insn, REG_DEP_ANTI);
- INSN_REF_COUNT (insn) = 1;
+ if (! sched_insns_conditions_mutex_p (last, insn))
+ add_dependence (last, insn, REG_DEP_ANTI);
}
+
+ if (!targetm.have_conditional_execution ())
+ return;
+
+ /* Finally, if the block ends in a jump, and we are doing intra-block
+ scheduling, make sure that the branch depends on any COND_EXEC insns
+ inside the block to avoid moving the COND_EXECs past the branch insn.
+
+ We only have to do this after reload, because (1) before reload there
+ are no COND_EXEC insns, and (2) the region scheduler is an intra-block
+ scheduler after reload.
+
+ FIXME: We could in some cases move COND_EXEC insns past the branch if
+ this scheduler would be a little smarter. Consider this code:
+
+ T = [addr]
+ C ? addr += 4
+ !C ? X += 12
+ C ? T += 1
+ C ? jump foo
+
+ On a target with a one cycle stall on a memory access the optimal
+ sequence would be:
+
+ T = [addr]
+ C ? addr += 4
+ C ? T += 1
+ C ? jump foo
+ !C ? X += 12
+
+ We don't want to put the 'X += 12' before the branch because it just
+ wastes a cycle of execution time when the branch is taken.
+
+ Note that in the example "!C" will always be true. That is another
+ possible improvement for handling COND_EXECs in this scheduler: it
+ could remove always-true predicates. */
+
+ if (!reload_completed || ! JUMP_P (tail))
+ return;
+
+ insn = tail;
+ while (insn != head)
+ {
+ insn = PREV_INSN (insn);
+
+ /* Note that we want to add this dependency even when
+ sched_insns_conditions_mutex_p returns true. The whole point
+ is that we _want_ this dependency, even if these insns really
+ are independent. */
+ if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC)
+ add_dependence (tail, insn, REG_DEP_ANTI);
+ }
}
/* Data structures for the computation of data dependences in a region. We
the variables of its predecessors. When the analysis for a bb completes,
we save the contents to the corresponding bb_deps[bb] variable. */
-static struct deps *bb_deps;
+static struct deps_desc *bb_deps;
/* Duplicate the INSN_LIST elements of COPY and prepend them to OLD. */
static rtx
concat_INSN_LIST (rtx copy, rtx old)
{
- rtx new = old;
+ rtx new_rtx = old;
for (; copy ; copy = XEXP (copy, 1))
- new = alloc_INSN_LIST (XEXP (copy, 0), new);
- return new;
+ {
+ new_rtx = alloc_INSN_LIST (XEXP (copy, 0), new_rtx);
+ PUT_REG_NOTE_KIND (new_rtx, REG_NOTE_KIND (copy));
+ }
+ return new_rtx;
}
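concat_INSN_LIST copies one list and prepends the copies onto another;
the hunk above now also carries each note's kind along. A standalone
sketch with plain malloc'd nodes standing in for INSN_LISTs:

#include <stdio.h>
#include <stdlib.h>

struct node { int payload; int kind; struct node *next; };

static struct node *
copy_prepend (const struct node *copy, struct node *old)
{
  struct node *new_head = old;
  for (; copy; copy = copy->next)
    {
      struct node *n = malloc (sizeof *n);
      n->payload = copy->payload;
      n->kind = copy->kind;     /* preserve the note kind, as above */
      n->next = new_head;
      new_head = n;
    }
  return new_head;
}

int
main (void)
{
  struct node c2 = { 2, 1, NULL }, c1 = { 1, 0, &c2 };
  struct node *merged = copy_prepend (&c1, NULL);
  for (struct node *p = merged; p; p = p->next)
    printf ("payload %d kind %d\n", p->payload, p->kind);
  return 0;
}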
static void
*old_mems_p = new_mems;
}
+/* Join PRED_DEPS to the SUCC_DEPS. */
+void
+deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
+{
+ unsigned reg;
+ reg_set_iterator rsi;
+
+ /* The reg_last lists are inherited by successor. */
+ EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi)
+ {
+ struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
+ struct deps_reg *succ_rl = &succ_deps->reg_last[reg];
+
+ succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
+ succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
+ succ_rl->implicit_sets
+ = concat_INSN_LIST (pred_rl->implicit_sets, succ_rl->implicit_sets);
+ succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
+ succ_rl->clobbers);
+ succ_rl->uses_length += pred_rl->uses_length;
+ succ_rl->clobbers_length += pred_rl->clobbers_length;
+ }
+ IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);
+
+ /* Mem read/write lists are inherited by successor. */
+ concat_insn_mem_list (pred_deps->pending_read_insns,
+ pred_deps->pending_read_mems,
+ &succ_deps->pending_read_insns,
+ &succ_deps->pending_read_mems);
+ concat_insn_mem_list (pred_deps->pending_write_insns,
+ pred_deps->pending_write_mems,
+ &succ_deps->pending_write_insns,
+ &succ_deps->pending_write_mems);
+
+ succ_deps->last_pending_memory_flush
+ = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
+ succ_deps->last_pending_memory_flush);
+
+ succ_deps->pending_read_list_length += pred_deps->pending_read_list_length;
+ succ_deps->pending_write_list_length += pred_deps->pending_write_list_length;
+ succ_deps->pending_flush_length += pred_deps->pending_flush_length;
+
+ /* last_function_call is inherited by successor. */
+ succ_deps->last_function_call
+ = concat_INSN_LIST (pred_deps->last_function_call,
+ succ_deps->last_function_call);
+
+ /* last_function_call_may_noreturn is inherited by successor. */
+ succ_deps->last_function_call_may_noreturn
+ = concat_INSN_LIST (pred_deps->last_function_call_may_noreturn,
+ succ_deps->last_function_call_may_noreturn);
+
+ /* sched_before_next_call is inherited by successor. */
+ succ_deps->sched_before_next_call
+ = concat_INSN_LIST (pred_deps->sched_before_next_call,
+ succ_deps->sched_before_next_call);
+}
+
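The EXECUTE_IF_SET_IN_REG_SET loop above visits each member of a
register set by number. With a plain machine word standing in for the
set, the idiom reduces to:

#include <stdio.h>

int
main (void)
{
  unsigned in_use = 0x29;       /* toy set: regs 0, 3 and 5 */
  for (unsigned reg = 0; reg < 32; reg++)
    if (in_use & (1u << reg))
      printf ("merge reg_last lists for reg %u\n", reg);
  return 0;
}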
/* After computing the dependencies for block BB, propagate the dependencies
found in TMP_DEPS to the successors of the block. */
static void
-propagate_deps (int bb, struct deps *pred_deps)
+propagate_deps (int bb, struct deps_desc *pred_deps)
{
- int b = BB_TO_BLOCK (bb);
- int e, first_edge;
+ basic_block block = BASIC_BLOCK (BB_TO_BLOCK (bb));
+ edge_iterator ei;
+ edge e;
/* bb's structures are inherited by its successors. */
- first_edge = e = OUT_EDGES (b);
- if (e > 0)
- do
- {
- int b_succ = TO_BLOCK (e);
- int bb_succ = BLOCK_TO_BB (b_succ);
- struct deps *succ_deps = bb_deps + bb_succ;
- int reg;
-
- /* Only bbs "below" bb, in the same region, are interesting. */
- if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
- || bb_succ <= bb)
- {
- e = NEXT_OUT (e);
- continue;
- }
-
- /* The reg_last lists are inherited by bb_succ. */
- EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg,
- {
- struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
- struct deps_reg *succ_rl = &succ_deps->reg_last[reg];
-
- succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
- succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
- succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
- succ_rl->clobbers);
- succ_rl->uses_length += pred_rl->uses_length;
- succ_rl->clobbers_length += pred_rl->clobbers_length;
- });
- IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);
-
- /* Mem read/write lists are inherited by bb_succ. */
- concat_insn_mem_list (pred_deps->pending_read_insns,
- pred_deps->pending_read_mems,
- &succ_deps->pending_read_insns,
- &succ_deps->pending_read_mems);
- concat_insn_mem_list (pred_deps->pending_write_insns,
- pred_deps->pending_write_mems,
- &succ_deps->pending_write_insns,
- &succ_deps->pending_write_mems);
-
- succ_deps->last_pending_memory_flush
- = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
- succ_deps->last_pending_memory_flush);
-
- succ_deps->pending_lists_length += pred_deps->pending_lists_length;
- succ_deps->pending_flush_length += pred_deps->pending_flush_length;
-
- /* last_function_call is inherited by bb_succ. */
- succ_deps->last_function_call
- = concat_INSN_LIST (pred_deps->last_function_call,
- succ_deps->last_function_call);
-
- /* sched_before_next_call is inherited by bb_succ. */
- succ_deps->sched_before_next_call
- = concat_INSN_LIST (pred_deps->sched_before_next_call,
- succ_deps->sched_before_next_call);
-
- e = NEXT_OUT (e);
- }
- while (e != first_edge);
+ FOR_EACH_EDGE (e, ei, block->succs)
+ {
+ /* Only bbs "below" bb, in the same region, are interesting. */
+ if (e->dest == EXIT_BLOCK_PTR
+ || CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index)
+ || BLOCK_TO_BB (e->dest->index) <= bb)
+ continue;
+
+ deps_join (bb_deps + BLOCK_TO_BB (e->dest->index), pred_deps);
+ }
/* These lists should point to the right place, for correct
freeing later. */
pred_deps->pending_write_mems = 0;
}
-/* Compute backward dependences inside bb. In a multiple blocks region:
+/* Compute dependences inside bb. In a multi-block region:
(1) a bb is analyzed after its predecessors, and (2) the lists in
effect at the end of bb (after analyzing for bb) are inherited by
bb's successors.
Specifically for reg-reg data dependences, the block insns are
- scanned by sched_analyze () top-to-bottom. Two lists are
+ scanned by sched_analyze () top-to-bottom. Three lists are
maintained by sched_analyze (): reg_last[].sets for register DEFs,
- and reg_last[].uses for register USEs.
+ reg_last[].implicit_sets for implicit hard register DEFs, and
+ reg_last[].uses for register USEs.
When analysis is completed for bb, we update for its successors:
; - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
+ ; - IMPLICIT_DEFS[succ] = Union (IMPLICIT_DEFS [succ], IMPLICIT_DEFS [bb])
; - USES[succ] = Union (USES [succ], DEFS [bb])
The mechanism for computing mem-mem data dependence is very
similar, and the result is interblock dependences in the region. */
static void
-compute_block_backward_dependences (int bb)
+compute_block_dependences (int bb)
{
rtx head, tail;
- struct deps tmp_deps;
+ struct deps_desc tmp_deps;
tmp_deps = bb_deps[bb];
/* Do the analysis for this block. */
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
+ gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
+
sched_analyze (&tmp_deps, head, tail);
- add_branch_dependences (head, tail);
+
+ /* Selective scheduling handles control dependencies by itself. */
+ if (!sel_sched_p ())
+ add_branch_dependences (head, tail);
if (current_nr_blocks > 1)
propagate_deps (bb, &tmp_deps);
/* Free up the INSN_LISTs. */
free_deps (&tmp_deps);
+
+ if (targetm.sched.dependencies_evaluation_hook)
+ targetm.sched.dependencies_evaluation_hook (head, tail);
+}
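The inheritance rule in the comment above (analyze blocks in topological
order, union each block's lists into those of its later successors in
the region) can be sketched with toy bit masks in place of the lists:

#include <stdio.h>

#define N_BB 3

int
main (void)
{
  unsigned defs[N_BB] = { 0x3,  /* bb0 defines r0, r1 */
                          0x4,  /* bb1 defines r2 */
                          0x0 };
  /* succ[i][j] != 0 means bb j is a successor of bb i.  */
  int succ[N_BB][N_BB] = { {0, 1, 1}, {0, 0, 1}, {0, 0, 0} };

  for (int bb = 0; bb < N_BB; bb++)
    for (int s = bb + 1; s < N_BB; s++)   /* only bbs "below" bb */
      if (succ[bb][s])
        defs[s] |= defs[bb];  /* DEFS[succ] = Union (DEFS[succ], DEFS[bb]) */

  for (int bb = 0; bb < N_BB; bb++)
    printf ("bb%d inherited DEFS mask: %#x\n", bb, defs[bb]);
  return 0;
}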
+
+/* Free dependencies of instructions inside BB. */
+static void
+free_block_dependencies (int bb)
+{
+ rtx head;
+ rtx tail;
+
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
+
+ if (no_real_insns_p (head, tail))
+ return;
+
+ sched_free_deps (head, tail, true);
}
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
}
}
\f
-/* Print dependences for debugging, callable from debugger. */
-
-void
-debug_dependencies (void)
+/* Print dependences for debugging starting from FROM_BB.
+ Callable from debugger. */
+DEBUG_FUNCTION void
+debug_rgn_dependencies (int from_bb)
{
int bb;
- fprintf (sched_dump, ";; --------------- forward dependences: ------------ \n");
- for (bb = 0; bb < current_nr_blocks; bb++)
+ fprintf (sched_dump,
+ ";; --------------- forward dependences: ------------ \n");
+
+ for (bb = from_bb; bb < current_nr_blocks; bb++)
{
- if (1)
- {
- rtx head, tail;
- rtx next_tail;
- rtx insn;
+ rtx head, tail;
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
- next_tail = NEXT_INSN (tail);
- fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
- BB_TO_BLOCK (bb), bb);
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
+ fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
+ BB_TO_BLOCK (bb), bb);
- if (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ())
+ debug_dependencies (head, tail);
+ }
+}
+
+/* Print dependencies information for instructions between HEAD and TAIL.
+ ??? This function would probably fit best in haifa-sched.c. */
+void
+debug_dependencies (rtx head, rtx tail)
+{
+ rtx insn;
+ rtx next_tail = NEXT_INSN (tail);
+
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
+ "insn", "code", "bb", "dep", "prio", "cost",
+ "reservation");
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
+ "----", "----", "--", "---", "----", "----",
+ "-----------");
+
+ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+ {
+ if (! INSN_P (insn))
+ {
+ int n;
+ fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
+ if (NOTE_P (insn))
{
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
- "insn", "code", "bb", "dep", "prio", "cost",
- "reservation");
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
- "----", "----", "--", "---", "----", "----",
- "-----------");
+ n = NOTE_KIND (insn);
+ fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
}
else
- {
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
- "insn", "code", "bb", "dep", "prio", "cost", "blockage", "units");
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
- "----", "----", "--", "---", "----", "----", "--------", "-----");
- }
-
- for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
- {
- rtx link;
+ fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
+ continue;
+ }
- if (! INSN_P (insn))
- {
- int n;
- fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
- if (GET_CODE (insn) == NOTE)
- {
- n = NOTE_LINE_NUMBER (insn);
- if (n < 0)
- fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
- else
- fprintf (sched_dump, "line %d, file %s\n", n,
- NOTE_SOURCE_FILE (insn));
- }
- else
- fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
- continue;
- }
+ fprintf (sched_dump,
+ ";; %s%5d%6d%6d%6d%6d%6d ",
+ (SCHED_GROUP_P (insn) ? "+" : " "),
+ INSN_UID (insn),
+ INSN_CODE (insn),
+ BLOCK_NUM (insn),
+ sched_emulate_haifa_p ? -1 : sd_lists_size (insn, SD_LIST_BACK),
+ (sel_sched_p () ? (sched_emulate_haifa_p ? -1
+ : INSN_PRIORITY (insn))
+ : INSN_PRIORITY (insn)),
+ (sel_sched_p () ? (sched_emulate_haifa_p ? -1
+ : insn_cost (insn))
+ : insn_cost (insn)));
+
+ if (recog_memoized (insn) < 0)
+ fprintf (sched_dump, "nothing");
+ else
+ print_reservation (sched_dump, insn);
- if (targetm.sched.use_dfa_pipeline_interface
- && (*targetm.sched.use_dfa_pipeline_interface) ())
- {
- fprintf (sched_dump,
- ";; %s%5d%6d%6d%6d%6d%6d ",
- (SCHED_GROUP_P (insn) ? "+" : " "),
- INSN_UID (insn),
- INSN_CODE (insn),
- INSN_BB (insn),
- INSN_DEP_COUNT (insn),
- INSN_PRIORITY (insn),
- insn_cost (insn, 0, 0));
-
- if (recog_memoized (insn) < 0)
- fprintf (sched_dump, "nothing");
- else
- print_reservation (sched_dump, insn);
- }
- else
- {
- int unit = insn_unit (insn);
- int range
- = (unit < 0
- || function_units[unit].blockage_range_function == 0
- ? 0
- : function_units[unit].blockage_range_function (insn));
- fprintf (sched_dump,
- ";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ",
- (SCHED_GROUP_P (insn) ? "+" : " "),
- INSN_UID (insn),
- INSN_CODE (insn),
- INSN_BB (insn),
- INSN_DEP_COUNT (insn),
- INSN_PRIORITY (insn),
- insn_cost (insn, 0, 0),
- (int) MIN_BLOCKAGE_COST (range),
- (int) MAX_BLOCKAGE_COST (range));
- insn_print_units (insn);
- }
+ fprintf (sched_dump, "\t: ");
+ {
+ sd_iterator_def sd_it;
+ dep_t dep;
- fprintf (sched_dump, "\t: ");
- for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
- fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0)));
- fprintf (sched_dump, "\n");
- }
- }
+ FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
+ fprintf (sched_dump, "%d ", INSN_UID (DEP_CON (dep)));
+ }
+ fprintf (sched_dump, "\n");
}
+
fprintf (sched_dump, "\n");
}
\f
-/* Schedule a region. A region is either an inner loop, a loop-free
- subroutine, or a single basic block. Each bb in the region is
- scheduled after its flow predecessors. */
-
-static void
-schedule_region (int rgn)
+/* Return true if all the basic blocks of the current region have
+ the BB_DISABLE_SCHEDULE flag set, which means the region must not
+ be scheduled. */
+bool
+sched_is_disabled_for_current_region_p (void)
{
int bb;
- int rgn_n_insns = 0;
- int sched_rgn_n_insns = 0;
- /* Set variables for the current region. */
- current_nr_blocks = RGN_NR_BLOCKS (rgn);
- current_blocks = RGN_BLOCKS (rgn);
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ if (!(BASIC_BLOCK (BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE))
+ return false;
- init_deps_global ();
+ return true;
+}
- /* Initializations for region data dependence analysis. */
- bb_deps = xmalloc (sizeof (struct deps) * current_nr_blocks);
- for (bb = 0; bb < current_nr_blocks; bb++)
- init_deps (bb_deps + bb);
+/* Free all region dependencies saved in INSN_BACK_DEPS and
+ INSN_RESOLVED_BACK_DEPS. The Haifa scheduler does this on the fly
+ when scheduling, so this function is supposed to be called from
+ the selective scheduling only. */
+void
+free_rgn_deps (void)
+{
+ int bb;
- /* Compute LOG_LINKS. */
for (bb = 0; bb < current_nr_blocks; bb++)
- compute_block_backward_dependences (bb);
-
- /* Compute INSN_DEPEND. */
- for (bb = current_nr_blocks - 1; bb >= 0; bb--)
{
rtx head, tail;
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
-
- compute_forward_dependences (head, tail);
- if (targetm.sched.dependencies_evaluation_hook)
- targetm.sched.dependencies_evaluation_hook (head, tail);
+ gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
+ sched_free_deps (head, tail, false);
}
+}
- /* Set priorities. */
+static int rgn_n_insns;
+
+/* Compute insn priority for a current region. */
+void
+compute_priorities (void)
+{
+ int bb;
+
+ current_sched_info->sched_max_insns_priority = 0;
for (bb = 0; bb < current_nr_blocks; bb++)
{
rtx head, tail;
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
+
+ gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
+
+ if (no_real_insns_p (head, tail))
+ continue;
rgn_n_insns += set_priorities (head, tail);
}
+ current_sched_info->sched_max_insns_priority++;
+}
- /* Compute interblock info: probabilities, split-edges, dominators, etc. */
- if (current_nr_blocks > 1)
- {
- int i;
+/* Schedule a region. A region is either an inner loop, a loop-free
+ subroutine, or a single basic block. Each bb in the region is
+ scheduled after its flow predecessors. */
- prob = xmalloc ((current_nr_blocks) * sizeof (float));
+static void
+schedule_region (int rgn)
+{
+ int bb;
+ int sched_rgn_n_insns = 0;
- dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
- sbitmap_vector_zero (dom, current_nr_blocks);
- /* Edge to bit. */
- rgn_nr_edges = 0;
- edge_to_bit = xmalloc (nr_edges * sizeof (int));
- for (i = 1; i < nr_edges; i++)
- if (CONTAINING_RGN (FROM_BLOCK (i)) == rgn)
- EDGE_TO_BIT (i) = rgn_nr_edges++;
- rgn_edges = xmalloc (rgn_nr_edges * sizeof (int));
+ rgn_n_insns = 0;
- rgn_nr_edges = 0;
- for (i = 1; i < nr_edges; i++)
- if (CONTAINING_RGN (FROM_BLOCK (i)) == (rgn))
- rgn_edges[rgn_nr_edges++] = i;
+ rgn_setup_region (rgn);
- /* Split edges. */
- pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
- sbitmap_vector_zero (pot_split, current_nr_blocks);
- ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
- sbitmap_vector_zero (ancestor_edges, current_nr_blocks);
+ /* Don't schedule a region whose blocks all carry the
+ BB_DISABLE_SCHEDULE flag. */
+ if (sched_is_disabled_for_current_region_p ())
+ return;
- /* Compute probabilities, dominators, split_edges. */
+ sched_rgn_compute_dependencies (rgn);
+
+ sched_rgn_local_init (rgn);
+
+ /* Set priorities. */
+ compute_priorities ();
+
+ sched_extend_ready_list (rgn_n_insns);
+
+ if (sched_pressure_p)
+ {
+ sched_init_region_reg_pressure_info ();
for (bb = 0; bb < current_nr_blocks; bb++)
- compute_dom_prob_ps (bb);
+ {
+ basic_block first_bb, last_bb;
+ rtx head, tail;
+
+ first_bb = EBB_FIRST_BB (bb);
+ last_bb = EBB_LAST_BB (bb);
+
+ get_ebb_head_tail (first_bb, last_bb, &head, &tail);
+
+ if (no_real_insns_p (head, tail))
+ {
+ gcc_assert (first_bb == last_bb);
+ continue;
+ }
+ sched_setup_bb_reg_pressure_info (first_bb, PREV_INSN (head));
+ }
}
/* Now we can schedule all blocks. */
for (bb = 0; bb < current_nr_blocks; bb++)
{
+ basic_block first_bb, last_bb, curr_bb;
rtx head, tail;
- int b = BB_TO_BLOCK (bb);
- get_block_head_tail (b, &head, &tail);
+ first_bb = EBB_FIRST_BB (bb);
+ last_bb = EBB_LAST_BB (bb);
+
+ get_ebb_head_tail (first_bb, last_bb, &head, &tail);
if (no_real_insns_p (head, tail))
- continue;
+ {
+ gcc_assert (first_bb == last_bb);
+ continue;
+ }
current_sched_info->prev_head = PREV_INSN (head);
current_sched_info->next_tail = NEXT_INSN (tail);
- if (write_symbols != NO_DEBUG)
- {
- save_line_notes (b, head, tail);
- rm_line_notes (head, tail);
- }
-
- /* rm_other_notes only removes notes which are _inside_ the
- block---that is, it won't remove notes before the first real insn
- or after the last real insn of the block. So if the first insn
- has a REG_SAVE_NOTE which would otherwise be emitted before the
- insn, it is redundant with the note before the start of the
- block, and so we have to take it out. */
- if (INSN_P (head))
- {
- rtx note;
-
- for (note = REG_NOTES (head); note; note = XEXP (note, 1))
- if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
- {
- remove_note (head, note);
- note = XEXP (note, 1);
- remove_note (head, note);
- }
- }
+ remove_notes (head, tail);
- /* Remove remaining note insns from the block, save them in
- note_list. These notes are restored at the end of
- schedule_block (). */
- rm_other_notes (head, tail);
+ unlink_bb_notes (first_bb, last_bb);
target_bb = bb;
- current_sched_info->queue_must_finish_empty
- = current_nr_blocks > 1 && !flag_schedule_interblock;
+ gcc_assert (flag_schedule_interblock || current_nr_blocks == 1);
+ current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;
- schedule_block (b, rgn_n_insns);
- sched_rgn_n_insns += sched_n_insns;
-
- /* Update target block boundaries. */
- if (head == BB_HEAD (BASIC_BLOCK (b)))
- BB_HEAD (BASIC_BLOCK (b)) = current_sched_info->head;
- if (tail == BB_END (BASIC_BLOCK (b)))
- BB_END (BASIC_BLOCK (b)) = current_sched_info->tail;
+ curr_bb = first_bb;
+ if (dbg_cnt (sched_block))
+ {
+ schedule_block (&curr_bb);
+ gcc_assert (EBB_FIRST_BB (bb) == first_bb);
+ sched_rgn_n_insns += sched_n_insns;
+ }
+ else
+ {
+ sched_rgn_n_insns += rgn_n_insns;
+ }
/* Clean up. */
if (current_nr_blocks > 1)
- {
- free (candidate_table);
- free (bblst_table);
- free (bitlst_table);
- }
+ free_trg_info ();
}
/* Sanity check: verify that all region insns were scheduled. */
- if (sched_rgn_n_insns != rgn_n_insns)
- abort ();
+ gcc_assert (sched_rgn_n_insns == rgn_n_insns);
- /* Restore line notes. */
- if (write_symbols != NO_DEBUG)
- {
- for (bb = 0; bb < current_nr_blocks; bb++)
- {
- rtx head, tail;
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
- restore_line_notes (head, tail);
- }
- }
+ sched_finish_ready_list ();
/* Done with this region. */
- free_pending_lists ();
+ sched_rgn_local_finish ();
- finish_deps_global ();
+ /* Free dependencies. */
+ for (bb = 0; bb < current_nr_blocks; ++bb)
+ free_block_dependencies (bb);
- free (bb_deps);
-
- if (current_nr_blocks > 1)
- {
- free (prob);
- sbitmap_vector_free (dom);
- sbitmap_vector_free (pot_split);
- sbitmap_vector_free (ancestor_edges);
- free (edge_to_bit);
- free (rgn_edges);
- }
+ gcc_assert (haifa_recovery_bb_ever_added_p
+ || deps_pools_are_empty_p ());
}
-/* Indexed by region, holds the number of death notes found in that region.
- Used for consistency checks. */
-static int *deaths_in_region;
-
/* Initialize data structures for region scheduling. */
-static void
-init_regions (void)
+void
+sched_rgn_init (bool single_blocks_p)
{
- sbitmap blocks;
- int rgn;
+ min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
+ / 100);
- nr_regions = 0;
- rgn_table = xmalloc ((n_basic_blocks) * sizeof (region));
- rgn_bb_table = xmalloc ((n_basic_blocks) * sizeof (int));
- block_to_bb = xmalloc ((last_basic_block) * sizeof (int));
- containing_rgn = xmalloc ((last_basic_block) * sizeof (int));
+ nr_inter = 0;
+ nr_spec = 0;
+
+ extend_regions ();
+
+ CONTAINING_RGN (ENTRY_BLOCK) = -1;
+ CONTAINING_RGN (EXIT_BLOCK) = -1;
/* Compute regions for scheduling. */
- if (reload_completed
- || n_basic_blocks == 1
- || !flag_schedule_interblock)
+ if (single_blocks_p
+ || n_basic_blocks == NUM_FIXED_BLOCKS + 1
+ || !flag_schedule_interblock
+ || is_cfg_nonregular ())
{
- find_single_block_region ();
+ find_single_block_region (sel_sched_p ());
}
else
{
- /* Verify that a 'good' control flow graph can be built. */
- if (is_cfg_nonregular ())
+ /* Compute the dominators and post dominators. */
+ if (!sel_sched_p ())
+ calculate_dominance_info (CDI_DOMINATORS);
+
+ /* Find regions. */
+ find_rgns ();
+
+ if (sched_verbose >= 3)
+ debug_regions ();
+
+ /* For now. This will move as more and more of haifa is converted
+ to using the cfg code. */
+ if (!sel_sched_p ())
+ free_dominance_info (CDI_DOMINATORS);
+ }
+
+ gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks);
+
+ RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) +
+ RGN_NR_BLOCKS (nr_regions - 1));
+}
+
+/* Free data structures for region scheduling. */
+void
+sched_rgn_finish (void)
+{
+ /* Reposition the prologue and epilogue notes in case we moved the
+ prologue/epilogue insns. */
+ if (reload_completed)
+ reposition_prologue_and_epilogue_notes ();
+
+ if (sched_verbose)
+ {
+ if (reload_completed == 0
+ && flag_schedule_interblock)
{
- find_single_block_region ();
+ fprintf (sched_dump,
+ "\n;; Procedure interblock/speculative motions == %d/%d \n",
+ nr_inter, nr_spec);
}
else
- {
- struct edge_list *edge_list;
-
- /* The scheduler runs after estimate_probabilities; therefore, we
- can't blindly call back into find_basic_blocks since doing so
- could invalidate the branch probability info. We could,
- however, call cleanup_cfg. */
- edge_list = create_edge_list ();
-
- /* Compute the dominators and post dominators. */
- calculate_dominance_info (CDI_DOMINATORS);
-
- /* build_control_flow will return nonzero if it detects unreachable
- blocks or any other irregularity with the cfg which prevents
- cross block scheduling. */
- if (build_control_flow (edge_list) != 0)
- find_single_block_region ();
- else
- find_rgns (edge_list);
+ gcc_assert (nr_inter <= 0);
+ fprintf (sched_dump, "\n\n");
+ }
- if (sched_verbose >= 3)
- debug_regions ();
+ nr_regions = 0;
- /* We are done with flow's edge list. */
- free_edge_list (edge_list);
+ free (rgn_table);
+ rgn_table = NULL;
- /* For now. This will move as more and more of haifa is converted
- to using the cfg code in flow.c. */
- free_dominance_info (CDI_DOMINATORS);
- }
+ free (rgn_bb_table);
+ rgn_bb_table = NULL;
+
+ free (block_to_bb);
+ block_to_bb = NULL;
+
+ free (containing_rgn);
+ containing_rgn = NULL;
+
+ free (ebb_head);
+ ebb_head = NULL;
+}
+
+/* Set up global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCKS to
+ point to the region RGN. */
+void
+rgn_setup_region (int rgn)
+{
+ int bb;
+
+ /* Set variables for the current region. */
+ current_nr_blocks = RGN_NR_BLOCKS (rgn);
+ current_blocks = RGN_BLOCKS (rgn);
+
+ /* EBB_HEAD is a region-scope structure. But we realloc it for
+ each region to save time/memory/something else.
+ See comments in add_block1, for what reasons we allocate +1 element. */
+ ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1);
+ for (bb = 0; bb <= current_nr_blocks; bb++)
+ ebb_head[bb] = current_blocks + bb;
+}
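The extra sentinel element allocated above makes
ebb_head[bb] .. ebb_head[bb + 1] - 1 a valid bracket for every ebb,
including the last one. A toy illustration with invented table contents:

#include <stdio.h>

int
main (void)
{
  int rgn_bb_table[] = { 7, 3, 9, 5 };  /* block indices, toy data */
  int n_ebbs = 2;
  int ebb_head[] = { 0, 1, 4 };         /* n_ebbs + 1 entries */

  for (int bb = 0; bb < n_ebbs; bb++)
    {
      printf ("ebb %d:", bb);
      for (int pos = ebb_head[bb]; pos < ebb_head[bb + 1]; pos++)
        printf (" block %d", rgn_bb_table[pos]);
      printf ("\n");
    }
  return 0;
}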
+
+/* Compute instruction dependencies in region RGN. */
+void
+sched_rgn_compute_dependencies (int rgn)
+{
+ if (!RGN_DONT_CALC_DEPS (rgn))
+ {
+ int bb;
+
+ if (sel_sched_p ())
+ sched_emulate_haifa_p = 1;
+
+ init_deps_global ();
+
+ /* Initializations for region data dependence analysis. */
+ bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ init_deps (bb_deps + bb, false);
+
+ /* Initialize bitmap used in add_branch_dependences. */
+ insn_referenced = sbitmap_alloc (sched_max_luid);
+ sbitmap_zero (insn_referenced);
+
+ /* Compute backward dependencies. */
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ compute_block_dependences (bb);
+
+ sbitmap_free (insn_referenced);
+ free_pending_lists ();
+ finish_deps_global ();
+ free (bb_deps);
+
+ /* We don't want to recalculate this twice. */
+ RGN_DONT_CALC_DEPS (rgn) = 1;
+
+ if (sel_sched_p ())
+ sched_emulate_haifa_p = 0;
}
+ else
+ /* (This is a recovery block. It is always a single block region.)
+ OR (We use selective scheduling.) */
+ gcc_assert (current_nr_blocks == 1 || sel_sched_p ());
+}
+/* Initialize data structures local to region RGN. */
+void
+sched_rgn_local_init (int rgn)
+{
+ int bb;
- if (CHECK_DEAD_NOTES)
+ /* Compute interblock info: probabilities, split-edges, dominators, etc. */
+ if (current_nr_blocks > 1)
{
- blocks = sbitmap_alloc (last_basic_block);
- deaths_in_region = xmalloc (sizeof (int) * nr_regions);
- /* Remove all death notes from the subroutine. */
- for (rgn = 0; rgn < nr_regions; rgn++)
- {
- int b;
+ basic_block block;
+ edge e;
+ edge_iterator ei;
- sbitmap_zero (blocks);
- for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b)
- SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn) + b]);
+ prob = XNEWVEC (int, current_nr_blocks);
- deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
+ dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
+ sbitmap_vector_zero (dom, current_nr_blocks);
+
+ /* Use ->aux to implement EDGE_TO_BIT mapping. */
+ rgn_nr_edges = 0;
+ FOR_EACH_BB (block)
+ {
+ if (CONTAINING_RGN (block->index) != rgn)
+ continue;
+ FOR_EACH_EDGE (e, ei, block->succs)
+ SET_EDGE_TO_BIT (e, rgn_nr_edges++);
}
- sbitmap_free (blocks);
+
+ rgn_edges = XNEWVEC (edge, rgn_nr_edges);
+ rgn_nr_edges = 0;
+ FOR_EACH_BB (block)
+ {
+ if (CONTAINING_RGN (block->index) != rgn)
+ continue;
+ FOR_EACH_EDGE (e, ei, block->succs)
+ rgn_edges[rgn_nr_edges++] = e;
+ }
+
+ /* Split edges. */
+ pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
+ sbitmap_vector_zero (pot_split, current_nr_blocks);
+ ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
+ sbitmap_vector_zero (ancestor_edges, current_nr_blocks);
+
+ /* Compute probabilities, dominators, split_edges. */
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ compute_dom_prob_ps (bb);
+
+ /* Cleanup ->aux used for EDGE_TO_BIT mapping. */
+ /* We don't need them anymore. But we want to avoid duplication of
+ aux fields in the newly created edges. */
+ FOR_EACH_BB (block)
+ {
+ if (CONTAINING_RGN (block->index) != rgn)
+ continue;
+ FOR_EACH_EDGE (e, ei, block->succs)
+ e->aux = NULL;
+ }
}
- else
- count_or_remove_death_notes (NULL, 1);
}
-/* The one entry point in this file. DUMP_FILE is the dump file for
- this pass. */
+/* Free data computed for the finished region. */
+void
+sched_rgn_local_free (void)
+{
+ free (prob);
+ sbitmap_vector_free (dom);
+ sbitmap_vector_free (pot_split);
+ sbitmap_vector_free (ancestor_edges);
+ free (rgn_edges);
+}
+
+/* Free data computed for the finished region, unless the selective
+ scheduler still needs it. */
+void
+sched_rgn_local_finish (void)
+{
+ if (current_nr_blocks > 1 && !sel_sched_p ())
+ {
+ sched_rgn_local_free ();
+ }
+}
+
+/* Setup scheduler infos. */
+void
+rgn_setup_common_sched_info (void)
+{
+ memcpy (&rgn_common_sched_info, &haifa_common_sched_info,
+ sizeof (rgn_common_sched_info));
+
+ rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg;
+ rgn_common_sched_info.add_block = rgn_add_block;
+ rgn_common_sched_info.estimate_number_of_insns
+ = rgn_estimate_number_of_insns;
+ rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS;
+
+ common_sched_info = &rgn_common_sched_info;
+}
+
+/* Setup all *_sched_info structures (for the Haifa frontend
+ and for the dependence analysis) in the interblock scheduler. */
+void
+rgn_setup_sched_infos (void)
+{
+ if (!sel_sched_p ())
+ memcpy (&rgn_sched_deps_info, &rgn_const_sched_deps_info,
+ sizeof (rgn_sched_deps_info));
+ else
+ memcpy (&rgn_sched_deps_info, &rgn_const_sel_sched_deps_info,
+ sizeof (rgn_sched_deps_info));
+
+ sched_deps_info = &rgn_sched_deps_info;
+
+ memcpy (&rgn_sched_info, &rgn_const_sched_info, sizeof (rgn_sched_info));
+ current_sched_info = &rgn_sched_info;
+}
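The memcpy-then-override initialization above is the usual hook-table
pattern: copy a const template of callbacks, then replace the few
members this client needs. A standalone sketch with made-up hook names:

#include <stdio.h>
#include <string.h>

struct sched_hooks
{
  void (*init) (void);
  void (*finish) (void);
};

static void generic_init (void)   { puts ("generic init"); }
static void generic_finish (void) { puts ("generic finish"); }
static void rgn_finish (void)     { puts ("region-specific finish"); }

static const struct sched_hooks template_hooks
  = { generic_init, generic_finish };

static struct sched_hooks active_hooks;

int
main (void)
{
  memcpy (&active_hooks, &template_hooks, sizeof (active_hooks));
  active_hooks.finish = rgn_finish;   /* override a single hook */
  active_hooks.init ();
  active_hooks.finish ();
  return 0;
}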
+/* The one entry point in this file. */
void
-schedule_insns (FILE *dump_file)
+schedule_insns (void)
{
- sbitmap large_region_blocks, blocks;
int rgn;
- int any_large_regions;
- basic_block bb;
/* Taking care of this degenerate case makes the rest of
this code simpler. */
- if (n_basic_blocks == 0)
+ if (n_basic_blocks == NUM_FIXED_BLOCKS)
return;
- nr_inter = 0;
- nr_spec = 0;
-
- sched_init (dump_file);
+ rgn_setup_common_sched_info ();
+ rgn_setup_sched_infos ();
- init_regions ();
+ haifa_sched_init ();
+ sched_rgn_init (reload_completed);
- current_sched_info = &region_sched_info;
+ bitmap_initialize (&not_in_df, 0);
+ bitmap_clear (&not_in_df);
/* Schedule every region in the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
- schedule_region (rgn);
-
- /* Update life analysis for the subroutine. Do single block regions
- first so that we can verify that live_at_start didn't change. Then
- do all other blocks. */
- /* ??? There is an outside possibility that update_life_info, or more
- to the point propagate_block, could get called with nonzero flags
- more than once for one basic block. This would be kinda bad if it
- were to happen, since REG_INFO would be accumulated twice for the
- block, and we'd have twice the REG_DEAD notes.
-
- I'm fairly certain that this _shouldn't_ happen, since I don't think
- that live_at_start should change at region heads. Not sure what the
- best way to test for this kind of thing... */
-
- allocate_reg_life_data ();
- compute_bb_for_insn ();
-
- any_large_regions = 0;
- large_region_blocks = sbitmap_alloc (last_basic_block);
- sbitmap_zero (large_region_blocks);
- FOR_EACH_BB (bb)
- SET_BIT (large_region_blocks, bb->index);
+ if (dbg_cnt (sched_region))
+ schedule_region (rgn);
- blocks = sbitmap_alloc (last_basic_block);
- sbitmap_zero (blocks);
+ /* Clean up. */
+ sched_rgn_finish ();
+ bitmap_clear (&not_in_df);
- /* Update life information. For regions consisting of multiple blocks
- we've possibly done interblock scheduling that affects global liveness.
- For regions consisting of single blocks we need to do only local
- liveness. */
- for (rgn = 0; rgn < nr_regions; rgn++)
- if (RGN_NR_BLOCKS (rgn) > 1)
- any_large_regions = 1;
- else
- {
- SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
- RESET_BIT (large_region_blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
- }
+ haifa_sched_finish ();
+}
- /* Don't update reg info after reload, since that affects
- regs_ever_live, which should not change after reload. */
- update_life_info (blocks, UPDATE_LIFE_LOCAL,
- (reload_completed ? PROP_DEATH_NOTES
- : PROP_DEATH_NOTES | PROP_REG_INFO));
- if (any_large_regions)
- {
- update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL,
- PROP_DEATH_NOTES | PROP_REG_INFO);
- }
+/* INSN has been added to/removed from current region. */
+static void
+rgn_add_remove_insn (rtx insn, int remove_p)
+{
+ if (!remove_p)
+ rgn_n_insns++;
+ else
+ rgn_n_insns--;
- if (CHECK_DEAD_NOTES)
+ if (INSN_BB (insn) == target_bb)
{
- /* Verify the counts of basic block notes in single the basic block
- regions. */
- for (rgn = 0; rgn < nr_regions; rgn++)
- if (RGN_NR_BLOCKS (rgn) == 1)
- {
- sbitmap_zero (blocks);
- SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
-
- if (deaths_in_region[rgn]
- != count_or_remove_death_notes (blocks, 0))
- abort ();
- }
- free (deaths_in_region);
+ if (!remove_p)
+ target_n_insns++;
+ else
+ target_n_insns--;
}
+}
- /* Reposition the prologue and epilogue notes in case we moved the
- prologue/epilogue insns. */
- if (reload_completed)
- reposition_prologue_and_epilogue_notes (get_insns ());
+/* Extend internal data structures. */
+void
+extend_regions (void)
+{
+ rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks);
+ rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, n_basic_blocks);
+ block_to_bb = XRESIZEVEC (int, block_to_bb, last_basic_block);
+ containing_rgn = XRESIZEVEC (int, containing_rgn, last_basic_block);
+}
- /* Delete redundant line notes. */
- if (write_symbols != NO_DEBUG)
- rm_redundant_line_notes ();
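+/* Create a new single-block region that contains only BB. */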
+void
+rgn_make_new_region_out_of_new_block (basic_block bb)
+{
+ int i;
- if (sched_verbose)
- {
- if (reload_completed == 0 && flag_schedule_interblock)
- {
- fprintf (sched_dump,
- "\n;; Procedure interblock/speculative motions == %d/%d \n",
- nr_inter, nr_spec);
- }
- else
- {
- if (nr_inter > 0)
- abort ();
- }
- fprintf (sched_dump, "\n\n");
- }
+ i = RGN_BLOCKS (nr_regions);
+ /* I - first free position in rgn_bb_table. */
- /* Clean up. */
- free (rgn_table);
- free (rgn_bb_table);
- free (block_to_bb);
- free (containing_rgn);
+ rgn_bb_table[i] = bb->index;
+ RGN_NR_BLOCKS (nr_regions) = 1;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ CONTAINING_RGN (bb->index) = nr_regions;
+ BLOCK_TO_BB (bb->index) = 0;
- sched_finish ();
+ nr_regions++;
- if (edge_table)
- {
- free (edge_table);
- edge_table = NULL;
- }
+ RGN_BLOCKS (nr_regions) = i + 1;
+}
- if (in_edges)
+/* BB was added to ebb after AFTER. */
+static void
+rgn_add_block (basic_block bb, basic_block after)
+{
+ extend_regions ();
+ bitmap_set_bit (&not_in_df, bb->index);
+
+ if (after == 0 || after == EXIT_BLOCK_PTR)
{
- free (in_edges);
- in_edges = NULL;
+ rgn_make_new_region_out_of_new_block (bb);
+ RGN_DONT_CALC_DEPS (nr_regions - 1) = (after == EXIT_BLOCK_PTR);
}
- if (out_edges)
+ else
{
- free (out_edges);
- out_edges = NULL;
+ int i, pos;
+
+ /* We need to fix rgn_table, block_to_bb, containing_rgn
+ and ebb_head. */
+
+ BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index);
+
+ /* We extend ebb_head to one more position to
+ easily find the last position of the last ebb in
+ the current region. Thus, ebb_head[BLOCK_TO_BB (after) + 1]
+ is _always_ valid for access. */
+
+ i = BLOCK_TO_BB (after->index) + 1;
+ pos = ebb_head[i] - 1;
+ /* Now POS is the index of the last block in the region. */
+
+ /* Find index of basic block AFTER. */
+ for (; rgn_bb_table[pos] != after->index; pos--);
+
+ pos++;
+ gcc_assert (pos > ebb_head[i - 1]);
+
+ /* i - ebb right after "AFTER". */
+ /* ebb_head[i] - VALID. */
+
+ /* Source position: ebb_head[i]
+ Destination position: ebb_head[i] + 1
+ Last position:
+ RGN_BLOCKS (nr_regions) - 1
+ Number of elements to copy: (last_position) - (source_position) + 1
+ */
+
+ memmove (rgn_bb_table + pos + 1,
+ rgn_bb_table + pos,
+ ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1)
+ * sizeof (*rgn_bb_table));
+
+ rgn_bb_table[pos] = bb->index;
+
+ for (; i <= current_nr_blocks; i++)
+ ebb_head [i]++;
+
+ i = CONTAINING_RGN (after->index);
+ CONTAINING_RGN (bb->index) = i;
+
+ RGN_HAS_REAL_EBB (i) = 1;
+
+ for (++i; i <= nr_regions; i++)
+ RGN_BLOCKS (i)++;
}
+}
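The table surgery in rgn_add_block, one memmove to open a slot and then
bumping every recorded offset at or past that slot, looks like this in
isolation (toy data):

#include <stdio.h>
#include <string.h>

int
main (void)
{
  int table[8] = { 10, 20, 30, 40 };
  int len = 4, pos = 2, new_elem = 25;
  int offsets[3] = { 0, 2, 4 };   /* ebb_head-style position marks */

  /* Open a slot at POS and drop the new element in.  */
  memmove (table + pos + 1, table + pos, (len - pos) * sizeof *table);
  table[pos] = new_elem;
  len++;

  /* Offsets at or past the slot now point one element further.  */
  for (int i = 0; i < 3; i++)
    if (offsets[i] >= pos)
      offsets[i]++;

  for (int i = 0; i < len; i++)
    printf ("%d ", table[i]);
  printf ("\n");
  return 0;
}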
- sbitmap_free (blocks);
- sbitmap_free (large_region_blocks);
+/* Fix internal data after interblock movement of a jump instruction.
+ For the meaning of the parameters, see
+ sched-int.h: struct sched_info: fix_recovery_cfg. */
+static void
+rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti)
+{
+ int old_pos, new_pos, i;
+
+ BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi);
+
+ for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1;
+ rgn_bb_table[old_pos] != check_bb_nexti;
+ old_pos--);
+ gcc_assert (old_pos > ebb_head[BLOCK_TO_BB (check_bbi)]);
+
+ for (new_pos = ebb_head[BLOCK_TO_BB (bbi) + 1] - 1;
+ rgn_bb_table[new_pos] != bbi;
+ new_pos--);
+ new_pos++;
+ gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]);
+
+ gcc_assert (new_pos < old_pos);
+
+ memmove (rgn_bb_table + new_pos + 1,
+ rgn_bb_table + new_pos,
+ (old_pos - new_pos) * sizeof (*rgn_bb_table));
+
+ rgn_bb_table[new_pos] = check_bb_nexti;
+
+ for (i = BLOCK_TO_BB (bbi) + 1; i <= BLOCK_TO_BB (check_bbi); i++)
+ ebb_head[i]++;
+}
+
+/* Return the next block in the ebb chain. For the meaning of the
+ parameters, see sched-int.h: struct sched_info: advance_target_bb. */
+static basic_block
+advance_target_bb (basic_block bb, rtx insn)
+{
+ if (insn)
+ return 0;
+
+ gcc_assert (BLOCK_TO_BB (bb->index) == target_bb
+ && BLOCK_TO_BB (bb->next_bb->index) == target_bb);
+ return bb->next_bb;
+}
+
+#endif
+\f
+static bool
+gate_handle_sched (void)
+{
+#ifdef INSN_SCHEDULING
+ return flag_schedule_insns && dbg_cnt (sched_func);
+#else
+ return 0;
+#endif
+}
+
+/* Run instruction scheduler. */
+static unsigned int
+rest_of_handle_sched (void)
+{
+#ifdef INSN_SCHEDULING
+ if (flag_selective_scheduling
+ && ! maybe_skip_selective_scheduling ())
+ run_selective_scheduling ();
+ else
+ schedule_insns ();
+#endif
+ return 0;
}
+
+static bool
+gate_handle_sched2 (void)
+{
+#ifdef INSN_SCHEDULING
+ return optimize > 0 && flag_schedule_insns_after_reload
+ && dbg_cnt (sched2_func);
+#else
+ return 0;
+#endif
+}
+
+/* Run second scheduling pass after reload. */
+static unsigned int
+rest_of_handle_sched2 (void)
+{
+#ifdef INSN_SCHEDULING
+ if (flag_selective_scheduling2
+ && ! maybe_skip_selective_scheduling ())
+ run_selective_scheduling ();
+ else
+ {
+ /* Do control and data sched analysis again,
+ and write some more of the results to dump file. */
+ if (flag_sched2_use_superblocks)
+ schedule_ebbs ();
+ else
+ schedule_insns ();
+ }
#endif
+ return 0;
+}
+
+struct rtl_opt_pass pass_sched =
+{
+ {
+ RTL_PASS,
+ "sched1", /* name */
+ gate_handle_sched, /* gate */
+ rest_of_handle_sched, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_SCHED, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_df_finish | TODO_verify_rtl_sharing |
+ TODO_dump_func |
+ TODO_verify_flow |
+ TODO_ggc_collect /* todo_flags_finish */
+ }
+};
+
+struct rtl_opt_pass pass_sched2 =
+{
+ {
+ RTL_PASS,
+ "sched2", /* name */
+ gate_handle_sched2, /* gate */
+ rest_of_handle_sched2, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_SCHED2, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_df_finish | TODO_verify_rtl_sharing |
+ TODO_dump_func |
+ TODO_verify_flow |
+ TODO_ggc_collect /* todo_flags_finish */
+ }
+};