/* Instruction scheduling pass.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
/* This pass implements list scheduling within basic blocks. It is
run twice: (1) after flow analysis, but before register allocation,
\f
#include "config.h"
#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
-#include "basic-block.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "except.h"
#include "toplev.h"
#include "recog.h"
+#include "cfglayout.h"
+#include "params.h"
#include "sched-int.h"
+#include "target.h"
+#include "timevar.h"
+#include "tree-pass.h"
+
+/* Define when we want to count REG_DEAD notes before and after scheduling
+ for sanity checking. We can't do that when conditional execution is used,
+ as REG_DEAD notes exist only for unconditional deaths. */
+
+#if !defined (HAVE_conditional_execution) && defined (ENABLE_CHECKING)
+#define CHECK_DEAD_NOTES 1
+#else
+#define CHECK_DEAD_NOTES 0
+#endif
+
#ifdef INSN_SCHEDULING
/* Some accessor macros for h_i_d members only used within this file. */
#define FED_BY_SPEC_LOAD(insn) (h_i_d[INSN_UID (insn)].fed_by_spec_load)
#define IS_LOAD_INSN(insn) (h_i_d[INSN_UID (insn)].is_load_insn)
-#define MAX_RGN_BLOCKS 10
-#define MAX_RGN_INSNS 100
-
/* nr_inter/spec counts interblock/speculative motion for the function. */
static int nr_inter, nr_spec;
-/* Control flow graph edges are kept in circular lists. */
-typedef struct
-{
- int from_block;
- int to_block;
- int next_in;
- int next_out;
-}
-haifa_edge;
-static haifa_edge *edge_table;
-
-#define NEXT_IN(edge) (edge_table[edge].next_in)
-#define NEXT_OUT(edge) (edge_table[edge].next_out)
-#define FROM_BLOCK(edge) (edge_table[edge].from_block)
-#define TO_BLOCK(edge) (edge_table[edge].to_block)
-
-/* Number of edges in the control flow graph. (In fact, larger than
- that by 1, since edge 0 is unused.) */
-static int nr_edges;
-
-/* Circular list of incoming/outgoing edges of a block. */
-static int *in_edges;
-static int *out_edges;
-
-#define IN_EDGES(block) (in_edges[block])
-#define OUT_EDGES(block) (out_edges[block])
-
-static int is_cfg_nonregular PARAMS ((void));
-static int build_control_flow PARAMS ((struct edge_list *));
-static void new_edge PARAMS ((int, int));
+static int is_cfg_nonregular (void);
+static bool sched_is_disabled_for_current_region_p (void);
/* A region is the main entity for interblock scheduling: insns
are allowed to move between blocks in the same region, along
control flow graph edges, in the 'up' direction. */
typedef struct
{
- int rgn_nr_blocks; /* Number of blocks in region. */
- int rgn_blocks; /* cblocks in the region (actually index in rgn_bb_table). */
+ /* Number of extended basic blocks in region. */
+ int rgn_nr_blocks;
+ /* cblocks in the region (actually index in rgn_bb_table). */
+ int rgn_blocks;
+ /* Dependencies for this region are already computed; basically, this
+ indicates that the region is a recovery block. */
+ unsigned int dont_calc_deps : 1;
+ /* This region has at least one non-trivial ebb. */
+ unsigned int has_real_ebb : 1;
}
region;
/* Topological order of blocks in the region (if b2 is reachable from
b1, block_to_bb[b2] > block_to_bb[b1]). Note: A basic block is
always referred to by either block or b, while its topological
- order name (in the region) is refered to by bb. */
+ order name (in the region) is referred to by bb. */
static int *block_to_bb;
/* The number of the region containing a block. */
static int *containing_rgn;
+/* The minimum probability of reaching a source block so that it will be
+ considered for speculative scheduling. */
+static int min_spec_prob;
+
#define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks)
#define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks)
+#define RGN_DONT_CALC_DEPS(rgn) (rgn_table[rgn].dont_calc_deps)
+#define RGN_HAS_REAL_EBB(rgn) (rgn_table[rgn].has_real_ebb)
#define BLOCK_TO_BB(block) (block_to_bb[block])
#define CONTAINING_RGN(block) (containing_rgn[block])
-void debug_regions PARAMS ((void));
-static void find_single_block_region PARAMS ((void));
-static void find_rgns PARAMS ((struct edge_list *, sbitmap *));
-static int too_large PARAMS ((int, int *, int *));
+void debug_regions (void);
+static void find_single_block_region (void);
+static void find_rgns (void);
+static void extend_rgns (int *, int *, sbitmap, int *);
+static bool too_large (int, int *, int *);
-extern void debug_live PARAMS ((int, int));
+extern void debug_live (int, int);
/* Blocks of the current region being scheduled. */
static int current_nr_blocks;
static int current_blocks;
-/* The mapping from bb to block. */
-#define BB_TO_BLOCK(bb) (rgn_bb_table[current_blocks + (bb)])
-
-/* Bit vectors and bitset operations are needed for computations on
- the control flow graph. */
-
-typedef unsigned HOST_WIDE_INT *bitset;
-typedef struct
-{
- int *first_member; /* Pointer to the list start in bitlst_table. */
- int nr_members; /* The number of members of the bit list. */
-}
-bitlst;
-
-static int bitlst_table_last;
-static int bitlst_table_size;
-static int *bitlst_table;
+static int rgn_n_insns;
-static char bitset_member PARAMS ((bitset, int, int));
-static void extract_bitlst PARAMS ((bitset, int, int, bitlst *));
+/* The mapping from ebb to block. */
+/* ebb_head [i] is an index into rgn_bb_table, while
+ EBB_HEAD (i) is a basic block index.
+ BASIC_BLOCK (EBB_HEAD (i)) is the head of the ebb. */
+#define BB_TO_BLOCK(ebb) (rgn_bb_table[ebb_head[ebb]])
+#define EBB_FIRST_BB(ebb) BASIC_BLOCK (BB_TO_BLOCK (ebb))
+#define EBB_LAST_BB(ebb) BASIC_BLOCK (rgn_bb_table[ebb_head[ebb + 1] - 1])
/* Target info declarations.
while other blocks in the region from which insns can be moved to the
target are called "source" blocks. The candidate structure holds info
about such sources: are they valid? Speculative? Etc. */
-typedef bitlst bblst;
+typedef struct
+{
+ basic_block *first_member;
+ int nr_members;
+}
+bblst;
+
typedef struct
{
char is_valid;
Lists of split and update blocks for each candidate of the current
target are in array bblst_table. */
-static int *bblst_table, bblst_size, bblst_last;
+static basic_block *bblst_table;
+static int bblst_size, bblst_last;
#define IS_VALID(src) ( candidate_table[src].is_valid )
#define IS_SPECULATIVE(src) ( candidate_table[src].is_speculative )
static int target_bb;
/* List of edges. */
-typedef bitlst edgelst;
+typedef struct
+{
+ edge *first_member;
+ int nr_members;
+}
+edgelst;
-/* Target info functions. */
-static void split_edges PARAMS ((int, int, edgelst *));
-static void compute_trg_info PARAMS ((int));
-void debug_candidate PARAMS ((int));
-void debug_candidates PARAMS ((int));
+static edge *edgelst_table;
+static int edgelst_last;
-/* Bit-set of bbs, where bit 'i' stands for bb 'i'. */
-typedef bitset bbset;
+static void extract_edgelst (sbitmap, edgelst *);
-/* Number of words of the bbset. */
-static int bbset_size;
-/* Dominators array: dom[i] contains the bbset of dominators of
+/* Target info functions. */
+static void split_edges (int, int, edgelst *);
+static void compute_trg_info (int);
+void debug_candidate (int);
+void debug_candidates (int);
+
+/* Dominators array: dom[i] contains the sbitmap of dominators of
bb i in the region. */
-static bbset *dom;
+static sbitmap *dom;
/* bb 0 is the only region entry. */
#define IS_RGN_ENTRY(bb) (!bb)
/* Is bb_src dominated by bb_trg. */
#define IS_DOMINATED(bb_src, bb_trg) \
-( bitset_member (dom[bb_src], bb_trg, bbset_size) )
+( TEST_BIT (dom[bb_src], bb_trg) )
-/* Probability: Prob[i] is a float in [0, 1] which is the probability
- of bb i relative to the region entry. */
-static float *prob;
-
-/* The probability of bb_src, relative to bb_trg. Note, that while the
- 'prob[bb]' is a float in [0, 1], this macro returns an integer
- in [0, 100]. */
-#define GET_SRC_PROB(bb_src, bb_trg) ((int) (100.0 * (prob[bb_src] / \
- prob[bb_trg])))
+/* Probability: Prob[i] is an int in [0, REG_BR_PROB_BASE] which is
+ the probability of bb i relative to the region entry. */
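+/* For example, a value of REG_BR_PROB_BASE / 2 means bb i is expected to be
+ reached on about half of the executions of the region entry. */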
+static int *prob;
/* Bit-set of edges, where bit i stands for edge i. */
-typedef bitset edgeset;
+typedef sbitmap edgeset;
/* Number of edges in the region. */
static int rgn_nr_edges;
/* Array of size rgn_nr_edges. */
-static int *rgn_edges;
-
-/* Number of words in an edgeset. */
-static int edgeset_size;
-
-/* Number of bits in an edgeset. */
-static int edgeset_bitsize;
+static edge *rgn_edges;
/* Mapping from each edge in the graph to its number in the rgn. */
-static int *edge_to_bit;
-#define EDGE_TO_BIT(edge) (edge_to_bit[edge])
+#define EDGE_TO_BIT(edge) ((int)(size_t)(edge)->aux)
+#define SET_EDGE_TO_BIT(edge,nr) ((edge)->aux = (void *)(size_t)(nr))
/* The split edges of a source bb is different for each target
bb. In order to compute this efficiently, the 'potential-split edges'
/* For every bb, a set of its ancestor edges. */
static edgeset *ancestor_edges;
-static void compute_dom_prob_ps PARAMS ((int));
+/* Array of EBB sizes. Currently we can get an ebb only through
+ splitting of the block currently being scheduled, therefore we don't need
+ an ebb_head array for every region; it is sufficient to hold it only
+ for the current one. */
+static int *ebb_head;
+
+static void compute_dom_prob_ps (int);
-#define ABS_VALUE(x) (((x)<0)?(-(x)):(x))
#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
-/* Parameters affecting the decision of rank_for_schedule().
- ??? Nope. But MIN_PROBABILITY is used in copmute_trg_info. */
-#define MIN_DIFF_PRIORITY 2
-#define MIN_PROBABILITY 40
-#define MIN_PROB_DIFF 10
-
/* Speculative scheduling functions. */
-static int check_live_1 PARAMS ((int, rtx));
-static void update_live_1 PARAMS ((int, rtx));
-static int check_live PARAMS ((rtx, int));
-static void update_live PARAMS ((rtx, int));
-static void set_spec_fed PARAMS ((rtx));
-static int is_pfree PARAMS ((rtx, int, int));
-static int find_conditional_protection PARAMS ((rtx, int));
-static int is_conditionally_protected PARAMS ((rtx, int, int));
-static int may_trap_exp PARAMS ((rtx, int));
-static int haifa_classify_insn PARAMS ((rtx));
-static int is_prisky PARAMS ((rtx, int, int));
-static int is_exception_free PARAMS ((rtx, int, int));
-
-static void add_branch_dependences PARAMS ((rtx, rtx));
-static void compute_block_backward_dependences PARAMS ((int));
-void debug_dependencies PARAMS ((void));
-
-static void init_regions PARAMS ((void));
-static void schedule_region PARAMS ((int));
-static void propagate_deps PARAMS ((int, struct deps *));
-static void free_pending_lists PARAMS ((void));
+static int check_live_1 (int, rtx);
+static void update_live_1 (int, rtx);
+static int check_live (rtx, int);
+static void update_live (rtx, int);
+static void set_spec_fed (rtx);
+static int is_pfree (rtx, int, int);
+static int find_conditional_protection (rtx, int);
+static int is_conditionally_protected (rtx, int, int);
+static int is_prisky (rtx, int, int);
+static int is_exception_free (rtx, int, int);
+
+static bool sets_likely_spilled (rtx);
+static void sets_likely_spilled_1 (rtx, rtx, void *);
+static void add_branch_dependences (rtx, rtx);
+static void compute_block_backward_dependences (int);
+void debug_dependencies (void);
+
+static void init_regions (void);
+static void schedule_region (int);
+static rtx concat_INSN_LIST (rtx, rtx);
+static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
+static void propagate_deps (int, struct deps *);
+static void free_pending_lists (void);
/* Functions for construction of the control flow graph. */
/* Return 1 if control flow graph should not be constructed, 0 otherwise.
We decide not to build the control flow graph if there is possibly more
- than one entry to the function, if computed branches exist, of if we
- have nonlocal gotos. */
+ than one entry to the function, if computed branches exist, if we
+ have nonlocal gotos, or if we have an unreachable loop. */
static int
-is_cfg_nonregular ()
+is_cfg_nonregular (void)
{
- int b;
+ basic_block b;
rtx insn;
- RTX_CODE code;
/* If we have a label that could be the target of a nonlocal goto, then
the cfg is not well structured. */
if (forced_labels)
return 1;
- /* If this function has a computed jump, then we consider the cfg
- not well structured. */
- if (current_function_has_computed_jump)
- return 1;
-
/* If we have exception handlers, then we consider the cfg not well
structured. ?!? We should be able to handle this now that flow.c
computes an accurate cfg for EH. */
- if (exception_handler_labels)
+ if (current_function_has_exception_handlers ())
return 1;
/* If we have non-jumping insns which refer to labels, then we consider
the cfg not well structured. */
- /* Check for labels referred to other thn by jumps. */
- for (b = 0; b < n_basic_blocks; b++)
- for (insn = BLOCK_HEAD (b);; insn = NEXT_INSN (insn))
+ FOR_EACH_BB (b)
+ FOR_BB_INSNS (b, insn)
{
- code = GET_CODE (insn);
- if (GET_RTX_CLASS (code) == 'i' && code != JUMP_INSN)
+ /* Check for labels referred by non-jump insns. */
+ if (NONJUMP_INSN_P (insn) || CALL_P (insn))
{
rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
-
if (note
- && ! (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
+ && ! (JUMP_P (NEXT_INSN (insn))
&& find_reg_note (NEXT_INSN (insn), REG_LABEL,
XEXP (note, 0))))
return 1;
}
-
- if (insn == BLOCK_END (b))
- break;
+ /* If this function has a computed jump, then we consider the cfg
+ not well structured. */
+ else if (JUMP_P (insn) && computed_jump_p (insn))
+ return 1;
}
- /* All the tests passed. Consider the cfg well structured. */
- return 0;
-}
-
-/* Build the control flow graph and set nr_edges.
-
- Instead of trying to build a cfg ourselves, we rely on flow to
- do it for us. Stamp out useless code (and bug) duplication.
-
- Return nonzero if an irregularity in the cfg is found which would
- prevent cross block scheduling. */
-
-static int
-build_control_flow (edge_list)
- struct edge_list *edge_list;
-{
- int i, unreachable, num_edges;
-
- /* This already accounts for entry/exit edges. */
- num_edges = NUM_EDGES (edge_list);
-
/* Unreachable loops with more than one basic block are detected
during the DFS traversal in find_rgns.
Unreachable loops with a single block are detected here. This
test is redundant with the one in find_rgns, but it's much
- cheaper to go ahead and catch the trivial case here. */
- unreachable = 0;
- for (i = 0; i < n_basic_blocks; i++)
- {
- basic_block b = BASIC_BLOCK (i);
-
- if (b->pred == NULL
- || (b->pred->src == b
- && b->pred->pred_next == NULL))
- unreachable = 1;
- }
-
- /* ??? We can kill these soon. */
- in_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
- out_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
- edge_table = (haifa_edge *) xcalloc (num_edges, sizeof (haifa_edge));
-
- nr_edges = 0;
- for (i = 0; i < num_edges; i++)
- {
- edge e = INDEX_EDGE (edge_list, i);
-
- if (e->dest != EXIT_BLOCK_PTR
- && e->src != ENTRY_BLOCK_PTR)
- new_edge (e->src->index, e->dest->index);
- }
-
- /* Increment by 1, since edge 0 is unused. */
- nr_edges++;
-
- return unreachable;
-}
-
-/* Record an edge in the control flow graph from SOURCE to TARGET.
-
- In theory, this is redundant with the s_succs computed above, but
- we have not converted all of haifa to use information from the
- integer lists. */
-
-static void
-new_edge (source, target)
- int source, target;
-{
- int e, next_edge;
- int curr_edge, fst_edge;
-
- /* Check for duplicates. */
- fst_edge = curr_edge = OUT_EDGES (source);
- while (curr_edge)
- {
- if (FROM_BLOCK (curr_edge) == source
- && TO_BLOCK (curr_edge) == target)
- {
- return;
- }
-
- curr_edge = NEXT_OUT (curr_edge);
-
- if (fst_edge == curr_edge)
- break;
- }
-
- e = ++nr_edges;
-
- FROM_BLOCK (e) = source;
- TO_BLOCK (e) = target;
-
- if (OUT_EDGES (source))
- {
- next_edge = NEXT_OUT (OUT_EDGES (source));
- NEXT_OUT (OUT_EDGES (source)) = e;
- NEXT_OUT (e) = next_edge;
- }
- else
- {
- OUT_EDGES (source) = e;
- NEXT_OUT (e) = e;
- }
-
- if (IN_EDGES (target))
+ cheaper to go ahead and catch the trivial case here. */
+ FOR_EACH_BB (b)
{
- next_edge = NEXT_IN (IN_EDGES (target));
- NEXT_IN (IN_EDGES (target)) = e;
- NEXT_IN (e) = next_edge;
- }
- else
- {
- IN_EDGES (target) = e;
- NEXT_IN (e) = e;
+ if (EDGE_COUNT (b->preds) == 0
+ || (single_pred_p (b)
+ && single_pred (b) == b))
+ return 1;
}
-}
-
-/* BITSET macros for operations on the control flow graph. */
-
-/* Compute bitwise union of two bitsets. */
-#define BITSET_UNION(set1, set2, len) \
-do { register bitset tp = set1, sp = set2; \
- register int i; \
- for (i = 0; i < len; i++) \
- *(tp++) |= *(sp++); } while (0)
-
-/* Compute bitwise intersection of two bitsets. */
-#define BITSET_INTER(set1, set2, len) \
-do { register bitset tp = set1, sp = set2; \
- register int i; \
- for (i = 0; i < len; i++) \
- *(tp++) &= *(sp++); } while (0)
-
-/* Compute bitwise difference of two bitsets. */
-#define BITSET_DIFFER(set1, set2, len) \
-do { register bitset tp = set1, sp = set2; \
- register int i; \
- for (i = 0; i < len; i++) \
- *(tp++) &= ~*(sp++); } while (0)
-
-/* Inverts every bit of bitset 'set'. */
-#define BITSET_INVERT(set, len) \
-do { register bitset tmpset = set; \
- register int i; \
- for (i = 0; i < len; i++, tmpset++) \
- *tmpset = ~*tmpset; } while (0)
-
-/* Turn on the index'th bit in bitset set. */
-#define BITSET_ADD(set, index, len) \
-{ \
- if (index >= HOST_BITS_PER_WIDE_INT * len) \
- abort (); \
- else \
- set[index/HOST_BITS_PER_WIDE_INT] |= \
- ((unsigned HOST_WIDE_INT) 1) << (index % HOST_BITS_PER_WIDE_INT); \
-}
-
-/* Turn off the index'th bit in set. */
-#define BITSET_REMOVE(set, index, len) \
-{ \
- if (index >= HOST_BITS_PER_WIDE_INT * len) \
- abort (); \
- else \
- set[index/HOST_BITS_PER_WIDE_INT] &= \
- ~(((unsigned HOST_WIDE_INT) 1) << (index % HOST_BITS_PER_WIDE_INT)); \
-}
-
-/* Check if the index'th bit in bitset set is on. */
-static char
-bitset_member (set, index, len)
- bitset set;
- int index, len;
-{
- if (index >= HOST_BITS_PER_WIDE_INT * len)
- abort ();
- return ((set[index / HOST_BITS_PER_WIDE_INT] &
- ((unsigned HOST_WIDE_INT) 1) << (index % HOST_BITS_PER_WIDE_INT))
- ? 1 : 0);
+ /* All the tests passed. Consider the cfg well structured. */
+ return 0;
}
-/* Translate a bit-set SET to a list BL of the bit-set members. */
+/* Extract list of edges from a bitmap containing EDGE_TO_BIT bits. */
static void
-extract_bitlst (set, len, bitlen, bl)
- bitset set;
- int len;
- int bitlen;
- bitlst *bl;
+extract_edgelst (sbitmap set, edgelst *el)
{
- int i, j, offset;
- unsigned HOST_WIDE_INT word;
+ unsigned int i = 0;
+ sbitmap_iterator sbi;
- /* bblst table space is reused in each call to extract_bitlst. */
- bitlst_table_last = 0;
+ /* edgelst table space is reused in each call to extract_edgelst. */
+ edgelst_last = 0;
- bl->first_member = &bitlst_table[bitlst_table_last];
- bl->nr_members = 0;
+ el->first_member = &edgelst_table[edgelst_last];
+ el->nr_members = 0;
/* Iterate over each set bit in the bitset. */
- for (i = 0; i < len; i++)
+ EXECUTE_IF_SET_IN_SBITMAP (set, 0, i, sbi)
{
- word = set[i];
- offset = i * HOST_BITS_PER_WIDE_INT;
-
- /* Iterate over each bit in the word, but do not
- go beyond the end of the defined bits. */
- for (j = 0; offset < bitlen && word; j++)
- {
- if (word & 1)
- {
- bitlst_table[bitlst_table_last++] = offset;
- (bl->nr_members)++;
- }
- word >>= 1;
- ++offset;
- }
+ edgelst_table[edgelst_last++] = rgn_edges[i];
+ el->nr_members++;
}
-
}
/* Functions for the construction of regions. */
/* Print the regions, for debugging purposes. Callable from debugger. */
void
-debug_regions ()
+debug_regions (void)
{
int rgn, bb;
rgn_table[rgn].rgn_nr_blocks);
fprintf (sched_dump, ";;\tbb/block: ");
- for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
- {
- current_blocks = RGN_BLOCKS (rgn);
-
- if (bb != BLOCK_TO_BB (BB_TO_BLOCK (bb)))
- abort ();
+ /* We don't have ebb_head initialized yet, so we can't use
+ BB_TO_BLOCK (). */
+ current_blocks = RGN_BLOCKS (rgn);
- fprintf (sched_dump, " %d/%d ", bb, BB_TO_BLOCK (bb));
- }
+ for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
+ fprintf (sched_dump, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);
fprintf (sched_dump, "\n\n");
}
scheduling. */
static void
-find_single_block_region ()
+find_single_block_region (void)
{
- int i;
+ basic_block bb;
+
+ nr_regions = 0;
- for (i = 0; i < n_basic_blocks; i++)
+ FOR_EACH_BB (bb)
{
- rgn_bb_table[i] = i;
- RGN_NR_BLOCKS (i) = 1;
- RGN_BLOCKS (i) = i;
- CONTAINING_RGN (i) = i;
- BLOCK_TO_BB (i) = 0;
+ rgn_bb_table[nr_regions] = bb->index;
+ RGN_NR_BLOCKS (nr_regions) = 1;
+ RGN_BLOCKS (nr_regions) = nr_regions;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
+ CONTAINING_RGN (bb->index) = nr_regions;
+ BLOCK_TO_BB (bb->index) = 0;
+ nr_regions++;
}
- nr_regions = n_basic_blocks;
}
/* Update number of blocks and the estimate for number of insns
- in the region. Return 1 if the region is "too large" for interblock
- scheduling (compile time considerations), otherwise return 0. */
+ in the region. Return true if the region is "too large" for interblock
+ scheduling (compile time considerations). */
-static int
-too_large (block, num_bbs, num_insns)
- int block, *num_bbs, *num_insns;
+static bool
+too_large (int block, int *num_bbs, int *num_insns)
{
(*num_bbs)++;
- (*num_insns) += (INSN_LUID (BLOCK_END (block)) -
- INSN_LUID (BLOCK_HEAD (block)));
- if ((*num_bbs > MAX_RGN_BLOCKS) || (*num_insns > MAX_RGN_INSNS))
- return 1;
- else
- return 0;
+ (*num_insns) += (INSN_LUID (BB_END (BASIC_BLOCK (block)))
+ - INSN_LUID (BB_HEAD (BASIC_BLOCK (block))));
+
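+ /* The limits are controlled by the max-sched-region-blocks and
+ max-sched-region-insns params. */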
+ return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
+ || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
}
/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
is still an inner loop. Put in max_hdr[blk] the header of the most inner
loop containing blk. */
-#define UPDATE_LOOP_RELATIONS(blk, hdr) \
-{ \
- if (max_hdr[blk] == -1) \
- max_hdr[blk] = hdr; \
- else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
- RESET_BIT (inner, hdr); \
- else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
- { \
- RESET_BIT (inner,max_hdr[blk]); \
- max_hdr[blk] = hdr; \
- } \
+#define UPDATE_LOOP_RELATIONS(blk, hdr) \
+{ \
+ if (max_hdr[blk] == -1) \
+ max_hdr[blk] = hdr; \
+ else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
+ RESET_BIT (inner, hdr); \
+ else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
+ { \
+ RESET_BIT (inner,max_hdr[blk]); \
+ max_hdr[blk] = hdr; \
+ } \
}
/* Find regions for interblock scheduling.
of edge tables. That would simplify it somewhat. */
static void
-find_rgns (edge_list, dom)
- struct edge_list *edge_list;
- sbitmap *dom;
+find_rgns (void)
{
- int *max_hdr, *dfs_nr, *stack, *degree;
+ int *max_hdr, *dfs_nr, *degree;
char no_loops = 1;
int node, child, loop_head, i, head, tail;
- int count = 0, sp, idx = 0, current_edge = out_edges[0];
+ int count = 0, sp, idx = 0;
+ edge_iterator current_edge;
+ edge_iterator *stack;
int num_bbs, num_insns, unreachable;
int too_large_failure;
-
- /* Note if an edge has been passed. */
- sbitmap passed;
+ basic_block bb;
/* Note if a block is a natural loop header. */
sbitmap header;
- /* Note if a block is an natural inner loop header. */
+ /* Note if a block is a natural inner loop header. */
sbitmap inner;
/* Note if a block is in the block queue. */
sbitmap in_queue;
/* Note if a block is in the stack. */
sbitmap in_stack;
- int num_edges = NUM_EDGES (edge_list);
-
/* Perform a DFS traversal of the cfg. Identify loop headers, inner loops
and a mapping from block to its loop header (if the block is contained
in a loop, else -1).
STACK, SP and DFS_NR are only used during the first traversal. */
/* Allocate and initialize variables for the first traversal. */
- max_hdr = (int *) xmalloc (n_basic_blocks * sizeof (int));
- dfs_nr = (int *) xcalloc (n_basic_blocks, sizeof (int));
- stack = (int *) xmalloc (nr_edges * sizeof (int));
+ max_hdr = XNEWVEC (int, last_basic_block);
+ dfs_nr = XCNEWVEC (int, last_basic_block);
+ stack = XNEWVEC (edge_iterator, n_edges);
- inner = sbitmap_alloc (n_basic_blocks);
+ inner = sbitmap_alloc (last_basic_block);
sbitmap_ones (inner);
- header = sbitmap_alloc (n_basic_blocks);
+ header = sbitmap_alloc (last_basic_block);
sbitmap_zero (header);
- passed = sbitmap_alloc (nr_edges);
- sbitmap_zero (passed);
-
- in_queue = sbitmap_alloc (n_basic_blocks);
+ in_queue = sbitmap_alloc (last_basic_block);
sbitmap_zero (in_queue);
- in_stack = sbitmap_alloc (n_basic_blocks);
+ in_stack = sbitmap_alloc (last_basic_block);
sbitmap_zero (in_stack);
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = 0; i < last_basic_block; i++)
max_hdr[i] = -1;
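+ /* An edge is "passed" once the DFS has walked it. We mark this by
+ pointing the edge's aux field at the edge itself; an edge iterator
+ that has run off the end of its edge vector also counts as passed. */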
+ #define EDGE_PASSED(E) (ei_end_p ((E)) || ei_edge ((E))->aux)
+ #define SET_EDGE_PASSED(E) (ei_edge ((E))->aux = ei_edge ((E)))
+
/* DFS traversal to find inner loops in the cfg. */
+ current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR)->succs);
sp = -1;
+
while (1)
{
- if (current_edge == 0 || TEST_BIT (passed, current_edge))
+ if (EDGE_PASSED (current_edge))
{
/* We have reached a leaf node or a node that was already
processed. Pop edges off the stack until we find
an edge that has not yet been processed. */
- while (sp >= 0
- && (current_edge == 0 || TEST_BIT (passed, current_edge)))
+ while (sp >= 0 && EDGE_PASSED (current_edge))
{
/* Pop entry off the stack. */
current_edge = stack[sp--];
- node = FROM_BLOCK (current_edge);
- child = TO_BLOCK (current_edge);
+ node = ei_edge (current_edge)->src->index;
+ gcc_assert (node != ENTRY_BLOCK);
+ child = ei_edge (current_edge)->dest->index;
+ gcc_assert (child != EXIT_BLOCK);
RESET_BIT (in_stack, child);
if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
- current_edge = NEXT_OUT (current_edge);
+ ei_next (&current_edge);
}
/* See if have finished the DFS tree traversal. */
- if (sp < 0 && TEST_BIT (passed, current_edge))
+ if (sp < 0 && EDGE_PASSED (current_edge))
break;
/* Nope, continue the traversal with the popped node. */
}
/* Process a node. */
- node = FROM_BLOCK (current_edge);
- child = TO_BLOCK (current_edge);
+ node = ei_edge (current_edge)->src->index;
+ gcc_assert (node != ENTRY_BLOCK);
SET_BIT (in_stack, node);
dfs_nr[node] = ++count;
+ /* We don't traverse to the exit block. */
+ child = ei_edge (current_edge)->dest->index;
+ if (child == EXIT_BLOCK)
+ {
+ SET_EDGE_PASSED (current_edge);
+ ei_next (&current_edge);
+ continue;
+ }
+
/* If the successor is in the stack, then we've found a loop.
Mark the loop, if it is not a natural loop, then it will
be rejected during the second traversal. */
no_loops = 0;
SET_BIT (header, child);
UPDATE_LOOP_RELATIONS (node, child);
- SET_BIT (passed, current_edge);
- current_edge = NEXT_OUT (current_edge);
+ SET_EDGE_PASSED (current_edge);
+ ei_next (&current_edge);
continue;
}
{
if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
- SET_BIT (passed, current_edge);
- current_edge = NEXT_OUT (current_edge);
+ SET_EDGE_PASSED (current_edge);
+ ei_next (&current_edge);
continue;
}
/* Push an entry on the stack and continue DFS traversal. */
stack[++sp] = current_edge;
- SET_BIT (passed, current_edge);
- current_edge = OUT_EDGES (child);
-
- /* This is temporary until haifa is converted to use rth's new
- cfg routines which have true entry/exit blocks and the
- appropriate edges from/to those blocks.
-
- Generally we update dfs_nr for a node when we process its
- out edge. However, if the node has no out edge then we will
- not set dfs_nr for that node. This can confuse the scheduler
- into thinking that we have unreachable blocks, which in turn
- disables cross block scheduling.
-
- So, if we have a node with no out edges, go ahead and mark it
- as reachable now. */
- if (current_edge == 0)
- dfs_nr[child] = ++count;
+ SET_EDGE_PASSED (current_edge);
+ current_edge = ei_start (ei_edge (current_edge)->dest->succs);
+ }
+
+ /* Reset ->aux field used by EDGE_PASSED. */
+ FOR_ALL_BB (bb)
+ {
+ edge_iterator ei;
+ edge e;
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ e->aux = NULL;
}
+
/* Another check for unreachable blocks. The earlier test in
is_cfg_nonregular only finds unreachable blocks that do not
form a loop.
the entry node by placing a nonzero value in dfs_nr. Thus if
dfs_nr is zero for any block, then it must be unreachable. */
unreachable = 0;
- for (i = 0; i < n_basic_blocks; i++)
- if (dfs_nr[i] == 0)
+ FOR_EACH_BB (bb)
+ if (dfs_nr[bb->index] == 0)
{
unreachable = 1;
break;
to hold degree counts. */
degree = dfs_nr;
- for (i = 0; i < n_basic_blocks; i++)
- degree[i] = 0;
- for (i = 0; i < num_edges; i++)
- {
- edge e = INDEX_EDGE (edge_list, i);
-
- if (e->dest != EXIT_BLOCK_PTR)
- degree[e->dest->index]++;
- }
+ FOR_EACH_BB (bb)
+ degree[bb->index] = EDGE_COUNT (bb->preds);
/* Do not perform region scheduling if there are any unreachable
blocks. */
if (!unreachable)
{
- int *queue;
+ int *queue, *degree1 = NULL;
+ /* We use EXTENDED_RGN_HEADER as an addition to HEADER; basic blocks
+ recorded in it are forced to be region heads.
+ This is done to try to assemble a few smaller regions
+ from a too_large region. */
+ sbitmap extended_rgn_header = NULL;
+ bool extend_regions_p;
if (no_loops)
SET_BIT (header, 0);
- /* Second travsersal:find reducible inner loops and topologically sort
+ /* Second traversal: find reducible inner loops and topologically sort
blocks of each region. */
- queue = (int *) xmalloc (n_basic_blocks * sizeof (int));
+ queue = XNEWVEC (int, n_basic_blocks);
+
+ extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
+ if (extend_regions_p)
+ {
+ degree1 = xmalloc (last_basic_block * sizeof (int));
+ extended_rgn_header = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (extended_rgn_header);
+ }
/* Find blocks which are inner loop headers. We still have non-reducible
loops to consider at this point. */
- for (i = 0; i < n_basic_blocks; i++)
+ FOR_EACH_BB (bb)
{
- if (TEST_BIT (header, i) && TEST_BIT (inner, i))
+ if (TEST_BIT (header, bb->index) && TEST_BIT (inner, bb->index))
{
edge e;
- int j;
+ edge_iterator ei;
+ basic_block jbb;
/* Now check that the loop is reducible. We do this separate
from finding inner loops so that we do not find a reducible
If there exists a block that is not dominated by the loop
header, then the block is reachable from outside the loop
and thus the loop is not a natural loop. */
- for (j = 0; j < n_basic_blocks; j++)
+ FOR_EACH_BB (jbb)
{
/* First identify blocks in the loop, except for the loop
entry block. */
- if (i == max_hdr[j] && i != j)
+ if (bb->index == max_hdr[jbb->index] && bb != jbb)
{
/* Now verify that the block is dominated by the loop
header. */
- if (!TEST_BIT (dom[j], i))
+ if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
break;
}
}
/* If we exited the loop early, then I is the header of
a non-reducible loop and we should quit processing it
now. */
- if (j != n_basic_blocks)
+ if (jbb != EXIT_BLOCK_PTR)
continue;
/* I is a header of an inner loop, or block 0 in a subroutine
with no loops at all. */
head = tail = -1;
too_large_failure = 0;
- loop_head = max_hdr[i];
-
+ loop_head = max_hdr[bb->index];
+
+ if (extend_regions_p)
+ /* We save the degree in case we meet a too_large region and
+ cancel it. We need a correct degree later when
+ calling extend_rgns. */
+ memcpy (degree1, degree, last_basic_block * sizeof (int));
+
/* Decrease degree of all I's successors for topological
ordering. */
- for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next)
+ FOR_EACH_EDGE (e, ei, bb->succs)
if (e->dest != EXIT_BLOCK_PTR)
--degree[e->dest->index];
/* Estimate # insns, and count # blocks in the region. */
num_bbs = 1;
- num_insns = (INSN_LUID (BLOCK_END (i))
- - INSN_LUID (BLOCK_HEAD (i)));
+ num_insns = (INSN_LUID (BB_END (bb))
+ - INSN_LUID (BB_HEAD (bb)));
/* Find all loop latches (blocks with back edges to the loop
header) or, if the cfg has no loops, all the leaf blocks.
Place those blocks into the queue. */
if (no_loops)
{
- for (j = 0; j < n_basic_blocks; j++)
+ FOR_EACH_BB (jbb)
/* Leaf nodes have only a single successor which must
be EXIT_BLOCK. */
- if (BASIC_BLOCK (j)->succ
- && BASIC_BLOCK (j)->succ->dest == EXIT_BLOCK_PTR
- && BASIC_BLOCK (j)->succ->succ_next == NULL)
+ if (single_succ_p (jbb)
+ && single_succ (jbb) == EXIT_BLOCK_PTR)
{
- queue[++tail] = j;
- SET_BIT (in_queue, j);
+ queue[++tail] = jbb->index;
+ SET_BIT (in_queue, jbb->index);
- if (too_large (j, &num_bbs, &num_insns))
+ if (too_large (jbb->index, &num_bbs, &num_insns))
{
too_large_failure = 1;
break;
{
edge e;
- for (e = BASIC_BLOCK (i)->pred; e; e = e->pred_next)
+ FOR_EACH_EDGE (e, ei, bb->preds)
{
if (e->src == ENTRY_BLOCK_PTR)
continue;
node = e->src->index;
- if (max_hdr[node] == loop_head && node != i)
+ if (max_hdr[node] == loop_head && node != bb->index)
{
/* This is a loop latch. */
queue[++tail] = node;
d b
The algorithm in the DFS traversal may not mark B & D as part
- of the loop (ie they will not have max_hdr set to A).
+ of the loop (i.e. they will not have max_hdr set to A).
We know they can not be loop latches (else they would have
had max_hdr set since they'd have a backedge to a dominator
edge e;
child = queue[++head];
- for (e = BASIC_BLOCK (child)->pred; e; e = e->pred_next)
+ FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->preds)
{
node = e->src->index;
tail = -1;
break;
}
- else if (!TEST_BIT (in_queue, node) && node != i)
+ else if (!TEST_BIT (in_queue, node) && node != bb->index)
{
queue[++tail] = node;
SET_BIT (in_queue, node);
if (tail >= 0 && !too_large_failure)
{
/* Place the loop header into list of region blocks. */
- degree[i] = -1;
- rgn_bb_table[idx] = i;
+ degree[bb->index] = -1;
+ rgn_bb_table[idx] = bb->index;
RGN_NR_BLOCKS (nr_regions) = num_bbs;
RGN_BLOCKS (nr_regions) = idx++;
- CONTAINING_RGN (i) = nr_regions;
- BLOCK_TO_BB (i) = count = 0;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
+ CONTAINING_RGN (bb->index) = nr_regions;
+ BLOCK_TO_BB (bb->index) = count = 0;
/* Remove blocks from queue[] when their in degree
becomes zero. Repeat until no blocks are left on the
CONTAINING_RGN (child) = nr_regions;
queue[head] = queue[tail--];
- for (e = BASIC_BLOCK (child)->succ;
- e;
- e = e->succ_next)
+ FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->succs)
if (e->dest != EXIT_BLOCK_PTR)
--degree[e->dest->index];
}
}
++nr_regions;
}
+ else if (extend_regions_p)
+ {
+ /* Restore DEGREE. */
+ int *t = degree;
+
+ degree = degree1;
+ degree1 = t;
+
+ /* And force successors of BB to be region heads.
+ This may provide several smaller regions instead
+ of one too_large region. */
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (e->dest != EXIT_BLOCK_PTR)
+ SET_BIT (extended_rgn_header, e->dest->index);
+ }
}
}
free (queue);
+
+ if (extend_regions_p)
+ {
+ free (degree1);
+
+ sbitmap_a_or_b (header, header, extended_rgn_header);
+ sbitmap_free (extended_rgn_header);
+
+ extend_rgns (degree, &idx, header, max_hdr);
+ }
}
/* Any block that did not end up in a region is placed into a region
by itself. */
- for (i = 0; i < n_basic_blocks; i++)
- if (degree[i] >= 0)
+ FOR_EACH_BB (bb)
+ if (degree[bb->index] >= 0)
{
- rgn_bb_table[idx] = i;
+ rgn_bb_table[idx] = bb->index;
RGN_NR_BLOCKS (nr_regions) = 1;
RGN_BLOCKS (nr_regions) = idx++;
- CONTAINING_RGN (i) = nr_regions++;
- BLOCK_TO_BB (i) = 0;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
+ CONTAINING_RGN (bb->index) = nr_regions++;
+ BLOCK_TO_BB (bb->index) = 0;
}
free (max_hdr);
- free (dfs_nr);
+ free (degree);
free (stack);
- free (passed);
- free (header);
- free (inner);
- free (in_queue);
- free (in_stack);
+ sbitmap_free (header);
+ sbitmap_free (inner);
+ sbitmap_free (in_queue);
+ sbitmap_free (in_stack);
+}
+
+static int gather_region_statistics (int **);
+static void print_region_statistics (int *, int, int *, int);
+
+/* Calculate the histogram that shows the number of regions having the
+ given number of basic blocks, and store it in the RSP array. Return
+ the size of this array. */
+static int
+gather_region_statistics (int **rsp)
+{
+ int i, *a = 0, a_sz = 0;
+
+ /* a[i] is the number of regions that have (i + 1) basic blocks. */
+ for (i = 0; i < nr_regions; i++)
+ {
+ int nr_blocks = RGN_NR_BLOCKS (i);
+
+ gcc_assert (nr_blocks >= 1);
+
+ if (nr_blocks > a_sz)
+ {
+ a = xrealloc (a, nr_blocks * sizeof (*a));
+ do
+ a[a_sz++] = 0;
+ while (a_sz != nr_blocks);
+ }
+
+ a[nr_blocks - 1]++;
+ }
+
+ *rsp = a;
+ return a_sz;
+}
+
+/* Print regions statistics. S1 and S2 denote the data before and after
+ calling extend_rgns, respectively. */
+static void
+print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz)
+{
+ int i;
+
+ /* We iterate until s2_sz because extend_rgns does not decrease
+ the maximal region size. */
+ for (i = 1; i < s2_sz; i++)
+ {
+ int n1, n2;
+
+ n2 = s2[i];
+
+ if (n2 == 0)
+ continue;
+
+ if (i >= s1_sz)
+ n1 = 0;
+ else
+ n1 = s1[i];
+
+ fprintf (sched_dump, ";; Region extension statistics: size %d: " \
+ "was %d + %d more\n", i + 1, n1, n2 - n1);
+ }
+}
+
+/* Extend regions.
+ DEGREE - Array of incoming edge counts, considering only
+ the edges that don't have their sources in already formed regions.
+ IDXP - pointer to the next available index in rgn_bb_table.
+ HEADER - set of all region heads.
+ LOOP_HDR - mapping from block to the containing loop
+ (two blocks can reside within one region if they have
+ the same loop header). */
+static void
+extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
+{
+ int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
+ int nblocks = n_basic_blocks - NUM_FIXED_BLOCKS;
+
+ max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);
+
+ max_hdr = xmalloc (last_basic_block * sizeof (*max_hdr));
+
+ order = xmalloc (last_basic_block * sizeof (*order));
+ post_order_compute (order, false);
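+
+ /* ORDER now holds the blocks in post order; walking it from the last
+ element to the first visits them in reverse post order, i.e.
+ topologically from the top of the CFG downwards. */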
+
+ for (i = nblocks - 1; i >= 0; i--)
+ {
+ int bbn = order[i];
+ if (degree[bbn] >= 0)
+ {
+ max_hdr[bbn] = bbn;
+ rescan = 1;
+ }
+ else
+ /* This block was already processed in find_rgns. */
+ max_hdr[bbn] = -1;
+ }
+
+ /* The idea is to topologically walk through the CFG in top-down order.
+ During the traversal, if all the predecessors of a node are
+ marked to be in the same region (they all have the same max_hdr),
+ then the current node is also marked to be a part of that region.
+ Otherwise the node starts its own region.
+ The CFG should be traversed until no further changes are made. On each
+ iteration the set of the region heads is extended (the set of those
+ blocks that have max_hdr[bbi] == bbi). This set is upper bounded by the
+ set of all basic blocks, thus the algorithm is guaranteed to terminate. */
+
+ while (rescan && iter < max_iter)
+ {
+ rescan = 0;
+
+ for (i = nblocks - 1; i >= 0; i--)
+ {
+ edge e;
+ edge_iterator ei;
+ int bbn = order[i];
+
+ if (max_hdr[bbn] != -1 && !TEST_BIT (header, bbn))
+ {
+ int hdr = -1;
+
+ FOR_EACH_EDGE (e, ei, BASIC_BLOCK (bbn)->preds)
+ {
+ int predn = e->src->index;
+
+ if (predn != ENTRY_BLOCK
+ /* If pred wasn't processed in find_rgns. */
+ && max_hdr[predn] != -1
+ /* And pred and bb reside in the same loop.
+ (Or out of any loop). */
+ && loop_hdr[bbn] == loop_hdr[predn])
+ {
+ if (hdr == -1)
+ /* Then bb extends the containing region of pred. */
+ hdr = max_hdr[predn];
+ else if (hdr != max_hdr[predn])
+ /* Too bad, there are at least two predecessors
+ that reside in different regions. Thus, BB should
+ begin its own region. */
+ {
+ hdr = bbn;
+ break;
+ }
+ }
+ else
+ /* BB starts its own region. */
+ {
+ hdr = bbn;
+ break;
+ }
+ }
+
+ if (hdr == bbn)
+ {
+ /* If BB starts its own region,
+ update the set of headers with BB. */
+ SET_BIT (header, bbn);
+ rescan = 1;
+ }
+ else
+ gcc_assert (hdr != -1);
+
+ max_hdr[bbn] = hdr;
+ }
+ }
+
+ iter++;
+ }
+
+ /* Statistics were gathered on the SPEC2000 package of tests with
+ mainline weekly snapshot gcc-4.1-20051015 on ia64.
+
+ Statistics for SPECint:
+ 1 iteration : 1751 cases (38.7%)
+ 2 iterations: 2770 cases (61.3%)
+ Blocks wrapped in regions by find_rgns without extension: 18295 blocks
+ Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks
+ (We don't count single block regions here).
+
+ Statistics for SPECfp:
+ 1 iteration : 621 cases (35.9%)
+ 2 iterations: 1110 cases (64.1%)
+ Blocks wrapped in regions by find_rgns without extension: 6476 blocks
+ Blocks wrapped in regions by 2 iterations in extend_rgns: 11155 blocks
+ (We don't count single block regions here).
+
+ By default we do at most 2 iterations.
+ This can be overridden with max-sched-extend-regions-iters parameter:
+ 0 - disable region extension,
+ N > 0 - do at most N iterations. */
+
+ if (sched_verbose && iter != 0)
+ fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter,
+ rescan ? "... failed" : "");
+
+ if (!rescan && iter != 0)
+ {
+ int *s1 = NULL, s1_sz = 0;
+
+ /* Save the old statistics for later printout. */
+ if (sched_verbose >= 6)
+ s1_sz = gather_region_statistics (&s1);
+
+ /* We have succeeded. Now assemble the regions. */
+ for (i = nblocks - 1; i >= 0; i--)
+ {
+ int bbn = order[i];
+
+ if (max_hdr[bbn] == bbn)
+ /* BBN is a region head. */
+ {
+ edge e;
+ edge_iterator ei;
+ int num_bbs = 0, j, num_insns = 0, large;
+
+ large = too_large (bbn, &num_bbs, &num_insns);
+
+ degree[bbn] = -1;
+ rgn_bb_table[idx] = bbn;
+ RGN_BLOCKS (nr_regions) = idx++;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
+ CONTAINING_RGN (bbn) = nr_regions;
+ BLOCK_TO_BB (bbn) = 0;
+
+ FOR_EACH_EDGE (e, ei, BASIC_BLOCK (bbn)->succs)
+ if (e->dest != EXIT_BLOCK_PTR)
+ degree[e->dest->index]--;
+
+ if (!large)
+ /* Here we check whether the region is too_large. */
+ for (j = i - 1; j >= 0; j--)
+ {
+ int succn = order[j];
+ if (max_hdr[succn] == bbn)
+ {
+ if ((large = too_large (succn, &num_bbs, &num_insns)))
+ break;
+ }
+ }
+
+ if (large)
+ /* If the region is too_large, then wrap every block of
+ the region into a single block region.
+ Here we wrap the region head only. The other blocks are
+ processed in the loop below. */
+ {
+ RGN_NR_BLOCKS (nr_regions) = 1;
+ nr_regions++;
+ }
+
+ num_bbs = 1;
+
+ for (j = i - 1; j >= 0; j--)
+ {
+ int succn = order[j];
+
+ if (max_hdr[succn] == bbn)
+ /* This loop iterates over all basic blocks that
+ are supposed to be in the region with head BBN,
+ and wraps them into that region (or into a single
+ block region). */
+ {
+ gcc_assert (degree[succn] == 0);
+
+ degree[succn] = -1;
+ rgn_bb_table[idx] = succn;
+ BLOCK_TO_BB (succn) = large ? 0 : num_bbs++;
+ CONTAINING_RGN (succn) = nr_regions;
+
+ if (large)
+ /* Wrap SUCCN into a single block region. */
+ {
+ RGN_BLOCKS (nr_regions) = idx;
+ RGN_NR_BLOCKS (nr_regions) = 1;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
+ nr_regions++;
+ }
+
+ idx++;
+
+ FOR_EACH_EDGE (e, ei, BASIC_BLOCK (succn)->succs)
+ if (e->dest != EXIT_BLOCK_PTR)
+ degree[e->dest->index]--;
+ }
+ }
+
+ if (!large)
+ {
+ RGN_NR_BLOCKS (nr_regions) = num_bbs;
+ nr_regions++;
+ }
+ }
+ }
+
+ if (sched_verbose >= 6)
+ {
+ int *s2, s2_sz;
+
+ /* Get the new statistics and print the comparison with the
+ one before calling this function. */
+ s2_sz = gather_region_statistics (&s2);
+ print_region_statistics (s1, s1_sz, s2, s2_sz);
+ free (s1);
+ free (s2);
+ }
+ }
+
+ free (order);
+ free (max_hdr);
+
+ *idxp = idx;
}
/* Functions for regions scheduling information. */
Assume that these values were already computed for bb's predecessors. */
static void
-compute_dom_prob_ps (bb)
- int bb;
+compute_dom_prob_ps (int bb)
{
- int nxt_in_edge, fst_in_edge, pred;
- int fst_out_edge, nxt_out_edge, nr_out_edges, nr_rgn_out_edges;
+ edge_iterator in_ei;
+ edge in_edge;
- prob[bb] = 0.0;
+ /* We shouldn't have any real ebbs yet. */
+ gcc_assert (ebb_head [bb] == bb + current_blocks);
+
if (IS_RGN_ENTRY (bb))
{
- BITSET_ADD (dom[bb], 0, bbset_size);
- prob[bb] = 1.0;
+ SET_BIT (dom[bb], 0);
+ prob[bb] = REG_BR_PROB_BASE;
return;
}
- fst_in_edge = nxt_in_edge = IN_EDGES (BB_TO_BLOCK (bb));
+ prob[bb] = 0;
- /* Intialize dom[bb] to '111..1'. */
- BITSET_INVERT (dom[bb], bbset_size);
+ /* Initialize dom[bb] to '111..1'. */
+ sbitmap_ones (dom[bb]);
- do
+ FOR_EACH_EDGE (in_edge, in_ei, BASIC_BLOCK (BB_TO_BLOCK (bb))->preds)
{
- pred = FROM_BLOCK (nxt_in_edge);
- BITSET_INTER (dom[bb], dom[BLOCK_TO_BB (pred)], bbset_size);
-
- BITSET_UNION (ancestor_edges[bb], ancestor_edges[BLOCK_TO_BB (pred)],
- edgeset_size);
-
- BITSET_ADD (ancestor_edges[bb], EDGE_TO_BIT (nxt_in_edge), edgeset_size);
+ int pred_bb;
+ edge out_edge;
+ edge_iterator out_ei;
- nr_out_edges = 1;
- nr_rgn_out_edges = 0;
- fst_out_edge = OUT_EDGES (pred);
- nxt_out_edge = NEXT_OUT (fst_out_edge);
- BITSET_UNION (pot_split[bb], pot_split[BLOCK_TO_BB (pred)],
- edgeset_size);
+ if (in_edge->src == ENTRY_BLOCK_PTR)
+ continue;
- BITSET_ADD (pot_split[bb], EDGE_TO_BIT (fst_out_edge), edgeset_size);
+ pred_bb = BLOCK_TO_BB (in_edge->src->index);
+ sbitmap_a_and_b (dom[bb], dom[bb], dom[pred_bb]);
+ sbitmap_a_or_b (ancestor_edges[bb],
+ ancestor_edges[bb], ancestor_edges[pred_bb]);
- /* The successor doesn't belong in the region? */
- if (CONTAINING_RGN (TO_BLOCK (fst_out_edge)) !=
- CONTAINING_RGN (BB_TO_BLOCK (bb)))
- ++nr_rgn_out_edges;
+ SET_BIT (ancestor_edges[bb], EDGE_TO_BIT (in_edge));
- while (fst_out_edge != nxt_out_edge)
- {
- ++nr_out_edges;
- /* The successor doesn't belong in the region? */
- if (CONTAINING_RGN (TO_BLOCK (nxt_out_edge)) !=
- CONTAINING_RGN (BB_TO_BLOCK (bb)))
- ++nr_rgn_out_edges;
- BITSET_ADD (pot_split[bb], EDGE_TO_BIT (nxt_out_edge), edgeset_size);
- nxt_out_edge = NEXT_OUT (nxt_out_edge);
+ sbitmap_a_or_b (pot_split[bb], pot_split[bb], pot_split[pred_bb]);
- }
+ FOR_EACH_EDGE (out_edge, out_ei, in_edge->src->succs)
+ SET_BIT (pot_split[bb], EDGE_TO_BIT (out_edge));
- /* Now nr_rgn_out_edges is the number of region-exit edges from
- pred, and nr_out_edges will be the number of pred out edges
- not leaving the region. */
- nr_out_edges -= nr_rgn_out_edges;
- if (nr_rgn_out_edges > 0)
- prob[bb] += 0.9 * prob[BLOCK_TO_BB (pred)] / nr_out_edges;
- else
- prob[bb] += prob[BLOCK_TO_BB (pred)] / nr_out_edges;
- nxt_in_edge = NEXT_IN (nxt_in_edge);
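+ /* Accumulate the probability of reaching BB: the predecessor's
+ reaching probability scaled by the incoming edge's branch
+ probability, kept in REG_BR_PROB_BASE fixed-point units. */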
+ prob[bb] += ((prob[pred_bb] * in_edge->probability) / REG_BR_PROB_BASE);
}
- while (fst_in_edge != nxt_in_edge);
- BITSET_ADD (dom[bb], bb, bbset_size);
- BITSET_DIFFER (pot_split[bb], ancestor_edges[bb], edgeset_size);
+ SET_BIT (dom[bb], bb);
+ sbitmap_difference (pot_split[bb], pot_split[bb], ancestor_edges[bb]);
if (sched_verbose >= 2)
fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
- (int) (100.0 * prob[bb]));
+ (100 * prob[bb]) / REG_BR_PROB_BASE);
}
/* Functions for target info. */
Note that bb_trg dominates bb_src. */
static void
-split_edges (bb_src, bb_trg, bl)
- int bb_src;
- int bb_trg;
- edgelst *bl;
-{
- int es = edgeset_size;
- edgeset src = (edgeset) xcalloc (es, sizeof (HOST_WIDE_INT));
-
- while (es--)
- src[es] = (pot_split[bb_src])[es];
- BITSET_DIFFER (src, pot_split[bb_trg], edgeset_size);
- extract_bitlst (src, edgeset_size, edgeset_bitsize, bl);
- free (src);
+split_edges (int bb_src, int bb_trg, edgelst *bl)
+{
+ sbitmap src = sbitmap_alloc (pot_split[bb_src]->n_bits);
+ sbitmap_copy (src, pot_split[bb_src]);
+
+ sbitmap_difference (src, src, pot_split[bb_trg]);
+ extract_edgelst (src, bl);
+ sbitmap_free (src);
}
/* Find the valid candidate-source-blocks for the target block TRG, compute
For speculative sources, compute their update-blocks and split-blocks. */
static void
-compute_trg_info (trg)
- int trg;
+compute_trg_info (int trg)
{
- register candidate *sp;
+ candidate *sp;
edgelst el;
- int check_block, update_idx;
- int i, j, k, fst_edge, nxt_edge;
+ int i, j, k, update_idx;
+ basic_block block;
+ sbitmap visited;
+ edge_iterator ei;
+ edge e;
/* Define some of the fields for the target bb as well. */
sp = candidate_table + trg;
sp->is_valid = 1;
sp->is_speculative = 0;
- sp->src_prob = 100;
+ sp->src_prob = REG_BR_PROB_BASE;
+
+ visited = sbitmap_alloc (last_basic_block);
for (i = trg + 1; i < current_nr_blocks; i++)
{
sp->is_valid = IS_DOMINATED (i, trg);
if (sp->is_valid)
{
- sp->src_prob = GET_SRC_PROB (i, trg);
- sp->is_valid = (sp->src_prob >= MIN_PROBABILITY);
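+ /* src_prob is the probability of reaching block i relative to
+ reaching the target block, in REG_BR_PROB_BASE fixed-point units. */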
+ int tf = prob[trg], cf = prob[i];
+
+ /* In CFGs with low probability edges TF can possibly be zero. */
+ sp->src_prob = (tf ? ((cf * REG_BR_PROB_BASE) / tf) : 0);
+ sp->is_valid = (sp->src_prob >= min_spec_prob);
}
if (sp->is_valid)
if (sp->is_valid)
{
- char *update_blocks;
-
/* Compute split blocks and store them in bblst_table.
The TO block of every split edge is a split block. */
sp->split_bbs.first_member = &bblst_table[bblst_last];
sp->split_bbs.nr_members = el.nr_members;
for (j = 0; j < el.nr_members; bblst_last++, j++)
- bblst_table[bblst_last] =
- TO_BLOCK (rgn_edges[el.first_member[j]]);
+ bblst_table[bblst_last] = el.first_member[j]->dest;
sp->update_bbs.first_member = &bblst_table[bblst_last];
/* Compute update blocks and store them in bblst_table.
add the TO block to the update block list. This list can end
up with a lot of duplicates. We need to weed them out to avoid
overrunning the end of the bblst_table. */
- update_blocks = (char *) alloca (n_basic_blocks);
- memset (update_blocks, 0, n_basic_blocks);
update_idx = 0;
+ sbitmap_zero (visited);
for (j = 0; j < el.nr_members; j++)
{
- check_block = FROM_BLOCK (rgn_edges[el.first_member[j]]);
- fst_edge = nxt_edge = OUT_EDGES (check_block);
- do
+ block = el.first_member[j]->src;
+ FOR_EACH_EDGE (e, ei, block->succs)
{
- if (! update_blocks[TO_BLOCK (nxt_edge)])
+ if (!TEST_BIT (visited, e->dest->index))
{
for (k = 0; k < el.nr_members; k++)
- if (EDGE_TO_BIT (nxt_edge) == el.first_member[k])
+ if (e == el.first_member[k])
break;
if (k >= el.nr_members)
{
- bblst_table[bblst_last++] = TO_BLOCK (nxt_edge);
- update_blocks[TO_BLOCK (nxt_edge)] = 1;
+ bblst_table[bblst_last++] = e->dest;
+ SET_BIT (visited, e->dest->index);
update_idx++;
}
}
-
- nxt_edge = NEXT_OUT (nxt_edge);
}
- while (fst_edge != nxt_edge);
}
sp->update_bbs.nr_members = update_idx;
/* Make sure we didn't overrun the end of bblst_table. */
- if (bblst_last > bblst_size)
- abort ();
+ gcc_assert (bblst_last <= bblst_size);
}
else
{
sp->src_prob = 0;
}
}
+
+ sbitmap_free (visited);
}
/* Print candidates info, for debugging purposes. Callable from debugger. */
void
-debug_candidate (i)
- int i;
+debug_candidate (int i)
{
if (!candidate_table[i].is_valid)
return;
fprintf (sched_dump, "split path: ");
for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
{
- int b = candidate_table[i].split_bbs.first_member[j];
+ int b = candidate_table[i].split_bbs.first_member[j]->index;
fprintf (sched_dump, " %d ", b);
}
fprintf (sched_dump, "update path: ");
for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
{
- int b = candidate_table[i].update_bbs.first_member[j];
+ int b = candidate_table[i].update_bbs.first_member[j]->index;
fprintf (sched_dump, " %d ", b);
}
/* Print candidates info, for debugging purposes. Callable from debugger. */
void
-debug_candidates (trg)
- int trg;
+debug_candidates (int trg)
{
int i;
debug_candidate (i);
}
-/* Functions for speculative scheduing. */
+/* Functions for speculative scheduling. */
/* Return 0 if x is a set of a register alive in the beginning of one
of the split-blocks of src, otherwise return 1. */
static int
-check_live_1 (src, x)
- int src;
- rtx x;
+check_live_1 (int src, rtx x)
{
- register int i;
- register int regno;
- register rtx reg = SET_DEST (x);
+ int i;
+ int regno;
+ rtx reg = SET_DEST (x);
if (reg == 0)
return 1;
- while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
- || GET_CODE (reg) == SIGN_EXTRACT
+ while (GET_CODE (reg) == SUBREG
+ || GET_CODE (reg) == ZERO_EXTRACT
|| GET_CODE (reg) == STRICT_LOW_PART)
reg = XEXP (reg, 0);
if (GET_CODE (reg) == PARALLEL)
{
- register int i;
+ int i;
for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
return 0;
}
- if (GET_CODE (reg) != REG)
+ if (!REG_P (reg))
return 1;
regno = REGNO (reg);
if (regno < FIRST_PSEUDO_REGISTER)
{
/* Check for hard registers. */
- int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ int j = hard_regno_nregs[regno][GET_MODE (reg)];
while (--j >= 0)
{
for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
{
- int b = candidate_table[src].split_bbs.first_member[i];
-
- if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start,
- regno + j))
+ basic_block b = candidate_table[src].split_bbs.first_member[i];
+
+ /* We can have split blocks that were recently generated;
+ such blocks are always outside the current region. */
+ gcc_assert (glat_start[b->index]
+ || CONTAINING_RGN (b->index)
+ != CONTAINING_RGN (BB_TO_BLOCK (src)));
+ if (!glat_start[b->index]
+ || REGNO_REG_SET_P (glat_start[b->index],
+ regno + j))
{
return 0;
}
}
else
{
- /* Check for psuedo registers. */
+ /* Check for pseudo registers. */
for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
{
- int b = candidate_table[src].split_bbs.first_member[i];
+ basic_block b = candidate_table[src].split_bbs.first_member[i];
- if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start, regno))
+ gcc_assert (glat_start[b->index]
+ || CONTAINING_RGN (b->index)
+ != CONTAINING_RGN (BB_TO_BLOCK (src)));
+ if (!glat_start[b->index]
+ || REGNO_REG_SET_P (glat_start[b->index], regno))
{
return 0;
}
of every update-block of src. */
static void
-update_live_1 (src, x)
- int src;
- rtx x;
+update_live_1 (int src, rtx x)
{
- register int i;
- register int regno;
- register rtx reg = SET_DEST (x);
+ int i;
+ int regno;
+ rtx reg = SET_DEST (x);
if (reg == 0)
return;
- while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
- || GET_CODE (reg) == SIGN_EXTRACT
+ while (GET_CODE (reg) == SUBREG
+ || GET_CODE (reg) == ZERO_EXTRACT
|| GET_CODE (reg) == STRICT_LOW_PART)
reg = XEXP (reg, 0);
if (GET_CODE (reg) == PARALLEL)
{
- register int i;
+ int i;
for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
return;
}
- if (GET_CODE (reg) != REG)
+ if (!REG_P (reg))
return;
/* Global registers are always live, so the code below does not apply
{
if (regno < FIRST_PSEUDO_REGISTER)
{
- int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ int j = hard_regno_nregs[regno][GET_MODE (reg)];
while (--j >= 0)
{
for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
{
- int b = candidate_table[src].update_bbs.first_member[i];
+ basic_block b = candidate_table[src].update_bbs.first_member[i];
- SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start,
- regno + j);
+ SET_REGNO_REG_SET (glat_start[b->index], regno + j);
}
}
}
{
for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
{
- int b = candidate_table[src].update_bbs.first_member[i];
+ basic_block b = candidate_table[src].update_bbs.first_member[i];
- SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start, regno);
+ SET_REGNO_REG_SET (glat_start[b->index], regno);
}
}
}
ready-list or before the scheduling. */
static int
-check_live (insn, src)
- rtx insn;
- int src;
+check_live (rtx insn, int src)
{
/* Find the registers set by instruction. */
if (GET_CODE (PATTERN (insn)) == SET
block src to trg. */
static void
-update_live (insn, src)
- rtx insn;
- int src;
+update_live (rtx insn, int src)
{
/* Find the registers set by instruction. */
if (GET_CODE (PATTERN (insn)) == SET
}
}
-/* Exception Free Loads:
-
- We define five classes of speculative loads: IFREE, IRISKY,
- PFREE, PRISKY, and MFREE.
-
- IFREE loads are loads that are proved to be exception-free, just
- by examining the load insn. Examples for such loads are loads
- from TOC and loads of global data.
-
- IRISKY loads are loads that are proved to be exception-risky,
- just by examining the load insn. Examples for such loads are
- volatile loads and loads from shared memory.
-
- PFREE loads are loads for which we can prove, by examining other
- insns, that they are exception-free. Currently, this class consists
- of loads for which we are able to find a "similar load", either in
- the target block, or, if only one split-block exists, in that split
- block. Load2 is similar to load1 if both have same single base
- register. We identify only part of the similar loads, by finding
- an insn upon which both load1 and load2 have a DEF-USE dependence.
-
- PRISKY loads are loads for which we can prove, by examining other
- insns, that they are exception-risky. Currently we have two proofs for
- such loads. The first proof detects loads that are probably guarded by a
- test on the memory address. This proof is based on the
- backward and forward data dependence information for the region.
- Let load-insn be the examined load.
- Load-insn is PRISKY iff ALL the following hold:
-
- - insn1 is not in the same block as load-insn
- - there is a DEF-USE dependence chain (insn1, ..., load-insn)
- - test-insn is either a compare or a branch, not in the same block
- as load-insn
- - load-insn is reachable from test-insn
- - there is a DEF-USE dependence chain (insn1, ..., test-insn)
-
- This proof might fail when the compare and the load are fed
- by an insn not in the region. To solve this, we will add to this
- group all loads that have no input DEF-USE dependence.
-
- The second proof detects loads that are directly or indirectly
- fed by a speculative load. This proof is affected by the
- scheduling process. We will use the flag fed_by_spec_load.
- Initially, all insns have this flag reset. After a speculative
- motion of an insn, if insn is either a load, or marked as
- fed_by_spec_load, we will also mark as fed_by_spec_load every
- insn1 for which a DEF-USE dependence (insn, insn1) exists. A
- load which is fed_by_spec_load is also PRISKY.
-
- MFREE (maybe-free) loads are all the remaining loads. They may be
- exception-free, but we cannot prove it.
-
- Now, all loads in IFREE and PFREE classes are considered
- exception-free, while all loads in IRISKY and PRISKY classes are
- considered exception-risky. As for loads in the MFREE class,
- these are considered either exception-free or exception-risky,
- depending on whether we are pessimistic or optimistic. We have
- to take the pessimistic approach to assure the safety of
- speculative scheduling, but we can take the optimistic approach
- by invoking the -fsched_spec_load_dangerous option. */
-
-enum INSN_TRAP_CLASS
-{
- TRAP_FREE = 0, IFREE = 1, PFREE_CANDIDATE = 2,
- PRISKY_CANDIDATE = 3, IRISKY = 4, TRAP_RISKY = 5
-};
-
-#define WORST_CLASS(class1, class2) \
-((class1 > class2) ? class1 : class2)
-
-/* Non-zero if block bb_to is equal to, or reachable from block bb_from. */
+/* Nonzero if block bb_to is equal to, or reachable from block bb_from. */
#define IS_REACHABLE(bb_from, bb_to) \
-(bb_from == bb_to \
+ (bb_from == bb_to \
|| IS_RGN_ENTRY (bb_from) \
- || (bitset_member (ancestor_edges[bb_to], \
- EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from))), \
- edgeset_size)))
-
-/* Non-zero iff the address is comprised from at most 1 register. */
-#define CONST_BASED_ADDRESS_P(x) \
- (GET_CODE (x) == REG \
- || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
- || (GET_CODE (x) == LO_SUM)) \
- && (GET_CODE (XEXP (x, 0)) == CONST_INT \
- || GET_CODE (XEXP (x, 1)) == CONST_INT)))
+ || (TEST_BIT (ancestor_edges[bb_to], \
+ EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK (BB_TO_BLOCK (bb_from)))))))
/* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
static void
-set_spec_fed (load_insn)
- rtx load_insn;
+set_spec_fed (rtx load_insn)
{
rtx link;
branch depending on insn, that guards the speculative load. */
static int
-find_conditional_protection (insn, load_insn_bb)
- rtx insn;
- int load_insn_bb;
+find_conditional_protection (rtx insn, int load_insn_bb)
{
rtx link;
&& IS_REACHABLE (INSN_BB (next), load_insn_bb)
&& load_insn_bb != INSN_BB (next)
&& GET_MODE (link) == VOIDmode
- && (GET_CODE (next) == JUMP_INSN
+ && (JUMP_P (next)
|| find_conditional_protection (next, load_insn_bb)))
return 1;
}
Locate the branch by following INSN_DEPEND from insn1. */
static int
-is_conditionally_protected (load_insn, bb_src, bb_trg)
- rtx load_insn;
- int bb_src, bb_trg;
+is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
{
rtx link;
/* Must be a DEF-USE dependence upon non-branch. */
if (GET_MODE (link) != VOIDmode
- || GET_CODE (insn1) == JUMP_INSN)
+ || JUMP_P (insn1))
continue;
/* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn. */
load2 anyhow. */
static int
-is_pfree (load_insn, bb_src, bb_trg)
- rtx load_insn;
- int bb_src, bb_trg;
+is_pfree (rtx load_insn, int bb_src, int bb_trg)
{
rtx back_link;
- register candidate *candp = candidate_table + bb_src;
+ candidate *candp = candidate_table + bb_src;
if (candp->split_bbs.nr_members != 1)
/* Must have exactly one escape block. */
/* insn2 is the similar load, in the target block. */
return 1;
- if (*(candp->split_bbs.first_member) == BLOCK_NUM (insn2))
+ if (*(candp->split_bbs.first_member) == BLOCK_FOR_INSN (insn2))
/* insn2 is a similar load, in a split-block. */
return 1;
}
return 0;
} /* is_pfree */
-/* Returns a class that insn with GET_DEST(insn)=x may belong to,
- as found by analyzing insn's expression. */
-
-static int
-may_trap_exp (x, is_store)
- rtx x;
- int is_store;
-{
- enum rtx_code code;
-
- if (x == 0)
- return TRAP_FREE;
- code = GET_CODE (x);
- if (is_store)
- {
- if (code == MEM)
- return TRAP_RISKY;
- else
- return TRAP_FREE;
- }
- if (code == MEM)
- {
- /* The insn uses memory: a volatile load. */
- if (MEM_VOLATILE_P (x))
- return IRISKY;
- /* An exception-free load. */
- if (!may_trap_p (x))
- return IFREE;
- /* A load with 1 base register, to be further checked. */
- if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
- return PFREE_CANDIDATE;
- /* No info on the load, to be further checked. */
- return PRISKY_CANDIDATE;
- }
- else
- {
- const char *fmt;
- int i, insn_class = TRAP_FREE;
-
- /* Neither store nor load, check if it may cause a trap. */
- if (may_trap_p (x))
- return TRAP_RISKY;
- /* Recursive step: walk the insn... */
- fmt = GET_RTX_FORMAT (code);
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'e')
- {
- int tmp_class = may_trap_exp (XEXP (x, i), is_store);
- insn_class = WORST_CLASS (insn_class, tmp_class);
- }
- else if (fmt[i] == 'E')
- {
- int j;
- for (j = 0; j < XVECLEN (x, i); j++)
- {
- int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
- insn_class = WORST_CLASS (insn_class, tmp_class);
- if (insn_class == TRAP_RISKY || insn_class == IRISKY)
- break;
- }
- }
- if (insn_class == TRAP_RISKY || insn_class == IRISKY)
- break;
- }
- return insn_class;
- }
-}
-
-/* Classifies insn for the purpose of verifying that it can be
- moved speculatively, by examining it's patterns, returning:
- TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
- TRAP_FREE: non-load insn.
- IFREE: load from a globaly safe location.
- IRISKY: volatile load.
- PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for
- being either PFREE or PRISKY. */
-
-static int
-haifa_classify_insn (insn)
- rtx insn;
-{
- rtx pat = PATTERN (insn);
- int tmp_class = TRAP_FREE;
- int insn_class = TRAP_FREE;
- enum rtx_code code;
-
- if (GET_CODE (pat) == PARALLEL)
- {
- int i, len = XVECLEN (pat, 0);
-
- for (i = len - 1; i >= 0; i--)
- {
- code = GET_CODE (XVECEXP (pat, 0, i));
- switch (code)
- {
- case CLOBBER:
- /* Test if it is a 'store'. */
- tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
- break;
- case SET:
- /* Test if it is a store. */
- tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
- if (tmp_class == TRAP_RISKY)
- break;
- /* Test if it is a load. */
- tmp_class
- = WORST_CLASS (tmp_class,
- may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
- 0));
- break;
- case COND_EXEC:
- case TRAP_IF:
- tmp_class = TRAP_RISKY;
- break;
- default:
- ;
- }
- insn_class = WORST_CLASS (insn_class, tmp_class);
- if (insn_class == TRAP_RISKY || insn_class == IRISKY)
- break;
- }
- }
- else
- {
- code = GET_CODE (pat);
- switch (code)
- {
- case CLOBBER:
- /* Test if it is a 'store'. */
- tmp_class = may_trap_exp (XEXP (pat, 0), 1);
- break;
- case SET:
- /* Test if it is a store. */
- tmp_class = may_trap_exp (SET_DEST (pat), 1);
- if (tmp_class == TRAP_RISKY)
- break;
- /* Test if it is a load. */
- tmp_class =
- WORST_CLASS (tmp_class,
- may_trap_exp (SET_SRC (pat), 0));
- break;
- case COND_EXEC:
- case TRAP_IF:
- tmp_class = TRAP_RISKY;
- break;
- default:;
- }
- insn_class = tmp_class;
- }
-
- return insn_class;
-}
-
/* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
a load moved speculatively, or if load_insn is protected by
a compare on load_insn's address). */
static int
-is_prisky (load_insn, bb_src, bb_trg)
- rtx load_insn;
- int bb_src, bb_trg;
+is_prisky (rtx load_insn, int bb_src, int bb_trg)
{
if (FED_BY_SPEC_LOAD (load_insn))
return 1;
and 0 otherwise. */
static int
-is_exception_free (insn, bb_src, bb_trg)
- rtx insn;
- int bb_src, bb_trg;
+is_exception_free (rtx insn, int bb_src, int bb_trg)
{
int insn_class = haifa_classify_insn (insn);
static int target_n_insns;
/* The number of insns from the entire region scheduled so far. */
static int sched_n_insns;
-/* Nonzero if the last scheduled insn was a jump. */
-static int last_was_jump;
/* Implementations of the sched_info functions for region scheduling. */
-static void init_ready_list PARAMS ((struct ready_list *));
-static int can_schedule_ready_p PARAMS ((rtx));
-static int new_ready PARAMS ((rtx));
-static int schedule_more_p PARAMS ((void));
-static const char *rgn_print_insn PARAMS ((rtx, int));
-static int rgn_rank PARAMS ((rtx, rtx));
-static int contributes_to_priority PARAMS ((rtx, rtx));
-static void compute_jump_reg_dependencies PARAMS ((rtx, regset));
+static void init_ready_list (void);
+static int can_schedule_ready_p (rtx);
+static void begin_schedule_ready (rtx, rtx);
+static ds_t new_ready (rtx, ds_t);
+static int schedule_more_p (void);
+static const char *rgn_print_insn (rtx, int);
+static int rgn_rank (rtx, rtx);
+static int contributes_to_priority (rtx, rtx);
+static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
+
+/* Functions for speculative scheduling. */
+static void add_remove_insn (rtx, int);
+static void extend_regions (void);
+static void add_block1 (basic_block, basic_block);
+static void fix_recovery_cfg (int, int, int);
+static basic_block advance_target_bb (basic_block, rtx);
+static void check_dead_notes1 (int, sbitmap);
+#ifdef ENABLE_CHECKING
+static int region_head_or_leaf_p (basic_block, int);
+#endif
/* Return nonzero if there are more insns that should be scheduled. */
static int
-schedule_more_p ()
+schedule_more_p (void)
{
- return ! last_was_jump && sched_target_n_insns < target_n_insns;
+ return sched_target_n_insns < target_n_insns;
}
/* Add all insns that are initially ready to the ready list READY. Called
once before scheduling a set of insns. */
static void
-init_ready_list (ready)
- struct ready_list *ready;
+init_ready_list (void)
{
rtx prev_head = current_sched_info->prev_head;
rtx next_tail = current_sched_info->next_tail;
target_n_insns = 0;
sched_target_n_insns = 0;
sched_n_insns = 0;
- last_was_jump = 0;
/* Print debugging information. */
if (sched_verbose >= 5)
/* Prepare current target block info. */
if (current_nr_blocks > 1)
{
- candidate_table = (candidate *) xmalloc (current_nr_blocks
- * sizeof (candidate));
+ candidate_table = XNEWVEC (candidate, current_nr_blocks);
bblst_last = 0;
/* bblst_table holds split blocks and update blocks for each block after
the TO blocks of region edges, so there can be at most rgn_nr_edges
of them. */
bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
- bblst_table = (int *) xmalloc (bblst_size * sizeof (int));
+ bblst_table = XNEWVEC (basic_block, bblst_size);
- bitlst_table_last = 0;
- bitlst_table_size = rgn_nr_edges;
- bitlst_table = (int *) xmalloc (rgn_nr_edges * sizeof (int));
+ edgelst_last = 0;
+ edgelst_table = XNEWVEC (edge, rgn_nr_edges);
compute_trg_info (target_bb);
}
/* Initialize ready list with all 'ready' insns in target block.
Count number of insns in the target block being scheduled. */
for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
- {
- rtx next;
+ {
+ try_ready (insn);
+ target_n_insns++;
- if (! INSN_P (insn))
- continue;
- next = NEXT_INSN (insn);
-
- if (INSN_DEP_COUNT (insn) == 0
- && (SCHED_GROUP_P (next) == 0 || ! INSN_P (next)))
- ready_add (ready, insn);
- if (!(SCHED_GROUP_P (insn)))
- target_n_insns++;
+ gcc_assert (!(TODO_SPEC (insn) & BEGIN_CONTROL));
}
/* Add to ready list all 'ready' insns in valid source blocks.
rtx src_next_tail;
rtx tail, head;
- get_block_head_tail (BB_TO_BLOCK (bb_src), &head, &tail);
+ get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src),
+ &head, &tail);
src_next_tail = NEXT_INSN (tail);
src_head = head;
for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
- {
- if (! INSN_P (insn))
- continue;
-
- if (!CANT_MOVE (insn)
- && (!IS_SPECULATIVE_INSN (insn)
- || (insn_issue_delay (insn) <= 3
- && check_live (insn, bb_src)
- && is_exception_free (insn, bb_src, target_bb))))
- {
- rtx next;
-
- /* Note that we havn't squirrled away the notes for
- blocks other than the current. So if this is a
- speculative insn, NEXT might otherwise be a note. */
- next = next_nonnote_insn (insn);
- if (INSN_DEP_COUNT (insn) == 0
- && (! next
- || SCHED_GROUP_P (next) == 0
- || ! INSN_P (next)))
- ready_add (ready, insn);
- }
- }
+ if (INSN_P (insn))
+ try_ready (insn);
}
}
insn can be scheduled, zero if we should silently discard it. */
static int
-can_schedule_ready_p (insn)
- rtx insn;
+can_schedule_ready_p (rtx insn)
{
- if (GET_CODE (insn) == JUMP_INSN)
- last_was_jump = 1;
+ /* An interblock motion? */
+ if (INSN_BB (insn) != target_bb
+ && IS_SPECULATIVE_INSN (insn)
+ && !check_live (insn, INSN_BB (insn)))
+ return 0;
+ else
+ return 1;
+}
+/* Updates counters and other information. Split from can_schedule_ready_p ()
+ because when an insn is scheduled speculatively, the insn passed to
+ can_schedule_ready_p () differs from the one passed to
+ begin_schedule_ready (). */
+static void
+begin_schedule_ready (rtx insn, rtx last ATTRIBUTE_UNUSED)
+{
/* An interblock motion? */
if (INSN_BB (insn) != target_bb)
{
- rtx temp;
- basic_block b1;
-
if (IS_SPECULATIVE_INSN (insn))
{
- if (!check_live (insn, INSN_BB (insn)))
- return 0;
+ gcc_assert (check_live (insn, INSN_BB (insn)));
+
update_live (insn, INSN_BB (insn));
/* For speculative load, mark insns fed by it. */
nr_spec++;
}
nr_inter++;
-
- /* Find the beginning of the scheduling group. */
- /* ??? Ought to update basic block here, but later bits of
- schedule_block assumes the original insn block is
- still intact. */
-
- temp = insn;
- while (SCHED_GROUP_P (temp))
- temp = PREV_INSN (temp);
-
- /* Update source block boundaries. */
- b1 = BLOCK_FOR_INSN (temp);
- if (temp == b1->head && insn == b1->end)
- {
- /* We moved all the insns in the basic block.
- Emit a note after the last insn and update the
- begin/end boundaries to point to the note. */
- rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
- b1->head = note;
- b1->end = note;
- }
- else if (insn == b1->end)
- {
- /* We took insns from the end of the basic block,
- so update the end of block boundary so that it
- points to the first insn we did not move. */
- b1->end = PREV_INSN (temp);
- }
- else if (temp == b1->head)
- {
- /* We took insns from the start of the basic block,
- so update the start of block boundary so that
- it points to the first insn we did not move. */
- b1->head = NEXT_INSN (insn);
- }
}
else
{
sched_target_n_insns++;
}
sched_n_insns++;
-
- return 1;
}
-/* Called after INSN has all its dependencies resolved. Return nonzero
- if it should be moved to the ready list or the queue, or zero if we
- should silently discard it. */
-static int
-new_ready (next)
- rtx next;
+/* Called after NEXT has all its hard dependencies resolved and speculation
+ of type TS is enough to overcome the remaining dependencies.
+ Return the speculation status with which NEXT should be made ready, or a
+ status with HARD_DEP set if it should not be moved to the ready list or
+ the queue yet. */
+static ds_t
+new_ready (rtx next, ds_t ts)
{
- /* For speculative insns, before inserting to ready/queue,
- check live, exception-free, and issue-delay. */
- if (INSN_BB (next) != target_bb
- && (!IS_VALID (INSN_BB (next))
+ if (INSN_BB (next) != target_bb)
+ {
+ int not_ex_free = 0;
+
+ /* For speculative insns, before inserting to ready/queue,
+ check live, exception-free, and issue-delay. */
+ if (!IS_VALID (INSN_BB (next))
|| CANT_MOVE (next)
|| (IS_SPECULATIVE_INSN (next)
- && (insn_issue_delay (next) > 3
+ && ((recog_memoized (next) >= 0
+ && min_insn_conflict_delay (curr_state, next, next)
+ > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
+ || RECOVERY_BLOCK (next)
|| !check_live (next, INSN_BB (next))
- || !is_exception_free (next, INSN_BB (next), target_bb)))))
- return 0;
- return 1;
+ || (not_ex_free = !is_exception_free (next, INSN_BB (next),
+ target_bb)))))
+ {
+ if (not_ex_free
+ /* We are here because is_exception_free () returned false.
+ But we may still be able to handle that with control speculation. */
+ && current_sched_info->flags & DO_SPECULATION)
+ /* Here we have a new control-speculative instruction. */
+ ts = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK);
+ else
+ ts = (ts & ~SPECULATIVE) | HARD_DEP;
+ }
+ }
+
+ return ts;
}
/* Return a string that contains the insn uid and optionally anything else
to be formatted so that multiple output lines will line up nicely. */
static const char *
-rgn_print_insn (insn, aligned)
- rtx insn;
- int aligned;
+rgn_print_insn (rtx insn, int aligned)
{
static char tmp[80];
is to be preferred. Zero if they are equally good. */
static int
-rgn_rank (insn1, insn2)
- rtx insn1, insn2;
+rgn_rank (rtx insn1, rtx insn2)
{
/* Some comparison make sense in interblock scheduling only. */
if (INSN_BB (insn1) != INSN_BB (insn2))
calculations. */
static int
-contributes_to_priority (next, insn)
- rtx next, insn;
+contributes_to_priority (rtx next, rtx insn)
{
- return BLOCK_NUM (next) == BLOCK_NUM (insn);
+ /* NEXT and INSN reside in one ebb. */
+ return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn));
}
-/* INSN is a JUMP_INSN. Store the set of registers that must be considered
- to be set by this jump in SET. */
+/* INSN is a JUMP_INSN, COND_SET is the set of registers that are
+ conditionally set before INSN. Store the set of registers that
+ must be considered as used by this jump in USED and that of
+ registers that must be considered as set in SET. */
static void
-compute_jump_reg_dependencies (insn, set)
- rtx insn ATTRIBUTE_UNUSED;
- regset set ATTRIBUTE_UNUSED;
+compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
+ regset cond_exec ATTRIBUTE_UNUSED,
+ regset used ATTRIBUTE_UNUSED,
+ regset set ATTRIBUTE_UNUSED)
{
/* Nothing to do here, since we postprocess jumps in
add_branch_dependences. */
NULL, NULL,
NULL, NULL,
- 0, 0
+ 0, 0, 0,
+
+ add_remove_insn,
+ begin_schedule_ready,
+ add_block1,
+ advance_target_bb,
+ fix_recovery_cfg,
+#ifdef ENABLE_CHECKING
+ region_head_or_leaf_p,
+#endif
+ SCHED_RGN | USE_GLAT
+#ifdef ENABLE_CHECKING
+ | DETACH_LIFE_INFO
+#endif
};
+/* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register. */
+
+static void sets_likely_spilled_1 (rtx, rtx, void *);
+
+static bool
+sets_likely_spilled (rtx pat)
+{
+ bool ret = false;
+ note_stores (pat, sets_likely_spilled_1, &ret);
+ return ret;
+}
+
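+/* Helper for sets_likely_spilled, called via note_stores. Sets *DATA (a
+ bool) to true when X, the destination of a SET in PAT, is a hard register
+ in a class that is likely to be spilled. */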
+static void
+sets_likely_spilled_1 (rtx x, rtx pat, void *data)
+{
+ bool *ret = (bool *) data;
+
+ if (GET_CODE (pat) == SET
+ && REG_P (x)
+ && REGNO (x) < FIRST_PSEUDO_REGISTER
+ && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (REGNO (x))))
+ *ret = true;
+}
+
/* Add dependences so that branches are scheduled to run last in their
block. */
static void
-add_branch_dependences (head, tail)
- rtx head, tail;
+add_branch_dependences (rtx head, rtx tail)
{
rtx insn, last;
- /* For all branches, calls, uses, clobbers, and cc0 setters, force them
- to remain in order at the end of the block by adding dependencies and
- giving the last a high priority. There may be notes present, and
- prev_head may also be a note.
+ /* For all branches, calls, uses, clobbers, cc0 setters, and instructions
+ that can throw exceptions, force them to remain in order at the end of
+ the block by adding dependencies and giving the last a high priority.
+ There may be notes present, and prev_head may also be a note.
Branches must obviously remain at the end. Calls should remain at the
end since moving them results in worse register allocation. Uses remain
- at the end to ensure proper register allocation. cc0 setters remaim
- at the end because they can't be moved away from their cc0 user. */
+ at the end to ensure proper register allocation.
+
+ cc0 setters remain at the end because they can't be moved away from
+ their cc0 user.
+
+ COND_EXEC insns cannot be moved past a branch (see e.g. PR17808).
+
+ Insns setting CLASS_LIKELY_SPILLED_P registers (usually return values)
+ are not moved before reload because we can wind up with register
+ allocation failures. */
+
insn = tail;
last = 0;
- while (GET_CODE (insn) == CALL_INSN
- || GET_CODE (insn) == JUMP_INSN
- || (GET_CODE (insn) == INSN
+ while (CALL_P (insn)
+ || JUMP_P (insn)
+ || (NONJUMP_INSN_P (insn)
&& (GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER
+ || can_throw_internal (insn)
#ifdef HAVE_cc0
|| sets_cc0_p (PATTERN (insn))
#endif
- ))
- || GET_CODE (insn) == NOTE)
+ || (!reload_completed
+ && sets_likely_spilled (PATTERN (insn)))))
+ || NOTE_P (insn))
{
- if (GET_CODE (insn) != NOTE)
+ if (!NOTE_P (insn))
{
- if (last != 0
- && !find_insn_list (insn, LOG_LINKS (last)))
+ if (last != 0 && !find_insn_list (insn, LOG_LINKS (last)))
{
- add_dependence (last, insn, REG_DEP_ANTI);
+ if (! sched_insns_conditions_mutex_p (last, insn))
+ add_dependence (last, insn, REG_DEP_ANTI);
INSN_REF_COUNT (insn)++;
}
CANT_MOVE (insn) = 1;
last = insn;
- /* Skip over insns that are part of a group.
- Make each insn explicitly depend on the previous insn.
- This ensures that only the group header will ever enter
- the ready queue (and, when scheduled, will automatically
- schedule the SCHED_GROUP_P block). */
- while (SCHED_GROUP_P (insn))
- {
- rtx temp = prev_nonnote_insn (insn);
- add_dependence (insn, temp, REG_DEP_ANTI);
- insn = temp;
- }
}
/* Don't overrun the bounds of the basic block. */
if (INSN_REF_COUNT (insn) != 0)
continue;
- add_dependence (last, insn, REG_DEP_ANTI);
+ if (! sched_insns_conditions_mutex_p (last, insn))
+ add_dependence (last, insn, REG_DEP_ANTI);
INSN_REF_COUNT (insn) = 1;
-
- /* Skip over insns that are part of a group. */
- while (SCHED_GROUP_P (insn))
- insn = prev_nonnote_insn (insn);
}
+
+#ifdef HAVE_conditional_execution
+ /* Finally, if the block ends in a jump, and we are doing intra-block
+ scheduling, make sure that the branch depends on any COND_EXEC insns
+ inside the block to avoid moving the COND_EXECs past the branch insn.
+
+ We only have to do this after reload, because (1) before reload there
+ are no COND_EXEC insns, and (2) the region scheduler is an intra-block
+ scheduler after reload.
+
+ FIXME: We could in some cases move COND_EXEC insns past the branch if
+ this scheduler would be a little smarter. Consider this code:
+
+ T = [addr]
+ C ? addr += 4
+ !C ? X += 12
+ C ? T += 1
+ C ? jump foo
+
+ On a target with a one cycle stall on a memory access the optimal
+ sequence would be:
+
+ T = [addr]
+ C ? addr += 4
+ C ? T += 1
+ C ? jump foo
+ !C ? X += 12
+
+ We don't want to put the 'X += 12' before the branch because it just
+ wastes a cycle of execution time when the branch is taken.
+
+ Note that in the example "!C" will always be true. That is another
+ possible improvement for handling COND_EXECs in this scheduler: it
+ could remove always-true predicates. */
+
+ if (!reload_completed || ! JUMP_P (tail))
+ return;
+
+ insn = tail;
+ while (insn != head)
+ {
+ insn = PREV_INSN (insn);
+
+ /* Note that we want to add this dependency even when
+ sched_insns_conditions_mutex_p returns true. The whole point
+ is that we _want_ this dependency, even if these insns really
+ are independent. */
+ if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC)
+ add_dependence (tail, insn, REG_DEP_ANTI);
+ }
+#endif
}
/* Data structures for the computation of data dependences in a region. We
static struct deps *bb_deps;
+/* Duplicate the INSN_LIST elements of COPY and prepend them to OLD. */
+
+static rtx
+concat_INSN_LIST (rtx copy, rtx old)
+{
+ rtx new = old;
+ for (; copy ; copy = XEXP (copy, 1))
+ new = alloc_INSN_LIST (XEXP (copy, 0), new);
+ return new;
+}
+
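+/* Prepend copies of the INSN_LIST/EXPR_LIST pairs in COPY_INSNS/COPY_MEMS
+ (the two lists are walked in parallel) to the lists pointed to by
+ OLD_INSNS_P and OLD_MEMS_P. */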
+static void
+concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p,
+ rtx *old_mems_p)
+{
+ rtx new_insns = *old_insns_p;
+ rtx new_mems = *old_mems_p;
+
+ while (copy_insns)
+ {
+ new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns);
+ new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems);
+ copy_insns = XEXP (copy_insns, 1);
+ copy_mems = XEXP (copy_mems, 1);
+ }
+
+ *old_insns_p = new_insns;
+ *old_mems_p = new_mems;
+}
+
/* After computing the dependencies for block BB, propagate the dependencies
found in PRED_DEPS to the successors of the block. */
static void
-propagate_deps (bb, tmp_deps)
- int bb;
- struct deps *tmp_deps;
+propagate_deps (int bb, struct deps *pred_deps)
{
- int b = BB_TO_BLOCK (bb);
- int e, first_edge;
- int reg;
- rtx link_insn, link_mem;
- rtx u;
-
- /* These lists should point to the right place, for correct
- freeing later. */
- bb_deps[bb].pending_read_insns = tmp_deps->pending_read_insns;
- bb_deps[bb].pending_read_mems = tmp_deps->pending_read_mems;
- bb_deps[bb].pending_write_insns = tmp_deps->pending_write_insns;
- bb_deps[bb].pending_write_mems = tmp_deps->pending_write_mems;
+ basic_block block = BASIC_BLOCK (BB_TO_BLOCK (bb));
+ edge_iterator ei;
+ edge e;
/* bb's structures are inherited by its successors. */
- first_edge = e = OUT_EDGES (b);
- if (e <= 0)
- return;
-
- do
+ FOR_EACH_EDGE (e, ei, block->succs)
{
- rtx x;
- int b_succ = TO_BLOCK (e);
- int bb_succ = BLOCK_TO_BB (b_succ);
- struct deps *succ_deps = bb_deps + bb_succ;
+ struct deps *succ_deps;
+ unsigned reg;
+ reg_set_iterator rsi;
/* Only bbs "below" bb, in the same region, are interesting. */
- if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
- || bb_succ <= bb)
- {
- e = NEXT_OUT (e);
- continue;
- }
+ if (e->dest == EXIT_BLOCK_PTR
+ || CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index)
+ || BLOCK_TO_BB (e->dest->index) <= bb)
+ continue;
- /* The reg_last lists are inherited by bb_succ. */
- EXECUTE_IF_SET_IN_REG_SET (&tmp_deps->reg_last_in_use, 0, reg,
- {
- struct deps_reg *tmp_deps_reg = &tmp_deps->reg_last[reg];
- struct deps_reg *succ_deps_reg = &succ_deps->reg_last[reg];
-
- for (u = tmp_deps_reg->uses; u; u = XEXP (u, 1))
- if (! find_insn_list (XEXP (u, 0), succ_deps_reg->uses))
- succ_deps_reg->uses
- = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->uses);
-
- for (u = tmp_deps_reg->sets; u; u = XEXP (u, 1))
- if (! find_insn_list (XEXP (u, 0), succ_deps_reg->sets))
- succ_deps_reg->sets
- = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->sets);
-
- for (u = tmp_deps_reg->clobbers; u; u = XEXP (u, 1))
- if (! find_insn_list (XEXP (u, 0), succ_deps_reg->clobbers))
- succ_deps_reg->clobbers
- = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->clobbers);
- });
- IOR_REG_SET (&succ_deps->reg_last_in_use, &tmp_deps->reg_last_in_use);
-
- /* Mem read/write lists are inherited by bb_succ. */
- link_insn = tmp_deps->pending_read_insns;
- link_mem = tmp_deps->pending_read_mems;
- while (link_insn)
- {
- if (!(find_insn_mem_list (XEXP (link_insn, 0),
- XEXP (link_mem, 0),
- succ_deps->pending_read_insns,
- succ_deps->pending_read_mems)))
- add_insn_mem_dependence (succ_deps, &succ_deps->pending_read_insns,
- &succ_deps->pending_read_mems,
- XEXP (link_insn, 0), XEXP (link_mem, 0));
- link_insn = XEXP (link_insn, 1);
- link_mem = XEXP (link_mem, 1);
- }
+ succ_deps = bb_deps + BLOCK_TO_BB (e->dest->index);
- link_insn = tmp_deps->pending_write_insns;
- link_mem = tmp_deps->pending_write_mems;
- while (link_insn)
+ /* The reg_last lists are inherited by successor. */
+ EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi)
{
- if (!(find_insn_mem_list (XEXP (link_insn, 0),
- XEXP (link_mem, 0),
- succ_deps->pending_write_insns,
- succ_deps->pending_write_mems)))
- add_insn_mem_dependence (succ_deps,
- &succ_deps->pending_write_insns,
- &succ_deps->pending_write_mems,
- XEXP (link_insn, 0), XEXP (link_mem, 0));
-
- link_insn = XEXP (link_insn, 1);
- link_mem = XEXP (link_mem, 1);
+ struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
+ struct deps_reg *succ_rl = &succ_deps->reg_last[reg];
+
+ succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
+ succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
+ succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
+ succ_rl->clobbers);
+ succ_rl->uses_length += pred_rl->uses_length;
+ succ_rl->clobbers_length += pred_rl->clobbers_length;
}
-
- /* last_function_call is inherited by bb_succ. */
- for (u = tmp_deps->last_function_call; u; u = XEXP (u, 1))
- if (! find_insn_list (XEXP (u, 0), succ_deps->last_function_call))
- succ_deps->last_function_call
- = alloc_INSN_LIST (XEXP (u, 0), succ_deps->last_function_call);
-
- /* last_pending_memory_flush is inherited by bb_succ. */
- for (u = tmp_deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- if (! find_insn_list (XEXP (u, 0),
- succ_deps->last_pending_memory_flush))
- succ_deps->last_pending_memory_flush
- = alloc_INSN_LIST (XEXP (u, 0),
- succ_deps->last_pending_memory_flush);
-
- /* sched_before_next_call is inherited by bb_succ. */
- x = LOG_LINKS (tmp_deps->sched_before_next_call);
- for (; x; x = XEXP (x, 1))
- add_dependence (succ_deps->sched_before_next_call,
- XEXP (x, 0), REG_DEP_ANTI);
-
- e = NEXT_OUT (e);
+ IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);
+
+ /* Mem read/write lists are inherited by successor. */
+ concat_insn_mem_list (pred_deps->pending_read_insns,
+ pred_deps->pending_read_mems,
+ &succ_deps->pending_read_insns,
+ &succ_deps->pending_read_mems);
+ concat_insn_mem_list (pred_deps->pending_write_insns,
+ pred_deps->pending_write_mems,
+ &succ_deps->pending_write_insns,
+ &succ_deps->pending_write_mems);
+
+ succ_deps->last_pending_memory_flush
+ = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
+ succ_deps->last_pending_memory_flush);
+
+ succ_deps->pending_lists_length += pred_deps->pending_lists_length;
+ succ_deps->pending_flush_length += pred_deps->pending_flush_length;
+
+ /* last_function_call is inherited by successor. */
+ succ_deps->last_function_call
+ = concat_INSN_LIST (pred_deps->last_function_call,
+ succ_deps->last_function_call);
+
+ /* sched_before_next_call is inherited by successor. */
+ succ_deps->sched_before_next_call
+ = concat_INSN_LIST (pred_deps->sched_before_next_call,
+ succ_deps->sched_before_next_call);
}
- while (e != first_edge);
+
+ /* These lists should point to the right place, for correct
+ freeing later. */
+ bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
+ bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
+ bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
+ bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;
+
+ /* Can't allow these to be freed twice. */
+ pred_deps->pending_read_insns = 0;
+ pred_deps->pending_read_mems = 0;
+ pred_deps->pending_write_insns = 0;
+ pred_deps->pending_write_mems = 0;
}
/* Compute backward dependences inside bb. In a multiple blocks region:
(1) a bb is analyzed after its predecessors, and (2) the lists in
effect at the end of bb (after analyzing for bb) are inherited by
- bb's successrs.
+ bb's successors.
Specifically for reg-reg data dependences, the block insns are
scanned by sched_analyze () top-to-bottom. Two lists are
similar, and the result is interblock dependences in the region. */
static void
-compute_block_backward_dependences (bb)
- int bb;
+compute_block_backward_dependences (int bb)
{
rtx head, tail;
struct deps tmp_deps;
tmp_deps = bb_deps[bb];
/* Do the analysis for this block. */
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
+ gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
sched_analyze (&tmp_deps, head, tail);
add_branch_dependences (head, tail);
them to the unused_*_list variables, so that they can be reused. */
static void
-free_pending_lists ()
+free_pending_lists (void)
{
int bb;
/* Print dependences for debugging, callable from debugger. */
void
-debug_dependencies ()
+debug_dependencies (void)
{
int bb;
fprintf (sched_dump, ";; --------------- forward dependences: ------------ \n");
for (bb = 0; bb < current_nr_blocks; bb++)
{
- if (1)
+ rtx head, tail;
+ rtx next_tail;
+ rtx insn;
+
+ gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
+ next_tail = NEXT_INSN (tail);
+ fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
+ BB_TO_BLOCK (bb), bb);
+
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
+ "insn", "code", "bb", "dep", "prio", "cost",
+ "reservation");
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
+ "----", "----", "--", "---", "----", "----",
+ "-----------");
+
+ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
{
- rtx head, tail;
- rtx next_tail;
- rtx insn;
-
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
- next_tail = NEXT_INSN (tail);
- fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
- BB_TO_BLOCK (bb), bb);
-
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
- "insn", "code", "bb", "dep", "prio", "cost", "blockage", "units");
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
- "----", "----", "--", "---", "----", "----", "--------", "-----");
- for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
- {
- rtx link;
- int unit, range;
+ rtx link;
- if (! INSN_P (insn))
+ if (! INSN_P (insn))
+ {
+ int n;
+ fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
+ if (NOTE_P (insn))
{
- int n;
- fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
- if (GET_CODE (insn) == NOTE)
+ n = NOTE_LINE_NUMBER (insn);
+ if (n < 0)
+ fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
+ else
{
- n = NOTE_LINE_NUMBER (insn);
- if (n < 0)
- fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
- else
- fprintf (sched_dump, "line %d, file %s\n", n,
- NOTE_SOURCE_FILE (insn));
+ expanded_location xloc;
+ NOTE_EXPANDED_LOCATION (xloc, insn);
+ fprintf (sched_dump, "line %d, file %s\n",
+ xloc.line, xloc.file);
}
- else
- fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
- continue;
}
-
- unit = insn_unit (insn);
- range = (unit < 0
- || function_units[unit].blockage_range_function == 0) ? 0 :
- function_units[unit].blockage_range_function (insn);
- fprintf (sched_dump,
- ";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ",
- (SCHED_GROUP_P (insn) ? "+" : " "),
- INSN_UID (insn),
- INSN_CODE (insn),
- INSN_BB (insn),
- INSN_DEP_COUNT (insn),
- INSN_PRIORITY (insn),
- insn_cost (insn, 0, 0),
- (int) MIN_BLOCKAGE_COST (range),
- (int) MAX_BLOCKAGE_COST (range));
- insn_print_units (insn);
- fprintf (sched_dump, "\t: ");
- for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
- fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0)));
- fprintf (sched_dump, "\n");
+ else
+ fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
+ continue;
}
+
+ fprintf (sched_dump,
+ ";; %s%5d%6d%6d%6d%6d%6d ",
+ (SCHED_GROUP_P (insn) ? "+" : " "),
+ INSN_UID (insn),
+ INSN_CODE (insn),
+ INSN_BB (insn),
+ INSN_DEP_COUNT (insn),
+ INSN_PRIORITY (insn),
+ insn_cost (insn, 0, 0));
+
+ if (recog_memoized (insn) < 0)
+ fprintf (sched_dump, "nothing");
+ else
+ print_reservation (sched_dump, insn);
+
+ fprintf (sched_dump, "\t: ");
+ for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
+ fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0)));
+ fprintf (sched_dump, "\n");
}
}
fprintf (sched_dump, "\n");
}
\f
+/* Return true if all the basic blocks of the current region have the
+ BB_DISABLE_SCHEDULE flag set, which means the region should not be
+ scheduled. */
+static bool
+sched_is_disabled_for_current_region_p (void)
+{
+ int bb;
+
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ if (!(BASIC_BLOCK (BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE))
+ return false;
+
+ return true;
+}
+
/* Schedule a region. A region is either an inner loop, a loop-free
subroutine, or a single basic block. Each bb in the region is
scheduled after its flow predecessors. */
static void
-schedule_region (rgn)
- int rgn;
+schedule_region (int rgn)
{
+ basic_block block;
+ edge_iterator ei;
+ edge e;
int bb;
- int rgn_n_insns = 0;
int sched_rgn_n_insns = 0;
+ rgn_n_insns = 0;
/* Set variables for the current region. */
current_nr_blocks = RGN_NR_BLOCKS (rgn);
current_blocks = RGN_BLOCKS (rgn);
+
+ /* See the comments in add_block1 for why we allocate one extra element. */
+ ebb_head = xrealloc (ebb_head, (current_nr_blocks + 1) * sizeof (*ebb_head));
+ for (bb = 0; bb <= current_nr_blocks; bb++)
+ ebb_head[bb] = current_blocks + bb;
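+ /* Initially each ebb consists of exactly one block, so slots
+ ebb_head[bb] .. ebb_head[bb + 1] - 1 of rgn_bb_table describe ebb BB;
+ add_block1 may later grow an ebb by inserting recovery blocks. */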
+
+ /* Don't schedule a region all of whose blocks are marked with
+ BB_DISABLE_SCHEDULE. */
+ if (sched_is_disabled_for_current_region_p ())
+ return;
- init_deps_global ();
+ if (!RGN_DONT_CALC_DEPS (rgn))
+ {
+ init_deps_global ();
- /* Initializations for region data dependence analyisis. */
- bb_deps = (struct deps *) xmalloc (sizeof (struct deps) * current_nr_blocks);
- for (bb = 0; bb < current_nr_blocks; bb++)
- init_deps (bb_deps + bb);
+ /* Initializations for region data dependence analysis. */
+ bb_deps = XNEWVEC (struct deps, current_nr_blocks);
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ init_deps (bb_deps + bb);
- /* Compute LOG_LINKS. */
- for (bb = 0; bb < current_nr_blocks; bb++)
- compute_block_backward_dependences (bb);
+ /* Compute LOG_LINKS. */
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ compute_block_backward_dependences (bb);
- /* Compute INSN_DEPEND. */
- for (bb = current_nr_blocks - 1; bb >= 0; bb--)
- {
- rtx head, tail;
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
+ /* Compute INSN_DEPEND. */
+ for (bb = current_nr_blocks - 1; bb >= 0; bb--)
+ {
+ rtx head, tail;
- compute_forward_dependences (head, tail);
- }
+ gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
+
+ compute_forward_dependences (head, tail);
+ if (targetm.sched.dependencies_evaluation_hook)
+ targetm.sched.dependencies_evaluation_hook (head, tail);
+ }
+
+ free_pending_lists ();
+
+ finish_deps_global ();
+
+ free (bb_deps);
+ }
+ else
+ /* This is a recovery block. It is always a single block region. */
+ gcc_assert (current_nr_blocks == 1);
+
/* Set priorities. */
+ current_sched_info->sched_max_insns_priority = 0;
for (bb = 0; bb < current_nr_blocks; bb++)
{
rtx head, tail;
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
+
+ gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
rgn_n_insns += set_priorities (head, tail);
}
+ current_sched_info->sched_max_insns_priority++;
/* Compute interblock info: probabilities, split-edges, dominators, etc. */
if (current_nr_blocks > 1)
{
- int i;
+ prob = XNEWVEC (int, current_nr_blocks);
- prob = (float *) xmalloc ((current_nr_blocks) * sizeof (float));
+ dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
+ sbitmap_vector_zero (dom, current_nr_blocks);
- bbset_size = current_nr_blocks / HOST_BITS_PER_WIDE_INT + 1;
- dom = (bbset *) xmalloc (current_nr_blocks * sizeof (bbset));
- for (i = 0; i < current_nr_blocks; i++)
- dom[i] = (bbset) xcalloc (bbset_size, sizeof (HOST_WIDE_INT));
-
- /* Edge to bit. */
+ /* Use ->aux to implement EDGE_TO_BIT mapping. */
rgn_nr_edges = 0;
- edge_to_bit = (int *) xmalloc (nr_edges * sizeof (int));
- for (i = 1; i < nr_edges; i++)
- if (CONTAINING_RGN (FROM_BLOCK (i)) == rgn)
- EDGE_TO_BIT (i) = rgn_nr_edges++;
- rgn_edges = (int *) xmalloc (rgn_nr_edges * sizeof (int));
+ FOR_EACH_BB (block)
+ {
+ if (CONTAINING_RGN (block->index) != rgn)
+ continue;
+ FOR_EACH_EDGE (e, ei, block->succs)
+ SET_EDGE_TO_BIT (e, rgn_nr_edges++);
+ }
+ rgn_edges = XNEWVEC (edge, rgn_nr_edges);
rgn_nr_edges = 0;
- for (i = 1; i < nr_edges; i++)
- if (CONTAINING_RGN (FROM_BLOCK (i)) == (rgn))
- rgn_edges[rgn_nr_edges++] = i;
-
- /* Split edges. */
- edgeset_size = rgn_nr_edges / HOST_BITS_PER_WIDE_INT + 1;
- edgeset_bitsize = rgn_nr_edges;
- pot_split = (edgeset *) xmalloc (current_nr_blocks * sizeof (edgeset));
- ancestor_edges
- = (edgeset *) xmalloc (current_nr_blocks * sizeof (edgeset));
- for (i = 0; i < current_nr_blocks; i++)
+ FOR_EACH_BB (block)
{
- pot_split[i] =
- (edgeset) xcalloc (edgeset_size, sizeof (HOST_WIDE_INT));
- ancestor_edges[i] =
- (edgeset) xcalloc (edgeset_size, sizeof (HOST_WIDE_INT));
+ if (CONTAINING_RGN (block->index) != rgn)
+ continue;
+ FOR_EACH_EDGE (e, ei, block->succs)
+ rgn_edges[rgn_nr_edges++] = e;
}
+ /* Split edges. */
+ pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
+ sbitmap_vector_zero (pot_split, current_nr_blocks);
+ ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
+ sbitmap_vector_zero (ancestor_edges, current_nr_blocks);
+
/* Compute probabilities, dominators, split_edges. */
for (bb = 0; bb < current_nr_blocks; bb++)
compute_dom_prob_ps (bb);
+
+ /* Clean up ->aux used for the EDGE_TO_BIT mapping. We don't need it
+ anymore, and we don't want stale aux values duplicated into newly
+ created edges. */
+ FOR_EACH_BB (block)
+ {
+ if (CONTAINING_RGN (block->index) != rgn)
+ continue;
+ FOR_EACH_EDGE (e, ei, block->succs)
+ e->aux = NULL;
+ }
}
/* Now we can schedule all blocks. */
for (bb = 0; bb < current_nr_blocks; bb++)
{
+ basic_block first_bb, last_bb, curr_bb;
rtx head, tail;
int b = BB_TO_BLOCK (bb);
- get_block_head_tail (b, &head, &tail);
+ first_bb = EBB_FIRST_BB (bb);
+ last_bb = EBB_LAST_BB (bb);
+
+ get_ebb_head_tail (first_bb, last_bb, &head, &tail);
if (no_real_insns_p (head, tail))
- continue;
+ {
+ gcc_assert (first_bb == last_bb);
+ continue;
+ }
current_sched_info->prev_head = PREV_INSN (head);
current_sched_info->next_tail = NEXT_INSN (tail);
/* rm_other_notes only removes notes which are _inside_ the
block---that is, it won't remove notes before the first real insn
- or after the last real insn of the block. So if the first insn
+ or after the last real insn of the block. So if the first insn
has a REG_SAVE_NOTE which would otherwise be emitted before the
insn, it is redundant with the note before the start of the
block, and so we have to take it out. */
for (note = REG_NOTES (head); note; note = XEXP (note, 1))
if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
- {
- remove_note (head, note);
- note = XEXP (note, 1);
- remove_note (head, note);
- }
+ remove_note (head, note);
}
+ else
+ /* This means that the first block in the ebb is empty.
+ That should be impossible: there should at least be the
+ recovery check that caused the splitting. */
+ gcc_unreachable ();
/* Remove remaining note insns from the block, save them in
note_list. These notes are restored at the end of
schedule_block (). */
rm_other_notes (head, tail);
+ unlink_bb_notes (first_bb, last_bb);
+
target_bb = bb;
- current_sched_info->queue_must_finish_empty
- = current_nr_blocks > 1 && !flag_schedule_interblock;
+ gcc_assert (flag_schedule_interblock || current_nr_blocks == 1);
+ current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;
- schedule_block (b, rgn_n_insns);
+ curr_bb = first_bb;
+ schedule_block (&curr_bb, rgn_n_insns);
+ gcc_assert (EBB_FIRST_BB (bb) == first_bb);
sched_rgn_n_insns += sched_n_insns;
- /* Update target block boundaries. */
- if (head == BLOCK_HEAD (b))
- BLOCK_HEAD (b) = current_sched_info->head;
- if (tail == BLOCK_END (b))
- BLOCK_END (b) = current_sched_info->tail;
-
/* Clean up. */
if (current_nr_blocks > 1)
{
free (candidate_table);
free (bblst_table);
- free (bitlst_table);
+ free (edgelst_table);
}
}
/* Sanity check: verify that all region insns were scheduled. */
- if (sched_rgn_n_insns != rgn_n_insns)
- abort ();
+ gcc_assert (sched_rgn_n_insns == rgn_n_insns);
/* Restore line notes. */
if (write_symbols != NO_DEBUG)
for (bb = 0; bb < current_nr_blocks; bb++)
{
rtx head, tail;
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
+
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
restore_line_notes (head, tail);
}
}
/* Done with this region. */
- free_pending_lists ();
-
- finish_deps_global ();
-
- free (bb_deps);
if (current_nr_blocks > 1)
{
- int i;
-
free (prob);
- for (i = 0; i < current_nr_blocks; ++i)
- {
- free (dom[i]);
- free (pot_split[i]);
- free (ancestor_edges[i]);
- }
- free (dom);
- free (edge_to_bit);
+ sbitmap_vector_free (dom);
+ sbitmap_vector_free (pot_split);
+ sbitmap_vector_free (ancestor_edges);
free (rgn_edges);
- free (pot_split);
- free (ancestor_edges);
}
}
/* Initialize data structures for region scheduling. */
static void
-init_regions ()
+init_regions (void)
{
sbitmap blocks;
int rgn;
nr_regions = 0;
- rgn_table = (region *) xmalloc ((n_basic_blocks) * sizeof (region));
- rgn_bb_table = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
- block_to_bb = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
- containing_rgn = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
-
- blocks = sbitmap_alloc (n_basic_blocks);
+ rgn_table = 0;
+ rgn_bb_table = 0;
+ block_to_bb = 0;
+ containing_rgn = 0;
+ extend_regions ();
/* Compute regions for scheduling. */
if (reload_completed
- || n_basic_blocks == 1
- || !flag_schedule_interblock)
+ || n_basic_blocks == NUM_FIXED_BLOCKS + 1
+ || !flag_schedule_interblock
+ || is_cfg_nonregular ())
{
find_single_block_region ();
}
else
{
- /* Verify that a 'good' control flow graph can be built. */
- if (is_cfg_nonregular ())
- {
- find_single_block_region ();
- }
- else
- {
- sbitmap *dom;
- struct edge_list *edge_list;
-
- dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
-
- /* The scheduler runs after flow; therefore, we can't blindly call
- back into find_basic_blocks since doing so could invalidate the
- info in global_live_at_start.
-
- Consider a block consisting entirely of dead stores; after life
- analysis it would be a block of NOTE_INSN_DELETED notes. If
- we call find_basic_blocks again, then the block would be removed
- entirely and invalidate our the register live information.
+ /* Compute the dominators and post dominators. */
+ calculate_dominance_info (CDI_DOMINATORS);
- We could (should?) recompute register live information. Doing
- so may even be beneficial. */
- edge_list = create_edge_list ();
+ /* Find regions. */
+ find_rgns ();
- /* Compute the dominators and post dominators. */
- calculate_dominance_info (NULL, dom, CDI_DOMINATORS);
+ if (sched_verbose >= 3)
+ debug_regions ();
- /* build_control_flow will return nonzero if it detects unreachable
- blocks or any other irregularity with the cfg which prevents
- cross block scheduling. */
- if (build_control_flow (edge_list) != 0)
- find_single_block_region ();
- else
- find_rgns (edge_list, dom);
-
- if (sched_verbose >= 3)
- debug_regions ();
-
- /* We are done with flow's edge list. */
- free_edge_list (edge_list);
-
- /* For now. This will move as more and more of haifa is converted
- to using the cfg code in flow.c. */
- free (dom);
- }
+ /* For now. This will move as more and more of haifa is converted
+ to using the cfg code in flow.c. */
+ free_dominance_info (CDI_DOMINATORS);
}
+ RGN_BLOCKS (nr_regions) = RGN_BLOCKS (nr_regions - 1) +
+ RGN_NR_BLOCKS (nr_regions - 1);
- deaths_in_region = (int *) xmalloc (sizeof (int) * nr_regions);
- /* Remove all death notes from the subroutine. */
- for (rgn = 0; rgn < nr_regions; rgn++)
+ if (CHECK_DEAD_NOTES)
{
- int b;
-
- sbitmap_zero (blocks);
- for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b)
- SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn) + b]);
+ blocks = sbitmap_alloc (last_basic_block);
+ deaths_in_region = XNEWVEC (int, nr_regions);
+ /* Remove all death notes from the subroutine. */
+ for (rgn = 0; rgn < nr_regions; rgn++)
+ check_dead_notes1 (rgn, blocks);
- deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
+ sbitmap_free (blocks);
}
-
- sbitmap_free (blocks);
+ else
+ count_or_remove_death_notes (NULL, 1);
}
-/* The one entry point in this file. DUMP_FILE is the dump file for
- this pass. */
+/* The one entry point in this file. */
void
-schedule_insns (dump_file)
- FILE *dump_file;
+schedule_insns (void)
{
sbitmap large_region_blocks, blocks;
int rgn;
int any_large_regions;
+ basic_block bb;
/* Taking care of this degenerate case makes the rest of
this code simpler. */
- if (n_basic_blocks == 0)
+ if (n_basic_blocks == NUM_FIXED_BLOCKS)
return;
nr_inter = 0;
nr_spec = 0;
- sched_init (dump_file);
+ /* We need current_sched_info in init_dependency_caches, which is
+ invoked via sched_init. */
+ current_sched_info = &region_sched_info;
+
+ sched_init ();
+
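+ /* PARAM_MIN_SPEC_PROB is expressed as a percentage; rescale it to the
+ REG_BR_PROB_BASE scale used for edge probabilities. */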
+ min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
+ / 100);
init_regions ();
- current_sched_info = &region_sched_info;
+ /* EBB_HEAD is a region-scope structure, but we reallocate it for
+ each region so it is only as large as the current region requires. */
+ ebb_head = 0;
/* Schedule every region in the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
schedule_region (rgn);
+
+ free (ebb_head);
/* Update life analysis for the subroutine. Do single block regions
first so that we can verify that live_at_start didn't change. Then
- do all other blocks. */
+ do all other blocks. */
/* ??? There is an outside possibility that update_life_info, or more
- to the point propagate_block, could get called with non-zero flags
+ to the point propagate_block, could get called with nonzero flags
more than once for one basic block. This would be kinda bad if it
were to happen, since REG_INFO would be accumulated twice for the
block, and we'd have twice the REG_DEAD notes.
that live_at_start should change at region heads. Not sure what the
best way to test for this kind of thing... */
+ if (current_sched_info->flags & DETACH_LIFE_INFO)
+ /* This flag can be set either by the target or by ENABLE_CHECKING. */
+ attach_life_info ();
+
allocate_reg_life_data ();
- compute_bb_for_insn (get_max_uid ());
any_large_regions = 0;
- large_region_blocks = sbitmap_alloc (n_basic_blocks);
- sbitmap_ones (large_region_blocks);
-
- blocks = sbitmap_alloc (n_basic_blocks);
-
- for (rgn = 0; rgn < nr_regions; rgn++)
- if (RGN_NR_BLOCKS (rgn) > 1)
+ large_region_blocks = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (large_region_blocks);
+ FOR_EACH_BB (bb)
+ SET_BIT (large_region_blocks, bb->index);
+
+ blocks = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (blocks);
+
+ /* Update life information. For regions consisting of multiple blocks
+ we've possibly done interblock scheduling that affects global liveness.
+ For regions consisting of single blocks we need to do only local
+ liveness. */
+ for (rgn = 0; rgn < nr_regions; rgn++)
+ if (RGN_NR_BLOCKS (rgn) > 1
+ /* Or the only block of this region has been split. */
+ || RGN_HAS_REAL_EBB (rgn)
+ /* New blocks (e.g. recovery blocks) should be processed
+ as parts of large regions. */
+ || !glat_start[rgn_bb_table[RGN_BLOCKS (rgn)]])
any_large_regions = 1;
else
{
- sbitmap_zero (blocks);
SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
RESET_BIT (large_region_blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
-
- /* Don't update reg info after reload, since that affects
- regs_ever_live, which should not change after reload. */
- update_life_info (blocks, UPDATE_LIFE_LOCAL,
- (reload_completed ? PROP_DEATH_NOTES
- : PROP_DEATH_NOTES | PROP_REG_INFO));
-
-#ifndef HAVE_conditional_execution
- /* ??? REG_DEAD notes only exist for unconditional deaths. We need
- a count of the conditional plus unconditional deaths for this to
- work out. */
- /* In the single block case, the count of registers that died should
- not have changed during the schedule. */
- if (count_or_remove_death_notes (blocks, 0) != deaths_in_region[rgn])
- abort ();
-#endif
}
+ /* Don't update reg info after reload, since that affects
+ regs_ever_live, which should not change after reload. */
+ update_life_info (blocks, UPDATE_LIFE_LOCAL,
+ (reload_completed ? PROP_DEATH_NOTES
+ : (PROP_DEATH_NOTES | PROP_REG_INFO)));
if (any_large_regions)
{
update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL,
- PROP_DEATH_NOTES | PROP_REG_INFO);
+ (reload_completed ? PROP_DEATH_NOTES
+ : (PROP_DEATH_NOTES | PROP_REG_INFO)));
+
+#ifdef ENABLE_CHECKING
+ check_reg_live (true);
+#endif
+ }
+
+ if (CHECK_DEAD_NOTES)
+ {
+ /* Verify the counts of REG_DEAD notes in single basic block
+ regions. */
+ for (rgn = 0; rgn < nr_regions; rgn++)
+ if (RGN_NR_BLOCKS (rgn) == 1)
+ {
+ sbitmap_zero (blocks);
+ SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
+
+ gcc_assert (deaths_in_region[rgn]
+ == count_or_remove_death_notes (blocks, 0));
+ }
+ free (deaths_in_region);
}
/* Reposition the prologue and epilogue notes in case we moved the
nr_inter, nr_spec);
}
else
- {
- if (nr_inter > 0)
- abort ();
- }
+ gcc_assert (nr_inter <= 0);
fprintf (sched_dump, "\n\n");
}
sched_finish ();
- if (edge_table)
+ sbitmap_free (blocks);
+ sbitmap_free (large_region_blocks);
+}
+
+/* INSN has been added to/removed from current region. */
+static void
+add_remove_insn (rtx insn, int remove_p)
+{
+ if (!remove_p)
+ rgn_n_insns++;
+ else
+ rgn_n_insns--;
+
+ if (INSN_BB (insn) == target_bb)
{
- free (edge_table);
- edge_table = NULL;
+ if (!remove_p)
+ target_n_insns++;
+ else
+ target_n_insns--;
}
+}
- if (in_edges)
+/* Extend internal data structures. */
+static void
+extend_regions (void)
+{
+ rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks);
+ rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, n_basic_blocks);
+ block_to_bb = XRESIZEVEC (int, block_to_bb, last_basic_block);
+ containing_rgn = XRESIZEVEC (int, containing_rgn, last_basic_block);
+}
+
+/* BB was added to ebb after AFTER. */
+static void
+add_block1 (basic_block bb, basic_block after)
+{
+ extend_regions ();
+
+ if (after == 0 || after == EXIT_BLOCK_PTR)
{
- free (in_edges);
- in_edges = NULL;
+ int i;
+
+ i = RGN_BLOCKS (nr_regions);
+ /* I - first free position in rgn_bb_table. */
+
+ rgn_bb_table[i] = bb->index;
+ RGN_NR_BLOCKS (nr_regions) = 1;
+ RGN_DONT_CALC_DEPS (nr_regions) = after == EXIT_BLOCK_PTR;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
+ CONTAINING_RGN (bb->index) = nr_regions;
+ BLOCK_TO_BB (bb->index) = 0;
+
+ nr_regions++;
+
+ RGN_BLOCKS (nr_regions) = i + 1;
+
+ if (CHECK_DEAD_NOTES)
+ {
+ sbitmap blocks = sbitmap_alloc (last_basic_block);
+ deaths_in_region = xrealloc (deaths_in_region, nr_regions *
+ sizeof (*deaths_in_region));
+
+ check_dead_notes1 (nr_regions - 1, blocks);
+
+ sbitmap_free (blocks);
+ }
+ }
+ else
+ {
+ int i, pos;
+
+ /* We need to fix rgn_table, block_to_bb, containing_rgn
+ and ebb_head. */
+
+ BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index);
+
+ /* We extend ebb_head to one more position to
+ easily find the last position of the last ebb in
+ the current region. Thus, ebb_head[BLOCK_TO_BB (after) + 1]
+ is _always_ valid for access. */
+
+ i = BLOCK_TO_BB (after->index) + 1;
+ for (pos = ebb_head[i]; rgn_bb_table[pos] != after->index; pos--);
+ pos++;
+ gcc_assert (pos > ebb_head[i - 1]);
+ /* i - ebb right after "AFTER". */
+ /* ebb_head[i] - VALID. */
+
+ /* Source position: ebb_head[i]
+ Destination position: ebb_head[i] + 1
+ Last position:
+ RGN_BLOCKS (nr_regions) - 1
+ Number of elements to copy: (last_position) - (source_position) + 1
+ */
+
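+ /* For example (illustrative numbers only): with pos == 3 and
+ RGN_BLOCKS (nr_regions) == 6, entries 3..5 of rgn_bb_table are shifted
+ to 4..6, freeing slot 3 for BB. */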
+ memmove (rgn_bb_table + pos + 1,
+ rgn_bb_table + pos,
+ ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1)
+ * sizeof (*rgn_bb_table));
+
+ rgn_bb_table[pos] = bb->index;
+
+ for (; i <= current_nr_blocks; i++)
+ ebb_head [i]++;
+
+ i = CONTAINING_RGN (after->index);
+ CONTAINING_RGN (bb->index) = i;
+
+ RGN_HAS_REAL_EBB (i) = 1;
+
+ for (++i; i <= nr_regions; i++)
+ RGN_BLOCKS (i)++;
+
+ /* We don't need to call check_dead_notes1 () because this new block
+ is just a split of the old. We don't want to count anything twice. */
}
- if (out_edges)
+}
+
+/* Fix internal data after interblock movement of a jump instruction.
+ For the meaning of the parameters, please refer to
+ sched-int.h: struct sched_info: fix_recovery_cfg. */
+static void
+fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti)
+{
+ int old_pos, new_pos, i;
+
+ BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi);
+
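+ /* Find CHECK_BB_NEXTI's old slot by scanning CHECK_BBI's ebb backwards,
+ and the slot right after BBI in BBI's ebb, which becomes its new slot. */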
+ for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1;
+ rgn_bb_table[old_pos] != check_bb_nexti;
+ old_pos--);
+ gcc_assert (old_pos > ebb_head[BLOCK_TO_BB (check_bbi)]);
+
+ for (new_pos = ebb_head[BLOCK_TO_BB (bbi) + 1] - 1;
+ rgn_bb_table[new_pos] != bbi;
+ new_pos--);
+ new_pos++;
+ gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]);
+
+ gcc_assert (new_pos < old_pos);
+
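+ /* Shift the entries in [NEW_POS, OLD_POS) up by one slot and drop
+ CHECK_BB_NEXTI into the freed slot right after BBI. */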
+ memmove (rgn_bb_table + new_pos + 1,
+ rgn_bb_table + new_pos,
+ (old_pos - new_pos) * sizeof (*rgn_bb_table));
+
+ rgn_bb_table[new_pos] = check_bb_nexti;
+
+ for (i = BLOCK_TO_BB (bbi) + 1; i <= BLOCK_TO_BB (check_bbi); i++)
+ ebb_head[i]++;
+}
+
+/* Return the next block in the ebb chain. For the meaning of the parameters
+ see sched-int.h: struct sched_info: advance_target_bb. */
+static basic_block
+advance_target_bb (basic_block bb, rtx insn)
+{
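+ /* If INSN is given there is nothing to advance to. Otherwise step to
+ BB's successor, which must still map to the current target block
+ (checked by the assertion below). */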
+ if (insn)
+ return 0;
+
+ gcc_assert (BLOCK_TO_BB (bb->index) == target_bb
+ && BLOCK_TO_BB (bb->next_bb->index) == target_bb);
+ return bb->next_bb;
+}
+
+/* Count and remove death notes in region RGN, recording the result in
+ deaths_in_region[RGN]. BLOCKS is a scratch bitmap that receives the
+ indices of the region's blocks. */
+static void
+check_dead_notes1 (int rgn, sbitmap blocks)
+{
+ int b;
+
+ sbitmap_zero (blocks);
+ for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b)
+ SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn) + b]);
+
+ deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
+}
+
+#ifdef ENABLE_CHECKING
+/* Return nonzero if BB is the head or the leaf (depending on LEAF_P) block
+ of the current region. For more information see
+ sched-int.h: struct sched_info: region_head_or_leaf_p. */
+static int
+region_head_or_leaf_p (basic_block bb, int leaf_p)
+{
+ if (!leaf_p)
+ return bb->index == rgn_bb_table[RGN_BLOCKS (CONTAINING_RGN (bb->index))];
+ else
{
- free (out_edges);
- out_edges = NULL;
+ int i;
+ edge e;
+ edge_iterator ei;
+
+ i = CONTAINING_RGN (bb->index);
+
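+ /* BB is a leaf unless some successor other than the exit block or BB
+ itself lies in the same region. */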
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (e->dest != EXIT_BLOCK_PTR
+ && CONTAINING_RGN (e->dest->index) == i
+ /* Except for a self-loop. */
+ && e->dest != bb)
+ return 0;
+
+ return 1;
}
+}
+#endif /* ENABLE_CHECKING */
- sbitmap_free (blocks);
- sbitmap_free (large_region_blocks);
+#endif
+\f
+static bool
+gate_handle_sched (void)
+{
+#ifdef INSN_SCHEDULING
+ return flag_schedule_insns;
+#else
+ return 0;
+#endif
+}
+
+/* Run instruction scheduler. */
+static unsigned int
+rest_of_handle_sched (void)
+{
+#ifdef INSN_SCHEDULING
+ /* Do control and data sched analysis,
+ and write some of the results to the dump file. */
+
+ schedule_insns ();
+#endif
+ return 0;
+}
- free (deaths_in_region);
+static bool
+gate_handle_sched2 (void)
+{
+#ifdef INSN_SCHEDULING
+ return optimize > 0 && flag_schedule_insns_after_reload;
+#else
+ return 0;
+#endif
}
+
+/* Run second scheduling pass after reload. */
+static unsigned int
+rest_of_handle_sched2 (void)
+{
+#ifdef INSN_SCHEDULING
+ /* Do control and data sched analysis again,
+ and write some more of the results to the dump file. */
+
+ split_all_insns (1);
+
+ if (flag_sched2_use_superblocks || flag_sched2_use_traces)
+ {
+ schedule_ebbs ();
+ /* No liveness updating code yet, but it should be easy to do.
+ reg-stack recomputes the liveness when needed for now. */
+ count_or_remove_death_notes (NULL, 1);
+ cleanup_cfg (CLEANUP_EXPENSIVE);
+ }
+ else
+ schedule_insns ();
#endif
+ return 0;
+}
+
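+/* Pass descriptors for the two scheduling passes. They do not chain
+ themselves; as a rough sketch (assuming the usual registration in
+ init_optimization_passes () in passes.c), they are wired into the
+ pass list along the lines of:
+
+ NEXT_PASS (pass_sched); (before register allocation)
+ ...
+ NEXT_PASS (pass_sched2); (after reload) */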
+struct tree_opt_pass pass_sched =
+{
+ "sched1", /* name */
+ gate_handle_sched, /* gate */
+ rest_of_handle_sched, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_SCHED, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func |
+ TODO_ggc_collect, /* todo_flags_finish */
+ 'S' /* letter */
+};
+
+struct tree_opt_pass pass_sched2 =
+{
+ "sched2", /* name */
+ gate_handle_sched2, /* gate */
+ rest_of_handle_sched2, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_SCHED2, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func |
+ TODO_ggc_collect, /* todo_flags_finish */
+ 'R' /* letter */
+};
+