/* Instruction scheduling pass.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
-This file is part of GNU CC.
+This file is part of GCC.
-GNU CC is free software; you can redistribute it and/or modify it
-under the terms of the GNU General Public License as published by the
-Free Software Foundation; either version 2, or (at your option) any
-later version.
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
-GNU CC is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA. */
/* This pass implements list scheduling within basic blocks. It is
\f
#include "config.h"
#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "except.h"
#include "toplev.h"
#include "recog.h"
+#include "cfglayout.h"
#include "sched-int.h"
+#include "target.h"
+
+/* Define when we want to count REG_DEAD notes before and after scheduling
+   for sanity checking.  We can't do that when conditional execution is used,
+   as REG_DEAD notes exist only for unconditional deaths.  */
+
+#if !defined (HAVE_conditional_execution) && defined (ENABLE_CHECKING)
+#define CHECK_DEAD_NOTES 1
+#else
+#define CHECK_DEAD_NOTES 0
+#endif
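+
+/* A minimal sketch of the intended use (hypothetical caller; the real
+   uses are in init_regions and schedule_insns below): snapshot the
+   REG_DEAD counts when the regions are built, then verify them after
+   scheduling:
+
+     if (CHECK_DEAD_NOTES)
+       deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
+     ...
+     if (CHECK_DEAD_NOTES
+         && deaths_in_region[rgn] != count_or_remove_death_notes (blocks, 0))
+       abort ();  */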
+
#ifdef INSN_SCHEDULING
/* Some accessor macros for h_i_d members only used within this file. */
void debug_regions PARAMS ((void));
static void find_single_block_region PARAMS ((void));
-static void find_rgns PARAMS ((struct edge_list *, sbitmap *));
+static void find_rgns PARAMS ((struct edge_list *, dominance_info));
static int too_large PARAMS ((int, int *, int *));
extern void debug_live PARAMS ((int, int));
/* The mapping from bb to block. */
#define BB_TO_BLOCK(bb) (rgn_bb_table[current_blocks + (bb)])
-/* Bit vectors and bitset operations are needed for computations on
- the control flow graph. */
-
-typedef unsigned HOST_WIDE_INT *bitset;
typedef struct
{
int *first_member; /* Pointer to the list start in bitlst_table. */
bitlst;
static int bitlst_table_last;
-static int bitlst_table_size;
static int *bitlst_table;
-static char bitset_member PARAMS ((bitset, int, int));
-static void extract_bitlst PARAMS ((bitset, int, int, bitlst *));
+static void extract_bitlst PARAMS ((sbitmap, bitlst *));
/* Target info declarations.
void debug_candidate PARAMS ((int));
void debug_candidates PARAMS ((int));
-/* Bit-set of bbs, where bit 'i' stands for bb 'i'. */
-typedef bitset bbset;
-
-/* Number of words of the bbset. */
-static int bbset_size;
-
-/* Dominators array: dom[i] contains the bbset of dominators of
+/* Dominators array: dom[i] contains the sbitmap of dominators of
bb i in the region. */
-static bbset *dom;
+static sbitmap *dom;
/* bb 0 is the only region entry. */
#define IS_RGN_ENTRY(bb) (!bb)
/* Is bb_src dominated by bb_trg?  */
#define IS_DOMINATED(bb_src, bb_trg) \
-( bitset_member (dom[bb_src], bb_trg, bbset_size) )
+( TEST_BIT (dom[bb_src], bb_trg) )
/* Probability: Prob[i] is a float in [0, 1] which is the probability
of bb i relative to the region entry. */
prob[bb_trg])))
/* Bit-set of edges, where bit i stands for edge i. */
-typedef bitset edgeset;
+typedef sbitmap edgeset;
/* Number of edges in the region. */
static int rgn_nr_edges;
/* Array of size rgn_nr_edges. */
static int *rgn_edges;
-/* Number of words in an edgeset. */
-static int edgeset_size;
-
-/* Number of bits in an edgeset. */
-static int edgeset_bitsize;
/* Mapping from each edge in the graph to its number in the rgn. */
static int *edge_to_bit;
static void compute_dom_prob_ps PARAMS ((int));
-#define ABS_VALUE(x) (((x)<0)?(-(x)):(x))
#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
/* Parameters affecting the decision of rank_for_schedule().
- ??? Nope. But MIN_PROBABILITY is used in copmute_trg_info. */
-#define MIN_DIFF_PRIORITY 2
+ ??? Nope. But MIN_PROBABILITY is used in compute_trg_info. */
#define MIN_PROBABILITY 40
-#define MIN_PROB_DIFF 10
/* Speculative scheduling functions. */
static int check_live_1 PARAMS ((int, rtx));
static int is_prisky PARAMS ((rtx, int, int));
static int is_exception_free PARAMS ((rtx, int, int));
+static bool sets_likely_spilled PARAMS ((rtx));
+static void sets_likely_spilled_1 PARAMS ((rtx, rtx, void *));
static void add_branch_dependences PARAMS ((rtx, rtx));
static void compute_block_backward_dependences PARAMS ((int));
void debug_dependencies PARAMS ((void));
static void init_regions PARAMS ((void));
static void schedule_region PARAMS ((int));
+static rtx concat_INSN_LIST PARAMS ((rtx, rtx));
+static void concat_insn_mem_list PARAMS ((rtx, rtx, rtx *, rtx *));
static void propagate_deps PARAMS ((int, struct deps *));
static void free_pending_lists PARAMS ((void));
static int
is_cfg_nonregular ()
{
- int b;
+ basic_block b;
rtx insn;
RTX_CODE code;
/* If we have exception handlers, then we consider the cfg not well
structured. ?!? We should be able to handle this now that flow.c
computes an accurate cfg for EH. */
- if (exception_handler_labels)
+ if (current_function_has_exception_handlers ())
return 1;
/* If we have non-jumping insns which refer to labels, then we consider
the cfg not well structured. */
/* Check for labels referred to by something other than jumps.  */
- for (b = 0; b < n_basic_blocks; b++)
- for (insn = BLOCK_HEAD (b);; insn = NEXT_INSN (insn))
+ FOR_EACH_BB (b)
+ for (insn = b->head;; insn = NEXT_INSN (insn))
{
code = GET_CODE (insn);
if (GET_RTX_CLASS (code) == 'i' && code != JUMP_INSN)
return 1;
}
- if (insn == BLOCK_END (b))
+ if (insn == b->end)
break;
}
struct edge_list *edge_list;
{
int i, unreachable, num_edges;
+ basic_block b;
/* This already accounts for entry/exit edges. */
num_edges = NUM_EDGES (edge_list);
test is redundant with the one in find_rgns, but it's much
cheaper to go ahead and catch the trivial case here. */
unreachable = 0;
- for (i = 0; i < n_basic_blocks; i++)
+ FOR_EACH_BB (b)
{
- basic_block b = BASIC_BLOCK (i);
-
if (b->pred == NULL
|| (b->pred->src == b
&& b->pred->pred_next == NULL))
}
/* ??? We can kill these soon. */
- in_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
- out_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
+ in_edges = (int *) xcalloc (last_basic_block, sizeof (int));
+ out_edges = (int *) xcalloc (last_basic_block, sizeof (int));
edge_table = (haifa_edge *) xcalloc (num_edges, sizeof (haifa_edge));
nr_edges = 0;
}
}
-/* BITSET macros for operations on the control flow graph. */
-
-/* Compute bitwise union of two bitsets. */
-#define BITSET_UNION(set1, set2, len) \
-do { register bitset tp = set1, sp = set2; \
- register int i; \
- for (i = 0; i < len; i++) \
- *(tp++) |= *(sp++); } while (0)
-
-/* Compute bitwise intersection of two bitsets. */
-#define BITSET_INTER(set1, set2, len) \
-do { register bitset tp = set1, sp = set2; \
- register int i; \
- for (i = 0; i < len; i++) \
- *(tp++) &= *(sp++); } while (0)
-
-/* Compute bitwise difference of two bitsets. */
-#define BITSET_DIFFER(set1, set2, len) \
-do { register bitset tp = set1, sp = set2; \
- register int i; \
- for (i = 0; i < len; i++) \
- *(tp++) &= ~*(sp++); } while (0)
-
-/* Inverts every bit of bitset 'set'. */
-#define BITSET_INVERT(set, len) \
-do { register bitset tmpset = set; \
- register int i; \
- for (i = 0; i < len; i++, tmpset++) \
- *tmpset = ~*tmpset; } while (0)
-
-/* Turn on the index'th bit in bitset set. */
-#define BITSET_ADD(set, index, len) \
-{ \
- if (index >= HOST_BITS_PER_WIDE_INT * len) \
- abort (); \
- else \
- set[index/HOST_BITS_PER_WIDE_INT] |= \
- ((unsigned HOST_WIDE_INT) 1) << (index % HOST_BITS_PER_WIDE_INT); \
-}
-
-/* Turn off the index'th bit in set. */
-#define BITSET_REMOVE(set, index, len) \
-{ \
- if (index >= HOST_BITS_PER_WIDE_INT * len) \
- abort (); \
- else \
- set[index/HOST_BITS_PER_WIDE_INT] &= \
- ~(((unsigned HOST_WIDE_INT) 1) << (index % HOST_BITS_PER_WIDE_INT)); \
-}
-
-/* Check if the index'th bit in bitset set is on. */
-
-static char
-bitset_member (set, index, len)
- bitset set;
- int index, len;
-{
- if (index >= HOST_BITS_PER_WIDE_INT * len)
- abort ();
- return ((set[index / HOST_BITS_PER_WIDE_INT] &
- ((unsigned HOST_WIDE_INT) 1) << (index % HOST_BITS_PER_WIDE_INT))
- ? 1 : 0);
-}
-
/* Translate a bit-set SET to a list BL of the bit-set members. */
static void
-extract_bitlst (set, len, bitlen, bl)
- bitset set;
- int len;
- int bitlen;
+extract_bitlst (set, bl)
+ sbitmap set;
bitlst *bl;
{
- int i, j, offset;
- unsigned HOST_WIDE_INT word;
+ int i;
/* bitlst table space is reused in each call to extract_bitlst.  */
bitlst_table_last = 0;
bl->nr_members = 0;
-  /* Iterate over each word in the bitset.  */
+  /* Iterate over each set bit in the bitset.  */
- for (i = 0; i < len; i++)
- {
- word = set[i];
- offset = i * HOST_BITS_PER_WIDE_INT;
-
- /* Iterate over each bit in the word, but do not
- go beyond the end of the defined bits. */
- for (j = 0; offset < bitlen && word; j++)
- {
- if (word & 1)
- {
- bitlst_table[bitlst_table_last++] = offset;
- (bl->nr_members)++;
- }
- word >>= 1;
- ++offset;
- }
- }
+ EXECUTE_IF_SET_IN_SBITMAP (set, 0, i,
+ {
+ bitlst_table[bitlst_table_last++] = i;
+ (bl->nr_members)++;
+ });
}
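+
+/* Usage sketch (hypothetical caller).  Assuming BL->first_member has
+   been pointed at the shared table, as the struct comment above
+   describes:
+
+     bitlst bl;
+     extract_bitlst (set, &bl);
+     for (i = 0; i < bl.nr_members; i++)
+       ... bl.first_member[i] ...
+
+   The table space is reused on every call, so each extraction
+   invalidates the previous result.  */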
static void
find_single_block_region ()
{
- int i;
+ basic_block bb;
- for (i = 0; i < n_basic_blocks; i++)
+ nr_regions = 0;
+
+ FOR_EACH_BB (bb)
{
- rgn_bb_table[i] = i;
- RGN_NR_BLOCKS (i) = 1;
- RGN_BLOCKS (i) = i;
- CONTAINING_RGN (i) = i;
- BLOCK_TO_BB (i) = 0;
+ rgn_bb_table[nr_regions] = bb->index;
+ RGN_NR_BLOCKS (nr_regions) = 1;
+ RGN_BLOCKS (nr_regions) = nr_regions;
+ CONTAINING_RGN (bb->index) = nr_regions;
+ BLOCK_TO_BB (bb->index) = 0;
+ nr_regions++;
}
- nr_regions = n_basic_blocks;
}
/* Update number of blocks and the estimate for number of insns
/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
is still an inner loop. Put in max_hdr[blk] the header of the most inner
loop containing blk. */
-#define UPDATE_LOOP_RELATIONS(blk, hdr) \
-{ \
- if (max_hdr[blk] == -1) \
- max_hdr[blk] = hdr; \
- else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
- RESET_BIT (inner, hdr); \
- else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
- { \
- RESET_BIT (inner,max_hdr[blk]); \
- max_hdr[blk] = hdr; \
- } \
+#define UPDATE_LOOP_RELATIONS(blk, hdr) \
+{ \
+ if (max_hdr[blk] == -1) \
+ max_hdr[blk] = hdr; \
+ else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
+ RESET_BIT (inner, hdr); \
+ else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
+ { \
+ RESET_BIT (inner,max_hdr[blk]); \
+ max_hdr[blk] = hdr; \
+ } \
}
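+
+/* For example, if max_hdr[blk] is already h1 and blk is now seen under
+   another candidate header h2 with dfs_nr[h1] > dfs_nr[h2], then h1 was
+   discovered later in the DFS and is the more deeply nested header: h1
+   is kept and h2 is cleared from INNER, since a loop containing another
+   candidate header cannot be an inner loop.  */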
/* Find regions for interblock scheduling.
static void
find_rgns (edge_list, dom)
struct edge_list *edge_list;
- sbitmap *dom;
+ dominance_info dom;
{
int *max_hdr, *dfs_nr, *stack, *degree;
char no_loops = 1;
int count = 0, sp, idx = 0, current_edge = out_edges[0];
int num_bbs, num_insns, unreachable;
int too_large_failure;
+ basic_block bb;
/* Note if an edge has been passed. */
sbitmap passed;
/* Note if a block is a natural loop header. */
sbitmap header;
- /* Note if a block is an natural inner loop header. */
+ /* Note if a block is a natural inner loop header. */
sbitmap inner;
/* Note if a block is in the block queue. */
STACK, SP and DFS_NR are only used during the first traversal. */
/* Allocate and initialize variables for the first traversal. */
- max_hdr = (int *) xmalloc (n_basic_blocks * sizeof (int));
- dfs_nr = (int *) xcalloc (n_basic_blocks, sizeof (int));
+ max_hdr = (int *) xmalloc (last_basic_block * sizeof (int));
+ dfs_nr = (int *) xcalloc (last_basic_block, sizeof (int));
stack = (int *) xmalloc (nr_edges * sizeof (int));
- inner = sbitmap_alloc (n_basic_blocks);
+ inner = sbitmap_alloc (last_basic_block);
sbitmap_ones (inner);
- header = sbitmap_alloc (n_basic_blocks);
+ header = sbitmap_alloc (last_basic_block);
sbitmap_zero (header);
passed = sbitmap_alloc (nr_edges);
sbitmap_zero (passed);
- in_queue = sbitmap_alloc (n_basic_blocks);
+ in_queue = sbitmap_alloc (last_basic_block);
sbitmap_zero (in_queue);
- in_stack = sbitmap_alloc (n_basic_blocks);
+ in_stack = sbitmap_alloc (last_basic_block);
sbitmap_zero (in_stack);
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = 0; i < last_basic_block; i++)
max_hdr[i] = -1;
/* DFS traversal to find inner loops in the cfg. */
the entry node by placing a nonzero value in dfs_nr. Thus if
dfs_nr is zero for any block, then it must be unreachable. */
unreachable = 0;
- for (i = 0; i < n_basic_blocks; i++)
- if (dfs_nr[i] == 0)
+ FOR_EACH_BB (bb)
+ if (dfs_nr[bb->index] == 0)
{
unreachable = 1;
break;
to hold degree counts. */
degree = dfs_nr;
- for (i = 0; i < n_basic_blocks; i++)
- degree[i] = 0;
+ FOR_EACH_BB (bb)
+ degree[bb->index] = 0;
for (i = 0; i < num_edges; i++)
{
edge e = INDEX_EDGE (edge_list, i);
if (no_loops)
SET_BIT (header, 0);
- /* Second travsersal:find reducible inner loops and topologically sort
+  /* Second traversal: find reducible inner loops and topologically sort
     blocks of each region.  */
queue = (int *) xmalloc (n_basic_blocks * sizeof (int));
/* Find blocks which are inner loop headers. We still have non-reducible
loops to consider at this point. */
- for (i = 0; i < n_basic_blocks; i++)
+ FOR_EACH_BB (bb)
{
- if (TEST_BIT (header, i) && TEST_BIT (inner, i))
+ if (TEST_BIT (header, bb->index) && TEST_BIT (inner, bb->index))
{
edge e;
- int j;
+ basic_block jbb;
/* Now check that the loop is reducible. We do this separate
from finding inner loops so that we do not find a reducible
If there exists a block that is not dominated by the loop
header, then the block is reachable from outside the loop
and thus the loop is not a natural loop. */
- for (j = 0; j < n_basic_blocks; j++)
+ FOR_EACH_BB (jbb)
{
/* First identify blocks in the loop, except for the loop
entry block. */
- if (i == max_hdr[j] && i != j)
+ if (bb->index == max_hdr[jbb->index] && bb != jbb)
{
/* Now verify that the block is dominated by the loop
header. */
- if (!TEST_BIT (dom[j], i))
+ if (!dominated_by_p (dom, jbb, bb))
break;
}
}
/* If we exited the loop early, then BB is the header of
a non-reducible loop and we should quit processing it
now. */
- if (j != n_basic_blocks)
+ if (jbb != EXIT_BLOCK_PTR)
continue;
/* BB is a header of an inner loop, or block 0 in a subroutine
with no loops at all. */
head = tail = -1;
too_large_failure = 0;
- loop_head = max_hdr[i];
+ loop_head = max_hdr[bb->index];
/* Decrease degree of all I's successors for topological
ordering. */
- for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next)
+ for (e = bb->succ; e; e = e->succ_next)
if (e->dest != EXIT_BLOCK_PTR)
--degree[e->dest->index];
/* Estimate # insns, and count # blocks in the region. */
num_bbs = 1;
- num_insns = (INSN_LUID (BLOCK_END (i))
- - INSN_LUID (BLOCK_HEAD (i)));
+ num_insns = (INSN_LUID (bb->end)
+ - INSN_LUID (bb->head));
/* Find all loop latches (blocks with back edges to the loop
header) or all the leaf blocks if the cfg has no loops.
Place those blocks into the queue. */
if (no_loops)
{
- for (j = 0; j < n_basic_blocks; j++)
+ FOR_EACH_BB (jbb)
/* Leaf nodes have only a single successor which must
be EXIT_BLOCK. */
- if (BASIC_BLOCK (j)->succ
- && BASIC_BLOCK (j)->succ->dest == EXIT_BLOCK_PTR
- && BASIC_BLOCK (j)->succ->succ_next == NULL)
+ if (jbb->succ
+ && jbb->succ->dest == EXIT_BLOCK_PTR
+ && jbb->succ->succ_next == NULL)
{
- queue[++tail] = j;
- SET_BIT (in_queue, j);
+ queue[++tail] = jbb->index;
+ SET_BIT (in_queue, jbb->index);
- if (too_large (j, &num_bbs, &num_insns))
+ if (too_large (jbb->index, &num_bbs, &num_insns))
{
too_large_failure = 1;
break;
{
edge e;
- for (e = BASIC_BLOCK (i)->pred; e; e = e->pred_next)
+ for (e = bb->pred; e; e = e->pred_next)
{
if (e->src == ENTRY_BLOCK_PTR)
continue;
node = e->src->index;
- if (max_hdr[node] == loop_head && node != i)
+ if (max_hdr[node] == loop_head && node != bb->index)
{
/* This is a loop latch. */
queue[++tail] = node;
tail = -1;
break;
}
- else if (!TEST_BIT (in_queue, node) && node != i)
+ else if (!TEST_BIT (in_queue, node) && node != bb->index)
{
queue[++tail] = node;
SET_BIT (in_queue, node);
if (tail >= 0 && !too_large_failure)
{
/* Place the loop header into list of region blocks. */
- degree[i] = -1;
- rgn_bb_table[idx] = i;
+ degree[bb->index] = -1;
+ rgn_bb_table[idx] = bb->index;
RGN_NR_BLOCKS (nr_regions) = num_bbs;
RGN_BLOCKS (nr_regions) = idx++;
- CONTAINING_RGN (i) = nr_regions;
- BLOCK_TO_BB (i) = count = 0;
+ CONTAINING_RGN (bb->index) = nr_regions;
+ BLOCK_TO_BB (bb->index) = count = 0;
/* Remove blocks from queue[] when their in degree
becomes zero. Repeat until no blocks are left on the
/* Any block that did not end up in a region is placed into a region
by itself. */
- for (i = 0; i < n_basic_blocks; i++)
- if (degree[i] >= 0)
+ FOR_EACH_BB (bb)
+ if (degree[bb->index] >= 0)
{
- rgn_bb_table[idx] = i;
+ rgn_bb_table[idx] = bb->index;
RGN_NR_BLOCKS (nr_regions) = 1;
RGN_BLOCKS (nr_regions) = idx++;
- CONTAINING_RGN (i) = nr_regions++;
- BLOCK_TO_BB (i) = 0;
+ CONTAINING_RGN (bb->index) = nr_regions++;
+ BLOCK_TO_BB (bb->index) = 0;
}
free (max_hdr);
free (dfs_nr);
free (stack);
- free (passed);
- free (header);
- free (inner);
- free (in_queue);
- free (in_stack);
+ sbitmap_free (passed);
+ sbitmap_free (header);
+ sbitmap_free (inner);
+ sbitmap_free (in_queue);
+ sbitmap_free (in_stack);
}
/* Functions for regions scheduling information. */
prob[bb] = 0.0;
if (IS_RGN_ENTRY (bb))
{
- BITSET_ADD (dom[bb], 0, bbset_size);
+ SET_BIT (dom[bb], 0);
prob[bb] = 1.0;
return;
}
fst_in_edge = nxt_in_edge = IN_EDGES (BB_TO_BLOCK (bb));
- /* Intialize dom[bb] to '111..1'. */
- BITSET_INVERT (dom[bb], bbset_size);
+ /* Initialize dom[bb] to '111..1'. */
+ sbitmap_ones (dom[bb]);
do
{
pred = FROM_BLOCK (nxt_in_edge);
- BITSET_INTER (dom[bb], dom[BLOCK_TO_BB (pred)], bbset_size);
+ sbitmap_a_and_b (dom[bb], dom[bb], dom[BLOCK_TO_BB (pred)]);
+      sbitmap_a_or_b (ancestor_edges[bb], ancestor_edges[bb],
+                      ancestor_edges[BLOCK_TO_BB (pred)]);
- BITSET_UNION (ancestor_edges[bb], ancestor_edges[BLOCK_TO_BB (pred)],
- edgeset_size);
-
- BITSET_ADD (ancestor_edges[bb], EDGE_TO_BIT (nxt_in_edge), edgeset_size);
+ SET_BIT (ancestor_edges[bb], EDGE_TO_BIT (nxt_in_edge));
nr_out_edges = 1;
nr_rgn_out_edges = 0;
fst_out_edge = OUT_EDGES (pred);
nxt_out_edge = NEXT_OUT (fst_out_edge);
- BITSET_UNION (pot_split[bb], pot_split[BLOCK_TO_BB (pred)],
- edgeset_size);
- BITSET_ADD (pot_split[bb], EDGE_TO_BIT (fst_out_edge), edgeset_size);
+    sbitmap_a_or_b (pot_split[bb], pot_split[bb],
+                    pot_split[BLOCK_TO_BB (pred)]);
+
+ SET_BIT (pot_split[bb], EDGE_TO_BIT (fst_out_edge));
/* The successor doesn't belong in the region? */
if (CONTAINING_RGN (TO_BLOCK (fst_out_edge)) !=
if (CONTAINING_RGN (TO_BLOCK (nxt_out_edge)) !=
CONTAINING_RGN (BB_TO_BLOCK (bb)))
++nr_rgn_out_edges;
- BITSET_ADD (pot_split[bb], EDGE_TO_BIT (nxt_out_edge), edgeset_size);
+ SET_BIT (pot_split[bb], EDGE_TO_BIT (nxt_out_edge));
nxt_out_edge = NEXT_OUT (nxt_out_edge);
}
}
while (fst_in_edge != nxt_in_edge);
- BITSET_ADD (dom[bb], bb, bbset_size);
- BITSET_DIFFER (pot_split[bb], ancestor_edges[bb], edgeset_size);
+ SET_BIT (dom[bb], bb);
+ sbitmap_difference (pot_split[bb], pot_split[bb], ancestor_edges[bb]);
if (sched_verbose >= 2)
fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
int bb_trg;
edgelst *bl;
{
- int es = edgeset_size;
- edgeset src = (edgeset) xcalloc (es, sizeof (HOST_WIDE_INT));
-
- while (es--)
- src[es] = (pot_split[bb_src])[es];
- BITSET_DIFFER (src, pot_split[bb_trg], edgeset_size);
- extract_bitlst (src, edgeset_size, edgeset_bitsize, bl);
- free (src);
+ sbitmap src = (edgeset) sbitmap_alloc (pot_split[bb_src]->n_bits);
+ sbitmap_copy (src, pot_split[bb_src]);
+
+ sbitmap_difference (src, src, pot_split[bb_trg]);
+ extract_bitlst (src, bl);
+ sbitmap_free (src);
}
/* Find the valid candidate-source-blocks for the target block TRG, compute
compute_trg_info (trg)
int trg;
{
- register candidate *sp;
+ candidate *sp;
edgelst el;
int check_block, update_idx;
int i, j, k, fst_edge, nxt_edge;
add the TO block to the update block list. This list can end
up with a lot of duplicates. We need to weed them out to avoid
overrunning the end of the bblst_table. */
- update_blocks = (char *) alloca (n_basic_blocks);
- memset (update_blocks, 0, n_basic_blocks);
+ update_blocks = (char *) alloca (last_basic_block);
+ memset (update_blocks, 0, last_basic_block);
update_idx = 0;
for (j = 0; j < el.nr_members; j++)
debug_candidate (i);
}
-/* Functions for speculative scheduing. */
+/* Functions for speculative scheduling. */
/* Return 0 if x is a set of a register alive in the beginning of one
of the split-blocks of src, otherwise return 1. */
int src;
rtx x;
{
- register int i;
- register int regno;
- register rtx reg = SET_DEST (x);
+ int i;
+ int regno;
+ rtx reg = SET_DEST (x);
if (reg == 0)
return 1;
|| GET_CODE (reg) == STRICT_LOW_PART)
reg = XEXP (reg, 0);
- if (GET_CODE (reg) == PARALLEL
- && GET_MODE (reg) == BLKmode)
+ if (GET_CODE (reg) == PARALLEL)
{
- register int i;
+ int i;
+
for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
- if (check_live_1 (src, XVECEXP (reg, 0, i)))
- return 1;
+ if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
+ if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)))
+ return 1;
+
return 0;
}
int src;
rtx x;
{
- register int i;
- register int regno;
- register rtx reg = SET_DEST (x);
+ int i;
+ int regno;
+ rtx reg = SET_DEST (x);
if (reg == 0)
return;
|| GET_CODE (reg) == STRICT_LOW_PART)
reg = XEXP (reg, 0);
- if (GET_CODE (reg) == PARALLEL
- && GET_MODE (reg) == BLKmode)
+ if (GET_CODE (reg) == PARALLEL)
{
- register int i;
+ int i;
+
for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
- update_live_1 (src, XVECEXP (reg, 0, i));
+ if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
+ update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0));
+
return;
}
#define WORST_CLASS(class1, class2) \
((class1 > class2) ? class1 : class2)
-/* Non-zero if block bb_to is equal to, or reachable from block bb_from. */
+/* Nonzero if block bb_to is equal to, or reachable from block bb_from. */
#define IS_REACHABLE(bb_from, bb_to) \
-(bb_from == bb_to \
+ (bb_from == bb_to \
|| IS_RGN_ENTRY (bb_from) \
- || (bitset_member (ancestor_edges[bb_to], \
- EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from))), \
- edgeset_size)))
+ || (TEST_BIT (ancestor_edges[bb_to], \
+ EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from))))))
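+
+/* Read: the first in-region edge entering bb_from's block must lie on
+   some path from the region entry to bb_to, i.e. be among bb_to's
+   ancestor edges as computed by compute_dom_prob_ps above.  */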
-/* Non-zero iff the address is comprised from at most 1 register. */
+/* Nonzero iff the address is composed of at most one register.  */
#define CONST_BASED_ADDRESS_P(x) \
(GET_CODE (x) == REG \
- || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
- || (GET_CODE (x) == LO_SUM)) \
- && (GET_CODE (XEXP (x, 0)) == CONST_INT \
- || GET_CODE (XEXP (x, 1)) == CONST_INT)))
+ || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
+ || (GET_CODE (x) == LO_SUM)) \
+ && (CONSTANT_P (XEXP (x, 0)) \
+ || CONSTANT_P (XEXP (x, 1)))))
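+
+/* E.g. (reg) qualifies, as does (plus (reg) (const_int 4)); an address
+   such as (plus (reg) (reg)) needs two registers and does not.  */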
/* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
int bb_src, bb_trg;
{
rtx back_link;
- register candidate *candp = candidate_table + bb_src;
+ candidate *candp = candidate_table + bb_src;
if (candp->split_bbs.nr_members != 1)
/* Must have exactly one escape block. */
code = GET_CODE (x);
if (is_store)
{
- if (code == MEM)
+ if (code == MEM && may_trap_p (x))
return TRAP_RISKY;
else
return TRAP_FREE;
if (tmp_class == TRAP_RISKY)
break;
/* Test if it is a load. */
- tmp_class =
- WORST_CLASS (tmp_class,
- may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)), 0));
+ tmp_class
+ = WORST_CLASS (tmp_class,
+ may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
+ 0));
break;
case COND_EXEC:
case TRAP_IF:
tmp_class = TRAP_RISKY;
break;
- default:;
+ default:
+ ;
}
insn_class = WORST_CLASS (insn_class, tmp_class);
if (insn_class == TRAP_RISKY || insn_class == IRISKY)
bblst_table = (int *) xmalloc (bblst_size * sizeof (int));
bitlst_table_last = 0;
- bitlst_table_size = rgn_nr_edges;
bitlst_table = (int *) xmalloc (rgn_nr_edges * sizeof (int));
compute_trg_info (target_bb);
Count number of insns in the target block being scheduled. */
for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
{
- rtx next;
-
- if (! INSN_P (insn))
- continue;
- next = NEXT_INSN (insn);
-
- if (INSN_DEP_COUNT (insn) == 0
- && (SCHED_GROUP_P (next) == 0 || ! INSN_P (next)))
+ if (INSN_DEP_COUNT (insn) == 0)
ready_add (ready, insn);
- if (!(SCHED_GROUP_P (insn)))
- target_n_insns++;
+ target_n_insns++;
}
/* Add to ready list all 'ready' insns in valid source blocks.
if (!CANT_MOVE (insn)
&& (!IS_SPECULATIVE_INSN (insn)
- || (insn_issue_delay (insn) <= 3
+ || ((((!targetm.sched.use_dfa_pipeline_interface
+ || !(*targetm.sched.use_dfa_pipeline_interface) ())
+ && insn_issue_delay (insn) <= 3)
+ || (targetm.sched.use_dfa_pipeline_interface
+ && (*targetm.sched.use_dfa_pipeline_interface) ()
+ && (recog_memoized (insn) < 0
+ || min_insn_conflict_delay (curr_state,
+ insn, insn) <= 3)))
&& check_live (insn, bb_src)
&& is_exception_free (insn, bb_src, target_bb))))
- {
- rtx next;
-
- /* Note that we havn't squirrled away the notes for
- blocks other than the current. So if this is a
- speculative insn, NEXT might otherwise be a note. */
- next = next_nonnote_insn (insn);
- if (INSN_DEP_COUNT (insn) == 0
- && (! next
- || SCHED_GROUP_P (next) == 0
- || ! INSN_P (next)))
- ready_add (ready, insn);
- }
+ if (INSN_DEP_COUNT (insn) == 0)
+ ready_add (ready, insn);
}
}
}
/* An interblock motion? */
if (INSN_BB (insn) != target_bb)
{
- rtx temp;
basic_block b1;
if (IS_SPECULATIVE_INSN (insn))
}
nr_inter++;
- /* Find the beginning of the scheduling group. */
- /* ??? Ought to update basic block here, but later bits of
- schedule_block assumes the original insn block is
- still intact. */
-
- temp = insn;
- while (SCHED_GROUP_P (temp))
- temp = PREV_INSN (temp);
-
- /* Update source block boundaries. */
- b1 = BLOCK_FOR_INSN (temp);
- if (temp == b1->head && insn == b1->end)
+ /* Update source block boundaries. */
+ b1 = BLOCK_FOR_INSN (insn);
+ if (insn == b1->head && insn == b1->end)
{
/* We moved all the insns in the basic block.
Emit a note after the last insn and update the
/* We took insns from the end of the basic block,
so update the end of block boundary so that it
points to the first insn we did not move. */
- b1->end = PREV_INSN (temp);
+ b1->end = PREV_INSN (insn);
}
- else if (temp == b1->head)
+ else if (insn == b1->head)
{
/* We took insns from the start of the basic block,
so update the start of block boundary so that
&& (!IS_VALID (INSN_BB (next))
|| CANT_MOVE (next)
|| (IS_SPECULATIVE_INSN (next)
- && (insn_issue_delay (next) > 3
+ && (0
+ || (targetm.sched.use_dfa_pipeline_interface
+ && (*targetm.sched.use_dfa_pipeline_interface) ()
+ && recog_memoized (next) >= 0
+ && min_insn_conflict_delay (curr_state, next,
+ next) > 3)
+ || ((!targetm.sched.use_dfa_pipeline_interface
+ || !(*targetm.sched.use_dfa_pipeline_interface) ())
+ && insn_issue_delay (next) > 3)
|| !check_live (next, INSN_BB (next))
|| !is_exception_free (next, INSN_BB (next), target_bb)))))
return 0;
NULL, NULL,
NULL, NULL,
- 0
+ 0, 0
};
+/* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register. */
+
+static bool
+sets_likely_spilled (pat)
+ rtx pat;
+{
+ bool ret = false;
+ note_stores (pat, sets_likely_spilled_1, &ret);
+ return ret;
+}
+
+static void
+sets_likely_spilled_1 (x, pat, data)
+ rtx x, pat;
+ void *data;
+{
+ bool *ret = (bool *) data;
+
+ if (GET_CODE (pat) == SET
+ && REG_P (x)
+ && REGNO (x) < FIRST_PSEUDO_REGISTER
+ && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (REGNO (x))))
+ *ret = true;
+}
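+
+/* note_stores invokes the callback for every SET and CLOBBER in the
+   pattern, passing the stored-to rtx as X (the SET check above filters
+   out clobbers).  A hypothetical (set (reg:SI 0) (reg:SI 42)) would
+   thus set *ret on a target whose class for hard register 0 satisfies
+   CLASS_LIKELY_SPILLED_P.  */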
+
/* Add dependences so that branches are scheduled to run last in their
block. */
{
rtx insn, last;
- /* For all branches, calls, uses, clobbers, and cc0 setters, force them
- to remain in order at the end of the block by adding dependencies and
- giving the last a high priority. There may be notes present, and
- prev_head may also be a note.
+ /* For all branches, calls, uses, clobbers, cc0 setters, and instructions
+ that can throw exceptions, force them to remain in order at the end of
+ the block by adding dependencies and giving the last a high priority.
+ There may be notes present, and prev_head may also be a note.
Branches must obviously remain at the end. Calls should remain at the
end since moving them results in worse register allocation. Uses remain
- at the end to ensure proper register allocation. cc0 setters remaim
- at the end because they can't be moved away from their cc0 user. */
+ at the end to ensure proper register allocation.
+
+   cc0 setters remain at the end because they can't be moved away from
+ their cc0 user.
+
+ Insns setting CLASS_LIKELY_SPILLED_P registers (usually return values)
+ are not moved before reload because we can wind up with register
+ allocation failures. */
+
insn = tail;
last = 0;
while (GET_CODE (insn) == CALL_INSN
|| (GET_CODE (insn) == INSN
&& (GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER
+ || can_throw_internal (insn)
#ifdef HAVE_cc0
|| sets_cc0_p (PATTERN (insn))
#endif
- ))
+ || (!reload_completed
+ && sets_likely_spilled (PATTERN (insn)))))
|| GET_CODE (insn) == NOTE)
{
if (GET_CODE (insn) != NOTE)
{
- if (last != 0
- && !find_insn_list (insn, LOG_LINKS (last)))
+ if (last != 0 && !find_insn_list (insn, LOG_LINKS (last)))
{
add_dependence (last, insn, REG_DEP_ANTI);
INSN_REF_COUNT (insn)++;
CANT_MOVE (insn) = 1;
last = insn;
- /* Skip over insns that are part of a group.
- Make each insn explicitly depend on the previous insn.
- This ensures that only the group header will ever enter
- the ready queue (and, when scheduled, will automatically
- schedule the SCHED_GROUP_P block). */
- while (SCHED_GROUP_P (insn))
- {
- rtx temp = prev_nonnote_insn (insn);
- add_dependence (insn, temp, REG_DEP_ANTI);
- insn = temp;
- }
}
/* Don't overrun the bounds of the basic block. */
add_dependence (last, insn, REG_DEP_ANTI);
INSN_REF_COUNT (insn) = 1;
-
- /* Skip over insns that are part of a group. */
- while (SCHED_GROUP_P (insn))
- insn = prev_nonnote_insn (insn);
}
}
static struct deps *bb_deps;
+/* Duplicate the INSN_LIST elements of COPY and prepend them to OLD. */
+
+static rtx
+concat_INSN_LIST (copy, old)
+ rtx copy, old;
+{
+ rtx new = old;
+ for (; copy ; copy = XEXP (copy, 1))
+ new = alloc_INSN_LIST (XEXP (copy, 0), new);
+ return new;
+}
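+
+/* For example, COPY = (i3 i2 i1) and OLD = (j2 j1) yield
+   (i1 i2 i3 j2 j1): each element of COPY is pushed in turn, so COPY's
+   order is reversed while OLD's cells are shared, not copied.  */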
+
+static void
+concat_insn_mem_list (copy_insns, copy_mems, old_insns_p, old_mems_p)
+ rtx copy_insns, copy_mems;
+ rtx *old_insns_p, *old_mems_p;
+{
+ rtx new_insns = *old_insns_p;
+ rtx new_mems = *old_mems_p;
+
+ while (copy_insns)
+ {
+ new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns);
+ new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems);
+ copy_insns = XEXP (copy_insns, 1);
+ copy_mems = XEXP (copy_mems, 1);
+ }
+
+ *old_insns_p = new_insns;
+ *old_mems_p = new_mems;
+}
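+
+/* The insn and mem lists are walked in lockstep, so callers must keep
+   them the same length: each (insn, mem) pair describes one pending
+   memory reference.  */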
+
/* After computing the dependencies for block BB, propagate the dependencies
found in TMP_DEPS to the successors of the block. */
static void
-propagate_deps (bb, tmp_deps)
+propagate_deps (bb, pred_deps)
int bb;
- struct deps *tmp_deps;
+ struct deps *pred_deps;
{
int b = BB_TO_BLOCK (bb);
int e, first_edge;
- int reg;
- rtx link_insn, link_mem;
- rtx u;
-
- /* These lists should point to the right place, for correct
- freeing later. */
- bb_deps[bb].pending_read_insns = tmp_deps->pending_read_insns;
- bb_deps[bb].pending_read_mems = tmp_deps->pending_read_mems;
- bb_deps[bb].pending_write_insns = tmp_deps->pending_write_insns;
- bb_deps[bb].pending_write_mems = tmp_deps->pending_write_mems;
/* bb's structures are inherited by its successors. */
first_edge = e = OUT_EDGES (b);
- if (e <= 0)
- return;
-
- do
- {
- rtx x;
- int b_succ = TO_BLOCK (e);
- int bb_succ = BLOCK_TO_BB (b_succ);
- struct deps *succ_deps = bb_deps + bb_succ;
-
- /* Only bbs "below" bb, in the same region, are interesting. */
- if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
- || bb_succ <= bb)
- {
- e = NEXT_OUT (e);
- continue;
- }
-
- /* The reg_last lists are inherited by bb_succ. */
- EXECUTE_IF_SET_IN_REG_SET (&tmp_deps->reg_last_in_use, 0, reg,
- {
- struct deps_reg *tmp_deps_reg = &tmp_deps->reg_last[reg];
- struct deps_reg *succ_deps_reg = &succ_deps->reg_last[reg];
-
- for (u = tmp_deps_reg->uses; u; u = XEXP (u, 1))
- if (! find_insn_list (XEXP (u, 0), succ_deps_reg->uses))
- succ_deps_reg->uses
- = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->uses);
-
- for (u = tmp_deps_reg->sets; u; u = XEXP (u, 1))
- if (! find_insn_list (XEXP (u, 0), succ_deps_reg->sets))
- succ_deps_reg->sets
- = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->sets);
-
- for (u = tmp_deps_reg->clobbers; u; u = XEXP (u, 1))
- if (! find_insn_list (XEXP (u, 0), succ_deps_reg->clobbers))
- succ_deps_reg->clobbers
- = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->clobbers);
- });
- IOR_REG_SET (&succ_deps->reg_last_in_use, &tmp_deps->reg_last_in_use);
-
- /* Mem read/write lists are inherited by bb_succ. */
- link_insn = tmp_deps->pending_read_insns;
- link_mem = tmp_deps->pending_read_mems;
- while (link_insn)
- {
- if (!(find_insn_mem_list (XEXP (link_insn, 0),
- XEXP (link_mem, 0),
- succ_deps->pending_read_insns,
- succ_deps->pending_read_mems)))
- add_insn_mem_dependence (succ_deps, &succ_deps->pending_read_insns,
- &succ_deps->pending_read_mems,
- XEXP (link_insn, 0), XEXP (link_mem, 0));
- link_insn = XEXP (link_insn, 1);
- link_mem = XEXP (link_mem, 1);
- }
+ if (e > 0)
+ do
+ {
+ int b_succ = TO_BLOCK (e);
+ int bb_succ = BLOCK_TO_BB (b_succ);
+ struct deps *succ_deps = bb_deps + bb_succ;
+ int reg;
+
+ /* Only bbs "below" bb, in the same region, are interesting. */
+ if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
+ || bb_succ <= bb)
+ {
+ e = NEXT_OUT (e);
+ continue;
+ }
- link_insn = tmp_deps->pending_write_insns;
- link_mem = tmp_deps->pending_write_mems;
- while (link_insn)
- {
- if (!(find_insn_mem_list (XEXP (link_insn, 0),
- XEXP (link_mem, 0),
- succ_deps->pending_write_insns,
- succ_deps->pending_write_mems)))
- add_insn_mem_dependence (succ_deps,
- &succ_deps->pending_write_insns,
- &succ_deps->pending_write_mems,
- XEXP (link_insn, 0), XEXP (link_mem, 0));
-
- link_insn = XEXP (link_insn, 1);
- link_mem = XEXP (link_mem, 1);
- }
+ /* The reg_last lists are inherited by bb_succ. */
+ EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg,
+ {
+ struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
+ struct deps_reg *succ_rl = &succ_deps->reg_last[reg];
+
+ succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
+ succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
+ succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
+ succ_rl->clobbers);
+ succ_rl->uses_length += pred_rl->uses_length;
+ succ_rl->clobbers_length += pred_rl->clobbers_length;
+ });
+ IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);
+
+ /* Mem read/write lists are inherited by bb_succ. */
+ concat_insn_mem_list (pred_deps->pending_read_insns,
+ pred_deps->pending_read_mems,
+ &succ_deps->pending_read_insns,
+ &succ_deps->pending_read_mems);
+ concat_insn_mem_list (pred_deps->pending_write_insns,
+ pred_deps->pending_write_mems,
+ &succ_deps->pending_write_insns,
+ &succ_deps->pending_write_mems);
+
+ succ_deps->last_pending_memory_flush
+ = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
+ succ_deps->last_pending_memory_flush);
+
+ succ_deps->pending_lists_length += pred_deps->pending_lists_length;
+ succ_deps->pending_flush_length += pred_deps->pending_flush_length;
+
+ /* last_function_call is inherited by bb_succ. */
+ succ_deps->last_function_call
+ = concat_INSN_LIST (pred_deps->last_function_call,
+ succ_deps->last_function_call);
+
+ /* sched_before_next_call is inherited by bb_succ. */
+ succ_deps->sched_before_next_call
+ = concat_INSN_LIST (pred_deps->sched_before_next_call,
+ succ_deps->sched_before_next_call);
+
+ e = NEXT_OUT (e);
+ }
+ while (e != first_edge);
- /* last_function_call is inherited by bb_succ. */
- for (u = tmp_deps->last_function_call; u; u = XEXP (u, 1))
- if (! find_insn_list (XEXP (u, 0), succ_deps->last_function_call))
- succ_deps->last_function_call
- = alloc_INSN_LIST (XEXP (u, 0), succ_deps->last_function_call);
-
- /* last_pending_memory_flush is inherited by bb_succ. */
- for (u = tmp_deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- if (! find_insn_list (XEXP (u, 0),
- succ_deps->last_pending_memory_flush))
- succ_deps->last_pending_memory_flush
- = alloc_INSN_LIST (XEXP (u, 0),
- succ_deps->last_pending_memory_flush);
-
- /* sched_before_next_call is inherited by bb_succ. */
- x = LOG_LINKS (tmp_deps->sched_before_next_call);
- for (; x; x = XEXP (x, 1))
- add_dependence (succ_deps->sched_before_next_call,
- XEXP (x, 0), REG_DEP_ANTI);
-
- e = NEXT_OUT (e);
- }
- while (e != first_edge);
+ /* These lists should point to the right place, for correct
+ freeing later. */
+ bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
+ bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
+ bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
+ bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;
+
+ /* Can't allow these to be freed twice. */
+ pred_deps->pending_read_insns = 0;
+ pred_deps->pending_read_mems = 0;
+ pred_deps->pending_write_insns = 0;
+ pred_deps->pending_write_mems = 0;
}
/* Compute backward dependences inside bb. In a multiple blocks region:
(1) a bb is analyzed after its predecessors, and (2) the lists in
effect at the end of bb (after analyzing for bb) are inherited by
- bb's successrs.
+ bb's successors.
Specifically for reg-reg data dependences, the block insns are
scanned by sched_analyze () top-to-bottom. Two lists are
fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
BB_TO_BLOCK (bb), bb);
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
- "insn", "code", "bb", "dep", "prio", "cost", "blockage", "units");
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
- "----", "----", "--", "---", "----", "----", "--------", "-----");
+ if (targetm.sched.use_dfa_pipeline_interface
+ && (*targetm.sched.use_dfa_pipeline_interface) ())
+ {
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
+ "insn", "code", "bb", "dep", "prio", "cost",
+ "reservation");
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
+ "----", "----", "--", "---", "----", "----",
+ "-----------");
+ }
+ else
+ {
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
+ "insn", "code", "bb", "dep", "prio", "cost", "blockage", "units");
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
+ "----", "----", "--", "---", "----", "----", "--------", "-----");
+ }
+
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
{
rtx link;
- int unit, range;
if (! INSN_P (insn))
{
continue;
}
- unit = insn_unit (insn);
- range = (unit < 0
- || function_units[unit].blockage_range_function == 0) ? 0 :
- function_units[unit].blockage_range_function (insn);
- fprintf (sched_dump,
- ";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ",
- (SCHED_GROUP_P (insn) ? "+" : " "),
- INSN_UID (insn),
- INSN_CODE (insn),
- INSN_BB (insn),
- INSN_DEP_COUNT (insn),
- INSN_PRIORITY (insn),
- insn_cost (insn, 0, 0),
- (int) MIN_BLOCKAGE_COST (range),
- (int) MAX_BLOCKAGE_COST (range));
- insn_print_units (insn);
+ if (targetm.sched.use_dfa_pipeline_interface
+ && (*targetm.sched.use_dfa_pipeline_interface) ())
+ {
+ fprintf (sched_dump,
+ ";; %s%5d%6d%6d%6d%6d%6d ",
+ (SCHED_GROUP_P (insn) ? "+" : " "),
+ INSN_UID (insn),
+ INSN_CODE (insn),
+ INSN_BB (insn),
+ INSN_DEP_COUNT (insn),
+ INSN_PRIORITY (insn),
+ insn_cost (insn, 0, 0));
+
+ if (recog_memoized (insn) < 0)
+ fprintf (sched_dump, "nothing");
+ else
+ print_reservation (sched_dump, insn);
+ }
+ else
+ {
+ int unit = insn_unit (insn);
+ int range
+ = (unit < 0
+ || function_units[unit].blockage_range_function == 0
+ ? 0
+ : function_units[unit].blockage_range_function (insn));
+ fprintf (sched_dump,
+ ";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ",
+ (SCHED_GROUP_P (insn) ? "+" : " "),
+ INSN_UID (insn),
+ INSN_CODE (insn),
+ INSN_BB (insn),
+ INSN_DEP_COUNT (insn),
+ INSN_PRIORITY (insn),
+ insn_cost (insn, 0, 0),
+ (int) MIN_BLOCKAGE_COST (range),
+ (int) MAX_BLOCKAGE_COST (range));
+ insn_print_units (insn);
+ }
+
fprintf (sched_dump, "\t: ");
for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0)));
init_deps_global ();
- /* Initializations for region data dependence analyisis. */
+ /* Initializations for region data dependence analysis. */
bb_deps = (struct deps *) xmalloc (sizeof (struct deps) * current_nr_blocks);
for (bb = 0; bb < current_nr_blocks; bb++)
init_deps (bb_deps + bb);
get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
compute_forward_dependences (head, tail);
+
+ if (targetm.sched.dependencies_evaluation_hook)
+ targetm.sched.dependencies_evaluation_hook (head, tail);
+
}
/* Set priorities. */
prob = (float *) xmalloc ((current_nr_blocks) * sizeof (float));
- bbset_size = current_nr_blocks / HOST_BITS_PER_WIDE_INT + 1;
- dom = (bbset *) xmalloc (current_nr_blocks * sizeof (bbset));
- for (i = 0; i < current_nr_blocks; i++)
- dom[i] = (bbset) xcalloc (bbset_size, sizeof (HOST_WIDE_INT));
-
+ dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
+ sbitmap_vector_zero (dom, current_nr_blocks);
/* Edge to bit. */
rgn_nr_edges = 0;
edge_to_bit = (int *) xmalloc (nr_edges * sizeof (int));
rgn_edges[rgn_nr_edges++] = i;
/* Split edges. */
- edgeset_size = rgn_nr_edges / HOST_BITS_PER_WIDE_INT + 1;
- edgeset_bitsize = rgn_nr_edges;
- pot_split = (edgeset *) xmalloc (current_nr_blocks * sizeof (edgeset));
- ancestor_edges
- = (edgeset *) xmalloc (current_nr_blocks * sizeof (edgeset));
- for (i = 0; i < current_nr_blocks; i++)
- {
- pot_split[i] =
- (edgeset) xcalloc (edgeset_size, sizeof (HOST_WIDE_INT));
- ancestor_edges[i] =
- (edgeset) xcalloc (edgeset_size, sizeof (HOST_WIDE_INT));
- }
+ pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
+ sbitmap_vector_zero (pot_split, current_nr_blocks);
+ ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
+ sbitmap_vector_zero (ancestor_edges, current_nr_blocks);
/* Compute probabilities, dominators, split_edges. */
for (bb = 0; bb < current_nr_blocks; bb++)
or after the last real insn of the block. So if the first insn
has a REG_SAVE_NOTE which would otherwise be emitted before the
insn, it is redundant with the note before the start of the
- block, and so we have to take it out.
-
- FIXME: Probably the same thing should be done with REG_SAVE_NOTEs
- referencing NOTE_INSN_SETJMP at the end of the block. */
+ block, and so we have to take it out. */
if (INSN_P (head))
{
rtx note;
for (note = REG_NOTES (head); note; note = XEXP (note, 1))
if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
{
- if (INTVAL (XEXP (note, 0)) != NOTE_INSN_SETJMP)
- {
- remove_note (head, note);
- note = XEXP (note, 1);
- remove_note (head, note);
- }
- else
- note = XEXP (note, 1);
+ remove_note (head, note);
+ note = XEXP (note, 1);
+ remove_note (head, note);
}
}
if (current_nr_blocks > 1)
{
- int i;
-
free (prob);
- for (i = 0; i < current_nr_blocks; ++i)
- {
- free (dom[i]);
- free (pot_split[i]);
- free (ancestor_edges[i]);
- }
- free (dom);
+ sbitmap_vector_free (dom);
+ sbitmap_vector_free (pot_split);
+ sbitmap_vector_free (ancestor_edges);
free (edge_to_bit);
free (rgn_edges);
- free (pot_split);
- free (ancestor_edges);
}
}
nr_regions = 0;
rgn_table = (region *) xmalloc ((n_basic_blocks) * sizeof (region));
rgn_bb_table = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
- block_to_bb = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
- containing_rgn = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
-
- blocks = sbitmap_alloc (n_basic_blocks);
+ block_to_bb = (int *) xmalloc ((last_basic_block) * sizeof (int));
+ containing_rgn = (int *) xmalloc ((last_basic_block) * sizeof (int));
/* Compute regions for scheduling. */
if (reload_completed
}
else
{
- sbitmap *dom;
+ dominance_info dom;
struct edge_list *edge_list;
- dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
-
- /* The scheduler runs after flow; therefore, we can't blindly call
- back into find_basic_blocks since doing so could invalidate the
- info in global_live_at_start.
-
- Consider a block consisting entirely of dead stores; after life
- analysis it would be a block of NOTE_INSN_DELETED notes. If
- we call find_basic_blocks again, then the block would be removed
- entirely and invalidate our the register live information.
-
- We could (should?) recompute register live information. Doing
- so may even be beneficial. */
+ /* The scheduler runs after estimate_probabilities; therefore, we
+ can't blindly call back into find_basic_blocks since doing so
+ could invalidate the branch probability info. We could,
+ however, call cleanup_cfg. */
edge_list = create_edge_list ();
/* Compute the dominators and post dominators. */
- calculate_dominance_info (NULL, dom, CDI_DOMINATORS);
+ dom = calculate_dominance_info (CDI_DOMINATORS);
/* build_control_flow will return nonzero if it detects unreachable
blocks or any other irregularity with the cfg which prevents
/* For now. This will move as more and more of haifa is converted
to using the cfg code in flow.c. */
- free (dom);
+ free_dominance_info (dom);
}
}
- deaths_in_region = (int *) xmalloc (sizeof (int) * nr_regions);
- /* Remove all death notes from the subroutine. */
- for (rgn = 0; rgn < nr_regions; rgn++)
+ if (CHECK_DEAD_NOTES)
{
- int b;
+ blocks = sbitmap_alloc (last_basic_block);
+ deaths_in_region = (int *) xmalloc (sizeof (int) * nr_regions);
+ /* Remove all death notes from the subroutine. */
+ for (rgn = 0; rgn < nr_regions; rgn++)
+ {
+ int b;
- sbitmap_zero (blocks);
- for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b)
- SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn) + b]);
+ sbitmap_zero (blocks);
+ for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b)
+ SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn) + b]);
- deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
+ deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
+ }
+ sbitmap_free (blocks);
}
-
- sbitmap_free (blocks);
+ else
+ count_or_remove_death_notes (NULL, 1);
}
/* The one entry point in this file. DUMP_FILE is the dump file for
sbitmap large_region_blocks, blocks;
int rgn;
int any_large_regions;
+ basic_block bb;
/* Taking care of this degenerate case makes the rest of
this code simpler. */
init_regions ();
current_sched_info = ®ion_sched_info;
-
+
/* Schedule every region in the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
schedule_region (rgn);
/* Update life analysis for the subroutine. Do single block regions
first so that we can verify that live_at_start didn't change. Then
- do all other blocks. */
+ do all other blocks. */
/* ??? There is an outside possibility that update_life_info, or more
- to the point propagate_block, could get called with non-zero flags
+ to the point propagate_block, could get called with nonzero flags
more than once for one basic block. This would be kinda bad if it
were to happen, since REG_INFO would be accumulated twice for the
block, and we'd have twice the REG_DEAD notes.
best way to test for this kind of thing... */
allocate_reg_life_data ();
- compute_bb_for_insn (get_max_uid ());
+ compute_bb_for_insn ();
any_large_regions = 0;
- large_region_blocks = sbitmap_alloc (n_basic_blocks);
- sbitmap_ones (large_region_blocks);
-
- blocks = sbitmap_alloc (n_basic_blocks);
-
+ large_region_blocks = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (large_region_blocks);
+ FOR_EACH_BB (bb)
+ SET_BIT (large_region_blocks, bb->index);
+
+ blocks = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (blocks);
+
+ /* Update life information. For regions consisting of multiple blocks
+ we've possibly done interblock scheduling that affects global liveness.
+ For regions consisting of single blocks we need to do only local
+ liveness. */
for (rgn = 0; rgn < nr_regions; rgn++)
if (RGN_NR_BLOCKS (rgn) > 1)
any_large_regions = 1;
else
{
- sbitmap_zero (blocks);
SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
RESET_BIT (large_region_blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
-
- /* Don't update reg info after reload, since that affects
- regs_ever_live, which should not change after reload. */
- update_life_info (blocks, UPDATE_LIFE_LOCAL,
- (reload_completed ? PROP_DEATH_NOTES
- : PROP_DEATH_NOTES | PROP_REG_INFO));
-
-#ifndef HAVE_conditional_execution
- /* ??? REG_DEAD notes only exist for unconditional deaths. We need
- a count of the conditional plus unconditional deaths for this to
- work out. */
- /* In the single block case, the count of registers that died should
- not have changed during the schedule. */
- if (count_or_remove_death_notes (blocks, 0) != deaths_in_region[rgn])
- abort ();
-#endif
}
+ /* Don't update reg info after reload, since that affects
+ regs_ever_live, which should not change after reload. */
+ update_life_info (blocks, UPDATE_LIFE_LOCAL,
+ (reload_completed ? PROP_DEATH_NOTES
+ : PROP_DEATH_NOTES | PROP_REG_INFO));
if (any_large_regions)
{
update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL,
PROP_DEATH_NOTES | PROP_REG_INFO);
}
+ if (CHECK_DEAD_NOTES)
+ {
+      /* Verify the counts of REG_DEAD notes in the single basic block
+	 regions.  */
+ for (rgn = 0; rgn < nr_regions; rgn++)
+ if (RGN_NR_BLOCKS (rgn) == 1)
+ {
+ sbitmap_zero (blocks);
+ SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
+
+ if (deaths_in_region[rgn]
+ != count_or_remove_death_notes (blocks, 0))
+ abort ();
+ }
+ free (deaths_in_region);
+ }
+
/* Reposition the prologue and epilogue notes in case we moved the
prologue/epilogue insns. */
if (reload_completed)
sbitmap_free (blocks);
sbitmap_free (large_region_blocks);
-
- free (deaths_in_region);
}
#endif