X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fsched-rgn.c;h=c7b0ad6f39075257803124a497540188c0dd716f;hb=5b6554fe6b055223340c34dab17087bece883928;hp=0bb69ee4cdc9207d581a5020ead91dcb68735959;hpb=deb2741b38fe5c3702de0139b72df56eb72ebe35;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c index 0bb69ee4cdc..c7b0ad6f390 100644 --- a/gcc/sched-rgn.c +++ b/gcc/sched-rgn.c @@ -1,6 +1,6 @@ /* Instruction scheduling pass. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, - 2001, 2002, 2003, 2004, 2005, 2006, 2007 + 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com) @@ -9,7 +9,7 @@ This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free -Software Foundation; either version 2, or (at your option) any later +Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY @@ -18,9 +18,8 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License -along with GCC; see the file COPYING. If not, write to the Free -Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA -02110-1301, USA. */ +along with GCC; see the file COPYING3. If not see +. */ /* This pass implements list scheduling within basic blocks. It is run twice: (1) after flow analysis, but before register allocation, @@ -65,90 +64,68 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA #include "cfglayout.h" #include "params.h" #include "sched-int.h" +#include "sel-sched.h" #include "target.h" #include "timevar.h" #include "tree-pass.h" #include "dbgcnt.h" #ifdef INSN_SCHEDULING + /* Some accessor macros for h_i_d members only used within this file. */ -#define INSN_REF_COUNT(INSN) (h_i_d[INSN_UID (INSN)].ref_count) -#define FED_BY_SPEC_LOAD(insn) (h_i_d[INSN_UID (insn)].fed_by_spec_load) -#define IS_LOAD_INSN(insn) (h_i_d[INSN_UID (insn)].is_load_insn) +#define FED_BY_SPEC_LOAD(INSN) (HID (INSN)->fed_by_spec_load) +#define IS_LOAD_INSN(INSN) (HID (insn)->is_load_insn) /* nr_inter/spec counts interblock/speculative motion for the function. */ static int nr_inter, nr_spec; static int is_cfg_nonregular (void); -static bool sched_is_disabled_for_current_region_p (void); - -/* A region is the main entity for interblock scheduling: insns - are allowed to move between blocks in the same region, along - control flow graph edges, in the 'up' direction. */ -typedef struct -{ - /* Number of extended basic blocks in region. */ - int rgn_nr_blocks; - /* cblocks in the region (actually index in rgn_bb_table). */ - int rgn_blocks; - /* Dependencies for this region are already computed. Basically, indicates, - that this is a recovery block. */ - unsigned int dont_calc_deps : 1; - /* This region has at least one non-trivial ebb. */ - unsigned int has_real_ebb : 1; -} -region; /* Number of regions in the procedure. */ -static int nr_regions; +int nr_regions = 0; /* Table of region descriptions. */ -static region *rgn_table; +region *rgn_table = NULL; /* Array of lists of regions' blocks. 
*/ -static int *rgn_bb_table; +int *rgn_bb_table = NULL; /* Topological order of blocks in the region (if b2 is reachable from b1, block_to_bb[b2] > block_to_bb[b1]). Note: A basic block is always referred to by either block or b, while its topological order name (in the region) is referred to by bb. */ -static int *block_to_bb; +int *block_to_bb = NULL; /* The number of the region containing a block. */ -static int *containing_rgn; +int *containing_rgn = NULL; + +/* ebb_head [i] - is index in rgn_bb_table of the head basic block of i'th ebb. + Currently we can get a ebb only through splitting of currently + scheduling block, therefore, we don't need ebb_head array for every region, + hence, its sufficient to hold it for current one only. */ +int *ebb_head = NULL; /* The minimum probability of reaching a source block so that it will be considered for speculative scheduling. */ static int min_spec_prob; -#define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks) -#define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks) -#define RGN_DONT_CALC_DEPS(rgn) (rgn_table[rgn].dont_calc_deps) -#define RGN_HAS_REAL_EBB(rgn) (rgn_table[rgn].has_real_ebb) -#define BLOCK_TO_BB(block) (block_to_bb[block]) -#define CONTAINING_RGN(block) (containing_rgn[block]) - -void debug_regions (void); -static void find_single_block_region (void); +static void find_single_block_region (bool); static void find_rgns (void); -static void extend_rgns (int *, int *, sbitmap, int *); static bool too_large (int, int *, int *); -extern void debug_live (int, int); - /* Blocks of the current region being scheduled. */ -static int current_nr_blocks; -static int current_blocks; +int current_nr_blocks; +int current_blocks; -static int rgn_n_insns; +/* A speculative motion requires checking live information on the path + from 'source' to 'target'. The split blocks are those to be checked. + After a speculative motion, live information should be modified in + the 'update' blocks. -/* The mapping from ebb to block. */ -/* ebb_head [i] - is index in rgn_bb_table, while - EBB_HEAD (i) - is basic block index. - BASIC_BLOCK (EBB_HEAD (i)) - head of ebb. */ -#define BB_TO_BLOCK(ebb) (rgn_bb_table[ebb_head[ebb]]) -#define EBB_FIRST_BB(ebb) BASIC_BLOCK (BB_TO_BLOCK (ebb)) -#define EBB_LAST_BB(ebb) BASIC_BLOCK (rgn_bb_table[ebb_head[ebb + 1] - 1]) + Lists of split and update blocks for each candidate of the current + target are in array bblst_table. */ +static basic_block *bblst_table; +static int bblst_size, bblst_last; /* Target info declarations. @@ -174,23 +151,14 @@ typedef struct candidate; static candidate *candidate_table; - -/* A speculative motion requires checking live information on the path - from 'source' to 'target'. The split blocks are those to be checked. - After a speculative motion, live information should be modified in - the 'update' blocks. - - Lists of split and update blocks for each candidate of the current - target are in array bblst_table. */ -static basic_block *bblst_table; -static int bblst_size, bblst_last; - -#define IS_VALID(src) ( candidate_table[src].is_valid ) -#define IS_SPECULATIVE(src) ( candidate_table[src].is_speculative ) +#define IS_VALID(src) (candidate_table[src].is_valid) +#define IS_SPECULATIVE(src) (candidate_table[src].is_speculative) +#define IS_SPECULATIVE_INSN(INSN) \ + (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN)))) #define SRC_PROB(src) ( candidate_table[src].src_prob ) /* The bb being currently scheduled. */ -static int target_bb; +int target_bb; /* List of edges. 
*/ typedef struct @@ -205,7 +173,6 @@ static int edgelst_last; static void extract_edgelst (sbitmap, edgelst *); - /* Target info functions. */ static void split_edges (int, int, edgelst *); static void compute_trg_info (int); @@ -251,24 +218,11 @@ static edgeset *pot_split; /* For every bb, a set of its ancestor edges. */ static edgeset *ancestor_edges; -/* Array of EBBs sizes. Currently we can get a ebb only through - splitting of currently scheduling block, therefore, we don't need - ebb_head array for every region, its sufficient to hold it only - for current one. */ -static int *ebb_head; - -static void compute_dom_prob_ps (int); - #define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN)))) -#define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN)))) -#define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN))) /* Speculative scheduling functions. */ static int check_live_1 (int, rtx); static void update_live_1 (int, rtx); -static int check_live (rtx, int); -static void update_live (rtx, int); -static void set_spec_fed (rtx); static int is_pfree (rtx, int, int); static int find_conditional_protection (rtx, int); static int is_conditionally_protected (rtx, int, int); @@ -276,11 +230,10 @@ static int is_prisky (rtx, int, int); static int is_exception_free (rtx, int, int); static bool sets_likely_spilled (rtx); -static void sets_likely_spilled_1 (rtx, rtx, void *); +static void sets_likely_spilled_1 (rtx, const_rtx, void *); static void add_branch_dependences (rtx, rtx); -static void compute_block_backward_dependences (int); +static void compute_block_dependences (int); -static void init_regions (void); static void schedule_region (int); static rtx concat_INSN_LIST (rtx, rtx); static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *); @@ -316,24 +269,44 @@ is_cfg_nonregular (void) if (current_function_has_exception_handlers ()) return 1; - /* If we have non-jumping insns which refer to labels, then we consider - the cfg not well structured. */ + /* If we have insns which refer to labels as non-jumped-to operands, + then we consider the cfg not well structured. */ FOR_EACH_BB (b) FOR_BB_INSNS (b, insn) { - /* Check for labels referred by non-jump insns. */ - if (NONJUMP_INSN_P (insn) || CALL_P (insn)) - { - rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX); - if (note - && ! (JUMP_P (NEXT_INSN (insn)) - && find_reg_note (NEXT_INSN (insn), REG_LABEL, - XEXP (note, 0)))) - return 1; - } + rtx note, next, set, dest; + /* If this function has a computed jump, then we consider the cfg not well structured. */ - else if (JUMP_P (insn) && computed_jump_p (insn)) + if (JUMP_P (insn) && computed_jump_p (insn)) + return 1; + + if (!INSN_P (insn)) + continue; + + note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX); + if (note == NULL_RTX) + continue; + + /* For that label not to be seen as a referred-to label, this + must be a single-set which is feeding a jump *only*. This + could be a conditional jump with the label split off for + machine-specific reasons or a casesi/tablejump. 
*/ + next = next_nonnote_insn (insn); + if (next == NULL_RTX + || !JUMP_P (next) + || (JUMP_LABEL (next) != XEXP (note, 0) + && find_reg_note (next, REG_LABEL_TARGET, + XEXP (note, 0)) == NULL_RTX) + || BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (next)) + return 1; + + set = single_set (insn); + if (set == NULL_RTX) + return 1; + + dest = SET_DEST (set); + if (!REG_P (dest) || !dead_or_set_p (next, dest)) return 1; } @@ -404,28 +377,173 @@ debug_regions (void) } } +/* Print the region's basic blocks. */ + +void +debug_region (int rgn) +{ + int bb; + + fprintf (stderr, "\n;; ------------ REGION %d ----------\n\n", rgn); + fprintf (stderr, ";;\trgn %d nr_blocks %d:\n", rgn, + rgn_table[rgn].rgn_nr_blocks); + fprintf (stderr, ";;\tbb/block: "); + + /* We don't have ebb_head initialized yet, so we can't use + BB_TO_BLOCK (). */ + current_blocks = RGN_BLOCKS (rgn); + + for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++) + fprintf (stderr, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]); + + fprintf (stderr, "\n\n"); + + for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++) + { + debug_bb_n_slim (rgn_bb_table[current_blocks + bb]); + fprintf (stderr, "\n"); + } + + fprintf (stderr, "\n"); + +} + +/* True when a bb with index BB_INDEX contained in region RGN. */ +static bool +bb_in_region_p (int bb_index, int rgn) +{ + int i; + + for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++) + if (rgn_bb_table[current_blocks + i] == bb_index) + return true; + + return false; +} + +/* Dump region RGN to file F using dot syntax. */ +void +dump_region_dot (FILE *f, int rgn) +{ + int i; + + fprintf (f, "digraph Region_%d {\n", rgn); + + /* We don't have ebb_head initialized yet, so we can't use + BB_TO_BLOCK (). */ + current_blocks = RGN_BLOCKS (rgn); + + for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++) + { + edge e; + edge_iterator ei; + int src_bb_num = rgn_bb_table[current_blocks + i]; + struct basic_block_def *bb = BASIC_BLOCK (src_bb_num); + + FOR_EACH_EDGE (e, ei, bb->succs) + if (bb_in_region_p (e->dest->index, rgn)) + fprintf (f, "\t%d -> %d\n", src_bb_num, e->dest->index); + } + fprintf (f, "}\n"); +} + +/* The same, but first open a file specified by FNAME. */ +void +dump_region_dot_file (const char *fname, int rgn) +{ + FILE *f = fopen (fname, "wt"); + dump_region_dot (f, rgn); + fclose (f); +} + /* Build a single block region for each basic block in the function. This allows for using the same code for interblock and basic block scheduling. 
*/ static void -find_single_block_region (void) +find_single_block_region (bool ebbs_p) { - basic_block bb; + basic_block bb, ebb_start; + int i = 0; nr_regions = 0; - FOR_EACH_BB (bb) + if (ebbs_p) { + int probability_cutoff; + if (profile_info && flag_branch_probabilities) + probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK); + else + probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY); + probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff; + + FOR_EACH_BB (ebb_start) + { + RGN_NR_BLOCKS (nr_regions) = 0; + RGN_BLOCKS (nr_regions) = i; + RGN_DONT_CALC_DEPS (nr_regions) = 0; + RGN_HAS_REAL_EBB (nr_regions) = 0; + + for (bb = ebb_start; ; bb = bb->next_bb) + { + edge e; + edge_iterator ei; + + rgn_bb_table[i] = bb->index; + RGN_NR_BLOCKS (nr_regions)++; + CONTAINING_RGN (bb->index) = nr_regions; + BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions); + i++; + + if (bb->next_bb == EXIT_BLOCK_PTR + || LABEL_P (BB_HEAD (bb->next_bb))) + break; + + FOR_EACH_EDGE (e, ei, bb->succs) + if ((e->flags & EDGE_FALLTHRU) != 0) + break; + if (! e) + break; + if (e->probability <= probability_cutoff) + break; + } + + ebb_start = bb; + nr_regions++; + } + } + else + FOR_EACH_BB (bb) + { + rgn_bb_table[nr_regions] = bb->index; + RGN_NR_BLOCKS (nr_regions) = 1; + RGN_BLOCKS (nr_regions) = nr_regions; + RGN_DONT_CALC_DEPS (nr_regions) = 0; + RGN_HAS_REAL_EBB (nr_regions) = 0; + + CONTAINING_RGN (bb->index) = nr_regions; + BLOCK_TO_BB (bb->index) = 0; + nr_regions++; + } +} + +/* Estimate number of the insns in the BB. */ +static int +rgn_estimate_number_of_insns (basic_block bb) +{ + int count; + + count = INSN_LUID (BB_END (bb)) - INSN_LUID (BB_HEAD (bb)); + + if (MAY_HAVE_DEBUG_INSNS) { - rgn_bb_table[nr_regions] = bb->index; - RGN_NR_BLOCKS (nr_regions) = 1; - RGN_BLOCKS (nr_regions) = nr_regions; - RGN_DONT_CALC_DEPS (nr_regions) = 0; - RGN_HAS_REAL_EBB (nr_regions) = 0; - CONTAINING_RGN (bb->index) = nr_regions; - BLOCK_TO_BB (bb->index) = 0; - nr_regions++; + rtx insn; + + FOR_BB_INSNS (bb, insn) + if (DEBUG_INSN_P (insn)) + count--; } + + return count; } /* Update number of blocks and the estimate for number of insns @@ -436,8 +554,8 @@ static bool too_large (int block, int *num_bbs, int *num_insns) { (*num_bbs)++; - (*num_insns) += (INSN_LUID (BB_END (BASIC_BLOCK (block))) - - INSN_LUID (BB_HEAD (BASIC_BLOCK (block)))); + (*num_insns) += (common_sched_info->estimate_number_of_insns + (BASIC_BLOCK (block))); return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS)) || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS))); @@ -490,7 +608,7 @@ too_large (int block, int *num_bbs, int *num_insns) of edge tables. That would simplify it somewhat. */ static void -find_rgns (void) +haifa_find_rgns (void) { int *max_hdr, *dfs_nr, *degree; char no_loops = 1; @@ -665,7 +783,7 @@ find_rgns (void) int *queue, *degree1 = NULL; /* We use EXTENDED_RGN_HEADER as an addition to HEADER and put there basic blocks, which are forced to be region heads. - This is done to try to assemble few smaller regions + This is done to try to assemble few smaller regions from a too_large region. */ sbitmap extended_rgn_header = NULL; bool extend_regions_p; @@ -677,11 +795,11 @@ find_rgns (void) block of each region. 
*/ queue = XNEWVEC (int, n_basic_blocks); - + extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0; if (extend_regions_p) { - degree1 = xmalloc (last_basic_block * sizeof (int)); + degree1 = XNEWVEC (int, last_basic_block); extended_rgn_header = sbitmap_alloc (last_basic_block); sbitmap_zero (extended_rgn_header); } @@ -733,11 +851,11 @@ find_rgns (void) loop_head = max_hdr[bb->index]; if (extend_regions_p) - /* We save degree in case when we meet a too_large region - and cancel it. We need a correct degree later when + /* We save degree in case when we meet a too_large region + and cancel it. We need a correct degree later when calling extend_rgns. */ memcpy (degree1, degree, last_basic_block * sizeof (int)); - + /* Decrease degree of all I's successors for topological ordering. */ FOR_EACH_EDGE (e, ei, bb->succs) @@ -746,8 +864,7 @@ find_rgns (void) /* Estimate # insns, and count # blocks in the region. */ num_bbs = 1; - num_insns = (INSN_LUID (BB_END (bb)) - - INSN_LUID (BB_HEAD (bb))); + num_insns = common_sched_info->estimate_number_of_insns (bb); /* Find all loop latches (blocks with back edges to the loop header) or all the leaf blocks in the cfg has no loops. @@ -905,7 +1022,7 @@ find_rgns (void) degree = degree1; degree1 = t; - + /* And force successors of BB to be region heads. This may provide several smaller regions instead of one too_large region. */ @@ -920,10 +1037,10 @@ find_rgns (void) if (extend_regions_p) { free (degree1); - + sbitmap_a_or_b (header, header, extended_rgn_header); sbitmap_free (extended_rgn_header); - + extend_rgns (degree, &idx, header, max_hdr); } } @@ -951,11 +1068,24 @@ find_rgns (void) sbitmap_free (in_stack); } + +/* Wrapper function. + If FLAG_SEL_SCHED_PIPELINING is set, then use custom function to form + regions. Otherwise just call find_rgns_haifa. */ +static void +find_rgns (void) +{ + if (sel_sched_p () && flag_sel_sched_pipelining) + sel_find_rgns (); + else + haifa_find_rgns (); +} + static int gather_region_statistics (int **); static void print_region_statistics (int *, int, int *, int); -/* Calculate the histogram that shows the number of regions having the - given number of basic blocks, and store it in the RSP array. Return +/* Calculate the histogram that shows the number of regions having the + given number of basic blocks, and store it in the RSP array. Return the size of this array. */ static int gather_region_statistics (int **rsp) @@ -970,8 +1100,8 @@ gather_region_statistics (int **rsp) gcc_assert (nr_blocks >= 1); if (nr_blocks > a_sz) - { - a = xrealloc (a, nr_blocks * sizeof (*a)); + { + a = XRESIZEVEC (int, a, nr_blocks); do a[a_sz++] = 0; while (a_sz != nr_blocks); @@ -984,14 +1114,14 @@ gather_region_statistics (int **rsp) return a_sz; } -/* Print regions statistics. S1 and S2 denote the data before and after +/* Print regions statistics. S1 and S2 denote the data before and after calling extend_rgns, respectively. */ static void print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz) { int i; - - /* We iterate until s2_sz because extend_rgns does not decrease + + /* We iterate until s2_sz because extend_rgns does not decrease the maximal region size. */ for (i = 1; i < s2_sz; i++) { @@ -1020,7 +1150,7 @@ print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz) LOOP_HDR - mapping from block to the containing loop (two blocks can reside within one region if they have the same loop header). 
*/ -static void +void extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr) { int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr; @@ -1028,9 +1158,9 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr) max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS); - max_hdr = xmalloc (last_basic_block * sizeof (*max_hdr)); + max_hdr = XNEWVEC (int, last_basic_block); - order = xmalloc (last_basic_block * sizeof (*order)); + order = XNEWVEC (int, last_basic_block); post_order_compute (order, false, false); for (i = nblocks - 1; i >= 0; i--) @@ -1045,27 +1175,28 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr) /* This block already was processed in find_rgns. */ max_hdr[bbn] = -1; } - + /* The idea is to topologically walk through CFG in top-down order. During the traversal, if all the predecessors of a node are marked to be in the same region (they all have the same max_hdr), - then current node is also marked to be a part of that region. + then current node is also marked to be a part of that region. Otherwise the node starts its own region. - CFG should be traversed until no further changes are made. On each - iteration the set of the region heads is extended (the set of those - blocks that have max_hdr[bbi] == bbi). This set is upper bounded by the - set of all basic blocks, thus the algorithm is guaranteed to terminate. */ + CFG should be traversed until no further changes are made. On each + iteration the set of the region heads is extended (the set of those + blocks that have max_hdr[bbi] == bbi). This set is upper bounded by the + set of all basic blocks, thus the algorithm is guaranteed to + terminate. */ while (rescan && iter < max_iter) { rescan = 0; - + for (i = nblocks - 1; i >= 0; i--) { edge e; edge_iterator ei; int bbn = order[i]; - + if (max_hdr[bbn] != -1 && !TEST_BIT (header, bbn)) { int hdr = -1; @@ -1091,16 +1222,16 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr) { hdr = bbn; break; - } + } } else /* BB starts its own region. */ { hdr = bbn; break; - } + } } - + if (hdr == bbn) { /* If BB start its own region, @@ -1109,7 +1240,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr) rescan = 1; } else - gcc_assert (hdr != -1); + gcc_assert (hdr != -1); max_hdr[bbn] = hdr; } @@ -1117,17 +1248,17 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr) iter++; } - + /* Statistics were gathered on the SPEC2000 package of tests with mainline weekly snapshot gcc-4.1-20051015 on ia64. - + Statistics for SPECint: 1 iteration : 1751 cases (38.7%) 2 iterations: 2770 cases (61.3%) Blocks wrapped in regions by find_rgns without extension: 18295 blocks Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks (We don't count single block regions here). - + Statistics for SPECfp: 1 iteration : 621 cases (35.9%) 2 iterations: 1110 cases (64.1%) @@ -1139,11 +1270,11 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr) This can be overridden with max-sched-extend-regions-iters parameter: 0 - disable region extension, N > 0 - do at most N iterations. */ - + if (sched_verbose && iter != 0) fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter, rescan ? "... 
failed" : ""); - + if (!rescan && iter != 0) { int *s1 = NULL, s1_sz = 0; @@ -1163,7 +1294,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr) edge e; edge_iterator ei; int num_bbs = 0, j, num_insns = 0, large; - + large = too_large (bbn, &num_bbs, &num_insns); degree[bbn] = -1; @@ -1198,7 +1329,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr) { RGN_NR_BLOCKS (nr_regions) = 1; nr_regions++; - } + } num_bbs = 1; @@ -1207,7 +1338,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr) int succn = order[j]; if (max_hdr[succn] == bbn) - /* This cycle iterates over all basic blocks, that + /* This cycle iterates over all basic blocks, that are supposed to be in the region with head BBN, and wraps them into that region (or in single block region). */ @@ -1215,7 +1346,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr) gcc_assert (degree[succn] == 0); degree[succn] = -1; - rgn_bb_table[idx] = succn; + rgn_bb_table[idx] = succn; BLOCK_TO_BB (succn) = large ? 0 : num_bbs++; CONTAINING_RGN (succn) = nr_regions; @@ -1230,7 +1361,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr) } idx++; - + FOR_EACH_EDGE (e, ei, BASIC_BLOCK (succn)->succs) if (e->dest != EXIT_BLOCK_PTR) degree[e->dest->index]--; @@ -1249,7 +1380,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr) { int *s2, s2_sz; - /* Get the new statistics and print the comparison with the + /* Get the new statistics and print the comparison with the one before calling this function. */ s2_sz = gather_region_statistics (&s2); print_region_statistics (s1, s1_sz, s2, s2_sz); @@ -1257,11 +1388,11 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr) free (s2); } } - + free (order); free (max_hdr); - *idxp = idx; + *idxp = idx; } /* Functions for regions scheduling information. */ @@ -1277,7 +1408,7 @@ compute_dom_prob_ps (int bb) /* We shouldn't have any real ebbs yet. */ gcc_assert (ebb_head [bb] == bb + current_blocks); - + if (IS_RGN_ENTRY (bb)) { SET_BIT (dom[bb], 0); @@ -1346,13 +1477,26 @@ static void compute_trg_info (int trg) { candidate *sp; - edgelst el; + edgelst el = { NULL, 0 }; int i, j, k, update_idx; basic_block block; sbitmap visited; edge_iterator ei; edge e; + candidate_table = XNEWVEC (candidate, current_nr_blocks); + + bblst_last = 0; + /* bblst_table holds split blocks and update blocks for each block after + the current one in the region. split blocks and update blocks are + the TO blocks of region edges, so there can be at most rgn_nr_edges + of them. */ + bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges; + bblst_table = XNEWVEC (basic_block, bblst_size); + + edgelst_last = 0; + edgelst_table = XNEWVEC (edge, rgn_nr_edges); + /* Define some of the fields for the target bb as well. */ sp = candidate_table + trg; sp->is_valid = 1; @@ -1439,6 +1583,15 @@ compute_trg_info (int trg) sbitmap_free (visited); } +/* Free the computed target info. */ +static void +free_trg_info (void) +{ + free (candidate_table); + free (bblst_table); + free (edgelst_table); +} + /* Print candidates info, for debugging purposes. Callable from debugger. */ void @@ -1547,7 +1700,7 @@ check_live_1 (int src, rtx x) int t = bitmap_bit_p (¬_in_df, b->index); /* We can have split blocks, that were recently generated. - such blocks are always outside current region. */ + Such blocks are always outside current region. 
*/ gcc_assert (!t || (CONTAINING_RGN (b->index) != CONTAINING_RGN (BB_TO_BLOCK (src)))); @@ -1698,11 +1851,12 @@ update_live (rtx insn, int src) static void set_spec_fed (rtx load_insn) { - dep_link_t link; + sd_iterator_def sd_it; + dep_t dep; - FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (load_insn)) - if (DEP_LINK_KIND (link) == REG_DEP_TRUE) - FED_BY_SPEC_LOAD (DEP_LINK_CON (link)) = 1; + FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep) + if (DEP_TYPE (dep) == REG_DEP_TRUE) + FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1; } /* On the path from the insn to load_insn_bb, find a conditional @@ -1711,18 +1865,19 @@ branch depending on insn, that guards the speculative load. */ static int find_conditional_protection (rtx insn, int load_insn_bb) { - dep_link_t link; + sd_iterator_def sd_it; + dep_t dep; /* Iterate through DEF-USE forward dependences. */ - FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (insn)) + FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep) { - rtx next = DEP_LINK_CON (link); + rtx next = DEP_CON (dep); if ((CONTAINING_RGN (BLOCK_NUM (next)) == CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb))) && IS_REACHABLE (INSN_BB (next), load_insn_bb) && load_insn_bb != INSN_BB (next) - && DEP_LINK_KIND (link) == REG_DEP_TRUE + && DEP_TYPE (dep) == REG_DEP_TRUE && (JUMP_P (next) || find_conditional_protection (next, load_insn_bb))) return 1; @@ -1732,11 +1887,11 @@ find_conditional_protection (rtx insn, int load_insn_bb) /* Returns 1 if the same insn1 that participates in the computation of load_insn's address is feeding a conditional branch that is - guarding on load_insn. This is true if we find a the two DEF-USE + guarding on load_insn. This is true if we find two DEF-USE chains: insn1 -> ... -> conditional-branch insn1 -> ... -> load_insn, - and if a flow path exist: + and if a flow path exists: insn1 -> ... -> conditional-branch -> ... -> load_insn, and if insn1 is on the path region-entry -> ... -> bb_trg -> ... load_insn. @@ -1747,14 +1902,15 @@ find_conditional_protection (rtx insn, int load_insn_bb) static int is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg) { - dep_link_t link; + sd_iterator_def sd_it; + dep_t dep; - FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (load_insn)) + FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep) { - rtx insn1 = DEP_LINK_PRO (link); + rtx insn1 = DEP_PRO (dep); /* Must be a DEF-USE dependence upon non-branch. */ - if (DEP_LINK_KIND (link) != REG_DEP_TRUE + if (DEP_TYPE (dep) != REG_DEP_TRUE || JUMP_P (insn1)) continue; @@ -1797,27 +1953,29 @@ is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg) static int is_pfree (rtx load_insn, int bb_src, int bb_trg) { - dep_link_t back_link; + sd_iterator_def back_sd_it; + dep_t back_dep; candidate *candp = candidate_table + bb_src; if (candp->split_bbs.nr_members != 1) /* Must have exactly one escape block. */ return 0; - FOR_EACH_DEP_LINK (back_link, INSN_BACK_DEPS (load_insn)) + FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep) { - rtx insn1 = DEP_LINK_PRO (back_link); + rtx insn1 = DEP_PRO (back_dep); - if (DEP_LINK_KIND (back_link) == REG_DEP_TRUE) + if (DEP_TYPE (back_dep) == REG_DEP_TRUE) + /* Found a DEF-USE dependence (insn1, load_insn). */ { - /* Found a DEF-USE dependence (insn1, load_insn). 
*/ - dep_link_t fore_link; + sd_iterator_def fore_sd_it; + dep_t fore_dep; - FOR_EACH_DEP_LINK (fore_link, INSN_FORW_DEPS (insn1)) + FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep) { - rtx insn2 = DEP_LINK_CON (fore_link); + rtx insn2 = DEP_CON (fore_dep); - if (DEP_LINK_KIND (fore_link) == REG_DEP_TRUE) + if (DEP_TYPE (fore_dep) == REG_DEP_TRUE) { /* Found a DEF-USE dependence (insn1, insn2). */ if (haifa_classify_insn (insn2) != PFREE_CANDIDATE) @@ -1850,7 +2008,7 @@ is_prisky (rtx load_insn, int bb_src, int bb_trg) if (FED_BY_SPEC_LOAD (load_insn)) return 1; - if (deps_list_empty_p (INSN_BACK_DEPS (load_insn))) + if (sd_lists_empty_p (load_insn, SD_LIST_BACK)) /* Dependence may 'hide' out of the region. */ return 1; @@ -1917,20 +2075,16 @@ static int can_schedule_ready_p (rtx); static void begin_schedule_ready (rtx, rtx); static ds_t new_ready (rtx, ds_t); static int schedule_more_p (void); -static const char *rgn_print_insn (rtx, int); +static const char *rgn_print_insn (const_rtx, int); static int rgn_rank (rtx, rtx); -static int contributes_to_priority (rtx, rtx); static void compute_jump_reg_dependencies (rtx, regset, regset, regset); /* Functions for speculative scheduling. */ -static void add_remove_insn (rtx, int); -static void extend_regions (void); -static void add_block1 (basic_block, basic_block); -static void fix_recovery_cfg (int, int, int); +static void rgn_add_remove_insn (rtx, int); +static void rgn_add_block (basic_block, basic_block); +static void rgn_fix_recovery_cfg (int, int, int); static basic_block advance_target_bb (basic_block, rtx); -static void debug_rgn_dependencies (int); - /* Return nonzero if there are more insns that should be scheduled. */ static int @@ -1960,27 +2114,12 @@ init_ready_list (void) /* Prepare current target block info. */ if (current_nr_blocks > 1) - { - candidate_table = XNEWVEC (candidate, current_nr_blocks); - - bblst_last = 0; - /* bblst_table holds split blocks and update blocks for each block after - the current one in the region. split blocks and update blocks are - the TO blocks of region edges, so there can be at most rgn_nr_edges - of them. */ - bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges; - bblst_table = XNEWVEC (basic_block, bblst_size); - - edgelst_last = 0; - edgelst_table = XNEWVEC (edge, rgn_nr_edges); - - compute_trg_info (target_bb); - } + compute_trg_info (target_bb); /* Initialize ready list with all 'ready' insns in target block. Count number of insns in the target block being scheduled. */ for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn)) - { + { try_ready (insn); target_n_insns++; @@ -2003,7 +2142,7 @@ init_ready_list (void) src_head = head; for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn)) - if (INSN_P (insn)) + if (INSN_P (insn) && !BOUNDARY_DEBUG_INSN_P (insn)) try_ready (insn); } } @@ -2018,7 +2157,7 @@ can_schedule_ready_p (rtx insn) if (INSN_BB (insn) != target_bb && IS_SPECULATIVE_INSN (insn) && !check_live (insn, INSN_BB (insn))) - return 0; + return 0; else return 1; } @@ -2067,12 +2206,12 @@ new_ready (rtx next, ds_t ts) int not_ex_free = 0; /* For speculative insns, before inserting to ready/queue, - check live, exception-free, and issue-delay. */ + check live, exception-free, and issue-delay. 
*/ if (!IS_VALID (INSN_BB (next)) || CANT_MOVE (next) || (IS_SPECULATIVE_INSN (next) && ((recog_memoized (next) >= 0 - && min_insn_conflict_delay (curr_state, next, next) + && min_insn_conflict_delay (curr_state, next, next) > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY)) || IS_SPECULATION_CHECK_P (next) || !check_live (next, INSN_BB (next)) @@ -2082,14 +2221,28 @@ new_ready (rtx next, ds_t ts) if (not_ex_free /* We are here because is_exception_free () == false. But we possibly can handle that with control speculation. */ - && current_sched_info->flags & DO_SPECULATION) - /* Here we got new control-speculative instruction. */ - ts = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK); + && sched_deps_info->generate_spec_deps + && spec_info->mask & BEGIN_CONTROL) + { + ds_t new_ds; + + /* Add control speculation to NEXT's dependency type. */ + new_ds = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK); + + /* Check if NEXT can be speculated with new dependency type. */ + if (sched_insn_is_legitimate_for_speculation_p (next, new_ds)) + /* Here we got new control-speculative instruction. */ + ts = new_ds; + else + /* NEXT isn't ready yet. */ + ts = (ts & ~SPECULATIVE) | HARD_DEP; + } else + /* NEXT isn't ready yet. */ ts = (ts & ~SPECULATIVE) | HARD_DEP; } } - + return ts; } @@ -2099,7 +2252,7 @@ new_ready (rtx next, ds_t ts) to be formatted so that multiple output lines will line up nicely. */ static const char * -rgn_print_insn (rtx insn, int aligned) +rgn_print_insn (const_rtx insn, int aligned) { static char tmp[80]; @@ -2150,7 +2303,7 @@ rgn_rank (rtx insn1, rtx insn2) return nonzero if we should include this dependence in priority calculations. */ -static int +int contributes_to_priority (rtx next, rtx insn) { /* NEXT and INSN reside in one ebb. */ @@ -2172,10 +2325,49 @@ compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED, add_branch_dependences. */ } +/* This variable holds common_sched_info hooks and data relevant to + the interblock scheduler. */ +static struct common_sched_info_def rgn_common_sched_info; + + +/* This holds data for the dependence analysis relevant to + the interblock scheduler. */ +static struct sched_deps_info_def rgn_sched_deps_info; + +/* This holds constant data used for initializing the above structure + for the Haifa scheduler. */ +static const struct sched_deps_info_def rgn_const_sched_deps_info = + { + compute_jump_reg_dependencies, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + 0, 0, 0 + }; + +/* Same as above, but for the selective scheduler. */ +static const struct sched_deps_info_def rgn_const_sel_sched_deps_info = + { + compute_jump_reg_dependencies, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + 0, 0, 0 + }; + +/* Return true if scheduling INSN will trigger finish of scheduling + current block. */ +static bool +rgn_insn_finishes_block_p (rtx insn) +{ + if (INSN_BB (insn) == target_bb + && sched_target_n_insns + 1 == target_n_insns) + /* INSN is the last not-scheduled instruction in the current block. */ + return true; + + return false; +} + /* Used in schedule_insns to initialize current_sched_info for scheduling regions (or single basic blocks). 
*/ -static struct sched_info region_sched_info = +static const struct haifa_sched_info rgn_const_sched_info = { init_ready_list, can_schedule_ready_p, @@ -2184,20 +2376,30 @@ static struct sched_info region_sched_info = rgn_rank, rgn_print_insn, contributes_to_priority, - compute_jump_reg_dependencies, + rgn_insn_finishes_block_p, NULL, NULL, NULL, NULL, - 0, 0, 0, + 0, 0, - add_remove_insn, + rgn_add_remove_insn, begin_schedule_ready, - add_block1, advance_target_bb, - fix_recovery_cfg, SCHED_RGN }; +/* This variable holds the data and hooks needed to the Haifa scheduler backend + for the interblock scheduler frontend. */ +static struct haifa_sched_info rgn_sched_info; + +/* Returns maximum priority that an insn was assigned to. */ + +int +get_rgn_sched_max_insns_priority (void) +{ + return rgn_sched_info.sched_max_insns_priority; +} + /* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register. */ static bool @@ -2209,7 +2411,7 @@ sets_likely_spilled (rtx pat) } static void -sets_likely_spilled_1 (rtx x, rtx pat, void *data) +sets_likely_spilled_1 (rtx x, const_rtx pat, void *data) { bool *ret = (bool *) data; @@ -2220,9 +2422,12 @@ sets_likely_spilled_1 (rtx x, rtx pat, void *data) *ret = true; } +/* A bitmap to note insns that participate in any dependency. Used in + add_branch_dependences. */ +static sbitmap insn_referenced; + /* Add dependences so that branches are scheduled to run last in their block. */ - static void add_branch_dependences (rtx head, rtx tail) { @@ -2246,6 +2451,9 @@ add_branch_dependences (rtx head, rtx tail) are not moved before reload because we can wind up with register allocation failures. */ + while (tail != head && DEBUG_INSN_P (tail)) + tail = PREV_INSN (tail); + insn = tail; last = 0; while (CALL_P (insn) @@ -2264,12 +2472,11 @@ add_branch_dependences (rtx head, rtx tail) if (!NOTE_P (insn)) { if (last != 0 - && (find_link_by_pro_in_deps_list (INSN_BACK_DEPS (last), insn) - == NULL)) + && sd_find_dep_between (insn, last, false) == NULL) { if (! sched_insns_conditions_mutex_p (last, insn)) add_dependence (last, insn, REG_DEP_ANTI); - INSN_REF_COUNT (insn)++; + SET_BIT (insn_referenced, INSN_LUID (insn)); } CANT_MOVE (insn) = 1; @@ -2281,7 +2488,9 @@ add_branch_dependences (rtx head, rtx tail) if (insn == head) break; - insn = PREV_INSN (insn); + do + insn = PREV_INSN (insn); + while (insn != head && DEBUG_INSN_P (insn)); } /* Make sure these insns are scheduled last in their block. */ @@ -2291,15 +2500,17 @@ add_branch_dependences (rtx head, rtx tail) { insn = prev_nonnote_insn (insn); - if (INSN_REF_COUNT (insn) != 0) + if (TEST_BIT (insn_referenced, INSN_LUID (insn)) + || DEBUG_INSN_P (insn)) continue; if (! sched_insns_conditions_mutex_p (last, insn)) add_dependence (last, insn, REG_DEP_ANTI); - INSN_REF_COUNT (insn) = 1; } -#ifdef HAVE_conditional_execution + if (!targetm.have_conditional_execution ()) + return; + /* Finally, if the block ends in a jump, and we are doing intra-block scheduling, make sure that the branch depends on any COND_EXEC insns inside the block to avoid moving the COND_EXECs past the branch insn. @@ -2348,7 +2559,6 @@ add_branch_dependences (rtx head, rtx tail) if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC) add_dependence (tail, insn, REG_DEP_ANTI); } -#endif } /* Data structures for the computation of data dependences in a regions. 
We @@ -2364,10 +2574,10 @@ static struct deps *bb_deps; static rtx concat_INSN_LIST (rtx copy, rtx old) { - rtx new = old; + rtx new_rtx = old; for (; copy ; copy = XEXP (copy, 1)) - new = alloc_INSN_LIST (XEXP (copy, 0), new); - return new; + new_rtx = alloc_INSN_LIST (XEXP (copy, 0), new_rtx); + return new_rtx; } static void @@ -2389,6 +2599,64 @@ concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p, *old_mems_p = new_mems; } +/* Join PRED_DEPS to the SUCC_DEPS. */ +void +deps_join (struct deps *succ_deps, struct deps *pred_deps) +{ + unsigned reg; + reg_set_iterator rsi; + + /* The reg_last lists are inherited by successor. */ + EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi) + { + struct deps_reg *pred_rl = &pred_deps->reg_last[reg]; + struct deps_reg *succ_rl = &succ_deps->reg_last[reg]; + + succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses); + succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets); + succ_rl->implicit_sets + = concat_INSN_LIST (pred_rl->implicit_sets, succ_rl->implicit_sets); + succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers, + succ_rl->clobbers); + succ_rl->uses_length += pred_rl->uses_length; + succ_rl->clobbers_length += pred_rl->clobbers_length; + } + IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use); + + /* Mem read/write lists are inherited by successor. */ + concat_insn_mem_list (pred_deps->pending_read_insns, + pred_deps->pending_read_mems, + &succ_deps->pending_read_insns, + &succ_deps->pending_read_mems); + concat_insn_mem_list (pred_deps->pending_write_insns, + pred_deps->pending_write_mems, + &succ_deps->pending_write_insns, + &succ_deps->pending_write_mems); + + succ_deps->last_pending_memory_flush + = concat_INSN_LIST (pred_deps->last_pending_memory_flush, + succ_deps->last_pending_memory_flush); + + succ_deps->pending_read_list_length += pred_deps->pending_read_list_length; + succ_deps->pending_write_list_length += pred_deps->pending_write_list_length; + succ_deps->pending_flush_length += pred_deps->pending_flush_length; + + /* last_function_call is inherited by successor. */ + succ_deps->last_function_call + = concat_INSN_LIST (pred_deps->last_function_call, + succ_deps->last_function_call); + + /* last_function_call_may_noreturn is inherited by successor. */ + succ_deps->last_function_call_may_noreturn + = concat_INSN_LIST (pred_deps->last_function_call_may_noreturn, + succ_deps->last_function_call_may_noreturn); + + /* sched_before_next_call is inherited by successor. */ + succ_deps->sched_before_next_call + = concat_INSN_LIST (pred_deps->sched_before_next_call, + succ_deps->sched_before_next_call); +} + /* After computing the dependencies for block BB, propagate the dependencies found in TMP_DEPS to the successors of the block. */ static void @@ -2401,62 +2669,13 @@ propagate_deps (int bb, struct deps *pred_deps) /* bb's structures are inherited by its successors. */ FOR_EACH_EDGE (e, ei, block->succs) { - struct deps *succ_deps; - unsigned reg; - reg_set_iterator rsi; - /* Only bbs "below" bb, in the same region, are interesting. */ if (e->dest == EXIT_BLOCK_PTR || CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index) || BLOCK_TO_BB (e->dest->index) <= bb) continue; - succ_deps = bb_deps + BLOCK_TO_BB (e->dest->index); - - /* The reg_last lists are inherited by successor. 
*/ - EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi) - { - struct deps_reg *pred_rl = &pred_deps->reg_last[reg]; - struct deps_reg *succ_rl = &succ_deps->reg_last[reg]; - - succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses); - succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets); - succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers, - succ_rl->clobbers); - succ_rl->uses_length += pred_rl->uses_length; - succ_rl->clobbers_length += pred_rl->clobbers_length; - } - IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use); - - /* Mem read/write lists are inherited by successor. */ - concat_insn_mem_list (pred_deps->pending_read_insns, - pred_deps->pending_read_mems, - &succ_deps->pending_read_insns, - &succ_deps->pending_read_mems); - concat_insn_mem_list (pred_deps->pending_write_insns, - pred_deps->pending_write_mems, - &succ_deps->pending_write_insns, - &succ_deps->pending_write_mems); - - succ_deps->last_pending_memory_flush - = concat_INSN_LIST (pred_deps->last_pending_memory_flush, - succ_deps->last_pending_memory_flush); - - succ_deps->pending_read_list_length - += pred_deps->pending_read_list_length; - succ_deps->pending_write_list_length - += pred_deps->pending_write_list_length; - succ_deps->pending_flush_length += pred_deps->pending_flush_length; - - /* last_function_call is inherited by successor. */ - succ_deps->last_function_call - = concat_INSN_LIST (pred_deps->last_function_call, - succ_deps->last_function_call); - - /* sched_before_next_call is inherited by successor. */ - succ_deps->sched_before_next_call - = concat_INSN_LIST (pred_deps->sched_before_next_call, - succ_deps->sched_before_next_call); + deps_join (bb_deps + BLOCK_TO_BB (e->dest->index), pred_deps); } /* These lists should point to the right place, for correct @@ -2473,25 +2692,27 @@ propagate_deps (int bb, struct deps *pred_deps) pred_deps->pending_write_mems = 0; } -/* Compute backward dependences inside bb. In a multiple blocks region: +/* Compute dependences inside bb. In a multiple blocks region: (1) a bb is analyzed after its predecessors, and (2) the lists in effect at the end of bb (after analyzing for bb) are inherited by bb's successors. Specifically for reg-reg data dependences, the block insns are - scanned by sched_analyze () top-to-bottom. Two lists are + scanned by sched_analyze () top-to-bottom. Three lists are maintained by sched_analyze (): reg_last[].sets for register DEFs, - and reg_last[].uses for register USEs. + reg_last[].implicit_sets for implicit hard register DEFs, and + reg_last[].uses for register USEs. When analysis is completed for bb, we update for its successors: ; - DEFS[succ] = Union (DEFS [succ], DEFS [bb]) + ; - IMPLICIT_DEFS[succ] = Union (IMPLICIT_DEFS [succ], IMPLICIT_DEFS [bb]) ; - USES[succ] = Union (USES [succ], DEFS [bb]) The mechanism for computing mem-mem data dependence is very similar, and the result is interblock dependences in the region. */ static void -compute_block_backward_dependences (int bb) +compute_block_dependences (int bb) { rtx head, tail; struct deps tmp_deps; @@ -2501,14 +2722,36 @@ compute_block_backward_dependences (int bb) /* Do the analysis for this block. */ gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); + sched_analyze (&tmp_deps, head, tail); - add_branch_dependences (head, tail); + + /* Selective scheduling handles control dependencies by itself. 
*/ + if (!sel_sched_p ()) + add_branch_dependences (head, tail); if (current_nr_blocks > 1) propagate_deps (bb, &tmp_deps); /* Free up the INSN_LISTs. */ free_deps (&tmp_deps); + + if (targetm.sched.dependencies_evaluation_hook) + targetm.sched.dependencies_evaluation_hook (head, tail); +} + +/* Free dependencies of instructions inside BB. */ +static void +free_block_dependencies (int bb) +{ + rtx head; + rtx tail; + + get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); + + if (no_real_insns_p (head, tail)) + return; + + sched_free_deps (head, tail, true); } /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add @@ -2528,7 +2771,8 @@ free_pending_lists (void) } } - +/* Print dependences for debugging starting from FROM_BB. + Callable from debugger. */ /* Print dependences for debugging starting from FROM_BB. Callable from debugger. */ void @@ -2543,7 +2787,6 @@ debug_rgn_dependencies (int from_bb) { rtx head, tail; - gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n", BB_TO_BLOCK (bb), bb); @@ -2568,8 +2811,6 @@ void debug_dependencies (rtx head, rtx tail) for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) { - dep_link_t link; - if (! INSN_P (insn)) { int n; @@ -2590,9 +2831,13 @@ void debug_dependencies (rtx head, rtx tail) INSN_UID (insn), INSN_CODE (insn), BLOCK_NUM (insn), - INSN_DEP_COUNT (insn), - INSN_PRIORITY (insn), - insn_cost (insn)); + sched_emulate_haifa_p ? -1 : sd_lists_size (insn, SD_LIST_BACK), + (sel_sched_p () ? (sched_emulate_haifa_p ? -1 + : INSN_PRIORITY (insn)) + : INSN_PRIORITY (insn)), + (sel_sched_p () ? (sched_emulate_haifa_p ? -1 + : insn_cost (insn)) + : insn_cost (insn))); if (recog_memoized (insn) < 0) fprintf (sched_dump, "nothing"); @@ -2600,8 +2845,13 @@ void debug_dependencies (rtx head, rtx tail) print_reservation (sched_dump, insn); fprintf (sched_dump, "\t: "); - FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (insn)) - fprintf (sched_dump, "%d ", INSN_UID (DEP_LINK_CON (link))); + { + sd_iterator_def sd_it; + dep_t dep; + + FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep) + fprintf (sched_dump, "%d ", INSN_UID (DEP_CON (dep))); + } fprintf (sched_dump, "\n"); } @@ -2610,7 +2860,7 @@ void debug_dependencies (rtx head, rtx tail) /* Returns true if all the basic blocks of the current region have NOTE_DISABLE_SCHED_OF_BLOCK which means not to schedule that region. */ -static bool +bool sched_is_disabled_for_current_region_p (void) { int bb; @@ -2622,6 +2872,50 @@ sched_is_disabled_for_current_region_p (void) return true; } +/* Free all region dependencies saved in INSN_BACK_DEPS and + INSN_RESOLVED_BACK_DEPS. The Haifa scheduler does this on the fly + when scheduling, so this function is supposed to be called from + the selective scheduling only. */ +void +free_rgn_deps (void) +{ + int bb; + + for (bb = 0; bb < current_nr_blocks; bb++) + { + rtx head, tail; + + gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); + get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); + + sched_free_deps (head, tail, false); + } +} + +static int rgn_n_insns; + +/* Compute insn priority for a current region. 
*/ +void +compute_priorities (void) +{ + int bb; + + current_sched_info->sched_max_insns_priority = 0; + for (bb = 0; bb < current_nr_blocks; bb++) + { + rtx head, tail; + + gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); + get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); + + if (no_real_insns_p (head, tail)) + continue; + + rgn_n_insns += set_priorities (head, tail); + } + current_sched_info->sched_max_insns_priority++; +} + /* Schedule a region. A region is either an inner loop, a loop-free subroutine, or a single basic block. Each bb in the region is scheduled after its flow predecessors. */ @@ -2629,125 +2923,47 @@ sched_is_disabled_for_current_region_p (void) static void schedule_region (int rgn) { - basic_block block; - edge_iterator ei; - edge e; int bb; int sched_rgn_n_insns = 0; rgn_n_insns = 0; - /* Set variables for the current region. */ - current_nr_blocks = RGN_NR_BLOCKS (rgn); - current_blocks = RGN_BLOCKS (rgn); - - /* See comments in add_block1, for what reasons we allocate +1 element. */ - ebb_head = xrealloc (ebb_head, (current_nr_blocks + 1) * sizeof (*ebb_head)); - for (bb = 0; bb <= current_nr_blocks; bb++) - ebb_head[bb] = current_blocks + bb; + + rgn_setup_region (rgn); /* Don't schedule region that is marked by NOTE_DISABLE_SCHED_OF_BLOCK. */ if (sched_is_disabled_for_current_region_p ()) return; - if (!RGN_DONT_CALC_DEPS (rgn)) - { - init_deps_global (); - - /* Initializations for region data dependence analysis. */ - bb_deps = XNEWVEC (struct deps, current_nr_blocks); - for (bb = 0; bb < current_nr_blocks; bb++) - init_deps (bb_deps + bb); - - /* Compute backward dependencies. */ - for (bb = 0; bb < current_nr_blocks; bb++) - compute_block_backward_dependences (bb); - - /* Compute forward dependencies. */ - for (bb = current_nr_blocks - 1; bb >= 0; bb--) - { - rtx head, tail; + sched_rgn_compute_dependencies (rgn); - gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); - get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); + sched_rgn_local_init (rgn); - compute_forward_dependences (head, tail); - - if (targetm.sched.dependencies_evaluation_hook) - targetm.sched.dependencies_evaluation_hook (head, tail); - } - - free_pending_lists (); - - finish_deps_global (); - - free (bb_deps); - } - else - /* This is a recovery block. It is always a single block region. */ - gcc_assert (current_nr_blocks == 1); - /* Set priorities. */ - current_sched_info->sched_max_insns_priority = 0; - for (bb = 0; bb < current_nr_blocks; bb++) - { - rtx head, tail; - - gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); - get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); + compute_priorities (); - rgn_n_insns += set_priorities (head, tail); - } - current_sched_info->sched_max_insns_priority++; + sched_extend_ready_list (rgn_n_insns); - /* Compute interblock info: probabilities, split-edges, dominators, etc. */ - if (current_nr_blocks > 1) + if (sched_pressure_p) { - prob = XNEWVEC (int, current_nr_blocks); - - dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks); - sbitmap_vector_zero (dom, current_nr_blocks); - - /* Use ->aux to implement EDGE_TO_BIT mapping. 
*/ - rgn_nr_edges = 0; - FOR_EACH_BB (block) - { - if (CONTAINING_RGN (block->index) != rgn) - continue; - FOR_EACH_EDGE (e, ei, block->succs) - SET_EDGE_TO_BIT (e, rgn_nr_edges++); - } - - rgn_edges = XNEWVEC (edge, rgn_nr_edges); - rgn_nr_edges = 0; - FOR_EACH_BB (block) + sched_init_region_reg_pressure_info (); + for (bb = 0; bb < current_nr_blocks; bb++) { - if (CONTAINING_RGN (block->index) != rgn) - continue; - FOR_EACH_EDGE (e, ei, block->succs) - rgn_edges[rgn_nr_edges++] = e; - } + basic_block first_bb, last_bb; + rtx head, tail; - /* Split edges. */ - pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges); - sbitmap_vector_zero (pot_split, current_nr_blocks); - ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges); - sbitmap_vector_zero (ancestor_edges, current_nr_blocks); + first_bb = EBB_FIRST_BB (bb); + last_bb = EBB_LAST_BB (bb); - /* Compute probabilities, dominators, split_edges. */ - for (bb = 0; bb < current_nr_blocks; bb++) - compute_dom_prob_ps (bb); + get_ebb_head_tail (first_bb, last_bb, &head, &tail); - /* Cleanup ->aux used for EDGE_TO_BIT mapping. */ - /* We don't need them anymore. But we want to avoid duplication of - aux fields in the newly created edges. */ - FOR_EACH_BB (block) - { - if (CONTAINING_RGN (block->index) != rgn) - continue; - FOR_EACH_EDGE (e, ei, block->succs) - e->aux = NULL; - } + if (no_real_insns_p (head, tail)) + { + gcc_assert (first_bb == last_bb); + continue; + } + sched_setup_bb_reg_pressure_info (first_bb, PREV_INSN (head)); + } } /* Now we can schedule all blocks. */ @@ -2770,31 +2986,7 @@ schedule_region (int rgn) current_sched_info->prev_head = PREV_INSN (head); current_sched_info->next_tail = NEXT_INSN (tail); - - /* rm_other_notes only removes notes which are _inside_ the - block---that is, it won't remove notes before the first real insn - or after the last real insn of the block. So if the first insn - has a REG_SAVE_NOTE which would otherwise be emitted before the - insn, it is redundant with the note before the start of the - block, and so we have to take it out. */ - if (INSN_P (head)) - { - rtx note; - - for (note = REG_NOTES (head); note; note = XEXP (note, 1)) - if (REG_NOTE_KIND (note) == REG_SAVE_NOTE) - remove_note (head, note); - } - else - /* This means that first block in ebb is empty. - It looks to me as an impossible thing. There at least should be - a recovery check, that caused the splitting. */ - gcc_unreachable (); - - /* Remove remaining note insns from the block, save them in - note_list. These notes are restored at the end of - schedule_block (). */ - rm_other_notes (head, tail); + remove_notes (head, tail); unlink_bb_notes (first_bb, last_bb); @@ -2806,7 +2998,7 @@ schedule_region (int rgn) curr_bb = first_bb; if (dbg_cnt (sched_block)) { - schedule_block (&curr_bb, rgn_n_insns); + schedule_block (&curr_bb); gcc_assert (EBB_FIRST_BB (bb) == first_bb); sched_rgn_n_insns += sched_n_insns; } @@ -2817,53 +3009,54 @@ schedule_region (int rgn) /* Clean up. */ if (current_nr_blocks > 1) - { - free (candidate_table); - free (bblst_table); - free (edgelst_table); - } + free_trg_info (); } /* Sanity check: verify that all region insns were scheduled. */ gcc_assert (sched_rgn_n_insns == rgn_n_insns); + sched_finish_ready_list (); /* Done with this region. */ + sched_rgn_local_finish (); - if (current_nr_blocks > 1) - { - free (prob); - sbitmap_vector_free (dom); - sbitmap_vector_free (pot_split); - sbitmap_vector_free (ancestor_edges); - free (rgn_edges); - } + /* Free dependencies. 
*/ + for (bb = 0; bb < current_nr_blocks; ++bb) + free_block_dependencies (bb); + + gcc_assert (haifa_recovery_bb_ever_added_p + || deps_pools_are_empty_p ()); } /* Initialize data structures for region scheduling. */ -static void -init_regions (void) +void +sched_rgn_init (bool single_blocks_p) { - nr_regions = 0; - rgn_table = 0; - rgn_bb_table = 0; - block_to_bb = 0; - containing_rgn = 0; + min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE) + / 100); + + nr_inter = 0; + nr_spec = 0; + extend_regions (); + CONTAINING_RGN (ENTRY_BLOCK) = -1; + CONTAINING_RGN (EXIT_BLOCK) = -1; + /* Compute regions for scheduling. */ - if (reload_completed + if (single_blocks_p || n_basic_blocks == NUM_FIXED_BLOCKS + 1 || !flag_schedule_interblock || is_cfg_nonregular ()) { - find_single_block_region (); + find_single_block_region (sel_sched_p ()); } else { /* Compute the dominators and post dominators. */ - calculate_dominance_info (CDI_DOMINATORS); + if (!sel_sched_p ()) + calculate_dominance_info (CDI_DOMINATORS); /* Find regions. */ find_rgns (); @@ -2873,56 +3066,20 @@ init_regions (void) /* For now. This will move as more and more of haifa is converted to using the cfg code. */ - free_dominance_info (CDI_DOMINATORS); + if (!sel_sched_p ()) + free_dominance_info (CDI_DOMINATORS); } - RGN_BLOCKS (nr_regions) = RGN_BLOCKS (nr_regions - 1) + - RGN_NR_BLOCKS (nr_regions - 1); -} -/* The one entry point in this file. */ + gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks); + RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) + + RGN_NR_BLOCKS (nr_regions - 1)); +} + +/* Free data structures for region scheduling. */ void -schedule_insns (void) +sched_rgn_finish (void) { - int rgn; - - /* Taking care of this degenerate case makes the rest of - this code simpler. */ - if (n_basic_blocks == NUM_FIXED_BLOCKS) - return; - - nr_inter = 0; - nr_spec = 0; - - /* We need current_sched_info in init_dependency_caches, which is - invoked via sched_init. */ - current_sched_info = ®ion_sched_info; - - df_set_flags (DF_LR_RUN_DCE); - df_note_add_problem (); - df_analyze (); - regstat_compute_calls_crossed (); - - sched_init (); - - bitmap_initialize (¬_in_df, 0); - bitmap_clear (¬_in_df); - - min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE) - / 100); - - init_regions (); - - /* EBB_HEAD is a region-scope structure. But we realloc it for - each region to save time/memory/something else. */ - ebb_head = 0; - - /* Schedule every region in the subroutine. */ - for (rgn = 0; rgn < nr_regions; rgn++) - if (dbg_cnt (sched_region)) - schedule_region (rgn); - - free(ebb_head); /* Reposition the prologue and epilogue notes in case we moved the prologue/epilogue insns. */ if (reload_completed) @@ -2930,7 +3087,8 @@ schedule_insns (void) if (sched_verbose) { - if (reload_completed == 0 && flag_schedule_interblock) + if (reload_completed == 0 + && flag_schedule_interblock) { fprintf (sched_dump, "\n;; Procedure interblock/speculative motions == %d/%d \n", @@ -2941,22 +3099,238 @@ schedule_insns (void) fprintf (sched_dump, "\n\n"); } - /* Clean up. */ + nr_regions = 0; + free (rgn_table); + rgn_table = NULL; + free (rgn_bb_table); + rgn_bb_table = NULL; + free (block_to_bb); + block_to_bb = NULL; + free (containing_rgn); + containing_rgn = NULL; + + free (ebb_head); + ebb_head = NULL; +} + +/* Setup global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCK to + point to the region RGN. */ +void +rgn_setup_region (int rgn) +{ + int bb; + + /* Set variables for the current region. 
*/ + current_nr_blocks = RGN_NR_BLOCKS (rgn); + current_blocks = RGN_BLOCKS (rgn); + + /* EBB_HEAD is a region-scope structure. But we realloc it for + each region to save time/memory/something else. + See comments in add_block1, for what reasons we allocate +1 element. */ + ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1); + for (bb = 0; bb <= current_nr_blocks; bb++) + ebb_head[bb] = current_blocks + bb; +} + +/* Compute instruction dependencies in region RGN. */ +void +sched_rgn_compute_dependencies (int rgn) +{ + if (!RGN_DONT_CALC_DEPS (rgn)) + { + int bb; + + if (sel_sched_p ()) + sched_emulate_haifa_p = 1; + + init_deps_global (); + + /* Initializations for region data dependence analysis. */ + bb_deps = XNEWVEC (struct deps, current_nr_blocks); + for (bb = 0; bb < current_nr_blocks; bb++) + init_deps (bb_deps + bb, false); + + /* Initialize bitmap used in add_branch_dependences. */ + insn_referenced = sbitmap_alloc (sched_max_luid); + sbitmap_zero (insn_referenced); + + /* Compute backward dependencies. */ + for (bb = 0; bb < current_nr_blocks; bb++) + compute_block_dependences (bb); + + sbitmap_free (insn_referenced); + free_pending_lists (); + finish_deps_global (); + free (bb_deps); + + /* We don't want to recalculate this twice. */ + RGN_DONT_CALC_DEPS (rgn) = 1; + + if (sel_sched_p ()) + sched_emulate_haifa_p = 0; + } + else + /* (This is a recovery block. It is always a single block region.) + OR (We use selective scheduling.) */ + gcc_assert (current_nr_blocks == 1 || sel_sched_p ()); +} + +/* Init region data structures. Returns true if this region should + not be scheduled. */ +void +sched_rgn_local_init (int rgn) +{ + int bb; + + /* Compute interblock info: probabilities, split-edges, dominators, etc. */ + if (current_nr_blocks > 1) + { + basic_block block; + edge e; + edge_iterator ei; + + prob = XNEWVEC (int, current_nr_blocks); + + dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks); + sbitmap_vector_zero (dom, current_nr_blocks); - regstat_free_calls_crossed (); + /* Use ->aux to implement EDGE_TO_BIT mapping. */ + rgn_nr_edges = 0; + FOR_EACH_BB (block) + { + if (CONTAINING_RGN (block->index) != rgn) + continue; + FOR_EACH_EDGE (e, ei, block->succs) + SET_EDGE_TO_BIT (e, rgn_nr_edges++); + } + + rgn_edges = XNEWVEC (edge, rgn_nr_edges); + rgn_nr_edges = 0; + FOR_EACH_BB (block) + { + if (CONTAINING_RGN (block->index) != rgn) + continue; + FOR_EACH_EDGE (e, ei, block->succs) + rgn_edges[rgn_nr_edges++] = e; + } + /* Split edges. */ + pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges); + sbitmap_vector_zero (pot_split, current_nr_blocks); + ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges); + sbitmap_vector_zero (ancestor_edges, current_nr_blocks); + + /* Compute probabilities, dominators, split_edges. */ + for (bb = 0; bb < current_nr_blocks; bb++) + compute_dom_prob_ps (bb); + + /* Cleanup ->aux used for EDGE_TO_BIT mapping. */ + /* We don't need them anymore. But we want to avoid duplication of + aux fields in the newly created edges. */ + FOR_EACH_BB (block) + { + if (CONTAINING_RGN (block->index) != rgn) + continue; + FOR_EACH_EDGE (e, ei, block->succs) + e->aux = NULL; + } + } +} + +/* Free data computed for the finished region. */ +void +sched_rgn_local_free (void) +{ + free (prob); + sbitmap_vector_free (dom); + sbitmap_vector_free (pot_split); + sbitmap_vector_free (ancestor_edges); + free (rgn_edges); +} + +/* Free data computed for the finished region. 
*/ +void +sched_rgn_local_finish (void) +{ + if (current_nr_blocks > 1 && !sel_sched_p ()) + { + sched_rgn_local_free (); + } +} + +/* Setup scheduler infos. */ +void +rgn_setup_common_sched_info (void) +{ + memcpy (&rgn_common_sched_info, &haifa_common_sched_info, + sizeof (rgn_common_sched_info)); + + rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg; + rgn_common_sched_info.add_block = rgn_add_block; + rgn_common_sched_info.estimate_number_of_insns + = rgn_estimate_number_of_insns; + rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS; + + common_sched_info = &rgn_common_sched_info; +} + +/* Setup all *_sched_info structures (for the Haifa frontend + and for the dependence analysis) in the interblock scheduler. */ +void +rgn_setup_sched_infos (void) +{ + if (!sel_sched_p ()) + memcpy (&rgn_sched_deps_info, &rgn_const_sched_deps_info, + sizeof (rgn_sched_deps_info)); + else + memcpy (&rgn_sched_deps_info, &rgn_const_sel_sched_deps_info, + sizeof (rgn_sched_deps_info)); + + sched_deps_info = &rgn_sched_deps_info; + + memcpy (&rgn_sched_info, &rgn_const_sched_info, sizeof (rgn_sched_info)); + current_sched_info = &rgn_sched_info; +} + +/* The one entry point in this file. */ +void +schedule_insns (void) +{ + int rgn; + + /* Taking care of this degenerate case makes the rest of + this code simpler. */ + if (n_basic_blocks == NUM_FIXED_BLOCKS) + return; + + rgn_setup_common_sched_info (); + rgn_setup_sched_infos (); + + haifa_sched_init (); + sched_rgn_init (reload_completed); + + bitmap_initialize (¬_in_df, 0); bitmap_clear (¬_in_df); - sched_finish (); + /* Schedule every region in the subroutine. */ + for (rgn = 0; rgn < nr_regions; rgn++) + if (dbg_cnt (sched_region)) + schedule_region (rgn); + + /* Clean up. */ + sched_rgn_finish (); + bitmap_clear (¬_in_df); + + haifa_sched_finish (); } /* INSN has been added to/removed from current region. */ static void -add_remove_insn (rtx insn, int remove_p) +rgn_add_remove_insn (rtx insn, int remove_p) { if (!remove_p) rgn_n_insns++; @@ -2973,7 +3347,7 @@ add_remove_insn (rtx insn, int remove_p) } /* Extend internal data structures. */ -static void +void extend_regions (void) { rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks); @@ -2982,34 +3356,40 @@ extend_regions (void) containing_rgn = XRESIZEVEC (int, containing_rgn, last_basic_block); } +void +rgn_make_new_region_out_of_new_block (basic_block bb) +{ + int i; + + i = RGN_BLOCKS (nr_regions); + /* I - first free position in rgn_bb_table. */ + + rgn_bb_table[i] = bb->index; + RGN_NR_BLOCKS (nr_regions) = 1; + RGN_HAS_REAL_EBB (nr_regions) = 0; + RGN_DONT_CALC_DEPS (nr_regions) = 0; + CONTAINING_RGN (bb->index) = nr_regions; + BLOCK_TO_BB (bb->index) = 0; + + nr_regions++; + + RGN_BLOCKS (nr_regions) = i + 1; +} + /* BB was added to ebb after AFTER. */ static void -add_block1 (basic_block bb, basic_block after) +rgn_add_block (basic_block bb, basic_block after) { extend_regions (); - bitmap_set_bit (¬_in_df, bb->index); if (after == 0 || after == EXIT_BLOCK_PTR) { - int i; - - i = RGN_BLOCKS (nr_regions); - /* I - first free position in rgn_bb_table. 
*/ - - rgn_bb_table[i] = bb->index; - RGN_NR_BLOCKS (nr_regions) = 1; - RGN_DONT_CALC_DEPS (nr_regions) = after == EXIT_BLOCK_PTR; - RGN_HAS_REAL_EBB (nr_regions) = 0; - CONTAINING_RGN (bb->index) = nr_regions; - BLOCK_TO_BB (bb->index) = 0; - - nr_regions++; - - RGN_BLOCKS (nr_regions) = i + 1; + rgn_make_new_region_out_of_new_block (bb); + RGN_DONT_CALC_DEPS (nr_regions - 1) = (after == EXIT_BLOCK_PTR); } else - { + { int i, pos; /* We need to fix rgn_table, block_to_bb, containing_rgn @@ -3018,7 +3398,7 @@ add_block1 (basic_block bb, basic_block after) BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index); /* We extend ebb_head to one more position to - easily find the last position of the last ebb in + easily find the last position of the last ebb in the current region. Thus, ebb_head[BLOCK_TO_BB (after) + 1] is _always_ valid for access. */ @@ -3037,24 +3417,24 @@ add_block1 (basic_block bb, basic_block after) /* Source position: ebb_head[i] Destination position: ebb_head[i] + 1 - Last position: + Last position: RGN_BLOCKS (nr_regions) - 1 Number of elements to copy: (last_position) - (source_position) + 1 */ - + memmove (rgn_bb_table + pos + 1, rgn_bb_table + pos, ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1) * sizeof (*rgn_bb_table)); rgn_bb_table[pos] = bb->index; - + for (; i <= current_nr_blocks; i++) ebb_head [i]++; i = CONTAINING_RGN (after->index); CONTAINING_RGN (bb->index) = i; - + RGN_HAS_REAL_EBB (i) = 1; for (++i; i <= nr_regions; i++) @@ -3066,12 +3446,12 @@ add_block1 (basic_block bb, basic_block after) For parameter meaning please refer to sched-int.h: struct sched_info: fix_recovery_cfg. */ static void -fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti) +rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti) { int old_pos, new_pos, i; BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi); - + for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1; rgn_bb_table[old_pos] != check_bb_nexti; old_pos--); @@ -3082,7 +3462,7 @@ fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti) new_pos--); new_pos++; gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]); - + gcc_assert (new_pos < old_pos); memmove (rgn_bb_table + new_pos + 1, @@ -3125,7 +3505,11 @@ static unsigned int rest_of_handle_sched (void) { #ifdef INSN_SCHEDULING - schedule_insns (); + if (flag_selective_scheduling + && ! maybe_skip_selective_scheduling ()) + run_selective_scheduling (); + else + schedule_insns (); #endif return 0; } @@ -3134,7 +3518,7 @@ static bool gate_handle_sched2 (void) { #ifdef INSN_SCHEDULING - return optimize > 0 && flag_schedule_insns_after_reload + return optimize > 0 && flag_schedule_insns_after_reload && dbg_cnt (sched2_func); #else return 0; @@ -3146,18 +3530,26 @@ static unsigned int rest_of_handle_sched2 (void) { #ifdef INSN_SCHEDULING - /* Do control and data sched analysis again, - and write some more of the results to dump file. */ - if (flag_sched2_use_superblocks || flag_sched2_use_traces) - schedule_ebbs (); + if (flag_selective_scheduling2 + && ! maybe_skip_selective_scheduling ()) + run_selective_scheduling (); else - schedule_insns (); + { + /* Do control and data sched analysis again, + and write some more of the results to dump file. 
*/ + if (flag_sched2_use_superblocks) + schedule_ebbs (); + else + schedule_insns (); + } #endif return 0; } -struct tree_opt_pass pass_sched = +struct rtl_opt_pass pass_sched = { + { + RTL_PASS, "sched1", /* name */ gate_handle_sched, /* gate */ rest_of_handle_sched, /* execute */ @@ -3169,15 +3561,17 @@ struct tree_opt_pass pass_sched = 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ - TODO_df_finish | + TODO_df_finish | TODO_verify_rtl_sharing | TODO_dump_func | TODO_verify_flow | - TODO_ggc_collect, /* todo_flags_finish */ - 'S' /* letter */ + TODO_ggc_collect /* todo_flags_finish */ + } }; -struct tree_opt_pass pass_sched2 = +struct rtl_opt_pass pass_sched2 = { + { + RTL_PASS, "sched2", /* name */ gate_handle_sched2, /* gate */ rest_of_handle_sched2, /* execute */ @@ -3189,10 +3583,9 @@ struct tree_opt_pass pass_sched2 = 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ - TODO_df_finish | + TODO_df_finish | TODO_verify_rtl_sharing | TODO_dump_func | TODO_verify_flow | - TODO_ggc_collect, /* todo_flags_finish */ - 'R' /* letter */ + TODO_ggc_collect /* todo_flags_finish */ + } }; -
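
To make the region bookkeeping in the hunks above easier to follow, here is a minimal standalone sketch (plain C, compilable on its own; it is not GCC code, and every "toy_" identifier is a hypothetical stand-in) of the two invariants the new functions rely on: rgn_bb_table is one flat array of block indices in which region RGN owns the slice starting at RGN_BLOCKS (RGN) and running for RGN_NR_BLOCKS (RGN) entries, and ebb_head, as filled by rgn_setup_region, carries one extra sentinel entry so that ebb_head[bb + 1] is always a valid index.

/* Standalone sketch (not GCC code) of the region bookkeeping used by
   sched_rgn_init / rgn_setup_region above.  All "toy_" names are
   hypothetical stand-ins for the real structures.  */
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
  int rgn_nr_blocks;  /* number of blocks in the region                 */
  int rgn_blocks;     /* index of the region's first slot in the table  */
} toy_region;

static toy_region *toy_rgn_table;
static int *toy_bb_table;
static int toy_nr_regions;

/* Append a one-block region holding BB_INDEX, the way
   rgn_make_new_region_out_of_new_block takes the first free slot
   RGN_BLOCKS (nr_regions) and then records the next free slot in the
   entry one past the last region.  */
static void
toy_add_single_block_region (int bb_index)
{
  int i = toy_rgn_table[toy_nr_regions].rgn_blocks;

  toy_bb_table[i] = bb_index;
  toy_rgn_table[toy_nr_regions].rgn_nr_blocks = 1;
  toy_nr_regions++;
  toy_rgn_table[toy_nr_regions].rgn_blocks = i + 1;
}

/* Print region RGN together with an ebb_head built the way
   rgn_setup_region builds it: one entry per block plus a sentinel, so
   ebb_head[bb + 1] - 1 always points at the last block of an ebb.  */
static void
toy_setup_and_print_region (int rgn)
{
  int current_blocks = toy_rgn_table[rgn].rgn_blocks;
  int current_nr_blocks = toy_rgn_table[rgn].rgn_nr_blocks;
  int *ebb_head = malloc ((current_nr_blocks + 1) * sizeof (int));
  int bb;

  for (bb = 0; bb <= current_nr_blocks; bb++)
    ebb_head[bb] = current_blocks + bb;

  printf ("region %d:", rgn);
  for (bb = 0; bb < current_nr_blocks; bb++)
    printf (" bb %d", toy_bb_table[ebb_head[bb]]);
  printf ("\n");

  free (ebb_head);
}

int
main (void)
{
  int max_blocks = 8;
  int rgn;

  toy_rgn_table = calloc (max_blocks + 1, sizeof (toy_region));
  toy_bb_table = calloc (max_blocks, sizeof (int));

  /* Three single-block regions, as find_single_block_region would
     create when interblock scheduling is disabled.  */
  toy_add_single_block_region (2);
  toy_add_single_block_region (3);
  toy_add_single_block_region (4);

  for (rgn = 0; rgn < toy_nr_regions; rgn++)
    toy_setup_and_print_region (rgn);

  free (toy_rgn_table);
  free (toy_bb_table);
  return 0;
}

Run as is, the sketch builds three single-block regions and prints one line per region; the point is only to show that a region is nothing more than an (offset, length) pair over the flat block table.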
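
A second minimal sketch, again with hypothetical "toy_" names rather than the real structures, works through the index arithmetic that rgn_add_block performs when a new basic block must be spliced into the middle of rgn_bb_table: the tail from the insertion point up to RGN_BLOCKS (nr_regions) - 1 is shifted one slot to the right with memmove and the new block index is stored in the freed slot.  The subsequent ebb_head and RGN_BLOCKS increments are reduced here to a single element counter.

/* Standalone sketch (not GCC code) of the memmove arithmetic in
   rgn_add_block.  All "toy_" names are hypothetical.  */
#include <stdio.h>
#include <string.h>

#define TOY_MAX 16

static int toy_bb_table[TOY_MAX]; /* flat block list, as in rgn_bb_table */
static int toy_used;              /* number of slots in use              */

/* Insert BB_INDEX at position POS, shifting the tail right by one:
     source position:      pos
     destination position: pos + 1
     elements to copy:     (toy_used - 1) - pos + 1.  */
static void
toy_insert_block (int pos, int bb_index)
{
  memmove (toy_bb_table + pos + 1, toy_bb_table + pos,
           ((toy_used - 1) - pos + 1) * sizeof (*toy_bb_table));
  toy_bb_table[pos] = bb_index;
  toy_used++;
}

int
main (void)
{
  int i;

  /* Start with blocks 2, 3, 5 and splice block 4 in between, the way a
     block created after "3" would be placed inside its ebb.  */
  toy_bb_table[0] = 2;
  toy_bb_table[1] = 3;
  toy_bb_table[2] = 5;
  toy_used = 3;

  toy_insert_block (2, 4);

  for (i = 0; i < toy_used; i++)
    printf ("%d%c", toy_bb_table[i], i + 1 == toy_used ? '\n' : ' ');

  return 0;
}

With the initial contents 2, 3, 5 and an insertion at position 2, exactly one trailing element is copied and the table reads 2 3 4 5 afterwards, mirroring how the real code keeps every region's slice contiguous after a recovery block is added.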