X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fsched-rgn.c;h=ca090dea4dada5de74511bf711d09e66202de512;hb=9fed75026cca1c2bf8cbf67920c15fe52489d1c9;hp=7f7f58692388683a7856b9e072339e0f1b114350;hpb=90bf0a0066d0e93b78eb3012c17b323cd084ff8b;p=pf3gnuchains%2Fgcc-fork.git

diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index 7f7f5869238..ca090dea4da 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -1,6 +1,7 @@
 /* Instruction scheduling pass.
-   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
-   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
+   Free Software Foundation, Inc.
    Contributed by Michael Tiemann (tiemann@cygnus.com)
    Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)
 
@@ -8,7 +9,7 @@ This file is part of GCC.
 
 GCC is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
 version.
 
 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
@@ -17,9 +18,8 @@ FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 for more details.
 
 You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING.  If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA.  */
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
 
 /* This pass implements list scheduling within basic blocks.  It is
    run twice: (1) after flow analysis, but before register allocation,
@@ -67,17 +67,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
 #include "target.h"
 #include "timevar.h"
 #include "tree-pass.h"
-
-/* Define when we want to do count REG_DEAD notes before and after scheduling
-   for sanity checking.  We can't do that when conditional execution is used,
-   as REG_DEAD exist only for unconditional deaths.  */
-
-#if !defined (HAVE_conditional_execution) && defined (ENABLE_CHECKING)
-#define CHECK_DEAD_NOTES 1
-#else
-#define CHECK_DEAD_NOTES 0
-#endif
-
+#include "dbgcnt.h"
 
 #ifdef INSN_SCHEDULING
 /* Some accessor macros for h_i_d members only used within this file.  */
@@ -285,10 +275,9 @@ static int is_prisky (rtx, int, int);
 static int is_exception_free (rtx, int, int);
 
 static bool sets_likely_spilled (rtx);
-static void sets_likely_spilled_1 (rtx, rtx, void *);
+static void sets_likely_spilled_1 (rtx, const_rtx, void *);
 static void add_branch_dependences (rtx, rtx);
-static void compute_block_backward_dependences (int);
-void debug_dependencies (void);
+static void compute_block_dependences (int);
 
 static void init_regions (void);
 static void schedule_region (int);
@@ -321,29 +310,49 @@ is_cfg_nonregular (void)
     return 1;
 
   /* If we have exception handlers, then we consider the cfg not well
-     structured.  ?!?  We should be able to handle this now that flow.c
-     computes an accurate cfg for EH.  */
+     structured.  ?!?  We should be able to handle this now that we
+     compute an accurate cfg for EH.  */
   if (current_function_has_exception_handlers ())
     return 1;
 
-  /* If we have non-jumping insns which refer to labels, then we consider
-     the cfg not well structured.  */
+  /* If we have insns which refer to labels as non-jumped-to operands,
+     then we consider the cfg not well structured.  */
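Reviewer note (illustration only, not part of the patch; insn UIDs and
register numbers invented): the one shape the new check below still
accepts is an address load feeding only a tablejump-style branch,
roughly

    (insn 10 ... (set (reg 100) (label_ref L42)))    ;; carries a REG_LABEL_OPERAND note
    (jump_insn 11 ... (parallel [(set (pc) (reg 100))
                                 (use (label_ref L42))]))

where both insns sit in the same basic block, insn 10 is a single_set,
and reg 100 dies in insn 11.  Any other insn with a REG_LABEL_OPERAND
note makes is_cfg_nonregular () return 1.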
   FOR_EACH_BB (b)
     FOR_BB_INSNS (b, insn)
       {
-	/* Check for labels referred by non-jump insns.  */
-	if (NONJUMP_INSN_P (insn) || CALL_P (insn))
-	  {
-	    rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
-	    if (note
-		&& ! (JUMP_P (NEXT_INSN (insn))
-		      && find_reg_note (NEXT_INSN (insn), REG_LABEL,
-					XEXP (note, 0))))
-	      return 1;
-	  }
+	rtx note, next, set, dest;
+
 	/* If this function has a computed jump, then we consider the cfg
 	   not well structured.  */
-	else if (JUMP_P (insn) && computed_jump_p (insn))
+	if (JUMP_P (insn) && computed_jump_p (insn))
+	  return 1;
+
+	if (!INSN_P (insn))
+	  continue;
+
+	note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
+	if (note == NULL_RTX)
+	  continue;
+
+	/* For that label not to be seen as a referred-to label, this
+	   must be a single-set which is feeding a jump *only*.  This
+	   could be a conditional jump with the label split off for
+	   machine-specific reasons or a casesi/tablejump.  */
+	next = next_nonnote_insn (insn);
+	if (next == NULL_RTX
+	    || !JUMP_P (next)
+	    || (JUMP_LABEL (next) != XEXP (note, 0)
+		&& find_reg_note (next, REG_LABEL_TARGET,
+				  XEXP (note, 0)) == NULL_RTX)
+	    || BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (next))
+	  return 1;
+
+	set = single_set (insn);
+	if (set == NULL_RTX)
+	  return 1;
+
+	dest = SET_DEST (set);
+	if (!REG_P (dest) || !dead_or_set_p (next, dest))
 	  return 1;
       }
 
@@ -1041,7 +1050,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
   max_hdr = xmalloc (last_basic_block * sizeof (*max_hdr));
   order = xmalloc (last_basic_block * sizeof (*order));
 
-  post_order_compute (order, false);
+  post_order_compute (order, false, false);
 
   for (i = nblocks - 1; i >= 0; i--)
     {
@@ -1356,7 +1365,7 @@ static void
 compute_trg_info (int trg)
 {
   candidate *sp;
-  edgelst el;
+  edgelst el = { NULL, 0 };
   int i, j, k, update_idx;
   basic_block block;
   sbitmap visited;
@@ -1501,6 +1510,8 @@ debug_candidates (int trg)
 
 /* Functions for speculative scheduling.  */
 
+static bitmap_head not_in_df;
+
 /* Return 0 if x is a set of a register alive in the beginning of one
    of the split-blocks of src, otherwise return 1.  */
@@ -1552,18 +1563,15 @@ check_live_1 (int src, rtx x)
 	  for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
 	    {
 	      basic_block b = candidate_table[src].split_bbs.first_member[i];
+	      int t = bitmap_bit_p (&not_in_df, b->index);
 
 	      /* We can have split blocks, that were recently generated.
-		 such blocks are always outside current region.  */
-	      gcc_assert (glat_start[b->index]
-			  || CONTAINING_RGN (b->index)
-			     != CONTAINING_RGN (BB_TO_BLOCK (src)));
-	      if (!glat_start[b->index]
-		  || REGNO_REG_SET_P (glat_start[b->index],
-				      regno + j))
-		{
-		  return 0;
-		}
+		 Such blocks are always outside current region.  */
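Reviewer note (sketch, built only from names this patch introduces):
the old code tested glat_start[b->index], a NULL entry meaning "no
liveness data"; the new code asks the df framework instead, with the
not_in_df bitmap marking freshly created blocks that df has not
analyzed yet:

    int t = bitmap_bit_p (&not_in_df, b->index);

    if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j))
      return 0;   /* Register live at the split block's entry.  */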
+	      gcc_assert (!t || (CONTAINING_RGN (b->index)
+				 != CONTAINING_RGN (BB_TO_BLOCK (src))));
+
+	      if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j))
+		return 0;
 	    }
 	}
     }
 
@@ -1573,15 +1581,13 @@ check_live_1 (int src, rtx x)
 	  for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
 	    {
 	      basic_block b = candidate_table[src].split_bbs.first_member[i];
+	      int t = bitmap_bit_p (&not_in_df, b->index);
 
-	      gcc_assert (glat_start[b->index]
-			  || CONTAINING_RGN (b->index)
-			     != CONTAINING_RGN (BB_TO_BLOCK (src)));
-	      if (!glat_start[b->index]
-		  || REGNO_REG_SET_P (glat_start[b->index], regno))
-		{
-		  return 0;
-		}
+	      gcc_assert (!t || (CONTAINING_RGN (b->index)
+				 != CONTAINING_RGN (BB_TO_BLOCK (src))));
+
+	      if (t || REGNO_REG_SET_P (df_get_live_in (b), regno))
+		return 0;
 	    }
 	}
     }
 
@@ -1637,7 +1643,7 @@ update_live_1 (int src, rtx x)
 	    {
 	      basic_block b = candidate_table[src].update_bbs.first_member[i];
 
-	      SET_REGNO_REG_SET (glat_start[b->index], regno + j);
+	      SET_REGNO_REG_SET (df_get_live_in (b), regno + j);
 	    }
 	}
     }
@@ -1647,7 +1653,7 @@ update_live_1 (int src, rtx x)
 	{
 	  basic_block b = candidate_table[src].update_bbs.first_member[i];
 
-	  SET_REGNO_REG_SET (glat_start[b->index], regno);
+	  SET_REGNO_REG_SET (df_get_live_in (b), regno);
 	}
     }
 }
@@ -1711,12 +1717,13 @@ update_live (rtx insn, int src)
 static void
 set_spec_fed (rtx load_insn)
 {
-  rtx link;
+  sd_iterator_def sd_it;
+  dep_t dep;
 
-  for (link = INSN_DEPEND (load_insn); link; link = XEXP (link, 1))
-    if (GET_MODE (link) == VOIDmode)
-      FED_BY_SPEC_LOAD (XEXP (link, 0)) = 1;
-}				/* set_spec_fed */
+  FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep)
+    if (DEP_TYPE (dep) == REG_DEP_TRUE)
+      FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1;
+}
 
 /* On the path from the insn to load_insn_bb, find a conditional
 branch depending on insn, that guards the speculative load.  */
@@ -1724,17 +1731,19 @@ branch depending on insn, that guards the speculative load.  */
 static int
 find_conditional_protection (rtx insn, int load_insn_bb)
 {
-  rtx link;
+  sd_iterator_def sd_it;
+  dep_t dep;
 
   /* Iterate through DEF-USE forward dependences.  */
-  for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
+  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
     {
-      rtx next = XEXP (link, 0);
+      rtx next = DEP_CON (dep);
+
       if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
 	   CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
 	  && IS_REACHABLE (INSN_BB (next), load_insn_bb)
 	  && load_insn_bb != INSN_BB (next)
-	  && GET_MODE (link) == VOIDmode
+	  && DEP_TYPE (dep) == REG_DEP_TRUE
 	  && (JUMP_P (next)
 	      || find_conditional_protection (next, load_insn_bb)))
 	return 1;
@@ -1744,29 +1753,30 @@ find_conditional_protection (rtx insn, int load_insn_bb)
 
 /* Returns 1 if the same insn1 that participates in the computation
    of load_insn's address is feeding a conditional branch that is
-   guarding on load_insn. This is true if we find a the two DEF-USE
+   guarding on load_insn. This is true if we find two DEF-USE
    chains:
    insn1 -> ... -> conditional-branch
   insn1 -> ... -> load_insn,
-   and if a flow path exist:
+   and if a flow path exists:
   insn1 -> ... -> conditional-branch -> ... -> load_insn,
   and if insn1 is on the path
   region-entry -> ... -> bb_trg -> ... load_insn.
 
-   Locate insn1 by climbing on LOG_LINKS from load_insn.
-   Locate the branch by following INSN_DEPEND from insn1.  */
+   Locate insn1 by climbing on INSN_BACK_DEPS from load_insn.
+   Locate the branch by following INSN_FORW_DEPS from insn1.  */
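Reviewer note (invented example, not from the patch): in C-like
pseudocode the two DEF-USE chains and the guarding flow path look like

    r1 = n;                      /* insn1 */
    if (r1 == 0) goto skip;      /* conditional branch fed by insn1 */
    r2 = a[r1];                  /* load_insn, also fed by insn1 */

so every path from insn1 to the speculative load passes through the
branch that guards it.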
 static int
 is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
 {
-  rtx link;
+  sd_iterator_def sd_it;
+  dep_t dep;
 
-  for (link = LOG_LINKS (load_insn); link; link = XEXP (link, 1))
+  FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep)
     {
-      rtx insn1 = XEXP (link, 0);
+      rtx insn1 = DEP_PRO (dep);
 
       /* Must be a DEF-USE dependence upon non-branch.  */
-      if (GET_MODE (link) != VOIDmode
+      if (DEP_TYPE (dep) != REG_DEP_TRUE
 	  || JUMP_P (insn1))
 	continue;
 
@@ -1809,28 +1819,29 @@ is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
 static int
 is_pfree (rtx load_insn, int bb_src, int bb_trg)
 {
-  rtx back_link;
+  sd_iterator_def back_sd_it;
+  dep_t back_dep;
   candidate *candp = candidate_table + bb_src;
 
   if (candp->split_bbs.nr_members != 1)
     /* Must have exactly one escape block.  */
     return 0;
 
-  for (back_link = LOG_LINKS (load_insn);
-       back_link; back_link = XEXP (back_link, 1))
+  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
     {
-      rtx insn1 = XEXP (back_link, 0);
+      rtx insn1 = DEP_PRO (back_dep);
 
-      if (GET_MODE (back_link) == VOIDmode)
+      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
+	/* Found a DEF-USE dependence (insn1, load_insn).  */
 	{
-	  /* Found a DEF-USE dependence (insn1, load_insn).  */
-	  rtx fore_link;
+	  sd_iterator_def fore_sd_it;
+	  dep_t fore_dep;
 
-	  for (fore_link = INSN_DEPEND (insn1);
-	       fore_link; fore_link = XEXP (fore_link, 1))
+	  FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
	    {
-	      rtx insn2 = XEXP (fore_link, 0);
-	      if (GET_MODE (fore_link) == VOIDmode)
+	      rtx insn2 = DEP_CON (fore_dep);
+
+	      if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
 		{
 		  /* Found a DEF-USE dependence (insn1, insn2).  */
 		  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
@@ -1863,7 +1874,7 @@ is_prisky (rtx load_insn, int bb_src, int bb_trg)
   if (FED_BY_SPEC_LOAD (load_insn))
     return 1;
 
-  if (LOG_LINKS (load_insn) == NULL)
+  if (sd_lists_empty_p (load_insn, SD_LIST_BACK))
     /* Dependence may 'hide' out of the region.  */
     return 1;
 
@@ -1941,10 +1952,8 @@ static void extend_regions (void);
 static void add_block1 (basic_block, basic_block);
 static void fix_recovery_cfg (int, int, int);
 static basic_block advance_target_bb (basic_block, rtx);
-static void check_dead_notes1 (int, sbitmap);
-#ifdef ENABLE_CHECKING
-static int region_head_or_leaf_p (basic_block, int);
-#endif
+
+static void debug_rgn_dependencies (int);
 
 /* Return nonzero if there are more insns that should be scheduled.  */
 
@@ -1971,7 +1980,7 @@ init_ready_list (void)
 
   /* Print debugging information.  */
   if (sched_verbose >= 5)
-    debug_dependencies ();
+    debug_rgn_dependencies (target_bb);
 
   /* Prepare current target block info.  */
   if (current_nr_blocks > 1)
@@ -2097,7 +2106,8 @@ new_ready (rtx next, ds_t ts)
 	  if (not_ex_free
 	      /* We are here because is_exception_free () == false.
 		 But we possibly can handle that with control speculation.  */
-	      && current_sched_info->flags & DO_SPECULATION)
+	      && (current_sched_info->flags & DO_SPECULATION)
+	      && (spec_info->mask & BEGIN_CONTROL))
 	    /* Here we got new control-speculative instruction.  */
 	    ts = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK);
 	  else
@@ -2210,13 +2220,7 @@ static struct sched_info region_sched_info =
   add_block1,
   advance_target_bb,
   fix_recovery_cfg,
-#ifdef ENABLE_CHECKING
-  region_head_or_leaf_p,
-#endif
-  SCHED_RGN | USE_GLAT
-#ifdef ENABLE_CHECKING
-  | DETACH_LIFE_INFO
-#endif
+  SCHED_RGN
 };
 
 /* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register.  */
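Reviewer note: the const_rtx change to sets_likely_spilled_1 below
tracks the constification of the note_stores callback type in this GCC
cycle.  A sketch of how such a walker is typically driven (this mirrors
the unchanged caller and is shown only for context, not as part of the
patch):

    bool ret = false;
    note_stores (pat, sets_likely_spilled_1, &ret);
    return ret;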
@@ -2230,7 +2234,7 @@ sets_likely_spilled (rtx pat)
 }
 
 static void
-sets_likely_spilled_1 (rtx x, rtx pat, void *data)
+sets_likely_spilled_1 (rtx x, const_rtx pat, void *data)
 {
   bool *ret = (bool *) data;
 
@@ -2284,7 +2288,8 @@ add_branch_dependences (rtx head, rtx tail)
     {
       if (!NOTE_P (insn))
	{
-	  if (last != 0 && !find_insn_list (insn, LOG_LINKS (last)))
+	  if (last != 0
+	      && sd_find_dep_between (insn, last, false) == NULL)
	    {
	      if (! sched_insns_conditions_mutex_p (last, insn))
		add_dependence (last, insn, REG_DEP_ANTI);
@@ -2461,7 +2466,10 @@ propagate_deps (int bb, struct deps *pred_deps)
     = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
			 succ_deps->last_pending_memory_flush);
 
-      succ_deps->pending_lists_length += pred_deps->pending_lists_length;
+      succ_deps->pending_read_list_length
+	+= pred_deps->pending_read_list_length;
+      succ_deps->pending_write_list_length
+	+= pred_deps->pending_write_list_length;
       succ_deps->pending_flush_length += pred_deps->pending_flush_length;
 
       /* last_function_call is inherited by successor.  */
@@ -2489,7 +2497,7 @@ propagate_deps (int bb, struct deps *pred_deps)
   pred_deps->pending_write_mems = 0;
 }
 
-/* Compute backward dependences inside bb.  In a multiple blocks region:
+/* Compute dependences inside bb.  In a multiple blocks region:
    (1) a bb is analyzed after its predecessors, and (2) the lists in
    effect at the end of bb (after analyzing for bb) are inherited by
    bb's successors.
@@ -2507,7 +2515,7 @@ propagate_deps (int bb, struct deps *pred_deps)
   similar, and the result is interblock dependences in the region.  */
 
 static void
-compute_block_backward_dependences (int bb)
+compute_block_dependences (int bb)
 {
   rtx head, tail;
   struct deps tmp_deps;
@@ -2517,6 +2525,7 @@ compute_block_backward_dependences (int bb)
   /* Do the analysis for this block.  */
   gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
   get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
+
   sched_analyze (&tmp_deps, head, tail);
   add_branch_dependences (head, tail);
 
@@ -2525,6 +2534,21 @@ compute_block_backward_dependences (int bb)
 
   /* Free up the INSN_LISTs.  */
   free_deps (&tmp_deps);
+
+  if (targetm.sched.dependencies_evaluation_hook)
+    targetm.sched.dependencies_evaluation_hook (head, tail);
+}
+
+/* Free dependencies of instructions inside BB.  */
+static void
+free_block_dependencies (int bb)
+{
+  rtx head;
+  rtx tail;
+
+  get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
+
+  sched_free_deps (head, tail, true);
 }
 
 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
@@ -2544,73 +2568,87 @@ free_pending_lists (void)
     }
 }
 
-/* Print dependences for debugging, callable from debugger.  */
-
+/* Print dependences for debugging starting from FROM_BB.
+   Callable from debugger.  */
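Reviewer note (usage illustration): with a debugger attached to cc1 and
stopped inside this pass, the entry point defined just below can be
invoked by hand, e.g.

    (gdb) call debug_rgn_dependencies (0)

which dumps forward dependences for every block of the current region
to sched_dump.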
 void
-debug_dependencies (void)
+debug_rgn_dependencies (int from_bb)
 {
   int bb;
 
-  fprintf (sched_dump, ";;   --------------- forward dependences: ------------ \n");
-  for (bb = 0; bb < current_nr_blocks; bb++)
+  fprintf (sched_dump,
+	   ";;   --------------- forward dependences: ------------ \n");
+
+  for (bb = from_bb; bb < current_nr_blocks; bb++)
     {
       rtx head, tail;
-      rtx next_tail;
-      rtx insn;
 
       gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
       get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
-      next_tail = NEXT_INSN (tail);
       fprintf (sched_dump, "\n;;   --- Region Dependences --- b %d bb %d \n",
	       BB_TO_BLOCK (bb), bb);
 
-      fprintf (sched_dump, ";;   %7s%6s%6s%6s%6s%6s%14s\n",
-	       "insn", "code", "bb", "dep", "prio", "cost",
-	       "reservation");
-      fprintf (sched_dump, ";;   %7s%6s%6s%6s%6s%6s%14s\n",
-	       "----", "----", "--", "---", "----", "----",
-	       "-----------");
+      debug_dependencies (head, tail);
+    }
+}
 
-      for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
-	{
-	  rtx link;
+/* Print dependencies information for instructions between HEAD and TAIL.
+   ??? This function would probably fit best in haifa-sched.c.  */
+void debug_dependencies (rtx head, rtx tail)
+{
+  rtx insn;
+  rtx next_tail = NEXT_INSN (tail);
+
+  fprintf (sched_dump, ";;   %7s%6s%6s%6s%6s%6s%14s\n",
+	   "insn", "code", "bb", "dep", "prio", "cost",
+	   "reservation");
+  fprintf (sched_dump, ";;   %7s%6s%6s%6s%6s%6s%14s\n",
+	   "----", "----", "--", "---", "----", "----",
+	   "-----------");
 
-	  if (! INSN_P (insn))
+  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+    {
+      if (! INSN_P (insn))
+	{
+	  int n;
+	  fprintf (sched_dump, ";;   %6d ", INSN_UID (insn));
+	  if (NOTE_P (insn))
	    {
-	      int n;
-	      fprintf (sched_dump, ";;   %6d ", INSN_UID (insn));
-	      if (NOTE_P (insn))
-		{
-		  n = NOTE_LINE_NUMBER (insn);
-		  if (n < 0)
-		    fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
-		}
-	      else
-		fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
-	      continue;
+	      n = NOTE_KIND (insn);
+	      fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
	    }
-
-	  fprintf (sched_dump,
-		   ";;   %s%5d%6d%6d%6d%6d%6d   ",
-		   (SCHED_GROUP_P (insn) ? "+" : " "),
-		   INSN_UID (insn),
-		   INSN_CODE (insn),
-		   INSN_BB (insn),
-		   INSN_DEP_COUNT (insn),
-		   INSN_PRIORITY (insn),
-		   insn_cost (insn, 0, 0));
-
-	  if (recog_memoized (insn) < 0)
-	    fprintf (sched_dump, "nothing");
-	  else
-	    print_reservation (sched_dump, insn);
-
-	  fprintf (sched_dump, "\t: ");
-	  for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
-	    fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0)));
-	  fprintf (sched_dump, "\n");
+	  else
+	    fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
+	  continue;
 	}
+
+      fprintf (sched_dump,
+	       ";;   %s%5d%6d%6d%6d%6d%6d   ",
+	       (SCHED_GROUP_P (insn) ? "+" : " "),
+	       INSN_UID (insn),
+	       INSN_CODE (insn),
+	       BLOCK_NUM (insn),
+	       sd_lists_size (insn, SD_LIST_BACK),
+	       INSN_PRIORITY (insn),
+	       insn_cost (insn));
+
+      if (recog_memoized (insn) < 0)
+	fprintf (sched_dump, "nothing");
+      else
+	print_reservation (sched_dump, insn);
+
+      fprintf (sched_dump, "\t: ");
+      {
+	sd_iterator_def sd_it;
+	dep_t dep;
+
+	FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
+	  fprintf (sched_dump, "%d ", INSN_UID (DEP_CON (dep)));
+      }
+      fprintf (sched_dump, "\n");
     }
+
   fprintf (sched_dump, "\n");
 }
 
@@ -2665,23 +2703,9 @@ schedule_region (int rgn)
   for (bb = 0; bb < current_nr_blocks; bb++)
     init_deps (bb_deps + bb);
 
-  /* Compute LOG_LINKS.  */
+  /* Compute dependencies.  */
   for (bb = 0; bb < current_nr_blocks; bb++)
-    compute_block_backward_dependences (bb);
-
-  /* Compute INSN_DEPEND.  */
-  for (bb = current_nr_blocks - 1; bb >= 0; bb--)
-    {
-      rtx head, tail;
-
-      gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
-      get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
-
-      compute_forward_dependences (head, tail);
-
-      if (targetm.sched.dependencies_evaluation_hook)
-	targetm.sched.dependencies_evaluation_hook (head, tail);
-    }
+    compute_block_dependences (bb);
 
   free_pending_lists ();
 
@@ -2810,9 +2834,16 @@ schedule_region (int rgn)
       current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;
 
       curr_bb = first_bb;
-      schedule_block (&curr_bb, rgn_n_insns);
-      gcc_assert (EBB_FIRST_BB (bb) == first_bb);
-      sched_rgn_n_insns += sched_n_insns;
+      if (dbg_cnt (sched_block))
+	{
+	  schedule_block (&curr_bb, rgn_n_insns);
+	  gcc_assert (EBB_FIRST_BB (bb) == first_bb);
+	  sched_rgn_n_insns += sched_n_insns;
+	}
+      else
+	{
+	  sched_rgn_n_insns += rgn_n_insns;
+	}
 
       /* Clean up.  */
       if (current_nr_blocks > 1)
@@ -2826,7 +2857,6 @@ schedule_region (int rgn)
 
   /* Sanity check: verify that all region insns were scheduled.  */
   gcc_assert (sched_rgn_n_insns == rgn_n_insns);
-
 
   /* Done with this region.  */
 
   if (current_nr_blocks > 1)
@@ -2837,20 +2867,20 @@ schedule_region (int rgn)
       sbitmap_vector_free (ancestor_edges);
       free (rgn_edges);
     }
-}
 
-/* Indexed by region, holds the number of death notes found in that region.
-   Used for consistency checks.  */
-static int *deaths_in_region;
+  /* Free dependencies.  */
+  for (bb = 0; bb < current_nr_blocks; ++bb)
+    free_block_dependencies (bb);
+
+  gcc_assert (haifa_recovery_bb_ever_added_p
+	      || deps_pools_are_empty_p ());
+}
 
 /* Initialize data structures for region scheduling.  */
 
 static void
 init_regions (void)
 {
-  sbitmap blocks;
-  int rgn;
-
   nr_regions = 0;
   rgn_table = 0;
   rgn_bb_table = 0;
@@ -2878,25 +2908,11 @@ init_regions (void)
	debug_regions ();
 
       /* For now.  This will move as more and more of haifa is converted
-	 to using the cfg code in flow.c.  */
+	 to using the cfg code.  */
       free_dominance_info (CDI_DOMINATORS);
     }
   RGN_BLOCKS (nr_regions) = RGN_BLOCKS (nr_regions - 1) +
     RGN_NR_BLOCKS (nr_regions - 1);
-
-
-  if (CHECK_DEAD_NOTES)
-    {
-      blocks = sbitmap_alloc (last_basic_block);
-      deaths_in_region = XNEWVEC (int, nr_regions);
-      /* Remove all death notes from the subroutine.  */
-      for (rgn = 0; rgn < nr_regions; rgn++)
-	check_dead_notes1 (rgn, blocks);
-
-      sbitmap_free (blocks);
-    }
-  else
-    count_or_remove_death_notes (NULL, 1);
 }
 
 /* The one entry point in this file.  */
@@ -2904,10 +2920,7 @@
 void
 schedule_insns (void)
 {
-  sbitmap large_region_blocks, blocks;
   int rgn;
-  int any_large_regions;
-  basic_block bb;
 
   /* Taking care of this degenerate case makes the rest of
      this code simpler.  */
@@ -2921,8 +2934,16 @@ schedule_insns (void)
     invoked via sched_init.  */
   current_sched_info = &region_sched_info;
 
+  df_set_flags (DF_LR_RUN_DCE);
+  df_note_add_problem ();
+  df_analyze ();
+  regstat_compute_calls_crossed ();
+
   sched_init ();
 
+  bitmap_initialize (&not_in_df, 0);
+  bitmap_clear (&not_in_df);
+
   min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
		    / 100);
 
@@ -2934,92 +2955,14 @@ schedule_insns (void)
 
   /* Schedule every region in the subroutine.  */
   for (rgn = 0; rgn < nr_regions; rgn++)
-    schedule_region (rgn);
+    if (dbg_cnt (sched_region))
+      schedule_region (rgn);
 
   free(ebb_head);
-
-  /* Update life analysis for the subroutine.  Do single block regions
-     first so that we can verify that live_at_start didn't change.  Then
-     do all other blocks.  */
-  /* ??? There is an outside possibility that update_life_info, or more
-     to the point propagate_block, could get called with nonzero flags
-     more than once for one basic block.  This would be kinda bad if it
-     were to happen, since REG_INFO would be accumulated twice for the
-     block, and we'd have twice the REG_DEAD notes.
-
-     I'm fairly certain that this _shouldn't_ happen, since I don't think
-     that live_at_start should change at region heads.  Not sure what the
-     best way to test for this kind of thing...  */
-
-  if (current_sched_info->flags & DETACH_LIFE_INFO)
-    /* this flag can be set either by the target or by ENABLE_CHECKING.  */
-    attach_life_info ();
-
-  allocate_reg_life_data ();
-
-  any_large_regions = 0;
-  large_region_blocks = sbitmap_alloc (last_basic_block);
-  sbitmap_zero (large_region_blocks);
-  FOR_EACH_BB (bb)
-    SET_BIT (large_region_blocks, bb->index);
-
-  blocks = sbitmap_alloc (last_basic_block);
-  sbitmap_zero (blocks);
-
-  /* Update life information.  For regions consisting of multiple blocks
-     we've possibly done interblock scheduling that affects global liveness.
-     For regions consisting of single blocks we need to do only local
-     liveness.  */
-  for (rgn = 0; rgn < nr_regions; rgn++)
-    if (RGN_NR_BLOCKS (rgn) > 1
-	/* Or the only block of this region has been split.  */
-	|| RGN_HAS_REAL_EBB (rgn)
-	/* New blocks (e.g. recovery blocks) should be processed
-	   as parts of large regions.  */
-	|| !glat_start[rgn_bb_table[RGN_BLOCKS (rgn)]])
-      any_large_regions = 1;
-    else
-      {
-	SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
-	RESET_BIT (large_region_blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
-      }
-
-  /* Don't update reg info after reload, since that affects
-     regs_ever_live, which should not change after reload.  */
-  update_life_info (blocks, UPDATE_LIFE_LOCAL,
-		    (reload_completed ? PROP_DEATH_NOTES
-		     : (PROP_DEATH_NOTES | PROP_REG_INFO)));
-  if (any_large_regions)
-    {
-      update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL,
-			(reload_completed ? PROP_DEATH_NOTES
-			 : (PROP_DEATH_NOTES | PROP_REG_INFO)));
-
-#ifdef ENABLE_CHECKING
-      check_reg_live (true);
-#endif
-    }
-
-  if (CHECK_DEAD_NOTES)
-    {
-      /* Verify the counts of basic block notes in single basic block
-	 regions.  */
-      for (rgn = 0; rgn < nr_regions; rgn++)
-	if (RGN_NR_BLOCKS (rgn) == 1)
-	  {
-	    sbitmap_zero (blocks);
-	    SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
-
-	    gcc_assert (deaths_in_region[rgn]
-			== count_or_remove_death_notes (blocks, 0));
-	  }
-      free (deaths_in_region);
-    }
-
   /* Reposition the prologue and epilogue notes in case we moved the
      prologue/epilogue insns.  */
   if (reload_completed)
-    reposition_prologue_and_epilogue_notes (get_insns ());
+    reposition_prologue_and_epilogue_notes ();
 
   if (sched_verbose)
     {
@@ -3040,10 +2983,11 @@ schedule_insns (void)
   free (block_to_bb);
   free (containing_rgn);
 
-  sched_finish ();
+  regstat_free_calls_crossed ();
 
-  sbitmap_free (blocks);
-  sbitmap_free (large_region_blocks);
+  bitmap_clear (&not_in_df);
+
+  sched_finish ();
 }
 
 /* INSN has been added to/removed from current region.  */
@@ -3080,6 +3024,8 @@ add_block1 (basic_block bb, basic_block after)
 {
   extend_regions ();
 
+  bitmap_set_bit (&not_in_df, bb->index);
+
   if (after == 0 || after == EXIT_BLOCK_PTR)
     {
       int i;
@@ -3097,17 +3043,6 @@ add_block1 (basic_block bb, basic_block after)
 
       nr_regions++;
 
       RGN_BLOCKS (nr_regions) = i + 1;
-
-      if (CHECK_DEAD_NOTES)
-	{
-	  sbitmap blocks = sbitmap_alloc (last_basic_block);
-	  deaths_in_region = xrealloc (deaths_in_region, nr_regions *
-				       sizeof (*deaths_in_region));
-
-	  check_dead_notes1 (nr_regions - 1, blocks);
-
-	  sbitmap_free (blocks);
-	}
     }
   else
     {
@@ -3160,9 +3095,6 @@ add_block1 (basic_block bb, basic_block after)
 
       for (++i; i <= nr_regions; i++)
	RGN_BLOCKS (i)++;
-
-      /* We don't need to call check_dead_notes1 () because this new block
-	 is just a split of the old.  We don't want to count anything twice.  */
     }
 }
 
@@ -3212,56 +3144,13 @@ advance_target_bb (basic_block bb, rtx insn)
   return bb->next_bb;
 }
 
-/* Count and remove death notes in region RGN, which consists of blocks
-   with indecies in BLOCKS.  */
-static void
-check_dead_notes1 (int rgn, sbitmap blocks)
-{
-  int b;
-
-  sbitmap_zero (blocks);
-  for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b)
-    SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn) + b]);
-
-  deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
-}
-
-#ifdef ENABLE_CHECKING
-/* Return non zero, if BB is head or leaf (depending of LEAF_P) block in
-   current region.  For more information please refer to
-   sched-int.h: struct sched_info: region_head_or_leaf_p.  */
-static int
-region_head_or_leaf_p (basic_block bb, int leaf_p)
-{
-  if (!leaf_p)
-    return bb->index == rgn_bb_table[RGN_BLOCKS (CONTAINING_RGN (bb->index))];
-  else
-    {
-      int i;
-      edge e;
-      edge_iterator ei;
-
-      i = CONTAINING_RGN (bb->index);
-
-      FOR_EACH_EDGE (e, ei, bb->succs)
-	if (e->dest != EXIT_BLOCK_PTR
-	    && CONTAINING_RGN (e->dest->index) == i
-	    /* except self-loop.  */
-	    && e->dest != bb)
-	  return 0;
-
-      return 1;
-    }
-}
-#endif /* ENABLE_CHECKING */
-
 #endif
 
 static bool
 gate_handle_sched (void)
 {
 #ifdef INSN_SCHEDULING
-  return flag_schedule_insns;
+  return flag_schedule_insns && dbg_cnt (sched_func);
 #else
   return 0;
 #endif
 }
 
@@ -3272,9 +3161,6 @@ static unsigned int
 rest_of_handle_sched (void)
 {
 #ifdef INSN_SCHEDULING
-  /* Do control and data sched analysis,
-     and write some of the results to dump file.  */
-
   schedule_insns ();
 #endif
   return 0;
 }
 
@@ -3284,7 +3170,8 @@ static bool
 gate_handle_sched2 (void)
 {
 #ifdef INSN_SCHEDULING
-  return optimize > 0 && flag_schedule_insns_after_reload;
+  return optimize > 0 && flag_schedule_insns_after_reload
+    && dbg_cnt (sched2_func);
 #else
   return 0;
 #endif
@@ -3297,25 +3184,18 @@ rest_of_handle_sched2 (void)
 #ifdef INSN_SCHEDULING
   /* Do control and data sched analysis again,
      and write some more of the results to dump file.  */
-
-  split_all_insns (1);
-
   if (flag_sched2_use_superblocks || flag_sched2_use_traces)
-    {
-      schedule_ebbs ();
-      /* No liveness updating code yet, but it should be easy to do.
-	 reg-stack recomputes the liveness when needed for now.  */
-      count_or_remove_death_notes (NULL, 1);
-      cleanup_cfg (CLEANUP_EXPENSIVE);
-    }
+    schedule_ebbs ();
   else
     schedule_insns ();
 #endif
   return 0;
 }
 
-struct tree_opt_pass pass_sched =
+struct rtl_opt_pass pass_sched =
 {
+ {
+  RTL_PASS,
   "sched1",                             /* name */
   gate_handle_sched,                    /* gate */
   rest_of_handle_sched,                 /* execute */
@@ -3327,13 +3207,17 @@ struct tree_opt_pass pass_sched =
   0,                                    /* properties_provided */
   0,                                    /* properties_destroyed */
   0,                                    /* todo_flags_start */
+  TODO_df_finish | TODO_verify_rtl_sharing |
   TODO_dump_func |
-  TODO_ggc_collect,                     /* todo_flags_finish */
-  'S'                                   /* letter */
+  TODO_verify_flow |
+  TODO_ggc_collect                      /* todo_flags_finish */
+ }
 };
 
-struct tree_opt_pass pass_sched2 =
+struct rtl_opt_pass pass_sched2 =
 {
+ {
+  RTL_PASS,
   "sched2",                             /* name */
   gate_handle_sched2,                   /* gate */
   rest_of_handle_sched2,                /* execute */
@@ -3345,8 +3229,10 @@ struct tree_opt_pass pass_sched2 =
   0,                                    /* properties_provided */
   0,                                    /* properties_destroyed */
   0,                                    /* todo_flags_start */
+  TODO_df_finish | TODO_verify_rtl_sharing |
   TODO_dump_func |
-  TODO_ggc_collect,                     /* todo_flags_finish */
-  'R'                                   /* letter */
+  TODO_verify_flow |
+  TODO_ggc_collect                      /* todo_flags_finish */
+ }
 };
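Reviewer note: the dbg_cnt () gates threaded through this patch
(sched_func and sched2_func in the pass gates above, sched_region and
sched_block inside the scheduler) hook into GCC's debug-counter
machinery, so a scheduling miscompilation can be bisected.  A hedged
usage sketch (the counter names come from this patch; the option syntax
is GCC's -fdbg-cnt):

    gcc -O2 -fdbg-cnt=sched_block:10 foo.c

allows only the first 10 invocations of schedule_block () to run;
varying the limit narrows down which block's schedule introduces a bug.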