diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c
index d27630ead5d..f6dc8ec04fe 100644
--- a/gcc/sched-ebb.c
+++ b/gcc/sched-ebb.c
@@ -1,6 +1,6 @@
 /* Instruction scheduling pass.
    Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
-   1999, 2000, 2001, 2002 Free Software Foundation, Inc.
+   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
    Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
    and currently maintained by, Jim Wilson (wilson@cygnus.com)
 
@@ -18,8 +18,8 @@ for more details.
 
 You should have received a copy of the GNU General Public License
 along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
 
 #include "config.h"
 #include "system.h"
@@ -29,7 +29,6 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 #include "rtl.h"
 #include "tm_p.h"
 #include "hard-reg-set.h"
-#include "basic-block.h"
 #include "regs.h"
 #include "function.h"
 #include "flags.h"
@@ -39,50 +38,65 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 #include "toplev.h"
 #include "recog.h"
 #include "cfglayout.h"
+#include "params.h"
 #include "sched-int.h"
 #include "target.h"
+#include "output.h"
 
-/* The number of insns to be scheduled in total. */
-static int target_n_insns;
 /* The number of insns scheduled so far. */
 static int sched_n_insns;
 
+/* The number of insns to be scheduled in total. */
+static int n_insns;
+
+/* Set of blocks, that already have their dependencies calculated. */
+static bitmap_head dont_calc_deps;
+/* Set of basic blocks, that are ebb heads of tails respectively. */
+static bitmap_head ebb_head, ebb_tail;
+
+/* Last basic block in current ebb. */
+static basic_block last_bb;
+
 /* Implementations of the sched_info functions for region scheduling. 
*/ -static void init_ready_list PARAMS ((struct ready_list *)); -static int can_schedule_ready_p PARAMS ((rtx)); -static int new_ready PARAMS ((rtx)); -static int schedule_more_p PARAMS ((void)); -static const char *ebb_print_insn PARAMS ((rtx, int)); -static int rank PARAMS ((rtx, rtx)); -static int contributes_to_priority PARAMS ((rtx, rtx)); -static void compute_jump_reg_dependencies PARAMS ((rtx, regset)); -static basic_block earliest_block_with_similiar_load PARAMS ((basic_block, - rtx)); -static void add_deps_for_risky_insns PARAMS ((rtx, rtx)); -static basic_block schedule_ebb PARAMS ((rtx, rtx)); -static basic_block fix_basic_block_boundaries PARAMS ((basic_block, basic_block, rtx, rtx)); -static void add_missing_bbs PARAMS ((rtx, basic_block, basic_block)); +static void init_ready_list (void); +static void begin_schedule_ready (rtx, rtx); +static int schedule_more_p (void); +static const char *ebb_print_insn (rtx, int); +static int rank (rtx, rtx); +static int contributes_to_priority (rtx, rtx); +static void compute_jump_reg_dependencies (rtx, regset, regset, regset); +static basic_block earliest_block_with_similiar_load (basic_block, rtx); +static void add_deps_for_risky_insns (rtx, rtx); +static basic_block schedule_ebb (rtx, rtx); + +static void add_remove_insn (rtx, int); +static void add_block1 (basic_block, basic_block); +static basic_block advance_target_bb (basic_block, rtx); +static void fix_recovery_cfg (int, int, int); + +#ifdef ENABLE_CHECKING +static int ebb_head_or_leaf_p (basic_block, int); +#endif /* Return nonzero if there are more insns that should be scheduled. */ static int -schedule_more_p () +schedule_more_p (void) { - return sched_n_insns < target_n_insns; + return sched_n_insns < n_insns; } /* Add all insns that are initially ready to the ready list READY. Called once before scheduling a set of insns. */ static void -init_ready_list (ready) - struct ready_list *ready; +init_ready_list (void) { + int n = 0; rtx prev_head = current_sched_info->prev_head; rtx next_tail = current_sched_info->next_tail; rtx insn; - target_n_insns = 0; sched_n_insns = 0; #if 0 @@ -95,31 +109,76 @@ init_ready_list (ready) Count number of insns in the target block being scheduled. */ for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn)) { - if (INSN_DEP_COUNT (insn) == 0) - ready_add (ready, insn); - target_n_insns++; + try_ready (insn); + n++; } -} -/* Called after taking INSN from the ready list. Returns nonzero if this - insn can be scheduled, nonzero if we should silently discard it. */ + gcc_assert (n == n_insns); +} -static int -can_schedule_ready_p (insn) - rtx insn ATTRIBUTE_UNUSED; +/* INSN is being scheduled after LAST. Update counters. */ +static void +begin_schedule_ready (rtx insn, rtx last) { sched_n_insns++; - return 1; -} -/* Called after INSN has all its dependencies resolved. Return nonzero - if it should be moved to the ready list or the queue, or zero if we - should silently discard it. */ -static int -new_ready (next) - rtx next ATTRIBUTE_UNUSED; -{ - return 1; + if (BLOCK_FOR_INSN (insn) == last_bb + /* INSN is a jump in the last block, ... */ + && control_flow_insn_p (insn) + /* that is going to be moved over some instructions. */ + && last != PREV_INSN (insn)) + { + edge e; + edge_iterator ei; + basic_block bb; + + /* An obscure special case, where we do have partially dead + instruction scheduled after last control flow instruction. + In this case we can create new basic block. 
It is + always exactly one basic block last in the sequence. */ + + FOR_EACH_EDGE (e, ei, last_bb->succs) + if (e->flags & EDGE_FALLTHRU) + break; + +#ifdef ENABLE_CHECKING + gcc_assert (!e || !(e->flags & EDGE_COMPLEX)); + + gcc_assert (BLOCK_FOR_INSN (insn) == last_bb + && !IS_SPECULATION_CHECK_P (insn) + && BB_HEAD (last_bb) != insn + && BB_END (last_bb) == insn); + + { + rtx x; + + x = NEXT_INSN (insn); + if (e) + gcc_assert (NOTE_P (x) || LABEL_P (x)); + else + gcc_assert (BARRIER_P (x)); + } +#endif + + if (e) + { + bb = split_edge (e); + gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb))); + } + else + /* Create an empty unreachable block after the INSN. */ + bb = create_basic_block (NEXT_INSN (insn), NULL_RTX, last_bb); + + /* split_edge () creates BB before E->DEST. Keep in mind, that + this operation extends scheduling region till the end of BB. + Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out + of the scheduling region. */ + current_sched_info->next_tail = NEXT_INSN (BB_END (bb)); + gcc_assert (current_sched_info->next_tail); + + add_block (bb, last_bb); + gcc_assert (last_bb == bb); + } } /* Return a string that contains the insn uid and optionally anything else @@ -128,9 +187,7 @@ new_ready (next) to be formatted so that multiple output lines will line up nicely. */ static const char * -ebb_print_insn (insn, aligned) - rtx insn; - int aligned ATTRIBUTE_UNUSED; +ebb_print_insn (rtx insn, int aligned ATTRIBUTE_UNUSED) { static char tmp[80]; @@ -143,8 +200,7 @@ ebb_print_insn (insn, aligned) is to be preferred. Zero if they are equally good. */ static int -rank (insn1, insn2) - rtx insn1, insn2; +rank (rtx insn1, rtx insn2) { basic_block bb1 = BLOCK_FOR_INSN (insn1); basic_block bb2 = BLOCK_FOR_INSN (insn2); @@ -163,28 +219,35 @@ rank (insn1, insn2) calculations. */ static int -contributes_to_priority (next, insn) - rtx next ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED; +contributes_to_priority (rtx next ATTRIBUTE_UNUSED, + rtx insn ATTRIBUTE_UNUSED) { return 1; } -/* INSN is a JUMP_INSN. Store the set of registers that must be considered - to be set by this jump in SET. */ + /* INSN is a JUMP_INSN, COND_SET is the set of registers that are + conditionally set before INSN. Store the set of registers that + must be considered as used by this jump in USED and that of + registers that must be considered as set in SET. */ static void -compute_jump_reg_dependencies (insn, set) - rtx insn; - regset set; +compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used, + regset set) { basic_block b = BLOCK_FOR_INSN (insn); edge e; - for (e = b->succ; e; e = e->succ_next) - if ((e->flags & EDGE_FALLTHRU) == 0) - { - bitmap_operation (set, set, e->dest->global_live_at_start, - BITMAP_IOR); - } + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, b->succs) + if (e->flags & EDGE_FALLTHRU) + /* The jump may be a by-product of a branch that has been merged + in the main codepath after being conditionalized. Therefore + it may guard the fallthrough block from using a value that has + conditionally overwritten that of the main codepath. So we + consider that it restores the value of the main codepath. 
*/ + bitmap_and (set, glat_start [e->dest->index], cond_set); + else + bitmap_ior_into (used, glat_start [e->dest->index]); } /* Used in schedule_insns to initialize current_sched_info for scheduling @@ -193,9 +256,9 @@ compute_jump_reg_dependencies (insn, set) static struct sched_info ebb_sched_info = { init_ready_list, - can_schedule_ready_p, + NULL, schedule_more_p, - new_ready, + NULL, rank, ebb_print_insn, contributes_to_priority, @@ -203,145 +266,21 @@ static struct sched_info ebb_sched_info = NULL, NULL, NULL, NULL, - 0, 1 + 0, 1, 0, + + add_remove_insn, + begin_schedule_ready, + add_block1, + advance_target_bb, + fix_recovery_cfg, +#ifdef ENABLE_CHECKING + ebb_head_or_leaf_p, +#endif + /* We need to DETACH_LIVE_INFO to be able to create new basic blocks. + See begin_schedule_ready (). */ + SCHED_EBB | USE_GLAT | DETACH_LIFE_INFO }; -/* It is possible that ebb scheduling eliminated some blocks. - Place blocks from FIRST to LAST before BEFORE. */ - -static void -add_missing_bbs (before, first, last) - rtx before; - basic_block first, last; -{ - for (; last != first->prev_bb; last = last->prev_bb) - { - before = emit_note_before (NOTE_INSN_BASIC_BLOCK, before); - NOTE_BASIC_BLOCK (before) = last; - last->head = before; - last->end = before; - update_bb_for_insn (last); - } -} - -/* Fixup the CFG after EBB scheduling. Re-recognize the basic - block boundaries in between HEAD and TAIL and update basic block - structures between BB and LAST. */ - -static basic_block -fix_basic_block_boundaries (bb, last, head, tail) - basic_block bb, last; - rtx head, tail; -{ - rtx insn = head; - rtx last_inside = bb->head; - rtx aftertail = NEXT_INSN (tail); - - head = bb->head; - - for (; insn != aftertail; insn = NEXT_INSN (insn)) - { - if (GET_CODE (insn) == CODE_LABEL) - abort (); - /* Create new basic blocks just before first insn. */ - if (inside_basic_block_p (insn)) - { - if (!last_inside) - { - rtx note; - - /* Re-emit the basic block note for newly found BB header. */ - if (GET_CODE (insn) == CODE_LABEL) - { - note = emit_note_after (NOTE_INSN_BASIC_BLOCK, insn); - head = insn; - last_inside = note; - } - else - { - note = emit_note_before (NOTE_INSN_BASIC_BLOCK, insn); - head = note; - last_inside = insn; - } - } - else - last_inside = insn; - } - /* Control flow instruction terminate basic block. It is possible - that we've eliminated some basic blocks (made them empty). - Find the proper basic block using BLOCK_FOR_INSN and arrange things in - a sensible way by inserting empty basic blocks as needed. */ - if (control_flow_insn_p (insn) || (insn == tail && last_inside)) - { - basic_block curr_bb = BLOCK_FOR_INSN (insn); - rtx note; - - if (!control_flow_insn_p (insn)) - curr_bb = last; - if (bb == last->next_bb) - { - edge f; - rtx h; - - /* An obscure special case, where we do have partially dead - instruction scheduled after last control flow instruction. - In this case we can create new basic block. It is - always exactly one basic block last in the sequence. Handle - it by splitting the edge and repositioning the block. - This is somewhat hackish, but at least avoid cut&paste - - Safter sollution can be to bring the code into sequence, - do the split and re-emit it back in case this will ever - trigger problem. */ - f = bb->prev_bb->succ; - while (f && !(f->flags & EDGE_FALLTHRU)) - f = f->succ_next; - - if (f) - { - last = curr_bb = split_edge (f); - h = curr_bb->head; - curr_bb->head = head; - curr_bb->end = insn; - /* Edge splitting created misplaced BASIC_BLOCK note, kill - it. 
*/ - delete_insn (h); - } - /* It may happen that code got moved past unconditional jump in - case the code is completely dead. Kill it. */ - else - { - rtx next = next_nonnote_insn (insn); - delete_insn_chain (head, insn); - /* We keep some notes in the way that may split barrier from the - jump. */ - if (GET_CODE (next) == BARRIER) - { - emit_barrier_after (prev_nonnote_insn (head)); - delete_insn (next); - } - insn = NULL; - } - } - else - { - curr_bb->head = head; - curr_bb->end = insn; - add_missing_bbs (curr_bb->head, bb, curr_bb->prev_bb); - } - note = GET_CODE (head) == CODE_LABEL ? NEXT_INSN (head) : head; - NOTE_BASIC_BLOCK (note) = curr_bb; - update_bb_for_insn (curr_bb); - bb = curr_bb->next_bb; - last_inside = NULL; - if (!insn) - break; - } - } - add_missing_bbs (last->next_bb->head, bb, last); - return bb->prev_bb; -} - /* Returns the earliest block in EBB currently being processed where a "similar load" 'insn2' is found, and hence LOAD_INSN can move speculatively into the found block. All the following must hold: @@ -359,32 +298,26 @@ fix_basic_block_boundaries (bb, last, head, tail) blocks in EBB. The list is formed in `add_deps_for_risky_insns'. */ static basic_block -earliest_block_with_similiar_load (last_block, load_insn) - basic_block last_block; - rtx load_insn; +earliest_block_with_similiar_load (basic_block last_block, rtx load_insn) { - rtx back_link; + dep_link_t back_link; basic_block bb, earliest_block = NULL; - for (back_link = LOG_LINKS (load_insn); - back_link; - back_link = XEXP (back_link, 1)) + FOR_EACH_DEP_LINK (back_link, INSN_BACK_DEPS (load_insn)) { - rtx insn1 = XEXP (back_link, 0); + rtx insn1 = DEP_LINK_PRO (back_link); - if (GET_MODE (back_link) == VOIDmode) + if (DEP_LINK_KIND (back_link) == REG_DEP_TRUE) { /* Found a DEF-USE dependence (insn1, load_insn). */ - rtx fore_link; + dep_link_t fore_link; - for (fore_link = INSN_DEPEND (insn1); - fore_link; - fore_link = XEXP (fore_link, 1)) + FOR_EACH_DEP_LINK (fore_link, INSN_FORW_DEPS (insn1)) { - rtx insn2 = XEXP (fore_link, 0); + rtx insn2 = DEP_LINK_CON (fore_link); basic_block insn2_block = BLOCK_FOR_INSN (insn2); - if (GET_MODE (fore_link) == VOIDmode) + if (DEP_LINK_KIND (fore_link) == REG_DEP_TRUE) { if (earliest_block != NULL && earliest_block->index < insn2_block->index) @@ -394,7 +327,7 @@ earliest_block_with_similiar_load (last_block, load_insn) if (haifa_classify_insn (insn2) != PFREE_CANDIDATE) /* insn2 not guaranteed to be a 1 base reg load. */ continue; - + for (bb = last_block; bb; bb = bb->aux) if (insn2_block == bb) break; @@ -410,21 +343,20 @@ earliest_block_with_similiar_load (last_block, load_insn) return earliest_block; } -/* The following function adds dependecies between jumps and risky +/* The following function adds dependencies between jumps and risky insns in given ebb. */ static void -add_deps_for_risky_insns (head, tail) - rtx head, tail; +add_deps_for_risky_insns (rtx head, rtx tail) { rtx insn, prev; int class; rtx last_jump = NULL_RTX; rtx next_tail = NEXT_INSN (tail); basic_block last_block = NULL, bb; - + for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) - if (GET_CODE (insn) == JUMP_INSN) + if (control_flow_insn_p (insn)) { bb = BLOCK_FOR_INSN (insn); bb->aux = last_block; @@ -446,10 +378,10 @@ add_deps_for_risky_insns (head, tail) bb = bb->aux; if (!bb) break; - prev = bb->end; + prev = BB_END (bb); } } - /* FALLTHRU */ + /* Fall through. 
*/ case TRAP_RISKY: case IRISKY: case PRISKY_CANDIDATE: @@ -458,10 +390,29 @@ add_deps_for_risky_insns (head, tail) /* We can not change the mode of the backward dependency because REG_DEP_ANTI has the lowest rank. */ - if (add_dependence (insn, prev, REG_DEP_ANTI)) - add_forward_dependence (prev, insn, REG_DEP_ANTI); + if (! sched_insns_conditions_mutex_p (insn, prev)) + { + if (!(current_sched_info->flags & DO_SPECULATION)) + { + enum DEPS_ADJUST_RESULT res; + + res = add_or_update_back_dep (insn, prev, + REG_DEP_ANTI, DEP_ANTI); + + if (res == DEP_CREATED) + add_forw_dep (DEPS_LIST_FIRST (INSN_BACK_DEPS (insn))); + else + gcc_assert (res != DEP_CHANGED); + } + else + add_or_update_back_forw_dep (insn, prev, REG_DEP_ANTI, + set_dep_weak (DEP_ANTI, + BEGIN_CONTROL, + MAX_DEP_WEAK)); + } + break; - + default: break; } @@ -479,45 +430,51 @@ add_deps_for_risky_insns (head, tail) and TAIL. */ static basic_block -schedule_ebb (head, tail) - rtx head, tail; +schedule_ebb (rtx head, rtx tail) { - int n_insns; - basic_block b; + basic_block first_bb, target_bb; struct deps tmp_deps; - basic_block first_bb = BLOCK_FOR_INSN (head); - basic_block last_bb = BLOCK_FOR_INSN (tail); + + first_bb = BLOCK_FOR_INSN (head); + last_bb = BLOCK_FOR_INSN (tail); if (no_real_insns_p (head, tail)) return BLOCK_FOR_INSN (tail); - init_deps_global (); + gcc_assert (INSN_P (head) && INSN_P (tail)); + + if (!bitmap_bit_p (&dont_calc_deps, first_bb->index)) + { + init_deps_global (); - /* Compute LOG_LINKS. */ - init_deps (&tmp_deps); - sched_analyze (&tmp_deps, head, tail); - free_deps (&tmp_deps); + /* Compute backward dependencies. */ + init_deps (&tmp_deps); + sched_analyze (&tmp_deps, head, tail); + free_deps (&tmp_deps); - /* Compute INSN_DEPEND. */ - compute_forward_dependences (head, tail); + /* Compute forward dependencies. */ + compute_forward_dependences (head, tail); - add_deps_for_risky_insns (head, tail); + add_deps_for_risky_insns (head, tail); - if (targetm.sched.dependencies_evaluation_hook) - targetm.sched.dependencies_evaluation_hook (head, tail); + if (targetm.sched.dependencies_evaluation_hook) + targetm.sched.dependencies_evaluation_hook (head, tail); + + finish_deps_global (); + } + else + /* Only recovery blocks can have their dependencies already calculated, + and they always are single block ebbs. */ + gcc_assert (first_bb == last_bb); /* Set priorities. */ + current_sched_info->sched_max_insns_priority = 0; n_insns = set_priorities (head, tail); + current_sched_info->sched_max_insns_priority++; current_sched_info->prev_head = PREV_INSN (head); current_sched_info->next_tail = NEXT_INSN (tail); - if (write_symbols != NO_DEBUG) - { - save_line_notes (first_bb->index, head, tail); - rm_line_notes (head, tail); - } - /* rm_other_notes only removes notes which are _inside_ the block---that is, it won't remove notes before the first real insn or after the last real insn of the block. So if the first insn @@ -530,11 +487,7 @@ schedule_ebb (head, tail) for (note = REG_NOTES (head); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_SAVE_NOTE) - { - remove_note (head, note); - note = XEXP (note, 1); - remove_note (head, note); - } + remove_note (head, note); } /* Remove remaining note insns from the block, save them in @@ -542,64 +495,90 @@ schedule_ebb (head, tail) schedule_block (). 
*/ rm_other_notes (head, tail); + unlink_bb_notes (first_bb, last_bb); + current_sched_info->queue_must_finish_empty = 1; - schedule_block (-1, n_insns); + target_bb = first_bb; + schedule_block (&target_bb, n_insns); + /* We might pack all instructions into fewer blocks, + so we may made some of them empty. Can't assert (b == last_bb). */ + /* Sanity check: verify that all region insns were scheduled. */ - if (sched_n_insns != n_insns) - abort (); + gcc_assert (sched_n_insns == n_insns); head = current_sched_info->head; tail = current_sched_info->tail; - if (write_symbols != NO_DEBUG) - restore_line_notes (head, tail); - b = fix_basic_block_boundaries (first_bb, last_bb, head, tail); + if (EDGE_COUNT (last_bb->preds) == 0) + /* LAST_BB is unreachable. */ + { + gcc_assert (first_bb != last_bb + && EDGE_COUNT (last_bb->succs) == 0); + last_bb = last_bb->prev_bb; + delete_basic_block (last_bb->next_bb); + } - finish_deps_global (); - return b; + return last_bb; } -/* The one entry point in this file. DUMP_FILE is the dump file for - this pass. */ +/* The one entry point in this file. */ void -schedule_ebbs (dump_file) - FILE *dump_file; +schedule_ebbs (void) { basic_block bb; + int probability_cutoff; + rtx tail; + sbitmap large_region_blocks, blocks; + int any_large_regions; + + if (profile_info && flag_branch_probabilities) + probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK); + else + probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY); + probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff; /* Taking care of this degenerate case makes the rest of this code simpler. */ - if (n_basic_blocks == 0) + if (n_basic_blocks == NUM_FIXED_BLOCKS) return; - sched_init (dump_file); - + /* We need current_sched_info in init_dependency_caches, which is + invoked via sched_init. */ current_sched_info = &ebb_sched_info; - allocate_reg_life_data (); + sched_init (); + compute_bb_for_insn (); + /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers. */ + bitmap_initialize (&dont_calc_deps, 0); + bitmap_clear (&dont_calc_deps); + bitmap_initialize (&ebb_head, 0); + bitmap_clear (&ebb_head); + bitmap_initialize (&ebb_tail, 0); + bitmap_clear (&ebb_tail); + /* Schedule every region in the subroutine. */ FOR_EACH_BB (bb) { - rtx head = bb->head; - rtx tail; + rtx head = BB_HEAD (bb); for (;;) { edge e; - tail = bb->end; + edge_iterator ei; + tail = BB_END (bb); if (bb->next_bb == EXIT_BLOCK_PTR - || GET_CODE (bb->next_bb->head) == CODE_LABEL) + || LABEL_P (BB_HEAD (bb->next_bb))) break; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if ((e->flags & EDGE_FALLTHRU) != 0) break; if (! e) break; - if (e->probability < REG_BR_PROB_BASE / 2) + if (e->probability <= probability_cutoff) break; bb = bb->next_bb; } @@ -608,29 +587,173 @@ schedule_ebbs (dump_file) a note or two. */ while (head != tail) { - if (GET_CODE (head) == NOTE) + if (NOTE_P (head)) head = NEXT_INSN (head); - else if (GET_CODE (tail) == NOTE) + else if (NOTE_P (tail)) tail = PREV_INSN (tail); - else if (GET_CODE (head) == CODE_LABEL) + else if (LABEL_P (head)) head = NEXT_INSN (head); else break; } + bitmap_set_bit (&ebb_head, BLOCK_NUM (head)); bb = schedule_ebb (head, tail); + bitmap_set_bit (&ebb_tail, bb->index); + } + bitmap_clear (&dont_calc_deps); + + gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO); + /* We can create new basic blocks during scheduling, and + attach_life_info () will create regsets for them + (along with attaching existing info back). 
*/ + attach_life_info (); + + /* Updating register live information. */ + allocate_reg_life_data (); + + any_large_regions = 0; + large_region_blocks = sbitmap_alloc (last_basic_block); + sbitmap_zero (large_region_blocks); + FOR_EACH_BB (bb) + SET_BIT (large_region_blocks, bb->index); + + blocks = sbitmap_alloc (last_basic_block); + sbitmap_zero (blocks); + + /* Update life information. For regions consisting of multiple blocks + we've possibly done interblock scheduling that affects global liveness. + For regions consisting of single blocks we need to do only local + liveness. */ + FOR_EACH_BB (bb) + { + int bbi; + + bbi = bb->index; + + if (!bitmap_bit_p (&ebb_head, bbi) + || !bitmap_bit_p (&ebb_tail, bbi) + /* New blocks (e.g. recovery blocks) should be processed + as parts of large regions. */ + || !glat_start[bbi]) + any_large_regions = 1; + else + { + SET_BIT (blocks, bbi); + RESET_BIT (large_region_blocks, bbi); + } + } + + update_life_info (blocks, UPDATE_LIFE_LOCAL, 0); + sbitmap_free (blocks); + + if (any_large_regions) + { + update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL, 0); + +#ifdef ENABLE_CHECKING + /* !!! We can't check reg_live_info here because of the fact, + that destination registers of COND_EXEC's may be dead + before scheduling (while they should be alive). Don't know why. */ + /*check_reg_live (true);*/ +#endif } + sbitmap_free (large_region_blocks); - /* Updating life info can be done by local propagation over the modified - superblocks. */ + bitmap_clear (&ebb_head); + bitmap_clear (&ebb_tail); /* Reposition the prologue and epilogue notes in case we moved the prologue/epilogue insns. */ if (reload_completed) reposition_prologue_and_epilogue_notes (get_insns ()); - if (write_symbols != NO_DEBUG) - rm_redundant_line_notes (); - sched_finish (); } + +/* INSN has been added to/removed from current ebb. */ +static void +add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p) +{ + if (!remove_p) + n_insns++; + else + n_insns--; +} + +/* BB was added to ebb after AFTER. */ +static void +add_block1 (basic_block bb, basic_block after) +{ + /* Recovery blocks are always bounded by BARRIERS, + therefore, they always form single block EBB, + therefore, we can use rec->index to identify such EBBs. */ + if (after == EXIT_BLOCK_PTR) + bitmap_set_bit (&dont_calc_deps, bb->index); + else if (after == last_bb) + last_bb = bb; +} + +/* Return next block in ebb chain. For parameter meaning please refer to + sched-int.h: struct sched_info: advance_target_bb. */ +static basic_block +advance_target_bb (basic_block bb, rtx insn) +{ + if (insn) + { + if (BLOCK_FOR_INSN (insn) != bb + && control_flow_insn_p (insn) + /* We handle interblock movement of the speculation check + or over a speculation check in + haifa-sched.c: move_block_after_check (). */ + && !IS_SPECULATION_BRANCHY_CHECK_P (insn) + && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb))) + { + /* Assert that we don't move jumps across blocks. */ + gcc_assert (!control_flow_insn_p (BB_END (bb)) + && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb))); + return bb; + } + else + return 0; + } + else + /* Return next non empty block. */ + { + do + { + gcc_assert (bb != last_bb); + + bb = bb->next_bb; + } + while (bb_note (bb) == BB_END (bb)); + + return bb; + } +} + +/* Fix internal data after interblock movement of jump instruction. + For parameter meaning please refer to + sched-int.h: struct sched_info: fix_recovery_cfg. 
*/ +static void +fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi, int jump_bb_nexti) +{ + gcc_assert (last_bb->index != bbi); + + if (jump_bb_nexti == last_bb->index) + last_bb = BASIC_BLOCK (jump_bbi); +} + +#ifdef ENABLE_CHECKING +/* Return non zero, if BB is first or last (depending of LEAF_P) block in + current ebb. For more information please refer to + sched-int.h: struct sched_info: region_head_or_leaf_p. */ +static int +ebb_head_or_leaf_p (basic_block bb, int leaf_p) +{ + if (!leaf_p) + return bitmap_bit_p (&ebb_head, bb->index); + else + return bitmap_bit_p (&ebb_tail, bb->index); +} +#endif /* ENABLE_CHECKING */
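
A note for readers skimming the patch, on the heuristic it parameterizes: schedule_ebbs () used to extend an extended basic block along any fallthrough edge hotter than the hard-coded REG_BR_PROB_BASE / 2, and now derives the cutoff from the tracer parameters instead (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK when profile feedback is available, TRACER_MIN_BRANCH_PROBABILITY otherwise). The sketch below restates that walk in isolation; struct block, ebb_tail_for () and the main () driver are invented stand-ins, not GCC's CFG types, but the stop conditions and the cutoff arithmetic mirror the patched loop.

#include <stdio.h>

/* Stand-in for GCC's REG_BR_PROB_BASE; edge probabilities are
   integers scaled against this base.  */
#define REG_BR_PROB_BASE 10000

/* Toy CFG node.  In GCC this data lives in basic_block and its
   successor edges; this struct exists only for the sketch.  */
struct block
{
  int index;
  int starts_with_label;   /* Block is a jump target (LABEL_P).      */
  struct block *fallthru;  /* Successor via EDGE_FALLTHRU, or NULL.  */
  int fallthru_prob;       /* e->probability of that edge.           */
};

/* Mirror of the walk in schedule_ebbs (): grow the ebb while a
   sufficiently hot fallthrough edge leads to a block that no jump
   targets.  A cold edge, a label or the exit block ends the region.  */
static struct block *
ebb_tail_for (struct block *head, int probability_cutoff)
{
  struct block *bb = head;

  while (bb->fallthru
         && !bb->fallthru->starts_with_label
         && bb->fallthru_prob > probability_cutoff)
    bb = bb->fallthru;

  return bb;
}

int
main (void)
{
  /* Three blocks: the edge 0->1 is hot, the edge 1->2 is cold.  */
  struct block b2 = { 2, 0, NULL, 0 };
  struct block b1 = { 1, 0, &b2, REG_BR_PROB_BASE / 10 };
  struct block b0 = { 0, 0, &b1, REG_BR_PROB_BASE };

  /* The patch computes the cutoff as REG_BR_PROB_BASE / 100 times the
     tracer parameter; assume a 50% parameter here.  */
  int cutoff = REG_BR_PROB_BASE / 100 * 50;

  printf ("ebb: %d .. %d\n", b0.index, ebb_tail_for (&b0, cutoff)->index);
  return 0;
}

Under the assumed 50% cutoff this prints "ebb: 0 .. 1": the hot edge extends the region past block 0, the cold edge ends it at block 1, and block 2 starts the next ebb, exactly as a label or the exit block would in the real loop.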
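
A similar reading aid for add_deps_for_risky_insns (), whose body the patch rewrites around the new dependence lists (and which now tests control_flow_insn_p and skips insn pairs with mutually exclusive predicates): the invariant is unchanged, namely that an instruction that may trap or fault must not be hoisted above the last control-flow instruction preceding it in the ebb. Again a toy model; struct toy_insn and add_deps_for_risky () are invented, and the real code records a REG_DEP_ANTI dependence (weakened to a speculative BEGIN_CONTROL dependence when DO_SPECULATION is in effect) rather than a plain uid.

#include <stdio.h>

/* Toy instruction record.  In GCC these are rtx insns together with
   the dependence lists of sched-deps.c; invented for the sketch.  */
struct toy_insn
{
  int uid;
  int is_jump;      /* control_flow_insn_p () in the patch.        */
  int is_risky;     /* haifa_classify_insn () reported a hazard.   */
  int anti_dep_on;  /* Uid of the jump it must stay below, or -1.  */
};

/* Mirror of add_deps_for_risky_insns (): remember the last jump seen
   in the ebb and pin every later risky insn underneath it with an
   anti-dependence, so scheduling cannot speculate it upward.  */
static void
add_deps_for_risky (struct toy_insn *insn, int n)
{
  int last_jump_uid = -1;
  int i;

  for (i = 0; i < n; i++)
    {
      if (insn[i].is_jump)
        last_jump_uid = insn[i].uid;
      else if (insn[i].is_risky && last_jump_uid >= 0)
        insn[i].anti_dep_on = last_jump_uid;
    }
}

int
main (void)
{
  struct toy_insn ebb[] = {
    { 10, 0, 1, -1 },  /* Risky, but no jump above it: unconstrained.  */
    { 11, 1, 0, -1 },  /* Conditional jump.                            */
    { 12, 0, 1, -1 },  /* Risky: must not move above insn 11.          */
    { 13, 0, 0, -1 },  /* Harmless arithmetic: free to move.           */
  };
  int i;

  add_deps_for_risky (ebb, 4);
  for (i = 0; i < 4; i++)
    printf ("insn %d: anti dep on %d\n", ebb[i].uid, ebb[i].anti_dep_on);
  return 0;
}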