+ first_bb = EBB_FIRST_BB (bb);
+ last_bb = EBB_LAST_BB (bb);
+
+ get_ebb_head_tail (first_bb, last_bb, &head, &tail);
+
+ if (no_real_insns_p (head, tail))
+ {
+ gcc_assert (first_bb == last_bb);
+ continue;
+ }
+
+ current_sched_info->prev_head = PREV_INSN (head);
+ current_sched_info->next_tail = NEXT_INSN (tail);
+
+ remove_notes (head, tail);
+
+ unlink_bb_notes (first_bb, last_bb);
+
+ target_bb = bb;
+
+ gcc_assert (flag_schedule_interblock || current_nr_blocks == 1);
+ current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;
+
+ curr_bb = first_bb;
+ if (dbg_cnt (sched_block))
+ {
+ schedule_block (&curr_bb);
+ gcc_assert (EBB_FIRST_BB (bb) == first_bb);
+ sched_rgn_n_insns += sched_n_insns;
+ }
+ else
+ {
+ sched_rgn_n_insns += rgn_n_insns;
+ }
+
+ /* Clean up. */
+ if (current_nr_blocks > 1)
+ free_trg_info ();
+ }
+
+ /* Sanity check: verify that all region insns were scheduled. */
+ gcc_assert (sched_rgn_n_insns == rgn_n_insns);
+
+ sched_finish_ready_list ();
+
+ /* Done with this region. */
+ sched_rgn_local_finish ();
+
+ /* Free dependencies. */
+ for (bb = 0; bb < current_nr_blocks; ++bb)
+ free_block_dependencies (bb);
+
+ gcc_assert (haifa_recovery_bb_ever_added_p
+ || deps_pools_are_empty_p ());
+}
+
+ /* Initialize data structures for region scheduling.  If SINGLE_BLOCKS_P,
+ partitioning is forced to one basic block per region.  */
+
+void
+sched_rgn_init (bool single_blocks_p)
+{
+ /* Minimum probability an insn must have to be considered for
+ speculative motion, scaled from a percentage parameter to
+ REG_BR_PROB_BASE units.  */
+ min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
+ / 100);
+
+ /* Reset statistics counters for interblock and speculative motions.  */
+ nr_inter = 0;
+ nr_spec = 0;
+
+ extend_regions ();
+
+ /* The entry and exit blocks do not belong to any region.  */
+ CONTAINING_RGN (ENTRY_BLOCK) = -1;
+ CONTAINING_RGN (EXIT_BLOCK) = -1;
+
+ /* Compute regions for scheduling.  Fall back to single-block regions
+ when the caller requested them, when there is only one real basic
+ block, when interblock scheduling is disabled, or when the CFG is
+ too irregular for region formation.  */
+ if (single_blocks_p
+ || n_basic_blocks == NUM_FIXED_BLOCKS + 1
+ || !flag_schedule_interblock
+ || is_cfg_nonregular ())
+ {
+ find_single_block_region (sel_sched_p ());
+ }
+ else
+ {
+ /* Compute the dominators and post dominators.  */
+ if (!sel_sched_p ())
+ calculate_dominance_info (CDI_DOMINATORS);
+
+ /* Find regions.  */
+ find_rgns ();
+
+ if (sched_verbose >= 3)
+ debug_regions ();
+
+ /* For now. This will move as more and more of haifa is converted
+ to using the cfg code.  */
+ if (!sel_sched_p ())
+ free_dominance_info (CDI_DOMINATORS);
+ }
+
+ /* Every real block lands in exactly one region, so the region count
+ is positive and bounded by the block count.  */
+ gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks);
+
+ /* Set a one-past-the-end sentinel so RGN_BLOCKS (N) always marks the
+ end of region N - 1's block list.  */
+ RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) +
+ RGN_NR_BLOCKS (nr_regions - 1));
+}
+
+ /* Free data structures for region scheduling and emit final
+ statistics to the dump file.  */
+void
+sched_rgn_finish (void)
+{
+ /* Reposition the prologue and epilogue notes in case we moved the
+ prologue/epilogue insns.  */
+ if (reload_completed)
+ reposition_prologue_and_epilogue_notes ();
+
+ if (sched_verbose)
+ {
+ /* Interblock/speculative motion statistics are only meaningful
+ for the first (pre-reload) scheduling pass with interblock
+ scheduling enabled.  */
+ if (reload_completed == 0
+ && flag_schedule_interblock)
+ {
+ fprintf (sched_dump,
+ "\n;; Procedure interblock/speculative motions == %d/%d \n",
+ nr_inter, nr_spec);
+ }
+ else
+ /* Otherwise no interblock motions should have been recorded.  */
+ gcc_assert (nr_inter <= 0);
+ fprintf (sched_dump, "\n\n");
+ }
+
+ nr_regions = 0;
+
+ /* Release the region tables; they are reallocated the next time the
+ region scheduler is initialized.  */
+ free (rgn_table);
+ rgn_table = NULL;
+
+ free (rgn_bb_table);
+ rgn_bb_table = NULL;
+
+ free (block_to_bb);
+ block_to_bb = NULL;
+
+ free (containing_rgn);
+ containing_rgn = NULL;
+
+ free (ebb_head);
+ ebb_head = NULL;
+}
+
+ /* Setup global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCK to
+ point to the region RGN, and (re)build the EBB_HEAD map for it.  */
+void
+rgn_setup_region (int rgn)
+{
+ int bb;
+
+ /* Set variables for the current region.  */
+ current_nr_blocks = RGN_NR_BLOCKS (rgn);
+ current_blocks = RGN_BLOCKS (rgn);
+
+ /* EBB_HEAD is a region-scope structure. But we realloc it for
+ each region to save time/memory/something else.
+ See comments in add_block1, for what reasons we allocate +1 element.  */
+ ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1);
+ /* Note the <= : the extra element is filled in too.  NOTE(review):
+ presumably it acts as an end sentinel for the last ebb — confirm
+ against the add_block1 comments referenced above.  */
+ for (bb = 0; bb <= current_nr_blocks; bb++)
+ ebb_head[bb] = current_blocks + bb;
+}
+
+/* Compute instruction dependencies in region RGN. */
+void
+sched_rgn_compute_dependencies (int rgn)
+{
+ if (!RGN_DONT_CALC_DEPS (rgn))
+ {
+ int bb;
+
+ if (sel_sched_p ())
+ sched_emulate_haifa_p = 1;
+
+ init_deps_global ();
+
+ /* Initializations for region data dependence analysis. */
+ bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ init_deps (bb_deps + bb, false);
+
+ /* Initialize bitmap used in add_branch_dependences. */
+ insn_referenced = sbitmap_alloc (sched_max_luid);
+ sbitmap_zero (insn_referenced);
+
+ /* Compute backward dependencies. */
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ compute_block_dependences (bb);
+
+ sbitmap_free (insn_referenced);
+ free_pending_lists ();
+ finish_deps_global ();
+ free (bb_deps);
+
+ /* We don't want to recalculate this twice. */
+ RGN_DONT_CALC_DEPS (rgn) = 1;
+
+ if (sel_sched_p ())
+ sched_emulate_haifa_p = 0;