+ containing_rgn = NULL;
+
+ free (ebb_head);
+ ebb_head = NULL;
+}
+
+ /* Setup global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCK to
+ point to the region RGN. */
+ void
+ rgn_setup_region (int rgn)
+ {
+ int bb;
+
+ /* Set variables for the current region. */
+ current_nr_blocks = RGN_NR_BLOCKS (rgn);
+ current_blocks = RGN_BLOCKS (rgn);
+
+ /* EBB_HEAD is a region-scope structure. But we realloc it for
+ each region to save time/memory/something else.
+ See comments in add_block1, for what reasons we allocate +1 element. */
+ ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1);
+ /* Map each region-local block number BB to its global block index.
+ The loop runs to <= current_nr_blocks, so the extra (+1) element acts
+ as a sentinel one past the last block of the region. */
+ for (bb = 0; bb <= current_nr_blocks; bb++)
+ ebb_head[bb] = current_blocks + bb;
+ }
+
+ /* Compute instruction dependencies in region RGN.
+ No-op when RGN_DONT_CALC_DEPS is already set for RGN (dependencies were
+ computed earlier, or the selective scheduler computes them itself). */
+ void
+ sched_rgn_compute_dependencies (int rgn)
+ {
+ if (!RGN_DONT_CALC_DEPS (rgn))
+ {
+ int bb;
+
+ /* When running under the selective scheduler, set the flag that makes
+ the shared dependence-analysis code behave as in the Haifa scheduler
+ for the duration of this computation (restored below). */
+ if (sel_sched_p ())
+ sched_emulate_haifa_p = 1;
+
+ init_deps_global ();
+
+ /* Initializations for region data dependence analysis.
+ One deps context per block of the current region. */
+ bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ init_deps (bb_deps + bb, false);
+
+ /* Initialize bitmap used in add_branch_dependences. */
+ insn_referenced = sbitmap_alloc (sched_max_luid);
+ sbitmap_zero (insn_referenced);
+
+ /* Compute backward dependencies. */
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ compute_block_dependences (bb);
+
+ /* Release the per-block contexts and global dependence state allocated
+ above. */
+ sbitmap_free (insn_referenced);
+ free_pending_lists ();
+ finish_deps_global ();
+ free (bb_deps);
+
+ /* We don't want to recalculate this twice. */
+ RGN_DONT_CALC_DEPS (rgn) = 1;
+
+ if (sel_sched_p ())
+ sched_emulate_haifa_p = 0;
+ }
+ else
+ /* (This is a recovery block. It is always a single block region.)
+ OR (We use selective scheduling.) */
+ gcc_assert (current_nr_blocks == 1 || sel_sched_p ());
+ }
+
+ /* Init data structures local to region RGN. Interblock information
+ (probabilities, dominators, split edges) is only computed when the
+ region has more than one block; single-block regions need none of it.
+ NOTE(review): the original comment claimed a boolean return value, but
+ the function is void -- that comment was stale and has been dropped. */
+ void
+ sched_rgn_local_init (int rgn)
+ {
+ int bb;
+
+ /* Compute interblock info: probabilities, split-edges, dominators, etc. */
+ if (current_nr_blocks > 1)
+ {
+ basic_block block;
+ edge e;
+ edge_iterator ei;
+
+ prob = XNEWVEC (int, current_nr_blocks);
+
+ dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
+ sbitmap_vector_zero (dom, current_nr_blocks);
+
+ /* Use ->aux to implement EDGE_TO_BIT mapping. */
+ /* First pass: count the region's edges and assign each successor edge
+ of a region block a bit number via its ->aux field. */
+ rgn_nr_edges = 0;
+ FOR_EACH_BB (block)
+ {
+ if (CONTAINING_RGN (block->index) != rgn)
+ continue;
+ FOR_EACH_EDGE (e, ei, block->succs)
+ SET_EDGE_TO_BIT (e, rgn_nr_edges++);
+ }
+
+ /* Second pass: collect the same edges, in the same order, into
+ RGN_EDGES so bit number N indexes edge rgn_edges[N]. */
+ rgn_edges = XNEWVEC (edge, rgn_nr_edges);
+ rgn_nr_edges = 0;
+ FOR_EACH_BB (block)
+ {
+ if (CONTAINING_RGN (block->index) != rgn)
+ continue;
+ FOR_EACH_EDGE (e, ei, block->succs)
+ rgn_edges[rgn_nr_edges++] = e;
+ }
+
+ /* Split edges. One edge-indexed bitmap per block. */
+ pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
+ sbitmap_vector_zero (pot_split, current_nr_blocks);
+ ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
+ sbitmap_vector_zero (ancestor_edges, current_nr_blocks);
+
+ /* Compute probabilities, dominators, split_edges. */
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ compute_dom_prob_ps (bb);
+
+ /* Cleanup ->aux used for EDGE_TO_BIT mapping. */
+ /* We don't need them anymore. But we want to avoid duplication of
+ aux fields in the newly created edges. */
+ FOR_EACH_BB (block)
+ {
+ if (CONTAINING_RGN (block->index) != rgn)
+ continue;
+ FOR_EACH_EDGE (e, ei, block->succs)
+ e->aux = NULL;
+ }
+ }
+ }
+
+ /* Free data computed for the finished region.
+ Releases exactly what sched_rgn_local_init allocated; only meaningful
+ after that function ran for a multi-block region (see caller
+ sched_rgn_local_finish, which checks current_nr_blocks > 1). */
+ void
+ sched_rgn_local_free (void)
+ {
+ free (prob);
+ sbitmap_vector_free (dom);
+ sbitmap_vector_free (pot_split);
+ sbitmap_vector_free (ancestor_edges);
+ free (rgn_edges);
+ }
+
+ /* Finish work on the current region: free its local data structures,
+ but only when they were actually allocated (multi-block region) and
+ the selective scheduler is not in charge of them. */
+ void
+ sched_rgn_local_finish (void)
+ {
+ if (current_nr_blocks > 1 && !sel_sched_p ())
+ {
+ sched_rgn_local_free ();
+ }
+ }
+
+ /* Setup scheduler infos.
+ Start from the generic Haifa common hooks, override the region-specific
+ ones, and install the result as the active COMMON_SCHED_INFO. */
+ void
+ rgn_setup_common_sched_info (void)
+ {
+ memcpy (&rgn_common_sched_info, &haifa_common_sched_info,
+ sizeof (rgn_common_sched_info));
+
+ /* Region-scheduler overrides of the common hooks. */
+ rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg;
+ rgn_common_sched_info.add_block = rgn_add_block;
+ rgn_common_sched_info.estimate_number_of_insns
+ = rgn_estimate_number_of_insns;
+ rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS;
+
+ common_sched_info = &rgn_common_sched_info;
+ }
+
+/* Setup all *_sched_info structures (for the Haifa frontend
+ and for the dependence analysis) in the interblock scheduler. */
+void
+rgn_setup_sched_infos (void)
+{
+ if (!sel_sched_p ())
+ memcpy (&rgn_sched_deps_info, &rgn_const_sched_deps_info,
+ sizeof (rgn_sched_deps_info));
+ else
+ memcpy (&rgn_sched_deps_info, &rgn_const_sel_sched_deps_info,
+ sizeof (rgn_sched_deps_info));