+ init_deps_global ();
+
+ /* Initializations for region data dependence analysis. */
+ bb_deps = XNEWVEC (struct deps, current_nr_blocks);
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ init_deps (bb_deps + bb, false);
+
+ /* Initialize bitmap used in add_branch_dependences. */
+ insn_referenced = sbitmap_alloc (sched_max_luid);
+ sbitmap_zero (insn_referenced);
+
+ /* Compute backward dependencies. */
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ compute_block_dependences (bb);
+
+ sbitmap_free (insn_referenced);
+ free_pending_lists ();
+ finish_deps_global ();
+ free (bb_deps);
+
+ /* We don't want to recalculate this twice. */
+ RGN_DONT_CALC_DEPS (rgn) = 1;
+
+ if (sel_sched_p ())
+ sched_emulate_haifa_p = 0;
+ }
+ else
+ /* (This is a recovery block. It is always a single block region.)
+ OR (We use selective scheduling.) */
+ gcc_assert (current_nr_blocks == 1 || sel_sched_p ());
+}
+
+/* Initialize the interblock data structures for region RGN:
+   per-block probabilities, dominator bitmaps, split-edge bitmaps and
+   the region edge array.  A single-block region needs none of this,
+   so the function is then a no-op.  (Returns nothing; an earlier
+   comment claiming a boolean result was stale.)  */
+void
+sched_rgn_local_init (int rgn)
+{
+ int bb;
+
+ /* Compute interblock info: probabilities, split-edges, dominators, etc. */
+ if (current_nr_blocks > 1)
+ {
+ basic_block block;
+ edge e;
+ edge_iterator ei;
+
+ /* Reaching probabilities, filled per block by compute_dom_prob_ps.  */
+ prob = XNEWVEC (int, current_nr_blocks);
+
+ /* Dominator sets: one bit per block of the region.  */
+ dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
+ sbitmap_vector_zero (dom, current_nr_blocks);
+
+ /* Use ->aux to implement EDGE_TO_BIT mapping.  First pass only
+ counts the region's edges while assigning each a bit number.  */
+ rgn_nr_edges = 0;
+ FOR_EACH_BB (block)
+ {
+ if (CONTAINING_RGN (block->index) != rgn)
+ continue;
+ FOR_EACH_EDGE (e, ei, block->succs)
+ SET_EDGE_TO_BIT (e, rgn_nr_edges++);
+ }
+
+ /* Second pass records the edges themselves, indexed by the bit
+ numbers just assigned.  */
+ rgn_edges = XNEWVEC (edge, rgn_nr_edges);
+ rgn_nr_edges = 0;
+ FOR_EACH_BB (block)
+ {
+ if (CONTAINING_RGN (block->index) != rgn)
+ continue;
+ FOR_EACH_EDGE (e, ei, block->succs)
+ rgn_edges[rgn_nr_edges++] = e;
+ }
+
+ /* Split edges: one bit per region edge, per block.  */
+ pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
+ sbitmap_vector_zero (pot_split, current_nr_blocks);
+ ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
+ sbitmap_vector_zero (ancestor_edges, current_nr_blocks);
+
+ /* Compute probabilities, dominators, split_edges. */
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ compute_dom_prob_ps (bb);
+
+ /* Cleanup ->aux used for EDGE_TO_BIT mapping. */
+ /* We don't need them anymore. But we want to avoid duplication of
+ aux fields in the newly created edges. */
+ FOR_EACH_BB (block)
+ {
+ if (CONTAINING_RGN (block->index) != rgn)
+ continue;
+ FOR_EACH_EDGE (e, ei, block->succs)
+ e->aux = NULL;
+ }
+ }
+}
+
+/* Release the interblock data structures allocated by
+   sched_rgn_local_init: the region edge array, the split-edge and
+   dominator bitmap vectors, and the probability array.  */
+void
+sched_rgn_local_free (void)
+{
+  /* The releases are independent; free in reverse allocation order.  */
+  free (rgn_edges);
+  sbitmap_vector_free (ancestor_edges);
+  sbitmap_vector_free (pot_split);
+  sbitmap_vector_free (dom);
+  free (prob);
+}
+
+/* Tear down per-region data when the region is done.  Only a
+   multi-block region scheduled without selective scheduling has
+   anything to free here; otherwise the data is presumably owned
+   elsewhere (see callers of sched_rgn_local_free).  */
+void
+sched_rgn_local_finish (void)
+{
+  if (current_nr_blocks <= 1 || sel_sched_p ())
+    return;
+
+  sched_rgn_local_free ();
+}
+
+/* Install the common scheduler hooks for the region scheduler:
+   start from the Haifa defaults, then override the entries the
+   region scheduler implements itself.  */
+void
+rgn_setup_common_sched_info (void)
+{
+  /* Inherit every hook we don't override.  */
+  memcpy (&rgn_common_sched_info, &haifa_common_sched_info,
+	  sizeof (rgn_common_sched_info));
+
+  /* Region-specific overrides.  */
+  rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS;
+  rgn_common_sched_info.estimate_number_of_insns
+    = rgn_estimate_number_of_insns;
+  rgn_common_sched_info.add_block = rgn_add_block;
+  rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg;
+
+  /* Publish the table.  */
+  common_sched_info = &rgn_common_sched_info;
+}
+
+/* Set up all *_sched_info structures (for the Haifa frontend and for
+   the dependence analysis) in the interblock scheduler.  */
+void
+rgn_setup_sched_infos (void)
+{
+  /* Haifa frontend hooks.  */
+  memcpy (&rgn_sched_info, &rgn_const_sched_info, sizeof (rgn_sched_info));
+  current_sched_info = &rgn_sched_info;
+
+  /* Dependence-analysis hooks; selective scheduling uses its own
+     constant table.  */
+  memcpy (&rgn_sched_deps_info,
+	  sel_sched_p ()
+	  ? &rgn_const_sel_sched_deps_info
+	  : &rgn_const_sched_deps_info,
+	  sizeof (rgn_sched_deps_info));
+  sched_deps_info = &rgn_sched_deps_info;
+}
+
+/* The one entry point in this file. */
+void
+schedule_insns (void)
+{
+ int rgn;
+
+ /* Taking care of this degenerate case makes the rest of
+ this code simpler. */
+ if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ return;
+
+ rgn_setup_common_sched_info ();
+ rgn_setup_sched_infos ();
+
+ haifa_sched_init ();
+ sched_rgn_init (reload_completed);
+
+ bitmap_initialize (&not_in_df, 0);
+ bitmap_clear (&not_in_df);
+
+ /* Schedule every region in the subroutine. */
+ for (rgn = 0; rgn < nr_regions; rgn++)
+ if (dbg_cnt (sched_region))
+ schedule_region (rgn);
+
+ /* Clean up. */
+ sched_rgn_finish ();