+ if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
+ return 0;
+
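+  /* Queue DF insn rescans instead of performing them immediately,
+     since the CFG is heavily rewritten below.  */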
+ df_set_flags (DF_DEFER_INSN_RESCAN);
+
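+  /* Mark each basic block as belonging to the hot or the cold
+     partition, and collect the edges that cross between the two;
+     if there are no crossing edges there is nothing to fix up.  */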
+ crossing_edges = find_rarely_executed_basic_blocks_and_crossing_edges ();
+ if (crossing_edges == NULL)
+ return 0;
+
+ /* Make sure the source of any crossing edge ends in a jump and the
+ destination of any crossing edge has a label. */
+ add_labels_and_missing_jumps (crossing_edges);
+
+  /* Convert all crossing fall-through edges to non-crossing edges by
+     replacing each fall-through with an unconditional jump to the
+     original fall-through destination.  */
+ fix_up_fall_thru_edges ();
+
+ /* If the architecture does not have conditional branches that can
+ span all of memory, convert crossing conditional branches into
+ crossing unconditional branches. */
+ if (!HAS_LONG_COND_BRANCH)
+ fix_crossing_conditional_branches ();
+
+ /* If the architecture does not have unconditional branches that
+ can span all of memory, convert crossing unconditional branches
+ into indirect jumps. Since adding an indirect jump also adds
+ a new register usage, update the register usage information as
+ well. */
+ if (!HAS_LONG_UNCOND_BRANCH)
+ fix_crossing_unconditional_branches ();
+
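+  /* Mark every jump that still crosses between sections with a
+     REG_CROSSING_JUMP note, so that later passes know it may span
+     sections.  */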
+ add_reg_crossing_jump_notes ();
+
+ /* Clear bb->aux fields that the above routines were using. */
+ clear_aux_for_blocks ();
+
+ VEC_free (edge, heap, crossing_edges);
+
+ /* ??? FIXME: DF generates the bb info for a block immediately.
+ And by immediately, I mean *during* creation of the block.
+
+ #0 df_bb_refs_collect
+ #1 in df_bb_refs_record
+ #2 in create_basic_block_structure
+
+ Which means that the bb_has_eh_pred test in df_bb_refs_collect
+ will *always* fail, because no edges can have been added to the
+ block yet. Which of course means we don't add the right
+ artificial refs, which means we fail df_verify (much) later.
+
+     The cleanest solution would seem to be to make DF_DEFER_INSN_RESCAN
+     also imply that we shouldn't grab data from the new blocks that
+     contain those new insns.  That way one can create the block, link
+     it up properly, and have everything Just Work later, when the
+     deferred insns are processed.
+
+ In the meantime, we have no other option but to throw away all
+ of the DF data and recompute it all. */
+ if (cfun->eh->lp_array)
+ {
+ df_finish_pass (true);
+ df_scan_alloc (NULL);
+ df_scan_blocks ();
+ /* Not all post-landing pads use all of the EH_RETURN_DATA_REGNO
+ data. We blindly generated all of them when creating the new
+ landing pad. Delete those assignments we don't use. */
+ df_set_flags (DF_LR_RUN_DCE);
+ df_analyze ();
+ }
+
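+  /* Have the pass manager verify the CFG and RTL sharing afterwards.  */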
+ return TODO_verify_flow | TODO_verify_rtl_sharing;
+}
+\f
+static bool
+gate_handle_reorder_blocks (void)
+{
+ if (targetm.cannot_modify_jumps_p ())
+ return false;
+  /* Don't reorder blocks when optimizing for size, because extra jump
+     insns may be created and a barrier may create extra padding.
+
+     More correctly, we should have a block-reordering mode that tries to
+     minimize the combined size of all the jumps.  This would more or less
+     automatically remove extra jumps, but would also try to use more
+     short jumps instead of long jumps.  */
+ if (!optimize_function_for_speed_p (cfun))
+ return false;
+ return (optimize > 0
+ && (flag_reorder_blocks || flag_reorder_blocks_and_partition));
+}
+
+
+/* Reorder basic blocks. */
+static unsigned int
+rest_of_handle_reorder_blocks (void)
+{
+ basic_block bb;
+
+  /* Last attempt to optimize the CFG, as scheduling, peepholing and
+     insn splitting may have introduced more crossjumping
+     opportunities.  */
+ cfg_layout_initialize (CLEANUP_EXPENSIVE);
+
+ reorder_basic_blocks ();
+ cleanup_cfg (CLEANUP_EXPENSIVE);
+
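+  /* Record the chosen block order in the aux fields so that
+     cfg_layout_finalize preserves it when leaving cfg_layout mode.  */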
+ FOR_EACH_BB (bb)
+ if (bb->next_bb != EXIT_BLOCK_PTR)
+ bb->aux = bb->next_bb;
+ cfg_layout_finalize ();
+
+ /* Add NOTE_INSN_SWITCH_TEXT_SECTIONS notes. */
+ insert_section_boundary_note ();
+ return 0;
+}
+
+struct rtl_opt_pass pass_reorder_blocks =
+{
+ {
+ RTL_PASS,
+ "bbro", /* name */
+ gate_handle_reorder_blocks, /* gate */
+ rest_of_handle_reorder_blocks, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_REORDER_BLOCKS, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_verify_rtl_sharing, /* todo_flags_finish */
+ }
+};
+
+static bool
+gate_handle_partition_blocks (void)
+{
+  /* The optimization to partition hot/cold basic blocks into separate
+     sections of the .o file does not work well with linkonce functions
+     or with user-defined section attributes.  Don't call it if either
+     case arises.  */
+ return (flag_reorder_blocks_and_partition
+ && optimize
+ /* See gate_handle_reorder_blocks. We should not partition if
+ we are going to omit the reordering. */
+ && optimize_function_for_speed_p (cfun)
+ && !DECL_ONE_ONLY (current_function_decl)
+ && !user_defined_section_attribute);