/* Basic block reordering routines for the GNU compiler.
- Copyright (C) 2000, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010
+ Copyright (C) 2000, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011
Free Software Foundation, Inc.
This file is part of GCC.
#include "toplev.h" /* user_defined_section_attribute */
#include "tree-pass.h"
#include "df.h"
+#include "bb-reorder.h"
+#include "except.h"
/* The number of rounds. In most cases there will only be 4 rounds, but
when partitioning hot and cold basic blocks into separate sections of
#endif
+struct target_bb_reorder default_target_bb_reorder;
+#if SWITCHABLE_TARGET
+struct target_bb_reorder *this_target_bb_reorder = &default_target_bb_reorder;
+#endif
+
+#define uncond_jump_length \
+ (this_target_bb_reorder->x_uncond_jump_length)
+
/* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE. */
static int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0};
block the edge destination is not duplicated while connecting traces. */
#define DUPLICATION_THRESHOLD 100
-/* Length of unconditional jump instruction. */
-static int uncond_jump_length;
-
/* Structure to hold needed information for each basic block. */
typedef struct bbro_basic_block_data_def
{
static bool copy_bb_p (const_basic_block, int);
static int get_uncond_jump_length (void);
static bool push_to_next_round_p (const_basic_block, int, int, int, gcov_type);
-static void find_rarely_executed_basic_blocks_and_crossing_edges (edge **,
- int *,
- int *);
-static void add_labels_and_missing_jumps (edge *, int);
-static void add_reg_crossing_jump_notes (void);
-static void fix_up_fall_thru_edges (void);
-static void fix_edges_for_rarely_executed_code (edge *, int);
-static void fix_crossing_conditional_branches (void);
-static void fix_crossing_unconditional_branches (void);
\f
/* Check to see if bb should be pushed into the next round of trace
collections or not. Reasons for pushing the block forward are 1).
return length;
}
+/* Emit a barrier into the footer of BB.  The barrier must live in the
+   block's footer (not the main insn chain) so that cfglayout mode keeps
+   it attached to BB when blocks are reordered.  */
+
+static void
+emit_barrier_after_bb (basic_block bb)
+{
+  rtx barrier = emit_barrier_after (BB_END (bb));
+  /* Detach the barrier from the insn stream and park it in the footer.  */
+  bb->il.rtl->footer = unlink_insn_chain (barrier, barrier);
+}
+
+/* The landing pad OLD_LP, in block OLD_BB, has edges from both partitions.
+   Duplicate the landing pad and split the edges so that no EH edge
+   crosses partitions.  The copy is placed in the opposite partition from
+   OLD_BB, and all throwing predecessors in that partition are redirected
+   to the copy.  */
+
+static void
+fix_up_crossing_landing_pad (eh_landing_pad old_lp, basic_block old_bb)
+{
+  eh_landing_pad new_lp;
+  basic_block new_bb, last_bb, post_bb;
+  rtx new_label, jump, post_label;
+  unsigned new_partition;
+  edge_iterator ei;
+  edge e;
+
+  /* Generate the new landing-pad structure.  The copy shares the original
+     post-landing-pad label, so both pads funnel into the same handler.  */
+  new_lp = gen_eh_landing_pad (old_lp->region);
+  new_lp->post_landing_pad = old_lp->post_landing_pad;
+  new_lp->landing_pad = gen_label_rtx ();
+  LABEL_PRESERVE_P (new_lp->landing_pad) = 1;
+
+  /* Put appropriate instructions in new bb.  */
+  new_label = emit_label (new_lp->landing_pad);
+
+  expand_dw2_landing_pad_for_region (old_lp->region);
+
+  /* The old landing pad falls through into the post-landing-pad block;
+     the copy must reach it with an explicit jump instead.  */
+  post_bb = BLOCK_FOR_INSN (old_lp->landing_pad);
+  post_bb = single_succ (post_bb);
+  post_label = block_label (post_bb);
+  jump = emit_jump_insn (gen_jump (post_label));
+  JUMP_LABEL (jump) = post_label;
+
+  /* Create new basic block to be dest for lp.  */
+  last_bb = EXIT_BLOCK_PTR->prev_bb;
+  new_bb = create_basic_block (new_label, jump, last_bb);
+  /* Link the new block into the cfglayout chain after LAST_BB.  */
+  new_bb->aux = last_bb->aux;
+  last_bb->aux = new_bb;
+
+  emit_barrier_after_bb (new_bb);
+
+  make_edge (new_bb, post_bb, 0);
+
+  /* Make sure new bb is in the other partition.  */
+  new_partition = BB_PARTITION (old_bb);
+  new_partition ^= BB_HOT_PARTITION | BB_COLD_PARTITION;
+  BB_SET_PARTITION (new_bb, new_partition);
+
+  /* Fix up the edges.  Note that ei is only advanced when the edge is
+     left in place; redirect_edge_succ removes E from OLD_BB->preds, which
+     keeps the iterator valid without an explicit ei_next.  */
+  for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)) != NULL; )
+    if (BB_PARTITION (e->src) == new_partition)
+      {
+	rtx insn = BB_END (e->src);
+	rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
+
+	/* Retarget the throwing insn's EH-region note at the new pad.  */
+	gcc_assert (note != NULL);
+	gcc_checking_assert (INTVAL (XEXP (note, 0)) == old_lp->index);
+	XEXP (note, 0) = GEN_INT (new_lp->index);
+
+	/* Adjust the edge to the new destination.  */
+	redirect_edge_succ (e, new_bb);
+      }
+    else
+      ei_next (&ei);
+}
+
/* Find the basic blocks that are rarely executed and need to be moved to
a separate section of the .o file (to cut down on paging and improve
- cache locality). */
+ cache locality). Return a vector of all edges that cross. */
-static void
-find_rarely_executed_basic_blocks_and_crossing_edges (edge **crossing_edges,
- int *n_crossing_edges,
- int *max_idx)
+static VEC(edge, heap) *
+find_rarely_executed_basic_blocks_and_crossing_edges (void)
{
+ VEC(edge, heap) *crossing_edges = NULL;
basic_block bb;
edge e;
- int i;
edge_iterator ei;
/* Mark which partition (hot/cold) each basic block belongs in. */
-
FOR_EACH_BB (bb)
{
if (probably_never_executed_bb_p (bb))
BB_SET_PARTITION (bb, BB_HOT_PARTITION);
}
- /* Mark every edge that crosses between sections. */
-
- i = 0;
- FOR_EACH_BB (bb)
- FOR_EACH_EDGE (e, ei, bb->succs)
+  /* The format of .gcc_except_table does not allow landing pads to
+     be in a different partition than the throw.  Fix this by either
+     moving or duplicating the landing pads.  */
+ if (cfun->eh->lp_array)
{
- if (e->src != ENTRY_BLOCK_PTR
- && e->dest != EXIT_BLOCK_PTR
- && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
+ unsigned i;
+ eh_landing_pad lp;
+
+ FOR_EACH_VEC_ELT (eh_landing_pad, cfun->eh->lp_array, i, lp)
{
- e->flags |= EDGE_CROSSING;
- if (i == *max_idx)
+ bool all_same, all_diff;
+
+ if (lp == NULL
+ || lp->landing_pad == NULL_RTX
+ || !LABEL_P (lp->landing_pad))
+ continue;
+
+ all_same = all_diff = true;
+ bb = BLOCK_FOR_INSN (lp->landing_pad);
+ FOR_EACH_EDGE (e, ei, bb->preds)
{
- *max_idx *= 2;
- *crossing_edges = XRESIZEVEC (edge, *crossing_edges, *max_idx);
+ gcc_assert (e->flags & EDGE_EH);
+ if (BB_PARTITION (bb) == BB_PARTITION (e->src))
+ all_diff = false;
+ else
+ all_same = false;
}
- (*crossing_edges)[i++] = e;
+
+ if (all_same)
+ ;
+ else if (all_diff)
+ {
+ int which = BB_PARTITION (bb);
+ which ^= BB_HOT_PARTITION | BB_COLD_PARTITION;
+ BB_SET_PARTITION (bb, which);
+ }
+ else
+ fix_up_crossing_landing_pad (lp, bb);
}
- else
- e->flags &= ~EDGE_CROSSING;
}
- *n_crossing_edges = i;
+
+ /* Mark every edge that crosses between sections. */
+
+ FOR_EACH_BB (bb)
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ unsigned int flags = e->flags;
+
+ /* We should never have EDGE_CROSSING set yet. */
+ gcc_checking_assert ((flags & EDGE_CROSSING) == 0);
+
+ if (e->src != ENTRY_BLOCK_PTR
+ && e->dest != EXIT_BLOCK_PTR
+ && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
+ {
+ VEC_safe_push (edge, heap, crossing_edges, e);
+ flags |= EDGE_CROSSING;
+ }
+
+ /* Now that we've split eh edges as appropriate, allow landing pads
+ to be merged with the post-landing pads. */
+ flags &= ~EDGE_PRESERVE;
+
+ e->flags = flags;
+ }
+
+ return crossing_edges;
}
/* If any destination of a crossing edge does not have a label, add label;
- Convert any fall-through crossing edges (for blocks that do not contain
- a jump) to unconditional jumps. */
+ Convert any easy fall-through crossing edges to unconditional jumps. */
static void
-add_labels_and_missing_jumps (edge *crossing_edges, int n_crossing_edges)
+add_labels_and_missing_jumps (VEC(edge, heap) *crossing_edges)
{
- int i;
- basic_block src;
- basic_block dest;
- rtx label;
- rtx barrier;
- rtx new_jump;
+ size_t i;
+ edge e;
- for (i=0; i < n_crossing_edges; i++)
+ FOR_EACH_VEC_ELT (edge, crossing_edges, i, e)
{
- if (crossing_edges[i])
- {
- src = crossing_edges[i]->src;
- dest = crossing_edges[i]->dest;
+ basic_block src = e->src;
+ basic_block dest = e->dest;
+ rtx label, new_jump;
- /* Make sure dest has a label. */
+ if (dest == EXIT_BLOCK_PTR)
+ continue;
- if (dest && (dest != EXIT_BLOCK_PTR))
- {
- label = block_label (dest);
+ /* Make sure dest has a label. */
+ label = block_label (dest);
- /* Make sure source block ends with a jump. If the
- source block does not end with a jump it might end
- with a call_insn; this case will be handled in
- fix_up_fall_thru_edges function. */
+ /* Nothing to do for non-fallthru edges. */
+ if (src == ENTRY_BLOCK_PTR)
+ continue;
+ if ((e->flags & EDGE_FALLTHRU) == 0)
+ continue;
- if (src && (src != ENTRY_BLOCK_PTR))
- {
- if (!JUMP_P (BB_END (src)) && !block_ends_with_call_p (src))
- /* bb just falls through. */
- {
- /* make sure there's only one successor */
- gcc_assert (single_succ_p (src));
-
- /* Find label in dest block. */
- label = block_label (dest);
-
- new_jump = emit_jump_insn_after (gen_jump (label),
- BB_END (src));
- barrier = emit_barrier_after (new_jump);
- JUMP_LABEL (new_jump) = label;
- LABEL_NUSES (label) += 1;
- src->il.rtl->footer = unlink_insn_chain (barrier, barrier);
- /* Mark edge as non-fallthru. */
- crossing_edges[i]->flags &= ~EDGE_FALLTHRU;
- } /* end: 'if (GET_CODE ... ' */
- } /* end: 'if (src && src->index...' */
- } /* end: 'if (dest && dest->index...' */
- } /* end: 'if (crossing_edges[i]...' */
- } /* end for loop */
+      /* If the block does not end with a control flow insn, then we
+	 can trivially add a jump to the end to fix up the crossing.
+	 Otherwise the jump will have to go in a new bb, which will
+	 be handled by the fix_up_fall_thru_edges function.  */
+ if (control_flow_insn_p (BB_END (src)))
+ continue;
+
+ /* Make sure there's only one successor. */
+ gcc_assert (single_succ_p (src));
+
+ new_jump = emit_jump_insn_after (gen_jump (label), BB_END (src));
+ BB_END (src) = new_jump;
+ JUMP_LABEL (new_jump) = label;
+ LABEL_NUSES (label) += 1;
+
+ emit_barrier_after_bb (src);
+
+ /* Mark edge as non-fallthru. */
+ e->flags &= ~EDGE_FALLTHRU;
+ }
}
/* Find any bb's where the fall-through edge is a crossing edge (note that
int invert_worked;
rtx old_jump;
rtx fall_thru_label;
- rtx barrier;
FOR_EACH_BB (cur_bb)
{
fall_thru = succ2;
cond_jump = succ1;
}
- else if (!fall_thru && succ1 && block_ends_with_call_p (cur_bb))
- {
- edge e;
- edge_iterator ei;
-
- /* Find EDGE_CAN_FALLTHRU edge. */
- FOR_EACH_EDGE (e, ei, cur_bb->succs)
- if (e->flags & EDGE_CAN_FALLTHRU)
- {
- fall_thru = e;
- break;
- }
- }
+ else if (succ1
+ && (block_ends_with_call_p (cur_bb)
+ || can_throw_internal (BB_END (cur_bb))))
+ {
+ edge e;
+ edge_iterator ei;
+
+ /* Find EDGE_CAN_FALLTHRU edge. */
+ FOR_EACH_EDGE (e, ei, cur_bb->succs)
+ if (e->flags & EDGE_CAN_FALLTHRU)
+ {
+ fall_thru = e;
+ break;
+ }
+ }
if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR))
{
}
/* Add barrier after new jump */
-
- if (new_bb)
- {
- barrier = emit_barrier_after (BB_END (new_bb));
- new_bb->il.rtl->footer = unlink_insn_chain (barrier,
- barrier);
- }
- else
- {
- barrier = emit_barrier_after (BB_END (cur_bb));
- cur_bb->il.rtl->footer = unlink_insn_chain (barrier,
- barrier);
- }
+ emit_barrier_after_bb (new_bb ? new_bb : cur_bb);
}
}
}
{
basic_block cur_bb;
basic_block new_bb;
- basic_block last_bb;
basic_block dest;
edge succ1;
edge succ2;
rtx set_src;
rtx old_label = NULL_RTX;
rtx new_label;
- rtx new_jump;
- rtx barrier;
-
- last_bb = EXIT_BLOCK_PTR->prev_bb;
FOR_EACH_BB (cur_bb)
{
new_label = block_label (new_bb);
else
{
+ basic_block last_bb;
+ rtx new_jump;
+
/* Create new basic block to be dest for
conditional jump. */
- new_bb = create_basic_block (NULL, NULL, last_bb);
- new_bb->aux = last_bb->aux;
- last_bb->aux = new_bb;
- last_bb = new_bb;
/* Put appropriate instructions in new bb. */
new_label = gen_label_rtx ();
- emit_label_before (new_label, BB_HEAD (new_bb));
- BB_HEAD (new_bb) = new_label;
-
- if (GET_CODE (old_label) == LABEL_REF)
- {
- old_label = JUMP_LABEL (old_jump);
- new_jump = emit_jump_insn_after (gen_jump
- (old_label),
- BB_END (new_bb));
- }
- else
- {
- gcc_assert (HAVE_return
- && GET_CODE (old_label) == RETURN);
- new_jump = emit_jump_insn_after (gen_return (),
- BB_END (new_bb));
- }
+ emit_label (new_label);
- barrier = emit_barrier_after (new_jump);
+ gcc_assert (GET_CODE (old_label) == LABEL_REF);
+ old_label = JUMP_LABEL (old_jump);
+ new_jump = emit_jump_insn (gen_jump (old_label));
JUMP_LABEL (new_jump) = old_label;
- new_bb->il.rtl->footer = unlink_insn_chain (barrier,
- barrier);
+
+ last_bb = EXIT_BLOCK_PTR->prev_bb;
+ new_bb = create_basic_block (new_label, new_jump, last_bb);
+ new_bb->aux = last_bb->aux;
+ last_bb->aux = new_bb;
+
+ emit_barrier_after_bb (new_bb);
/* Make sure new bb is in same partition as source
of conditional branch. */
add_reg_note (BB_END (e->src), REG_CROSSING_JUMP, NULL_RTX);
}
-/* Hot and cold basic blocks are partitioned and put in separate
- sections of the .o file, to reduce paging and improve cache
- performance (hopefully). This can result in bits of code from the
- same function being widely separated in the .o file. However this
- is not obvious to the current bb structure. Therefore we must take
- care to ensure that: 1). There are no fall_thru edges that cross
- between sections; 2). For those architectures which have "short"
- conditional branches, all conditional branches that attempt to
- cross between sections are converted to unconditional branches;
- and, 3). For those architectures which have "short" unconditional
- branches, all unconditional branches that attempt to cross between
- sections are converted to indirect jumps.
-
- The code for fixing up fall_thru edges that cross between hot and
- cold basic blocks does so by creating new basic blocks containing
- unconditional branches to the appropriate label in the "other"
- section. The new basic block is then put in the same (hot or cold)
- section as the original conditional branch, and the fall_thru edge
- is modified to fall into the new basic block instead. By adding
- this level of indirection we end up with only unconditional branches
- crossing between hot and cold sections.
-
- Conditional branches are dealt with by adding a level of indirection.
- A new basic block is added in the same (hot/cold) section as the
- conditional branch, and the conditional branch is retargeted to the
- new basic block. The new basic block contains an unconditional branch
- to the original target of the conditional branch (in the other section).
-
- Unconditional branches are dealt with by converting them into
- indirect jumps. */
-
-static void
-fix_edges_for_rarely_executed_code (edge *crossing_edges,
- int n_crossing_edges)
-{
- /* Make sure the source of any crossing edge ends in a jump and the
- destination of any crossing edge has a label. */
-
- add_labels_and_missing_jumps (crossing_edges, n_crossing_edges);
-
- /* Convert all crossing fall_thru edges to non-crossing fall
- thrus to unconditional jumps (that jump to the original fall
- thru dest). */
-
- fix_up_fall_thru_edges ();
-
- /* If the architecture does not have conditional branches that can
- span all of memory, convert crossing conditional branches into
- crossing unconditional branches. */
-
- if (!HAS_LONG_COND_BRANCH)
- fix_crossing_conditional_branches ();
-
- /* If the architecture does not have unconditional branches that
- can span all of memory, convert crossing unconditional branches
- into indirect jumps. Since adding an indirect jump also adds
- a new register usage, update the register usage information as
- well. */
-
- if (!HAS_LONG_UNCOND_BRANCH)
- fix_crossing_unconditional_branches ();
-
- add_reg_crossing_jump_notes ();
-}
-
/* Verify, in the basic block chain, that there is at most one switch
between hot/cold partitions. This is modelled on
rtl_verify_flow_info_1, but it cannot go inside that function
rtx new_note;
int first_partition = 0;
- if (flag_reorder_blocks_and_partition)
- FOR_EACH_BB (bb)
+ if (!flag_reorder_blocks_and_partition)
+ return;
+
+ FOR_EACH_BB (bb)
{
if (!first_partition)
first_partition = BB_PARTITION (bb);
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func | TODO_verify_rtl_sharing,/* todo_flags_finish */
+ TODO_verify_rtl_sharing,/* todo_flags_finish */
}
};
if we could perform this optimization later in the compilation, but
unfortunately the fact that we may need to create indirect jumps
(through registers) requires that this optimization be performed
- before register allocation. */
+ before register allocation.
-static void
+ Hot and cold basic blocks are partitioned and put in separate
+ sections of the .o file, to reduce paging and improve cache
+ performance (hopefully). This can result in bits of code from the
+ same function being widely separated in the .o file. However this
+ is not obvious to the current bb structure. Therefore we must take
+ care to ensure that: 1). There are no fall_thru edges that cross
+ between sections; 2). For those architectures which have "short"
+ conditional branches, all conditional branches that attempt to
+ cross between sections are converted to unconditional branches;
+ and, 3). For those architectures which have "short" unconditional
+ branches, all unconditional branches that attempt to cross between
+ sections are converted to indirect jumps.
+
+ The code for fixing up fall_thru edges that cross between hot and
+ cold basic blocks does so by creating new basic blocks containing
+ unconditional branches to the appropriate label in the "other"
+ section. The new basic block is then put in the same (hot or cold)
+ section as the original conditional branch, and the fall_thru edge
+ is modified to fall into the new basic block instead. By adding
+ this level of indirection we end up with only unconditional branches
+ crossing between hot and cold sections.
+
+ Conditional branches are dealt with by adding a level of indirection.
+ A new basic block is added in the same (hot/cold) section as the
+ conditional branch, and the conditional branch is retargeted to the
+ new basic block. The new basic block contains an unconditional branch
+ to the original target of the conditional branch (in the other section).
+
+ Unconditional branches are dealt with by converting them into
+ indirect jumps. */
+
+static unsigned
partition_hot_cold_basic_blocks (void)
{
- edge *crossing_edges;
- int n_crossing_edges;
- int max_edges = 2 * last_basic_block;
+ VEC(edge, heap) *crossing_edges;
if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
- return;
+ return 0;
- crossing_edges = XCNEWVEC (edge, max_edges);
+ df_set_flags (DF_DEFER_INSN_RESCAN);
- find_rarely_executed_basic_blocks_and_crossing_edges (&crossing_edges,
- &n_crossing_edges,
- &max_edges);
+ crossing_edges = find_rarely_executed_basic_blocks_and_crossing_edges ();
+ if (crossing_edges == NULL)
+ return 0;
- if (n_crossing_edges > 0)
- fix_edges_for_rarely_executed_code (crossing_edges, n_crossing_edges);
+ /* Make sure the source of any crossing edge ends in a jump and the
+ destination of any crossing edge has a label. */
+ add_labels_and_missing_jumps (crossing_edges);
- free (crossing_edges);
+ /* Convert all crossing fall_thru edges to non-crossing fall
+ thrus to unconditional jumps (that jump to the original fall
+ thru dest). */
+ fix_up_fall_thru_edges ();
+
+ /* If the architecture does not have conditional branches that can
+ span all of memory, convert crossing conditional branches into
+ crossing unconditional branches. */
+ if (!HAS_LONG_COND_BRANCH)
+ fix_crossing_conditional_branches ();
+
+ /* If the architecture does not have unconditional branches that
+ can span all of memory, convert crossing unconditional branches
+ into indirect jumps. Since adding an indirect jump also adds
+ a new register usage, update the register usage information as
+ well. */
+ if (!HAS_LONG_UNCOND_BRANCH)
+ fix_crossing_unconditional_branches ();
+
+ add_reg_crossing_jump_notes ();
+
+ VEC_free (edge, heap, crossing_edges);
+
+ /* ??? FIXME: DF generates the bb info for a block immediately.
+ And by immediately, I mean *during* creation of the block.
+
+ #0 df_bb_refs_collect
+ #1 in df_bb_refs_record
+ #2 in create_basic_block_structure
+
+ Which means that the bb_has_eh_pred test in df_bb_refs_collect
+ will *always* fail, because no edges can have been added to the
+ block yet. Which of course means we don't add the right
+ artificial refs, which means we fail df_verify (much) later.
+
+ Cleanest solution would seem to make DF_DEFER_INSN_RESCAN imply
+ that we also shouldn't grab data from the new blocks those new
+ insns are in either. In this way one can create the block, link
+ it up properly, and have everything Just Work later, when deferred
+ insns are processed.
+
+ In the meantime, we have no other option but to throw away all
+ of the DF data and recompute it all. */
+ if (cfun->eh->lp_array)
+ {
+ df_finish_pass (true);
+ df_scan_alloc (NULL);
+ df_scan_blocks ();
+ /* Not all post-landing pads use all of the EH_RETURN_DATA_REGNO
+ data. We blindly generated all of them when creating the new
+ landing pad. Delete those assignments we don't use. */
+ df_set_flags (DF_LR_RUN_DCE);
+ df_analyze ();
+ }
+
+ return TODO_verify_flow | TODO_verify_rtl_sharing;
}
\f
static bool
{
if (targetm.cannot_modify_jumps_p ())
return false;
- return (optimize > 0);
+ /* Don't reorder blocks when optimizing for size because extra jump insns may
+ be created; also barrier may create extra padding.
+
+ More correctly we should have a block reordering mode that tried to
+ minimize the combined size of all the jumps. This would more or less
+ automatically remove extra jumps, but would also try to use more short
+ jumps instead of long jumps. */
+ if (!optimize_function_for_speed_p (cfun))
+ return false;
+ return (optimize > 0
+ && (flag_reorder_blocks || flag_reorder_blocks_and_partition));
}
splitting possibly introduced more crossjumping opportunities. */
cfg_layout_initialize (CLEANUP_EXPENSIVE);
- if ((flag_reorder_blocks || flag_reorder_blocks_and_partition)
- /* Don't reorder blocks when optimizing for size because extra jump insns may
- be created; also barrier may create extra padding.
-
- More correctly we should have a block reordering mode that tried to
- minimize the combined size of all the jumps. This would more or less
- automatically remove extra jumps, but would also try to use more short
- jumps instead of long jumps. */
- && optimize_function_for_speed_p (cfun))
- {
- reorder_basic_blocks ();
- cleanup_cfg (CLEANUP_EXPENSIVE);
- }
+ reorder_basic_blocks ();
+ cleanup_cfg (CLEANUP_EXPENSIVE);
FOR_EACH_BB (bb)
if (bb->next_bb != EXIT_BLOCK_PTR)
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func | TODO_verify_rtl_sharing,/* todo_flags_finish */
+ TODO_verify_rtl_sharing, /* todo_flags_finish */
}
};
sections of the .o file does not work well with linkonce or with
user defined section attributes. Don't call it if either case
arises. */
-
return (flag_reorder_blocks_and_partition
+ && optimize
+ /* See gate_handle_reorder_blocks. We should not partition if
+ we are going to omit the reordering. */
+ && optimize_function_for_speed_p (cfun)
&& !DECL_ONE_ONLY (current_function_decl)
&& !user_defined_section_attribute);
}
-/* Partition hot and cold basic blocks. */
-static unsigned int
-rest_of_handle_partition_blocks (void)
-{
- partition_hot_cold_basic_blocks ();
- return 0;
-}
-
struct rtl_opt_pass pass_partition_blocks =
{
{
RTL_PASS,
"bbpart", /* name */
gate_handle_partition_blocks, /* gate */
- rest_of_handle_partition_blocks, /* execute */
+ partition_hot_cold_basic_blocks, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func | TODO_verify_rtl_sharing/* todo_flags_finish */
+ 0 /* todo_flags_finish */
}
};