/* Basic block reordering routines for the GNU compiler.
- Copyright (C) 2000, 2002, 2003, 2004, 2005, 2006, 2007
+ Copyright (C) 2000, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
basic_block bb;
fprintf (dump_file, "Trace %d (round %d): ", i + 1,
traces[i].round + 1);
- for (bb = traces[i].first; bb != traces[i].last; bb = bb->aux)
+ for (bb = traces[i].first; bb != traces[i].last; bb = (basic_block) bb->aux)
fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency);
fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency);
}
}
}
}
- bb = bb->aux;
+ bb = (basic_block) bb->aux;
}
while (bb != back_edge->dest);
the trace. */
if (back_edge->dest == trace->first)
{
- trace->first = best_bb->aux;
+ trace->first = (basic_block) best_bb->aux;
}
else
{
for (prev_bb = trace->first;
prev_bb->aux != back_edge->dest;
- prev_bb = prev_bb->aux)
+ prev_bb = (basic_block) prev_bb->aux)
;
prev_bb->aux = best_bb->aux;
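
The new (basic_block) casts are needed because the aux field of
struct basic_block_def is a plain void *, which this pass reuses to
chain the blocks of a trace together.  C converts void * to any
object pointer implicitly, but C++ does not, so the casts prepare the
file for building GCC with a C++ compiler; the same applies to the
void * returned by fibheap_extract_min below.  A minimal sketch of
the chaining idiom, with a hypothetical helper name for illustration:

  /* Walk a trace whose blocks are linked through the void * aux
     field; the last block's aux is assumed to be NULL.  */
  static void
  walk_trace (basic_block first)
  {
    basic_block bb;

    for (bb = first; bb; bb = (basic_block) bb->aux)
      {
        /* Visit BB here.  */
      }
  }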
fibheapkey_t key;
edge_iterator ei;
- bb = fibheap_extract_min (*heap);
+ bb = (basic_block) fibheap_extract_min (*heap);
bbd[bb->index].heap = NULL;
bbd[bb->index].node = NULL;
/* The loop has less than 4 iterations. */
if (single_succ_p (bb)
- && copy_bb_p (best_edge->dest, !optimize_size))
+ && copy_bb_p (best_edge->dest,
+ optimize_edge_for_speed_p (best_edge)))
{
bb = copy_bb (best_edge->dest, best_edge, bb,
*n_traces);
new_size = MAX (last_basic_block, new_bb->index + 1);
new_size = GET_ARRAY_SIZE (new_size);
- bbd = xrealloc (bbd, new_size * sizeof (bbro_basic_block_data));
+ bbd = XRESIZEVEC (bbro_basic_block_data, bbd, new_size);
for (i = array_size; i < new_size; i++)
{
bbd[i].start_of_trace = -1;
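
XRESIZEVEC is the libiberty allocation macro that carries the element
type, so the result comes back already cast and the size arithmetic
cannot be mistyped.  It expands along these lines (a sketch; the real
definition lives in include/libiberty.h):

  #define XRESIZEVEC(T, P, N) \
    ((T *) xrealloc ((void *) (P), sizeof (T) * (N)))

As with the aux casts above, the typed result is also what lets the
file compile as C++, where the void * returned by xrealloc does not
convert implicitly.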
edge is traversed frequently enough. */
if (try_copy
&& copy_bb_p (best->dest,
- !optimize_size
+ optimize_edge_for_speed_p (best)
&& EDGE_FREQUENCY (best) >= freq_threshold
&& best->count >= count_threshold))
{
basic_block bb;
fprintf (dump_file, "Final order:\n");
- for (bb = traces[0].first; bb; bb = bb->aux)
+ for (bb = traces[0].first; bb; bb = (basic_block) bb->aux)
fprintf (dump_file, "%d ", bb->index);
fprintf (dump_file, "\n");
fflush (dump_file);
if (EDGE_COUNT (bb->succs) > 8)
return false;
- if (code_may_grow && maybe_hot_bb_p (bb))
+ if (code_may_grow && optimize_bb_for_speed_p (bb))
max_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
FOR_BB_INSNS (bb, insn)
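
Throughout the patch, blanket !optimize_size and maybe_hot_bb_p tests
give way to the finer-grained predicates from predict.c
(optimize_bb_for_speed_p, optimize_edge_for_speed_p,
optimize_function_for_speed_p and their *_for_size_p duals).  These
combine the global -Os setting with profile data, so a cold block
inside a function compiled at -O2 is still treated as size-optimized.
A simplified sketch of the basic-block pair (the real versions in
predict.c also consult function-level hotness):

  bool
  optimize_bb_for_size_p (const_basic_block bb)
  {
    return optimize_function_for_size_p (cfun) || !maybe_hot_bb_p (bb);
  }

  bool
  optimize_bb_for_speed_p (const_basic_block bb)
  {
    return !optimize_bb_for_size_p (bb);
  }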
if (i == *max_idx)
{
*max_idx *= 2;
- *crossing_edges = xrealloc (*crossing_edges,
- (*max_idx) * sizeof (edge));
+ *crossing_edges = XRESIZEVEC (edge, *crossing_edges, *max_idx);
}
(*crossing_edges)[i++] = e;
}
}
}
-/* This function checks the destination blockof a "crossing jump" to
+/* This function checks the destination block of a "crossing jump" to
see if it has any crossing predecessors that begin with a code label
and end with an unconditional jump. If so, it returns that predecessor
block. (This is to avoid creating lots of new basic blocks that all
FOR_EACH_EDGE (e, ei, bb->succs)
if ((e->flags & EDGE_CROSSING)
&& JUMP_P (BB_END (e->src)))
- REG_NOTES (BB_END (e->src)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
- NULL_RTX,
- REG_NOTES (BB_END
- (e->src)));
+ add_reg_note (BB_END (e->src), REG_CROSSING_JUMP, NULL_RTX);
}
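
add_reg_note centralizes the push-a-note pattern that used to be
spelled out with gen_rtx_EXPR_LIST at every call site.  For an
EXPR_LIST-style note such as REG_CROSSING_JUMP, the call above has
the same effect as the code it replaces; schematically:

  /* Sketch of what add_reg_note (insn, kind, datum) amounts to here;
     not a verbatim copy of its implementation.  */
  static void
  add_reg_note_sketch (rtx insn, enum reg_note kind, rtx datum)
  {
    REG_NOTES (insn) = gen_rtx_EXPR_LIST (kind, datum, REG_NOTES (insn));
  }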
/* Hot and cold basic blocks are partitioned and put in separate
{
if (targetm.cannot_modify_jumps_p ())
return false;
- return (optimize > 0 && flag_expensive_optimizations && !optimize_size);
+ return (optimize > 0 && flag_expensive_optimizations);
}
|| single_pred_p (single_succ (bb)))
continue;
+ if (!optimize_bb_for_size_p (bb))
+ continue;
+
/* The successor block has to be a duplication candidate. */
if (!bitmap_bit_p (candidates, single_succ (bb)->index))
continue;
return 0;
}
-struct tree_opt_pass pass_duplicate_computed_gotos =
+struct rtl_opt_pass pass_duplicate_computed_gotos =
{
+ {
+ RTL_PASS,
"compgotos", /* name */
gate_duplicate_computed_gotos, /* gate */
duplicate_computed_gotos, /* execute */
0, /* properties_destroyed */
0, /* todo_flags_start */
TODO_dump_func | TODO_verify_rtl_sharing,/* todo_flags_finish */
- 0 /* letter */
+ }
};
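
The pass descriptors switch from struct tree_opt_pass to struct
rtl_opt_pass: the pass manager now embeds a common struct opt_pass in
per-IR wrapper types tagged with an explicit pass type (RTL_PASS
here), and the old trailing one-letter field used for dump-file
naming is dropped in favor of automatic numbering.  Simplified from
tree-pass.h:

  enum opt_pass_type
  { GIMPLE_PASS, RTL_PASS, SIMPLE_IPA_PASS, IPA_PASS };

  struct opt_pass
  {
    enum opt_pass_type type;    /* Which IR the pass works on.  */
    const char *name;
    bool (*gate) (void);
    unsigned int (*execute) (void);
    /* ... pass chaining, timevar id, properties, todo flags ...  */
  };

  struct rtl_opt_pass
  {
    struct opt_pass pass;
  };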
splitting possibly introduced more crossjumping opportunities. */
cfg_layout_initialize (CLEANUP_EXPENSIVE);
- if (flag_reorder_blocks || flag_reorder_blocks_and_partition)
+ if ((flag_reorder_blocks || flag_reorder_blocks_and_partition)
+ /* Don't reorder blocks when optimizing for size because extra jump insns may
+      be created; also a barrier may create extra padding.
+
+      More correctly, we should have a block reordering mode that tries to
+ minimize the combined size of all the jumps. This would more or less
+ automatically remove extra jumps, but would also try to use more short
+ jumps instead of long jumps. */
+ && optimize_function_for_speed_p (cfun))
{
reorder_basic_blocks ();
cleanup_cfg (CLEANUP_EXPENSIVE);
return 0;
}
-struct tree_opt_pass pass_reorder_blocks =
+struct rtl_opt_pass pass_reorder_blocks =
{
+ {
+ RTL_PASS,
"bbro", /* name */
gate_handle_reorder_blocks, /* gate */
rest_of_handle_reorder_blocks, /* execute */
0, /* properties_destroyed */
0, /* todo_flags_start */
TODO_dump_func | TODO_verify_rtl_sharing,/* todo_flags_finish */
- 'B' /* letter */
+ }
};
static bool
return 0;
}
-struct tree_opt_pass pass_partition_blocks =
+struct rtl_opt_pass pass_partition_blocks =
{
+ {
+ RTL_PASS,
"bbpart", /* name */
gate_handle_partition_blocks, /* gate */
rest_of_handle_partition_blocks, /* execute */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func | TODO_verify_rtl_sharing,/* todo_flags_finish */
- 0 /* letter */
+ TODO_dump_func | TODO_verify_rtl_sharing/* todo_flags_finish */
+ }
};
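
The extra level of braces in the initializers fills in the embedded
struct opt_pass, and consumers reach the descriptor through that
member.  A sketch of how passes.c picks these up, assuming the
GCC 4.4 NEXT_PASS scheme:

  /* In init_optimization_passes, each descriptor is linked into the
     pass list through its embedded opt_pass.  */
  #define NEXT_PASS(PASS)  (p = next_pass_1 (p, &((PASS).pass)))
  ...
  NEXT_PASS (pass_reorder_blocks);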