You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 59 Temple Place - Suite 330, Boston, MA
- 02111-1307, USA. */
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA. */
/* This (greedy) algorithm constructs traces in several rounds.
The construction starts from "seeds". The seed for the first round
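(For orientation, not part of the patch: after this change a trace is a
singly linked list of basic blocks threaded through the generic bb->aux
pointer instead of the removed rbi->next field.  A minimal sketch of
walking one trace, using the same first/last endpoints as the dumping
loop below:)

    static void
    walk_trace (basic_block first, basic_block last)
    {
      basic_block bb;

      /* Follow the aux chain up to LAST, then print LAST itself.  */
      for (bb = first; bb != last; bb = bb->aux)
        fprintf (stderr, "%d [%d] ", bb->index, bb->frequency);
      fprintf (stderr, "%d [%d]\n", last->index, last->frequency);
    }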
#include "tm_p.h"
#include "obstack.h"
#include "expr.h"
-#include "errors.h"
#include "params.h"
+#include "toplev.h"
+#include "tree-pass.h"
+
+#ifndef HAVE_conditional_execution
+#define HAVE_conditional_execution 0
+#endif
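+/* With the zero fallback above, HAVE_conditional_execution can be
+   tested in ordinary C conditionals (see rest_of_handle_reorder_blocks
+   below) rather than in #ifdef blocks; the dead arm folds away.  */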
/* The number of rounds. In most cases there will only be 4 rounds, but
when partitioning hot and cold basic blocks into separate sections of
basic_block bb;
fprintf (dump_file, "Trace %d (round %d): ", i + 1,
traces[i].round + 1);
- for (bb = traces[i].first; bb != traces[i].last; bb = bb->rbi->next)
+ for (bb = traces[i].first; bb != traces[i].last; bb = bb->aux)
fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency);
fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency);
}
FOR_EACH_EDGE (e, ei, bb->succs)
if (e->dest != EXIT_BLOCK_PTR
- && e->dest->rbi->visited != trace_n
+ && e->dest->il.rtl->visited != trace_n
&& (e->flags & EDGE_CAN_FALLTHRU)
&& !(e->flags & EDGE_COMPLEX))
{
if (is_preferred)
{
/* The best edge is preferred. */
- if (!e->dest->rbi->visited
+ if (!e->dest->il.rtl->visited
|| bbd[e->dest->index].start_of_trace >= 0)
{
/* The current edge E is also preferred. */
}
else
{
- if (!e->dest->rbi->visited
+ if (!e->dest->il.rtl->visited
|| bbd[e->dest->index].start_of_trace >= 0)
{
/* The current edge E is preferred. */
}
}
}
- bb = bb->rbi->next;
+ bb = bb->aux;
}
while (bb != back_edge->dest);
the trace. */
if (back_edge->dest == trace->first)
{
- trace->first = best_bb->rbi->next;
+ trace->first = best_bb->aux;
}
else
{
basic_block prev_bb;
for (prev_bb = trace->first;
- prev_bb->rbi->next != back_edge->dest;
- prev_bb = prev_bb->rbi->next)
+ prev_bb->aux != back_edge->dest;
+ prev_bb = prev_bb->aux)
;
- prev_bb->rbi->next = best_bb->rbi->next;
+ prev_bb->aux = best_bb->aux;
/* Try to get rid of uncond jump to cond jump. */
if (single_succ_p (prev_bb))
/* We have not found suitable loop tail so do no rotation. */
best_bb = back_edge->src;
}
- best_bb->rbi->next = NULL;
+ best_bb->aux = NULL;
return best_bb;
}
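(Illustrative, assuming the post-patch aux threading: the relinking in
rotate_loop above is ordinary singly-linked-list surgery.  Splicing a
block in after BB, as the trace-growing code later does, is just:)

    /* Link NEW_BB into the trace immediately after BB.  */
    new_bb->aux = bb->aux;
    bb->aux = new_bb;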
static void
mark_bb_visited (basic_block bb, int trace)
{
- bb->rbi->visited = trace;
+ bb->il.rtl->visited = trace;
if (bbd[bb->index].heap)
{
fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node);
if (e->dest == EXIT_BLOCK_PTR)
continue;
- if (e->dest->rbi->visited
- && e->dest->rbi->visited != *n_traces)
+ if (e->dest->il.rtl->visited
+ && e->dest->il.rtl->visited != *n_traces)
continue;
if (BB_PARTITION (e->dest) != BB_PARTITION (bb))
continue;
prob = e->probability;
- freq = EDGE_FREQUENCY (e);
+ freq = e->dest->frequency;
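+	  /* E->dest->frequency is the estimated execution frequency of
+	     the successor block itself; EDGE_FREQUENCY (e) would scale
+	     the source block's frequency by the branch probability.
+	     The edge-based value is still checked against exec_th
+	     below.  */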
/* The only sensible preference for a call instruction is the
fallthru edge. Don't bother selecting anything else. */
/* Edge that cannot be fallthru or improbable or infrequent
successor (i.e. it is unsuitable successor). */
if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX)
- || prob < branch_th || freq < exec_th || e->count < count_th)
+ || prob < branch_th || EDGE_FREQUENCY (e) < exec_th
+ || e->count < count_th)
continue;
/* If partitioning hot/cold basic blocks, don't consider edges
{
if (e == best_edge
|| e->dest == EXIT_BLOCK_PTR
- || e->dest->rbi->visited)
+ || e->dest->il.rtl->visited)
continue;
key = bb_to_key (e->dest);
if (best_edge) /* Suitable successor was found. */
{
- if (best_edge->dest->rbi->visited == *n_traces)
+ if (best_edge->dest->il.rtl->visited == *n_traces)
{
/* We do nothing with one basic block loops. */
if (best_edge->dest != bb)
"Rotating loop %d - %d\n",
best_edge->dest->index, bb->index);
}
- bb->rbi->next = best_edge->dest;
+ bb->aux = best_edge->dest;
bbd[best_edge->dest->index].in_trace =
(*n_traces) - 1;
bb = rotate_loop (best_edge, trace, *n_traces);
if (e != best_edge
&& (e->flags & EDGE_CAN_FALLTHRU)
&& !(e->flags & EDGE_COMPLEX)
- && !e->dest->rbi->visited
+ && !e->dest->il.rtl->visited
&& single_pred_p (e->dest)
&& !(e->flags & EDGE_CROSSING)
&& single_succ_p (e->dest)
break;
}
- bb->rbi->next = best_edge->dest;
+ bb->aux = best_edge->dest;
bbd[best_edge->dest->index].in_trace = (*n_traces) - 1;
bb = best_edge->dest;
}
FOR_EACH_EDGE (e, ei, bb->succs)
{
if (e->dest == EXIT_BLOCK_PTR
- || e->dest->rbi->visited)
+ || e->dest->il.rtl->visited)
continue;
if (bbd[e->dest->index].heap)
{
basic_block new_bb;
- new_bb = duplicate_block (old_bb, e);
+ new_bb = duplicate_block (old_bb, e, bb);
BB_COPY_PARTITION (new_bb, old_bb);
gcc_assert (e->dest == new_bb);
- gcc_assert (!e->dest->rbi->visited);
+ gcc_assert (!e->dest->il.rtl->visited);
if (dump_file)
fprintf (dump_file,
"Duplicated bb %d (created bb %d)\n",
old_bb->index, new_bb->index);
- new_bb->rbi->visited = trace;
- new_bb->rbi->next = bb->rbi->next;
- bb->rbi->next = new_bb;
+ new_bb->il.rtl->visited = trace;
+ new_bb->aux = bb->aux;
+ bb->aux = new_bb;
if (new_bb->index >= array_size || last_basic_block > array_size)
{
else
count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD;
- connected = xcalloc (n_traces, sizeof (bool));
+ connected = XCNEWVEC (bool, n_traces);
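+  /* XCNEWVEC (T, N) is libiberty's type-safe wrapper around
+     xcalloc (N, sizeof (T)); the XNEWVEC uses further down wrap
+     xmalloc the same way.  */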
last_trace = -1;
current_pass = 1;
current_partition = BB_PARTITION (traces[0].first);
}
if (best)
{
- best->src->rbi->next = best->dest;
+ best->src->aux = best->dest;
t2 = bbd[best->src->index].end_of_trace;
connected[t2] = true;
}
if (last_trace >= 0)
- traces[last_trace].last->rbi->next = traces[t2].first;
+ traces[last_trace].last->aux = traces[t2].first;
last_trace = t;
/* Find the successor traces. */
best->src->index, best->dest->index);
}
t = bbd[best->dest->index].start_of_trace;
- traces[last_trace].last->rbi->next = traces[t].first;
+ traces[last_trace].last->aux = traces[t].first;
connected[t] = true;
last_trace = t;
}
if (next_bb && next_bb != EXIT_BLOCK_PTR)
{
t = bbd[next_bb->index].start_of_trace;
- traces[last_trace].last->rbi->next = traces[t].first;
+ traces[last_trace].last->aux = traces[t].first;
connected[t] = true;
last_trace = t;
}
basic_block bb;
fprintf (dump_file, "Final order:\n");
- for (bb = traces[0].first; bb; bb = bb->rbi->next)
+ for (bb = traces[0].first; bb; bb = bb->aux)
fprintf (dump_file, "%d ", bb->index);
fprintf (dump_file, "\n");
fflush (dump_file);
return false;
if (code_may_grow && maybe_hot_bb_p (bb))
- max_size *= 8;
+ max_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
FOR_BB_INSNS (bb, insn)
{
if (INSN_P (insn))
- size += get_attr_length (insn);
+ size += get_attr_min_length (insn);
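+	/* get_attr_min_length gives the minimum length of the insn over
+	   its alternatives, so SIZE is a lower bound on the code that
+	   would be duplicated.  */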
}
if (size <= max_size)
label = emit_label_before (gen_label_rtx (), get_insns ());
jump = emit_jump_insn (gen_jump (label));
- length = get_attr_length (jump);
+ length = get_attr_min_length (jump);
delete_insn (jump);
delete_insn (label);
/* Mark every edge that crosses between sections. */
i = 0;
- if (targetm.have_named_sections)
+ FOR_EACH_BB (bb)
+ FOR_EACH_EDGE (e, ei, bb->succs)
{
- FOR_EACH_BB (bb)
- FOR_EACH_EDGE (e, ei, bb->succs)
- {
- if (e->src != ENTRY_BLOCK_PTR
- && e->dest != EXIT_BLOCK_PTR
- && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
- {
- e->flags |= EDGE_CROSSING;
- if (i == *max_idx)
- {
- *max_idx *= 2;
- crossing_edges = xrealloc (crossing_edges,
- (*max_idx) * sizeof (edge));
- }
- crossing_edges[i++] = e;
- }
- else
- e->flags &= ~EDGE_CROSSING;
- }
+ if (e->src != ENTRY_BLOCK_PTR
+ && e->dest != EXIT_BLOCK_PTR
+ && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
+ {
+ e->flags |= EDGE_CROSSING;
+ if (i == *max_idx)
+ {
+ *max_idx *= 2;
+ crossing_edges = xrealloc (crossing_edges,
+ (*max_idx) * sizeof (edge));
+ }
+ crossing_edges[i++] = e;
+ }
+ else
+ e->flags &= ~EDGE_CROSSING;
}
*n_crossing_edges = i;
}
barrier = emit_barrier_after (new_jump);
JUMP_LABEL (new_jump) = label;
LABEL_NUSES (label) += 1;
- src->rbi->footer = unlink_insn_chain (barrier, barrier);
+ src->il.rtl->footer = unlink_insn_chain (barrier, barrier);
/* Mark edge as non-fallthru. */
crossing_edges[i]->flags &= ~EDGE_FALLTHRU;
} /* end: 'if (GET_CODE ... ' */
the cond jump does). */
if (!cond_jump_crosses
- && cur_bb->rbi->next == cond_jump->dest)
+ && cur_bb->aux == cond_jump->dest)
{
/* Find label in fall_thru block. We've already added
any missing labels, so there must be one. */
if (new_bb)
{
- new_bb->rbi->next = cur_bb->rbi->next;
- cur_bb->rbi->next = new_bb;
+ new_bb->aux = cur_bb->aux;
+ cur_bb->aux = new_bb;
/* Make sure new fall-through bb is in same
partition as bb it's falling through from. */
if (new_bb)
{
barrier = emit_barrier_after (BB_END (new_bb));
- new_bb->rbi->footer = unlink_insn_chain (barrier,
+ new_bb->il.rtl->footer = unlink_insn_chain (barrier,
barrier);
}
else
{
barrier = emit_barrier_after (BB_END (cur_bb));
- cur_bb->rbi->footer = unlink_insn_chain (barrier,
+ cur_bb->il.rtl->footer = unlink_insn_chain (barrier,
barrier);
}
}
conditional jump. */
new_bb = create_basic_block (NULL, NULL, last_bb);
- new_bb->rbi->next = last_bb->rbi->next;
- last_bb->rbi->next = new_bb;
+ new_bb->aux = last_bb->aux;
+ last_bb->aux = new_bb;
prev_bb = last_bb;
last_bb = new_bb;
/* Update register liveness information. */
-      new_bb->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
-      new_bb->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
- COPY_REG_SET (new_bb->global_live_at_end,
- prev_bb->global_live_at_end);
- COPY_REG_SET (new_bb->global_live_at_start,
- prev_bb->global_live_at_end);
+      new_bb->il.rtl->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
+      new_bb->il.rtl->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
+ COPY_REG_SET (new_bb->il.rtl->global_live_at_end,
+ prev_bb->il.rtl->global_live_at_end);
+ COPY_REG_SET (new_bb->il.rtl->global_live_at_start,
+ prev_bb->il.rtl->global_live_at_end);
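+	  /* The new block will contain only a jump, so the registers
+	     live on entry to it and on exit from it are exactly those
+	     live at the end of PREV_BB.  */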
/* Put appropriate instructions in new bb. */
barrier = emit_barrier_after (new_jump);
JUMP_LABEL (new_jump) = old_label;
- new_bb->rbi->footer = unlink_insn_chain (barrier,
+ new_bb->il.rtl->footer = unlink_insn_chain (barrier,
barrier);
/* Make sure new bb is in same partition as source
for (cur_insn = indirect_jump_sequence; cur_insn;
cur_insn = NEXT_INSN (cur_insn))
{
- BLOCK_FOR_INSN (cur_insn) = cur_bb;
+ if (!BARRIER_P (cur_insn))
+ BLOCK_FOR_INSN (cur_insn) = cur_bb;
if (JUMP_P (cur_insn))
jump_insn = cur_insn;
}
fix_up_fall_thru_edges ();
- /* Only do the parts necessary for writing separate sections if
- the target architecture has the ability to write separate sections
- (i.e. it has named sections). Otherwise, the hot/cold partitioning
- information will be used when reordering blocks to try to put all
- the hot blocks together, then all the cold blocks, but no actual
- section partitioning will be done. */
-
- if (targetm.have_named_sections)
- {
- /* If the architecture does not have conditional branches that can
- span all of memory, convert crossing conditional branches into
- crossing unconditional branches. */
+ /* If the architecture does not have conditional branches that can
+ span all of memory, convert crossing conditional branches into
+ crossing unconditional branches. */
- if (!HAS_LONG_COND_BRANCH)
- fix_crossing_conditional_branches ();
+ if (!HAS_LONG_COND_BRANCH)
+ fix_crossing_conditional_branches ();
- /* If the architecture does not have unconditional branches that
- can span all of memory, convert crossing unconditional branches
- into indirect jumps. Since adding an indirect jump also adds
- a new register usage, update the register usage information as
- well. */
-
- if (!HAS_LONG_UNCOND_BRANCH)
- {
- fix_crossing_unconditional_branches ();
- reg_scan (get_insns(), max_reg_num ());
- }
-
- add_reg_crossing_jump_notes ();
+ /* If the architecture does not have unconditional branches that
+ can span all of memory, convert crossing unconditional branches
+ into indirect jumps. Since adding an indirect jump also adds
+ a new register usage, update the register usage information as
+ well. */
+
+ if (!HAS_LONG_UNCOND_BRANCH)
+ {
+ fix_crossing_unconditional_branches ();
+      reg_scan (get_insns (), max_reg_num ());
}
+
+ add_reg_crossing_jump_notes ();
}
/* Verify, in the basic block chain, that there is at most one switch
{
if (switched_sections)
{
- error ("Multiple hot/cold transitions found (bb %i)",
+ error ("multiple hot/cold transitions found (bb %i)",
bb->index);
err = 1;
}
}
}
- if (err)
- internal_error ("verify_hot_cold_block_grouping failed");
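+  /* gcc_assert reports an internal compiler error, with file and line
+     information, when the condition fails; it subsumes the explicit
+     internal_error call.  */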
+  gcc_assert (!err);
}
/* Reorder basic blocks. The main entry point to this file. FLAGS is
int i;
struct trace *traces;
- if (n_basic_blocks <= 1)
+ if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
return;
if (targetm.cannot_modify_jumps_p ())
return;
- timevar_push (TV_REORDER_BLOCKS);
-
cfg_layout_initialize (flags);
set_edge_can_fallthru_flag ();
/* We need to know some information for each basic block. */
array_size = GET_ARRAY_SIZE (last_basic_block);
- bbd = xmalloc (array_size * sizeof (bbro_basic_block_data));
+ bbd = XNEWVEC (bbro_basic_block_data, array_size);
for (i = 0; i < array_size; i++)
{
bbd[i].start_of_trace = -1;
bbd[i].node = NULL;
}
- traces = xmalloc (n_basic_blocks * sizeof (struct trace));
+ traces = XNEWVEC (struct trace, n_basic_blocks);
n_traces = 0;
find_traces (&n_traces, traces);
connect_traces (n_traces, traces);
  free (bbd);
if (dump_file)
- dump_flow_info (dump_file);
+ dump_flow_info (dump_file, dump_flags);
cfg_layout_finalize ();
- verify_hot_cold_block_grouping ();
-
- timevar_pop (TV_REORDER_BLOCKS);
+ if (flag_reorder_blocks_and_partition)
+ verify_hot_cold_block_grouping ();
}
/* Determine which partition the first basic block in the function
encountering this note will make the compiler switch between the
hot and cold text sections. */
-void
+static void
insert_section_boundary_note (void)
{
basic_block bb;
rtx new_note;
int first_partition = 0;
- if (flag_reorder_blocks_and_partition
- && targetm.have_named_sections)
+ if (flag_reorder_blocks_and_partition)
FOR_EACH_BB (bb)
{
if (!first_partition)
which can seriously pessimize code with many computed jumps in the source
code, such as interpreters. See e.g. PR15242. */
-void
+static bool
+gate_duplicate_computed_gotos (void)
+{
+ return (optimize > 0 && flag_expensive_optimizations && !optimize_size);
+}
+
+
+static unsigned int
duplicate_computed_gotos (void)
{
basic_block bb, new_bb;
bitmap candidates;
int max_size;
- if (n_basic_blocks <= 1)
- return;
+ if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
+ return 0;
if (targetm.cannot_modify_jumps_p ())
- return;
-
- timevar_push (TV_REORDER_BLOCKS);
+ return 0;
cfg_layout_initialize (0);
/* Build the reorder chain for the original order of blocks. */
if (bb->next_bb != EXIT_BLOCK_PTR)
- bb->rbi->next = bb->next_bb;
+ bb->aux = bb->next_bb;
/* Obviously the block has to end in a computed jump. */
if (!computed_jump_p (BB_END (bb)))
FOR_BB_INSNS (bb, insn)
if (INSN_P (insn))
{
- size += get_attr_length (insn);
+ size += get_attr_min_length (insn);
if (size > max_size)
break;
}
/* Duplicate computed gotos. */
FOR_EACH_BB (bb)
{
- if (bb->rbi->visited)
+ if (bb->il.rtl->visited)
continue;
- bb->rbi->visited = 1;
+ bb->il.rtl->visited = 1;
/* BB must have one outgoing edge. That edge must not lead to
the exit block or the next block.
if (!bitmap_bit_p (candidates, single_succ (bb)->index))
continue;
- new_bb = duplicate_block (single_succ (bb), single_succ_edge (bb));
- new_bb->rbi->next = bb->rbi->next;
- bb->rbi->next = new_bb;
- new_bb->rbi->visited = 1;
+ new_bb = duplicate_block (single_succ (bb), single_succ_edge (bb), bb);
+ new_bb->aux = bb->aux;
+ bb->aux = new_bb;
+ new_bb->il.rtl->visited = 1;
}
done:
cfg_layout_finalize ();
BITMAP_FREE (candidates);
-
- timevar_pop (TV_REORDER_BLOCKS);
+ return 0;
}
+struct tree_opt_pass pass_duplicate_computed_gotos =
+{
+ "compgotos", /* name */
+ gate_duplicate_computed_gotos, /* gate */
+ duplicate_computed_gotos, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_REORDER_BLOCKS, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func, /* todo_flags_finish */
+ 0 /* letter */
+};
+
+
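(For context, a sketch of how such a pass is wired into the pass
manager; the exact position within init_optimization_passes is
illustrative only:)

    /* In passes.c, init_optimization_passes ():  */
    NEXT_PASS (pass_duplicate_computed_gotos);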
/* This function is the main 'entrance' for the optimization that
partitions hot and cold basic blocks into separate sections of the
.o file (to improve performance and cache locality). Ideally it
(through registers) requires that this optimization be performed
before register allocation. */
-void
+static void
partition_hot_cold_basic_blocks (void)
{
basic_block cur_bb;
int n_crossing_edges;
int max_edges = 2 * last_basic_block;
- if (n_basic_blocks <= 1)
+ if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
return;
- crossing_edges = xcalloc (max_edges, sizeof (edge));
+ crossing_edges = XCNEWVEC (edge, max_edges);
cfg_layout_initialize (0);
FOR_EACH_BB (cur_bb)
- if (cur_bb->index >= 0
- && cur_bb->next_bb->index >= 0)
- cur_bb->rbi->next = cur_bb->next_bb;
+ if (cur_bb->index >= NUM_FIXED_BLOCKS
+ && cur_bb->next_bb->index >= NUM_FIXED_BLOCKS)
+ cur_bb->aux = cur_bb->next_bb;
find_rarely_executed_basic_blocks_and_crossing_edges (crossing_edges,
&n_crossing_edges,
cfg_layout_finalize();
}
+\f
+static bool
+gate_handle_reorder_blocks (void)
+{
+ return (optimize > 0);
+}
+
+
+/* Reorder basic blocks. */
+static unsigned int
+rest_of_handle_reorder_blocks (void)
+{
+ bool changed;
+ unsigned int liveness_flags;
+
+ /* Last attempt to optimize CFG, as scheduling, peepholing and insn
+ splitting possibly introduced more crossjumping opportunities. */
+ liveness_flags = (!HAVE_conditional_execution ? CLEANUP_UPDATE_LIFE : 0);
+ changed = cleanup_cfg (CLEANUP_EXPENSIVE | liveness_flags);
+
+ if (flag_sched2_use_traces && flag_schedule_insns_after_reload)
+ {
+ timevar_push (TV_TRACER);
+ tracer (liveness_flags);
+ timevar_pop (TV_TRACER);
+ }
+
+ if (flag_reorder_blocks || flag_reorder_blocks_and_partition)
+ reorder_basic_blocks (liveness_flags);
+ if (flag_reorder_blocks || flag_reorder_blocks_and_partition
+ || (flag_sched2_use_traces && flag_schedule_insns_after_reload))
+ changed |= cleanup_cfg (CLEANUP_EXPENSIVE | liveness_flags);
+
+  /* On conditional execution targets we cannot update the life info
+     cheaply, so we defer the updating until after both cleanups.  This
+     may lose some cases but should not be terribly bad.  */
+ if (changed && HAVE_conditional_execution)
+ update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES,
+ PROP_DEATH_NOTES);
+
+ /* Add NOTE_INSN_SWITCH_TEXT_SECTIONS notes. */
+ insert_section_boundary_note ();
+ return 0;
+}
+
+struct tree_opt_pass pass_reorder_blocks =
+{
+ "bbro", /* name */
+ gate_handle_reorder_blocks, /* gate */
+ rest_of_handle_reorder_blocks, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_REORDER_BLOCKS, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func, /* todo_flags_finish */
+ 'B' /* letter */
+};
+
+static bool
+gate_handle_partition_blocks (void)
+{
+ /* The optimization to partition hot/cold basic blocks into separate
+ sections of the .o file does not work well with linkonce or with
+ user defined section attributes. Don't call it if either case
+ arises. */
+
+ return (flag_reorder_blocks_and_partition
+ && !DECL_ONE_ONLY (current_function_decl)
+ && !user_defined_section_attribute);
+}
+
+/* Partition hot and cold basic blocks. */
+static unsigned int
+rest_of_handle_partition_blocks (void)
+{
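+  /* Partitioning may need fresh registers: when !HAS_LONG_UNCOND_BRANCH,
+     crossing unconditional jumps are rewritten as indirect jumps through
+     a pseudo.  Permit new pseudos here and rebuild register life
+     information afterwards.  */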
+ no_new_pseudos = 0;
+ partition_hot_cold_basic_blocks ();
+ allocate_reg_life_data ();
+ update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES,
+ PROP_LOG_LINKS | PROP_REG_INFO | PROP_DEATH_NOTES);
+ no_new_pseudos = 1;
+ return 0;
+}
+
+struct tree_opt_pass pass_partition_blocks =
+{
+ "bbpart", /* name */
+ gate_handle_partition_blocks, /* gate */
+ rest_of_handle_partition_blocks, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_REORDER_BLOCKS, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func, /* todo_flags_finish */
+ 0 /* letter */
+};
+
+