int n_calls_crossed;
+ /* Frequency of a reg tied to given qty crossing a CALL_INSN. */
+
+ int freq_calls_crossed;
+
/* Number of times a reg tied to given qty lives across a CALL_INSN
that might throw. */
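The new field parallels n_calls_crossed the same way REG_FREQ_CALLS_CROSSED
parallels REG_N_CALLS_CROSSED in regs.h: it accumulates call crossings
weighted by basic-block execution frequency instead of keeping a static
count.  A minimal sketch of the intended bookkeeping (note_call_crossing
is hypothetical, not part of the patch):

  /* Hypothetical illustration only: record one call crossing for
     quantity Q occurring in basic block BB.  */
  static void
  note_call_crossing (struct qty *q, basic_block bb)
  {
    q->n_calls_crossed += 1;                 /* static crossing count */
    q->freq_calls_crossed += bb->frequency;  /* frequency-weighted count */
  }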
static int contains_replace_regs (rtx);
static int memref_referenced_p (rtx, rtx);
static int memref_used_between_p (rtx, rtx, rtx);
-static void update_equiv_regs (void);
static void no_equiv (rtx, const_rtx, void *);
-static void block_alloc (int);
+static void block_alloc (basic_block);
static int qty_sugg_compare (int, int);
static int qty_sugg_compare_1 (const void *, const void *);
static int qty_compare (int, int);
static int qty_compare_1 (const void *, const void *);
-static int combine_regs (rtx, rtx, int, int, rtx, int);
+static int combine_regs (rtx, rtx, int, int, rtx);
static int reg_meets_class_p (int, enum reg_class);
static void update_qty_class (int, int);
static void reg_is_set (rtx, const_rtx, void *);
static void reg_is_born (rtx, int);
static void wipe_dead_reg (rtx, int);
static int find_free_reg (enum reg_class, enum machine_mode, int, int, int,
- int, int);
+ int, int, basic_block);
static void mark_life (int, enum machine_mode, int);
static void post_mark_life (int, enum machine_mode, int, int, int);
-static int no_conflict_p (rtx, rtx, rtx);
static int requires_inout (const char *);
\f
/* Allocate a new quantity (new within current basic block)
qty[qtyno].mode = mode;
qty[qtyno].birth = birth;
qty[qtyno].n_calls_crossed = REG_N_CALLS_CROSSED (regno);
+ qty[qtyno].freq_calls_crossed = REG_FREQ_CALLS_CROSSED (regno);
qty[qtyno].n_throwing_calls_crossed = REG_N_THROWING_CALLS_CROSSED (regno);
qty[qtyno].min_class = reg_preferred_class (regno);
qty[qtyno].alternate_class = reg_alternate_class (regno);
next_qty = 0;
- block_alloc (b->index);
+ block_alloc (b);
}
free (qty);
return 1;
if (CALL_P (insn) && ! MEM_READONLY_P (memref)
- && ! CONST_OR_PURE_CALL_P (insn))
+ && ! RTL_CONST_OR_PURE_CALL_P (insn))
return 0;
note_stores (PATTERN (insn), validate_equiv_mem_from_store, NULL);
case CONST:
case CONST_INT:
case CONST_DOUBLE:
+ case CONST_FIXED:
case CONST_VECTOR:
case SYMBOL_REF:
case LABEL_REF:
case LABEL_REF:
case SYMBOL_REF:
case CONST_DOUBLE:
+ case CONST_FIXED:
case CONST_VECTOR:
case PC:
case CC0:
case LABEL_REF:
case SYMBOL_REF:
case CONST_DOUBLE:
+ case CONST_FIXED:
case CONST_VECTOR:
case PC:
case CC0:
return 1;
/* Nonconst functions may access memory. */
- if (CALL_P (insn)
- && (! CONST_OR_PURE_CALL_P (insn)
- || pure_call_p (insn)))
+ if (CALL_P (insn) && ! RTL_CONST_CALL_P (insn))
return 1;
}
into the using insn. If it succeeds, we can eliminate the register
completely.
- Initialize the REG_EQUIV_INIT array of initializing insns. */
+ Initialize the REG_EQUIV_INIT array of initializing insns.
+
+ Return nonzero if jump label rebuilding should be done. */
-static void
+int
update_equiv_regs (void)
{
rtx insn;
bitmap cleared_regs;
reg_equiv = XCNEWVEC (struct equivalence, max_regno);
- reg_equiv_init = ggc_alloc_cleared (max_regno * sizeof (rtx));
+ reg_equiv_init = GGC_CNEWVEC (rtx, max_regno);
reg_equiv_init_size = max_regno;
init_alias_analysis ();
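GGC_CNEWVEC is ggc.h's typed, cleared-allocation wrapper, so this is the
same GC allocation as before with the cast and sizeof folded into the
macro; its definition is approximately:

  /* From ggc.h (approximate form).  */
  #define GGC_CNEWVEC(T, N) ((T *) ggc_alloc_cleared (sizeof (T) * (N)))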
new_insn = emit_insn_before (PATTERN (equiv_insn), insn);
REG_NOTES (new_insn) = REG_NOTES (equiv_insn);
REG_NOTES (equiv_insn) = 0;
+ /* Rescan it to process the notes. */
+ df_insn_rescan (new_insn);
/* Make sure this insn is recognized before
reload begins, otherwise
REG_BASIC_BLOCK (regno) = bb->index;
REG_N_CALLS_CROSSED (regno) = 0;
+ REG_FREQ_CALLS_CROSSED (regno) = 0;
REG_N_THROWING_CALLS_CROSSED (regno) = 0;
REG_LIVE_LENGTH (regno) = 2;
if (!bitmap_empty_p (cleared_regs))
FOR_EACH_BB (bb)
{
- bitmap_and_compl_into (DF_RA_LIVE_IN (bb), cleared_regs);
- if (DF_RA_LIVE_TOP (bb))
- bitmap_and_compl_into (DF_RA_LIVE_TOP (bb), cleared_regs);
- bitmap_and_compl_into (DF_RA_LIVE_OUT (bb), cleared_regs);
+ bitmap_and_compl_into (DF_LIVE_IN (bb), cleared_regs);
+ bitmap_and_compl_into (DF_LIVE_OUT (bb), cleared_regs);
bitmap_and_compl_into (DF_LR_IN (bb), cleared_regs);
- if (DF_LR_TOP (bb))
- bitmap_and_compl_into (DF_LR_TOP (bb), cleared_regs);
bitmap_and_compl_into (DF_LR_OUT (bb), cleared_regs);
}
end_alias_analysis ();
free (reg_equiv);
+ return recorded_label_ref;
}
/* Mark REG as having no known equivalence.
Only the pseudos that die but once can be handled. */
static void
-block_alloc (int b)
+block_alloc (basic_block b)
{
int i, q;
rtx insn;
- rtx note, hard_reg;
+ rtx hard_reg;
int insn_number = 0;
int insn_count = 0;
int max_uid = get_max_uid ();
int *qty_order;
- int no_conflict_combined_regno = -1;
+ df_ref *def_rec;
/* Count the instructions in the basic block. */
- insn = BB_END (BASIC_BLOCK (b));
+ insn = BB_END (b);
while (1)
{
if (!NOTE_P (insn))
++insn_count;
gcc_assert (insn_count <= max_uid);
}
- if (insn == BB_HEAD (BASIC_BLOCK (b)))
+ if (insn == BB_HEAD (b))
break;
insn = PREV_INSN (insn);
}
/* Initialize table of hardware registers currently live. */
- REG_SET_TO_HARD_REG_SET (regs_live, DF_LR_TOP (BASIC_BLOCK (b)));
+ REG_SET_TO_HARD_REG_SET (regs_live, DF_LR_IN (b));
+
+ /* This is conservative, as this would include registers that are
+ artificial-def'ed-but-not-used. However, artificial-defs are
+ rare, and such uninitialized use is rarer still, and the chance
+ of this having any performance impact is even less, while the
+ benefit is not having to compute and keep the TOP set around. */
+ for (def_rec = df_get_artificial_defs (b->index); *def_rec; def_rec++)
+ {
+ int regno = DF_REF_REGNO (*def_rec);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ SET_HARD_REG_BIT (regs_live, regno);
+ }
/* This loop scans the instructions of the basic block
and assigns quantities to registers.
It computes which registers to tie. */
- insn = BB_HEAD (BASIC_BLOCK (b));
+ insn = BB_HEAD (b);
while (1)
{
if (!NOTE_P (insn))
if (INSN_P (insn))
{
- rtx link, set;
+ rtx link;
int win = 0;
rtx r0, r1 = NULL_RTX;
int combined_regno = -1;
/* Avoid making a call-saved register unnecessarily
clobbered. */
- hard_reg = get_hard_reg_initial_reg (cfun, r1);
+ hard_reg = get_hard_reg_initial_reg (r1);
if (hard_reg != NULL_RTX)
{
if (REG_P (hard_reg)
if (REG_P (r1) || GET_CODE (r1) == SUBREG)
win = combine_regs (r1, r0, may_save_copy,
- insn_number, insn, 0);
+ insn_number, insn);
}
if (win)
break;
}
}
- /* Recognize an insn sequence with an ultimate result
- which can safely overlap one of the inputs.
- The sequence begins with a CLOBBER of its result,
- and ends with an insn that copies the result to itself
- and has a REG_EQUAL note for an equivalent formula.
- That note indicates what the inputs are.
- The result and the input can overlap if each insn in
- the sequence either doesn't mention the input
- or has a REG_NO_CONFLICT note to inhibit the conflict.
-
- We do the combining test at the CLOBBER so that the
- destination register won't have had a quantity number
- assigned, since that would prevent combining. */
-
- if (optimize
- && GET_CODE (PATTERN (insn)) == CLOBBER
- && (r0 = XEXP (PATTERN (insn), 0),
- REG_P (r0))
- && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
- && XEXP (link, 0) != 0
- && NONJUMP_INSN_P (XEXP (link, 0))
- && (set = single_set (XEXP (link, 0))) != 0
- && SET_DEST (set) == r0 && SET_SRC (set) == r0
- && (note = find_reg_note (XEXP (link, 0), REG_EQUAL,
- NULL_RTX)) != 0)
- {
- if (r1 = XEXP (note, 0), REG_P (r1)
- /* Check that we have such a sequence. */
- && no_conflict_p (insn, r0, r1))
- win = combine_regs (r1, r0, 1, insn_number, insn, 1);
- else if (GET_RTX_FORMAT (GET_CODE (XEXP (note, 0)))[0] == 'e'
- && (r1 = XEXP (XEXP (note, 0), 0),
- REG_P (r1) || GET_CODE (r1) == SUBREG)
- && no_conflict_p (insn, r0, r1))
- win = combine_regs (r1, r0, 0, insn_number, insn, 1);
-
- /* Here we care if the operation to be computed is
- commutative. */
- else if (COMMUTATIVE_P (XEXP (note, 0))
- && (r1 = XEXP (XEXP (note, 0), 1),
- (REG_P (r1) || GET_CODE (r1) == SUBREG))
- && no_conflict_p (insn, r0, r1))
- win = combine_regs (r1, r0, 0, insn_number, insn, 1);
-
- /* If we did combine something, show the register number
- in question so that we know to ignore its death. */
- if (win)
- no_conflict_combined_regno = REGNO (r1);
- }
-
/* If registers were just tied, set COMBINED_REGNO
to the number of the register used in this insn
that was tied to the register set in this insn.
combined_regno = REGNO (r1);
}
- /* Mark the death of everything that dies in this instruction,
- except for anything that was just combined. */
+ /* Mark the death of everything that dies in this instruction. */
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_DEAD
&& REG_P (XEXP (link, 0))
- && combined_regno != (int) REGNO (XEXP (link, 0))
- && (no_conflict_combined_regno != (int) REGNO (XEXP (link, 0))
- || ! find_reg_note (insn, REG_NO_CONFLICT,
- XEXP (link, 0))))
+ && combined_regno != (int) REGNO (XEXP (link, 0)))
wipe_dead_reg (XEXP (link, 0), 0);
/* Allocate qty numbers for all registers local to this block
if (REG_NOTE_KIND (link) == REG_UNUSED
&& REG_P (XEXP (link, 0)))
wipe_dead_reg (XEXP (link, 0), 1);
-
- /* If this is an insn that has a REG_RETVAL note pointing at a
- CLOBBER insn, we have reached the end of a REG_NO_CONFLICT
- block, so clear any register number that combined within it. */
- if ((note = find_reg_note (insn, REG_RETVAL, NULL_RTX)) != 0
- && NONJUMP_INSN_P (XEXP (note, 0))
- && GET_CODE (PATTERN (XEXP (note, 0))) == CLOBBER)
- no_conflict_combined_regno = -1;
}
/* Set the registers live after INSN_NUMBER. Note that we never
IOR_HARD_REG_SET (regs_live_at[2 * insn_number], regs_live);
IOR_HARD_REG_SET (regs_live_at[2 * insn_number + 1], regs_live);
- if (insn == BB_END (BASIC_BLOCK (b)))
+ if (insn == BB_END (b))
break;
insn = NEXT_INSN (insn);
q = qty_order[i];
if (qty_phys_num_sugg[q] != 0 || qty_phys_num_copy_sugg[q] != 0)
qty[q].phys_reg = find_free_reg (qty[q].min_class, qty[q].mode, q,
- 0, 1, qty[q].birth, qty[q].death);
+ 0, 1, qty[q].birth, qty[q].death, b);
else
qty[q].phys_reg = -1;
}
a scheduling pass after reload and we are not optimizing
for code size. */
if (flag_schedule_insns_after_reload && dbg_cnt (local_alloc_for_sched)
- && !optimize_size
+ && optimize_bb_for_speed_p (b)
&& !SMALL_REGISTER_CLASSES)
{
qty[q].phys_reg = find_free_reg (qty[q].min_class,
qty[q].mode, q, 0, 0,
- fake_birth, fake_death);
+ fake_birth, fake_death, b);
if (qty[q].phys_reg >= 0)
continue;
}
#endif
qty[q].phys_reg = find_free_reg (qty[q].min_class,
qty[q].mode, q, 0, 0,
- qty[q].birth, qty[q].death);
+ qty[q].birth, qty[q].death, b);
if (qty[q].phys_reg >= 0)
continue;
}
#ifdef INSN_SCHEDULING
/* Similarly, avoid false dependencies. */
if (flag_schedule_insns_after_reload && dbg_cnt (local_alloc_for_sched)
- && !optimize_size
+ && optimize_bb_for_speed_p (b)
&& !SMALL_REGISTER_CLASSES
&& qty[q].alternate_class != NO_REGS)
qty[q].phys_reg = find_free_reg (qty[q].alternate_class,
qty[q].mode, q, 0, 0,
- fake_birth, fake_death);
+ fake_birth, fake_death, b);
#endif
if (qty[q].alternate_class != NO_REGS)
qty[q].phys_reg = find_free_reg (qty[q].alternate_class,
qty[q].mode, q, 0, 0,
- qty[q].birth, qty[q].death);
+ qty[q].birth, qty[q].death, b);
}
}
If we really combined them, we could lose if the pseudo lives
across an insn that clobbers the hard reg (eg, movmem).
- ALREADY_DEAD is nonzero if USEDREG is known to be dead even though
- there is no REG_DEAD note on INSN. This occurs during the processing
- of REG_NO_CONFLICT blocks.
-
MAY_SAVE_COPY is nonzero if this insn is simply copying USEDREG to
SETREG or if the input and output must share a register.
In that case, we record a hard reg suggestion in QTY_PHYS_COPY_SUGG.
static int
combine_regs (rtx usedreg, rtx setreg, int may_save_copy, int insn_number,
- rtx insn, int already_dead)
+ rtx insn)
{
int ureg, sreg;
int offset = 0;
&& usize < qty[reg_qty[ureg]].size)
/* Can't combine if SREG is not a register we can allocate. */
|| (sreg >= FIRST_PSEUDO_REGISTER && reg_qty[sreg] == -1)
- /* Don't combine with a pseudo mentioned in a REG_NO_CONFLICT note.
- These have already been taken care of. This probably wouldn't
- combine anyway, but don't take any chances. */
- || (ureg >= FIRST_PSEUDO_REGISTER
- && find_reg_note (insn, REG_NO_CONFLICT, usedreg))
/* Don't tie something to itself. In most cases it would make no
difference, but it would screw up if the reg being tied to itself
also dies in this insn. */
if (reg_qty[sreg] >= -1
/* If we are not going to let any regs live across calls,
don't tie a call-crossing reg to a non-call-crossing reg. */
- || (current_function_has_nonlocal_label
+ || (cfun->has_nonlocal_label
&& ((REG_N_CALLS_CROSSED (ureg) > 0)
!= (REG_N_CALLS_CROSSED (sreg) > 0))))
return 0;
if this is the last use of UREG, provided the classes they want
are compatible. */
- if ((already_dead || find_regno_note (insn, REG_DEAD, ureg))
+ if (find_regno_note (insn, REG_DEAD, ureg)
&& reg_meets_class_p (sreg, qty[reg_qty[ureg]].min_class))
{
/* Add SREG to UREG's quantity. */
/* Update info about quantity SQTY. */
qty[sqty].n_calls_crossed += REG_N_CALLS_CROSSED (sreg);
+ qty[sqty].freq_calls_crossed += REG_FREQ_CALLS_CROSSED (sreg);
qty[sqty].n_throwing_calls_crossed
+= REG_N_THROWING_CALLS_CROSSED (sreg);
qty[sqty].n_refs += REG_N_REFS (sreg);
True if REG's reg class either contains or is contained in CLASS. */
static int
-reg_meets_class_p (int reg, enum reg_class class)
+reg_meets_class_p (int reg, enum reg_class rclass)
{
- enum reg_class rclass = reg_preferred_class (reg);
- return (reg_class_subset_p (rclass, class)
- || reg_class_subset_p (class, rclass));
+ enum reg_class rclass2 = reg_preferred_class (reg);
+ return (reg_class_subset_p (rclass2, rclass)
+ || reg_class_subset_p (rclass, rclass2));
}
/* Update the class of QTYNO assuming that REG is being tied to it. */
register is available. If not, return -1. */
static int
-find_free_reg (enum reg_class class, enum machine_mode mode, int qtyno,
+find_free_reg (enum reg_class rclass, enum machine_mode mode, int qtyno,
int accept_call_clobbered, int just_try_suggested,
- int born_index, int dead_index)
+ int born_index, int dead_index, basic_block bb)
{
int i, ins;
HARD_REG_SET first_used, used;
/* Don't let a pseudo live in a reg across a function call
if we might get a nonlocal goto. */
- if (current_function_has_nonlocal_label
+ if (cfun->has_nonlocal_label
&& qty[qtyno].n_calls_crossed > 0)
return -1;
for (ins = born_index; ins < dead_index; ins++)
IOR_HARD_REG_SET (used, regs_live_at[ins]);
- IOR_COMPL_HARD_REG_SET (used, reg_class_contents[(int) class]);
+ IOR_COMPL_HARD_REG_SET (used, reg_class_contents[(int) rclass]);
/* Don't use the frame pointer reg in local-alloc even if
we may omit the frame pointer, because if we do that and then we
{
/* Don't try the copy-suggested regs again. */
qty_phys_num_copy_sugg[qtyno] = 0;
- return find_free_reg (class, mode, qtyno, accept_call_clobbered, 1,
- born_index, dead_index);
+ return find_free_reg (rclass, mode, qtyno, accept_call_clobbered, 1,
+ born_index, dead_index, bb);
}
/* We need not check to see if the current function has nonlocal
&& ! just_try_suggested
&& qty[qtyno].n_calls_crossed != 0
&& qty[qtyno].n_throwing_calls_crossed == 0
- && CALLER_SAVE_PROFITABLE (qty[qtyno].n_refs,
- qty[qtyno].n_calls_crossed))
+ && CALLER_SAVE_PROFITABLE (optimize_bb_for_size_p (bb) ? qty[qtyno].n_refs
+ : qty[qtyno].freq,
+ optimize_bb_for_size_p (bb) ? qty[qtyno].n_calls_crossed
+ : qty[qtyno].freq_calls_crossed))
{
- i = find_free_reg (class, mode, qtyno, 1, 0, born_index, dead_index);
+ i = find_free_reg (rclass, mode, qtyno, 1, 0, born_index, dead_index, bb);
if (i >= 0)
caller_save_needed = 1;
return i;
}
}
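The profitability test itself is untouched; the patch only switches its
inputs between raw counts and frequency-weighted counts according to
optimize_bb_for_size_p.  Unless the target overrides it, regs.h defines
the heuristic approximately as:

  /* Default from regs.h (approximate form); targets may override.  */
  #ifndef CALLER_SAVE_PROFITABLE
  #define CALLER_SAVE_PROFITABLE(REFS, CALLS) (4 * (CALLS) < (REFS))
  #endif

i.e. caller-save allocation is attempted only when the quantity has more
than four references per call it lives across.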
\f
-/* INSN is the CLOBBER insn that starts a REG_NO_NOCONFLICT block, R0
- is the register being clobbered, and R1 is a register being used in
- the equivalent expression.
-
- If R1 dies in the block and has a REG_NO_CONFLICT note on every insn
- in which it is used, return 1.
-
- Otherwise, return 0. */
-
-static int
-no_conflict_p (rtx insn, rtx r0 ATTRIBUTE_UNUSED, rtx r1)
-{
- int ok = 0;
- rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
- rtx p, last;
-
- /* If R1 is a hard register, return 0 since we handle this case
- when we scan the insns that actually use it. */
-
- if (note == 0
- || (REG_P (r1) && REGNO (r1) < FIRST_PSEUDO_REGISTER)
- || (GET_CODE (r1) == SUBREG && REG_P (SUBREG_REG (r1))
- && REGNO (SUBREG_REG (r1)) < FIRST_PSEUDO_REGISTER))
- return 0;
-
- last = XEXP (note, 0);
-
- for (p = NEXT_INSN (insn); p && p != last; p = NEXT_INSN (p))
- if (INSN_P (p))
- {
- if (find_reg_note (p, REG_DEAD, r1))
- ok = 1;
-
- /* There must be a REG_NO_CONFLICT note on every insn, otherwise
- some earlier optimization pass has inserted instructions into
- the sequence, and it is not safe to perform this optimization.
- Note that emit_no_conflict_block always ensures that this is
- true when these sequences are created. */
- if (! find_reg_note (p, REG_NO_CONFLICT, r1))
- return 0;
- }
-
- return ok;
-}
-\f
/* Return the number of alternatives for which the constraint string P
indicates that the operand must be equal to operand 0 and that no register
is acceptable. */
fprintf (file, ";; Register %d in %d.\n", i, reg_renumber[i]);
}
+#ifdef STACK_REGS
+static void
+find_stack_regs (void)
+{
+ bitmap stack_regs = BITMAP_ALLOC (NULL);
+ int i;
+ HARD_REG_SET stack_hard_regs, used;
+ basic_block bb;
+
+ /* Any register that MAY be allocated to a register stack (like the
+ 387) is treated poorly. Each such register is marked as being
+ live everywhere. This keeps the register allocator and the
+ subsequent passes from doing anything useful with these values.
+
+ FIXME: This seems like an incredibly poor idea. */
+
+ CLEAR_HARD_REG_SET (stack_hard_regs);
+ for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
+ SET_HARD_REG_BIT (stack_hard_regs, i);
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ {
+ COPY_HARD_REG_SET (used, reg_class_contents[reg_preferred_class (i)]);
+ IOR_HARD_REG_SET (used, reg_class_contents[reg_alternate_class (i)]);
+ AND_HARD_REG_SET (used, stack_hard_regs);
+ if (!hard_reg_set_empty_p (used))
+ bitmap_set_bit (stack_regs, i);
+ }
+
+ if (dump_file)
+ bitmap_print (dump_file, stack_regs, "stack regs:", "\n");
+
+ FOR_EACH_BB (bb)
+ {
+ bitmap_ior_into (DF_LIVE_IN (bb), stack_regs);
+ bitmap_and_into (DF_LIVE_IN (bb), DF_LR_IN (bb));
+ bitmap_ior_into (DF_LIVE_OUT (bb), stack_regs);
+ bitmap_and_into (DF_LIVE_OUT (bb), DF_LR_OUT (bb));
+ }
+ BITMAP_FREE (stack_regs);
+}
+#endif
+
+static bool
+gate_handle_local_alloc (void)
+{
+ return ! flag_ira;
+}
+
/* Run old register allocator. Return TRUE if we must exit
rest_of_compilation upon return. */
static unsigned int
df_note_add_problem ();
- if (optimize > 1)
- df_remove_problem (df_live);
- /* Create a new version of df that has the special version of UR if
- we are doing optimization. */
- if (optimize)
- df_urec_add_problem ();
+ if (optimize == 1)
+ {
+ df_live_add_problem ();
+ df_live_set_all_dirty ();
+ }
#ifdef ENABLE_CHECKING
df->changeable_flags |= DF_VERIFY_SCHEDULED;
#endif
df_analyze ();
+#ifdef STACK_REGS
+ if (optimize)
+ find_stack_regs ();
+#endif
regstat_init_n_sets_and_refs ();
regstat_compute_ri ();
- /* There is just too much going on in the register allocators to
- keep things up to date. At the end we have to rescan anyway
- because things change when the reload_completed flag is set.
- So we just turn off scanning and we will rescan by hand. */
- df_set_flags (DF_NO_INSN_RESCAN);
-
-
/* If we are not optimizing, then this is the only place before
register allocation where dataflow is done. And that is needed
to generate these warnings. */
return 0;
}
-struct tree_opt_pass pass_local_alloc =
+struct rtl_opt_pass pass_local_alloc =
{
+ {
+ RTL_PASS,
"lreg", /* name */
- NULL, /* gate */
+ gate_handle_local_alloc, /* gate */
rest_of_handle_local_alloc, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* properties_destroyed */
0, /* todo_flags_start */
TODO_dump_func |
- TODO_ggc_collect, /* todo_flags_finish */
- 'l' /* letter */
+ TODO_ggc_collect /* todo_flags_finish */
+ }
};