rtx, int, rtx));
static int labels_in_range_p PARAMS ((rtx, int));
static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
-
-static void count_loop_regs_set PARAMS ((const struct loop *, int *));
static void note_addr_stored PARAMS ((rtx, rtx, void *));
static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
- rtx *, rtx));
+ rtx *));
static void loop_ivs_free PARAMS((struct loop *));
static void strength_reduce PARAMS ((struct loop *, int, int));
static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
int, int));
static void check_final_value PARAMS ((const struct loop *,
struct induction *));
+static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
+static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
static void record_giv PARAMS ((const struct loop *, struct induction *,
static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
int, int, int));
static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
- struct iv_class *, int, rtx));
+ struct iv_class *, int,
+ basic_block, rtx));
static int last_use_this_basic_block PARAMS ((rtx, rtx));
static void record_initial PARAMS ((rtx, rtx, void *));
static void update_reg_last_use PARAMS ((rtx, rtx));
static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
-static void load_mems_and_recount_loop_regs_set PARAMS ((const struct loop*,
- int *));
+static void loop_regs_scan PARAMS ((const struct loop*, int, int *));
static void load_mems PARAMS ((const struct loop *));
static int insert_loop_mem PARAMS ((rtx *, void *));
static int replace_loop_mem PARAMS ((rtx *, void *));
static int replace_label PARAMS ((rtx *, void *));
static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
+static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
+static void loop_regs_update PARAMS ((const struct loop *, rtx));
static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));
+static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
+ rtx, rtx));
+static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
+ basic_block, rtx, rtx));
+static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
+static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));
+
static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
+void debug_ivs PARAMS ((const struct loop *));
+void debug_iv_class PARAMS ((const struct iv_class *));
void debug_biv PARAMS ((const struct induction *));
void debug_giv PARAMS ((const struct induction *));
void debug_loop PARAMS ((const struct loop *));
loop->scan_start = p;
+ /* If loop end is the end of the current function, then emit a
+ NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
+ note insn. This is the position we use when sinking insns out of
+ the loop. */
+ if (NEXT_INSN (loop->end) != 0)
+ loop->sink = NEXT_INSN (loop->end);
+ else
+ loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
+
/* Set up variables describing this loop. */
prescan_loop (loop);
threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
return;
}
- /* Count number of times each reg is set during this loop. Set
- regs->array[I].may_not_optimize if it is not safe to move out the
- setting of register I. Set regs->array[I].single_usage. */
-
- regs->num = max_reg_num ();
-
- /* Allocate extra space for REGs that might be created by
- load_mems. We allocate a little extra slop as well, in the hopes
- that even after the moving of movables creates some new registers
- we won't have to reallocate these arrays. However, we do grow
- the arrays, if necessary, in load_mems_recount_loop_regs_set. */
- regs->size = regs->num + loop_info->mems_idx + 16;
- regs->array = (struct loop_reg *)
- xmalloc (regs->size * sizeof (*regs->array));
-
- for (i = 0; i < regs->num; i++)
- {
- regs->array[i].set_in_loop = 0;
- regs->array[i].may_not_optimize = 0;
- regs->array[i].single_usage = NULL_RTX;
- }
-
- count_loop_regs_set (loop, &insn_count);
-
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- {
- regs->array[i].may_not_optimize = 1;
- regs->array[i].set_in_loop = 1;
- }
-
-#ifdef AVOID_CCMODE_COPIES
- /* Don't try to move insns which set CC registers if we should not
- create CCmode register copies. */
- for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
- if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
- regs->array[i].may_not_optimize = 1;
-#endif
-
- for (i = 0; i < regs->num; i++)
- regs->array[i].n_times_set = regs->array[i].set_in_loop;
+ /* Allocate extra space for REGs that might be created by load_mems.
+ We allocate a little extra slop as well, in the hopes that we
+ won't have to reallocate the regs array. */
+ loop_regs_scan (loop, loop_info->mems_idx + 16, &insn_count);
if (loop_dump_stream)
{
/* Now that we've moved some things out of the loop, we might be able to
hoist even more memory references. */
- load_mems_and_recount_loop_regs_set (loop, &insn_count);
+ load_mems (loop);
+
+ /* Recalculate regs->array if load_mems has created new registers. */
+ if (max_reg_num () > regs->num)
+ loop_regs_scan (loop, 0, &insn_count);
for (update_start = loop_start;
PREV_INSN (update_start)
for (m1 = m; m1->match; m1 = m1->match);
newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
SET_DEST (PATTERN (m1->insn)));
- i1 = emit_insn_before (newpat, loop_start);
+ i1 = loop_insn_hoist (loop, newpat);
/* Mark the moved, invariant reg as being allowed to
share a hard reg with the other matching invariant. */
the move insn before the loop. */
else if (m->move_insn)
{
- rtx i1, temp;
+ rtx i1, temp, seq;
for (count = m->consec; count >= 0; count--)
{
start_sequence ();
emit_move_insn (m->set_dest, m->set_src);
temp = get_insns ();
+ seq = gen_sequence ();
end_sequence ();
add_label_notes (m->set_src, temp);
- i1 = emit_insns_before (temp, loop_start);
+ i1 = loop_insn_hoist (loop, seq);
if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
REG_NOTES (i1)
= gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
if (GET_CODE (temp) == CALL_INSN
&& fn_address != 0
&& reg_referenced_p (fn_reg, body))
- emit_insn_after (gen_move_insn (fn_reg,
- fn_address),
- fn_address_insn);
+ loop_insn_emit_after (loop, 0, fn_address_insn,
+ gen_move_insn
+ (fn_reg, fn_address));
if (GET_CODE (temp) == CALL_INSN)
{
- i1 = emit_call_insn_before (body, loop_start);
+ i1 = loop_call_insn_hoist (loop, body);
/* Because the USAGE information potentially
contains objects other than hard registers
we need to copy it. */
= copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
}
else
- i1 = emit_insn_before (body, loop_start);
+ i1 = loop_insn_hoist (loop, body);
if (first == 0)
first = i1;
if (temp == fn_address_insn)
emit_move_insn (reg, tem);
sequence = gen_sequence ();
end_sequence ();
- i1 = emit_insn_before (sequence, loop_start);
+ i1 = loop_insn_hoist (loop, sequence);
}
else if (GET_CODE (p) == CALL_INSN)
{
- i1 = emit_call_insn_before (PATTERN (p), loop_start);
+ i1 = loop_call_insn_hoist (loop, PATTERN (p));
/* Because the USAGE information potentially
contains objects other than hard registers
we need to copy it. */
}
else if (count == m->consec && m->move_insn_first)
{
+ rtx seq;
/* The SET_SRC might not be invariant, so we must
use the REG_EQUAL note. */
start_sequence ();
emit_move_insn (m->set_dest, m->set_src);
temp = get_insns ();
+ seq = gen_sequence ();
end_sequence ();
add_label_notes (m->set_src, temp);
- i1 = emit_insns_before (temp, loop_start);
+ i1 = loop_insn_hoist (loop, seq);
if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
REG_NOTES (i1)
= gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
m->set_src, REG_NOTES (i1));
}
else
- i1 = emit_insn_before (PATTERN (p), loop_start);
+ i1 = loop_insn_hoist (loop, PATTERN (p));
if (REG_NOTES (i1) == 0)
{
}
}
}
-
-/* Increment REGS->array[I].SET_IN_LOOP at the index I of each
- register that is modified by an insn between FROM and TO. If the
- value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
- more, stop incrementing it, to avoid overflow.
-
- Store in REGS->array[I].SINGLE_USAGE[I] the single insn in which
- register I is used, if it is only used once. Otherwise, it is set
- to 0 (for no uses) or const0_rtx for more than one use. This
- parameter may be zero, in which case this processing is not done.
-
- Store in *COUNT_PTR the number of actual instruction
- in the loop. We use this to decide what is worth moving out. */
-
-/* last_set[n] is nonzero iff reg n has been set in the current basic block.
- In that case, it is the insn that last set reg n. */
-
-static void
-count_loop_regs_set (loop, count_ptr)
- const struct loop *loop;
- int *count_ptr;
-{
- struct loop_regs *regs = LOOP_REGS (loop);
- register rtx *last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
- register rtx insn;
- register int count = 0;
-
- for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
- insn = NEXT_INSN (insn))
- {
- if (INSN_P (insn))
- {
- ++count;
-
- /* Record registers that have exactly one use. */
- find_single_use_in_loop (regs, insn, PATTERN (insn));
-
- /* Include uses in REG_EQUAL notes. */
- if (REG_NOTES (insn))
- find_single_use_in_loop (regs, insn, REG_NOTES (insn));
-
- if (GET_CODE (PATTERN (insn)) == SET
- || GET_CODE (PATTERN (insn)) == CLOBBER)
- count_one_set (regs, insn, PATTERN (insn), last_set);
- else if (GET_CODE (PATTERN (insn)) == PARALLEL)
- {
- register int i;
- for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
- count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
- last_set);
- }
- }
-
- if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
- memset ((char *) last_set, 0, regs->num * sizeof (rtx));
- }
- *count_ptr = count;
-
- /* Clean up. */
- free (last_set);
-}
\f
/* Given a loop that is bounded by LOOP->START and LOOP->END and that
is entered at LOOP->SCAN_START, return 1 if the register set in SET
insert_before = v->insn;
if (tv->mult_val == const1_rtx)
- emit_iv_add_mult (tv->add_val, v->mult_val,
- v->new_reg, v->new_reg, insert_before);
+ loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
+ v->new_reg, v->new_reg,
+ 0, insert_before);
else /* tv->mult_val == const0_rtx */
/* A multiply is acceptable here
since this is presumed to be seldom executed. */
- emit_iv_add_mult (tv->add_val, v->mult_val,
- v->add_val, v->new_reg, insert_before);
+ loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
+ v->add_val, v->new_reg,
+ 0, insert_before);
}
/* Add code at loop start to initialize giv's reduced reg. */
- emit_iv_add_mult (extend_value_for_giv (v, bl->initial_value),
- v->mult_val, v->add_val, v->new_reg,
- loop->start);
+ loop_iv_add_mult_hoist (loop,
+ extend_value_for_giv (v, bl->initial_value),
+ v->mult_val, v->add_val, v->new_reg);
}
}
}
static void
-loop_givs_rescan (loop, bl, reg_map, end_insert_before)
+loop_givs_rescan (loop, bl, reg_map)
struct loop *loop;
struct iv_class *bl;
rtx *reg_map;
- rtx end_insert_before;
{
struct induction *v;
{
/* Not replaceable; emit an insn to set the original giv reg from
the reduced giv, same as above. */
- emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
- v->insn);
+ loop_insn_emit_after (loop, 0, v->insn,
+ gen_move_insn (v->dest_reg, v->new_reg));
}
/* When a loop is reversed, givs which depend on the reversed
not replaceable. The correct final value is the same as the
value that the giv starts the reversed loop with. */
if (bl->reversed && ! v->replaceable)
- emit_iv_add_mult (extend_value_for_giv (v, bl->initial_value),
- v->mult_val, v->add_val, v->dest_reg,
- end_insert_before);
+ loop_iv_add_mult_sink (loop,
+ extend_value_for_giv (v, bl->initial_value),
+ v->mult_val, v->add_val, v->dest_reg);
else if (v->final_value)
- {
- rtx insert_before;
-
- /* If the loop has multiple exits, emit the insn before the
- loop to ensure that it will always be executed no matter
- how the loop exits. Otherwise, emit the insn after the loop,
- since this is slightly more efficient. */
- if (loop->exit_count)
- insert_before = loop->start;
- else
- insert_before = end_insert_before;
- emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
- insert_before);
- }
+ loop_insn_sink_or_swim (loop,
+ gen_move_insn (v->dest_reg, v->final_value));
if (loop_dump_stream)
{
&& benefit > 0
&& GET_CODE (v->mult_val) == CONST_INT)
{
+ int size = GET_MODE_SIZE (GET_MODE (v->mem));
+
if (HAVE_POST_INCREMENT
- && INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
+ && INTVAL (v->mult_val) == size)
benefit += add_cost * bl->biv_count;
else if (HAVE_PRE_INCREMENT
- && INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
+ && INTVAL (v->mult_val) == size)
benefit += add_cost * bl->biv_count;
else if (HAVE_POST_DECREMENT
- && -INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
+ && -INTVAL (v->mult_val) == size)
benefit += add_cost * bl->biv_count;
else if (HAVE_PRE_DECREMENT
- && -INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
+ && -INTVAL (v->mult_val) == size)
benefit += add_cost * bl->biv_count;
}
#endif
/* Map of pseudo-register replacements. */
rtx *reg_map = NULL;
int reg_map_size;
- rtx end_insert_before;
int unrolled_insn_copies = 0;
rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
addr_placeholder = gen_reg_rtx (Pmode);
- /* Save insn immediately after the loop_end. Insns inserted after loop_end
- must be put before this insn, so that they will appear in the right
- order (i.e. loop order).
-
- If loop_end is the end of the current function, then emit a
- NOTE_INSN_DELETED after loop_end and set end_insert_before to the
- dummy note insn. */
- if (NEXT_INSN (loop->end) != 0)
- end_insert_before = NEXT_INSN (loop->end);
- else
- end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop->end);
-
ivs->n_regs = max_reg_before_loop;
ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
/* Can still unroll the loop anyways, but indicate that there is no
strength reduction info available. */
if (flags & LOOP_UNROLL)
- unroll_loop (loop, insn_count, end_insert_before, 0);
+ unroll_loop (loop, insn_count, 0);
loop_ivs_free (loop);
return;
For each giv register that can be reduced now: if replaceable,
substitute reduced reg wherever the old giv occurs;
else add new move insn "giv_reg = reduced_reg". */
- loop_givs_rescan (loop, bl, reg_map, end_insert_before);
+ loop_givs_rescan (loop, bl, reg_map);
/* All the givs based on the biv bl have been reduced if they
merit it. */
value, so we don't need another one. We can't calculate the
proper final value for such a biv here anyways. */
if (bl->final_value && ! bl->reversed)
- {
- rtx insert_before;
-
- /* If the loop has multiple exits, emit the insn before the
- loop to ensure that it will always be executed no matter
- how the loop exits. Otherwise, emit the insn after the
- loop, since this is slightly more efficient. */
- if (loop->exit_count)
- insert_before = loop->start;
- else
- insert_before = end_insert_before;
-
- emit_insn_before (gen_move_insn (bl->biv->dest_reg,
- bl->final_value),
- end_insert_before);
- }
+ loop_insn_sink_or_swim (loop, gen_move_insn
+ (bl->biv->dest_reg, bl->final_value));
if (loop_dump_stream)
fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
if ((flags & LOOP_UNROLL)
|| (loop_info->n_iterations > 0
&& unrolled_insn_copies <= insn_count))
- unroll_loop (loop, insn_count, end_insert_before, 1);
+ unroll_loop (loop, insn_count, 1);
#ifdef HAVE_doloop_end
if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
add_val, ext_val, benefit, DEST_ADDR,
not_every_iteration, maybe_multiple, &XEXP (x, 0));
- v->mem_mode = GET_MODE (x);
+ v->mem = x;
}
}
return;
the expression of G2 in terms of G1 can be used. */
if (ret != NULL_RTX
&& g2->giv_type == DEST_ADDR
- && memory_address_p (g2->mem_mode, ret)
+ && memory_address_p (GET_MODE (g2->mem), ret)
/* ??? Looses, especially with -fforce-addr, where *g2->location
will always be a register, and so anything more complicated
gets discarded. */
int ze_ok = 0, se_ok = 0, info_ok = 0;
enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
HOST_WIDE_INT start_val;
- unsigned HOST_WIDE_INT u_end_val, u_start_val;
+ unsigned HOST_WIDE_INT u_end_val = 0;
+ unsigned HOST_WIDE_INT u_start_val = 0;
rtx incr = pc_rtx;
struct induction *v;
free (can_combine);
}
\f
/* Generate (but do not emit) a sequence of insns to set REG = B * M + A.
   Returns the sequence; the caller is responsible for emitting it and
   for recording any new registers the expansion created (see
   loop_regs_update).  */

static rtx
gen_add_mult (b, m, a, reg)
     rtx b;          /* initial value of basic induction variable */
     rtx m;          /* multiplicative constant */
     rtx a;          /* additive constant */
     rtx reg;        /* destination register */
{
  rtx seq;
  rtx result;

  start_sequence ();
  /* Use unsigned arithmetic.  */
  result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
  if (reg != result)
    emit_move_insn (reg, result);
  seq = gen_sequence ();
  end_sequence ();

  return seq;
}
+
- /* It is entirely possible that the expansion created lots of new
- registers. Iterate over the sequence we just created and
- record them all. */
+/* Update registers created in insn sequence SEQ. */
+
+static void
+loop_regs_update (loop, seq)
+ const struct loop *loop ATTRIBUTE_UNUSED;
+ rtx seq;
+{
+ /* Update register info for alias analysis. */
if (GET_CODE (seq) == SEQUENCE)
{
record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
}
}
- else if (GET_CODE (seq) == SET
- && GET_CODE (SET_DEST (seq)) == REG)
- record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
+ else
+ {
+ rtx set = single_set (seq);
+ if (set && GET_CODE (SET_DEST (set)) == REG)
+ record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
+ }
+}
+
+
+/* EMIT code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
+
+void
+loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
+ const struct loop *loop;
+ rtx b; /* initial value of basic induction variable */
+ rtx m; /* multiplicative constant */
+ rtx a; /* additive constant */
+ rtx reg; /* destination register */
+ basic_block before_bb;
+ rtx before_insn;
+{
+ rtx seq;
+
+ if (! before_insn)
+ {
+ loop_iv_add_mult_hoist (loop, b, m, a, reg);
+ return;
+ }
+
+ /* Use copy_rtx to prevent unexpected sharing of these rtx. */
+ seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
+
+ /* Increase the lifetime of any invariants moved further in code. */
+ update_reg_last_use (a, before_insn);
+ update_reg_last_use (b, before_insn);
+ update_reg_last_use (m, before_insn);
+
+ loop_insn_emit_before (loop, before_bb, before_insn, seq);
+
+ /* It is possible that the expansion created lots of new registers.
+ Iterate over the sequence we just created and record them all. */
+ loop_regs_update (loop, seq);
+}
+
+
+/* Emit insns in loop pre-header to set REG = B * M + A. */
+
+void
+loop_iv_add_mult_sink (loop, b, m, a, reg)
+ const struct loop *loop;
+ rtx b; /* initial value of basic induction variable */
+ rtx m; /* multiplicative constant */
+ rtx a; /* additive constant */
+ rtx reg; /* destination register */
+{
+ rtx seq;
+
+ /* Use copy_rtx to prevent unexpected sharing of these rtx. */
+ seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
+
+ /* Increase the lifetime of any invariants moved further in code.
+ ???? Is this really necessary? */
+ update_reg_last_use (a, loop->sink);
+ update_reg_last_use (b, loop->sink);
+ update_reg_last_use (m, loop->sink);
+
+ loop_insn_sink (loop, seq);
+
+ /* It is possible that the expansion created lots of new registers.
+ Iterate over the sequence we just created and record them all. */
+ loop_regs_update (loop, seq);
+}
+
+
+/* Emit insns after loop to set REG = B * M + A. */
+
+void
+loop_iv_add_mult_hoist (loop, b, m, a, reg)
+ const struct loop *loop;
+ rtx b; /* initial value of basic induction variable */
+ rtx m; /* multiplicative constant */
+ rtx a; /* additive constant */
+ rtx reg; /* destination register */
+{
+ rtx seq;
+
+ /* Use copy_rtx to prevent unexpected sharing of these rtx. */
+ seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
+
+ loop_insn_hoist (loop, seq);
+
+ /* It is possible that the expansion created lots of new registers.
+ Iterate over the sequence we just created and record them all. */
+ loop_regs_update (loop, seq);
}
-/* Similar to emit_iv_add_mult, but compute cost rather than emitting
- insns. */
+
+
+/* Similar to gen_add_mult, but compute cost rather than generating
+ sequence. */
+
static int
iv_add_mult_cost (b, m, a, reg)
rtx b; /* initial value of basic induction variable */
rtx last, result;
start_sequence ();
- result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
+ result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
if (reg != result)
emit_move_insn (reg, result);
last = get_last_insn ();
if (bl->giv_count == 0 && ! loop->exit_count)
{
rtx bivreg = regno_reg_rtx[bl->regno];
+ struct iv_class *blt;
/* If there are no givs for this biv, and the only exit is the
fall through at the end of the loop, then
break;
}
}
+
+ /* A biv has uses besides counting if it is used to set another biv. */
+ for (blt = ivs->list; blt; blt = blt->next)
+ if (blt->init_set && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
+ {
+ no_use_except_counting = 0;
+ break;
+ }
}
if (no_use_except_counting)
&& GET_CODE (comparison_value) == CONST_INT)
{
start_value = GEN_INT (comparison_val - add_adjust);
- emit_insn_before (gen_move_insn (reg, start_value),
- loop_start);
+ loop_insn_hoist (loop, gen_move_insn (reg, start_value));
}
else if (GET_CODE (initial_value) == CONST_INT)
{
return 0;
start_value
= gen_rtx_PLUS (mode, comparison_value, offset);
- emit_insn_before ((GEN_FCN (icode)
- (reg, comparison_value, offset)),
- loop_start);
+ loop_insn_hoist (loop, (GEN_FCN (icode)
+ (reg, comparison_value, offset)));
if (GET_CODE (comparison) == LE)
final_value = gen_rtx_PLUS (mode, comparison_value,
GEN_INT (add_val));
return 0;
start_value
= gen_rtx_MINUS (mode, comparison_value, initial_value);
- emit_insn_before ((GEN_FCN (icode)
- (reg, comparison_value, initial_value)),
- loop_start);
+ loop_insn_hoist (loop, (GEN_FCN (icode)
+ (reg, comparison_value,
+ initial_value)));
}
else
/* We could handle the other cases too, but it'll be
tem = gen_sequence ();
end_sequence ();
- p = emit_insn_before (tem, bl->biv->insn);
+ p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
delete_insn (bl->biv->insn);
/* Update biv info to reflect its new status. */
if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
|| ! bl->init_insn
|| REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
- emit_insn_after (gen_move_insn (reg, final_value),
- loop_end);
+ loop_insn_sink (loop, gen_move_insn (reg, final_value));
/* Delete compare/branch at end of loop. */
delete_insn (PREV_INSN (loop_end));
{
struct loop_ivs *ivs = LOOP_IVS (loop);
rtx reg = bl->biv->dest_reg;
- rtx loop_start = loop->start;
- rtx loop_end = loop->end;
rtx p;
/* Scan all insns in the loop, stopping if we find one that uses the
biv in a way that we cannot eliminate. */
- for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
+ for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
{
enum rtx_code code = GET_CODE (p);
- rtx where = threshold >= insn_count ? loop_start : p;
+ basic_block where_bb = 0;
+ rtx where_insn = threshold >= insn_count ? 0 : p;
/* If this is a libcall that sets a giv, skip ahead to its end. */
if (GET_RTX_CLASS (code) == 'i')
if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
&& reg_mentioned_p (reg, PATTERN (p))
&& ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
- eliminate_p, where))
+ eliminate_p, where_bb, where_insn))
{
if (loop_dump_stream)
fprintf (loop_dump_stream,
}
}
- if (p == loop_end)
+ if (p == loop->end)
{
if (loop_dump_stream)
fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
If BIV does not appear in X, return 1.
- If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
- where extra insns should be added. Depending on how many items have been
- moved out of the loop, it will either be before INSN or at the start of
- the loop. */
+ If ELIMINATE_P is non-zero, actually do the elimination.
+ WHERE_INSN/WHERE_BB indicate where extra insns should be added.
+ Depending on how many items have been moved out of the loop, it
+ will either be before INSN (when WHERE_INSN is non-zero) or at the
+ start of the loop (when WHERE_INSN is zero). */
static int
-maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where)
+maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
const struct loop *loop;
rtx x, insn;
struct iv_class *bl;
int eliminate_p;
- rtx where;
+ basic_block where_bb;
+ rtx where_insn;
{
enum rtx_code code = GET_CODE (x);
rtx reg = bl->biv->dest_reg;
into a register (it will be a loop invariant.) */
tem = gen_reg_rtx (GET_MODE (v->new_reg));
- emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
- where);
+ loop_insn_emit_before (loop, 0, where_insn,
+ gen_move_insn (tem,
+ copy_rtx (v->add_val)));
/* Substitute the new register for its invariant value in
the compare expression. */
{
/* Otherwise, load it into a register. */
tem = gen_reg_rtx (mode);
- emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
+ loop_iv_add_mult_emit_before (loop, arg,
+ v->mult_val, v->add_val,
+ tem, where_bb, where_insn);
validate_change (insn, &XEXP (x, arg_operand), tem, 1);
}
if (apply_change_group ())
v->new_reg, 1);
/* Compute value to compare against. */
- emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
+ loop_iv_add_mult_emit_before (loop, arg,
+ v->mult_val, v->add_val,
+ tem, where_bb, where_insn);
/* Use it in this insn. */
validate_change (insn, &XEXP (x, arg_operand), tem, 1);
if (apply_change_group ())
v->new_reg, 1);
/* Compute value to compare against. */
- emit_iv_add_mult (arg, v->mult_val, v->add_val,
- tem, where);
+ loop_iv_add_mult_emit_before (loop, arg,
+ v->mult_val, v->add_val,
+ tem, where_bb, where_insn);
validate_change (insn, &XEXP (x, arg_operand), tem, 1);
if (apply_change_group ())
return 1;
{
case 'e':
if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
- eliminate_p, where))
+ eliminate_p, where_bb, where_insn))
return 0;
break;
case 'E':
for (j = XVECLEN (x, i) - 1; j >= 0; j--)
if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
- eliminate_p, where))
+ eliminate_p, where_bb, where_insn))
return 0;
break;
}
/* If any of the registers in X are "old" and currently have a last use earlier
than INSN, update them to have a last use of INSN. Their actual last use
will be the previous insn but it will not have a valid uid_luid so we can't
- use it. */
+ use it. X must be a source expression only. */
static void
update_reg_last_use (x, insn)
/* Check for the case where INSN does not have a valid luid. In this case,
there is no need to modify the regno_last_uid, as this can only happen
when code is inserted after the loop_end to set a pseudo's final value,
- and hence this insn will never be the last use of x. */
+ and hence this insn will never be the last use of x.
+ ???? This comment is not correct. See for example loop_givs_reduce.
+ This may insert an insn before another new insn. */
if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
&& INSN_UID (insn) < max_uid_for_loop
&& REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
- REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
+ {
+ REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
+ }
else
{
register int i, j;
rtx tem;
rtx op0, op1;
int reverse_code = 0;
- int did_reverse_condition = 0;
enum machine_mode mode;
code = GET_CODE (cond);
op1 = XEXP (cond, 1);
if (reverse)
- {
- code = reverse_condition (code);
- did_reverse_condition ^= 1;
- }
+ code = reversed_comparison_code (cond, insn);
+ if (code == UNKNOWN)
+ return 0;
if (earliest)
*earliest = insn;
if ((prev = prev_nonnote_insn (prev)) == 0
|| GET_CODE (prev) != INSN
- || FIND_REG_INC_NOTE (prev, 0)
- || (set = single_set (prev)) == 0)
+ || FIND_REG_INC_NOTE (prev, 0))
+ break;
+
+ set = set_of (op0, prev);
+
+ if (set
+ && (GET_CODE (set) != SET
+ || !rtx_equal_p (SET_DEST (set), op0)))
break;
/* If this is setting OP0, get what it sets it to if it looks
relevant. */
- if (rtx_equal_p (SET_DEST (set), op0))
+ if (set)
{
enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
|| mode == VOIDmode || inner_mode == VOIDmode))
{
- /* We might have reversed a LT to get a GE here. But this wasn't
- actually the comparison of data, so we don't flag that we
- have had to reverse the condition. */
- did_reverse_condition ^= 1;
reverse_code = 1;
x = SET_SRC (set);
}
code = GET_CODE (x);
if (reverse_code)
{
- code = reverse_condition (code);
+ code = reversed_comparison_code (x, prev);
if (code == UNKNOWN)
return 0;
- did_reverse_condition ^= 1;
reverse_code = 0;
}
}
}
- /* If this was floating-point and we reversed anything other than an
- EQ or NE or (UN)ORDERED, return zero. */
- if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
- && did_reverse_condition
- && code != NE && code != EQ && code != UNORDERED && code != ORDERED
- && ! flag_fast_math
- && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
- return 0;
-
#ifdef HAVE_cc0
/* Never return CC0; return zero instead. */
if (op0 == cc0_rtx)
return 0;
}
-/* Like load_mems, but also ensures that REGS->array[I].SET_IN_LOOP,
- REGS->array[I].MAY_NOT_OPTIMIZE, REGS->array[I].SINGLE_USAGE, and
- INSN_COUNT have the correct values after load_mems. */
+
+/* Allocate REGS->ARRAY or reallocate it if it is too small.
+
+ Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
+ register that is modified by an insn between FROM and TO. If the
+ value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
+ more, stop incrementing it, to avoid overflow.
+
+ Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
+ register I is used, if it is only used once. Otherwise, it is set
+ to 0 (for no uses) or const0_rtx for more than one use. This
+ parameter may be zero, in which case this processing is not done.
+
+ Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
+ optimize register I.
+
+ Store in *COUNT_PTR the number of actual instructions
+ in the loop. We use this to decide what is worth moving out. */
static void
-load_mems_and_recount_loop_regs_set (loop, insn_count)
+loop_regs_scan (loop, extra_size, count_ptr)
const struct loop *loop;
- int *insn_count;
+ int extra_size;
+ int *count_ptr;
{
struct loop_regs *regs = LOOP_REGS (loop);
+ int old_nregs;
+ /* last_set[n] is nonzero iff reg n has been set in the current
+ basic block. In that case, it is the insn that last set reg n. */
+ rtx *last_set;
+ rtx insn;
+ int count = 0;
+ int i;
- load_mems (loop);
+ old_nregs = regs->num;
+ regs->num = max_reg_num ();
- /* Recalculate regs->array since load_mems may have created new
- registers. */
- if (max_reg_num () > regs->num)
+ /* Grow the regs array if not allocated or too small. */
+ if (regs->num >= regs->size)
{
- int i;
- int old_nregs;
+ regs->size = regs->num + extra_size;
+
+ regs->array = (struct loop_reg *)
+ xrealloc (regs->array, regs->size * sizeof (*regs->array));
- old_nregs = regs->num;
- regs->num = max_reg_num ();
+ /* Zero the new elements. */
+ memset (regs->array + old_nregs, 0,
+ (regs->size - old_nregs) * sizeof (*regs->array));
+ }
- if (regs->num >= regs->size)
- {
- regs->size = regs->num;
+ /* Clear previously scanned fields but do not clear n_times_set. */
+ for (i = 0; i < old_nregs; i++)
+ {
+ regs->array[i].set_in_loop = 0;
+ regs->array[i].may_not_optimize = 0;
+ regs->array[i].single_usage = NULL_RTX;
+ }
- /* Grow the array. */
- regs->array = (struct loop_reg *)
- xrealloc (regs->array, regs->size * sizeof (*regs->array));
- }
+ last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
- for (i = 0; i < regs->num; i++)
+ /* Scan the loop, recording register usage. */
+ for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
+ insn = NEXT_INSN (insn))
+ {
+ if (INSN_P (insn))
{
- regs->array[i].set_in_loop = 0;
- regs->array[i].may_not_optimize = 0;
- regs->array[i].single_usage = NULL_RTX;
- }
+ ++count;
- count_loop_regs_set (loop, insn_count);
+ /* Record registers that have exactly one use. */
+ find_single_use_in_loop (regs, insn, PATTERN (insn));
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- {
- regs->array[i].may_not_optimize = 1;
- regs->array[i].set_in_loop = 1;
+ /* Include uses in REG_EQUAL notes. */
+ if (REG_NOTES (insn))
+ find_single_use_in_loop (regs, insn, REG_NOTES (insn));
+
+ if (GET_CODE (PATTERN (insn)) == SET
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ count_one_set (regs, insn, PATTERN (insn), last_set);
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
+ last_set);
+ }
}
+ if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
+ memset (last_set, 0, regs->num * sizeof (rtx));
+ }
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ regs->array[i].may_not_optimize = 1;
+ regs->array[i].set_in_loop = 1;
+ }
+
#ifdef AVOID_CCMODE_COPIES
- /* Don't try to move insns which set CC registers if we should not
- create CCmode register copies. */
- for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
- if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
- regs->array[i].may_not_optimize = 1;
+ /* Don't try to move insns which set CC registers if we should not
+ create CCmode register copies. */
+ for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
+ if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
+ regs->array[i].may_not_optimize = 1;
#endif
+
+ /* Set regs->array[I].n_times_set for the new registers. */
+ for (i = old_nregs; i < regs->num; i++)
+ regs->array[i].n_times_set = regs->array[i].set_in_loop;
- /* Set regs->array[I].n_times_set for the new registers. */
- for (i = old_nregs; i < regs->num; i++)
- regs->array[i].n_times_set = regs->array[i].set_in_loop;
- }
+ free (last_set);
+ *count_ptr = count;
}
+
/* Move MEMs into registers for the duration of the loop. */
static void
rtx end_label;
/* Nonzero if the next instruction may never be executed. */
int next_maybe_never = 0;
- int last_max_reg = max_reg_num ();
+ unsigned int last_max_reg = max_reg_num ();
if (loop_info->mems_idx == 0)
return;
best = copy_rtx (best_equiv->loc);
}
set = gen_move_insn (reg, best);
- set = emit_insn_before (set, loop->start);
+ set = loop_insn_hoist (loop, set);
if (const_equiv)
REG_NOTES (set) = gen_rtx_EXPR_LIST (REG_EQUAL,
copy_rtx (const_equiv->loc),
/* Store the memory immediately after END, which is
the NOTE_LOOP_END. */
set = gen_move_insn (copy_rtx (mem), reg);
- emit_insn_after (set, label);
+ loop_insn_emit_after (loop, 0, label, set);
}
if (loop_dump_stream)
unsigned int regno;
{
rtx insn;
- rtx set;
+ rtx set = NULL_RTX;
unsigned int new_regno;
new_regno = REGNO (replacement);
insn = next_insn_in_loop (loop, insn))
{
/* Search for the insn that copies REGNO to NEW_REGNO? */
- if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ if (INSN_P (insn)
&& (set = single_set (insn))
&& GET_CODE (SET_DEST (set)) == REG
&& REGNO (SET_DEST (set)) == new_regno
break;
}
- if (insn != NULL_RTX)
+ if (set)
{
rtx prev_insn;
rtx prev_set;
prev_insn = PREV_INSN (insn);
- if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ if (INSN_P (insn)
&& (prev_set = single_set (prev_insn))
&& GET_CODE (SET_DEST (prev_set)) == REG
&& REGNO (SET_DEST (prev_set)) == regno)
return 0;
}
\f
+/* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
+ (ignored in the interim). */
+
+static rtx
+loop_insn_emit_after (loop, where_bb, where_insn, pattern)
+ const struct loop *loop ATTRIBUTE_UNUSED;
+ basic_block where_bb ATTRIBUTE_UNUSED;
+ rtx where_insn;
+ rtx pattern;
+{
+ return emit_insn_after (pattern, where_insn);
+}
+
+
+/* If WHERE_INSN is nonzero, emit an insn for PATTERN before WHERE_INSN
+ in basic block WHERE_BB (ignored in the interim) within the loop;
+ otherwise hoist PATTERN into the loop pre-header. */
+
+rtx
+loop_insn_emit_before (loop, where_bb, where_insn, pattern)
+ const struct loop *loop;
+ basic_block where_bb ATTRIBUTE_UNUSED;
+ rtx where_insn;
+ rtx pattern;
+{
+ if (! where_insn)
+ return loop_insn_hoist (loop, pattern);
+ return emit_insn_before (pattern, where_insn);
+}
+
+
+/* Emit call insn for PATTERN before WHERE_INSN in basic block
+ WHERE_BB (ignored in the interim) within the loop. */
+
+static rtx
+loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
+ const struct loop *loop ATTRIBUTE_UNUSED;
+ basic_block where_bb ATTRIBUTE_UNUSED;
+ rtx where_insn;
+ rtx pattern;
+{
+ return emit_call_insn_before (pattern, where_insn);
+}
+
+
+/* Hoist insn for PATTERN into the loop pre-header. */
+
+rtx
+loop_insn_hoist (loop, pattern)
+ const struct loop *loop;
+ rtx pattern;
+{
+ return loop_insn_emit_before (loop, 0, loop->start, pattern);
+}
+
+
+/* Hoist call insn for PATTERN into the loop pre-header. */
+
+static rtx
+loop_call_insn_hoist (loop, pattern)
+ const struct loop *loop;
+ rtx pattern;
+{
+ return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
+}
+
+
+/* Sink insn for PATTERN after the loop end. */
+
+rtx
+loop_insn_sink (loop, pattern)
+ const struct loop *loop;
+ rtx pattern;
+{
+ return loop_insn_emit_before (loop, 0, loop->sink, pattern);
+}
+
+
+/* If the loop has multiple exits, emit insn for PATTERN before the
+ loop to ensure that it will always be executed no matter how the
+ loop exits. Otherwise, emit the insn for PATTERN after the loop,
+ since this is slightly more efficient. */
+
+static rtx
+loop_insn_sink_or_swim (loop, pattern)
+ const struct loop *loop;
+ rtx pattern;
+{
+ if (loop->exit_count)
+ return loop_insn_hoist (loop, pattern);
+ else
+ return loop_insn_sink (loop, pattern);
+}
+\f
+static void
+loop_ivs_dump (loop, file, verbose)
+ const struct loop *loop;
+ FILE *file;
+ int verbose;
+{
+ struct iv_class *bl;
+ int iv_num = 0;
+
+ if (! loop || ! file)
+ return;
+
+ for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
+ iv_num++;
+
+ fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
+
+ for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
+ {
+ loop_iv_class_dump (bl, file, verbose);
+ fputc ('\n', file);
+ }
+}
+
+
+static void
+loop_iv_class_dump (bl, file, verbose)
+ const struct iv_class *bl;
+ FILE *file;
+ int verbose ATTRIBUTE_UNUSED;
+{
+ struct induction *v;
+ rtx incr;
+ int i;
+
+ if (! bl || ! file)
+ return;
+
+ fprintf (file, "IV class for reg %d, benefit %d\n",
+ bl->regno, bl->total_benefit);
+
+ fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
+ if (bl->initial_value)
+ {
+ fprintf (file, ", init val: ");
+ print_simple_rtl (file, bl->initial_value);
+ }
+ if (bl->initial_test)
+ {
+ fprintf (file, ", init test: ");
+ print_simple_rtl (file, bl->initial_test);
+ }
+ fputc ('\n', file);
+
+ if (bl->final_value)
+ {
+ fprintf (file, " Final val: ");
+ print_simple_rtl (file, bl->final_value);
+ fputc ('\n', file);
+ }
+
+ if ((incr = biv_total_increment (bl)))
+ {
+ fprintf (file, " Total increment: ");
+ print_simple_rtl (file, incr);
+ fputc ('\n', file);
+ }
+
+ /* List the increments. */
+ for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
+ {
+ fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
+ print_simple_rtl (file, v->add_val);
+ fputc ('\n', file);
+ }
+
+ /* List the givs. */
+ for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
+ {
+ fprintf (file, " Giv%d: insn %d, benefit %d, ",
+ i, INSN_UID (v->insn), v->benefit);
+ if (v->giv_type == DEST_ADDR)
+ print_simple_rtl (file, v->mem);
+ else
+ print_simple_rtl (file, single_set (v->insn));
+ fputc ('\n', file);
+ }
+}
+
+
static void
loop_biv_dump (v, file, verbose)
const struct induction *v;
void
+debug_ivs (loop)
+ const struct loop *loop;
+{
+ loop_ivs_dump (loop, stderr, 1);
+}
+
+
+void
+debug_iv_class (bl)
+ const struct iv_class *bl;
+{
+ loop_iv_class_dump (bl, stderr, 1);
+}
+
+
+void
debug_biv (v)
const struct induction *v;
{