/* Perform various loop optimizations, including strength reduction.
- Copyright (C) 1987, 88, 89, 91-99, 2000 Free Software Foundation, Inc.
+ Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
+ 1998, 1999, 2000 Free Software Foundation, Inc.
This file is part of GNU CC.
#include "flags.h"
#include "real.h"
#include "loop.h"
+#include "cselib.h"
#include "except.h"
#include "toplev.h"
-/* Information about the current loop being processed used to compute
- the number of loop iterations for loop unrolling and doloop
- optimization. */
-static struct loop_info *current_loop_info;
-
/* Vector mapping INSN_UIDs to luids.
The luids are like uids but increase monotonically always.
We use them to see whether a jump comes from outside a given loop. */
/* Bound on pseudo register number before loop optimization.
A pseudo has valid regscan info if its number is < max_reg_before_loop. */
-int max_reg_before_loop;
+unsigned int max_reg_before_loop;
/* The value to pass to the next call of reg_scan_update. */
static int loop_max_reg;
of any registers used within the LIBCALL. */
int consec; /* Number of consecutive following insns
that must be moved with this one. */
- int regno; /* The register it sets */
+ unsigned int regno; /* The register it sets */
short lifetime; /* lifetime of that register;
may be adjusted when matching movables
that load the same value are found. */
static void mark_loop_jump PARAMS ((rtx, struct loop *));
static void prescan_loop PARAMS ((struct loop *));
static int reg_in_basic_block_p PARAMS ((rtx, rtx));
-static int consec_sets_invariant_p PARAMS ((rtx, int, rtx));
+static int consec_sets_invariant_p PARAMS ((const struct loop *,
+ rtx, int, rtx));
static int labels_in_range_p PARAMS ((rtx, int));
static void count_one_set PARAMS ((rtx, rtx, varray_type, rtx *));
static void count_loop_regs_set PARAMS ((rtx, rtx, varray_type, varray_type,
- int *, int));
+ int *, int));
static void note_addr_stored PARAMS ((rtx, rtx, void *));
static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
static int regs_match_p PARAMS ((rtx, rtx, struct movable *));
static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct movable *));
static void add_label_notes PARAMS ((rtx, rtx));
-static void move_movables PARAMS ((struct movable *, int, int, rtx, rtx, int));
-static int count_nonfixed_reads PARAMS ((rtx));
+static void move_movables PARAMS ((struct loop *loop, struct movable *,
+ int, int, int));
+static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
static void strength_reduce PARAMS ((struct loop *, int, int, int));
static void find_single_use_in_loop PARAMS ((rtx, rtx, varray_type));
static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
-static void find_mem_givs PARAMS ((rtx, rtx, int, int, rtx, rtx));
-static void record_biv PARAMS ((struct induction *, rtx, rtx, rtx, rtx, rtx *, int, int, int));
-static void check_final_value PARAMS ((struct induction *, rtx, rtx,
- unsigned HOST_WIDE_INT));
-static void record_giv PARAMS ((struct induction *, rtx, rtx, rtx, rtx, rtx, int, enum g_types, int, int, rtx *, rtx, rtx));
-static void update_giv_derive PARAMS ((rtx));
-static int basic_induction_var PARAMS ((rtx, enum machine_mode, rtx, rtx, int, rtx *, rtx *, rtx **, int *));
-static rtx simplify_giv_expr PARAMS ((rtx, int *));
-static int general_induction_var PARAMS ((rtx, rtx *, rtx *, rtx *, int, int *));
-static int consec_sets_giv PARAMS ((int, rtx, rtx, rtx, rtx *, rtx *, rtx *));
+static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
+static void record_biv PARAMS ((struct induction *, rtx, rtx, rtx, rtx, rtx *,
+ int, int, int));
+static void check_final_value PARAMS ((const struct loop *,
+ struct induction *));
+static void record_giv PARAMS ((const struct loop *, struct induction *,
+ rtx, rtx, rtx, rtx, rtx, int, enum g_types,
+ int, int, rtx *));
+static void update_giv_derive PARAMS ((const struct loop *, rtx));
+static int basic_induction_var PARAMS ((const struct loop *, rtx,
+ enum machine_mode, rtx, rtx,
+ rtx *, rtx *, rtx **, int *));
+static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, int *));
+static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
+ rtx *, rtx *, int, int *));
+static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
+ rtx, rtx, rtx *, rtx *, rtx *));
static int check_dbra_loop PARAMS ((struct loop *, int));
static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
static void combine_givs PARAMS ((struct iv_class *));
struct recombine_givs_stats;
-static int find_life_end PARAMS ((rtx, struct recombine_givs_stats *, rtx, rtx));
-static void recombine_givs PARAMS ((struct iv_class *, rtx, rtx, int));
+static int find_life_end PARAMS ((rtx, struct recombine_givs_stats *,
+ rtx, rtx));
+static void recombine_givs PARAMS ((const struct loop *, struct iv_class *,
+ int));
static int product_cheap_p PARAMS ((rtx, rtx));
-static int maybe_eliminate_biv PARAMS ((struct iv_class *, rtx, rtx, int, int, int));
-static int maybe_eliminate_biv_1 PARAMS ((rtx, rtx, struct iv_class *, int, rtx));
+static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
+ int, int, int));
+static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
+ struct iv_class *, int, rtx));
static int last_use_this_basic_block PARAMS ((rtx, rtx));
static void record_initial PARAMS ((rtx, rtx, void *));
static void update_reg_last_use PARAMS ((rtx, rtx));
static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
static void load_mems_and_recount_loop_regs_set PARAMS ((const struct loop*,
- int *));
+ int *));
static void load_mems PARAMS ((const struct loop *));
static int insert_loop_mem PARAMS ((rtx *, void *));
static int replace_loop_mem PARAMS ((rtx *, void *));
static int replace_loop_reg PARAMS ((rtx *, void *));
static void note_reg_stored PARAMS ((rtx, rtx, void *));
-static void try_copy_prop PARAMS ((const struct loop *, rtx, int));
+static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
static int replace_label PARAMS ((rtx *, void *));
typedef struct rtx_and_int {
/* Allocate and initialize auxiliary loop information. */
loops_info = xcalloc (loops->num, sizeof (struct loop_info));
for (i = 0; i < loops->num; i++)
- loops->array[i].info = loops_info + i;
+ loops->array[i].aux = loops_info + i;
/* Now find all register lifetimes. This must be done after
find_and_verify_loops, because it might reorder the insns in the
if (uid_luid[i] == 0)
uid_luid[i] = uid_luid[i - 1];
- /* If debugging and unrolling loops, we must replicate the tree
- nodes corresponding to the BLOCKs inside the loop, so that the
- original one to one mapping will remain. We sometimes unroll
- loops even when unroll_p is false, so we must always do this when
- debugging. */
- if (write_symbols != NO_DEBUG)
- find_loop_tree_blocks ();
-
/* Determine if the function has indirect jump. On some systems
this prevents low overhead loop instructions from being used. */
indirect_jump_in_function = indirect_jump_in_function_p (f);
scan_loop (loop, unroll_p, bct_p);
}
- /* Replicate the BLOCKs. */
+ /* If there were lexical blocks inside the loop, they have been
+ replicated. We will now have more than one NOTE_INSN_BLOCK_BEG
+ and NOTE_INSN_BLOCK_END for each such block. We must duplicate
+ the BLOCKs as well. */
if (write_symbols != NO_DEBUG)
- unroll_block_trees ();
+ reorder_blocks ();
end_alias_analysis ();
\f
/* Returns the next insn, in execution order, after INSN. START and
END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
- respectively. LOOP_TOP, if non-NULL, is the top of the loop in the
+ respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
insn-stream; it is used with loops that are entered near the
bottom. */
register int i;
rtx loop_start = loop->start;
rtx loop_end = loop->end;
- struct loop_info *loop_info = loop->info;
+ /* Additional information about the current loop being processed
+ that is used to compute the number of loop iterations for loop
+ unrolling and doloop optimization. */
+ struct loop_info *loop_info = LOOP_INFO (loop);
rtx p;
/* 1 if we are scanning insns that could be executed zero times. */
int maybe_never = 0;
int loop_depth = 0;
int nregs;
- current_loop_info = loop_info;
loop->top = 0;
/* Determine whether this loop starts with a jump down to a test at
which was not created by the user and not used in an exit test.
That behavior is incorrect and was removed. */
;
- else if ((tem = invariant_p (src))
+ else if ((tem = loop_invariant_p (loop, src))
&& (dependencies == 0
- || (tem2 = invariant_p (dependencies)) != 0)
+ || (tem2 = loop_invariant_p (loop, dependencies)) != 0)
&& (VARRAY_INT (set_in_loop,
REGNO (SET_DEST (set))) == 1
|| (tem1
= consec_sets_invariant_p
- (SET_DEST (set),
+ (loop, SET_DEST (set),
VARRAY_INT (set_in_loop, REGNO (SET_DEST (set))),
p)))
/* If the insn can cause a trap (such as divide by zero),
m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
m->savemode = VOIDmode;
m->regno = regno;
- /* Set M->cond if either invariant_p or consec_sets_invariant_p
- returned 2 (only conditionally invariant). */
+ /* Set M->cond if either loop_invariant_p
+ or consec_sets_invariant_p returned 2
+ (only conditionally invariant). */
m->cond = ((tem | tem1 | tem2) > 1);
m->global = (uid_luid[REGNO_LAST_UID (regno)]
> INSN_LUID (loop_end)
optimizing for code size. */
if (! optimize_size)
- move_movables (movables, threshold,
- insn_count, loop_start, loop_end, nregs);
+ move_movables (loop, movables, threshold, insn_count, nregs);
/* Now candidates that still are negative are those not moved.
Change set_in_loop to indicate that those are not actually invariant. */
load_mems_and_recount_loop_regs_set (loop, &insn_count);
for (update_start = loop_start;
- PREV_INSN (update_start) && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
+ PREV_INSN (update_start)
+ && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
update_start = PREV_INSN (update_start))
;
update_end = NEXT_INSN (loop_end);
}
}
- /* The "last use" doesn't follow the "first use"?? */
- abort ();
+ /* The "last use" that was recorded can't be found after the first
+ use. This can happen when the last use was deleted while
+ processing an inner loop, this inner loop was then completely
+ unrolled, and the outer loop is always exited after the inner loop,
+ so that everything after the first use becomes a single basic block. */
+ return 1;
}
\f
/* Compute the benefit of eliminating the insns in the block whose
rtx x, y;
struct movable *movables;
{
- int xn = REGNO (x);
- int yn = REGNO (y);
+ unsigned int xn = REGNO (x);
+ unsigned int yn = REGNO (y);
struct movable *mx, *my;
for (mx = movables; mx; mx = mx->next)
other throughout. */
static void
-move_movables (movables, threshold, insn_count, loop_start, end, nregs)
+move_movables (loop, movables, threshold, insn_count, nregs)
+ struct loop *loop;
struct movable *movables;
int threshold;
int insn_count;
- rtx loop_start;
- rtx end;
int nregs;
{
rtx new_start = 0;
register struct movable *m;
register rtx p;
+ rtx loop_start = loop->start;
+ rtx loop_end = loop->end;
/* Map of pseudo-register replacements to handle combining
when we move several insns that load the same value
into different pseudo-registers. */
if (!m->done
&& (! m->cond
- || (1 == invariant_p (m->set_src)
+ || (1 == loop_invariant_p (loop, m->set_src)
&& (m->dependencies == 0
- || 1 == invariant_p (m->dependencies))
+ || 1 == loop_invariant_p (loop, m->dependencies))
&& (m->consec == 0
- || 1 == consec_sets_invariant_p (m->set_dest,
+ || 1 == consec_sets_invariant_p (loop, m->set_dest,
m->consec + 1,
m->insn))))
&& (! m->forces || m->forces->done))
like this as a result of record_jump_cond. */
if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
- && ! invariant_p (XEXP (temp, 0)))
+ && ! loop_invariant_p (loop, XEXP (temp, 0)))
remove_note (i1, temp);
}
We can't use the moved insn because it is out of range
in uid_luid. Only the old insns have luids. */
REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
- if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
- REGNO_LAST_UID (regno) = INSN_UID (end);
+ if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (loop_end))
+ REGNO_LAST_UID (regno) = INSN_UID (loop_end);
/* Combine with this moved insn any other matching movables. */
/* Go through all the instructions in the loop, making
all the register substitutions scheduled in REG_MAP. */
- for (p = new_start; p != end; p = NEXT_INSN (p))
+ for (p = new_start; p != loop_end; p = NEXT_INSN (p))
if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
|| GET_CODE (p) == CALL_INSN)
{
in the rtx X. */
static int
-count_nonfixed_reads (x)
+count_nonfixed_reads (loop, x)
+ const struct loop *loop;
rtx x;
{
register enum rtx_code code;
return 0;
case MEM:
- return ((invariant_p (XEXP (x, 0)) != 1)
- + count_nonfixed_reads (XEXP (x, 0)));
+ return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
+ + count_nonfixed_reads (loop, XEXP (x, 0)));
default:
break;
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- value += count_nonfixed_reads (XEXP (x, i));
- else if (fmt[i] == 'E')
+ value += count_nonfixed_reads (loop, XEXP (x, i));
+ if (fmt[i] == 'E')
{
register int j;
for (j = 0; j < XVECLEN (x, i); j++)
- value += count_nonfixed_reads (XVECEXP (x, i, j));
+ value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
}
}
return value;
#endif
\f
/* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
- `has_call', `has_volatile', and `has_tablejump' within LOOP_INFO.
+ `has_call', `has_volatile', and `has_tablejump' within LOOP.
Set the global variables `unknown_address_altered',
`unknown_constant_address_altered', and `num_mem_sets'. Also, fill
in the array `loop_mems' and the list `loop_store_mems'. */
{
register int level = 1;
rtx insn;
- struct loop_info *loop_info = loop->info;
+ struct loop_info *loop_info = LOOP_INFO (loop);
rtx start = loop->start;
rtx end = loop->end;
/* The label after END. Jumping here is just like falling off the
anything stored in `loop_store_mems'. */
int
-invariant_p (x)
+loop_invariant_p (loop, x)
+ const struct loop *loop;
register rtx x;
{
register int i;
&& ! current_function_has_nonlocal_goto)
return 1;
- if (current_loop_info->has_call
+ if (LOOP_INFO (loop)->has_call
&& REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
return 0;
{
if (fmt[i] == 'e')
{
- int tem = invariant_p (XEXP (x, i));
+ int tem = loop_invariant_p (loop, XEXP (x, i));
if (tem == 0)
return 0;
if (tem == 2)
register int j;
for (j = 0; j < XVECLEN (x, i); j++)
{
- int tem = invariant_p (XVECEXP (x, i, j));
+ int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
if (tem == 0)
return 0;
if (tem == 2)
and that its source is invariant. */
static int
-consec_sets_invariant_p (reg, n_sets, insn)
+consec_sets_invariant_p (loop, reg, n_sets, insn)
+ const struct loop *loop;
int n_sets;
rtx reg, insn;
{
- register rtx p = insn;
- register int regno = REGNO (reg);
+ rtx p = insn;
+ unsigned int regno = REGNO (reg);
rtx temp;
/* Number of sets we have to insist on finding after INSN. */
int count = n_sets - 1;
&& GET_CODE (SET_DEST (set)) == REG
&& REGNO (SET_DEST (set)) == regno)
{
- this = invariant_p (SET_SRC (set));
+ this = loop_invariant_p (loop, SET_SRC (set));
if (this != 0)
value |= this;
else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
notes are OK. */
this = (CONSTANT_P (XEXP (temp, 0))
|| (find_reg_note (p, REG_RETVAL, NULL_RTX)
- && invariant_p (XEXP (temp, 0))));
+ && loop_invariant_p (loop, XEXP (temp, 0))));
if (this != 0)
value |= this;
}
}
VARRAY_INT (set_in_loop, regno) = old;
- /* If invariant_p ever returned 2, we return 2. */
+ /* If loop_invariant_p ever returned 2, we return 2. */
return 1 + (value & 2);
}
&& GET_CODE (SET_DEST (PATTERN (p))) == REG
&& REGNO (SET_DEST (PATTERN (p))) == regno)
{
- if (!invariant_p (SET_SRC (PATTERN (p)), table))
+ if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
return 0;
}
}
free (last_set);
}
\f
-/* Given a loop that is bounded by LOOP_START and LOOP_END
- and that is entered at LOOP_SCAN_START,
- return 1 if the register set in SET contained in insn INSN is used by
- any insn that precedes INSN in cyclic order starting
- from the loop entry point.
+/* Given a loop that is bounded by LOOP->START and LOOP->END and that
+ is entered at LOOP->SCAN_START, return 1 if the register set in SET
+ contained in insn INSN is used by any insn that precedes INSN in
+ cyclic order starting from the loop entry point.
We don't want to use INSN_LUID here because if we restrict INSN to those
that have a valid INSN_LUID, it means we cannot move an invariant out
/* Givs made from biv increments are always splittable for loop unrolling.
Since there is no regscan info for them, we have to keep track of them
separately. */
-int first_increment_giv, last_increment_giv;
+unsigned int first_increment_giv, last_increment_giv;
/* Communication with routines called via `note_stores'. */
valid index in several tables including n_times_set and regno_last_uid.
This does not cause a problem here, because the added registers cannot be
givs outside of their loop, and hence will never be reconsidered.
- But scan_loop must check regnos to make sure they are in bounds.
-
- LOOP_SCAN_START is the first instruction in the loop, as the loop would
- actually be executed. END is the NOTE_INSN_LOOP_END. LOOP_TOP is
- the first instruction in the loop, as it is layed out in the
- instruction stream. LOOP_START is the NOTE_INSN_LOOP_BEG.
- LOOP_CONT is the NOTE_INSN_LOOP_CONT. */
+ But scan_loop must check regnos to make sure they are in bounds. */
static void
strength_reduce (loop, insn_count, unroll_p, bct_p)
int past_loop_latch = 0;
/* Temporary list pointers for traversing loop_iv_list. */
struct iv_class *bl, **backbl;
- struct loop_info *loop_info = loop->info;
+ struct loop_info *loop_info = LOOP_INFO (loop);
/* Ratio of extra register life span we can justify
for saving an instruction. More if loop doesn't call subroutines
since in that case saving an insn makes more difference
/* If loop_scan_start points to the loop exit test, we have to be wary of
subversive use of gotos inside expression statements. */
if (prev_nonnote_insn (loop_scan_start) != prev_nonnote_insn (loop_start))
- maybe_multiple = back_branch_in_range_p (loop_scan_start, loop_start, loop_end);
+ maybe_multiple = back_branch_in_range_p (loop, loop_scan_start);
VARRAY_INT_INIT (reg_iv_type, max_reg_before_loop, "reg_iv_type");
VARRAY_GENERIC_PTR_INIT (reg_iv_info, max_reg_before_loop, "reg_iv_info");
{
int multi_insn_incr = 0;
- if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
- dest_reg, p, loop->level,
- &inc_val, &mult_val,
+ if (basic_induction_var (loop, SET_SRC (set),
+ GET_MODE (SET_SRC (set)),
+ dest_reg, p, &inc_val, &mult_val,
&location, &multi_insn_incr))
{
/* It is a possible basic induction variable.
 	     matter.  Check to see if the target of this branch is on the
 	     loop->exit_labels list.  */
- for (label = uid_loop[INSN_UID (loop_start)]->exit_labels;
- label;
- label = LABEL_NEXTREF (label))
+ for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
if (XEXP (label, 0) == JUMP_LABEL (p))
break;
if (GET_CODE (p) == JUMP_INSN
&& JUMP_LABEL (p) != 0
&& next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
- && (test = get_condition_for_loop (p)) != 0
+ && (test = get_condition_for_loop (loop, p)) != 0
&& GET_CODE (XEXP (test, 0)) == REG
&& REGNO (XEXP (test, 0)) < max_reg_before_loop
&& (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
&& GET_CODE (src) == PLUS
&& GET_CODE (XEXP (src, 0)) == REG
&& CONSTANT_P (XEXP (src, 1))
- && ((increment = biv_total_increment (bl, loop_start, loop_end))
- != NULL_RTX))
+ && ((increment = biv_total_increment (bl)) != NULL_RTX))
{
- int regno = REGNO (XEXP (src, 0));
+ unsigned int regno = REGNO (XEXP (src, 0));
for (bl2 = loop_iv_list; bl2; bl2 = bl2->next)
if (bl2->regno == regno)
/* Now, can we transform this biv into a giv? */
if (bl2
&& bl2->biv_count == 1
- && rtx_equal_p (increment,
- biv_total_increment (bl2, loop_start, loop_end))
+ && rtx_equal_p (increment, biv_total_increment (bl2))
/* init_insn is only set to insns that are before loop_start
without any intervening labels. */
&& ! reg_set_between_p (bl2->biv->src_reg,
&SET_SRC (single_set (bl->biv->insn)),
copy_rtx (src), 0))
{
- rtx dominator = uid_loop[INSN_UID (loop_start)]->cont_dominator;
+ rtx dominator = loop->cont_dominator;
rtx giv = bl->biv->src_reg;
rtx giv_insn = bl->biv->insn;
rtx after_giv = NEXT_INSN (giv_insn);
markers. */
if (n_extra_increment && ! loop_info->has_volatile)
{
- int nregs = first_increment_giv + n_extra_increment;
+ unsigned int nregs = first_increment_giv + n_extra_increment;
/* Reallocate reg_iv_type and reg_iv_info. */
VARRAY_GROW (reg_iv_type, nregs);
continue;
if (/* SET_SRC is a giv. */
- (general_induction_var (SET_SRC (set), &src_reg, &add_val,
+ (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
&mult_val, 0, &benefit)
/* Equivalent expression is a giv. */
|| ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
- && general_induction_var (XEXP (regnote, 0), &src_reg,
+ && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
&add_val, &mult_val, 0,
&benefit)))
/* Don't try to handle any regs made by loop optimization.
/* This must be the only place where the register is set. */
&& (VARRAY_INT (n_times_set, REGNO (dest_reg)) == 1
/* or all sets must be consecutive and make a giv. */
- || (benefit = consec_sets_giv (benefit, p,
+ || (benefit = consec_sets_giv (loop, benefit, p,
src_reg, dest_reg,
&add_val, &mult_val,
&last_consec_insn))))
if (VARRAY_INT (n_times_set, REGNO (dest_reg)) != 1)
p = last_consec_insn;
- record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
- DEST_REG, not_every_iteration, maybe_multiple,
- NULL_PTR, loop_start, loop_end);
+ record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
+ benefit, DEST_REG, not_every_iteration,
+ maybe_multiple, NULL_PTR);
}
}
/* This resulted in worse code on a VAX 8600. I wonder if it
still does. */
if (GET_CODE (p) == INSN)
- find_mem_givs (PATTERN (p), p, not_every_iteration, maybe_multiple,
- loop_start, loop_end);
+ find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
+ maybe_multiple);
#endif
/* Update the status of whether giv can derive other givs. This can
change when we pass a label or an insn that updates a biv. */
if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
|| GET_CODE (p) == CODE_LABEL)
- update_giv_derive (p);
+ update_giv_derive (loop, p);
/* Past CODE_LABEL, we get to insns that may be executed multiple
times. The only way we can be sure that they can't is if every
 	     matter.  Check to see if the target of this branch is on the
 	     loop->exit_labels list.  */
- for (label = uid_loop[INSN_UID (loop_start)]->exit_labels;
- label;
- label = LABEL_NEXTREF (label))
+ for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
if (XEXP (label, 0) == JUMP_LABEL (p))
break;
for (v = bl->giv; v; v = v->next_iv)
if (! v->replaceable && ! v->not_replaceable)
- check_final_value (v, loop_start, loop_end, loop_info->n_iterations);
+ check_final_value (loop, v);
}
/* Try to prove that the loop counter variable (if any) is always
&& ! bl->nonneg
#endif
&& ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
- || ((final_value = final_biv_value (bl, loop_start, loop_end,
- loop_info->n_iterations))
+ || ((final_value = final_biv_value (loop, bl))
#ifdef HAVE_decrement_and_branch_until_zero
&& ! bl->nonneg
#endif
))
- bl->eliminable = maybe_eliminate_biv (bl, loop_start, loop_end, 0,
- threshold, insn_count);
+ bl->eliminable = maybe_eliminate_biv (loop, bl, 0, threshold,
+ insn_count);
else
{
if (loop_dump_stream)
VARRAY_GROW (reg_iv_type, nregs);
VARRAY_GROW (reg_iv_info, nregs);
}
- recombine_givs (bl, loop_start, loop_end, unroll_p);
+ recombine_givs (loop, bl, unroll_p);
/* Reduce each giv that we decided to reduce. */
loop to ensure that it will always be executed no matter
how the loop exits. Otherwise, emit the insn after the loop,
since this is slightly more efficient. */
- if (uid_loop[INSN_UID (loop_start)]->exit_count)
+ if (loop->exit_count)
insert_before = loop_start;
else
insert_before = end_insert_before;
doing so in the rare cases where it can occur. */
if (all_reduced == 1 && bl->eliminable
- && maybe_eliminate_biv (bl, loop_start, loop_end, 1,
- threshold, insn_count))
-
+ && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
{
/* ?? If we created a new test to bypass the loop entirely,
or otherwise drop straight in, based on this test, then
loop to ensure that it will always be executed no matter
how the loop exits. Otherwise, emit the insn after the
loop, since this is slightly more efficient. */
- if (uid_loop[INSN_UID (loop_start)]->exit_count)
+ if (loop->exit_count)
insert_before = loop_start;
else
insert_before = end_insert_before;
 more than once in each loop iteration. */
static void
-find_mem_givs (x, insn, not_every_iteration, maybe_multiple, loop_start,
- loop_end)
+find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
+ const struct loop *loop;
rtx x;
rtx insn;
int not_every_iteration, maybe_multiple;
- rtx loop_start, loop_end;
{
register int i, j;
register enum rtx_code code;
it comes time to combine a set of related DEST_ADDR GIVs, since
this one would not be seen. */
- if (general_induction_var (XEXP (x, 0), &src_reg, &add_val,
+ if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
&mult_val, 1, &benefit))
{
/* Found one; record it. */
struct induction *v
= (struct induction *) oballoc (sizeof (struct induction));
- record_giv (v, insn, src_reg, addr_placeholder, mult_val,
+ record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
add_val, benefit, DEST_ADDR, not_every_iteration,
- maybe_multiple, &XEXP (x, 0), loop_start, loop_end);
+ maybe_multiple, &XEXP (x, 0));
v->mem_mode = GET_MODE (x);
}
fmt = GET_RTX_FORMAT (code);
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
if (fmt[i] == 'e')
- find_mem_givs (XEXP (x, i), insn, not_every_iteration, maybe_multiple,
- loop_start, loop_end);
+ find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
+ maybe_multiple);
else if (fmt[i] == 'E')
for (j = 0; j < XVECLEN (x, i); j++)
- find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
- maybe_multiple, loop_start, loop_end);
+ find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
+ maybe_multiple);
}
\f
/* Fill in the data about one biv update.
LOCATION points to the place where this giv's value appears in INSN. */
static void
-record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
- type, not_every_iteration, maybe_multiple, location, loop_start,
- loop_end)
+record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
+ type, not_every_iteration, maybe_multiple, location)
+ const struct loop *loop;
struct induction *v;
rtx insn;
rtx src_reg;
enum g_types type;
int not_every_iteration, maybe_multiple;
rtx *location;
- rtx loop_start, loop_end;
{
struct induction *b;
struct iv_class *bl;
if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
/* Previous line always fails if INSN was moved by loop opt. */
- && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
+ && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
+ < INSN_LUID (loop->end)
&& (! not_every_iteration
|| last_use_this_basic_block (dest_reg, insn)))
{
biv update to before it, then this giv is not replaceable. */
if (v->replaceable)
for (b = bl->biv; b; b = b->next_iv)
- if (back_branch_in_range_p (b->insn, loop_start, loop_end))
+ if (back_branch_in_range_p (loop, b->insn))
{
v->replaceable = 0;
v->not_replaceable = 1;
have been identified. */
static void
-check_final_value (v, loop_start, loop_end, n_iterations)
+check_final_value (loop, v)
+ const struct loop *loop;
struct induction *v;
- rtx loop_start, loop_end;
- unsigned HOST_WIDE_INT n_iterations;
{
struct iv_class *bl;
rtx final_value = 0;
v->replaceable = 0;
#endif
- if ((final_value = final_giv_value (v, loop_start, loop_end, n_iterations))
+ if ((final_value = final_giv_value (loop, v))
&& (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
{
int biv_increment_seen = 0;
while (1)
{
p = NEXT_INSN (p);
- if (p == loop_end)
- p = NEXT_INSN (loop_start);
+ if (p == loop->end)
+ p = NEXT_INSN (loop->start);
if (p == v->insn)
break;
while (1)
{
p = NEXT_INSN (p);
- if (p == loop_end)
- p = NEXT_INSN (loop_start);
+ if (p == loop->end)
+ p = NEXT_INSN (loop->start);
if (p == last_giv_use)
break;
if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
&& LABEL_NAME (JUMP_LABEL (p))
&& ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
- && loop_insn_first_p (loop_start, JUMP_LABEL (p)))
+ && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
|| (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
- && loop_insn_first_p (JUMP_LABEL (p), loop_end))))
+ && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
{
v->replaceable = 0;
v->not_replaceable = 1;
The cases we look at are when a label or an update to a biv is passed. */
static void
-update_giv_derive (p)
+update_giv_derive (loop, p)
+ const struct loop *loop;
rtx p;
{
struct iv_class *bl;
tem = 0;
if (biv->mult_val == const1_rtx)
- tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
+ tem = simplify_giv_expr (loop,
+ gen_rtx_MULT (giv->mode,
biv->add_val,
giv->mult_val),
&dummy);
if (tem && giv->derive_adjustment)
tem = simplify_giv_expr
- (gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
+ (loop,
+ gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
&dummy);
if (tem)
If we cannot find a biv, we return 0. */
static int
-basic_induction_var (x, mode, dest_reg, p, level, inc_val, mult_val,
+basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val,
location, multi_insn_incr)
+ const struct loop *loop;
register rtx x;
enum machine_mode mode;
rtx dest_reg;
rtx p;
- int level;
rtx *inc_val;
rtx *mult_val;
rtx **location;
return 0;
arg = *argp;
- if (invariant_p (arg) != 1)
+ if (loop_invariant_p (loop, arg) != 1)
return 0;
*inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
/* If this is a SUBREG for a promoted variable, check the inner
value. */
if (SUBREG_PROMOTED_VAR_P (x))
- return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
- dest_reg, p, level,
- inc_val, mult_val, location,
+ return basic_induction_var (loop, SUBREG_REG (x),
+ GET_MODE (SUBREG_REG (x)),
+ dest_reg, p, inc_val, mult_val, location,
multi_insn_incr);
return 0;
&& (GET_MODE_CLASS (GET_MODE (SET_DEST (set)))
== MODE_INT)
&& SUBREG_REG (SET_DEST (set)) == x))
- && basic_induction_var (SET_SRC (set),
+ && basic_induction_var (loop, SET_SRC (set),
(GET_MODE (SET_SRC (set)) == VOIDmode
? GET_MODE (x)
: GET_MODE (SET_SRC (set))),
- dest_reg, insn, level,
+ dest_reg, insn,
inc_val, mult_val, location,
multi_insn_incr))
{
as a biv of the outer loop,
causing code to be moved INTO the inner loop. */
case MEM:
- if (invariant_p (x) != 1)
+ if (loop_invariant_p (loop, x) != 1)
return 0;
case CONST_INT:
case SYMBOL_REF:
/* convert_modes aborts if we try to convert to or from CCmode, so just
exclude that case. It is very unlikely that a condition code value
would be a useful iterator anyways. */
- if (level == 0
+ if (loop->level == 1
&& GET_MODE_CLASS (mode) != MODE_CC
&& GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
{
return 0;
case SIGN_EXTEND:
- return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
- dest_reg, p, level, inc_val, mult_val,
- location, multi_insn_incr);
+ return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
+ dest_reg, p, inc_val, mult_val, location,
+ multi_insn_incr);
case ASHIFTRT:
/* Similar, since this can be a sign extension. */
&& INTVAL (XEXP (x, 1)) >= 0
&& GET_CODE (SET_SRC (set)) == ASHIFT
&& XEXP (x, 1) == XEXP (SET_SRC (set), 1)
- && basic_induction_var (XEXP (SET_SRC (set), 0),
+ && basic_induction_var (loop, XEXP (SET_SRC (set), 0),
GET_MODE (XEXP (x, 0)),
- dest_reg, insn, level, inc_val, mult_val,
+ dest_reg, insn, inc_val, mult_val,
location, multi_insn_incr))
{
*multi_insn_incr = 1;
such that the value of X is biv * mult + add; */
static int
-general_induction_var (x, src_reg, add_val, mult_val, is_addr, pbenefit)
+general_induction_var (loop, x, src_reg, add_val, mult_val, is_addr, pbenefit)
+ const struct loop *loop;
rtx x;
rtx *src_reg;
rtx *add_val;
char *storage;
/* If this is an invariant, forget it, it isn't a giv. */
- if (invariant_p (x) == 1)
+ if (loop_invariant_p (loop, x) == 1)
return 0;
/* See if the expression could be a giv and get its form.
Mark our place on the obstack in case we don't find a giv. */
storage = (char *) oballoc (0);
*pbenefit = 0;
- x = simplify_giv_expr (x, pbenefit);
+ x = simplify_giv_expr (loop, x, pbenefit);
if (x == 0)
{
obfree (storage);
static int cmp_recombine_givs_stats PARAMS ((const PTR, const PTR));
static rtx
-simplify_giv_expr (x, benefit)
+simplify_giv_expr (loop, x, benefit)
+ const struct loop *loop;
rtx x;
int *benefit;
{
switch (GET_CODE (x))
{
case PLUS:
- arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
- arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
+ arg0 = simplify_giv_expr (loop, XEXP (x, 0), benefit);
+ arg1 = simplify_giv_expr (loop, XEXP (x, 1), benefit);
if (arg0 == 0 || arg1 == 0)
return NULL_RTX;
case PLUS:
/* (a + invar_1) + invar_2. Associate. */
return
- simplify_giv_expr (gen_rtx_PLUS (mode,
+ simplify_giv_expr (loop,
+ gen_rtx_PLUS (mode,
XEXP (arg0, 0),
gen_rtx_PLUS (mode,
XEXP (arg0, 1),
if (GET_CODE (arg1) == PLUS)
return
- simplify_giv_expr (gen_rtx_PLUS (mode,
+ simplify_giv_expr (loop,
+ gen_rtx_PLUS (mode,
gen_rtx_PLUS (mode, arg0,
XEXP (arg1, 0)),
XEXP (arg1, 1)),
if (!rtx_equal_p (arg0, arg1))
return NULL_RTX;
- return simplify_giv_expr (gen_rtx_MULT (mode,
+ return simplify_giv_expr (loop,
+ gen_rtx_MULT (mode,
XEXP (arg0, 0),
gen_rtx_PLUS (mode,
XEXP (arg0, 1),
case MINUS:
/* Handle "a - b" as "a + b * (-1)". */
- return simplify_giv_expr (gen_rtx_PLUS (mode,
+ return simplify_giv_expr (loop,
+ gen_rtx_PLUS (mode,
XEXP (x, 0),
gen_rtx_MULT (mode,
XEXP (x, 1),
benefit);
case MULT:
- arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
- arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
+ arg0 = simplify_giv_expr (loop, XEXP (x, 0), benefit);
+ arg1 = simplify_giv_expr (loop, XEXP (x, 1), benefit);
if (arg0 == 0 || arg1 == 0)
return NULL_RTX;
case MULT:
/* (a * invar_1) * invar_2. Associate. */
- return simplify_giv_expr (gen_rtx_MULT (mode,
+ return simplify_giv_expr (loop,
+ gen_rtx_MULT (mode,
XEXP (arg0, 0),
gen_rtx_MULT (mode,
XEXP (arg0, 1),
case PLUS:
/* (a + invar_1) * invar_2. Distribute. */
- return simplify_giv_expr (gen_rtx_PLUS (mode,
+ return simplify_giv_expr (loop,
+ gen_rtx_PLUS (mode,
gen_rtx_MULT (mode,
XEXP (arg0, 0),
arg1),
return 0;
return
- simplify_giv_expr (gen_rtx_MULT (mode,
+ simplify_giv_expr (loop,
+ gen_rtx_MULT (mode,
XEXP (x, 0),
GEN_INT ((HOST_WIDE_INT) 1
<< INTVAL (XEXP (x, 1)))),
case NEG:
/* "-a" is "a * (-1)" */
- return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
+ return simplify_giv_expr (loop,
+ gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
benefit);
case NOT:
/* "~a" is "-a - 1". Silly, but easy. */
- return simplify_giv_expr (gen_rtx_MINUS (mode,
+ return simplify_giv_expr (loop,
+ gen_rtx_MINUS (mode,
gen_rtx_NEG (mode, XEXP (x, 0)),
const1_rtx),
benefit);
if (v->derive_adjustment)
tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
- return simplify_giv_expr (tem, benefit);
+ return simplify_giv_expr (loop, tem, benefit);
}
default:
/* If it isn't an induction variable, and it is invariant, we
may be able to simplify things further by looking through
the bits we just moved outside the loop. */
- if (invariant_p (x) == 1)
+ if (loop_invariant_p (loop, x) == 1)
{
struct movable *m;
/* If we match another movable, we must use that, as
this one is going away. */
if (m->match)
- return simplify_giv_expr (m->match->set_dest, benefit);
+ return simplify_giv_expr (loop, m->match->set_dest,
+ benefit);
/* If consec is non-zero, this is a member of a group of
instructions that were moved together. We handle this
|| GET_CODE (tem) == CONST_INT
|| GET_CODE (tem) == SYMBOL_REF)
{
- tem = simplify_giv_expr (tem, benefit);
+ tem = simplify_giv_expr (loop, tem, benefit);
if (tem)
return tem;
}
&& GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
&& GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
{
- tem = simplify_giv_expr (XEXP (tem, 0), benefit);
+ tem = simplify_giv_expr (loop, XEXP (tem, 0),
+ benefit);
if (tem)
return tem;
}
if (GET_CODE (x) == USE)
x = XEXP (x, 0);
- if (invariant_p (x) == 1)
+ if (loop_invariant_p (loop, x) == 1)
{
if (GET_CODE (x) == CONST_INT)
return x;
*MULT_VAL and *ADD_VAL. */
static int
-consec_sets_giv (first_benefit, p, src_reg, dest_reg,
+consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
add_val, mult_val, last_consec_insn)
+ const struct loop *loop;
int first_benefit;
rtx p;
rtx src_reg;
&& (set = single_set (p))
&& GET_CODE (SET_DEST (set)) == REG
&& SET_DEST (set) == dest_reg
- && (general_induction_var (SET_SRC (set), &src_reg,
+ && (general_induction_var (loop, SET_SRC (set), &src_reg,
add_val, mult_val, 0, &benefit)
/* Giv created by equivalent expression. */
|| ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
- && general_induction_var (XEXP (temp, 0), &src_reg,
+ && general_induction_var (loop, XEXP (temp, 0), &src_reg,
add_val, mult_val, 0, &benefit)))
&& src_reg == v->src_reg)
{
This tends to shorten giv lifetimes, and helps the next step:
try to derive givs from other givs. */
static void
-recombine_givs (bl, loop_start, loop_end, unroll_p)
+recombine_givs (loop, bl, unroll_p)
+ const struct loop *loop;
struct iv_class *bl;
- rtx loop_start, loop_end;
int unroll_p;
{
struct induction *v, **giv_array, *last_giv;
else
{
stats[i].end_luid = uid_luid[REGNO_LAST_UID (regno)];
- if (stats[i].end_luid > INSN_LUID (loop_end))
+ if (stats[i].end_luid > INSN_LUID (loop->end))
{
stats[i].end_luid = -1;
ends_need_computing++;
if (ends_need_computing)
{
rtx biv = bl->biv->src_reg;
- rtx p = loop_end;
+ rtx p = loop->end;
do
{
- if (p == loop_start)
- p = loop_end;
+ if (p == loop->start)
+ p = loop->end;
p = PREV_INSN (p);
if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
continue;
derived giv would defeat the purpose of reducing register
pressure.
??? We could arrange to move the insn. */
- && ((unsigned) stats[i].end_luid - INSN_LUID (loop_start)
- > (unsigned) stats[i].start_luid - INSN_LUID (loop_start))
+ && ((unsigned) stats[i].end_luid - INSN_LUID (loop->start)
+ > (unsigned) stats[i].start_luid - INSN_LUID (loop->start))
&& rtx_equal_p (last_giv->mult_val, v->mult_val)
/* ??? Could handle libcalls, but would need more logic. */
&& ! find_reg_note (v->insn, REG_RETVAL, NULL_RTX)
int compare_and_branch;
rtx loop_start = loop->start;
rtx loop_end = loop->end;
- struct loop_info *loop_info = loop->info;
+ struct loop_info *loop_info = LOOP_INFO (loop);
/* If last insn is a conditional branch, and the insn before tests a
register value, try to optimize it. Otherwise, we can't do anything. */
jump = PREV_INSN (loop_end);
- comparison = get_condition_for_loop (jump);
+ comparison = get_condition_for_loop (loop, jump);
if (comparison == 0)
return 0;
if (GET_CODE (p) != JUMP_INSN)
continue;
- before_comparison = get_condition_for_loop (p);
+ before_comparison = get_condition_for_loop (loop, p);
if (before_comparison
&& XEXP (before_comparison, 0) == bl->biv->dest_reg
&& GET_CODE (before_comparison) == LT
which is reversible. */
int reversible_mem_store = 1;
- if (bl->giv_count == 0
- && ! uid_loop[INSN_UID (loop_start)]->exit_count)
+ if (bl->giv_count == 0 && ! loop->exit_count)
{
rtx bivreg = regno_reg_rtx[bl->regno];
{
for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
- num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
+ num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
/* If the loop has a single store, and the destination address is
invariant, then we can't reverse the loop, because this address
reversible_mem_store
= (! unknown_address_altered
&& ! unknown_constant_address_altered
- && ! invariant_p (XEXP (XEXP (loop_store_mems, 0), 0)));
+ && ! loop_invariant_p (loop,
+ XEXP (XEXP (loop_store_mems, 0),
+ 0)));
/* If the store depends on a register that is set after the
store, it depends on the initial value, and is thus not
??? If the insns which initialize the comparison value as
a whole compute an invariant result, then we could move
them out of the loop and proceed with loop reversal. */
- if (!invariant_p (comparison_value))
+ if (! loop_invariant_p (loop, comparison_value))
return 0;
if (GET_CODE (comparison_value) == CONST_INT)
\f
/* Verify whether the biv BL appears to be eliminable,
based on the insns in the loop that refer to it.
- LOOP_START is the first insn of the loop, and END is the end insn.
If ELIMINATE_P is non-zero, actually do the elimination.
start of the loop. */
static int
-maybe_eliminate_biv (bl, loop_start, loop_end, eliminate_p, threshold,
- insn_count)
+maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
+ const struct loop *loop;
struct iv_class *bl;
- rtx loop_start;
- rtx loop_end;
int eliminate_p;
int threshold, insn_count;
{
rtx reg = bl->biv->dest_reg;
+ rtx loop_start = loop->start;
+ rtx loop_end = loop->end;
rtx p;
/* Scan all insns in the loop, stopping if we find one that uses the
if (set && GET_CODE (SET_DEST (set)) == REG)
{
- int regno = REGNO (SET_DEST (set));
+ unsigned int regno = REGNO (SET_DEST (set));
if (regno < max_reg_before_loop
&& REG_IV_TYPE (regno) == GENERAL_INDUCT
}
if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
&& reg_mentioned_p (reg, PATTERN (p))
- && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
+ && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
+ eliminate_p, where))
{
if (loop_dump_stream)
fprintf (loop_dump_stream,
the loop. */
static int
-maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
+maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where)
+ const struct loop *loop;
rtx x, insn;
struct iv_class *bl;
int eliminate_p;
}
else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
{
- if (invariant_p (arg) == 1)
+ if (loop_invariant_p (loop, arg) == 1)
{
/* Look for giv with constant positive mult_val and nonconst
add_val. Insert insns to compute new compare value.
switch (fmt[i])
{
case 'e':
- if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
+ if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
eliminate_p, where))
return 0;
break;
case 'E':
for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
+ if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
eliminate_p, where))
return 0;
break;
}
}
\f
-/* Given a jump insn JUMP, return the condition that will cause it to branch
- to its JUMP_LABEL. If the condition cannot be understood, or is an
- inequality floating-point comparison which needs to be reversed, 0 will
- be returned.
-
- If EARLIEST is non-zero, it is a pointer to a place where the earliest
- insn used in locating the condition was found. If a replacement test
- of the condition is desired, it should be placed in front of that
- insn and we will be sure that the inputs are still valid.
-
- The condition will be returned in a canonical form to simplify testing by
- callers. Specifically:
+/* Given an insn INSN and condition COND, return the condition in a
+ canonical form to simplify testing by callers. Specifically:
(1) The code will always be a comparison operation (EQ, NE, GT, etc.).
(2) Both operands will be machine operands; (cc0) will have been replaced.
(3) If an operand is a constant, it will be the second operand.
(4) (LE x const) will be replaced with (LT x <const+1>) and similarly
- for GE, GEU, and LEU. */
+ for GE, GEU, and LEU.
+
+ If the condition cannot be understood, or is an inequality floating-point
+ comparison which needs to be reversed, 0 will be returned.
+
+ If REVERSE is non-zero, then reverse the condition prior to canonicalizing it.
+
+ If EARLIEST is non-zero, it is a pointer to a place where the earliest
+ insn used in locating the condition was found. If a replacement test
+ of the condition is desired, it should be placed in front of that
+ insn and we will be sure that the inputs are still valid. */
rtx
-get_condition (jump, earliest)
- rtx jump;
+canonicalize_condition (insn, cond, reverse, earliest)
+ rtx insn;
+ rtx cond;
+ int reverse;
rtx *earliest;
{
enum rtx_code code;
- rtx prev = jump;
+ rtx prev = insn;
rtx set;
rtx tem;
rtx op0, op1;
int did_reverse_condition = 0;
enum machine_mode mode;
- /* If this is not a standard conditional jump, we can't parse it. */
- if (GET_CODE (jump) != JUMP_INSN
- || ! condjump_p (jump) || simplejump_p (jump))
- return 0;
+ code = GET_CODE (cond);
+ mode = GET_MODE (cond);
+ op0 = XEXP (cond, 0);
+ op1 = XEXP (cond, 1);
- code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
- mode = GET_MODE (XEXP (SET_SRC (PATTERN (jump)), 0));
- op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
- op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
+ if (reverse)
+ {
+ code = reverse_condition (code);
+ did_reverse_condition ^= 1;
+ }
if (earliest)
- *earliest = jump;
-
- /* If this branches to JUMP_LABEL when the condition is false, reverse
- the condition. */
- if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
- && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
- code = reverse_condition (code), did_reverse_condition ^= 1;
+ *earliest = insn;
/* If we are comparing a register with zero, see if the register is set
in the previous insn to a COMPARE or a comparison operation. Perform
if (reverse_code)
{
code = reverse_condition (code);
+ if (code == UNKNOWN)
+ return 0;
did_reverse_condition ^= 1;
reverse_code = 0;
}
}
/* If this was floating-point and we reversed anything other than an
- EQ or NE, return zero. */
+ EQ or NE or (UN)ORDERED, return zero. */
if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
- && did_reverse_condition && code != NE && code != EQ
+ && did_reverse_condition
+ && code != NE && code != EQ && code != UNORDERED && code != ORDERED
&& ! flag_fast_math
&& GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
return 0;
return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
+/* Given a jump insn JUMP, return the condition that will cause it to branch
+ to its JUMP_LABEL. If the condition cannot be understood, or is an
+ inequality floating-point comparison which needs to be reversed, 0 will
+ be returned.
+
+ If EARLIEST is non-zero, it is a pointer to a place where the earliest
+ insn used in locating the condition was found. If a replacement test
+ of the condition is desired, it should be placed in front of that
+ insn and we will be sure that the inputs are still valid. */
+
+rtx
+get_condition (jump, earliest)
+ rtx jump;
+ rtx *earliest;
+{
+ rtx cond;
+ int reverse;
+
+ /* If this is not a standard conditional jump, we can't parse it. */
+ if (GET_CODE (jump) != JUMP_INSN
+ || ! condjump_p (jump) || simplejump_p (jump))
+ return 0;
+
+ cond = XEXP (SET_SRC (PATTERN (jump)), 0);
+
+ /* If this branches to JUMP_LABEL when the condition is false, reverse
+ the condition. */
+ reverse
+ = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
+ && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump);
+
+ return canonicalize_condition (jump, cond, reverse, earliest);
+}
+
/* Similar to above routine, except that we also put an invariant last
unless both operands are invariants. */
rtx
-get_condition_for_loop (x)
+get_condition_for_loop (loop, x)
+ const struct loop *loop;
rtx x;
{
rtx comparison = get_condition (x, NULL_PTR);
if (comparison == 0
- || ! invariant_p (XEXP (comparison, 0))
- || invariant_p (XEXP (comparison, 1)))
+ || ! loop_invariant_p (loop, XEXP (comparison, 0))
+ || loop_invariant_p (loop, XEXP (comparison, 1)))
return comparison;
return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
unsigned HOST_WIDE_INT n_iterations;
rtx loop_start = loop->start;
rtx loop_end = loop->end;
- struct loop_info *loop_info = loop->info;
+ struct loop_info *loop_info = LOOP_INFO (loop);
int loop_num = loop->num;
#if 0
/* Mark all enclosing loops that they cannot use count register. */
for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
{
- outer_loop_info = outer_loop->info;
+ outer_loop_info = LOOP_INFO (outer_loop);
outer_loop_info->used_count_register = 1;
}
instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
if (loop_mems_idx == 0)
return;
+ /* Find start of the extended basic block that enters the loop. */
+ for (p = loop->start;
+ PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
+ p = PREV_INSN (p))
+ ;
+
+ cselib_init ();
+
+ /* Build table of mems that get set to constant values before the
+ loop. */
+ for (; p != loop->start; p = NEXT_INSN (p))
+ cselib_process_insn (p);
+
/* Check to see if it's possible that some instructions in the
loop are never executed. */
for (p = next_insn_in_loop (loop, loop->scan_start);
rtx mem_list_entry;
if (MEM_VOLATILE_P (mem)
- || invariant_p (XEXP (mem, 0)) != 1)
+ || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
/* There's no telling whether or not MEM is modified. */
loop_mems[i].optimize = 0;
loop_mems[i].optimize = 0;
else
{
- int j;
+ /* Load the memory immediately before LOOP->START, which is
+ the NOTE_LOOP_BEG. */
+ cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
rtx set;
+ rtx best = mem;
+ int j;
+ struct elt_loc_list *const_equiv = 0;
- /* Load the memory immediately before START, which is
- the NOTE_LOOP_BEG. */
- set = gen_move_insn (reg, mem);
- emit_insn_before (set, loop->start);
+ if (e)
+ {
+ struct elt_loc_list *equiv;
+ struct elt_loc_list *best_equiv = 0;
+ for (equiv = e->locs; equiv; equiv = equiv->next)
+ {
+ if (CONSTANT_P (equiv->loc))
+ const_equiv = equiv;
+ else if (GET_CODE (equiv->loc) == REG)
+ best_equiv = equiv;
+ }
+ /* Use the constant equivalence if that is cheap enough. */
+ if (! best_equiv)
+ best_equiv = const_equiv;
+ else if (const_equiv
+ && (rtx_cost (const_equiv->loc, SET)
+ <= rtx_cost (best_equiv->loc, SET)))
+ {
+ best_equiv = const_equiv;
+ const_equiv = 0;
+ }
+
+ /* If best_equiv is nonzero, we know that MEM is set to a
+ constant or register before the loop. We will use this
+ knowledge to initialize the shadow register with that
+ constant or reg rather than by loading from MEM. */
+ if (best_equiv)
+ best = copy_rtx (best_equiv->loc);
+ }
+ set = gen_move_insn (reg, best);
+ set = emit_insn_before (set, loop->start);
+ if (const_equiv)
+ REG_NOTES (set) = gen_rtx_EXPR_LIST (REG_EQUAL,
+ copy_rtx (const_equiv->loc),
+ REG_NOTES (set));
if (written)
{
JUMP_LABEL (p) = label;
}
}
+
+ cselib_finish ();
}
/* For communication between note_reg_stored and its caller. */
/* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
There must be exactly one insn that sets this pseudo; it will be
deleted if all replacements succeed and we can prove that the register
- is not used after the loop.
- The arguments SCAN_START, LOOP_TOP and END are as in load_mems. */
+ is not used after the loop. */
+
static void
try_copy_prop (loop, replacement, regno)
const struct loop *loop;
rtx replacement;
- int regno;
+ unsigned int regno;
{
/* This is the reg that we are copying from. */
rtx reg_rtx = regno_reg_rtx[regno];