/* Perform various loop optimizations, including strength reduction.
Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
- 1998, 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
This file is part of GCC.
/* This is the loop optimization pass of the compiler.
It finds invariant computations within loops and moves them
to the beginning of the loop. Then it identifies basic and
- general induction variables. Strength reduction is applied to the general
- induction variables, and induction variable elimination is applied to
+ general induction variables.
+
+ Basic induction variables (BIVs) are pseudo registers that are set within
+ a loop only by incrementing or decrementing their value. General induction
+ variables (GIVs) are pseudo registers whose value is a linear function
+ of a basic induction variable. BIVs are recognized by `basic_induction_var';
+ GIVs by `general_induction_var'.
+
+ Once induction variables are identified, strength reduction is applied to the
+ general induction variables, and induction variable elimination is applied to
the basic induction variables.
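+
+ As a purely illustrative sketch (not taken from the surrounding code), in
+ a source loop such as
+
+	for (i = 0; i < n; i++)
+	  x[i] = 0;
+
+ the counter `i' is a BIV, and the address `x + i * sizeof (*x)' computed
+ for the store into `x[i]' is a GIV derived from it.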
It also finds cases where
#include "insn-flags.h"
#include "optabs.h"
#include "cfgloop.h"
+#include "ggc.h"
/* Not really meaningful values, but at least something. */
#ifndef SIMULTANEOUS_PREFETCHES
#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
((REGNO) < FIRST_PSEUDO_REGISTER \
- ? (int) HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)
+ ? (int) hard_regno_nregs[(REGNO)][GET_MODE (SET_DEST)] : 1)
/* Vector mapping INSN_UIDs to luids.
static void note_addr_stored (rtx, rtx, void *);
static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
+static rtx find_regs_nested (rtx, rtx);
static void scan_loop (struct loop*, int);
#if 0
static void replace_call_address (rtx, rtx, rtx);
#endif
static rtx skip_consec_insns (rtx, int);
static int libcall_benefit (rtx);
+static rtx libcall_other_reg (rtx, rtx);
+static void record_excess_regs (rtx, rtx, rtx *);
static void ignore_some_movables (struct loop_movables *);
static void force_movables (struct loop_movables *);
static void combine_movables (struct loop_movables *, struct loop_regs *);
rtx, rtx, rtx, rtx, int, enum g_types, int, int,
rtx *);
static void update_giv_derive (const struct loop *, rtx);
-static void check_ext_dependent_givs (struct iv_class *, struct loop_info *);
+static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
rtx, rtx, rtx *, rtx *, rtx **);
static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
continue;
/* Don't assign luids to line-number NOTEs, so that the distance in
luids between two insns is not affected by -g. */
- if (GET_CODE (insn) != NOTE
+ if (!NOTE_P (insn)
|| NOTE_LINE_NUMBER (insn) <= 0)
uid_luid[INSN_UID (insn)] = ++i;
else
max_loop_num = 0;
for (insn = f; insn; insn = NEXT_INSN (insn))
{
- if (GET_CODE (insn) == NOTE
+ if (NOTE_P (insn)
&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
max_loop_num++;
}
Leave some space for labels allocated by find_and_verify_loops. */
max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
- uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
- uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
- sizeof (struct loop *));
+ uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
+ uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));
/* Allocate storage for array of loops. */
- loops->array = (struct loop *)
- xcalloc (loops->num, sizeof (struct loop));
+ loops->array = xcalloc (loops->num, sizeof (struct loop));
/* Find and process each loop.
First, find them, and record them in order of their beginnings. */
struct loop *loop = &loops->array[i];
if (! loop->invalid && loop->end)
- scan_loop (loop, flags);
+ {
+ scan_loop (loop, flags);
+ ggc_collect ();
+ }
}
end_alias_analysis ();
/* Clean up. */
+ for (i = 0; i < (int) loops->num; i++)
+ free (loops_info[i].mems);
+
free (uid_luid);
free (uid_loop);
free (loops_info);
return insn;
}
+/* Find any register references hidden inside X and add them to
+ the dependency list DEPS. This is used to look inside CLOBBER (MEM ...)
+ expressions when checking whether a PARALLEL can be pulled out of a loop. */
+
+static rtx
+find_regs_nested (rtx deps, rtx x)
+{
+ enum rtx_code code = GET_CODE (x);
+ if (code == REG)
+ deps = gen_rtx_EXPR_LIST (VOIDmode, x, deps);
+ else
+ {
+ const char *fmt = GET_RTX_FORMAT (code);
+ int i, j;
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ deps = find_regs_nested (deps, XEXP (x, i));
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ deps = find_regs_nested (deps, XVECEXP (x, i, j));
+ }
+ }
+ return deps;
+}
+
/* Optimize one loop described by LOOP. */
/* ??? Could also move memory writes out of loops if the destination address
for (p = NEXT_INSN (loop_start);
p != loop_end
- && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
- && (GET_CODE (p) != NOTE
+ && !LABEL_P (p) && ! INSN_P (p)
+ && (!NOTE_P (p)
|| (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
&& NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
p = NEXT_INSN (p))
Start scan from there.
But record in LOOP->TOP the place where the end-test jumps
back to so we can scan that after the end of the loop. */
- if (GET_CODE (p) == JUMP_INSN
+ if (JUMP_P (p)
/* Loop entry must be unconditional jump (and not a RETURN) */
&& any_uncondjump_p (p)
&& JUMP_LABEL (p) != 0
test above. */
if (INSN_UID (loop->scan_start) >= max_uid_for_loop
- || GET_CODE (loop->scan_start) != CODE_LABEL)
+ || !LABEL_P (loop->scan_start))
{
if (loop_dump_stream)
fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
{
if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
in_libcall--;
- if (GET_CODE (p) == INSN)
+ if (NONJUMP_INSN_P (p))
{
temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
if (temp)
in_libcall++;
if (! in_libcall
&& (set = single_set (p))
- && GET_CODE (SET_DEST (set)) == REG
+ && REG_P (SET_DEST (set))
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
&& SET_DEST (set) != pic_offset_table_rtx
#endif
}
/* For parallels, add any possible uses to the dependencies, as
- we can't move the insn without resolving them first. */
+ we can't move the insn without resolving them first.
+ MEMs inside CLOBBERs may also reference registers; these
+ count as implicit uses. */
if (GET_CODE (PATTERN (p)) == PARALLEL)
{
for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
dependencies
= gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
dependencies);
+ else if (GET_CODE (x) == CLOBBER
+ && MEM_P (XEXP (x, 0)))
+ dependencies = find_regs_nested (dependencies,
+ XEXP (XEXP (x, 0), 0));
}
}
else if (insert_temp
&& (optimize_size
|| ! can_copy_p (GET_MODE (SET_SRC (set)))
- || GET_CODE (SET_SRC (set)) == REG
+ || REG_P (SET_SRC (set))
|| (CONSTANT_P (SET_SRC (set))
&& LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
;
&& ! side_effects_p (SET_SRC (set))
&& ! find_reg_note (p, REG_RETVAL, NULL_RTX)
&& (! SMALL_REGISTER_CLASSES
- || (! (GET_CODE (SET_SRC (set)) == REG
+ || (! (REG_P (SET_SRC (set))
&& (REGNO (SET_SRC (set))
< FIRST_PSEUDO_REGISTER))))
+ && regno >= FIRST_PSEUDO_REGISTER
/* This test is not redundant; SET_SRC (set) might be
a call-clobbered register and the life of REGNO
might span a call. */
continue;
}
- m = (struct movable *) xmalloc (sizeof (struct movable));
+ m = xmalloc (sizeof (struct movable));
m->next = 0;
m->insn = p;
m->set_src = src;
Also, if the value loaded into the register
depends on the same register, this cannot be done. */
else if (SET_SRC (set) == const0_rtx
- && GET_CODE (NEXT_INSN (p)) == INSN
+ && NONJUMP_INSN_P (NEXT_INSN (p))
&& (set1 = single_set (NEXT_INSN (p)))
&& GET_CODE (set1) == SET
&& (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
if (regs->array[regno].set_in_loop == 2)
{
struct movable *m;
- m = (struct movable *) xmalloc (sizeof (struct movable));
+ m = xmalloc (sizeof (struct movable));
m->next = 0;
m->insn = p;
m->set_dest = SET_DEST (set);
/* Past a call insn, we get to insns which might not be executed
because the call might exit. This matters for insns that trap.
Constant and pure call insns always return, so they don't count. */
- else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
+ else if (CALL_P (p) && ! CONST_OR_PURE_CALL_P (p))
call_passed = 1;
/* Past a label or a jump, we get to insns for which we
can't count on whether or how many times they will be
only move out sets of trivial variables
(those not used after the loop). */
/* Similar code appears twice in strength_reduce. */
- else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
+ else if ((LABEL_P (p) || JUMP_P (p))
/* If we enter the loop in the middle, and scan around to the
beginning, don't set maybe_never for that. This must be an
unconditional jump, otherwise the code at the top of the
loop might never be executed. Unconditional jumps are
followed by a barrier then the loop_end. */
- && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
+ && ! (JUMP_P (p) && JUMP_LABEL (p) == loop->top
&& NEXT_INSN (NEXT_INSN (p)) == loop_end
&& any_uncondjump_p (p)))
maybe_never = 1;
- else if (GET_CODE (p) == NOTE)
+ else if (NOTE_P (p))
{
/* At the virtual top of a converted loop, insns are again known to
be executed: logically, the loop begins here even though the exit
loop_regs_scan (loop, 0);
for (update_start = loop_start;
PREV_INSN (update_start)
- && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
+ && !LABEL_P (PREV_INSN (update_start));
update_start = PREV_INSN (update_start))
;
update_end = NEXT_INSN (loop_end);
for (update_start = loop_start;
PREV_INSN (update_start)
- && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
+ && !LABEL_P (PREV_INSN (update_start));
update_start = PREV_INSN (update_start))
;
update_end = NEXT_INSN (loop_end);
if (flag_strength_reduce)
{
- if (update_end && GET_CODE (update_end) == CODE_LABEL)
+ if (update_end && LABEL_P (update_end))
/* Ensure our label doesn't go away. */
LABEL_NUSES (update_end)++;
reg_scan_update (update_start, update_end, loop_max_reg);
loop_max_reg = max_reg_num ();
- if (update_end && GET_CODE (update_end) == CODE_LABEL
+ if (update_end && LABEL_P (update_end)
&& --LABEL_NUSES (update_end) == 0)
delete_related_insns (update_end);
}
/* Add elements to *OUTPUT to record all the pseudo-regs
mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
-void
+static void
record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
{
enum rtx_code code;
If there are none, return 0.
If there are one or more, return an EXPR_LIST containing all of them. */
-rtx
+static rtx
libcall_other_reg (rtx insn, rtx equiv)
{
rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
while (p != insn)
{
- if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
- || GET_CODE (p) == CALL_INSN)
+ if (INSN_P (p))
record_excess_regs (PATTERN (p), equiv, &output);
p = NEXT_INSN (p);
}
for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
insn != last; insn = NEXT_INSN (insn))
{
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
benefit += 10; /* Assume at least this many insns in a library
routine. */
- else if (GET_CODE (insn) == INSN
+ else if (NONJUMP_INSN_P (insn)
&& GET_CODE (PATTERN (insn)) != USE
&& GET_CODE (PATTERN (insn)) != CLOBBER)
benefit++;
/* If first insn of libcall sequence, skip to end. */
/* Do this at start of loop, since INSN is guaranteed to
be an insn here. */
- if (GET_CODE (insn) != NOTE
+ if (!NOTE_P (insn)
&& (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
insn = XEXP (temp, 0);
do
insn = NEXT_INSN (insn);
- while (GET_CODE (insn) == NOTE);
+ while (NOTE_P (insn));
}
return insn;
m = 0;
/* Increase the priority of moving the first insn
- since it permits the second to be moved as well. */
+ since it permits the second to be moved as well.
+ Likewise for insns already forced by the first insn. */
if (m != 0)
{
+ struct movable *m2;
+
m->forces = m1;
- m1->lifetime += m->lifetime;
- m1->savings += m->savings;
+ for (m2 = m1; m2; m2 = m2->forces)
+ {
+ m2->lifetime += m->lifetime;
+ m2->savings += m->savings;
+ }
}
}
}
combine_movables (struct loop_movables *movables, struct loop_regs *regs)
{
struct movable *m;
- char *matched_regs = (char *) xmalloc (regs->num);
+ char *matched_regs = xmalloc (regs->num);
enum machine_mode mode;
/* Regs that are set more than once are not allowed to match
&& (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
>= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
/* See if the source of M1 says it matches M. */
- && ((GET_CODE (m1->set_src) == REG
+ && ((REG_P (m1->set_src)
&& matched_regs[REGNO (m1->set_src)])
|| rtx_equal_for_loop_p (m->set_src, m1->set_src,
movables, regs))))
/* If we have a register and a constant, they may sometimes be
equal. */
- if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
+ if (REG_P (x) && regs->array[REGNO (x)].set_in_loop == -2
&& CONSTANT_P (y))
{
for (m = movables->head; m; m = m->next)
&& rtx_equal_p (m->set_src, y))
return 1;
}
- else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
+ else if (REG_P (y) && regs->array[REGNO (y)].set_in_loop == -2
&& CONSTANT_P (x))
{
for (m = movables->head; m; m = m->next)
/* Map of pseudo-register replacements to handle combining
when we move several insns that load the same value
into different pseudo-registers. */
- rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
- char *already_moved = (char *) xcalloc (nregs, sizeof (char));
+ rtx *reg_map = xcalloc (nregs, sizeof (rtx));
+ char *already_moved = xcalloc (nregs, sizeof (char));
for (m = movables->head; m; m = m->next)
{
{
/* If this is the first insn of a library call sequence,
something is very wrong. */
- if (GET_CODE (p) != NOTE
+ if (!NOTE_P (p)
&& (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
abort ();
/* If this is the last insn of a libcall sequence, then
delete every insn in the sequence except the last.
The last insn is handled in the normal manner. */
- if (GET_CODE (p) != NOTE
+ if (!NOTE_P (p)
&& (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
{
temp = XEXP (temp, 0);
pointers, but when we skip over a NOTE we must fix
it up. Otherwise that code walks into the non-deleted
insn stream. */
- while (p && GET_CODE (p) == NOTE)
+ while (p && NOTE_P (p))
p = NEXT_INSN (temp) = NEXT_INSN (p);
if (m->insert_temp)
/* If first insn of libcall sequence, skip to end. */
/* Do this at start of loop, since p is guaranteed to
be an insn here. */
- if (GET_CODE (p) != NOTE
+ if (!NOTE_P (p)
&& (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
p = XEXP (temp, 0);
/* If last insn of libcall sequence, move all
insns except the last before the loop. The last
insn is handled in the normal manner. */
- if (GET_CODE (p) != NOTE
+ if (!NOTE_P (p)
&& (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
{
rtx fn_address = 0;
rtx n;
rtx next;
- if (GET_CODE (temp) == NOTE)
+ if (NOTE_P (temp))
continue;
body = PATTERN (temp);
not counting USE or NOTE insns. */
for (next = NEXT_INSN (temp); next != p;
next = NEXT_INSN (next))
- if (! (GET_CODE (next) == INSN
+ if (! (NONJUMP_INSN_P (next)
&& GET_CODE (PATTERN (next)) == USE)
- && GET_CODE (next) != NOTE)
+ && !NOTE_P (next))
break;
/* If that is the call, this may be the insn
function address into the register that the
call insn will use. flow.c will delete any
redundant stores that we have created. */
- if (GET_CODE (next) == CALL_INSN
+ if (CALL_P (next)
&& GET_CODE (body) == SET
- && GET_CODE (SET_DEST (body)) == REG
+ && REG_P (SET_DEST (body))
&& (n = find_reg_note (temp, REG_EQUAL,
NULL_RTX)))
{
fn_reg = SET_SRC (body);
- if (GET_CODE (fn_reg) != REG)
+ if (!REG_P (fn_reg))
fn_reg = SET_DEST (body);
fn_address = XEXP (n, 0);
fn_address_insn = temp;
/* We have the call insn.
If it uses the register we suspect it might,
load it with the correct address directly. */
- if (GET_CODE (temp) == CALL_INSN
+ if (CALL_P (temp)
&& fn_address != 0
&& reg_referenced_p (fn_reg, body))
loop_insn_emit_after (loop, 0, fn_address_insn,
gen_move_insn
(fn_reg, fn_address));
- if (GET_CODE (temp) == CALL_INSN)
+ if (CALL_P (temp))
{
i1 = loop_call_insn_hoist (loop, body);
/* Because the USAGE information potentially
end_sequence ();
i1 = loop_insn_hoist (loop, sequence);
}
- else if (GET_CODE (p) == CALL_INSN)
+ else if (CALL_P (p))
{
i1 = loop_call_insn_hoist (loop, PATTERN (p));
/* Because the USAGE information potentially
/* The SET_SRC might not be invariant, so we must
use the REG_EQUAL note. */
start_sequence ();
- emit_move_insn (m->set_dest, m->set_src);
+ emit_move_insn (m->insert_temp ? newreg : m->set_dest,
+ m->set_src);
seq = get_insns ();
end_sequence ();
}
else if (m->insert_temp)
{
- rtx *reg_map2 = (rtx *) xcalloc (REGNO (newreg),
- sizeof(rtx));
+ rtx *reg_map2 = xcalloc (REGNO (newreg),
+ sizeof(rtx));
reg_map2 [m->regno] = newreg;
i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
pointers, but when we skip over a NOTE we must fix
it up. Otherwise that code walks into the non-deleted
insn stream. */
- while (p && GET_CODE (p) == NOTE)
+ while (p && NOTE_P (p))
p = NEXT_INSN (temp) = NEXT_INSN (p);
if (m->insert_temp)
and prevent further processing of it. */
m1->done = 1;
- /* if library call, delete all insns. */
+ /* If library call, delete all insns. */
if ((temp = find_reg_note (m1->insn, REG_RETVAL,
NULL_RTX)))
delete_insn_chain (XEXP (temp, 0), m1->insn);
/* Go through all the instructions in the loop, making
all the register substitutions scheduled in REG_MAP. */
for (p = new_start; p != loop_end; p = NEXT_INSN (p))
- if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
- || GET_CODE (p) == CALL_INSN)
+ if (INSN_P (p))
{
replace_regs (PATTERN (p), reg_map, nregs, 0);
replace_regs (REG_NOTES (p), reg_map, nregs, 0);
/* If loop opts run twice, this was set on 1st pass for 2nd. */
loop_info->preconditioned = NOTE_PRECONDITIONED (end);
- for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
+ for (insn = start; insn && !LABEL_P (insn);
insn = PREV_INSN (insn))
{
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
{
loop_info->pre_header_has_call = 1;
break;
rtx fusage = XEXP (fusage_entry, 0);
if (GET_CODE (fusage) == CLOBBER
- && GET_CODE (XEXP (fusage, 0)) == MEM
+ && MEM_P (XEXP (fusage, 0))
&& RTX_UNCHANGING_P (XEXP (fusage, 0)))
{
note_stores (fusage, note_addr_stored, loop_info);
loop_info->has_multiple_exit_targets = 1;
}
}
- /* FALLTHRU */
+ /* Fall through. */
case INSN:
if (volatile_refs_p (PATTERN (insn)))
loop_info->has_volatile = 1;
- if (GET_CODE (insn) == JUMP_INSN
+ if (JUMP_P (insn)
&& (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
|| GET_CODE (PATTERN (insn)) == ADDR_VEC))
loop_info->has_tablejump = 1;
current_loop = NULL;
for (insn = f; insn; insn = NEXT_INSN (insn))
{
- if (GET_CODE (insn) == NOTE)
+ if (NOTE_P (insn))
switch (NOTE_LINE_NUMBER (insn))
{
case NOTE_INSN_LOOP_BEG:
break;
}
- if (GET_CODE (insn) == CALL_INSN
+ if (CALL_P (insn)
&& find_reg_note (insn, REG_SETJMP, NULL))
{
/* In this case, we must invalidate our current loop and any
{
struct loop *this_loop = uid_loop[INSN_UID (insn)];
- if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
+ if (NONJUMP_INSN_P (insn) || CALL_P (insn))
{
rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
if (note)
invalidate_loops_containing_label (XEXP (note, 0));
}
- if (GET_CODE (insn) != JUMP_INSN)
+ if (!JUMP_P (insn))
continue;
mark_loop_jump (PATTERN (insn), this_loop);
/* Go backwards until we reach the start of the loop, a label,
or a JUMP_INSN. */
for (p = PREV_INSN (insn);
- GET_CODE (p) != CODE_LABEL
- && ! (GET_CODE (p) == NOTE
+ !LABEL_P (p)
+ && ! (NOTE_P (p)
&& NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
- && GET_CODE (p) != JUMP_INSN;
+ && !JUMP_P (p);
p = PREV_INSN (p))
;
/* Make sure that the target of P is within the current loop. */
- if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
+ if (JUMP_P (p) && JUMP_LABEL (p)
&& uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
outer_loop = this_loop;
and move the block of code to the spot we found. */
if (! outer_loop
- && GET_CODE (p) == JUMP_INSN
+ && JUMP_P (p)
&& JUMP_LABEL (p) != 0
/* Just ignore jumps to labels that were never emitted.
These always indicate compilation errors. */
/* Search for possible garbage past the conditional jumps
and look for the last barrier. */
for (tmp = last_insn_to_move;
- tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
- if (GET_CODE (tmp) == BARRIER)
+ tmp && !LABEL_P (tmp); tmp = NEXT_INSN (tmp))
+ if (BARRIER_P (tmp))
last_insn_to_move = tmp;
for (loc = target; loc; loc = PREV_INSN (loc))
- if (GET_CODE (loc) == BARRIER
+ if (BARRIER_P (loc)
/* Don't move things inside a tablejump. */
&& ((loc2 = next_nonnote_insn (loc)) == 0
- || GET_CODE (loc2) != CODE_LABEL
+ || !LABEL_P (loc2)
|| (loc2 = next_nonnote_insn (loc2)) == 0
- || GET_CODE (loc2) != JUMP_INSN
+ || !JUMP_P (loc2)
|| (GET_CODE (PATTERN (loc2)) != ADDR_VEC
&& GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
&& uid_loop[INSN_UID (loc)] == target_loop)
if (loc == 0)
for (loc = target; loc; loc = NEXT_INSN (loc))
- if (GET_CODE (loc) == BARRIER
+ if (BARRIER_P (loc)
/* Don't move things inside a tablejump. */
&& ((loc2 = next_nonnote_insn (loc)) == 0
- || GET_CODE (loc2) != CODE_LABEL
+ || !LABEL_P (loc2)
|| (loc2 = next_nonnote_insn (loc2)) == 0
- || GET_CODE (loc2) != JUMP_INSN
+ || !JUMP_P (loc2)
|| (GET_CODE (PATTERN (loc2)) != ADDR_VEC
&& GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
&& uid_loop[INSN_UID (loc)] == target_loop)
{
while (insn && INSN_LUID (insn) <= end)
{
- if (GET_CODE (insn) == CODE_LABEL)
+ if (LABEL_P (insn))
return 1;
insn = NEXT_INSN (insn);
}
{
struct loop_info *loop_info = data;
- if (x == 0 || GET_CODE (x) != MEM)
+ if (x == 0 || !MEM_P (x))
return;
/* Count number of memory writes.
|| GET_CODE (x) == SUBREG)
x = XEXP (x, 0);
- if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
+ if (!REG_P (x) || REGNO (x) < FIRST_PSEUDO_REGISTER)
return;
/* If we do not have usage information, or if we know the register
return 0;
/* Out-of-range regs can occur when we are called from unrolling.
- These have always been created by the unroller and are set in
- the loop, hence are never invariant. */
+ These registers, created by the unroller, are set in the loop
+ and hence are never invariant.
+ Other out-of-range regs can be generated by load_mems; those that
+ are written to in the loop are not invariant, while those that are
+ not written to are invariant. It would be easy for load_mems
+ to set n_times_set correctly for these registers; however, there
+ is no easy way to distinguish them from registers created by the
+ unroller. */
if (REGNO (x) >= (unsigned) regs->num)
return 0;
this = 0;
if (code == INSN
&& (set = single_set (p))
- && GET_CODE (SET_DEST (set)) == REG
+ && REG_P (SET_DEST (set))
&& REGNO (SET_DEST (set)) == regno)
{
this = loop_invariant_p (loop, SET_SRC (set));
/* If loop_invariant_p ever returned 2, we return 2. */
return 1 + (value & 2);
}
-
-#if 0
-/* I don't think this condition is sufficient to allow INSN
- to be moved, so we no longer test it. */
-
-/* Return 1 if all insns in the basic block of INSN and following INSN
- that set REG are invariant according to TABLE. */
-
-static int
-all_sets_invariant_p (rtx reg, rtx insn, short *table)
-{
- rtx p = insn;
- int regno = REGNO (reg);
-
- while (1)
- {
- enum rtx_code code;
- p = NEXT_INSN (p);
- code = GET_CODE (p);
- if (code == CODE_LABEL || code == JUMP_INSN)
- return 1;
- if (code == INSN && GET_CODE (PATTERN (p)) == SET
- && GET_CODE (SET_DEST (PATTERN (p))) == REG
- && REGNO (SET_DEST (PATTERN (p))) == regno)
- {
- if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
- return 0;
- }
- }
-}
-#endif /* 0 */
\f
/* Look at all uses (not sets) of registers in X. For each, if it is
the single use, set USAGE[REGNO] to INSN; if there was a previous use in
in SET_DEST because if a register is partially modified, it won't
show up as a potential movable so we don't care how USAGE is set
for it. */
- if (GET_CODE (SET_DEST (x)) != REG)
+ if (!REG_P (SET_DEST (x)))
find_single_use_in_loop (regs, insn, SET_DEST (x));
find_single_use_in_loop (regs, insn, SET_SRC (x));
}
static void
count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
{
- if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
+ if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
/* Don't move a reg that has an explicit clobber.
It's not worth the pain to try to do it correctly. */
regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
|| GET_CODE (dest) == SIGN_EXTRACT
|| GET_CODE (dest) == STRICT_LOW_PART)
dest = XEXP (dest, 0);
- if (GET_CODE (dest) == REG)
+ if (REG_P (dest))
{
int i;
int regno = REGNO (dest);
{
struct check_store_data *d = (struct check_store_data *) data;
- if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
+ if ((MEM_P (x)) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
d->mem_write = 1;
}
\f
if (code != GET_CODE (y))
return 0;
- code = GET_CODE (x);
-
- if (GET_RTX_CLASS (code) == 'c')
+ if (COMMUTATIVE_ARITH_P (x))
{
return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
&& rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
|| (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
&& rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
}
+
/* Compare the elements. If any pair of corresponding elements fails to
match, return 0 for the whole thing. */
return;
}
\f
-/* A "basic induction variable" or biv is a pseudo reg that is set
- (within this loop) only by incrementing or decrementing it. */
-/* A "general induction variable" or giv is a pseudo reg whose
- value is a linear function of a biv. */
-
-/* Bivs are recognized by `basic_induction_var';
- Givs by `general_induction_var'. */
-
/* Communication with routines called via `note_stores'. */
static rtx note_insn;
returns, exits the loop, is a jump to a location that is still
behind the label, or is a jump to the loop start. */
- if (GET_CODE (p) == CODE_LABEL)
+ if (LABEL_P (p))
{
rtx insn = p;
break;
}
- if (GET_CODE (insn) == JUMP_INSN
+ if (JUMP_P (insn)
&& GET_CODE (PATTERN (insn)) != RETURN
&& (!any_condjump_p (insn)
|| (JUMP_LABEL (insn) != 0
on whether they will be executed during each iteration. */
/* This code appears twice in strength_reduce. There is also similar
code in scan_loop. */
- if (GET_CODE (p) == JUMP_INSN
+ if (JUMP_P (p)
/* If we enter the loop in the middle, and scan around to the
beginning, don't set not_every_iteration for that.
This can be any kind of jump, since we want to know if insns
not_every_iteration = 1;
}
- else if (GET_CODE (p) == NOTE)
+ else if (NOTE_P (p))
{
/* At the virtual top of a converted loop, insns are again known to
be executed each iteration: logically, the loop begins here
Note that LOOP_TOP is only set for rotated loops and we need
this check for all loops, so compare against the CODE_LABEL
which immediately follows LOOP_START. */
- if (GET_CODE (p) == JUMP_INSN
+ if (JUMP_P (p)
&& JUMP_LABEL (p) == NEXT_INSN (loop->start))
past_loop_latch = 1;
if (not_every_iteration
&& !past_loop_latch
- && GET_CODE (p) == CODE_LABEL
+ && LABEL_P (p)
&& no_labels_between_p (p, loop->end)
&& loop_insn_first_p (p, loop->cont))
not_every_iteration = 0;
halting at first label. Also record any test condition. */
call_seen = 0;
- for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
+ for (p = loop->start; p && !LABEL_P (p); p = PREV_INSN (p))
{
rtx test;
note_insn = p;
- if (GET_CODE (p) == CALL_INSN)
+ if (CALL_P (p))
call_seen = 1;
if (INSN_P (p))
/* Record any test of a biv that branches around the loop if no store
between it and the start of loop. We only care about tests with
constants and registers and only certain of those. */
- if (GET_CODE (p) == JUMP_INSN
+ if (JUMP_P (p)
&& JUMP_LABEL (p) != 0
&& next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
&& (test = get_condition_for_loop (loop, p)) != 0
- && GET_CODE (XEXP (test, 0)) == REG
+ && REG_P (XEXP (test, 0))
&& REGNO (XEXP (test, 0)) < max_reg_before_loop
&& (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
&& valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
this is an address giv, then try to put the increment
immediately after its use, so that flow can create an
auto-increment addressing mode. */
+ /* Don't do this for loops entered at the bottom, to avoid
+    this invalid transformation:
+       jmp L;        ->       jmp L;
+     TOP:                   TOP:
+       use giv                use giv
+     L:                       inc giv
+       inc biv              L:
+       test biv               test giv
+       cbr TOP                cbr TOP
+ */
if (v->giv_type == DEST_ADDR && bl->biv_count == 1
&& bl->biv->always_executed && ! bl->biv->maybe_multiple
/* We don't handle reversed biv's because bl->biv->insn
does not have a valid INSN_LUID. */
&& ! bl->reversed
&& v->always_executed && ! v->maybe_multiple
- && INSN_UID (v->insn) < max_uid_for_loop)
+ && INSN_UID (v->insn) < max_uid_for_loop
+ && !loop->top)
{
/* If other giv's have been combined with this one, then
this will work only if all uses of the other giv's occur
computational information. If not, and this is a DEST_ADDR
giv, at least we know that it's a pointer, though we don't know
the alignment. */
- if (GET_CODE (v->new_reg) == REG
+ if (REG_P (v->new_reg)
&& v->giv_type == DEST_REG
&& REG_POINTER (v->dest_reg))
mark_reg_pointer (v->new_reg,
REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
- else if (GET_CODE (v->new_reg) == REG
+ else if (REG_P (v->new_reg)
&& REG_POINTER (v->src_reg))
{
unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
mark_reg_pointer (v->new_reg, align);
}
- else if (GET_CODE (v->new_reg) == REG
- && GET_CODE (v->add_val) == REG
+ else if (REG_P (v->new_reg)
+ && REG_P (v->add_val)
&& REG_POINTER (v->add_val))
{
unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
mark_reg_pointer (v->new_reg, align);
}
- else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
+ else if (REG_P (v->new_reg) && v->giv_type == DEST_ADDR)
mark_reg_pointer (v->new_reg, 0);
if (v->giv_type == DEST_ADDR)
addr_placeholder = gen_reg_rtx (Pmode);
ivs->n_regs = max_reg_before_loop;
- ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
+ ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));
/* Find all BIVs in loop. */
loop_bivs_find (loop);
Some givs might have been made from biv increments, so look at
ivs->reg_iv_type for a suitable size. */
reg_map_size = ivs->n_regs;
- reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
+ reg_map = xcalloc (reg_map_size, sizeof (rtx));
/* Examine each iv class for feasibility of strength reduction/induction
variable elimination. */
/* Check each extension dependent giv in this class to see if its
root biv is safe from wrapping in the interior mode. */
- check_ext_dependent_givs (bl, loop_info);
+ check_ext_dependent_givs (loop, bl);
/* Combine all giv's for this iv_class. */
combine_givs (regs, bl);
register substitutions scheduled in REG_MAP. */
for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
- if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
- || GET_CODE (p) == CALL_INSN)
+ if (INSN_P (p))
{
replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
&& unrolled_insn_copies <= insn_count))
unroll_loop (loop, insn_count, 1);
-#ifdef HAVE_doloop_end
- if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
- doloop_optimize (loop);
-#endif /* HAVE_doloop_end */
-
- /* In case number of iterations is known, drop branch prediction note
- in the branch. Do that only in second loop pass, as loop unrolling
- may change the number of iterations performed. */
- if (flags & LOOP_BCT)
- {
- unsigned HOST_WIDE_INT n
- = loop_info->n_iterations / loop_info->unroll_number;
- if (n > 1)
- predict_insn (prev_nonnote_insn (loop->end), PRED_LOOP_ITERATIONS,
- REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
- }
-
if (loop_dump_stream)
fprintf (loop_dump_stream, "\n");
rtx mult_val;
rtx *location;
- if (GET_CODE (p) == INSN
+ if (NONJUMP_INSN_P (p)
&& (set = single_set (p))
- && GET_CODE (SET_DEST (set)) == REG)
+ && REG_P (SET_DEST (set)))
{
dest_reg = SET_DEST (set);
if (REGNO (dest_reg) < max_reg_before_loop
/* It is a possible basic induction variable.
Create and initialize an induction structure for it. */
- struct induction *v
- = (struct induction *) xmalloc (sizeof (struct induction));
+ struct induction *v = xmalloc (sizeof (struct induction));
record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
not_every_iteration, maybe_multiple);
rtx set;
/* Look for a general induction variable in a register. */
- if (GET_CODE (p) == INSN
+ if (NONJUMP_INSN_P (p)
&& (set = single_set (p))
- && GET_CODE (SET_DEST (set)) == REG
+ && REG_P (SET_DEST (set))
&& ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
{
rtx src_reg;
&add_val, &mult_val, &ext_val,
&last_consec_insn))))
{
- struct induction *v
- = (struct induction *) xmalloc (sizeof (struct induction));
+ struct induction *v = xmalloc (sizeof (struct induction));
/* If this is a library call, increase benefit. */
if (find_reg_note (p, REG_RETVAL, NULL_RTX))
}
/* Look for givs which are memory addresses. */
- if (GET_CODE (p) == INSN)
+ if (NONJUMP_INSN_P (p))
find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
maybe_multiple);
/* Update the status of whether giv can derive other givs. This can
change when we pass a label or an insn that updates a biv. */
- if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
- || GET_CODE (p) == CODE_LABEL)
+ if (INSN_P (p) || LABEL_P (p))
update_giv_derive (loop, p);
return p;
}
/* Only consider pseudos we know about initialized in insns whose luids
we know. */
- if (GET_CODE (x) != REG
+ if (!REG_P (x)
|| REGNO (x) >= max_reg_before_loop)
return 0;
GET_MODE (x)))
{
/* Found one; record it. */
- struct induction *v
- = (struct induction *) xmalloc (sizeof (struct induction));
+ struct induction *v = xmalloc (sizeof (struct induction));
record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
add_val, ext_val, benefit, DEST_ADDR,
{
/* Create and initialize new iv_class. */
- bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
+ bl = xmalloc (sizeof (struct iv_class));
bl->regno = REGNO (dest_reg);
bl->biv = 0;
/* Set initial value to the reg itself. */
bl->initial_value = dest_reg;
bl->final_value = 0;
- /* We haven't seen the initializing insn yet */
+ /* We haven't seen the initializing insn yet. */
bl->init_insn = 0;
bl->init_set = 0;
bl->initial_test = 0;
if (p == v->insn)
break;
- if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
- || GET_CODE (p) == CALL_INSN)
+ if (INSN_P (p))
{
/* It is possible for the BIV increment to use the GIV if we
have a cycle. Thus we must be sure to check each insn for
if (p == last_giv_use)
break;
- if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
+ if (JUMP_P (p) && JUMP_LABEL (p)
&& LABEL_NAME (JUMP_LABEL (p))
&& ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
&& loop_insn_first_p (loop->start, JUMP_LABEL (p)))
for (bl = ivs->list; bl; bl = bl->next)
for (biv = bl->biv; biv; biv = biv->next_iv)
- if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
+ if (LABEL_P (p) || JUMP_P (p)
|| biv->insn == p)
{
+ /* Skip if location is the same as a previous one. */
+ if (biv->same)
+ continue;
+
for (giv = bl->giv; giv; giv = giv->next_iv)
{
/* If cant_derive is already true, there is no point in
/* If this giv is conditionally set and we have passed a label,
it cannot derive anything. */
- if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
+ if (LABEL_P (p) && ! giv->always_computable)
giv->cant_derive = 1;
/* Skip givs that have mult_val == 0, since
else
giv->cant_derive = 1;
}
- else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
- || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
+ else if ((LABEL_P (p) && ! biv->always_computable)
+ || (JUMP_P (p) && biv->maybe_multiple))
giv->cant_derive = 1;
}
}
*MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
We also want to detect a BIV when it corresponds to a variable
- whose mode was promoted via PROMOTED_MODE. In that case, an increment
+ whose mode was promoted. In that case, an increment
of the variable may be a PLUS that adds a SUBREG of that variable to
an invariant and then sign- or zero-extends the result of the PLUS
into the variable.
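
   As an illustrative sketch (assuming a SImode variable promoted into a
   DImode register, with the subreg offset shown for a little-endian
   target), such an increment might look like:

	(set (reg:DI 100)
	     (sign_extend:DI (plus:SI (subreg:SI (reg:DI 100) 0)
				      (const_int 1))))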
{
enum rtx_code code;
rtx *argp, arg;
- rtx insn, set = 0;
+ rtx insn, set = 0, last, inc;
code = GET_CODE (x);
*location = NULL;
if (loop_invariant_p (loop, arg) != 1)
return 0;
- *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
+ /* convert_modes can emit new instructions, e.g. when arg is a loop
+ invariant MEM and dest_reg has a different mode.
+ These instructions would be emitted after the end of the function
+ and then *inc_val would be an uninitialized pseudo.
+ Detect this and bail in this case.
+ Other ways to solve this would be to introduce a convert_modes
+ variant which is allowed to fail but not allowed to emit new
+ instructions, to emit these instructions before the loop start and
+ let them be garbage collected if *inc_val is never used, or to save
+ the *inc_val initialization sequence generated here and emit it at
+ some suitable place when *inc_val is actually going to be used. */
+ last = get_last_insn ();
+ inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
+ if (get_last_insn () != last)
+ {
+ delete_insns_since (last);
+ return 0;
+ }
+
+ *inc_val = inc;
*mult_val = const1_rtx;
*location = argp;
return 1;
{
insn = PREV_INSN (insn);
}
- while (insn && GET_CODE (insn) == NOTE
+ while (insn && NOTE_P (insn)
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
if (!insn)
&& GET_MODE_CLASS (mode) != MODE_CC)
{
/* Possible bug here? Perhaps we don't know the mode of X. */
- *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
+ last = get_last_insn ();
+ inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
+ if (get_last_insn () != last)
+ {
+ delete_insns_since (last);
+ return 0;
+ }
+
+ *inc_val = inc;
*mult_val = const0_rtx;
return 1;
}
case ASHIFTRT:
/* Similar, since this can be a sign extension. */
for (insn = PREV_INSN (p);
- (insn && GET_CODE (insn) == NOTE
+ (insn && NOTE_P (insn)
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
insn = PREV_INSN (insn))
;
/* Each argument must be either REG, PLUS, or MULT. Convert REG to
MULT to reduce cases. */
- if (GET_CODE (arg0) == REG)
+ if (REG_P (arg0))
arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
- if (GET_CODE (arg1) == REG)
+ if (REG_P (arg1))
arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
/* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
arg1)),
ext_val, benefit);
}
- /* Propagate the MULT expressions to the intermost nodes. */
+ /* Propagate the MULT expressions to the innermost nodes. */
else if (GET_CODE (arg0) == PLUS)
{
/* (invar_0 + invar_1) * invar_2. Distribute. */
if (*ext_val == NULL_RTX)
{
arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
- if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
+ if (arg0 && *ext_val == NULL_RTX && REG_P (arg0))
{
*ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
return arg0;
if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
return 0;
- v = (struct induction *) alloca (sizeof (struct induction));
+ v = alloca (sizeof (struct induction));
v->src_reg = src_reg;
v->mult_val = *mult_val;
v->add_val = *add_val;
if (code == INSN
&& (set = single_set (p))
- && GET_CODE (SET_DEST (set)) == REG
+ && REG_P (SET_DEST (set))
&& SET_DEST (set) == dest_reg
&& (general_induction_var (loop, SET_SRC (set), &src_reg,
add_val, mult_val, ext_val, 0,
&& GET_CODE (g2->mult_val) == CONST_INT)
{
if (g1->mult_val == const0_rtx
+ || (g1->mult_val == constm1_rtx
+ && INTVAL (g2->mult_val)
+ == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
|| INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
return NULL_RTX;
mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
make the giv illegal. */
static void
-check_ext_dependent_givs (struct iv_class *bl, struct loop_info *loop_info)
+check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
{
+ struct loop_info *loop_info = LOOP_INFO (loop);
int ze_ok = 0, se_ok = 0, info_ok = 0;
enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
HOST_WIDE_INT start_val;
/* Make sure the iteration data is available. We must have
constants in order to be certain of no overflow. */
- /* ??? An unknown iteration count with an increment of +-1
- combined with friendly exit tests of against an invariant
- value is also amenable to optimization. Not implemented. */
if (loop_info->n_iterations > 0
&& bl->initial_value
&& GET_CODE (bl->initial_value) == CONST_INT
}
}
+ /* If we know the BIV is compared at run-time against an
+ invariant value, and the increment is +/- 1, we may also
+ be able to prove that the BIV cannot overflow. */
+ else if (bl->biv->src_reg == loop_info->iteration_var
+ && loop_info->comparison_value
+ && loop_invariant_p (loop, loop_info->comparison_value)
+ && (incr = biv_total_increment (bl))
+ && GET_CODE (incr) == CONST_INT)
+ {
+ /* If the increment is +1, and the exit test is a <,
+ the BIV cannot overflow. (For <=, we have the
+ problematic case that the comparison value might
+ be the maximum value of the range.) */
+ if (INTVAL (incr) == 1)
+ {
+ if (loop_info->comparison_code == LT)
+ se_ok = ze_ok = 1;
+ else if (loop_info->comparison_code == LTU)
+ ze_ok = 1;
+ }
+
+ /* Likewise for increment -1 and exit test >. */
+ if (INTVAL (incr) == -1)
+ {
+ if (loop_info->comparison_code == GT)
+ se_ok = ze_ok = 1;
+ else if (loop_info->comparison_code == GTU)
+ ze_ok = 1;
+ }
+ }
+
/* Invalidate givs that fail the tests. */
for (v = bl->giv; v; v = v->next_iv)
if (v->ext_dependent)
signed or unsigned, so to safely truncate we must satisfy
both. The initial check here verifies the BIV itself;
once that is successful we may check its range wrt the
- derived GIV. */
- if (se_ok && ze_ok)
+ derived GIV. This works only if we were able to determine
+ constant start and end values above. */
+ if (se_ok && ze_ok && info_ok)
{
enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
if (!g1->ignore)
giv_count++;
- giv_array
- = (struct induction **) alloca (giv_count * sizeof (struct induction *));
+ giv_array = alloca (giv_count * sizeof (struct induction *));
i = 0;
for (g1 = bl->giv; g1; g1 = g1->next_iv)
if (!g1->ignore)
giv_array[i++] = g1;
- stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
- can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
+ stats = xcalloc (giv_count, sizeof (*stats));
+ can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));
for (i = 0; i < giv_count; i++)
{
/* Update register info for alias analysis. */
- if (seq == NULL_RTX)
- return;
-
- if (INSN_P (seq))
+ insn = seq;
+ while (insn != NULL_RTX)
{
- insn = seq;
- while (insn != NULL_RTX)
- {
- rtx set = single_set (insn);
+ rtx set = single_set (insn);
- if (set && GET_CODE (SET_DEST (set)) == REG)
- record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
+ if (set && REG_P (SET_DEST (set)))
+ record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
- insn = NEXT_INSN (insn);
- }
+ insn = NEXT_INSN (insn);
}
- else if (GET_CODE (seq) == SET
- && GET_CODE (SET_DEST (seq)) == REG)
- record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
}
update_reg_last_use (b, before_insn);
update_reg_last_use (m, before_insn);
- loop_insn_emit_before (loop, before_bb, before_insn, seq);
-
/* It is possible that the expansion created lots of new registers.
- Iterate over the sequence we just created and record them all. */
+ Iterate over the sequence we just created and record them all. We
+ must do this before inserting the sequence. */
loop_regs_update (loop, seq);
+
+ loop_insn_emit_before (loop, before_bb, before_insn, seq);
}
update_reg_last_use (b, loop->sink);
update_reg_last_use (m, loop->sink);
- loop_insn_sink (loop, seq);
-
/* It is possible that the expansion created lots of new registers.
- Iterate over the sequence we just created and record them all. */
+ Iterate over the sequence we just created and record them all. We
+ must do this before inserting the sequence. */
loop_regs_update (loop, seq);
+
+ loop_insn_sink (loop, seq);
}
/* Use copy_rtx to prevent unexpected sharing of these rtx. */
seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
- loop_insn_hoist (loop, seq);
-
/* It is possible that the expansion created lots of new registers.
- Iterate over the sequence we just created and record them all. */
+ Iterate over the sequence we just created and record them all. We
+ must do this before inserting the sequence. */
loop_regs_update (loop, seq);
+
+ loop_insn_hoist (loop, seq);
}
rtx next = NEXT_INSN (tmp);
if (++n_insns > 3
- || GET_CODE (tmp) != INSN
+ || !NONJUMP_INSN_P (tmp)
|| (GET_CODE (PATTERN (tmp)) == SET
&& GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
|| (GET_CODE (PATTERN (tmp)) == PARALLEL
struct loop_ivs *ivs = LOOP_IVS (loop);
struct iv_class *bl;
rtx reg;
+ enum machine_mode mode;
rtx jump_label;
rtx final_value;
rtx start_value;
/* Try to compute whether the compare/branch at the loop end is one or
two instructions. */
- get_condition (jump, &first_compare);
+ get_condition (jump, &first_compare, false);
if (first_compare == jump)
compare_and_branch = 1;
else if (first_compare == prev_nonnote_insn (jump))
rtx jump1;
if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
- if (GET_CODE (jump1) == JUMP_INSN)
+ if (JUMP_P (jump1))
return 0;
}
break;
}
+ /* Try swapping the comparison to identify a suitable biv. */
+ if (!bl)
+ for (bl = ivs->list; bl; bl = bl->next)
+ if (bl->biv_count == 1
+ && ! bl->biv->maybe_multiple
+ && bl->biv->dest_reg == XEXP (comparison, 1)
+ && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
+ first_compare))
+ {
+ comparison = gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)),
+ VOIDmode,
+ XEXP (comparison, 1),
+ XEXP (comparison, 0));
+ break;
+ }
+
if (! bl)
return 0;
In this case, add a reg_note REG_NONNEG, which allows the
m68k DBRA instruction to be used. */
- if (((GET_CODE (comparison) == GT
- && GET_CODE (XEXP (comparison, 1)) == CONST_INT
- && INTVAL (XEXP (comparison, 1)) == -1)
+ if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx)
|| (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
&& GET_CODE (bl->biv->add_val) == CONST_INT
&& INTVAL (bl->biv->add_val) < 0)
&& (INTVAL (bl->initial_value)
% (-INTVAL (bl->biv->add_val))) == 0)
{
- /* register always nonnegative, add REG_NOTE to branch */
+ /* Register always nonnegative, add REG_NOTE to branch. */
if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
REG_NOTES (jump)
= gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
the loop, then we can safely optimize. */
for (p = loop_start; p; p = PREV_INSN (p))
{
- if (GET_CODE (p) == CODE_LABEL)
+ if (LABEL_P (p))
break;
- if (GET_CODE (p) != JUMP_INSN)
+ if (!JUMP_P (p))
continue;
before_comparison = get_condition_for_loop (loop, p);
if (before_comparison
&& XEXP (before_comparison, 0) == bl->biv->dest_reg
- && GET_CODE (before_comparison) == LT
+ && (GET_CODE (before_comparison) == LT
+ || GET_CODE (before_comparison) == LTU)
&& XEXP (before_comparison, 1) == const0_rtx
&& ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
&& INTVAL (bl->biv->add_val) == -1)
{
rtx set = single_set (p);
- if (set && GET_CODE (SET_DEST (set)) == REG
+ if (set && REG_P (SET_DEST (set))
&& REGNO (SET_DEST (set)) == bl->regno)
/* An insn that sets the biv is okay. */
;
/* for constants, LE gets turned into LT */
&& (GET_CODE (comparison) == LT
|| (GET_CODE (comparison) == LE
- && no_use_except_counting)))
+ && no_use_except_counting)
+ || GET_CODE (comparison) == LTU))
{
HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
rtx initial_value, comparison_value;
enum rtx_code cmp_code;
int comparison_const_width;
unsigned HOST_WIDE_INT comparison_sign_mask;
+ bool keep_first_compare;
add_val = INTVAL (bl->biv->add_val);
comparison_value = XEXP (comparison, 1);
/* Save some info needed to produce the new insns. */
reg = bl->biv->dest_reg;
+ mode = GET_MODE (reg);
jump_label = condjump_label (PREV_INSN (loop_end));
new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
if (initial_value == const0_rtx
&& GET_CODE (comparison_value) == CONST_INT)
{
- start_value = GEN_INT (comparison_val - add_adjust);
+ start_value
+ = gen_int_mode (comparison_val - add_adjust, mode);
loop_insn_hoist (loop, gen_move_insn (reg, start_value));
}
else if (GET_CODE (initial_value) == CONST_INT)
{
- enum machine_mode mode = GET_MODE (reg);
rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
}
else if (! add_adjust)
{
- enum machine_mode mode = GET_MODE (reg);
rtx sub_insn = gen_sub3_insn (reg, comparison_value,
initial_value);
not delete the label. */
LABEL_NUSES (XEXP (jump_label, 0))++;
+ /* If we have a separate comparison insn that does more
+ than just set cc0, the result of the comparison might
+ be used outside the loop. */
+ keep_first_compare = (compare_and_branch == 2
+#ifdef HAVE_CC0
+ && sets_cc0_p (first_compare) <= 0
+#endif
+ );
+
/* Emit an insn after the end of the loop to set the biv's
proper exit value if it is used anywhere outside the loop. */
- if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
+ if (keep_first_compare
+ || (REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
|| ! bl->init_insn
|| REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
+ if (keep_first_compare)
+ loop_insn_sink (loop, PATTERN (first_compare));
+
/* Delete compare/branch at end of loop. */
delete_related_insns (PREV_INSN (loop_end));
if (compare_and_branch == 2)
/* Add new compare/branch insn at end of loop. */
start_sequence ();
emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
- GET_MODE (reg), 0,
+ mode, 0,
XEXP (jump_label, 0));
tem = get_insns ();
end_sequence ();
emit_jump_insn_before (tem, loop_end);
for (tem = PREV_INSN (loop_end);
- tem && GET_CODE (tem) != JUMP_INSN;
+ tem && !JUMP_P (tem);
tem = PREV_INSN (tem))
;
/* If this is a set of a GIV based on the reversed biv, any
REG_EQUAL notes should still be correct. */
if (! set
- || GET_CODE (SET_DEST (set)) != REG
+ || !REG_P (SET_DEST (set))
|| (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
|| REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
|| REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
rtx note;
/* If this is a libcall that sets a giv, skip ahead to its end. */
- if (GET_RTX_CLASS (code) == 'i')
+ if (INSN_P (p))
{
note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
rtx last = XEXP (note, 0);
rtx set = single_set (last);
- if (set && GET_CODE (SET_DEST (set)) == REG)
+ if (set && REG_P (SET_DEST (set)))
{
unsigned int regno = REGNO (SET_DEST (set));
P is a note. */
if (INSN_UID (p) < max_uid_for_loop
&& INSN_UID (q) < max_uid_for_loop
- && GET_CODE (p) != NOTE)
+ && !NOTE_P (p))
return INSN_LUID (p) <= INSN_LUID (q);
if (INSN_UID (p) >= max_uid_for_loop
- || GET_CODE (p) == NOTE)
+ || NOTE_P (p))
p = NEXT_INSN (p);
if (INSN_UID (q) >= max_uid_for_loop)
q = NEXT_INSN (q);
&& (GET_CODE (v->add_val) == SYMBOL_REF
|| GET_CODE (v->add_val) == LABEL_REF
|| GET_CODE (v->add_val) == CONST
- || (GET_CODE (v->add_val) == REG
+ || (REG_P (v->add_val)
&& REG_POINTER (v->add_val))))
{
if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
&& (GET_CODE (v->add_val) == SYMBOL_REF
|| GET_CODE (v->add_val) == LABEL_REF
|| GET_CODE (v->add_val) == CONST
- || (GET_CODE (v->add_val) == REG
+ || (REG_P (v->add_val)
&& REG_POINTER (v->add_val)))
&& ! v->ignore && ! v->maybe_dead && v->always_computable
&& v->mode == mode)
return 1;
}
}
- else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
+ else if (REG_P (arg) || MEM_P (arg))
{
if (loop_invariant_p (loop, arg) == 1)
{
#if 0
/* Otherwise the reg compared with had better be a biv. */
- if (GET_CODE (arg) != REG
+ if (!REG_P (arg)
|| REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
return 0;
{
rtx n;
for (n = insn;
- n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
+ n && !LABEL_P (n) && !JUMP_P (n);
n = NEXT_INSN (n))
{
if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
struct loop_ivs *ivs = (struct loop_ivs *) data;
struct iv_class *bl;
- if (GET_CODE (dest) != REG
+ if (!REG_P (dest)
|| REGNO (dest) >= ivs->n_regs
|| REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
return;
and hence this insn will never be the last use of x.
???? This comment is not correct. See for example loop_givs_reduce.
This may insert an insn before another new insn. */
- if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
+ if (REG_P (x) && REGNO (x) < max_reg_before_loop
&& INSN_UID (insn) < max_uid_for_loop
&& REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
{
If WANT_REG is nonzero, we wish the condition to be relative to that
register, if possible. Therefore, do not canonicalize the condition
- further. */
+ further. If ALLOW_CC_MODE is nonzero, allow the condition returned
+ to be a compare to a CC mode register. */
rtx
canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
- rtx want_reg)
+ rtx want_reg, int allow_cc_mode)
{
enum rtx_code code;
rtx prev = insn;
the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
in cse.c */
- while (GET_RTX_CLASS (code) == '<'
+ while ((GET_RTX_CLASS (code) == RTX_COMPARE
+ || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
&& op1 == CONST0_RTX (GET_MODE (op0))
&& op0 != want_reg)
{
if (op0 == cc0_rtx)
{
if ((prev = prev_nonnote_insn (prev)) == 0
- || GET_CODE (prev) != INSN
+ || !NONJUMP_INSN_P (prev)
|| (set = single_set (prev)) == 0
|| SET_DEST (set) != cc0_rtx)
return 0;
op0 = XEXP (op0, 0);
continue;
}
- else if (GET_CODE (op0) != REG)
+ else if (!REG_P (op0))
break;
/* Go back to the previous insn. Stop if it is not an INSN. We also
we don't want to bother dealing with it. */
if ((prev = prev_nonnote_insn (prev)) == 0
- || GET_CODE (prev) != INSN
+ || !NONJUMP_INSN_P (prev)
|| FIND_REG_INC_NOTE (prev, NULL_RTX))
break;
REAL_VALUE_NEGATIVE (fsfv)))
#endif
))
- && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
+ && COMPARISON_P (SET_SRC (set))))
&& (((GET_MODE_CLASS (mode) == MODE_CC)
== (GET_MODE_CLASS (inner_mode) == MODE_CC))
|| mode == VOIDmode || inner_mode == VOIDmode))
REAL_VALUE_NEGATIVE (fsfv)))
#endif
))
- && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
+ && COMPARISON_P (SET_SRC (set))
&& (((GET_MODE_CLASS (mode) == MODE_CC)
== (GET_MODE_CLASS (inner_mode) == MODE_CC))
|| mode == VOIDmode || inner_mode == VOIDmode))
if (x)
{
- if (GET_RTX_CLASS (GET_CODE (x)) == '<')
+ if (COMPARISON_P (x))
code = GET_CODE (x);
if (reverse_code)
{
/* If OP0 is the result of a comparison, we weren't able to find what
was really being compared, so fail. */
- if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
+ if (!allow_cc_mode
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
return 0;
/* Canonicalize any ordered comparison with integers involving equality
if we can do computations in the relevant mode and we do not
overflow. */
- if (GET_CODE (op1) == CONST_INT
+ if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
+ && GET_CODE (op1) == CONST_INT
&& GET_MODE (op0) != VOIDmode
&& GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
{
If EARLIEST is nonzero, it is a pointer to a place where the earliest
insn used in locating the condition was found. If a replacement test
of the condition is desired, it should be placed in front of that
- insn and we will be sure that the inputs are still valid. */
+ insn and we will be sure that the inputs are still valid.
+
+ If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
+ compare to a CC mode register. */
rtx
-get_condition (rtx jump, rtx *earliest)
+get_condition (rtx jump, rtx *earliest, int allow_cc_mode)
{
rtx cond;
int reverse;
rtx set;
/* If this is not a standard conditional jump, we can't parse it. */
- if (GET_CODE (jump) != JUMP_INSN
+ if (!JUMP_P (jump)
|| ! any_condjump_p (jump))
return 0;
set = pc_set (jump);
= GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
&& XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
- return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
+ return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
+ allow_cc_mode);
}
/* Similar to above routine, except that we also put an invariant last
rtx
get_condition_for_loop (const struct loop *loop, rtx x)
{
- rtx comparison = get_condition (x, (rtx*) 0);
+ rtx comparison = get_condition (x, (rtx*) 0, false);
if (comparison == 0
|| ! loop_invariant_p (loop, XEXP (comparison, 0))
for (i = 0; i < loop_info->mems_idx; ++i)
if (rtx_equal_p (m, loop_info->mems[i].mem))
{
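+ /* If the new MEM is volatile but the MEM already recorded is not,
+    record the volatile copy so its volatility is not lost.  */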
+ if (MEM_VOLATILE_P (m) && !MEM_VOLATILE_P (loop_info->mems[i].mem))
+ loop_info->mems[i].mem = m;
if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
/* The modes of the two memory accesses are different. If
this happens, something tricky is going on, and we just
else
loop_info->mems_allocated = 32;
- loop_info->mems = (loop_mem_info *)
- xrealloc (loop_info->mems,
- loop_info->mems_allocated * sizeof (loop_mem_info));
+ loop_info->mems = xrealloc (loop_info->mems,
+ loop_info->mems_allocated * sizeof (loop_mem_info));
}
/* Actually insert the MEM. */
{
regs->size = regs->num + extra_size;
- regs->array = (struct loop_reg *)
- xrealloc (regs->array, regs->size * sizeof (*regs->array));
+ regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array));
/* Zero the new elements. */
memset (regs->array + old_nregs, 0,
regs->array[i].single_usage = NULL_RTX;
}
- last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
+ last_set = xcalloc (regs->num, sizeof (rtx));
/* Scan the loop, recording register usage. */
for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
}
}
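+ /* A label or a jump ends the current basic block, so the last-set
+    information recorded so far may no longer be valid; forget it.  */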
- if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
+ if (LABEL_P (insn) || JUMP_P (insn))
memset (last_set, 0, regs->num * sizeof (rtx));
/* Invalidate all registers used for function argument passing.
We check rtx_varies_p for the same reason as below, to allow
optimizing PIC calculations. */
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
{
rtx link;
for (link = CALL_INSN_FUNCTION_USAGE (insn);
rtx op, reg;
if (GET_CODE (op = XEXP (link, 0)) == USE
- && GET_CODE (reg = XEXP (op, 0)) == REG
+ && REG_P (reg = XEXP (op, 0))
&& rtx_varies_p (reg, 1))
regs->array[REGNO (reg)].may_not_optimize = 1;
}
/* We cannot use next_label here because it skips over normal insns. */
end_label = next_nonnote_insn (loop->end);
- if (end_label && GET_CODE (end_label) != CODE_LABEL)
+ if (end_label && !LABEL_P (end_label))
end_label = NULL_RTX;
/* Check to see if it's possible that some instructions in the loop are
p != NULL_RTX;
p = next_insn_in_loop (loop, p))
{
- if (GET_CODE (p) == CODE_LABEL)
+ if (LABEL_P (p))
maybe_never = 1;
- else if (GET_CODE (p) == JUMP_INSN
+ else if (JUMP_P (p)
/* If we enter the loop in the middle, and scan
around to the beginning, don't set maybe_never
for that. This must be an unconditional jump,
otherwise the code at the top of the loop might
never be executed. Unconditional jumps are
followed by a barrier, then the loop end. */
- && ! (GET_CODE (p) == JUMP_INSN
+ && ! (JUMP_P (p)
&& JUMP_LABEL (p) == loop->top
&& NEXT_INSN (NEXT_INSN (p)) == loop->end
&& any_uncondjump_p (p)))
/* Find start of the extended basic block that enters the loop. */
for (p = loop->start;
- PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
+ PREV_INSN (p) && !LABEL_P (p);
p = PREV_INSN (p))
;
prev_ebb_head = p;
- cselib_init ();
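+ /* The new argument is assumed to be cselib's record_memory flag:
+    nonzero asks cselib to track memory values as well as registers,
+    which the MEM table built below relies on.  */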
+ cselib_init (true);
/* Build table of mems that get set to constant values before the
loop. */
if (set
/* @@@ This test is _way_ too conservative. */
&& ! maybe_never
- && GET_CODE (SET_DEST (set)) == REG
+ && REG_P (SET_DEST (set))
&& REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
&& REGNO (SET_DEST (set)) < last_max_reg
&& regs->array[REGNO (SET_DEST (set))].n_times_set == 1
to untangle things for the BIV detection code. */
if (set
&& ! maybe_never
- && GET_CODE (SET_SRC (set)) == REG
+ && REG_P (SET_SRC (set))
&& REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
&& REGNO (SET_SRC (set)) < last_max_reg
&& regs->array[REGNO (SET_SRC (set))].n_times_set == 1
/* If this is a call which uses or clobbers this memory
location, we must not change the interface here. */
- if (GET_CODE (p) == CALL_INSN
+ if (CALL_P (p)
&& reg_mentioned_p (loop_info->mems[i].mem,
CALL_INSN_FUNCTION_USAGE (p)))
{
loop_info->mems[i].reg, written);
}
- if (GET_CODE (p) == CODE_LABEL
- || GET_CODE (p) == JUMP_INSN)
+ if (LABEL_P (p) || JUMP_P (p))
maybe_never = 1;
}
{
if (CONSTANT_P (equiv->loc))
const_equiv = equiv;
- else if (GET_CODE (equiv->loc) == REG
+ else if (REG_P (equiv->loc)
/* Extending hard register lifetimes causes crashes
on SRC targets. Doing so on non-SRC targets is
probably also not a good idea, since we most
}
}
+ /* Now, we need to replace all references to the previous exit
+ label with the new one. */
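+ /* redirect_jump updates JUMP_LABEL and the label use counts itself,
+    so no separate replace_label walk over the loop body is needed.  */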
if (label != NULL_RTX && end_label != NULL_RTX)
- {
- /* Now, we need to replace all references to the previous exit
- label with the new one. */
- replace_label_data rr;
- rr.r1 = end_label;
- rr.r2 = label;
- rr.update_label_nuses = true;
-
- for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
- {
- for_each_rtx (&p, replace_label, &rr);
- }
- }
+ for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
+ if (JUMP_P (p) && JUMP_LABEL (p) == end_label)
+ redirect_jump (p, label, false);
cselib_finish ();
}
/* Only substitute within one extended basic block from the initializing
insn. */
- if (GET_CODE (insn) == CODE_LABEL && init_insn)
+ if (LABEL_P (insn) && init_insn)
break;
if (! INSN_P (insn))
/* Is this the initializing insn? */
set = single_set (insn);
if (set
- && GET_CODE (SET_DEST (set)) == REG
+ && REG_P (SET_DEST (set))
&& REGNO (SET_DEST (set)) == regno)
{
if (init_insn)
/* Search for the insn that copies REGNO to NEW_REGNO. */
if (INSN_P (insn)
&& (set = single_set (insn))
- && GET_CODE (SET_DEST (set)) == REG
+ && REG_P (SET_DEST (set))
&& REGNO (SET_DEST (set)) == new_regno
- && GET_CODE (SET_SRC (set)) == REG
+ && REG_P (SET_SRC (set))
&& REGNO (SET_SRC (set)) == regno)
break;
}
if (INSN_P (insn)
&& (prev_set = single_set (prev_insn))
- && GET_CODE (SET_DEST (prev_set)) == REG
+ && REG_P (SET_DEST (prev_set))
&& REGNO (SET_DEST (prev_set)) == regno)
{
/* We have:
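+/* A for_each_rtx callback: if *X is a MEM, store it through DATA
+   (an rtx *) and return nonzero to stop the traversal.  */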
static int
find_mem_in_note_1 (rtx *x, void *data)
{
- if (*x != NULL_RTX && GET_CODE (*x) == MEM)
+ if (*x != NULL_RTX && MEM_P (*x))
{
rtx *res = (rtx *) data;
*res = *x;
/* The notes do not have an assigned block, so look at the next insn. */
#define LOOP_BLOCK_NUM(INSN) \
-((INSN) ? (GET_CODE (INSN) == NOTE \
+((INSN) ? (NOTE_P (INSN) \
? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
: LOOP_BLOCK_NUM_1 (INSN)) \
: -1)
/* Print diagnostics to compare our concept of a loop with
what the loop notes say. */
- if (! PREV_INSN (loop->first->head)
- || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
- || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
+ if (! PREV_INSN (BB_HEAD (loop->first))
+ || !NOTE_P (PREV_INSN (BB_HEAD (loop->first)))
+ || NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (loop->first)))
!= NOTE_INSN_LOOP_BEG)
fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
- INSN_UID (PREV_INSN (loop->first->head)));
- if (! NEXT_INSN (loop->last->end)
- || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
- || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
+ INSN_UID (PREV_INSN (BB_HEAD (loop->first))));
+ if (! NEXT_INSN (BB_END (loop->last))
+ || !NOTE_P (NEXT_INSN (BB_END (loop->last)))
+ || NOTE_LINE_NUMBER (NEXT_INSN (BB_END (loop->last)))
!= NOTE_INSN_LOOP_END)
fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
- INSN_UID (NEXT_INSN (loop->last->end)));
+ INSN_UID (NEXT_INSN (BB_END (loop->last))));
if (loop->start)
{