int last_set_label;
/* These fields are maintained in parallel with last_set_value and are
- used to store the mode in which the register was last set, te bits
+ used to store the mode in which the register was last set, the bits
that were known to be zero when it was last set, and the number of
   sign bit copies it was known to have when it was last set.  */
those blocks as starting points. */
static sbitmap refresh_blocks;
\f
-/* The following array records the combine_insn_cost for every insn
+/* The following array records the insn_rtx_cost for every insn
in the instruction stream. */
static int *uid_insn_cost;
#define SUBST_INT(INTO, NEWVAL) do_SUBST_INT(&(INTO), (NEWVAL))
\f
-/* Calculate the rtx_cost of a single instruction. A return value of zero
- indicates an instruction without a known cost. */
-
-static int
-combine_insn_cost (rtx pat)
-{
- int i, cost;
- rtx set;
-
- /* Extract the single set rtx from the instruction pattern.
- We can't use single_set since we only have the pattern. */
- if (GET_CODE (pat) == SET)
- set = pat;
- else if (GET_CODE (pat) == PARALLEL)
- {
- set = NULL_RTX;
- for (i = 0; i < XVECLEN (pat, 0); i++)
- {
- rtx x = XVECEXP (pat, 0, i);
- if (GET_CODE (x) == SET)
- {
- if (set)
- return 0;
- set = x;
- }
- }
- if (!set)
- return 0;
- }
- else
- return 0;
-
- cost = rtx_cost (SET_SRC (set), SET);
- return cost > 0 ? cost : COSTS_N_INSNS (1);
-}
-
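/* The deleted helper above is superseded by insn_rtx_cost, presumably
   the same single-set extraction logic hoisted to a shared file
   (rtlanal.c is an assumption, not confirmed by this hunk).  The
   contract is unchanged:

     extern int insn_rtx_cost (rtx);

   where a return value of zero still means "cost unknown".  */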
/* Subroutine of try_combine. Determine whether the combine replacement
- patterns NEWPAT and NEWI2PAT are cheaper according to combine_insn_cost
+ patterns NEWPAT and NEWI2PAT are cheaper according to insn_rtx_cost
   than the original instruction sequence I1, I2 and I3. Note that I1
   and/or NEWI2PAT may be NULL_RTX. This function returns false if the
costs of all instructions can be estimated, and the replacements are
int new_i2_cost, new_i3_cost;
int old_cost, new_cost;
- /* Lookup the original combine_insn_costs. */
+ /* Lookup the original insn_rtx_costs. */
i2_cost = INSN_UID (i2) <= last_insn_cost
? uid_insn_cost[INSN_UID (i2)] : 0;
i3_cost = INSN_UID (i3) <= last_insn_cost
i1_cost = 0;
}
- /* Calculate the replacement combine_insn_costs. */
- new_i3_cost = combine_insn_cost (newpat);
+ /* Calculate the replacement insn_rtx_costs. */
+ new_i3_cost = insn_rtx_cost (newpat);
if (newi2pat)
{
- new_i2_cost = combine_insn_cost (newi2pat);
+ new_i2_cost = insn_rtx_cost (newi2pat);
new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
? new_i2_cost + new_i3_cost : 0;
}
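/* A minimal sketch of how combine_validate_cost would conclude, given
   the zero-means-unknown convention above (the exact reject condition
   is an assumption based on the function comment, not shown in this
   hunk):

     old_cost = i1_cost + i2_cost + i3_cost;
     if (old_cost > 0 && new_cost > old_cost)
       return false;     (* replacement is more expensive; reject *)
     return true;
*/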
refresh_blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (refresh_blocks);
- /* Allocate array of current combine_insn_costs. */
+ /* Allocate array of current insn_rtx_costs. */
uid_insn_cost = xcalloc (max_uid_cuid + 1, sizeof (int));
last_insn_cost = max_uid_cuid;
NULL);
#endif
- /* Record the current combine_insn_cost of this instruction. */
- uid_insn_cost[INSN_UID (insn)] = combine_insn_cost (PATTERN (insn));
+ /* Record the current insn_rtx_cost of this instruction. */
+ if (NONJUMP_INSN_P (insn))
+ uid_insn_cost[INSN_UID (insn)] = insn_rtx_cost (PATTERN (insn));
if (dump_file)
 fprintf (dump_file, "insn_cost %d: %d\n",
INSN_UID (insn), uid_insn_cost[INSN_UID (insn)]);
}
- if (GET_CODE (insn) == CODE_LABEL)
+ if (LABEL_P (insn))
label_tick++;
}
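/* For reference, the predicate macros this patch adopts throughout
   are thin GET_CODE wrappers from rtl.h:

     #define NOTE_P(X)          (GET_CODE (X) == NOTE)
     #define LABEL_P(X)         (GET_CODE (X) == CODE_LABEL)
     #define JUMP_P(X)          (GET_CODE (X) == JUMP_INSN)
     #define CALL_P(X)          (GET_CODE (X) == CALL_INSN)
     #define NONJUMP_INSN_P(X)  (GET_CODE (X) == INSN)
     #define BARRIER_P(X)       (GET_CODE (X) == BARRIER)
     #define MEM_P(X)           (GET_CODE (X) == MEM)

   so each such hunk is a mechanical, behavior-preserving rewrite.  */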
{
next = 0;
- if (GET_CODE (insn) == CODE_LABEL)
+ if (LABEL_P (insn))
label_tick++;
else if (INSN_P (insn))
/* If the linked insn has been replaced by a note, then there
is no point in pursuing this chain any further. */
- if (GET_CODE (link) == NOTE)
+ if (NOTE_P (link))
continue;
for (nextlinks = LOG_LINKS (link);
We need this special code because data flow connections
via CC0 do not get entered in LOG_LINKS. */
- if (GET_CODE (insn) == JUMP_INSN
+ if (JUMP_P (insn)
&& (prev = prev_nonnote_insn (insn)) != 0
- && GET_CODE (prev) == INSN
+ && NONJUMP_INSN_P (prev)
&& sets_cc0_p (PATTERN (prev)))
{
if ((next = try_combine (insn, prev,
}
/* Do the same for an insn that explicitly references CC0. */
- if (GET_CODE (insn) == INSN
+ if (NONJUMP_INSN_P (insn)
&& (prev = prev_nonnote_insn (insn)) != 0
- && GET_CODE (prev) == INSN
+ && NONJUMP_INSN_P (prev)
&& sets_cc0_p (PATTERN (prev))
&& GET_CODE (PATTERN (insn)) == SET
&& reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
explicitly references CC0. If so, try this insn, that insn,
and its predecessor if it sets CC0. */
for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
- if (GET_CODE (XEXP (links, 0)) == INSN
+ if (NONJUMP_INSN_P (XEXP (links, 0))
&& GET_CODE (PATTERN (XEXP (links, 0))) == SET
&& reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (XEXP (links, 0))))
&& (prev = prev_nonnote_insn (XEXP (links, 0))) != 0
- && GET_CODE (prev) == INSN
+ && NONJUMP_INSN_P (prev)
&& sets_cc0_p (PATTERN (prev))
&& (next = try_combine (insn, XEXP (links, 0),
prev, &new_direct_jump_p)) != 0)
}
}
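/* On cc0 targets the compare and its consumer travel as an adjacent
   pair with no LOG_LINKS entry between them, e.g.:

     (set (cc0) (compare (reg 1) (const_int 0)))
     (set (pc) (if_then_else (ne (cc0) (const_int 0)) ...))

   which is why the loops above reach for prev_nonnote_insn and
   sets_cc0_p instead of following LOG_LINKS.  */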
- if (GET_CODE (insn) != NOTE)
+ if (!NOTE_P (insn))
record_dead_and_set_regs (insn);
retry:
/* Can't merge a function call. */
|| GET_CODE (src) == CALL
/* Don't eliminate a function call argument. */
- || (GET_CODE (i3) == CALL_INSN
+ || (CALL_P (i3)
&& (find_reg_fusage (i3, USE, dest)
|| (REG_P (dest)
&& REGNO (dest) < FIRST_PSEUDO_REGISTER
are intervening stores. Also, don't move a volatile asm or
UNSPEC_VOLATILE across any other insns. */
|| (! all_adjacent
- && (((GET_CODE (src) != MEM
+ && (((!MEM_P (src)
|| ! find_reg_note (insn, REG_EQUIV, src))
&& use_crosses_set_p (src, INSN_CUID (insn)))
|| (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
#ifdef AUTO_INC_DEC
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_INC
- && (GET_CODE (i3) == JUMP_INSN
+ && (JUMP_P (i3)
|| reg_used_between_p (XEXP (link, 0), insn, i3)
|| reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
return 0;
but that would be much slower, and this ought to be equivalent. */
p = prev_nonnote_insn (insn);
- if (p && p != pred && GET_CODE (p) == INSN && sets_cc0_p (PATTERN (p))
+ if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
&& ! all_adjacent)
return 0;
#endif
into the address of a MEM, so only prevent the combination if
i1 or i2 set the same MEM. */
if ((inner_dest != dest &&
- (GET_CODE (inner_dest) != MEM
+ (!MEM_P (inner_dest)
|| rtx_equal_p (i2dest, inner_dest)
|| (i1dest && rtx_equal_p (i1dest, inner_dest)))
&& (reg_overlap_mentioned_p (i2dest, inner_dest)
where I2 and I3 are adjacent to avoid making difficult register
usage tests. */
- if (i1 == 0 && GET_CODE (i3) == INSN && GET_CODE (PATTERN (i3)) == SET
+ if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
&& REG_P (SET_SRC (PATTERN (i3)))
&& REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
&& find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
#if 0
if (!(GET_CODE (PATTERN (i3)) == SET
&& REG_P (SET_SRC (PATTERN (i3)))
- && GET_CODE (SET_DEST (PATTERN (i3))) == MEM
+ && MEM_P (SET_DEST (PATTERN (i3)))
&& (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
|| GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
/* It's not the exception. */
#ifdef INSN_SCHEDULING
/* If *SPLIT is a paradoxical SUBREG, when we split it, it should
be written as a ZERO_EXTEND. */
- if (split_code == SUBREG && GET_CODE (SUBREG_REG (*split)) == MEM)
+ if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
{
#ifdef LOAD_EXTEND_OP
/* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
they are adjacent to each other or not. */
{
rtx p = prev_nonnote_insn (i3);
- if (p && p != i2 && GET_CODE (p) == INSN && newi2pat
+ if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
&& sets_cc0_p (newi2pat))
{
undo_all ();
}
#endif
- /* Only allow this combination if combine_insn_costs reports that the
+ /* Only allow this combination if insn_rtx_costs reports that the
replacement instructions are cheaper than the originals. */
if (!combine_validate_cost (i1, i2, i3, newpat, newi2pat))
{
INSN_CODE (i3) = insn_code_number;
PATTERN (i3) = newpat;
- if (GET_CODE (i3) == CALL_INSN && CALL_INSN_FUNCTION_USAGE (i3))
+ if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
{
rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);
PATTERN (i2) = newi2pat;
}
else
- {
- PUT_CODE (i2, NOTE);
- NOTE_LINE_NUMBER (i2) = NOTE_INSN_DELETED;
- NOTE_SOURCE_FILE (i2) = 0;
- }
+ SET_INSN_DELETED (i2);
if (i1)
{
LOG_LINKS (i1) = 0;
REG_NOTES (i1) = 0;
- PUT_CODE (i1, NOTE);
- NOTE_LINE_NUMBER (i1) = NOTE_INSN_DELETED;
- NOTE_SOURCE_FILE (i1) = 0;
+ SET_INSN_DELETED (i1);
}
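/* SET_INSN_DELETED replaces the old three-statement idiom deleted in
   the two hunks above; a sketch of the expected rtl.h expansion (an
   assumption -- the real macro may also clear NOTE_SOURCE_FILE or
   call a helper):

     #define SET_INSN_DELETED(INSN)                   \
       (PUT_CODE (INSN, NOTE),                        \
        NOTE_LINE_NUMBER (INSN) = NOTE_INSN_DELETED)
*/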
/* Get death notes for everything that is now used in either I3 or
mark_jump_label (PATTERN (i3), i3, 0);
if ((temp = next_nonnote_insn (i3)) == NULL_RTX
- || GET_CODE (temp) != BARRIER)
+ || !BARRIER_P (temp))
emit_barrier_after (i3);
}
*new_direct_jump_p = 1;
if ((temp = next_nonnote_insn (undobuf.other_insn)) == NULL_RTX
- || GET_CODE (temp) != BARRIER)
+ || !BARRIER_P (temp))
emit_barrier_after (undobuf.other_insn);
}
#ifdef INSN_SCHEDULING
/* If we are making a paradoxical SUBREG invalid, it becomes a split
point. */
- if (GET_CODE (SUBREG_REG (x)) == MEM)
+ if (MEM_P (SUBREG_REG (x)))
return loc;
#endif
return find_split_point (&SUBREG_REG (x), insn);
if (seq
&& NEXT_INSN (seq) != NULL_RTX
&& NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
- && GET_CODE (seq) == INSN
+ && NONJUMP_INSN_P (seq)
&& GET_CODE (PATTERN (seq)) == SET
&& SET_DEST (PATTERN (seq)) == reg
&& ! reg_mentioned_p (reg,
SET_SRC (PATTERN (seq)))
- && GET_CODE (NEXT_INSN (seq)) == INSN
+ && NONJUMP_INSN_P (NEXT_INSN (seq))
&& GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
&& SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
&& memory_address_p (GET_MODE (x),
/* Don't change the mode of the MEM if that would change the meaning
of the address. */
- if (GET_CODE (SUBREG_REG (x)) == MEM
+ if (MEM_P (SUBREG_REG (x))
&& (MEM_VOLATILE_P (SUBREG_REG (x))
|| mode_dependent_address_p (XEXP (SUBREG_REG (x), 0))))
return gen_rtx_CLOBBER (mode, const0_rtx);
rtx f = make_compound_operation (false_rtx, SET);
rtx cond_op0 = XEXP (cond, 0);
rtx cond_op1 = XEXP (cond, 1);
- enum rtx_code op = NIL, extend_op = NIL;
+ enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
enum machine_mode m = mode;
rtx z = 0, c1 = NULL_RTX;
temp = subst (temp, pc_rtx, pc_rtx, 0, 0);
temp = gen_binary (op, m, gen_lowpart (m, z), temp);
- if (extend_op != NIL)
+ if (extend_op != UNKNOWN)
temp = simplify_gen_unary (extend_op, mode, temp, m);
return temp;
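/* Example of the extend_op handling above: when both arms of the
   IF_THEN_ELSE carry the same extension, the arms are first combined
   in the narrower mode M and a single extension is re-applied
   afterwards by simplify_gen_unary, e.g.
   (if_then_else C (sign_extend:SI X) (sign_extend:SI Y)) becomes a
   single (sign_extend:SI ...) wrapped around the narrow-mode result.  */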
if (GET_CODE (src) == COMPARE)
op0 = XEXP (src, 0), op1 = XEXP (src, 1);
else
- op0 = src, op1 = const0_rtx;
+ op0 = src, op1 = CONST0_RTX (GET_MODE (src));
tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
op0, op1);
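/* CONST0_RTX (GET_MODE (src)) selects the zero that matches SRC's
   mode -- e.g. a (const_double 0.0) for floating-point modes --
   whereas the old const0_rtx was always (const_int 0), which mixes
   modes when SRC is a floating-point value.  */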
zero_extend to avoid the reload that would otherwise be required. */
if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
- && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != NIL
+ && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
&& SUBREG_BYTE (src) == 0
&& (GET_MODE_SIZE (GET_MODE (src))
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
- && GET_CODE (SUBREG_REG (src)) == MEM)
+ && MEM_P (SUBREG_REG (src)))
{
SUBST (SET_SRC (x),
gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
The subreg adds or removes high bits; its mode is
irrelevant to the meaning of this extraction,
since POS and LEN count from the lsb. */
- if (GET_CODE (SUBREG_REG (inner)) == MEM)
+ if (MEM_P (SUBREG_REG (inner)))
is_mode = GET_MODE (SUBREG_REG (inner));
inner = SUBREG_REG (inner);
}
if (tmode != BLKmode
&& ! (spans_byte && inner_mode != tmode)
&& ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
- && GET_CODE (inner) != MEM
+ && !MEM_P (inner)
&& (! in_dest
|| (REG_P (inner)
&& have_insn_for (STRICT_LOW_PART, tmode))))
- || (GET_CODE (inner) == MEM && pos_rtx == 0
+ || (MEM_P (inner) && pos_rtx == 0
&& (pos
% (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
: BITS_PER_UNIT)) == 0
If INNER is not a MEM, get a piece consisting of just the field
of interest (in this case POS % BITS_PER_WORD must be 0). */
- if (GET_CODE (inner) == MEM)
+ if (MEM_P (inner))
{
HOST_WIDE_INT offset;
make a STRICT_LOW_PART unless we made a MEM. */
if (in_dest)
- return (GET_CODE (new) == MEM ? new
+ return (MEM_P (new) ? new
: (GET_CODE (new) != SUBREG
? gen_rtx_CLOBBER (tmode, const0_rtx)
: gen_rtx_STRICT_LOW_PART (VOIDmode, new)));
length is not 1. In all other cases, we would only be going outside
our object in cases when an original shift would have been
undefined. */
- if (! spans_byte && GET_CODE (inner) == MEM
+ if (! spans_byte && MEM_P (inner)
&& ((pos_rtx == 0 && pos + len > GET_MODE_BITSIZE (is_mode))
|| (pos_rtx != 0 && len != 1)))
return 0;
/* If this is not from memory, the desired mode is wanted_inner_reg_mode;
if we have to change the mode of memory and cannot, the desired mode is
EXTRACTION_MODE. */
- if (GET_CODE (inner) != MEM)
+ if (!MEM_P (inner))
wanted_inner_mode = wanted_inner_reg_mode;
else if (inner_mode != wanted_inner_mode
&& (mode_dependent_address_p (XEXP (inner, 0))
If it's a MEM we need to recompute POS relative to that.
However, if we're extracting from (or inserting into) a register,
we want to recompute POS relative to wanted_inner_mode. */
- int width = (GET_CODE (inner) == MEM
+ int width = (MEM_P (inner)
? GET_MODE_BITSIZE (is_mode)
: GET_MODE_BITSIZE (wanted_inner_mode));
pos_rtx
= gen_rtx_MINUS (GET_MODE (pos_rtx), GEN_INT (width - len), pos_rtx);
/* POS may be less than 0 now, but we check for that below.
- Note that it can only be less than 0 if GET_CODE (inner) != MEM. */
+ Note that it can only be less than 0 if !MEM_P (inner). */
}
/* If INNER has a wider mode, make it smaller. If this is a constant
the value. */
if (wanted_inner_mode != VOIDmode
&& GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
- && ((GET_CODE (inner) == MEM
+ && ((MEM_P (inner)
&& (inner_mode == wanted_inner_mode
|| (! mode_dependent_address_p (XEXP (inner, 0))
&& ! MEM_VOLATILE_P (inner))))))
/* If INNER is not memory, we can always get it into the proper mode. If we
are changing its mode, POS must be a constant and smaller than the size
of the new mode. */
- else if (GET_CODE (inner) != MEM)
+ else if (!MEM_P (inner))
{
if (GET_MODE (inner) != wanted_inner_mode
&& (pos_rtx != 0
/* We can only change the mode of the shift if we can do arithmetic
in the mode of the shift and INNER_MASK is no wider than the
- width of OP_MODE. */
- if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT
- || (inner_mask & ~GET_MODE_MASK (op_mode)) != 0)
+ width of X's mode. */
+ if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
op_mode = GET_MODE (x);
inner = force_to_mode (inner, op_mode, inner_mask, reg, next_select);
/* Check for a paradoxical SUBREG of a MEM compared with the MEM.
Note that all SUBREGs of MEM are paradoxical; otherwise they
would have been rewritten. */
- if (GET_CODE (x) == MEM && GET_CODE (y) == SUBREG
- && GET_CODE (SUBREG_REG (y)) == MEM
+ if (MEM_P (x) && GET_CODE (y) == SUBREG
+ && MEM_P (SUBREG_REG (y))
&& rtx_equal_p (SUBREG_REG (y),
gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
return 1;
- if (GET_CODE (y) == MEM && GET_CODE (x) == SUBREG
- && GET_CODE (SUBREG_REG (x)) == MEM
+ if (MEM_P (y) && GET_CODE (x) == SUBREG
+ && MEM_P (SUBREG_REG (x))
&& rtx_equal_p (SUBREG_REG (x),
gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
return 1;
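/* Example for the two checks above: with x = (mem:SI A) and
   y = (subreg:SI (mem:QI A) 0), the SUBREG is paradoxical (wider than
   its inner MEM), and gen_lowpart (QImode, x) recovers (mem:QI A), so
   rtx_equal_p reports the two as equivalent.  */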
the width of this mode matter. It is assumed that the width of this mode
is smaller than or equal to HOST_BITS_PER_WIDE_INT.
- If *POP0 or OP1 are NIL, it means no operation is required. Only NEG, PLUS,
+ If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
result is simply *PCONST0.
if (op0 == AND)
const1 &= const0;
- /* If OP0 or OP1 is NIL, this is easy. Similarly if they are the same or
+ /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
if OP0 is SET. */
- if (op1 == NIL || op0 == SET)
+ if (op1 == UNKNOWN || op0 == SET)
return 1;
- else if (op0 == NIL)
+ else if (op0 == UNKNOWN)
op0 = op1, const0 = const1;
else if (op0 == op1)
const0 += const1;
break;
case NEG:
- op0 = NIL;
+ op0 = UNKNOWN;
break;
default:
break;
const0 &= GET_MODE_MASK (mode);
if (const0 == 0
&& (op0 == IOR || op0 == XOR || op0 == PLUS))
- op0 = NIL;
+ op0 = UNKNOWN;
else if (const0 == 0 && op0 == AND)
op0 = SET;
else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
&& op0 == AND)
- op0 = NIL;
+ op0 = UNKNOWN;
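/* Worked example of the constant merging above: stacking AND 0xff on
   AND 0x0f leaves the single mask 0xff & 0x0f = 0x0f; a merged
   constant of zero turns IOR, XOR or PLUS into no operation at all
   (op0 = UNKNOWN), and an AND whose constant equals the full
   GET_MODE_MASK likewise disappears.  */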
/* ??? Slightly redundant with the above mask, but not entirely.
Moving this above means we'd have to sign-extend the mode mask
unsigned int mode_words
= (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
/* We form (outer_op (code varop count) (outer_const)). */
- enum rtx_code outer_op = NIL;
+ enum rtx_code outer_op = UNKNOWN;
HOST_WIDE_INT outer_const = 0;
rtx const_rtx;
int complement_p = 0;
/* We have now finished analyzing the shift. The result should be
a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
- OUTER_OP is non-NIL, it is an operation that needs to be applied
+ OUTER_OP is other than UNKNOWN, it is an operation that needs to be applied
to the result of the shift. OUTER_CONST is the relevant constant,
but we must turn off all bits turned off in the shift.
for the outer operation. So try to do the simplification
recursively. */
- if (outer_op != NIL && GET_CODE (x) == code
+ if (outer_op != UNKNOWN && GET_CODE (x) == code
&& GET_CODE (XEXP (x, 1)) == CONST_INT)
x = simplify_shift_const (x, code, shift_mode, XEXP (x, 0),
INTVAL (XEXP (x, 1)));
if (complement_p)
x = simplify_gen_unary (NOT, result_mode, x, result_mode);
- if (outer_op != NIL)
+ if (outer_op != UNKNOWN)
{
if (GET_MODE_BITSIZE (result_mode) < HOST_BITS_PER_WIDE_INT)
outer_const = trunc_int_for_mode (outer_const, result_mode);
/* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
won't know what to do. So we will strip off the SUBREG here and
process normally. */
- if (GET_CODE (x) == SUBREG && GET_CODE (SUBREG_REG (x)) == MEM)
+ if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
{
x = SUBREG_REG (x);
if (GET_MODE (x) == mode)
if (result)
return result;
- if (GET_CODE (x) == MEM)
+ if (MEM_P (x))
{
int offset = 0;
break;
case GEU:
- /* >= C is equivalent to < (C - 1). */
+ /* >= C is equivalent to > (C - 1). */
if (const_op > 1)
{
const_op -= 1;
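/* Worked example: for unsigned operands, x >= 5 accepts exactly the
   values that x > 4 does, so the comparison is presumably retried as
   GTU against const_op - 1 (smaller constants are often easier for
   the target to match).  */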
those bits.
3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not
- NIL. In that case we know those bits are zeros or ones. We must
+ UNKNOWN. In that case we know those bits are zeros or ones. We must
also be sure that they are the same as the upper bits of op1.
We can never remove a SUBREG for a non-equality comparison because
else
record_value_for_reg (dest, record_dead_insn, NULL_RTX);
}
- else if (GET_CODE (dest) == MEM
+ else if (MEM_P (dest)
/* Ignore pushes, they clobber nothing. */
&& ! push_operand (dest, GET_MODE (dest)))
mem_last_set = INSN_CUID (record_dead_insn);
record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
}
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
{
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
/* If this is a memory reference, make sure that there were
no stores after it that might have clobbered the value. We don't
have alias info, so we assume any store invalidates it. */
- else if (GET_CODE (x) == MEM && ! RTX_UNCHANGING_P (x)
+ else if (MEM_P (x) && !MEM_READONLY_P (x)
&& INSN_CUID (insn) <= mem_last_set)
{
if (replace)
/* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, label, or
beginning of function. */
- for (; insn && GET_CODE (insn) != CODE_LABEL && GET_CODE (insn) != BARRIER;
+ for (; insn && !LABEL_P (insn) && !BARRIER_P (insn);
insn = prev_nonnote_insn (insn))
{
note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
case CLOBBER:
/* If we are clobbering a MEM, mark any hard registers inside the
address as used. */
- if (GET_CODE (XEXP (x, 0)) == MEM)
+ if (MEM_P (XEXP (x, 0)))
mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
return;
|| GET_CODE (testreg) == STRICT_LOW_PART)
testreg = XEXP (testreg, 0);
- if (GET_CODE (testreg) == MEM)
+ if (MEM_P (testreg))
mark_used_regs_combine (XEXP (testreg, 0));
mark_used_regs_combine (SET_SRC (x));
For a REG (the only other possibility), the entire value is
being replaced so the old value is not used in this insn. */
- if (GET_CODE (dest) == MEM)
+ if (MEM_P (dest))
move_deaths (XEXP (dest, 0), maybe_kill_insn, from_cuid,
to_insn, pnotes);
return;
/* Just get rid of this note, as it is unused later anyway. */
break;
- case REG_VTABLE_REF:
- /* ??? Should remain with *a particular* memory load. Given the
- nature of vtable data, the last insn seems relatively safe. */
- place = i3;
- break;
-
case REG_NON_LOCAL_GOTO:
- if (GET_CODE (i3) == JUMP_INSN)
+ if (JUMP_P (i3))
place = i3;
- else if (i2 && GET_CODE (i2) == JUMP_INSN)
+ else if (i2 && JUMP_P (i2))
place = i2;
else
abort ();
case REG_EH_REGION:
/* These notes must remain with the call or trapping instruction. */
- if (GET_CODE (i3) == CALL_INSN)
+ if (CALL_P (i3))
place = i3;
- else if (i2 && GET_CODE (i2) == CALL_INSN)
+ else if (i2 && CALL_P (i2))
place = i2;
else if (flag_non_call_exceptions)
{
case REG_SETJMP:
/* These notes must remain with the call. It should not be
possible for both I2 and I3 to be a call. */
- if (GET_CODE (i3) == CALL_INSN)
+ if (CALL_P (i3))
place = i3;
- else if (i2 && GET_CODE (i2) == CALL_INSN)
+ else if (i2 && CALL_P (i2))
place = i2;
else
abort ();
/* Don't attach REG_LABEL note to a JUMP_INSN which has
JUMP_LABEL already. Instead, decrement LABEL_NUSES. */
- if (place && GET_CODE (place) == JUMP_INSN && JUMP_LABEL (place))
+ if (place && JUMP_P (place) && JUMP_LABEL (place))
{
if (JUMP_LABEL (place) != XEXP (note, 0))
abort ();
- if (GET_CODE (JUMP_LABEL (place)) == CODE_LABEL)
+ if (LABEL_P (JUMP_LABEL (place)))
LABEL_NUSES (JUMP_LABEL (place))--;
place = 0;
}
- if (place2 && GET_CODE (place2) == JUMP_INSN && JUMP_LABEL (place2))
+ if (place2 && JUMP_P (place2) && JUMP_LABEL (place2))
{
if (JUMP_LABEL (place2) != XEXP (note, 0))
abort ();
- if (GET_CODE (JUMP_LABEL (place2)) == CODE_LABEL)
+ if (LABEL_P (JUMP_LABEL (place2)))
LABEL_NUSES (JUMP_LABEL (place2))--;
place2 = 0;
}
/* If the insn previously containing this note still exists,
put it back where it was. Otherwise move it to the previous
insn. Adjust the corresponding REG_LIBCALL note. */
- if (GET_CODE (from_insn) != NOTE)
+ if (!NOTE_P (from_insn))
place = from_insn;
else
{
case REG_LIBCALL:
/* This is handled similarly to REG_RETVAL. */
- if (GET_CODE (from_insn) != NOTE)
+ if (!NOTE_P (from_insn))
place = from_insn;
else
{
use of A and put the death note there. */
if (from_insn
- && GET_CODE (from_insn) == CALL_INSN
+ && CALL_P (from_insn)
&& find_reg_fusage (from_insn, USE, XEXP (note, 0)))
place = from_insn;
else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
distribute_notes (old_notes, tem, tem, NULL_RTX);
distribute_links (LOG_LINKS (tem));
- PUT_CODE (tem, NOTE);
- NOTE_LINE_NUMBER (tem) = NOTE_INSN_DELETED;
- NOTE_SOURCE_FILE (tem) = 0;
+ SET_INSN_DELETED (tem);
#ifdef HAVE_cc0
/* Delete the setter too. */
cc0_setter, NULL_RTX);
distribute_links (LOG_LINKS (cc0_setter));
- PUT_CODE (cc0_setter, NOTE);
- NOTE_LINE_NUMBER (cc0_setter)
- = NOTE_INSN_DELETED;
- NOTE_SOURCE_FILE (cc0_setter) = 0;
+ SET_INSN_DELETED (cc0_setter);
}
#endif
}
}
}
else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem))
- || (GET_CODE (tem) == CALL_INSN
+ || (CALL_P (tem)
&& find_reg_fusage (tem, USE, XEXP (note, 0))))
{
place = tem;
replace I3, I2, and I1 by I3 and I2. But in that case the
destination of I2 also remains unchanged. */
- if (GET_CODE (XEXP (link, 0)) == NOTE
+ if (NOTE_P (XEXP (link, 0))
|| (set = single_set (XEXP (link, 0))) == 0)
continue;
place = insn;
break;
}
- else if (GET_CODE (insn) == CALL_INSN
+ else if (CALL_P (insn)
&& find_reg_fusage (insn, USE, reg))
{
place = insn;
rtx x = *loc;
if (x != NULL_RTX
- && (REG_P (x) || GET_CODE (x) == MEM)
+ && (REG_P (x) || MEM_P (x))
&& ! reg_mentioned_p (x, (rtx) expr))
return 1;
return 0;
insn_cuid (rtx insn)
{
while (insn != 0 && INSN_UID (insn) > max_uid_cuid
- && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE)
+ && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == USE)
insn = NEXT_INSN (insn);
if (INSN_UID (insn) > max_uid_cuid)