/* Optimize by combining instructions for GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
- 2011 Free Software Foundation, Inc.
+ 2011, 2012 Free Software Foundation, Inc.
This file is part of GCC.
/* Record one modification to rtl structure
to be undone by storing old_contents into *where. */
-enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE };
+enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
struct undo
{
struct undo *next;
enum undo_kind kind;
- union { rtx r; int i; enum machine_mode m; } old_contents;
- union { rtx *r; int *i; } where;
+ union { rtx r; int i; enum machine_mode m; struct insn_link *l; } old_contents;
+ union { rtx *r; int *i; struct insn_link **l; } where;
};
/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx, bool);
-static rtx subst (rtx, rtx, rtx, int, int);
-static rtx combine_simplify_rtx (rtx, enum machine_mode, int);
+static rtx subst (rtx, rtx, rtx, int, int, int);
+static rtx combine_simplify_rtx (rtx, enum machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
int);
static int recog_for_combine (rtx *, rtx, rtx *);
static rtx gen_lowpart_for_combine (enum machine_mode, rtx);
+static enum rtx_code simplify_compare_const (enum rtx_code, rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx, rtx);
}
#define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE(&(INTO), (NEWVAL))
+
+#ifndef HAVE_cc0
+/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
+
+static void
+do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
+{
+ struct undo *buf;
+ struct insn_link *oldval = *into;
+
+ if (oldval == newval)
+ return;
+
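+ /* Reuse an undo buffer from the free list when possible. */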
+ if (undobuf.frees)
+ buf = undobuf.frees, undobuf.frees = buf->next;
+ else
+ buf = XNEW (struct undo);
+
+ buf->kind = UNDO_LINKS;
+ buf->where.l = into;
+ buf->old_contents.l = oldval;
+ *into = newval;
+
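+ /* Chain the new undo record onto the list of pending undos. */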
+ buf->next = undobuf.undos, undobuf.undos = buf;
+}
+
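+/* For example,
+ SUBST_LINK (LOG_LINKS (i2), alloc_insn_link (i1, LOG_LINKS (i2)))
+ prepends I1 to I2's LOG_LINKS while recording the old list head,
+ so that undo_all () can restore the links if the combination
+ attempt fails. */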
+#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
+#endif
\f
/* Subroutine of try_combine. Determine whether the replacement patterns
NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
say what its contents were. */
&& ! REGNO_REG_SET_P
(DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x))
- && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
+ && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
{
reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
set what we know about X. */
if (SET_DEST (set) == x
- || (GET_CODE (SET_DEST (set)) == SUBREG
- && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
- > GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (set)))))
+ || (paradoxical_subreg_p (SET_DEST (set))
&& SUBREG_REG (SET_DEST (set)) == x))
{
rtx src = SET_SRC (set);
??? For 2.5, try to tighten up the MD files in this regard
instead of this kludge. */
- if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
+ if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
&& CONST_INT_P (src)
&& INTVAL (src) > 0
- && 0 != (UINTVAL (src)
- & ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
- src = GEN_INT (UINTVAL (src)
- | ((unsigned HOST_WIDE_INT) (-1)
- << GET_MODE_BITSIZE (GET_MODE (x))));
+ && val_signbit_known_set_p (GET_MODE (x), INTVAL (src)))
+ src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (GET_MODE (x)));
#endif
/* Don't call nonzero_bits if it cannot change anything. */
rtx link;
#endif
bool all_adjacent = true;
+ int (*is_volatile_p) (const_rtx);
if (succ)
{
if (set == 0)
return 0;
+ /* The simplification in expand_field_assignment may call back to
+ get_last_value, so set a safeguard here. */
+ subst_low_luid = DF_INSN_LUID (insn);
+
set = expand_field_assignment (set);
src = SET_SRC (set), dest = SET_DEST (set);
&& REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
return 0;
- /* If there are any volatile insns between INSN and I3, reject, because
- they might affect machine state. */
+ /* If INSN contains volatile references (specifically volatile MEMs),
+ we cannot combine across any other volatile references.
+ Even if INSN doesn't contain volatile references, any intervening
+ volatile insn might affect machine state. */
+ is_volatile_p = volatile_refs_p (PATTERN (insn))
+ ? volatile_refs_p
+ : volatile_insn_p;
+
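+ /* For example, if INSN contains a volatile load and some insn P
+ between INSN and I3 contains a volatile store, combining INSN
+ into I3 would move the load across the store, so the
+ combination must be rejected. */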
for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
- if (INSN_P (p) && p != succ && p != succ2 && volatile_insn_p (PATTERN (p)))
+ if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
return 0;
/* If INSN contains an autoincrement or autodecrement, make sure that
update_cfg_for_uncondjump (rtx insn)
{
basic_block bb = BLOCK_FOR_INSN (insn);
- bool at_end = (BB_END (bb) == insn);
+ gcc_assert (BB_END (bb) == insn);
- if (at_end)
- purge_dead_edges (bb);
+ purge_dead_edges (bb);
delete_insn (insn);
- if (at_end && EDGE_COUNT (bb->succs) == 1)
+ if (EDGE_COUNT (bb->succs) == 1)
{
rtx insn;
rtx i3dest_killed = 0;
/* SET_DEST and SET_SRC of I2, I1 and I0. */
rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
- /* Copy of SET_SRC of I1, if needed. */
- rtx i1src_copy = 0;
+ /* Copy of SET_SRC of I1 and I0, if needed. */
+ rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
/* Set if I2DEST was reused as a scratch register. */
bool i2scratch = false;
/* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
offset = INTVAL (XEXP (dest, 2));
dest = XEXP (dest, 0);
if (BITS_BIG_ENDIAN)
- offset = GET_MODE_BITSIZE (GET_MODE (dest)) - width - offset;
+ offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
}
}
else
{
if (GET_CODE (dest) == STRICT_LOW_PART)
dest = XEXP (dest, 0);
- width = GET_MODE_BITSIZE (GET_MODE (dest));
+ width = GET_MODE_PRECISION (GET_MODE (dest));
offset = 0;
}
if (subreg_lowpart_p (dest))
;
/* Handle the case where inner is twice the size of outer. */
- else if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
- == 2 * GET_MODE_BITSIZE (GET_MODE (dest)))
- offset += GET_MODE_BITSIZE (GET_MODE (dest));
+ else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp)))
+ == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
+ offset += GET_MODE_PRECISION (GET_MODE (dest));
/* Otherwise give up for now. */
else
offset = -1;
}
if (offset >= 0
- && (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
+ && (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp)))
<= HOST_BITS_PER_DOUBLE_INT))
{
double_int m, o, i;
SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
SET_DEST (PATTERN (i1)));
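+ /* I2 now uses the value set by I1, so add I1 to I2's LOG_LINKS;
+ SUBST_LINK records the old links so the change can be undone. */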
+ SUBST_LINK (LOG_LINKS (i2), alloc_insn_link (i1, LOG_LINKS (i2)));
}
}
#endif
if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
&& GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
- && XEXP (SET_SRC (PATTERN (i3)), 1) == const0_rtx
+ && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
&& rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
{
-#ifdef SELECT_CC_MODE
- rtx *cc_use;
- enum machine_mode compare_mode;
-#endif
+ rtx newpat_dest;
+ rtx *cc_use_loc = NULL, cc_use_insn = NULL_RTX;
+ rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
+ enum machine_mode compare_mode, orig_compare_mode;
+ enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
newpat = PATTERN (i3);
- SUBST (XEXP (SET_SRC (newpat), 0), i2src);
+ newpat_dest = SET_DEST (newpat);
+ compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
- i2_is_used = 1;
-
-#ifdef SELECT_CC_MODE
- /* See if a COMPARE with the operand we substituted in should be done
- with the mode that is currently being used. If not, do the same
- processing we do in `subst' for a SET; namely, if the destination
- is used only once, try to replace it with a register of the proper
- mode and also replace the COMPARE. */
if (undobuf.other_insn == 0
- && (cc_use = find_single_use (SET_DEST (newpat), i3,
- &undobuf.other_insn))
- && ((compare_mode = SELECT_CC_MODE (GET_CODE (*cc_use),
- i2src, const0_rtx))
- != GET_MODE (SET_DEST (newpat))))
+ && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
+ &cc_use_insn)))
{
- if (can_change_dest_mode (SET_DEST (newpat), added_sets_2,
- compare_mode))
- {
- unsigned int regno = REGNO (SET_DEST (newpat));
- rtx new_dest;
+ compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
+ compare_code = simplify_compare_const (compare_code,
+ op0, &op1);
+#ifdef CANONICALIZE_COMPARISON
+ CANONICALIZE_COMPARISON (compare_code, op0, op1);
+#endif
+ }
- if (regno < FIRST_PSEUDO_REGISTER)
- new_dest = gen_rtx_REG (compare_mode, regno);
- else
+ /* Do the rest only if op1 is const0_rtx, which may be the
+ result of simplification. */
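+ /* For example, simplify_compare_const above can turn
+ (gtu op0 (const_int 0)) into (ne op0 (const_int 0)),
+ making op1 const0_rtx. */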
+ if (op1 == const0_rtx)
+ {
+ /* If a single use of the CC is found, prepare to modify it
+ when SELECT_CC_MODE returns a new CC-class mode, or when the
+ simplify_compare_const call above has returned a new comparison
+ operator. undobuf.other_insn is assigned the CC use insn
+ when modifying it. */
+ if (cc_use_loc)
+ {
+#ifdef SELECT_CC_MODE
+ enum machine_mode new_mode
+ = SELECT_CC_MODE (compare_code, op0, op1);
+ if (new_mode != orig_compare_mode
+ && can_change_dest_mode (SET_DEST (newpat),
+ added_sets_2, new_mode))
{
- SUBST_MODE (regno_reg_rtx[regno], compare_mode);
- new_dest = regno_reg_rtx[regno];
+ unsigned int regno = REGNO (newpat_dest);
+ compare_mode = new_mode;
+ if (regno < FIRST_PSEUDO_REGISTER)
+ newpat_dest = gen_rtx_REG (compare_mode, regno);
+ else
+ {
+ SUBST_MODE (regno_reg_rtx[regno], compare_mode);
+ newpat_dest = regno_reg_rtx[regno];
+ }
}
-
- SUBST (SET_DEST (newpat), new_dest);
- SUBST (XEXP (*cc_use, 0), new_dest);
- SUBST (SET_SRC (newpat),
- gen_rtx_COMPARE (compare_mode, i2src, const0_rtx));
- }
- else
- undobuf.other_insn = 0;
- }
#endif
+ /* Cases for modifying the CC-using comparison. */
+ if (compare_code != orig_compare_code
+ /* ??? Do we need to verify the zero rtx? */
+ && XEXP (*cc_use_loc, 1) == const0_rtx)
+ {
+ /* Replace cc_use_loc with entire new RTX. */
+ SUBST (*cc_use_loc,
+ gen_rtx_fmt_ee (compare_code, compare_mode,
+ newpat_dest, const0_rtx));
+ undobuf.other_insn = cc_use_insn;
+ }
+ else if (compare_mode != orig_compare_mode)
+ {
+ /* Just replace the CC reg with a new mode. */
+ SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
+ undobuf.other_insn = cc_use_insn;
+ }
+ }
+
+ /* Now we modify the current newpat:
+ First, SET_DEST (newpat) is updated if the CC mode has been
+ altered. For targets without SELECT_CC_MODE, this should be
+ optimized away. */
+ if (compare_mode != orig_compare_mode)
+ SUBST (SET_DEST (newpat), newpat_dest);
+ /* This is always done to propagate i2src into newpat. */
+ SUBST (SET_SRC (newpat),
+ gen_rtx_COMPARE (compare_mode, op0, op1));
+ /* Create a new version of i2pat if needed; the PARALLEL
+ creation below needs this to work correctly. */
+ if (! rtx_equal_p (i2src, op0))
+ i2pat = gen_rtx_SET (VOIDmode, i2dest, op0);
+ i2_is_used = 1;
+ }
}
- else
#endif
+
+ if (i2_is_used == 0)
{
/* It is possible that the source of I2 or I1 may be performing
an unneeded operation, such as a ZERO_EXTEND of something
if (i1)
{
subst_low_luid = DF_INSN_LUID (i1);
- i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0);
+ i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
}
subst_low_luid = DF_INSN_LUID (i2);
- i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0);
+ i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
}
n_occurrences = 0; /* `subst' counts here */
self-referential RTL when we will be substituting I1SRC for I1DEST
later. Likewise if I0 feeds into I2, either directly or indirectly
through I1, and I0DEST is in I0SRC. */
- newpat = subst (PATTERN (i3), i2dest, i2src, 0,
+ newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
(i1_feeds_i2_n && i1dest_in_i1src)
|| ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
&& i0dest_in_i0src));
n_occurrences = 0;
subst_low_luid = DF_INSN_LUID (i1);
+ /* If the following substitution will modify I1SRC, make a copy of it
+ for the case where it is substituted for I1DEST in I2PAT later. */
+ if (added_sets_2 && i1_feeds_i2_n)
+ i1src_copy = copy_rtx (i1src);
+
/* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
copy of I1SRC each time we substitute it, in order to avoid creating
self-referential RTL when we will be substituting I0SRC for I0DEST
later. */
- newpat = subst (newpat, i1dest, i1src, 0,
+ newpat = subst (newpat, i1dest, i1src, 0, 0,
i0_feeds_i1_n && i0dest_in_i0src);
substed_i1 = 1;
return 0;
}
- /* If the following substitution will modify I1SRC, make a copy of it
- for the case where it is substituted for I1DEST in I2PAT later. */
- if (i0_feeds_i1_n && added_sets_2 && i1_feeds_i2_n)
- i1src_copy = copy_rtx (i1src);
+ /* If the following substitution will modify I0SRC, make a copy of it
+ for the case where it is substituted for I0DEST in I1PAT later. */
+ if (added_sets_1 && i0_feeds_i1_n)
+ i0src_copy = copy_rtx (i0src);
+ /* And a copy for I0DEST in I2PAT substitution. */
+ if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
+ || (i0_feeds_i2_n)))
+ i0src_copy2 = copy_rtx (i0src);
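+ /* Without these copies, the same I0SRC rtx could end up shared
+ between two patterns, and a later substitution into one of them
+ would then corrupt the other. */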
n_occurrences = 0;
subst_low_luid = DF_INSN_LUID (i0);
- newpat = subst (newpat, i0dest, i0src, 0, 0);
+ newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
substed_i0 = 1;
}
{
rtx t = i1pat;
if (i0_feeds_i1_n)
- t = subst (t, i0dest, i0src, 0, 0);
+ t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
XVECEXP (newpat, 0, --total_sets) = t;
}
{
rtx t = i2pat;
if (i1_feeds_i2_n)
- t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0,
+ t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
i0_feeds_i1_n && i0dest_in_i0src);
if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
- t = subst (t, i0dest, i0src, 0, 0);
+ t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
XVECEXP (newpat, 0, --total_sets) = t;
}
newpat = m_split;
}
else if (m_split && NEXT_INSN (NEXT_INSN (m_split)) == NULL_RTX
- && (next_real_insn (i2) == i3
+ && (next_nonnote_nondebug_insn (i2) == i3
|| ! use_crosses_set_p (PATTERN (m_split), DF_INSN_LUID (i2))))
{
rtx i2set, i3set;
is used between I2 and I3, we also can't use these insns. */
if (i2_code_number >= 0 && i2set && i3set
- && (next_real_insn (i2) == i3
+ && (next_nonnote_nondebug_insn (i2) == i3
|| ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
insn_code_number = recog_for_combine (&newi3pat, i3,
&new_i3_notes);
|| GET_MODE (*split) == VOIDmode
|| can_change_dest_mode (i2dest, added_sets_2,
GET_MODE (*split)))
- && (next_real_insn (i2) == i3
+ && (next_nonnote_nondebug_insn (i2) == i3
|| ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
/* We can't overwrite I2DEST if its value is still used by
NEWPAT. */
(REG_P (temp)
&& VEC_index (reg_stat_type, reg_stat,
REGNO (temp))->nonzero_bits != 0
- && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
- && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
+ && GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD
+ && GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT
&& (VEC_index (reg_stat_type, reg_stat,
REGNO (temp))->nonzero_bits
!= GET_MODE_MASK (word_mode))))
(REG_P (temp)
&& VEC_index (reg_stat_type, reg_stat,
REGNO (temp))->nonzero_bits != 0
- && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
- && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
+ && GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD
+ && GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT
&& (VEC_index (reg_stat_type, reg_stat,
REGNO (temp))->nonzero_bits
!= GET_MODE_MASK (word_mode)))))
/* A noop might also need cleaning up of CFG, if it comes from the
simplification of a jump. */
- if (GET_CODE (newpat) == SET
+ if (JUMP_P (i3)
+ && GET_CODE (newpat) == SET
&& SET_SRC (newpat) == pc_rtx
&& SET_DEST (newpat) == pc_rtx)
{
}
if (undobuf.other_insn != NULL_RTX
+ && JUMP_P (undobuf.other_insn)
&& GET_CODE (PATTERN (undobuf.other_insn)) == SET
&& SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
&& SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
case UNDO_MODE:
adjust_reg_mode (*undo->where.r, undo->old_contents.m);
break;
+ case UNDO_LINKS:
+ *undo->where.l = undo->old_contents.l;
+ break;
default:
gcc_unreachable ();
}
/* See if this is a bitfield assignment with everything constant. If
so, this is an IOR of an AND, so split it into that. */
if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
- && (GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)))
- <= HOST_BITS_PER_WIDE_INT)
+ && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0)))
&& CONST_INT_P (XEXP (SET_DEST (x), 1))
&& CONST_INT_P (XEXP (SET_DEST (x), 2))
&& CONST_INT_P (SET_SRC (x))
&& ((INTVAL (XEXP (SET_DEST (x), 1))
+ INTVAL (XEXP (SET_DEST (x), 2)))
- <= GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0))))
+ <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
&& ! side_effects_p (XEXP (SET_DEST (x), 0)))
{
HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
rtx or_mask;
if (BITS_BIG_ENDIAN)
- pos = GET_MODE_BITSIZE (mode) - len - pos;
+ pos = GET_MODE_PRECISION (mode) - len - pos;
or_mask = gen_int_mode (src << pos, mode);
if (src == mask)
break;
pos = 0;
- len = GET_MODE_BITSIZE (GET_MODE (inner));
+ len = GET_MODE_PRECISION (GET_MODE (inner));
unsignedp = 0;
break;
pos = INTVAL (XEXP (SET_SRC (x), 2));
if (BITS_BIG_ENDIAN)
- pos = GET_MODE_BITSIZE (GET_MODE (inner)) - len - pos;
+ pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
unsignedp = (code == ZERO_EXTRACT);
}
break;
break;
}
- if (len && pos >= 0 && pos + len <= GET_MODE_BITSIZE (GET_MODE (inner)))
+ if (len && pos >= 0
+ && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
{
enum machine_mode mode = GET_MODE (SET_SRC (x));
(unsignedp ? LSHIFTRT : ASHIFTRT, mode,
gen_rtx_ASHIFT (mode,
gen_lowpart (mode, inner),
- GEN_INT (GET_MODE_BITSIZE (mode)
+ GEN_INT (GET_MODE_PRECISION (mode)
- len - pos)),
- GEN_INT (GET_MODE_BITSIZE (mode) - len)));
+ GEN_INT (GET_MODE_PRECISION (mode) - len)));
split = find_split_point (&SET_SRC (x), insn, true);
if (split && split != &SET_SRC (x))
IN_DEST is nonzero if we are processing the SET_DEST of a SET.
+ IN_COND is nonzero if we are at the top level of a condition.
+
UNIQUE_COPY is nonzero if each substitution must be unique. We do this
by copying if `n_occurrences' is nonzero. */
static rtx
-subst (rtx x, rtx from, rtx to, int in_dest, int unique_copy)
+subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
{
enum rtx_code code = GET_CODE (x);
enum machine_mode op0_mode = VOIDmode;
&& GET_CODE (XVECEXP (x, 0, 0)) == SET
&& GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
{
- new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, unique_copy);
+ new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
/* If this substitution failed, this whole thing fails. */
if (GET_CODE (new_rtx) == CLOBBER
&& GET_CODE (dest) != CC0
&& GET_CODE (dest) != PC)
{
- new_rtx = subst (dest, from, to, 0, unique_copy);
+ new_rtx = subst (dest, from, to, 0, 0, unique_copy);
/* If this substitution failed, this whole thing fails. */
if (GET_CODE (new_rtx) == CLOBBER
}
else
{
- new_rtx = subst (XVECEXP (x, i, j), from, to, 0,
- unique_copy);
+ new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
+ unique_copy);
/* If this substitution failed, this whole thing
fails. */
&& (code == SUBREG || code == STRICT_LOW_PART
|| code == ZERO_EXTRACT))
|| code == SET)
- && i == 0), unique_copy);
+ && i == 0),
+ code == IF_THEN_ELSE && i == 0,
+ unique_copy);
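+ /* Operand 0 of an IF_THEN_ELSE is its condition; passing IN_COND
+ nonzero for it tells combine_simplify_rtx to keep it as an
+ explicit comparison instead of folding it to a value. */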
/* If we found that we will have to reject this combination,
indicate that by returning the CLOBBER ourselves, rather than
/* If X is sufficiently simple, don't bother trying to do anything
with it. */
if (code != CONST_INT && code != REG && code != CLOBBER)
- x = combine_simplify_rtx (x, op0_mode, in_dest);
+ x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
if (GET_CODE (x) == code)
break;
expression.
OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
- if we are inside a SET_DEST. */
+ if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
+ of a condition. */
static rtx
-combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest)
+combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
+ int in_cond)
{
enum rtx_code code = GET_CODE (x);
enum machine_mode mode = GET_MODE (x);
false arms to store-flag values. Be careful to use copy_rtx
here since true_rtx or false_rtx might share RTL with x as a
result of the if_then_else_cond call above. */
- true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0);
- false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0);
+ true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
+ false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
/* If true_rtx and false_rtx are not general_operands, an if_then_else
is unlikely to be simpler. */
if (GET_CODE (temp) == ASHIFTRT
&& CONST_INT_P (XEXP (temp, 1))
- && INTVAL (XEXP (temp, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
INTVAL (XEXP (temp, 1)));
rtx temp1 = simplify_shift_const
(NULL_RTX, ASHIFTRT, mode,
simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
- GET_MODE_BITSIZE (mode) - 1 - i),
- GET_MODE_BITSIZE (mode) - 1 - i);
+ GET_MODE_PRECISION (mode) - 1 - i),
+ GET_MODE_PRECISION (mode) - 1 - i);
/* If all we did was surround TEMP with the two shifts, we
haven't improved anything, so don't use it. Otherwise,
if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
break;
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ if (HWI_COMPUTABLE_MODE_P (mode))
SUBST (XEXP (x, 0),
force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
GET_MODE_MASK (mode), 0));
/* Similarly to what we do in simplify-rtx.c, a truncate of a register
whose value is a comparison can be replaced with a subreg if
STORE_FLAG_VALUE permits. */
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ if (HWI_COMPUTABLE_MODE_P (mode)
&& (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
&& (temp = get_last_value (XEXP (x, 0)))
&& COMPARISON_P (temp))
&& INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
&& ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
|| (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && HWI_COMPUTABLE_MODE_P (mode)
&& ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
&& CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
&& (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
== ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1))
|| (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
- && (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
+ && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
== (unsigned int) i + 1))))
return simplify_shift_const
(NULL_RTX, ASHIFTRT, mode,
simplify_shift_const (NULL_RTX, ASHIFT, mode,
XEXP (XEXP (XEXP (x, 0), 0), 0),
- GET_MODE_BITSIZE (mode) - (i + 1)),
- GET_MODE_BITSIZE (mode) - (i + 1));
+ GET_MODE_PRECISION (mode) - (i + 1)),
+ GET_MODE_PRECISION (mode) - (i + 1));
/* If only the low-order bit of X is possibly nonzero, (plus x -1)
can become (ashiftrt (ashift (xor x 1) C) C) where C is
return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
simplify_shift_const (NULL_RTX, ASHIFT, mode,
gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
- GET_MODE_BITSIZE (mode) - 1),
- GET_MODE_BITSIZE (mode) - 1);
+ GET_MODE_PRECISION (mode) - 1),
+ GET_MODE_PRECISION (mode) - 1);
/* If we are adding two things that have no bits in common, convert
the addition into an IOR. This will often be further simplified,
for example in cases like ((a & 1) + (a & 2)), which can
become a & 3. */
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ if (HWI_COMPUTABLE_MODE_P (mode)
&& (nonzero_bits (XEXP (x, 0), mode)
& nonzero_bits (XEXP (x, 1), mode)) == 0)
{
/* Try to simplify the expression further. */
rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
- temp = combine_simplify_rtx (tor, mode, in_dest);
+ temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
/* If we could, great. If not, do not go ahead with the IOR
replacement, since PLUS appears in many special purpose
address arithmetic instructions. */
- if (GET_CODE (temp) != CLOBBER && temp != tor)
+ if (GET_CODE (temp) != CLOBBER
+ && (GET_CODE (temp) != IOR
+ || ((XEXP (temp, 0) != XEXP (x, 0)
+ || XEXP (temp, 1) != XEXP (x, 1))
+ && (XEXP (temp, 0) != XEXP (x, 1)
+ || XEXP (temp, 1) != XEXP (x, 0)))))
return temp;
}
break;
Remove any ZERO_EXTRACT we made when thinking this was a
comparison. It may now be simpler to use, e.g., an AND. If a
ZERO_EXTRACT is indeed appropriate, it will be placed back by
- the call to make_compound_operation in the SET case. */
+ the call to make_compound_operation in the SET case.
+
+ Don't apply these optimizations if the caller would
+ prefer a comparison rather than a value.
+ E.g., for the condition in an IF_THEN_ELSE most targets need
+ an explicit comparison. */
+
+ if (in_cond)
+ ;
+
- if (STORE_FLAG_VALUE == 1
+ else if (STORE_FLAG_VALUE == 1
&& new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
&& op1 == const0_rtx
&& mode == GET_MODE (op0)
&& op1 == const0_rtx
&& mode == GET_MODE (op0)
&& (num_sign_bit_copies (op0, mode)
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
{
op0 = expand_compound_operation (op0);
return simplify_gen_unary (NEG, mode,
&& op1 == const0_rtx
&& mode == GET_MODE (op0)
&& (num_sign_bit_copies (op0, mode)
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
{
op0 = expand_compound_operation (op0);
return plus_constant (gen_lowpart (mode, op0), 1);
/* If STORE_FLAG_VALUE is -1, we have cases similar to
those above. */
- if (STORE_FLAG_VALUE == -1
+ if (in_cond)
+ ;
+
+ else if (STORE_FLAG_VALUE == -1
&& new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
&& op1 == const0_rtx
&& (num_sign_bit_copies (op0, mode)
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
return gen_lowpart (mode,
expand_compound_operation (op0));
&& op1 == const0_rtx
&& mode == GET_MODE (op0)
&& (num_sign_bit_copies (op0, mode)
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
{
op0 = expand_compound_operation (op0);
return simplify_gen_unary (NOT, mode,
AND with STORE_FLAG_VALUE when we are done, since we are only
going to test the sign bit. */
if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
- == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
+ && HWI_COMPUTABLE_MODE_P (mode)
+ && val_signbit_p (mode, STORE_FLAG_VALUE)
&& op1 == const0_rtx
&& mode == GET_MODE (op0)
&& (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
{
x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
expand_compound_operation (op0),
- GET_MODE_BITSIZE (mode) - 1 - i);
+ GET_MODE_PRECISION (mode) - 1 - i);
if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
return XEXP (x, 0);
else
&& exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
{
false_code = EQ;
- false_val = GEN_INT (trunc_int_for_mode (nzb, GET_MODE (from)));
+ false_val = gen_int_mode (nzb, GET_MODE (from));
}
else if (true_code == EQ && true_val == const0_rtx
&& (num_sign_bit_copies (from, GET_MODE (from))
- == GET_MODE_BITSIZE (GET_MODE (from))))
+ == GET_MODE_PRECISION (GET_MODE (from))))
{
false_code = EQ;
false_val = constm1_rtx;
if (reg_mentioned_p (from, true_rtx))
true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
from, true_val),
- pc_rtx, pc_rtx, 0, 0);
+ pc_rtx, pc_rtx, 0, 0, 0);
if (reg_mentioned_p (from, false_rtx))
false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
from, false_val),
- pc_rtx, pc_rtx, 0, 0);
+ pc_rtx, pc_rtx, 0, 0, 0);
SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
&& rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
&& (num_sign_bit_copies (f, GET_MODE (f))
> (unsigned int)
- (GET_MODE_BITSIZE (mode)
- - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 0))))))
+ (GET_MODE_PRECISION (mode)
+ - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
{
c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
extend_op = SIGN_EXTEND;
&& rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
&& (num_sign_bit_copies (f, GET_MODE (f))
> (unsigned int)
- (GET_MODE_BITSIZE (mode)
- - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 1))))))
+ (GET_MODE_PRECISION (mode)
+ - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
{
c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
extend_op = SIGN_EXTEND;
|| GET_CODE (XEXP (t, 0)) == LSHIFTRT
|| GET_CODE (XEXP (t, 0)) == ASHIFTRT)
&& GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && HWI_COMPUTABLE_MODE_P (mode)
&& subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
&& rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
&& ((nonzero_bits (f, GET_MODE (f))
|| GET_CODE (XEXP (t, 0)) == IOR
|| GET_CODE (XEXP (t, 0)) == XOR)
&& GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && HWI_COMPUTABLE_MODE_P (mode)
&& subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
&& rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
&& ((nonzero_bits (f, GET_MODE (f))
{
temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
cond_op0, cond_op1),
- pc_rtx, pc_rtx, 0, 0);
+ pc_rtx, pc_rtx, 0, 0, 0);
temp = simplify_gen_binary (MULT, m, temp,
simplify_gen_binary (MULT, m, c1,
const_true_rtx));
- temp = subst (temp, pc_rtx, pc_rtx, 0, 0);
+ temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
if (extend_op != UNKNOWN)
&& ((1 == nonzero_bits (XEXP (cond, 0), mode)
&& (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
|| ((num_sign_bit_copies (XEXP (cond, 0), mode)
- == GET_MODE_BITSIZE (mode))
+ == GET_MODE_PRECISION (mode))
&& (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
return
simplify_shift_const (NULL_RTX, ASHIFT, mode,
rtx *cc_use;
/* (set (pc) (return)) gets written as (return). */
- if (GET_CODE (dest) == PC && GET_CODE (src) == RETURN)
+ if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
return src;
/* Now that we know for sure which bits of SRC we are using, see if we can
simplify the expression for the object knowing that we only need the
low-order bits. */
- if (GET_MODE_CLASS (mode) == MODE_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
{
src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
SUBST (SET_SRC (x), src);
if (((old_code == NE && new_code == EQ)
|| (old_code == EQ && new_code == NE))
&& ! other_changed_previously && op1 == const0_rtx
- && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
+ && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
&& exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
{
rtx pat = PATTERN (other_insn), note = 0;
if (dest == cc0_rtx
&& GET_CODE (src) == SUBREG
&& subreg_lowpart_p (src)
- && (GET_MODE_BITSIZE (GET_MODE (src))
- < GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (src)))))
+ && (GET_MODE_PRECISION (GET_MODE (src))
+ < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
{
rtx inner = SUBREG_REG (src);
enum machine_mode inner_mode = GET_MODE (inner);
/* Here we make sure that we don't have a sign bit on. */
- if (GET_MODE_BITSIZE (inner_mode) <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (inner, inner_mode)
- < ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (src)) - 1))))
+ if (val_signbit_known_clear_p (GET_MODE (src),
+ nonzero_bits (inner, inner_mode)))
{
SUBST (SET_SRC (x), inner);
src = SET_SRC (x);
&& INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src)))
&& LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
&& SUBREG_BYTE (src) == 0
- && (GET_MODE_SIZE (GET_MODE (src))
- > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
+ && paradoxical_subreg_p (src)
&& MEM_P (SUBREG_REG (src)))
{
SUBST (SET_SRC (x),
#endif
&& (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
GET_MODE (XEXP (XEXP (src, 0), 0)))
- == GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (src, 0), 0))))
+ == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
&& ! side_effects_p (src))
{
rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
any (sign) bits when converting INTVAL (op1) to
"unsigned HOST_WIDE_INT". */
if (CONST_INT_P (op1)
- && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (HWI_COMPUTABLE_MODE_P (mode)
|| INTVAL (op1) > 0))
{
x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
return x;
- len = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)));
+ len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
/* If the inner object has VOIDmode (the only way this can happen
is if it is an ASM_OPERANDS), we can't do anything since we don't
know how much masking to do. */
pos = INTVAL (XEXP (x, 2));
/* This should stay within the object being extracted, fail otherwise. */
- if (len + pos > GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))))
+ if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))))
return x;
if (BITS_BIG_ENDIAN)
- pos = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - len - pos;
+ pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos;
break;
bit is not set, as this is easier to optimize. It will be converted
back to cheaper alternative in make_extraction. */
if (GET_CODE (x) == SIGN_EXTEND
- && (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+ && (HWI_COMPUTABLE_MODE_P (GET_MODE (x))
&& ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
& ~(((unsigned HOST_WIDE_INT)
GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
rtx temp2 = expand_compound_operation (temp);
/* Make sure this is a profitable operation. */
- if (rtx_cost (x, SET, optimize_this_for_speed_p)
- > rtx_cost (temp2, SET, optimize_this_for_speed_p))
+ if (set_src_cost (x, optimize_this_for_speed_p)
+ > set_src_cost (temp2, optimize_this_for_speed_p))
return temp2;
- else if (rtx_cost (x, SET, optimize_this_for_speed_p)
- > rtx_cost (temp, SET, optimize_this_for_speed_p))
+ else if (set_src_cost (x, optimize_this_for_speed_p)
+ > set_src_cost (temp, optimize_this_for_speed_p))
return temp;
else
return x;
set. */
if (GET_CODE (XEXP (x, 0)) == TRUNCATE
&& GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
- && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+ && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
&& (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
& ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
return XEXP (XEXP (x, 0), 0);
if (GET_CODE (XEXP (x, 0)) == SUBREG
&& GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
&& subreg_lowpart_p (XEXP (x, 0))
- && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+ && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
&& (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
& ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
return SUBREG_REG (XEXP (x, 0));
if (GET_CODE (XEXP (x, 0)) == TRUNCATE
&& GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
&& COMPARISON_P (XEXP (XEXP (x, 0), 0))
- && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
<= HOST_BITS_PER_WIDE_INT)
&& (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
return XEXP (XEXP (x, 0), 0);
&& GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
&& subreg_lowpart_p (XEXP (x, 0))
&& COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
- && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
<= HOST_BITS_PER_WIDE_INT)
&& (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
return SUBREG_REG (XEXP (x, 0));
extraction. Then the constant of 31 would be substituted in
to produce such a position. */
- modewidth = GET_MODE_BITSIZE (GET_MODE (x));
+ modewidth = GET_MODE_PRECISION (GET_MODE (x));
if (modewidth >= pos + len)
{
enum machine_mode mode = GET_MODE (x);
&& GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
{
inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
- len = GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)));
+ len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
}
else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
/* A constant position should stay within the width of INNER. */
if (CONST_INT_P (pos)
- && INTVAL (pos) + len > GET_MODE_BITSIZE (GET_MODE (inner)))
+ && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
break;
if (BITS_BIG_ENDIAN)
{
if (CONST_INT_P (pos))
- pos = GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner)) - len
+ pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
- INTVAL (pos));
else if (GET_CODE (pos) == MINUS
&& CONST_INT_P (XEXP (pos, 1))
&& (INTVAL (XEXP (pos, 1))
- == GET_MODE_BITSIZE (GET_MODE (inner)) - len))
+ == GET_MODE_PRECISION (GET_MODE (inner)) - len))
/* If position is ADJUST - X, new position is X. */
pos = XEXP (pos, 0);
else
pos = simplify_gen_binary (MINUS, GET_MODE (pos),
- GEN_INT (GET_MODE_BITSIZE (
+ GEN_INT (GET_MODE_PRECISION (
GET_MODE (inner))
- len),
pos);
&& !MEM_P (inner)
&& (inner_mode == tmode
|| !REG_P (inner)
- || TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (tmode),
- GET_MODE_BITSIZE (inner_mode))
+ || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
|| reg_truncated_to_mode (tmode, inner))
&& (! in_dest
|| (REG_P (inner)
: BITS_PER_UNIT)) == 0
/* We can't do this if we are widening INNER_MODE (it
may not be aligned, for one thing). */
- && GET_MODE_BITSIZE (inner_mode) >= GET_MODE_BITSIZE (tmode)
+ && GET_MODE_PRECISION (inner_mode) >= GET_MODE_PRECISION (tmode)
&& (inner_mode == tmode
|| (! mode_dependent_address_p (XEXP (inner, 0))
&& ! MEM_VOLATILE_P (inner))))))
/* POS counts from lsb, but make OFFSET count in memory order. */
if (BYTES_BIG_ENDIAN)
- offset = (GET_MODE_BITSIZE (is_mode) - len - pos) / BITS_PER_UNIT;
+ offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
else
offset = pos / BITS_PER_UNIT;
bit is not set, convert the extraction to the cheaper of
sign and zero extension, that are equivalent in these cases. */
if (flag_expensive_optimizations
- && (GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT
+ && (HWI_COMPUTABLE_MODE_P (tmode)
&& ((nonzero_bits (new_rtx, tmode)
- & ~(((unsigned HOST_WIDE_INT)
- GET_MODE_MASK (tmode))
- >> 1))
+ & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
== 0)))
{
rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
/* Prefer ZERO_EXTENSION, since it gives more information to
backends. */
- if (rtx_cost (temp, SET, optimize_this_for_speed_p)
- <= rtx_cost (temp1, SET, optimize_this_for_speed_p))
+ if (set_src_cost (temp, optimize_this_for_speed_p)
+ <= set_src_cost (temp1, optimize_this_for_speed_p))
return temp;
return temp1;
}
other cases, we would only be going outside our object in cases when
an original shift would have been undefined. */
if (MEM_P (inner)
- && ((pos_rtx == 0 && pos + len > GET_MODE_BITSIZE (is_mode))
+ && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
|| (pos_rtx != 0 && len != 1)))
return 0;
/* On the LHS, don't create paradoxical subregs implicitly truncating
the register unless TRULY_NOOP_TRUNCATION. */
if (in_dest
- && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (inner)),
- GET_MODE_BITSIZE (wanted_inner_mode)))
+ && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
+ wanted_inner_mode))
return NULL_RTX;
if (GET_MODE (inner) != wanted_inner_mode
SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
cases. */
if (flag_expensive_optimizations
- && (GET_MODE_BITSIZE (GET_MODE (pos_rtx)) <= HOST_BITS_PER_WIDE_INT
+ && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
&& ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
& ~(((unsigned HOST_WIDE_INT)
GET_MODE_MASK (GET_MODE (pos_rtx)))
/* Prefer ZERO_EXTENSION, since it gives more information to
backends. */
- if (rtx_cost (temp1, SET, optimize_this_for_speed_p)
- < rtx_cost (temp, SET, optimize_this_for_speed_p))
+ if (set_src_cost (temp1, optimize_this_for_speed_p)
+ < set_src_cost (temp, optimize_this_for_speed_p))
temp = temp1;
}
pos_rtx = temp;
{
enum rtx_code code = GET_CODE (x);
enum machine_mode mode = GET_MODE (x);
- int mode_width = GET_MODE_BITSIZE (mode);
+ int mode_width = GET_MODE_PRECISION (mode);
rtx rhs, lhs;
enum rtx_code next_code;
int i, j;
{
new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
new_rtx = make_extraction (mode, new_rtx,
- (GET_MODE_BITSIZE (mode)
+ (GET_MODE_PRECISION (mode)
- INTVAL (XEXP (XEXP (x, 0), 1))),
NULL_RTX, i, 1, 0, in_code == COMPARE);
}
&& GET_CODE (lhs) == ASHIFT
&& CONST_INT_P (XEXP (lhs, 1))
&& INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
+ && INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (rhs) < mode_width)
{
new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
code = GET_CODE (x);
}
- /* Now recursively process each operand of this operation. */
+ /* Now recursively process each operand of this operation. We need to
+ handle ZERO_EXTEND specially so that we don't lose track of the
+ inner mode. */
+ if (GET_CODE (x) == ZERO_EXTEND)
+ {
+ new_rtx = make_compound_operation (XEXP (x, 0), next_code);
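+ /* If the operand has simplified to a constant, it has VOIDmode,
+ so fold the extension now, while the operand's original mode
+ is still known. */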
+ tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
+ new_rtx, GET_MODE (XEXP (x, 0)));
+ if (tem)
+ return tem;
+ SUBST (XEXP (x, 0), new_rtx);
+ return x;
+ }
+
fmt = GET_RTX_FORMAT (code);
for (i = 0; i < GET_RTX_LENGTH (code); i++)
if (fmt[i] == 'e')
{
if (!CONST_INT_P (x)
&& GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
- && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
- GET_MODE_BITSIZE (GET_MODE (x)))
+ && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
&& !(REG_P (x) && reg_truncated_to_mode (mode, x)))
{
/* Bit-cast X into an integer mode. */
/* It is not valid to do a right-shift in a narrower mode
than the one it came in with. */
if ((code == LSHIFTRT || code == ASHIFTRT)
- && GET_MODE_BITSIZE (mode) < GET_MODE_BITSIZE (GET_MODE (x)))
+ && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
op_mode = GET_MODE (x);
/* Truncate MASK to fit OP_MODE. */
if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
&& GET_MODE_MASK (GET_MODE (x)) != mask
- && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
+ && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
{
unsigned HOST_WIDE_INT cval
= UINTVAL (XEXP (x, 1))
| (GET_MODE_MASK (GET_MODE (x)) & ~mask);
- int width = GET_MODE_BITSIZE (GET_MODE (x));
+ int width = GET_MODE_PRECISION (GET_MODE (x));
rtx y;
/* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative
y = simplify_gen_binary (AND, GET_MODE (x),
XEXP (x, 0), GEN_INT (cval));
- if (rtx_cost (y, SET, optimize_this_for_speed_p)
- < rtx_cost (x, SET, optimize_this_for_speed_p))
+ if (set_src_cost (y, optimize_this_for_speed_p)
+ < set_src_cost (x, optimize_this_for_speed_p))
x = y;
}
This may eliminate that PLUS and, later, the AND. */
{
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
unsigned HOST_WIDE_INT smask = mask;
/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
&& CONST_INT_P (XEXP (x, 1))
&& ((INTVAL (XEXP (XEXP (x, 0), 1))
+ floor_log2 (INTVAL (XEXP (x, 1))))
- < GET_MODE_BITSIZE (GET_MODE (x)))
+ < GET_MODE_PRECISION (GET_MODE (x)))
&& (UINTVAL (XEXP (x, 1))
& ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
{
if (! (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= 0
- && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (mode))
+ && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
&& ! (GET_MODE (XEXP (x, 1)) != VOIDmode
&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
- < (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode))))
+ < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
break;
/* If the shift count is a constant and we can do arithmetic in
conservative form of the mask. */
if (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= 0
- && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (op_mode)
- && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
+ && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
+ && HWI_COMPUTABLE_MODE_P (op_mode))
mask >>= INTVAL (XEXP (x, 1));
else
mask = fuller_mask;
if (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
- && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
+ && HWI_COMPUTABLE_MODE_P (op_mode))
{
rtx inner = XEXP (x, 0);
unsigned HOST_WIDE_INT inner_mask;
bit. */
&& ((INTVAL (XEXP (x, 1))
+ num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
- >= GET_MODE_BITSIZE (GET_MODE (x)))
+ >= GET_MODE_PRECISION (GET_MODE (x)))
&& exact_log2 (mask + 1) >= 0
/* Number of bits left after the shift must be more than the mask
needs. */
&& ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
- <= GET_MODE_BITSIZE (GET_MODE (x)))
+ <= GET_MODE_PRECISION (GET_MODE (x)))
/* Must be more sign bit copies than the mask needs. */
&& ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
>= exact_log2 (mask + 1)))
x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
- GEN_INT (GET_MODE_BITSIZE (GET_MODE (x))
+ GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
- exact_log2 (mask + 1)));
goto shiftrt;
case ASHIFTRT:
/* If we are just looking for the sign bit, we don't need this shift at
all, even if it has a variable count. */
- if (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
- && (mask == ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
+ if (val_signbit_p (GET_MODE (x), mask))
return force_to_mode (XEXP (x, 0), mode, mask, next_select);
/* If this is a shift by a constant, get a mask that contains those bits
represent a mask for all its bits in a single scalar.
But we only care about the lower bits, so calculate these. */
- if (GET_MODE_BITSIZE (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
+ if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
{
nonzero = ~(unsigned HOST_WIDE_INT) 0;
- /* GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
+ /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
is the number of bits a full-width mask would have set.
We need only shift if these are fewer than nonzero can
hold. If not, we must keep all bits set in nonzero. */
- if (GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
+ if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
< HOST_BITS_PER_WIDE_INT)
nonzero >>= INTVAL (XEXP (x, 1))
+ HOST_BITS_PER_WIDE_INT
- - GET_MODE_BITSIZE (GET_MODE (x)) ;
+ - GET_MODE_PRECISION (GET_MODE (x)) ;
}
else
{
{
x = simplify_shift_const
(NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
- GET_MODE_BITSIZE (GET_MODE (x)) - 1 - i);
+ GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);
if (GET_CODE (x) != ASHIFTRT)
return force_to_mode (x, mode, mask, next_select);
&& CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= 0
&& (INTVAL (XEXP (x, 1))
- <= GET_MODE_BITSIZE (GET_MODE (x)) - (floor_log2 (mask) + 1))
+ <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
&& GET_CODE (XEXP (x, 0)) == ASHIFT
&& XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
&& CONST_INT_P (XEXP (XEXP (x, 0), 1))
&& INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
&& (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
- < GET_MODE_BITSIZE (GET_MODE (x)))
+ < GET_MODE_PRECISION (GET_MODE (x)))
&& INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
{
temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
false values when testing X. */
else if (x == constm1_rtx || x == const0_rtx
|| (mode != VOIDmode
- && num_sign_bit_copies (x, mode) == GET_MODE_BITSIZE (mode)))
+ && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode)))
{
*ptrue = constm1_rtx, *pfalse = const0_rtx;
return x;
}
/* Likewise for 0 or a single bit. */
- else if (SCALAR_INT_MODE_P (mode)
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ else if (HWI_COMPUTABLE_MODE_P (mode)
&& exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
{
*ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
return x;
pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
- if (pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (dest))
- || GET_MODE_BITSIZE (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
+ if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
+ || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
|| (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
return x;
other, pos),
dest);
src = force_to_mode (src, mode,
- GET_MODE_BITSIZE (mode) >= HOST_BITS_PER_WIDE_INT
+ GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
? ~(unsigned HOST_WIDE_INT) 0
: ((unsigned HOST_WIDE_INT) 1 << len) - 1,
0);
|| ! subreg_lowpart_p (lhs)
|| (GET_MODE_CLASS (GET_MODE (lhs))
!= GET_MODE_CLASS (GET_MODE (SUBREG_REG (lhs))))
- || (GET_MODE_SIZE (GET_MODE (lhs))
- > GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))))
+ || paradoxical_subreg_p (lhs)
|| VECTOR_MODE_P (GET_MODE (lhs))
|| GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))) > UNITS_PER_WORD
/* Result might need to be truncated. Don't change mode if
explicit truncation is needed. */
- || !TRULY_NOOP_TRUNCATION
- (GET_MODE_BITSIZE (GET_MODE (x)),
- GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (lhs)))))
+ || !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (x),
+ GET_MODE (SUBREG_REG (lhs))))
return x;
tem = simplify_gen_binary (code, GET_MODE (SUBREG_REG (lhs)),
tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
new_op0, new_op1));
if (GET_CODE (tmp) != outer_code
- && rtx_cost (tmp, SET, optimize_this_for_speed_p)
- < rtx_cost (x, SET, optimize_this_for_speed_p))
+ && (set_src_cost (tmp, optimize_this_for_speed_p)
+ < set_src_cost (x, optimize_this_for_speed_p)))
return tmp;
return NULL_RTX;
??? For 2.5, try to tighten up the MD files in this regard
instead of this kludge. */
- if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode)
+ if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode)
&& CONST_INT_P (tem)
&& INTVAL (tem) > 0
- && 0 != (UINTVAL (tem)
- & ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
- tem = GEN_INT (UINTVAL (tem)
- | ((unsigned HOST_WIDE_INT) (-1)
- << GET_MODE_BITSIZE (GET_MODE (x))));
+ && val_signbit_known_set_p (GET_MODE (x), INTVAL (tem)))
+ tem = GEN_INT (INTVAL (tem) | ~GET_MODE_MASK (GET_MODE (x)));
#endif
return tem;
}
{
unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
- if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode))
+ if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
/* We don't know anything about the upper bits. */
mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
*nonzero &= mask;
return tem;
if (nonzero_sign_valid && rsp->sign_bit_copies != 0
- && GET_MODE_BITSIZE (GET_MODE (x)) == GET_MODE_BITSIZE (mode))
+ && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
*result = rsp->sign_bit_copies;
return NULL;
return 0;
return (unsignedp
- ? (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- ? (unsigned int) (GET_MODE_BITSIZE (mode) - 1
+ ? (HWI_COMPUTABLE_MODE_P (mode)
+ ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
- floor_log2 (nonzero_bits (x, mode)))
: 0)
: num_sign_bit_copies (x, mode) - 1);
{
if (orig_mode == mode)
return mode;
- gcc_assert (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (orig_mode));
+ gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
/* In general we can't perform in wider mode for right shift and rotate. */
switch (code)
/* We can still widen if the bits brought in from the left are identical
to the sign bit of ORIG_MODE. */
if (num_sign_bit_copies (op, mode)
- > (unsigned) (GET_MODE_BITSIZE (mode)
- - GET_MODE_BITSIZE (orig_mode)))
+ > (unsigned) (GET_MODE_PRECISION (mode)
+ - GET_MODE_PRECISION (orig_mode)))
return mode;
return orig_mode;
case LSHIFTRT:
/* Similarly here but with zero bits. */
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ if (HWI_COMPUTABLE_MODE_P (mode)
&& (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
return mode;
int care_bits = low_bitmask_len (orig_mode, outer_const);
if (care_bits >= 0
- && GET_MODE_BITSIZE (orig_mode) - care_bits >= count)
+ && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
return mode;
}
/* fall through */
}
}
-/* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
- The result of the shift is RESULT_MODE. Return NULL_RTX if we cannot
- simplify it. Otherwise, return a simplified value.
+/* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
+ of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
+ if we cannot simplify it. Otherwise, return a simplified value.
The shift is normally computed in the widest mode we find in VAROP, as
long as it isn't a different number of words than RESULT_MODE. Exceptions
/* If we were given an invalid count, don't do anything except exactly
what was requested. */
- if (orig_count < 0 || orig_count >= (int) GET_MODE_BITSIZE (mode))
+ if (orig_count < 0 || orig_count >= (int) GET_MODE_PRECISION (mode))
return NULL_RTX;
count = orig_count;
/* Convert ROTATERT to ROTATE. */
if (code == ROTATERT)
{
- unsigned int bitsize = GET_MODE_BITSIZE (result_mode);;
+ unsigned int bitsize = GET_MODE_PRECISION (result_mode);
code = ROTATE;
if (VECTOR_MODE_P (result_mode))
count = bitsize / GET_MODE_NUNITS (result_mode) - count;
multiple operations, each of which are defined, we know what the
result is supposed to be. */
- if (count > (GET_MODE_BITSIZE (shift_mode) - 1))
+ if (count > (GET_MODE_PRECISION (shift_mode) - 1))
{
if (code == ASHIFTRT)
- count = GET_MODE_BITSIZE (shift_mode) - 1;
+ count = GET_MODE_PRECISION (shift_mode) - 1;
else if (code == ROTATE || code == ROTATERT)
- count %= GET_MODE_BITSIZE (shift_mode);
+ count %= GET_MODE_PRECISION (shift_mode);
else
{
/* We can't simply return zero because there may be an
is a no-op. */
if (code == ASHIFTRT
&& (num_sign_bit_copies (varop, shift_mode)
- == GET_MODE_BITSIZE (shift_mode)))
+ == GET_MODE_PRECISION (shift_mode)))
{
count = 0;
break;
if (code == ASHIFTRT
&& (count + num_sign_bit_copies (varop, shift_mode)
- >= GET_MODE_BITSIZE (shift_mode)))
- count = GET_MODE_BITSIZE (shift_mode) - 1;
+ >= GET_MODE_PRECISION (shift_mode)))
+ count = GET_MODE_PRECISION (shift_mode) - 1;
/* We simplify the tests below and elsewhere by converting
ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
`make_compound_operation' will convert it to an ASHIFTRT for
those machines (such as VAX) that don't have an LSHIFTRT. */
- if (GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
- && code == ASHIFTRT
- && ((nonzero_bits (varop, shift_mode)
- & ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (shift_mode) - 1))) == 0))
+ if (code == ASHIFTRT
+ && val_signbit_known_clear_p (shift_mode,
+ nonzero_bits (varop, shift_mode)))
code = LSHIFTRT;
if (((code == LSHIFTRT
- && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
+ && HWI_COMPUTABLE_MODE_P (shift_mode)
&& !(nonzero_bits (varop, shift_mode) >> count))
|| (code == ASHIFT
- && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
+ && HWI_COMPUTABLE_MODE_P (shift_mode)
&& !((nonzero_bits (varop, shift_mode) << count)
& GET_MODE_MASK (shift_mode))))
&& !side_effects_p (varop))
AND of a new shift with a mask. We compute the result below. */
if (CONST_INT_P (XEXP (varop, 1))
&& INTVAL (XEXP (varop, 1)) >= 0
- && INTVAL (XEXP (varop, 1)) < GET_MODE_BITSIZE (GET_MODE (varop))
- && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
+ && HWI_COMPUTABLE_MODE_P (result_mode)
+ && HWI_COMPUTABLE_MODE_P (mode)
&& !VECTOR_MODE_P (result_mode))
{
enum rtx_code first_code = GET_CODE (varop);
we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
we can convert it to
- (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0 C2) C3) C1).
+ (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
This simplifies certain SIGN_EXTEND operations. */
if (code == ASHIFT && first_code == ASHIFTRT
- && count == (GET_MODE_BITSIZE (result_mode)
- - GET_MODE_BITSIZE (GET_MODE (varop))))
+ && count == (GET_MODE_PRECISION (result_mode)
+ - GET_MODE_PRECISION (GET_MODE (varop))))
{
/* C3 has the low-order C1 bits zero. */
if (code == ASHIFTRT
|| (code == ROTATE && first_code == ASHIFTRT)
- || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
+ || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
|| (GET_MODE (varop) != result_mode
&& (first_code == ASHIFTRT || first_code == LSHIFTRT
|| first_code == ROTATE
break;
/* Make this fit the case below. */
- varop = gen_rtx_XOR (mode, XEXP (varop, 0),
- GEN_INT (GET_MODE_MASK (mode)));
+ varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
continue;
case IOR:
&& XEXP (XEXP (varop, 0), 1) == constm1_rtx
&& (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
&& (code == LSHIFTRT || code == ASHIFTRT)
- && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
+ && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
&& rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
{
count = 0;
case EQ:
/* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
says that the sign bit can be tested, FOO has mode MODE, C is
- GET_MODE_BITSIZE (MODE) - 1, and FOO has only its low-order bit
+ GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
that may be nonzero. */
if (code == LSHIFTRT
&& XEXP (varop, 1) == const0_rtx
&& GET_MODE (XEXP (varop, 0)) == result_mode
- && count == (GET_MODE_BITSIZE (result_mode) - 1)
- && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
+ && count == (GET_MODE_PRECISION (result_mode) - 1)
+ && HWI_COMPUTABLE_MODE_P (result_mode)
&& STORE_FLAG_VALUE == -1
&& nonzero_bits (XEXP (varop, 0), result_mode) == 1
&& merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
/* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
than the number of bits in the mode is equivalent to A. */
if (code == LSHIFTRT
- && count == (GET_MODE_BITSIZE (result_mode) - 1)
+ && count == (GET_MODE_PRECISION (result_mode) - 1)
&& nonzero_bits (XEXP (varop, 0), result_mode) == 1)
{
varop = XEXP (varop, 0);
/* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
is one less than the number of bits in the mode is
equivalent to (xor A 1). */
if (code == LSHIFTRT
- && count == (GET_MODE_BITSIZE (result_mode) - 1)
+ && count == (GET_MODE_PRECISION (result_mode) - 1)
&& XEXP (varop, 1) == constm1_rtx
&& nonzero_bits (XEXP (varop, 0), result_mode) == 1
&& merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
}
else if ((code == ASHIFTRT || code == LSHIFTRT)
&& count < HOST_BITS_PER_WIDE_INT
- && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
+ && HWI_COMPUTABLE_MODE_P (result_mode)
&& 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
>> count)
&& 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
&& GET_CODE (XEXP (varop, 0)) == ASHIFTRT
- && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
+ && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
&& (code == LSHIFTRT || code == ASHIFTRT)
&& CONST_INT_P (XEXP (XEXP (varop, 0), 1))
&& INTVAL (XEXP (XEXP (varop, 0), 1)) == count
&& GET_CODE (XEXP (varop, 0)) == LSHIFTRT
&& CONST_INT_P (XEXP (XEXP (varop, 0), 1))
&& (INTVAL (XEXP (XEXP (varop, 0), 1))
- >= (GET_MODE_BITSIZE (GET_MODE (XEXP (varop, 0)))
- - GET_MODE_BITSIZE (GET_MODE (varop)))))
+ >= (GET_MODE_PRECISION (GET_MODE (XEXP (varop, 0)))
+ - GET_MODE_PRECISION (GET_MODE (varop)))))
{
rtx varop_inner = XEXP (varop, 0);
if (outer_op != UNKNOWN)
{
if (GET_RTX_CLASS (outer_op) != RTX_UNARY
- && GET_MODE_BITSIZE (result_mode) < HOST_BITS_PER_WIDE_INT)
+ && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
outer_const = trunc_int_for_mode (outer_const, result_mode);
if (outer_op == AND)
return gen_rtx_CLOBBER (omode, const0_rtx);
}
\f
+/* Try to simplify a comparison between OP0 and a constant OP1,
+ where CODE is the comparison code that will be tested, into a
+ (CODE OP0 const0_rtx) form.
+
+ The result is a possibly different comparison code to use.
+ *POP1 may be updated. */
+
+static enum rtx_code
+simplify_compare_const (enum rtx_code code, rtx op0, rtx *pop1)
+{
+ enum machine_mode mode = GET_MODE (op0);
+ unsigned int mode_width = GET_MODE_PRECISION (mode);
+ HOST_WIDE_INT const_op = INTVAL (*pop1);
+
+ /* Get the constant we are comparing against and reduce it to its
+ canonical, sign-extended form for our mode.  */
+ if (mode != VOIDmode)
+ const_op = trunc_int_for_mode (const_op, mode);
+
+ /* If we are comparing against a constant power of two and the value
+ being compared can only have that single bit nonzero (e.g., it was
+ `and'ed with that bit), we can replace this with a comparison
+ with zero. */
+ if (const_op
+ && (code == EQ || code == NE || code == GE || code == GEU
+ || code == LT || code == LTU)
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && exact_log2 (const_op) >= 0
+ && nonzero_bits (op0, mode) == (unsigned HOST_WIDE_INT) const_op)
+ {
+ code = (code == EQ || code == GE || code == GEU ? NE : EQ);
+ const_op = 0;
+ }
+
+ /* Similarly, if we are comparing a value known to be either -1 or
+ 0 with -1, change it to the opposite comparison against zero. */
+ if (const_op == -1
+ && (code == EQ || code == NE || code == GT || code == LE
+ || code == GEU || code == LTU)
+ && num_sign_bit_copies (op0, mode) == mode_width)
+ {
+ code = (code == EQ || code == LE || code == GEU ? NE : EQ);
+ const_op = 0;
+ }
+
+ /* Do some canonicalizations based on the comparison code. We prefer
+ comparisons against zero and then prefer equality comparisons.
+ If we can reduce the size of a constant, we will do that too. */
+ switch (code)
+ {
+ case LT:
+ /* < C is equivalent to <= (C - 1) */
+ if (const_op > 0)
+ {
+ const_op -= 1;
+ code = LE;
+ /* ... fall through to LE case below. */
+ }
+ else
+ break;
+
+ case LE:
+ /* <= C is equivalent to < (C + 1); we do this for C < 0 */
+ if (const_op < 0)
+ {
+ const_op += 1;
+ code = LT;
+ }
+
+ /* If we are doing a <= 0 comparison on a value known to have
+ a zero sign bit, we can replace this with == 0. */
+ else if (const_op == 0
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (op0, mode)
+ & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
+ == 0)
+ code = EQ;
+ break;
+
+ case GE:
+ /* >= C is equivalent to > (C - 1). */
+ if (const_op > 0)
+ {
+ const_op -= 1;
+ code = GT;
+ /* ... fall through to GT below. */
+ }
+ else
+ break;
+
+ case GT:
+ /* > C is equivalent to >= (C + 1); we do this for C < 0. */
+ if (const_op < 0)
+ {
+ const_op += 1;
+ code = GE;
+ }
+
+ /* If we are doing a > 0 comparison on a value known to have
+ a zero sign bit, we can replace this with != 0. */
+ else if (const_op == 0
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (op0, mode)
+ & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
+ == 0)
+ code = NE;
+ break;
+
+ case LTU:
+ /* < C is equivalent to <= (C - 1). */
+ if (const_op > 0)
+ {
+ const_op -= 1;
+ code = LEU;
+ /* ... fall through ... */
+ }
+ /* (unsigned) < 0x80000000 is equivalent to >= 0. */
+ else if (mode_width <= HOST_BITS_PER_WIDE_INT
+ && (unsigned HOST_WIDE_INT) const_op
+ == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
+ {
+ const_op = 0;
+ code = GE;
+ break;
+ }
+ else
+ break;
+
+ case LEU:
+ /* unsigned <= 0 is equivalent to == 0 */
+ if (const_op == 0)
+ code = EQ;
+ /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
+ else if (mode_width <= HOST_BITS_PER_WIDE_INT
+ && (unsigned HOST_WIDE_INT) const_op
+ == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
+ {
+ const_op = 0;
+ code = GE;
+ }
+ break;
+
+ case GEU:
+ /* >= C is equivalent to > (C - 1). */
+ if (const_op > 1)
+ {
+ const_op -= 1;
+ code = GTU;
+ /* ... fall through ... */
+ }
+
+ /* (unsigned) >= 0x80000000 is equivalent to < 0. */
+ else if (mode_width <= HOST_BITS_PER_WIDE_INT
+ && (unsigned HOST_WIDE_INT) const_op
+ == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
+ {
+ const_op = 0;
+ code = LT;
+ break;
+ }
+ else
+ break;
+
+ case GTU:
+ /* unsigned > 0 is equivalent to != 0 */
+ if (const_op == 0)
+ code = NE;
+ /* (unsigned) > 0x7fffffff is equivalent to < 0. */
+ else if (mode_width <= HOST_BITS_PER_WIDE_INT
+ && (unsigned HOST_WIDE_INT) const_op
+ == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
+ {
+ const_op = 0;
+ code = LT;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ *pop1 = GEN_INT (const_op);
+ return code;
+}
+\f
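/* Aside: the rewrites performed by simplify_compare_const can be
   spot-checked with ordinary host integers.  A self-contained sketch,
   assuming 32-bit int/unsigned; the assertions mirror the comments in
   the function above and are illustrative only:  */

#include <assert.h>

int
main (void)
{
  int x = 7;
  unsigned int u = 42;

  assert ((x < 5) == (x <= 4));      /* < C is equivalent to <= (C - 1)  */
  assert ((x >= 5) == (x > 4));      /* >= C is equivalent to > (C - 1)  */
  assert ((u <= 0) == (u == 0));     /* unsigned <= 0 is equivalent to == 0  */
  assert ((u > 0) == (u != 0));      /* unsigned > 0 is equivalent to != 0  */

  /* (unsigned) < 0x80000000 and (unsigned) <= 0x7fffffff are both
     equivalent to (signed) >= 0.  */
  assert ((u < 0x80000000u) == ((int) u >= 0));
  assert ((u <= 0x7fffffffu) == ((int) u >= 0));
  return 0;
}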
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
comparison code that will be tested.
&& XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
&& XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
&& (INTVAL (XEXP (op0, 1))
- == (GET_MODE_BITSIZE (GET_MODE (op0))
- - (GET_MODE_BITSIZE
+ == (GET_MODE_PRECISION (GET_MODE (op0))
+ - (GET_MODE_PRECISION
(GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
{
op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
/* If both operands are the same constant shift, see if we can ignore the
shift.  We can if the shift is a rotate or if the bits shifted out of
this shift are known to be zero for both inputs and if the type of
comparison is compatible with the shift. */
if (GET_CODE (op0) == GET_CODE (op1)
- && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
+ && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
&& ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
|| ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
&& (code != GT && code != LT && code != GE && code != LE))
HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
int changed = 0;
- if (GET_CODE (inner_op0) == SUBREG && GET_CODE (inner_op1) == SUBREG
- && (GET_MODE_SIZE (GET_MODE (inner_op0))
- > GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner_op0))))
+ if (paradoxical_subreg_p (inner_op0)
+ && GET_CODE (inner_op1) == SUBREG
&& (GET_MODE (SUBREG_REG (inner_op0))
== GET_MODE (SUBREG_REG (inner_op1)))
- && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (inner_op0)))
+ && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
<= HOST_BITS_PER_WIDE_INT)
&& (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
GET_MODE (SUBREG_REG (inner_op0)))))
while (CONST_INT_P (op1))
{
enum machine_mode mode = GET_MODE (op0);
- unsigned int mode_width = GET_MODE_BITSIZE (mode);
+ unsigned int mode_width = GET_MODE_PRECISION (mode);
unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
int equality_comparison_p;
int sign_bit_comparison_p;
&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
break;
- /* Get the constant we are comparing against and turn off all bits
- not on in our mode. */
+ /* Try to simplify the comparison against a constant, possibly
+ changing the comparison code and/or reducing op1 to zero.  */
+ code = simplify_compare_const (code, op0, &op1);
const_op = INTVAL (op1);
- if (mode != VOIDmode)
- const_op = trunc_int_for_mode (const_op, mode);
- op1 = GEN_INT (const_op);
-
- /* If we are comparing against a constant power of two and the value
- being compared can only have that single bit nonzero (e.g., it was
- `and'ed with that bit), we can replace this with a comparison
- with zero. */
- if (const_op
- && (code == EQ || code == NE || code == GE || code == GEU
- || code == LT || code == LTU)
- && mode_width <= HOST_BITS_PER_WIDE_INT
- && exact_log2 (const_op) >= 0
- && nonzero_bits (op0, mode) == (unsigned HOST_WIDE_INT) const_op)
- {
- code = (code == EQ || code == GE || code == GEU ? NE : EQ);
- op1 = const0_rtx, const_op = 0;
- }
-
- /* Similarly, if we are comparing a value known to be either -1 or
- 0 with -1, change it to the opposite comparison against zero. */
-
- if (const_op == -1
- && (code == EQ || code == NE || code == GT || code == LE
- || code == GEU || code == LTU)
- && num_sign_bit_copies (op0, mode) == mode_width)
- {
- code = (code == EQ || code == LE || code == GEU ? NE : EQ);
- op1 = const0_rtx, const_op = 0;
- }
-
- /* Do some canonicalizations based on the comparison code. We prefer
- comparisons against zero and then prefer equality comparisons.
- If we can reduce the size of a constant, we will do that too. */
-
- switch (code)
- {
- case LT:
- /* < C is equivalent to <= (C - 1) */
- if (const_op > 0)
- {
- const_op -= 1;
- op1 = GEN_INT (const_op);
- code = LE;
- /* ... fall through to LE case below. */
- }
- else
- break;
-
- case LE:
- /* <= C is equivalent to < (C + 1); we do this for C < 0 */
- if (const_op < 0)
- {
- const_op += 1;
- op1 = GEN_INT (const_op);
- code = LT;
- }
-
- /* If we are doing a <= 0 comparison on a value known to have
- a zero sign bit, we can replace this with == 0. */
- else if (const_op == 0
- && mode_width <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (op0, mode)
- & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
- == 0)
- code = EQ;
- break;
-
- case GE:
- /* >= C is equivalent to > (C - 1). */
- if (const_op > 0)
- {
- const_op -= 1;
- op1 = GEN_INT (const_op);
- code = GT;
- /* ... fall through to GT below. */
- }
- else
- break;
-
- case GT:
- /* > C is equivalent to >= (C + 1); we do this for C < 0. */
- if (const_op < 0)
- {
- const_op += 1;
- op1 = GEN_INT (const_op);
- code = GE;
- }
-
- /* If we are doing a > 0 comparison on a value known to have
- a zero sign bit, we can replace this with != 0. */
- else if (const_op == 0
- && mode_width <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (op0, mode)
- & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
- == 0)
- code = NE;
- break;
-
- case LTU:
- /* < C is equivalent to <= (C - 1). */
- if (const_op > 0)
- {
- const_op -= 1;
- op1 = GEN_INT (const_op);
- code = LEU;
- /* ... fall through ... */
- }
-
- /* (unsigned) < 0x80000000 is equivalent to >= 0. */
- else if (mode_width <= HOST_BITS_PER_WIDE_INT
- && (unsigned HOST_WIDE_INT) const_op
- == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
- {
- const_op = 0, op1 = const0_rtx;
- code = GE;
- break;
- }
- else
- break;
-
- case LEU:
- /* unsigned <= 0 is equivalent to == 0 */
- if (const_op == 0)
- code = EQ;
-
- /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
- else if (mode_width <= HOST_BITS_PER_WIDE_INT
- && (unsigned HOST_WIDE_INT) const_op
- == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
- {
- const_op = 0, op1 = const0_rtx;
- code = GE;
- }
- break;
-
- case GEU:
- /* >= C is equivalent to > (C - 1). */
- if (const_op > 1)
- {
- const_op -= 1;
- op1 = GEN_INT (const_op);
- code = GTU;
- /* ... fall through ... */
- }
-
- /* (unsigned) >= 0x80000000 is equivalent to < 0. */
- else if (mode_width <= HOST_BITS_PER_WIDE_INT
- && (unsigned HOST_WIDE_INT) const_op
- == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
- {
- const_op = 0, op1 = const0_rtx;
- code = LT;
- break;
- }
- else
- break;
-
- case GTU:
- /* unsigned > 0 is equivalent to != 0 */
- if (const_op == 0)
- code = NE;
-
- /* (unsigned) > 0x7fffffff is equivalent to < 0. */
- else if (mode_width <= HOST_BITS_PER_WIDE_INT
- && (unsigned HOST_WIDE_INT) const_op
- == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
- {
- const_op = 0, op1 = const0_rtx;
- code = LT;
- }
- break;
-
- default:
- break;
- }
/* Compute some predicates to simplify code below. */
/* If this is a sign bit comparison and we can do arithmetic in
MODE, say that we will only be needing the sign bit of OP0. */
- if (sign_bit_comparison_p
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
op0 = force_to_mode (op0, mode,
(unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (mode) - 1),
+ << (GET_MODE_PRECISION (mode) - 1),
0);
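/* Aside: force_to_mode may discard everything but the sign bit here
   because a sign-bit comparison (LT or GE against zero) is insensitive
   to every other bit.  In plain C terms, assuming 32-bit int32_t;
   illustrative only:  */

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int32_t vals[] = { -5, 5, INT32_MIN, 0 };
  int i;

  for (i = 0; i < 4; i++)
    /* x < 0 looks only at the sign bit of x.  */
    assert ((vals[i] < 0) == ((vals[i] & INT32_MIN) != 0));
  return 0;
}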
/* Now try cases based on the opcode of OP0. If none of the cases
else
{
mode = new_mode;
- i = (GET_MODE_BITSIZE (mode) - 1 - i);
+ i = (GET_MODE_PRECISION (mode) - 1 - i);
}
}
later on, and then we wouldn't know whether to sign- or
zero-extend. */
mode = GET_MODE (XEXP (op0, 0));
- if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
+ if (GET_MODE_CLASS (mode) == MODE_INT
&& ! unsigned_comparison_p
- && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
- && ((unsigned HOST_WIDE_INT) const_op
- < (((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (mode) - 1))))
+ && HWI_COMPUTABLE_MODE_P (mode)
+ && trunc_int_for_mode (const_op, mode) == const_op
&& have_insn_for (COMPARE, mode))
{
op0 = XEXP (op0, 0);
if (mode_width <= HOST_BITS_PER_WIDE_INT
&& subreg_lowpart_p (op0)
- && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) > mode_width
+ && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width
&& GET_CODE (SUBREG_REG (op0)) == PLUS
&& CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
{
/* (A - C1) sign-extends if it is positive and 1-extends
if it is negative, C2 both sign- and 1-extends. */
|| (num_sign_bit_copies (a, inner_mode)
- > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
+ > (unsigned int) (GET_MODE_PRECISION (inner_mode)
- mode_width)
&& const_op < 0)))
|| ((unsigned HOST_WIDE_INT) c1
< (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
/* (A - C1) always sign-extends, like C2. */
&& num_sign_bit_copies (a, inner_mode)
- > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
+ > (unsigned int) (GET_MODE_PRECISION (inner_mode)
- (mode_width - 1))))
{
op0 = SUBREG_REG (op0);
/* If the inner mode is narrower and we are extracting the low part,
we can treat the SUBREG as if it were a ZERO_EXTEND. */
if (subreg_lowpart_p (op0)
- && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) < mode_width)
+ && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) < mode_width)
/* Fall through */ ;
else
break;
case ZERO_EXTEND:
mode = GET_MODE (XEXP (op0, 0));
- if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
+ if (GET_MODE_CLASS (mode) == MODE_INT
&& (unsigned_comparison_p || equality_comparison_p)
- && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
- && ((unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (mode))
+ && HWI_COMPUTABLE_MODE_P (mode)
+ && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
+ && const_op >= 0
&& have_insn_for (COMPARE, mode))
{
op0 = XEXP (op0, 0);
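/* Aside: narrowing the comparison is safe here because zero extension
   preserves unsigned order; whenever the constant itself fits the
   inner mode, the wide and the narrow comparison agree.  For instance,
   with QImode modelled as uint8_t (illustrative only):  */

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint8_t narrow = 200;
  uint32_t wide = narrow;              /* zero_extend:SI (QI)  */

  /* 150 fits in the inner 8-bit mode, so the comparison narrows.  */
  assert ((wide > 150u) == (narrow > (uint8_t) 150));
  assert ((wide == 200u) == (narrow == (uint8_t) 200));
  return 0;
}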
/* Check for the cases where we simply want the result of the
earlier test or the opposite of that result. */
if (code == NE || code == EQ
- || (GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
- && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
- && (STORE_FLAG_VALUE
- & (((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
+ || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE)
&& (code == LT || code == GE)))
{
enum rtx_code new_code;
+ 1)) >= 0
&& const_op >> i == 0
&& (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode
- && (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (tmode),
- GET_MODE_BITSIZE (GET_MODE (op0)))
+ && (TRULY_NOOP_TRUNCATION_MODES_P (tmode, GET_MODE (op0))
|| (REG_P (XEXP (op0, 0))
&& reg_truncated_to_mode (tmode, XEXP (op0, 0)))))
{
/* It is unsafe to commute the AND into the SUBREG if the SUBREG is
paradoxical and WORD_REGISTER_OPERATIONS is not defined.  As
originally written the upper bits have a defined value due to the
AND operation.  However, if we commute the AND inside the SUBREG
then they no longer have defined values and the meaning of
the code has been changed. */
&& (0
#ifdef WORD_REGISTER_OPERATIONS
- || (mode_width > GET_MODE_BITSIZE (tmode)
+ || (mode_width > GET_MODE_PRECISION (tmode)
&& mode_width <= BITS_PER_WORD)
#endif
- || (mode_width <= GET_MODE_BITSIZE (tmode)
+ || (mode_width <= GET_MODE_PRECISION (tmode)
&& subreg_lowpart_p (XEXP (op0, 0))))
&& CONST_INT_P (XEXP (op0, 1))
&& mode_width <= HOST_BITS_PER_WIDE_INT
- && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT
+ && HWI_COMPUTABLE_MODE_P (tmode)
&& ((c1 = INTVAL (XEXP (op0, 1))) & ~mask) == 0
&& (c1 & ~GET_MODE_MASK (tmode)) == 0
&& c1 != mask
|| (GET_CODE (shift_op) == XOR
&& CONST_INT_P (XEXP (shift_op, 1))
&& CONST_INT_P (shift_count)
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && HWI_COMPUTABLE_MODE_P (mode)
&& (UINTVAL (XEXP (shift_op, 1))
== (unsigned HOST_WIDE_INT) 1
<< INTVAL (shift_count))))
&& GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
&& (code == NE || code == EQ))
{
- if (GET_MODE_SIZE (GET_MODE (op0))
- > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))
+ if (paradoxical_subreg_p (op0))
{
/* For paradoxical subregs, allow case 1 as above. Case 3 isn't
implemented. */
op1 = gen_lowpart (GET_MODE (op0), op1);
}
}
- else if ((GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
+ else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
<= HOST_BITS_PER_WIDE_INT)
&& (nonzero_bits (SUBREG_REG (op0),
GET_MODE (SUBREG_REG (op0)))
&& GET_MODE_SIZE (mode) < UNITS_PER_WORD
&& ! have_insn_for (COMPARE, mode))
for (tmode = GET_MODE_WIDER_MODE (mode);
- (tmode != VOIDmode
- && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT);
+ (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode));
tmode = GET_MODE_WIDER_MODE (tmode))
if (have_insn_for (COMPARE, tmode))
{
/* If this is a test for negative, we can make an explicit
test of the sign bit.  Test this first so we can use
a paradoxical subreg to extend OP0. */
if (op1 == const0_rtx && (code == LT || code == GE)
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ && HWI_COMPUTABLE_MODE_P (mode))
{
op0 = simplify_gen_binary (AND, tmode,
gen_lowpart (tmode, op0),
if (zero_extended
|| ((num_sign_bit_copies (op0, tmode)
- > (unsigned int) (GET_MODE_BITSIZE (tmode)
- - GET_MODE_BITSIZE (mode)))
+ > (unsigned int) (GET_MODE_PRECISION (tmode)
+ - GET_MODE_PRECISION (mode)))
&& (num_sign_bit_copies (op1, tmode)
- > (unsigned int) (GET_MODE_BITSIZE (tmode)
- - GET_MODE_BITSIZE (mode)))))
+ > (unsigned int) (GET_MODE_PRECISION (tmode)
+ - GET_MODE_PRECISION (mode)))))
{
/* If OP0 is an AND and we don't have an AND in MODE either,
make a new AND in the proper mode. */
subst_low_luid = DF_INSN_LUID (insn);
rsp->last_set_mode = mode;
if (GET_MODE_CLASS (mode) == MODE_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ && HWI_COMPUTABLE_MODE_P (mode))
mode = nonzero_bits_mode;
rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
rsp->last_set_sign_bit_copies
else if (GET_CODE (setter) == SET
&& GET_CODE (SET_DEST (setter)) == SUBREG
&& SUBREG_REG (SET_DEST (setter)) == dest
- && GET_MODE_BITSIZE (GET_MODE (dest)) <= BITS_PER_WORD
+ && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
&& subreg_lowpart_p (SET_DEST (setter)))
record_value_for_reg (dest, record_dead_insn,
gen_lowpart (GET_MODE (dest),
unsigned int regno = REGNO (SUBREG_REG (subreg));
enum machine_mode mode = GET_MODE (subreg);
- if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
+ if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
return;
for (links = LOG_LINKS (insn); links;)
return false;
if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
return true;
- if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
- GET_MODE_BITSIZE (truncated)))
+ if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
return true;
return false;
}
if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
return -1;
- if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (truncated_mode),
- GET_MODE_BITSIZE (original_mode)))
+ if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
return -1;
x = SUBREG_REG (x);
/* If this is a non-paradoxical SUBREG, get the value of its operand
and then convert it to the desired mode.  If this is a paradoxical SUBREG,
we cannot predict what values the "extra" bits might have. */
if (GET_CODE (x) == SUBREG
&& subreg_lowpart_p (x)
- && (GET_MODE_SIZE (GET_MODE (x))
- <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ && !paradoxical_subreg_p (x)
&& (value = get_last_value (SUBREG_REG (x))) != 0)
return gen_lowpart (GET_MODE (x), value);
}
break;
+ case REG_ARGS_SIZE:
+ /* ??? How should the adjustment be distributed among i3 to i1?
+ Assume i3 contains the entire adjustment.  Assert that i3
+ contains at least some adjustment.  */
+ if (!noop_move_p (i3))
+ {
+ int old_size, args_size = INTVAL (XEXP (note, 0));
+ /* fixup_args_size_notes looks at REG_NORETURN note,
+ so ensure the note is placed there first. */
+ if (CALL_P (i3))
+ {
+ rtx *np;
+ for (np = &next_note; *np; np = &XEXP (*np, 1))
+ if (REG_NOTE_KIND (*np) == REG_NORETURN)
+ {
+ rtx n = *np;
+ *np = XEXP (n, 1);
+ XEXP (n, 1) = REG_NOTES (i3);
+ REG_NOTES (i3) = n;
+ break;
+ }
+ }
+ old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
+ /* For !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a REG_ARGS_SIZE
+ note to every noreturn call, so allow that here.  */
+ gcc_assert (old_size != args_size
+ || (CALL_P (i3)
+ && !ACCUMULATE_OUTGOING_ARGS
+ && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
+ }
+ break;
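/* Aside: the loop above performs a move-to-front splice on the
   EXPR_LIST chain, unlinking the REG_NORETURN note and pushing it onto
   the head of REG_NOTES (i3) so fixup_args_size_notes sees it first.
   The same pointer manipulation on a plain singly linked list
   (hypothetical node type, illustrative only):  */

#include <assert.h>
#include <stddef.h>

struct node { int kind; struct node *next; };

/* Unlink the first node with KIND from *LIST and push it onto *HEAD,
   mirroring the REG_NORETURN reordering above.  */
static void
move_to_front (struct node **list, struct node **head, int kind)
{
  struct node **np;
  for (np = list; *np; np = &(*np)->next)
    if ((*np)->kind == kind)
      {
        struct node *n = *np;
        *np = n->next;
        n->next = *head;
        *head = n;
        break;
      }
}

int
main (void)
{
  struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
  struct node *list = &a, *head = NULL;

  move_to_front (&list, &head, 2);
  assert (head == &b && head->next == NULL);
  assert (list == &a && a.next == &c);
  return 0;
}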
+
case REG_NORETURN:
case REG_SETJMP:
+ case REG_TM:
/* These notes must remain with the call. It should not be
possible for both I2 and I3 to be a call. */
if (CALL_P (i3))
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func |
TODO_df_finish | TODO_verify_rtl_sharing |
TODO_ggc_collect, /* todo_flags_finish */
}