#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
+#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "regs.h"
#include "recog.h"
#include "real.h"
#include "toplev.h"
+#include "target.h"
/* It is not safe to use ordinary gen_lowpart in combine.
Use gen_lowpart_for_combine instead. See comments there. */
static void set_nonzero_bits_and_sign_copies (rtx, rtx, void *);
static int cant_combine_insn_p (rtx);
static int can_combine_p (rtx, rtx, rtx, rtx, rtx *, rtx *);
-static int sets_function_arg_p (rtx);
static int combinable_i3pat (rtx, rtx *, rtx, rtx, int, rtx *);
static int contains_muldiv (rtx);
static rtx try_combine (rtx, rtx, rtx, int *);
if (undobuf.frees)
buf = undobuf.frees, undobuf.frees = buf->next;
else
- buf = (struct undo *) xmalloc (sizeof (struct undo));
+ buf = xmalloc (sizeof (struct undo));
buf->is_int = 0;
buf->where.r = into;
if (undobuf.frees)
buf = undobuf.frees, undobuf.frees = buf->next;
else
- buf = (struct undo *) xmalloc (sizeof (struct undo));
+ buf = xmalloc (sizeof (struct undo));
buf->is_int = 1;
buf->where.i = into;
combine_max_regno = nregs;
- reg_nonzero_bits = ((unsigned HOST_WIDE_INT *)
- xcalloc (nregs, sizeof (unsigned HOST_WIDE_INT)));
- reg_sign_bit_copies
- = (unsigned char *) xcalloc (nregs, sizeof (unsigned char));
-
- reg_last_death = (rtx *) xmalloc (nregs * sizeof (rtx));
- reg_last_set = (rtx *) xmalloc (nregs * sizeof (rtx));
- reg_last_set_value = (rtx *) xmalloc (nregs * sizeof (rtx));
- reg_last_set_table_tick = (int *) xmalloc (nregs * sizeof (int));
- reg_last_set_label = (int *) xmalloc (nregs * sizeof (int));
- reg_last_set_invalid = (char *) xmalloc (nregs * sizeof (char));
- reg_last_set_mode
- = (enum machine_mode *) xmalloc (nregs * sizeof (enum machine_mode));
- reg_last_set_nonzero_bits
- = (unsigned HOST_WIDE_INT *) xmalloc (nregs * sizeof (HOST_WIDE_INT));
- reg_last_set_sign_bit_copies
- = (char *) xmalloc (nregs * sizeof (char));
+ reg_nonzero_bits = xcalloc (nregs, sizeof (unsigned HOST_WIDE_INT));
+ reg_sign_bit_copies = xcalloc (nregs, sizeof (unsigned char));
+
+ reg_last_death = xmalloc (nregs * sizeof (rtx));
+ reg_last_set = xmalloc (nregs * sizeof (rtx));
+ reg_last_set_value = xmalloc (nregs * sizeof (rtx));
+ reg_last_set_table_tick = xmalloc (nregs * sizeof (int));
+ reg_last_set_label = xmalloc (nregs * sizeof (int));
+ reg_last_set_invalid = xmalloc (nregs * sizeof (char));
+ reg_last_set_mode = xmalloc (nregs * sizeof (enum machine_mode));
+ reg_last_set_nonzero_bits = xmalloc (nregs * sizeof (HOST_WIDE_INT));
+ reg_last_set_sign_bit_copies = xmalloc (nregs * sizeof (char));
init_reg_last_arrays ();
if (INSN_UID (insn) > i)
i = INSN_UID (insn);
- uid_cuid = (int *) xmalloc ((i + 1) * sizeof (int));
+ uid_cuid = xmalloc ((i + 1) * sizeof (int));
max_uid_cuid = i;
nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
FOR_EACH_BB (this_basic_block)
{
- for (insn = this_basic_block->head;
- insn != NEXT_INSN (this_basic_block->end);
+ for (insn = BB_HEAD (this_basic_block);
+ insn != NEXT_INSN (BB_END (this_basic_block));
insn = next ? next : NEXT_INSN (insn))
{
next = 0;
{
unsigned int nregs = combine_max_regno;
- memset ((char *) reg_last_death, 0, nregs * sizeof (rtx));
- memset ((char *) reg_last_set, 0, nregs * sizeof (rtx));
- memset ((char *) reg_last_set_value, 0, nregs * sizeof (rtx));
- memset ((char *) reg_last_set_table_tick, 0, nregs * sizeof (int));
- memset ((char *) reg_last_set_label, 0, nregs * sizeof (int));
+ memset (reg_last_death, 0, nregs * sizeof (rtx));
+ memset (reg_last_set, 0, nregs * sizeof (rtx));
+ memset (reg_last_set_value, 0, nregs * sizeof (rtx));
+ memset (reg_last_set_table_tick, 0, nregs * sizeof (int));
+ memset (reg_last_set_label, 0, nregs * sizeof (int));
memset (reg_last_set_invalid, 0, nregs * sizeof (char));
- memset ((char *) reg_last_set_mode, 0, nregs * sizeof (enum machine_mode));
- memset ((char *) reg_last_set_nonzero_bits, 0, nregs * sizeof (HOST_WIDE_INT));
+ memset (reg_last_set_mode, 0, nregs * sizeof (enum machine_mode));
+ memset (reg_last_set_nonzero_bits, 0, nregs * sizeof (HOST_WIDE_INT));
memset (reg_last_set_sign_bit_copies, 0, nregs * sizeof (char));
}
\f
static void
setup_incoming_promotions (void)
{
-#ifdef PROMOTE_FUNCTION_ARGS
unsigned int regno;
rtx reg;
enum machine_mode mode;
int unsignedp;
rtx first = get_insns ();
+ if (targetm.calls.promote_function_args (TREE_TYPE (cfun->decl)))
+ {
#ifndef OUTGOING_REGNO
#define OUTGOING_REGNO(N) N
#endif
- for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- /* Check whether this register can hold an incoming pointer
- argument. FUNCTION_ARG_REGNO_P tests outgoing register
- numbers, so translate if necessary due to register windows. */
- if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (regno))
- && (reg = promoted_input_arg (regno, &mode, &unsignedp)) != 0)
- {
- record_value_for_reg
- (reg, first, gen_rtx_fmt_e ((unsignedp ? ZERO_EXTEND
- : SIGN_EXTEND),
- GET_MODE (reg),
- gen_rtx_CLOBBER (mode, const0_rtx)));
- }
-#endif
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ /* Check whether this register can hold an incoming pointer
+ argument. FUNCTION_ARG_REGNO_P tests outgoing register
+ numbers, so translate if necessary due to register windows. */
+ if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (regno))
+ && (reg = promoted_input_arg (regno, &mode, &unsignedp)) != 0)
+ {
+ record_value_for_reg
+ (reg, first, gen_rtx_fmt_e ((unsignedp ? ZERO_EXTEND
+ : SIGN_EXTEND),
+ GET_MODE (reg),
+ gen_rtx_CLOBBER (mode, const0_rtx)));
+ }
+ }
}
\f
/* Called via note_stores. If X is a pseudo that is narrower than
return 1;
}
\f
-/* Check if PAT is an insn - or a part of it - used to set up an
- argument for a function in a hard register. */
-
-static int
-sets_function_arg_p (rtx pat)
-{
- int i;
- rtx inner_dest;
-
- switch (GET_CODE (pat))
- {
- case INSN:
- return sets_function_arg_p (PATTERN (pat));
-
- case PARALLEL:
- for (i = XVECLEN (pat, 0); --i >= 0;)
- if (sets_function_arg_p (XVECEXP (pat, 0, i)))
- return 1;
-
- break;
-
- case SET:
- inner_dest = SET_DEST (pat);
- while (GET_CODE (inner_dest) == STRICT_LOW_PART
- || GET_CODE (inner_dest) == SUBREG
- || GET_CODE (inner_dest) == ZERO_EXTRACT)
- inner_dest = XEXP (inner_dest, 0);
-
- return (GET_CODE (inner_dest) == REG
- && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
- && FUNCTION_ARG_REGNO_P (REGNO (inner_dest)));
-
- default:
- break;
- }
-
- return 0;
-}
-
/* LOC is the location within I3 that contains its pattern or the component
of a PARALLEL of the pattern. We validate that it is valid for combining.
|| GET_CODE (inner_dest) == ZERO_EXTRACT)
inner_dest = XEXP (inner_dest, 0);
- /* Check for the case where I3 modifies its output, as
- discussed above. */
- if ((inner_dest != dest
+ /* Check for the case where I3 modifies its output, as discussed
+ above. We don't want to prevent pseudos from being combined
+ into the address of a MEM, so only prevent the combination if
+ i1 or i2 set the same MEM. */
+ if ((inner_dest != dest &&
+ (GET_CODE (inner_dest) != MEM
+ || rtx_equal_p (i2dest, inner_dest)
+ || (i1dest && rtx_equal_p (i1dest, inner_dest)))
&& (reg_overlap_mentioned_p (i2dest, inner_dest)
|| (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))))
return 0;
}
+/* Adjust INSN after we made a change to its destination.
+
+   Changing the destination can invalidate notes that say something about
+   the results of the insn and a LOG_LINK pointing to the insn.  */
+
+static void
+adjust_for_new_dest (rtx insn)
+{
+  rtx *loc;
+
+  /* For notes, be conservative and simply remove them.  */
+  loc = &REG_NOTES (insn);
+  while (*loc)
+    {
+      enum reg_note kind = REG_NOTE_KIND (*loc);
+      /* REG_EQUAL/REG_EQUIV describe the old destination's value; unlink
+	 them.  Any other note kind is kept, so just advance past it.  */
+      if (kind == REG_EQUAL || kind == REG_EQUIV)
+	*loc = XEXP (*loc, 1);
+      else
+	loc = &XEXP (*loc, 1);
+    }
+
+  /* The new insn will have a destination that was previously the destination
+     of an insn just above it.  Call distribute_links to make a LOG_LINK from
+     the next use of that destination.  */
+  distribute_links (gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX));
+}
+
+
/* Try to combine the insns I1 and I2 into I3.
Here I1 and I2 appear earlier than I3.
I1 can be zero; then we combine just I2 into I3.
&& XEXP (SET_SRC (PATTERN (i3)), 1) == const0_rtx
&& rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
{
-#ifdef EXTRA_CC_MODES
+#ifdef SELECT_CC_MODE
rtx *cc_use;
enum machine_mode compare_mode;
#endif
i2_is_used = 1;
-#ifdef EXTRA_CC_MODES
+#ifdef SELECT_CC_MODE
/* See if a COMPARE with the operand we substituted in should be done
with the mode that is currently being used. If not, do the same
processing we do in `subst' for a SET; namely, if the destination
{
newpat = XVECEXP (newpat, 0, 1);
insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
+
+ if (insn_code_number >= 0)
+ {
+ /* If we will be able to accept this, we have made a change to the
+ destination of I3. This requires us to do a few adjustments. */
+ PATTERN (i3) = newpat;
+ adjust_for_new_dest (i3);
+ }
}
/* If we were combining three insns and the result is a simple SET
rtx link;
/* If we will be able to accept this, we have made a change to the
- destination of I3. This can invalidate a LOG_LINKS pointing
- to I3. No other part of combine.c makes such a transformation.
-
- The new I3 will have a destination that was previously the
- destination of I1 or I2 and which was used in i2 or I3. Call
- distribute_links to make a LOG_LINK from the next use of
- that destination. */
-
+ destination of I3. This requires us to do a few adjustments. */
PATTERN (i3) = newpat;
- distribute_links (gen_rtx_INSN_LIST (VOIDmode, i3, NULL_RTX));
+ adjust_for_new_dest (i3);
/* I3 now uses what used to be its destination and which is
now I2's destination. That means we need a LOG_LINK from
for (insn = NEXT_INSN (i3);
insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
- || insn != this_basic_block->next_bb->head);
+ || insn != BB_HEAD (this_basic_block->next_bb));
insn = NEXT_INSN (insn))
{
if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
for (temp = NEXT_INSN (i2);
temp && (this_basic_block->next_bb == EXIT_BLOCK_PTR
- || this_basic_block->head != temp);
+ || BB_HEAD (this_basic_block) != temp);
temp = NEXT_INSN (temp))
if (temp != i3 && INSN_P (temp))
for (link = LOG_LINKS (temp); link; link = XEXP (link, 1))
if (returnjump_p (i3) || any_uncondjump_p (i3))
{
*new_direct_jump_p = 1;
+ mark_jump_label (PATTERN (i3), i3, 0);
if ((temp = next_nonnote_insn (i3)) == NULL_RTX
|| GET_CODE (temp) != BARRIER)
break;
case NE:
- /* if STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
+ /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
is known to be on, this can be converted into a NEG of a shift. */
if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
&& GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
if (GET_CODE (new) == CLOBBER && XEXP (new, 0) == const0_rtx)
return new;
- if (GET_CODE (new) == CONST_INT && GET_CODE (x) == SUBREG)
+ if (GET_CODE (x) == SUBREG
+ && (GET_CODE (new) == CONST_INT
+ || GET_CODE (new) == CONST_DOUBLE))
{
enum machine_mode mode = GET_MODE (x);
return x;
/* Simplify the alternative arms; this may collapse the true and
- false arms to store-flag values. */
- true_rtx = subst (true_rtx, pc_rtx, pc_rtx, 0, 0);
- false_rtx = subst (false_rtx, pc_rtx, pc_rtx, 0, 0);
+ false arms to store-flag values. Be careful to use copy_rtx
+ here since true_rtx or false_rtx might share RTL with x as a
+ result of the if_then_else_cond call above. */
+ true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0);
+ false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0);
/* If true_rtx and false_rtx are not general_operands, an if_then_else
is unlikely to be simpler. */
break;
case NOT:
- /* (not (plus X -1)) can become (neg X). */
- if (GET_CODE (XEXP (x, 0)) == PLUS
- && XEXP (XEXP (x, 0), 1) == constm1_rtx)
- return gen_rtx_NEG (mode, XEXP (XEXP (x, 0), 0));
-
- /* Similarly, (not (neg X)) is (plus X -1). */
- if (GET_CODE (XEXP (x, 0)) == NEG)
- return gen_rtx_PLUS (mode, XEXP (XEXP (x, 0), 0), constm1_rtx);
-
- /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
- if (GET_CODE (XEXP (x, 0)) == XOR
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
- && (temp = simplify_unary_operation (NOT, mode,
- XEXP (XEXP (x, 0), 1),
- mode)) != 0)
- return gen_binary (XOR, mode, XEXP (XEXP (x, 0), 0), temp);
-
- /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for operands
- other than 1, but that is not valid. We could do a similar
- simplification for (not (lshiftrt C X)) where C is just the sign bit,
- but this doesn't seem common enough to bother with. */
- if (GET_CODE (XEXP (x, 0)) == ASHIFT
- && XEXP (XEXP (x, 0), 0) == const1_rtx)
- return gen_rtx_ROTATE (mode, simplify_gen_unary (NOT, mode,
- const1_rtx, mode),
- XEXP (XEXP (x, 0), 1));
-
if (GET_CODE (XEXP (x, 0)) == SUBREG
&& subreg_lowpart_p (XEXP (x, 0))
&& (GET_MODE_SIZE (GET_MODE (XEXP (x, 0)))
return gen_lowpart_for_combine (mode, x);
}
- /* If STORE_FLAG_VALUE is -1, (not (comparison foo bar)) can be done by
- reversing the comparison code if valid. */
- if (STORE_FLAG_VALUE == -1
- && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
- && (reversed = reversed_comparison (x, mode, XEXP (XEXP (x, 0), 0),
- XEXP (XEXP (x, 0), 1))))
- return reversed;
-
- /* (not (ashiftrt foo C)) where C is the number of bits in FOO minus 1
- is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1, so we can
- perform the above simplification. */
-
- if (STORE_FLAG_VALUE == -1
- && GET_CODE (XEXP (x, 0)) == ASHIFTRT
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
- && INTVAL (XEXP (XEXP (x, 0), 1)) == GET_MODE_BITSIZE (mode) - 1)
- return gen_rtx_GE (mode, XEXP (XEXP (x, 0), 0), const0_rtx);
-
/* Apply De Morgan's laws to reduce number of patterns for machines
with negating logical insns (and-not, nand, etc.). If result has
only one NOT, put it first, since that is how the patterns are
break;
case NEG:
- /* (neg (plus X 1)) can become (not X). */
- if (GET_CODE (XEXP (x, 0)) == PLUS
- && XEXP (XEXP (x, 0), 1) == const1_rtx)
- return gen_rtx_NOT (mode, XEXP (XEXP (x, 0), 0));
-
- /* Similarly, (neg (not X)) is (plus X 1). */
- if (GET_CODE (XEXP (x, 0)) == NOT)
- return plus_constant (XEXP (XEXP (x, 0), 0), 1);
-
- /* (neg (minus X Y)) can become (minus Y X). This transformation
- isn't safe for modes with signed zeros, since if X and Y are
- both +0, (minus Y X) is the same as (minus X Y). If the rounding
- mode is towards +infinity (or -infinity) then the two expressions
- will be rounded differently. */
- if (GET_CODE (XEXP (x, 0)) == MINUS
- && !HONOR_SIGNED_ZEROS (mode)
- && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
- return gen_binary (MINUS, mode, XEXP (XEXP (x, 0), 1),
- XEXP (XEXP (x, 0), 0));
-
- /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
- if (GET_CODE (XEXP (x, 0)) == PLUS
- && !HONOR_SIGNED_ZEROS (mode)
- && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
- {
- temp = simplify_gen_unary (NEG, mode, XEXP (XEXP (x, 0), 0), mode);
- temp = combine_simplify_rtx (temp, mode, last, in_dest);
- return gen_binary (MINUS, mode, temp, XEXP (XEXP (x, 0), 1));
- }
-
- /* (neg (mult A B)) becomes (mult (neg A) B).
- This works even for floating-point values. */
- if (GET_CODE (XEXP (x, 0)) == MULT)
- {
- temp = simplify_gen_unary (NEG, mode, XEXP (XEXP (x, 0), 0), mode);
- return gen_binary (MULT, mode, temp, XEXP (XEXP (x, 0), 1));
- }
-
/* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
- if (GET_CODE (XEXP (x, 0)) == XOR && XEXP (XEXP (x, 0), 1) == const1_rtx
+ if (GET_CODE (XEXP (x, 0)) == XOR
+ && XEXP (XEXP (x, 0), 1) == const1_rtx
&& nonzero_bits (XEXP (XEXP (x, 0), 0), mode) == 1)
return gen_binary (PLUS, mode, XEXP (XEXP (x, 0), 0), constm1_rtx);
- /* NEG commutes with ASHIFT since it is multiplication. Only do this
- if we can then eliminate the NEG (e.g.,
- if the operand is a constant). */
-
- if (GET_CODE (XEXP (x, 0)) == ASHIFT)
- {
- temp = simplify_unary_operation (NEG, mode,
- XEXP (XEXP (x, 0), 0), mode);
- if (temp)
- return gen_binary (ASHIFT, mode, temp, XEXP (XEXP (x, 0), 1));
- }
-
temp = expand_compound_operation (XEXP (x, 0));
/* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
/* (float_truncate:SF (float_truncate:DF foo:XF))
= (float_truncate:SF foo:XF).
- This may elliminate double rounding, so it is unsafe.
+ This may eliminate double rounding, so it is unsafe.
(float_truncate:SF (float_extend:XF foo:DF))
= (float_truncate:SF foo:DF).
/* Simplify our comparison, if possible. */
new_code = simplify_comparison (old_code, &op0, &op1);
-#ifdef EXTRA_CC_MODES
+#ifdef SELECT_CC_MODE
/* If this machine has CC modes other than CCmode, check to see if we
need to use a different CC mode here. */
compare_mode = SELECT_CC_MODE (new_code, op0, op1);
-#endif /* EXTRA_CC_MODES */
-#if !defined (HAVE_cc0) && defined (EXTRA_CC_MODES)
+#ifndef HAVE_cc0
/* If the mode changed, we have to change SET_DEST, the mode in the
compare, and the mode in the place SET_DEST is used. If SET_DEST is
a hard register, just build new versions with the proper mode. If it
dest = new_dest;
}
}
-#endif
+#endif /* cc0 */
+#endif /* SELECT_CC_MODE */
/* If the code changed, we have to build a new comparison in
undobuf.other_insn. */
if (new_code != old_code)
{
+ int other_changed_previously = other_changed;
unsigned HOST_WIDE_INT mask;
SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
dest, const0_rtx));
+ other_changed = 1;
/* If the only change we made was to change an EQ into an NE or
vice versa, OP0 has only one bit that might be nonzero, and OP1
if (((old_code == NE && new_code == EQ)
|| (old_code == EQ && new_code == NE))
- && ! other_changed && op1 == const0_rtx
+ && ! other_changed_previously && op1 == const0_rtx
&& GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
&& exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
{
&& ! check_asm_operands (pat)))
{
PUT_CODE (*cc_use, old_code);
- other_insn = 0;
+ other_changed = 0;
op0 = gen_binary (XOR, GET_MODE (op0), op0, GEN_INT (mask));
}
}
-
- other_changed = 1;
}
if (other_changed)
{
if (tmode != inner_mode)
{
- if (in_dest)
+ /* We can't call gen_lowpart_for_combine in a DEST since we
+ always want a SUBREG (see below) and it would sometimes
+ return a new hard register. */
+ if (pos || in_dest)
{
- /* We can't call gen_lowpart_for_combine here since we always want
- a SUBREG and it would sometimes return a new hard register. */
HOST_WIDE_INT final_word = pos / BITS_PER_WORD;
if (WORDS_BIG_ENDIAN
mask &= GET_MODE_MASK (op_mode);
/* When we have an arithmetic operation, or a shift whose count we
- do not know, we need to assume that all bit the up to the highest-order
+ do not know, we need to assume that all bits up to the highest-order
bit in MASK will be needed. This is how we form such a mask. */
- if (op_mode)
- fuller_mask = (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT
- ? GET_MODE_MASK (op_mode)
- : (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
- - 1));
+ if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
+ fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
else
- fuller_mask = ~(HOST_WIDE_INT) 0;
+ fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
+ - 1);
/* Determine what bits of X are guaranteed to be (non)zero. */
nonzero = nonzero_bits (x, mode);
&& (INTVAL (XEXP (x, 1))
<= GET_MODE_BITSIZE (GET_MODE (x)) - (floor_log2 (mask) + 1))
&& GET_CODE (XEXP (x, 0)) == ASHIFT
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
- && INTVAL (XEXP (XEXP (x, 0), 1)) == INTVAL (XEXP (x, 1)))
+ && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
reg, next_select);
/* If we are comparing a value against zero, we are done. */
if ((code == NE || code == EQ)
- && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 0)
+ && XEXP (x, 1) == const0_rtx)
{
*ptrue = (code == NE) ? const_true_rtx : const0_rtx;
*pfalse = (code == NE) ? const0_rtx : const_true_rtx;
}
/* Likewise for 0 or a single bit. */
- else if (mode != VOIDmode
+ else if (SCALAR_INT_MODE_P (mode)
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
&& exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
{
&& (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
< GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
&& GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
+ && GET_CODE (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == CONST_INT
&& INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
&& rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
{
apply_distributive_law (rtx x)
{
enum rtx_code code = GET_CODE (x);
+ enum rtx_code inner_code;
rtx lhs, rhs, other;
rtx tem;
- enum rtx_code inner_code;
- /* Distributivity is not true for floating point.
- It can change the value. So don't do it.
- -- rms and moshier@world.std.com. */
- if (FLOAT_MODE_P (GET_MODE (x)))
+ /* Distributivity is not true for floating point as it can change the
+ value. So we don't do it unless -funsafe-math-optimizations. */
+ if (FLOAT_MODE_P (GET_MODE (x))
+ && ! flag_unsafe_math_optimizations)
return x;
/* The outer operation can only be one of the following: */
&& code != PLUS && code != MINUS)
return x;
- lhs = XEXP (x, 0), rhs = XEXP (x, 1);
+ lhs = XEXP (x, 0);
+ rhs = XEXP (x, 1);
/* If either operand is a primitive we can't do anything, so get out
fast. */
tem = gen_binary (code, GET_MODE (x), lhs, rhs);
/* There is one exception to the general way of distributing:
- (a ^ b) | (a ^ c) -> (~a) & (b ^ c) */
+ (a | c) ^ (b | c) -> (a ^ b) & ~c */
if (code == XOR && inner_code == IOR)
{
inner_code = AND;
stack to be momentarily aligned only to that amount,
so we pick the least alignment. */
if (x == stack_pointer_rtx && PUSH_ARGS)
- alignment = MIN (PUSH_ROUNDING (1), alignment);
+ alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
+ alignment);
#endif
nonzero &= ~(alignment - 1);
break;
case EQ:
- /* convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
+ /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
says that the sign bit can be tested, FOO has mode MODE, C is
GET_MODE_BITSIZE (MODE) - 1, and FOO has only its low-order bit
that may be nonzero. */
rtx result;
rtx tem;
+ if (GET_CODE (op0) == CLOBBER)
+ return op0;
+ else if (GET_CODE (op1) == CLOBBER)
+ return op1;
+
if (GET_RTX_CLASS (code) == 'c'
&& swap_commutative_operands_p (op0, op1))
tem = op0, op0 = op1, op1 = tem;
&& (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
== GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
&& GET_CODE (XEXP (op0, 1)) == CONST_INT
- && GET_CODE (XEXP (op1, 1)) == CONST_INT
- && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
- && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT
- && INTVAL (XEXP (op0, 1)) == INTVAL (XEXP (op1, 1))
- && INTVAL (XEXP (op0, 1)) == INTVAL (XEXP (XEXP (op0, 0), 1))
- && INTVAL (XEXP (op0, 1)) == INTVAL (XEXP (XEXP (op1, 0), 1))
+ && XEXP (op0, 1) == XEXP (op1, 1)
+ && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
+ && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
&& (INTVAL (XEXP (op0, 1))
== (GET_MODE_BITSIZE (GET_MODE (op0))
- (GET_MODE_BITSIZE
}
}
- /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
- (eq (and (lshiftrt X) 1) 0). */
+ /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
if (const_op == 0 && equality_comparison_p
&& XEXP (op0, 1) == const1_rtx
- && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
- && GET_CODE (XEXP (XEXP (op0, 0), 0)) == NOT)
+ && GET_CODE (XEXP (op0, 0)) == NOT)
{
op0 = simplify_and_const_int
- (op0, mode,
- gen_rtx_LSHIFTRT (mode, XEXP (XEXP (XEXP (op0, 0), 0), 0),
- XEXP (XEXP (op0, 0), 1)),
- (HOST_WIDE_INT) 1);
+ (NULL_RTX, mode, XEXP (XEXP (op0, 0), 0), (HOST_WIDE_INT) 1);
code = (code == NE ? EQ : NE);
continue;
}
+
+ /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
+ (eq (and (lshiftrt X) 1) 0).
+ Also handle the case where (not X) is expressed using xor. */
+ if (const_op == 0 && equality_comparison_p
+ && XEXP (op0, 1) == const1_rtx
+ && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
+ {
+ rtx shift_op = XEXP (XEXP (op0, 0), 0);
+ rtx shift_count = XEXP (XEXP (op0, 0), 1);
+
+ if (GET_CODE (shift_op) == NOT
+ || (GET_CODE (shift_op) == XOR
+ && GET_CODE (XEXP (shift_op, 1)) == CONST_INT
+ && GET_CODE (shift_count) == CONST_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (INTVAL (XEXP (shift_op, 1))
+ == (HOST_WIDE_INT) 1 << INTVAL (shift_count))))
+ {
+ op0 = simplify_and_const_int
+ (NULL_RTX, mode,
+ gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count),
+ (HOST_WIDE_INT) 1);
+ code = (code == NE ? EQ : NE);
+ continue;
+ }
+ }
break;
case ASHIFT:
op1 = make_compound_operation (op1, SET);
if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
- /* Case 3 above, to sometimes allow (subreg (mem x)), isn't
- implemented. */
- && GET_CODE (SUBREG_REG (op0)) == REG
&& GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
&& GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
&& (code == NE || code == EQ))
if (GET_MODE_SIZE (GET_MODE (op0))
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))
{
- op0 = SUBREG_REG (op0);
- op1 = gen_lowpart_for_combine (GET_MODE (op0), op1);
+ /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
+ implemented. */
+ if (GET_CODE (SUBREG_REG (op0)) == REG)
+ {
+ op0 = SUBREG_REG (op0);
+ op1 = gen_lowpart_for_combine (GET_MODE (op0), op1);
+ }
}
else if ((GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
<= HOST_BITS_PER_WIDE_INT)
else
{
FOR_EACH_BB (block)
- if (insn == block->head)
+ if (insn == BB_HEAD (block))
break;
if (block == EXIT_BLOCK_PTR)
place = i3;
break;
+ case REG_VALUE_PROFILE:
+ /* Just get rid of this note, as it is unused later anyway. */
+ break;
+
case REG_VTABLE_REF:
/* ??? Should remain with *a particular* memory load. Given the
nature of vtable data, the last insn seems relatively safe. */
abort ();
break;
+ case REG_ALWAYS_RETURN:
case REG_NORETURN:
case REG_SETJMP:
/* These notes must remain with the call. It should not be
break;
case REG_NONNEG:
- case REG_WAS_0:
- /* These notes say something about the value of a register prior
+ /* This note says something about the value of a register prior
to the execution of an insn. It is too much trouble to see
if the note is still correct in all situations. It is better
to simply delete it. */
{
if (! INSN_P (tem))
{
- if (tem == bb->head)
+ if (tem == BB_HEAD (bb))
break;
continue;
}
This might delete other dead insns recursively.
First set the pattern to something that won't use
any register. */
+ rtx old_notes = REG_NOTES (tem);
PATTERN (tem) = pc_rtx;
+ REG_NOTES (tem) = NULL;
- distribute_notes (REG_NOTES (tem), tem, tem,
- NULL_RTX);
+ distribute_notes (old_notes, tem, tem, NULL_RTX);
distribute_links (LOG_LINKS (tem));
PUT_CODE (tem, NOTE);
if (cc0_setter)
{
PATTERN (cc0_setter) = pc_rtx;
+ old_notes = REG_NOTES (cc0_setter);
+ REG_NOTES (cc0_setter) = NULL;
- distribute_notes (REG_NOTES (cc0_setter),
- cc0_setter, cc0_setter,
- NULL_RTX);
+ distribute_notes (old_notes, cc0_setter,
+ cc0_setter, NULL_RTX);
distribute_links (LOG_LINKS (cc0_setter));
PUT_CODE (cc0_setter, NOTE);
break;
}
- if (tem == bb->head)
+ if (tem == BB_HEAD (bb))
break;
}
{
if (! INSN_P (tem))
{
- if (tem == bb->head)
+ if (tem == BB_HEAD (bb))
{
SET_BIT (refresh_blocks,
this_basic_block->index);
}
\f
/* Similarly to above, distribute the LOG_LINKS that used to be present on
- I3, I2, and I1 to new locations. This is also called in one case to
- add a link pointing at I3 when I3's destination is changed. */
+ I3, I2, and I1 to new locations. This is also called to add a link
+ pointing at I3 when I3's destination is changed. */
static void
distribute_links (rtx links)
for (insn = NEXT_INSN (XEXP (link, 0));
(insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
- || this_basic_block->next_bb->head != insn));
+ || BB_HEAD (this_basic_block->next_bb) != insn));
insn = NEXT_INSN (insn))
if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
{
place = insn;
break;
}
+ else if (INSN_P (insn) && reg_set_p (reg, insn))
+ break;
/* If we found a place to put the link, place it there unless there
is already a link to the same insn as LINK at that point. */