+ {
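+ /* The rtl_hooks.reg_nonzero_bits hook narrows NONZERO_FOR_HOOK with
+ whatever is recorded about this register, and may also return an
+ equivalent rtx whose nonzero bits further constrain the result. */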
+ unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
+ rtx new = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
+ known_mode, known_ret,
+ &nonzero_for_hook);
+
+ if (new)
+ nonzero_for_hook &= cached_nonzero_bits (new, mode, known_x,
+ known_mode, known_ret);
+
+ return nonzero_for_hook;
+ }
+
+ case CONST_INT:
+#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
+ /* If X is negative in MODE, sign-extend the value. */
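+ /* For example, (const_int 128) in QImode (mode_width 8) has bit 7
+ set, so it is treated as the value -128: bits 7 and above are all
+ reported as possibly nonzero. */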
+ if (INTVAL (x) > 0 && mode_width < BITS_PER_WORD
+ && 0 != (INTVAL (x) & ((HOST_WIDE_INT) 1 << (mode_width - 1))))
+ return (INTVAL (x) | ((HOST_WIDE_INT) (-1) << mode_width));
+#endif
+
+ return INTVAL (x);
+
+ case MEM:
+#ifdef LOAD_EXTEND_OP
+ /* On many, if not most, RISC machines, reading a byte from memory
+ zeros the rest of the register. Noticing that fact saves a lot
+ of extra zero-extends. */
+ if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
+ nonzero &= GET_MODE_MASK (GET_MODE (x));
+#endif
+ break;
+
+ case EQ: case NE:
+ case UNEQ: case LTGT:
+ case GT: case GTU: case UNGT:
+ case LT: case LTU: case UNLT:
+ case GE: case GEU: case UNGE:
+ case LE: case LEU: case UNLE:
+ case UNORDERED: case ORDERED:
+ /* If this produces an integer result, we know which bits are set.
+ Code here used to clear bits outside the mode of X, but that is
+ now done above. */
+ /* Mind that MODE is the mode the caller wants to look at this
+ operation in, and not the actual operation mode. We can wind
+ up with (subreg:DI (gt:V4HI x y)), and we don't have anything
+ that describes the results of a vector compare. */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
+ && mode_width <= HOST_BITS_PER_WIDE_INT)
+ nonzero = STORE_FLAG_VALUE;
+ break;
+
+ case NEG:
+#if 0
+ /* Disabled to avoid exponential mutual recursion between nonzero_bits
+ and num_sign_bit_copies. */
+ if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
+ == GET_MODE_BITSIZE (GET_MODE (x)))
+ nonzero = 1;
+#endif
+
+ if (GET_MODE_BITSIZE (GET_MODE (x)) < mode_width)
+ nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
+ break;
+
+ case ABS:
+#if 0
+ /* Disabled to avoid exponential mutual recursion between nonzero_bits
+ and num_sign_bit_copies. */
+ if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
+ == GET_MODE_BITSIZE (GET_MODE (x)))
+ nonzero = 1;
+#endif
+ break;
+
+ case TRUNCATE:
+ nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret)
+ & GET_MODE_MASK (mode));
+ break;
+
+ case ZERO_EXTEND:
+ nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ if (GET_MODE (XEXP (x, 0)) != VOIDmode)
+ nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
+ break;
+
+ case SIGN_EXTEND:
+ /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
+ Otherwise, mark as possibly nonzero all the bits that are in the
+ outer mode but not in the inner mode. */
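+ /* For example, extending a QImode value whose nonzero bits are 0x80
+ to SImode gives nonzero bits 0xffffff80, since the sign bit may be
+ set. */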
+ inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ if (GET_MODE (XEXP (x, 0)) != VOIDmode)
+ {
+ inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
+ if (inner_nz
+ & (((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
+ inner_nz |= (GET_MODE_MASK (mode)
+ & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
+ }
+
+ nonzero &= inner_nz;
+ break;
+
+ case AND:
+ nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret)
+ & cached_nonzero_bits (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+ break;
+
+ case XOR: case IOR:
+ case UMIN: case UMAX: case SMIN: case SMAX:
+ {
+ unsigned HOST_WIDE_INT nonzero0 =
+ cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+
+ /* Don't call nonzero_bits a second time if it cannot change
+ anything. */
+ if ((nonzero & nonzero0) != nonzero)
+ nonzero &= nonzero0
+ | cached_nonzero_bits (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+ }
+ break;
+
+ case PLUS: case MINUS:
+ case MULT:
+ case DIV: case UDIV:
+ case MOD: case UMOD:
+ /* We can apply the rules of arithmetic to compute the number of
+ high- and low-order zero bits of these operations. We start by
+ computing the width (position of the highest-order nonzero bit)
+ and the number of low-order zero bits for each value. */
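+ /* For example, under PLUS, if NZ0 == 0xf0 (width 8, 4 low zero bits)
+ and NZ1 == 0x0c (width 4, 2 low zero bits), the sum needs at most
+ MAX (8, 4) + 1 == 9 bits and keeps MIN (4, 2) == 2 low zero bits. */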
+ {
+ unsigned HOST_WIDE_INT nz0 =
+ cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ unsigned HOST_WIDE_INT nz1 =
+ cached_nonzero_bits (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+ int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1;
+ int width0 = floor_log2 (nz0) + 1;
+ int width1 = floor_log2 (nz1) + 1;
+ int low0 = floor_log2 (nz0 & -nz0);
+ int low1 = floor_log2 (nz1 & -nz1);
+ HOST_WIDE_INT op0_maybe_minusp
+ = (nz0 & ((HOST_WIDE_INT) 1 << sign_index));
+ HOST_WIDE_INT op1_maybe_minusp
+ = (nz1 & ((HOST_WIDE_INT) 1 << sign_index));
+ unsigned int result_width = mode_width;
+ int result_low = 0;
+
+ switch (code)
+ {
+ case PLUS:
+ result_width = MAX (width0, width1) + 1;
+ result_low = MIN (low0, low1);
+ break;
+ case MINUS:
+ result_low = MIN (low0, low1);
+ break;
+ case MULT:
+ result_width = width0 + width1;
+ result_low = low0 + low1;
+ break;
+ case DIV:
+ if (width1 == 0)
+ break;
+ if (! op0_maybe_minusp && ! op1_maybe_minusp)
+ result_width = width0;
+ break;
+ case UDIV:
+ if (width1 == 0)
+ break;
+ result_width = width0;
+ break;
+ case MOD:
+ if (width1 == 0)
+ break;
+ if (! op0_maybe_minusp && ! op1_maybe_minusp)
+ result_width = MIN (width0, width1);
+ result_low = MIN (low0, low1);
+ break;
+ case UMOD:
+ if (width1 == 0)
+ break;
+ result_width = MIN (width0, width1);
+ result_low = MIN (low0, low1);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ if (result_width < mode_width)
+ nonzero &= ((HOST_WIDE_INT) 1 << result_width) - 1;
+
+ if (result_low > 0)
+ nonzero &= ~(((HOST_WIDE_INT) 1 << result_low) - 1);
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ /* If pointers extend unsigned and this is an addition or subtraction
+ to a pointer in Pmode, all the bits above ptr_mode are known to be
+ zero. */
+ if (POINTERS_EXTEND_UNSIGNED > 0 && GET_MODE (x) == Pmode
+ && (code == PLUS || code == MINUS)
+ && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
+ nonzero &= GET_MODE_MASK (ptr_mode);
+#endif
+ }
+ break;
+
+ case ZERO_EXTRACT:
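+ /* A ZERO_EXTRACT of LEN bits (operand 1) yields a value with at most
+ LEN low-order nonzero bits. */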
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
+ nonzero &= ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
+ break;
+
+ case SUBREG:
+ /* If this is a SUBREG formed for a promoted variable that has
+ been zero-extended, we know that at least the high-order bits
+ are zero, though others might be too. */
+
+ if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
+ nonzero = GET_MODE_MASK (GET_MODE (x))
+ & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
+ known_x, known_mode, known_ret);
+
+ /* If the inner mode is a single word for both the host and target
+ machines, we can compute this from which bits of the inner
+ object might be nonzero. */
+ if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD
+ && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
+ <= HOST_BITS_PER_WIDE_INT))
+ {
+ nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
+ known_x, known_mode, known_ret);
+
+#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
+ /* If this is a typical RISC machine, we only have to worry
+ about the way loads are extended. */
+ if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
+ ? (((nonzero
+ & (((unsigned HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1))))
+ != 0))
+ : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
+ || !MEM_P (SUBREG_REG (x)))
+#endif
+ {
+ /* On many CISC machines, accessing an object in a wider mode
+ causes the high-order bits to become undefined. So they are
+ not known to be zero. */
+ if (GET_MODE_SIZE (GET_MODE (x))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ nonzero |= (GET_MODE_MASK (GET_MODE (x))
+ & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
+ }
+ }
+ break;
+
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case ASHIFT:
+ case ROTATE:
+ /* The nonzero bits are in two classes: any bits within MODE
+ that aren't in GET_MODE (x) are always significant. The rest of the
+ nonzero bits are those that are significant in the operand of
+ the shift when shifted the appropriate number of bits. This
+ shows that high-order bits are cleared by the right shift and
+ low-order bits by left shifts. */
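+ /* For example, (ashiftrt:SI X (const_int 8)) may copy a nonzero sign
+ bit into bits 23 through 31, while (ashift:SI X (const_int 8))
+ always clears bits 0 through 7. */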
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ enum machine_mode inner_mode = GET_MODE (x);
+ unsigned int width = GET_MODE_BITSIZE (inner_mode);
+ int count = INTVAL (XEXP (x, 1));
+ unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
+ unsigned HOST_WIDE_INT op_nonzero =
+ cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
+ unsigned HOST_WIDE_INT outer = 0;
+
+ if (mode_width > width)
+ outer = (op_nonzero & nonzero & ~mode_mask);
+
+ if (code == LSHIFTRT)
+ inner >>= count;
+ else if (code == ASHIFTRT)
+ {
+ inner >>= count;
+
+ /* If the sign bit may have been nonzero before the shift, we
+ need to mark all the places it could have been copied to
+ by the shift as possibly nonzero. */
+ if (inner & ((HOST_WIDE_INT) 1 << (width - 1 - count)))
+ inner |= (((HOST_WIDE_INT) 1 << count) - 1) << (width - count);
+ }
+ else if (code == ASHIFT)
+ inner <<= count;
+ else
+ inner = ((inner << (count % width)
+ | (inner >> (width - (count % width)))) & mode_mask);
+
+ nonzero &= (outer | inner);
+ }
+ break;
+
+ case FFS:
+ case POPCOUNT:
+ /* This is at most the number of bits in the mode. */
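+ /* For example, with mode_width 32 this computes (2 << 5) - 1 == 0x3f,
+ enough for every result in the range 0 through 32. */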
+ nonzero = ((HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
+ break;
+
+ case CLZ:
+ /* If CLZ has a known value at zero, then the result is either that
+ value or a leading-zero count of a nonzero operand, which is at most
+ the number of bits in the mode minus one. */
+ if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
+ nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
+ else
+ nonzero = -1;
+ break;
+
+ case CTZ:
+ /* If CTZ has a known value at zero, then the result is either that
+ value or a trailing-zero count of a nonzero operand, which is at most
+ the number of bits in the mode minus one. */
+ if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
+ nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
+ else
+ nonzero = -1;
+ break;
+
+ case PARITY:
+ nonzero = 1;
+ break;
+
+ case IF_THEN_ELSE:
+ {
+ unsigned HOST_WIDE_INT nonzero_true =
+ cached_nonzero_bits (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+
+ /* Don't call nonzero_bits a second time if it cannot change
+ anything. */
+ if ((nonzero & nonzero_true) != nonzero)
+ nonzero &= nonzero_true
+ | cached_nonzero_bits (XEXP (x, 2), mode,
+ known_x, known_mode, known_ret);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return nonzero;
+}
+
+/* See the macro definition above. */
+#undef cached_num_sign_bit_copies
+
+\f
+/* The function cached_num_sign_bit_copies is a wrapper around
+ num_sign_bit_copies1. It avoids exponential behavior in
+ num_sign_bit_copies1 when X has identical subexpressions on the
+ first or the second level. */
+
+static unsigned int
+cached_num_sign_bit_copies (rtx x, enum machine_mode mode, rtx known_x,
+ enum machine_mode known_mode,
+ unsigned int known_ret)
+{
+ if (x == known_x && mode == known_mode)
+ return known_ret;
+
+ /* Try to find identical subexpressions. If found call
+ num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
+ the precomputed value for the subexpression as KNOWN_RET. */
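+ /* For example, in (plus:SI (reg:SI 60) (reg:SI 60)) both operands are
+ the same rtx, so the value for (reg:SI 60) is computed once and then
+ reused when the second operand is visited. */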
+
+ if (ARITHMETIC_P (x))
+ {
+ rtx x0 = XEXP (x, 0);
+ rtx x1 = XEXP (x, 1);
+
+ /* Check the first level. */
+ if (x0 == x1)
+ return
+ num_sign_bit_copies1 (x, mode, x0, mode,
+ cached_num_sign_bit_copies (x0, mode, known_x,
+ known_mode,
+ known_ret));
+
+ /* Check the second level. */
+ if (ARITHMETIC_P (x0)
+ && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
+ return
+ num_sign_bit_copies1 (x, mode, x1, mode,
+ cached_num_sign_bit_copies (x1, mode, known_x,
+ known_mode,
+ known_ret));
+
+ if (ARITHMETIC_P (x1)
+ && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
+ return
+ num_sign_bit_copies1 (x, mode, x0, mode,
+ cached_num_sign_bit_copies (x0, mode, known_x,
+ known_mode,
+ known_ret));
+ }
+
+ return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
+}
+
+/* Return the number of bits at the high-order end of X that are known to
+ be equal to the sign bit. X will be used in mode MODE; if MODE is
+ VOIDmode, X will be used in its own mode. The returned value will always
+ be between 1 and the number of bits in MODE. */
+
+static unsigned int
+num_sign_bit_copies1 (rtx x, enum machine_mode mode, rtx known_x,
+ enum machine_mode known_mode,
+ unsigned int known_ret)
+{
+ enum rtx_code code = GET_CODE (x);
+ unsigned int bitwidth = GET_MODE_BITSIZE (mode);
+ int num0, num1, result;
+ unsigned HOST_WIDE_INT nonzero;
+
+ /* If we weren't given a mode, use the mode of X. If the mode is still
+ VOIDmode, we don't know anything. Likewise if one of the modes is
+ floating-point. */
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (x);
+
+ if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x)))
+ return 1;
+
+ /* For a smaller object, just ignore the high bits. */
+ if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
+ {
+ num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
+ known_x, known_mode, known_ret);
+ return MAX (1,
+ num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth));
+ }
+
+ if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
+ {
+#ifndef WORD_REGISTER_OPERATIONS
+ /* If this machine does not do all register operations on the entire
+ register and MODE is wider than the mode of X, we can say nothing
+ at all about the high-order bits. */
+ return 1;
+#else
+ /* Likewise on machines that do, if the mode of the object is smaller
+ than a word and loads of that size don't sign extend, we can say
+ nothing about the high order bits. */
+ if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
+#ifdef LOAD_EXTEND_OP
+ && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
+#endif
+ )
+ return 1;
+#endif
+ }
+
+ switch (code)
+ {
+ case REG:
+
+#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
+ /* If pointers extend signed and this is a pointer in Pmode, say that
+ all the bits above ptr_mode are known to be sign bit copies. */
+ if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && mode == Pmode
+ && REG_POINTER (x))
+ return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1;
+#endif
+
+ {
+ unsigned int copies_for_hook = 1, copies = 1;
+ rtx new = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
+ known_mode, known_ret,
+ &copies_for_hook);
+
+ if (new)
+ copies = cached_num_sign_bit_copies (new, mode, known_x,
+ known_mode, known_ret);
+
+ if (copies > 1 || copies_for_hook > 1)
+ return MAX (copies, copies_for_hook);
+
+ /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
+ }
+ break;
+
+ case MEM:
+#ifdef LOAD_EXTEND_OP
+ /* Some RISC machines sign-extend all loads narrower than a word. */
+ if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
+ return MAX (1, ((int) bitwidth
+ - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1));
+#endif
+ break;
+
+ case CONST_INT:
+ /* If the constant is negative, take its 1's complement and remask.
+ Then see how many zero bits we have. */
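+ /* For example, (const_int -4) in SImode is 0xfffffffc; its masked
+ complement is 3, floor_log2 (3) == 1, and we return
+ 32 - 1 - 1 == 30 sign bit copies. */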
+ nonzero = INTVAL (x) & GET_MODE_MASK (mode);
+ if (bitwidth <= HOST_BITS_PER_WIDE_INT
+ && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ nonzero = (~nonzero) & GET_MODE_MASK (mode);
+
+ return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
+
+ case SUBREG:
+ /* If this is a SUBREG for a promoted object that is sign-extended
+ and we are looking at it in a wider mode, we know that at least
+ the high-order bits are copies of the sign bit. */
+
+ if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
+ {
+ num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
+ known_x, known_mode, known_ret);
+ return MAX ((int) bitwidth
+ - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1,
+ num0);
+ }
+
+ /* For a smaller object, just ignore the high bits. */
+ if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
+ {
+ num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
+ known_x, known_mode, known_ret);
+ return MAX (1, (num0
+ - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
+ - bitwidth)));
+ }
+
+#ifdef WORD_REGISTER_OPERATIONS
+#ifdef LOAD_EXTEND_OP
+ /* For paradoxical SUBREGs on machines where all register operations
+ affect the entire register, just look inside. Note that we are
+ passing MODE to the recursive call, so the number of sign bit copies
+ will remain relative to that mode, not the inner mode. */
+
+ /* This works only if loads sign extend. Otherwise, if we get a
+ reload for the inner part, it may be loaded from the stack, and
+ then we lose all sign bit copies that existed before the store
+ to the stack. */
+
+ if ((GET_MODE_SIZE (GET_MODE (x))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
+ && MEM_P (SUBREG_REG (x)))
+ return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
+ known_x, known_mode, known_ret);
+#endif
+#endif
+ break;
+
+ case SIGN_EXTRACT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
+ break;
+
+ case SIGN_EXTEND:
+ return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
+ known_x, known_mode, known_ret));
+
+ case TRUNCATE:
+ /* For a smaller object, just ignore the high bits. */
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
+ known_x, known_mode, known_ret);
+ return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ - bitwidth)));
+
+ case NOT:
+ return cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+
+ case ROTATE: case ROTATERT:
+ /* If we are rotating left by a number of bits less than the number
+ of sign bit copies, we can just subtract that amount from the
+ number. A rotate right by C is handled as a rotate left by
+ BITWIDTH - C. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && INTVAL (XEXP (x, 1)) < (int) bitwidth)
+ {
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
+ : (int) bitwidth - INTVAL (XEXP (x, 1))));
+ }
+ break;
+
+ case NEG:
+ /* In general, this subtracts one sign bit copy. But if the value
+ is known to be positive, the number of sign bit copies is the
+ same as that of the input. Finally, if the input has just one bit
+ that might be nonzero, all the bits are copies of the sign bit. */
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ if (bitwidth > HOST_BITS_PER_WIDE_INT)
+ return num0 > 1 ? num0 - 1 : 1;
+
+ nonzero = nonzero_bits (XEXP (x, 0), mode);
+ if (nonzero == 1)
+ return bitwidth;
+
+ if (num0 > 1
+ && (((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
+ num0--;
+
+ return num0;
+
+ case IOR: case AND: case XOR:
+ case SMIN: case SMAX: case UMIN: case UMAX:
+ /* Logical operations will preserve the number of sign-bit copies.
+ MIN and MAX operations always return one of the operands. */
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+ return MIN (num0, num1);
+
+ case PLUS: case MINUS:
+ /* For addition and subtraction, we can have a 1-bit carry. However,
+ if we are subtracting 1 from a positive number, there will not
+ be such a carry. Furthermore, if the positive number is known to
+ be 0 or 1, we know the result is either -1 or 0. */
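+ /* For example, in (plus:SI X (const_int -1)) with X known to be 0
+ or 1, the result is -1 or 0, so all 32 bits are copies of the
+ sign bit. */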
+
+ if (code == PLUS && XEXP (x, 1) == constm1_rtx
+ && bitwidth <= HOST_BITS_PER_WIDE_INT)
+ {
+ nonzero = nonzero_bits (XEXP (x, 0), mode);
+ if ((((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
+ return (nonzero == 1 || nonzero == 0 ? bitwidth
+ : bitwidth - floor_log2 (nonzero) - 1);
+ }
+
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+ result = MAX (1, MIN (num0, num1) - 1);
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ /* If pointers extend signed and this is an addition or subtraction
+ to a pointer in Pmode, all the bits above ptr_mode are known to be
+ sign bit copies. */
+ if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
+ && (code == PLUS || code == MINUS)
+ && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
+ result = MAX ((int) (GET_MODE_BITSIZE (Pmode)
+ - GET_MODE_BITSIZE (ptr_mode) + 1),
+ result);
+#endif
+ return result;
+
+ case MULT:
+ /* The number of bits of the product is the sum of the number of
+ bits of both terms. However, unless one of the terms is known
+ to be positive, we must allow for an additional bit since negating
+ a negative number can remove one sign bit copy. */
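+ /* For example, in SImode with num0 == 20 and num1 == 20 the product
+ has at least 20 + 20 - 32 == 8 sign bit copies, less one if both
+ operands might be negative. */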
+
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+
+ result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
+ if (result > 0
+ && (bitwidth > HOST_BITS_PER_WIDE_INT
+ || (((nonzero_bits (XEXP (x, 0), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ && ((nonzero_bits (XEXP (x, 1), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))))
+ result--;
+
+ return MAX (1, result);
+
+ case UDIV:
+ /* The result must be <= the first operand. If the first operand
+ has the high bit set, we know nothing about the number of sign
+ bit copies. */
+ if (bitwidth > HOST_BITS_PER_WIDE_INT)
+ return 1;
+ else if ((nonzero_bits (XEXP (x, 0), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ return 1;
+ else
+ return cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+
+ case UMOD:
+ /* The result must be <= the second operand. */
+ return cached_num_sign_bit_copies (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+
+ case DIV:
+ /* Similar to unsigned division, except that we have to worry about
+ the case where the divisor is negative, in which case we have
+ to add 1. */
+ result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ if (result > 1
+ && (bitwidth > HOST_BITS_PER_WIDE_INT
+ || (nonzero_bits (XEXP (x, 1), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
+ result--;
+
+ return result;
+
+ case MOD:
+ result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+ if (result > 1
+ && (bitwidth > HOST_BITS_PER_WIDE_INT
+ || (nonzero_bits (XEXP (x, 1), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
+ result--;
+
+ return result;
+
+ case ASHIFTRT:
+ /* An arithmetic right shift by a constant adds that constant to the
+ number of bits equal to the sign bit, up to a maximum of BITWIDTH. */
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) > 0)
+ num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
+
+ return num0;
+
+ case ASHIFT:
+ /* Left shifts destroy copies. */
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT
+ || INTVAL (XEXP (x, 1)) < 0
+ || INTVAL (XEXP (x, 1)) >= (int) bitwidth)
+ return 1;
+
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ return MAX (1, num0 - INTVAL (XEXP (x, 1)));
+
+ case IF_THEN_ELSE:
+ num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+ num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
+ known_x, known_mode, known_ret);
+ return MIN (num0, num1);
+
+ case EQ: case NE: case GE: case GT: case LE: case LT:
+ case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
+ case GEU: case GTU: case LEU: case LTU:
+ case UNORDERED: case ORDERED:
+ /* The result is either STORE_FLAG_VALUE or zero. If STORE_FLAG_VALUE
+ is negative, take its 1's complement and remask. Then see how many
+ zero bits we have. */
+ nonzero = STORE_FLAG_VALUE;
+ if (bitwidth <= HOST_BITS_PER_WIDE_INT
+ && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ nonzero = (~nonzero) & GET_MODE_MASK (mode);
+
+ return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
+
+ default:
+ break;
+ }
+
+ /* If we haven't been able to figure it out by one of the above rules,
+ see if some of the high-order bits are known to be zero. If so,
+ count those bits and return one less than that amount. If we can't
+ safely compute the mask for this mode, conservatively return 1. */
+
+ bitwidth = GET_MODE_BITSIZE (mode);
+ if (bitwidth > HOST_BITS_PER_WIDE_INT)
+ return 1;
+
+ nonzero = nonzero_bits (x, mode);
+ return nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))
+ ? 1 : bitwidth - floor_log2 (nonzero) - 1;
+}
+
+/* Calculate the rtx_cost of a single instruction. A return value of
+ zero indicates an instruction pattern without a known cost. */
+
+int
+insn_rtx_cost (rtx pat)
+{
+ int i, cost;
+ rtx set;
+
+ /* Extract the single set rtx from the instruction pattern.
+ We can't use single_set since we only have the pattern. */
+ if (GET_CODE (pat) == SET)
+ set = pat;
+ else if (GET_CODE (pat) == PARALLEL)
+ {
+ set = NULL_RTX;
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ {
+ rtx x = XVECEXP (pat, 0, i);
+ if (GET_CODE (x) == SET)
+ {
+ if (set)
+ return 0;
+ set = x;
+ }
+ }
+ if (!set)
+ return 0;
+ }
+ else
+ return 0;
+
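+ /* If the source of the single set has no known cost, charge it as a
+ single instruction rather than reporting failure. */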
+ cost = rtx_cost (SET_SRC (set), SET);
+ return cost > 0 ? cost : COSTS_N_INSNS (1);
+}
+
+/* Given an insn INSN and condition COND, return the condition in a
+ canonical form to simplify testing by callers. Specifically:
+
+ (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
+ (2) Both operands will be machine operands; (cc0) will have been replaced.
+ (3) If an operand is a constant, it will be the second operand.
+ (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
+ for GE, GEU, and LEU.
+
+ If the condition cannot be understood, or is an inequality floating-point
+ comparison which needs to be reversed, 0 will be returned.
+
+ If REVERSE is nonzero, then reverse the condition prior to
+ canonicalizing it.
+
+ If EARLIEST is nonzero, it is a pointer to a place where the earliest
+ insn used in locating the condition was found. If a replacement test
+ of the condition is desired, it should be placed in front of that
+ insn and we will be sure that the inputs are still valid.
+
+ If WANT_REG is nonzero, we wish the condition to be relative to that
+ register, if possible. Therefore, do not canonicalize the condition
+ further. If ALLOW_CC_MODE is nonzero, allow the condition returned
+ to be a compare to a CC mode register.
+
+ If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
+ and at INSN. */
+
+rtx
+canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
+ rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
+{
+ enum rtx_code code;
+ rtx prev = insn;
+ rtx set;
+ rtx tem;
+ rtx op0, op1;
+ int reverse_code = 0;
+ enum machine_mode mode;
+
+ code = GET_CODE (cond);
+ mode = GET_MODE (cond);
+ op0 = XEXP (cond, 0);
+ op1 = XEXP (cond, 1);
+
+ if (reverse)
+ code = reversed_comparison_code (cond, insn);
+ if (code == UNKNOWN)
+ return 0;
+
+ if (earliest)
+ *earliest = insn;
+
+ /* If we are comparing a register with zero, see if the register is set
+ in the previous insn to a COMPARE or a comparison operation. Perform
+ the same tests, as a function of STORE_FLAG_VALUE, that
+ find_comparison_args in cse.c performs. */
+
+ while ((GET_RTX_CLASS (code) == RTX_COMPARE
+ || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
+ && op1 == CONST0_RTX (GET_MODE (op0))
+ && op0 != want_reg)
+ {
+ /* X is set nonzero when we find something of interest. */
+ rtx x = 0;
+
+#ifdef HAVE_cc0
+ /* If comparison with cc0, import actual comparison from compare
+ insn. */
+ if (op0 == cc0_rtx)
+ {
+ if ((prev = prev_nonnote_insn (prev)) == 0
+ || !NONJUMP_INSN_P (prev)
+ || (set = single_set (prev)) == 0
+ || SET_DEST (set) != cc0_rtx)
+ return 0;
+
+ op0 = SET_SRC (set);
+ op1 = CONST0_RTX (GET_MODE (op0));
+ if (earliest)
+ *earliest = prev;
+ }
+#endif
+
+ /* If this is a COMPARE, pick up the two things being compared. */
+ if (GET_CODE (op0) == COMPARE)
+ {
+ op1 = XEXP (op0, 1);
+ op0 = XEXP (op0, 0);
+ continue;
+ }
+ else if (!REG_P (op0))
+ break;
+
+ /* Go back to the previous insn. Stop if it is not an INSN. We also
+ stop if it isn't a single set or if it has a REG_INC note because
+ we don't want to bother dealing with it. */
+
+ if ((prev = prev_nonnote_insn (prev)) == 0
+ || !NONJUMP_INSN_P (prev)
+ || FIND_REG_INC_NOTE (prev, NULL_RTX))
+ break;
+
+ set = set_of (op0, prev);
+
+ if (set
+ && (GET_CODE (set) != SET
+ || !rtx_equal_p (SET_DEST (set), op0)))
+ break;
+
+ /* If this is setting OP0, get what it sets it to if it looks
+ relevant. */
+ if (set)
+ {
+ enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
+#ifdef FLOAT_STORE_FLAG_VALUE
+ REAL_VALUE_TYPE fsfv;
+#endif
+
+ /* ??? We may not combine comparisons done in a CCmode with
+ comparisons not done in a CCmode. This is to aid targets
+ like Alpha that have an IEEE compliant EQ instruction, and
+ a non-IEEE compliant BEQ instruction. The use of CCmode is
+ actually artificial, simply to prevent the combination, but
+ should not affect other platforms.
+
+ However, we must allow VOIDmode comparisons to match either
+ CCmode or non-CCmode comparison, because some ports have
+ modeless comparisons inside branch patterns.
+
+ ??? This mode check should perhaps look more like the mode check
+ in simplify_comparison in combine. */
+
+ if ((GET_CODE (SET_SRC (set)) == COMPARE
+ || (((code == NE
+ || (code == LT
+ && GET_MODE_CLASS (inner_mode) == MODE_INT
+ && (GET_MODE_BITSIZE (inner_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (inner_mode) - 1))))
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (code == LT
+ && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
+ && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
+ REAL_VALUE_NEGATIVE (fsfv)))
+#endif
+ ))
+ && COMPARISON_P (SET_SRC (set))))
+ && (((GET_MODE_CLASS (mode) == MODE_CC)
+ == (GET_MODE_CLASS (inner_mode) == MODE_CC))
+ || mode == VOIDmode || inner_mode == VOIDmode))
+ x = SET_SRC (set);
+ else if (((code == EQ
+ || (code == GE
+ && (GET_MODE_BITSIZE (inner_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && GET_MODE_CLASS (inner_mode) == MODE_INT
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (inner_mode) - 1))))
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (code == GE
+ && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
+ && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
+ REAL_VALUE_NEGATIVE (fsfv)))
+#endif
+ ))
+ && COMPARISON_P (SET_SRC (set))
+ && (((GET_MODE_CLASS (mode) == MODE_CC)
+ == (GET_MODE_CLASS (inner_mode) == MODE_CC))
+ || mode == VOIDmode || inner_mode == VOIDmode))
+ {
+ reverse_code = 1;
+ x = SET_SRC (set);
+ }
+ else
+ break;
+ }
+
+ else if (reg_set_p (op0, prev))
+ /* If this sets OP0, but not directly, we have to give up. */
+ break;
+
+ if (x)
+ {
+ /* If the caller is expecting the condition to be valid at INSN,
+ make sure X doesn't change before INSN. */
+ if (valid_at_insn_p)
+ if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
+ break;
+ if (COMPARISON_P (x))
+ code = GET_CODE (x);
+ if (reverse_code)
+ {
+ code = reversed_comparison_code (x, prev);
+ if (code == UNKNOWN)
+ return 0;
+ reverse_code = 0;
+ }
+
+ op0 = XEXP (x, 0), op1 = XEXP (x, 1);
+ if (earliest)
+ *earliest = prev;
+ }
+ }
+
+ /* If the constant is first, put it last. */
+ if (CONSTANT_P (op0))
+ code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
+
+ /* If OP0 is the result of a comparison, we weren't able to find what
+ was really being compared, so fail. */
+ if (!allow_cc_mode
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
+ return 0;
+
+ /* Canonicalize any ordered comparison with integers involving equality
+ if we can do computations in the relevant mode and we do not
+ overflow. */
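+ /* For example, (le x (const_int 4)) becomes (lt x (const_int 5)) and
+ (geu x (const_int 4)) becomes (gtu x (const_int 3)), unless the
+ adjusted constant would wrap around in the mode of the operand. */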
+
+ if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
+ && GET_CODE (op1) == CONST_INT
+ && GET_MODE (op0) != VOIDmode
+ && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
+ {
+ HOST_WIDE_INT const_val = INTVAL (op1);
+ unsigned HOST_WIDE_INT uconst_val = const_val;
+ unsigned HOST_WIDE_INT max_val
+ = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
+
+ switch (code)
+ {
+ case LE:
+ if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
+ code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
+ break;
+
+ /* When cross-compiling, const_val might be sign-extended from
+ BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
+ case GE:
+ if ((HOST_WIDE_INT) (const_val & max_val)
+ != (((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
+ code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
+ break;
+
+ case LEU:
+ if (uconst_val < max_val)
+ code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
+ break;
+
+ case GEU:
+ if (uconst_val != 0)
+ code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ /* Never return CC0; return zero instead. */
+ if (CC0_P (op0))
+ return 0;
+
+ return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
+}
+
+/* Given a jump insn JUMP, return the condition that will cause it to branch
+ to its JUMP_LABEL. If the condition cannot be understood, or is an
+ inequality floating-point comparison which needs to be reversed, 0 will
+ be returned.
+
+ If EARLIEST is nonzero, it is a pointer to a place where the earliest
+ insn used in locating the condition was found. If a replacement test
+ of the condition is desired, it should be placed in front of that
+ insn and we will be sure that the inputs are still valid. If EARLIEST
+ is null, the returned condition will be valid at INSN.
+
+ If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
+ compare CC mode register.
+
+ VALID_AT_INSN_P is the same as for canonicalize_condition. */
+
+rtx
+get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
+{
+ rtx cond;
+ int reverse;
+ rtx set;
+
+ /* If this is not a standard conditional jump, we can't parse it. */
+ if (!JUMP_P (jump)
+ || ! any_condjump_p (jump))
+ return 0;
+ set = pc_set (jump);
+
+ cond = XEXP (SET_SRC (set), 0);
+
+ /* If this branches to JUMP_LABEL when the condition is false, reverse
+ the condition. */
+ reverse
+ = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
+ && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
+
+ return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
+ allow_cc_mode, valid_at_insn_p);
+}
+
+\f
+/* Initialize non_rtx_starting_operands, which is used to speed up
+ for_each_rtx. */
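+/* For example, PLUS has format "ee", so its first rtx operand is at
+   index 0, while CONST_INT has format "w" and gets -1 here. */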
+void
+init_rtlanal (void)
+{
+ int i;
+ for (i = 0; i < NUM_RTX_CODE; i++)
+ {
+ const char *format = GET_RTX_FORMAT (i);
+ const char *first = strpbrk (format, "eEV");
+ non_rtx_starting_operands[i] = first ? first - format : -1;
+ }