     HOST_WIDE_INT *hv;
     int arith;
{
+  unsigned HOST_WIDE_INT signmask;
+
  if (count < 0)
    {
      rshift_double (l1, h1, -count, prec, lv, hv, arith);
      return;
    }

#ifdef SHIFT_COUNT_TRUNCATED
  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;
#endif

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
         ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
      *lv = 0;
    }
  else
    {
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
             | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
      *lv = l1 << count;
    }
+
+  /* Sign extend all bits that are beyond the precision.  */
+
+  signmask = -((prec > HOST_BITS_PER_WIDE_INT
+                ? (*hv >> (prec - HOST_BITS_PER_WIDE_INT - 1))
+                : (*lv >> (prec - 1))) & 1);
+
+  if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
+    ;
+  else if (prec >= HOST_BITS_PER_WIDE_INT)
+    {
+      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
+      *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
+    }
+  else
+    {
+      *hv = signmask;
+      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
+      *lv |= signmask << prec;
+    }
}
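
The mask-and-extend step added above is the usual sign-extension idiom: clear every bit at position PREC and above, then replicate bit PREC-1 into them. A minimal standalone sketch of the same trick on a single word (the names, and the assumption that HOST_WIDE_INT is a 64-bit long long, are mine, not the patch's):

#include <stdio.h>

typedef long long HWI;                /* stand-in for HOST_WIDE_INT */
typedef unsigned long long UHWI;

/* Truncate VAL to PREC bits, then sign extend from bit PREC-1.  */
static HWI
sext (UHWI val, unsigned int prec)
{
  UHWI signmask = -((val >> (prec - 1)) & 1);   /* all ones if negative */
  val &= ~((UHWI) -1 << prec);                  /* keep the low PREC bits */
  val |= signmask << prec;                      /* copy the sign bit upward */
  return (HWI) val;
}

int
main (void)
{
  /* 0x7f shifted left by 1 in 8-bit precision is 0xfe, whose sign bit
     (bit 7) is set, so it must read back as -2.  */
  printf ("%lld\n", sext ((UHWI) 0x7f << 1, 8));   /* prints -2 */
  return 0;
}
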
/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two words in *LV and *HV.  */

void
rshift_double (l1, h1, count, prec, lv, hv, arith)
     unsigned HOST_WIDE_INT l1;
     HOST_WIDE_INT h1, count;
-     unsigned int prec ATTRIBUTE_UNUSED;
+     unsigned int prec;
     unsigned HOST_WIDE_INT *lv;
     HOST_WIDE_INT *hv;
     int arith;
{
  unsigned HOST_WIDE_INT signmask;

  signmask = (arith
              ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
              : 0);

#ifdef SHIFT_COUNT_TRUNCATED
  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;
#endif

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
         ANSI standard, so we must handle this as a special case.  */
-      *hv = signmask;
-      *lv = signmask;
+      *hv = 0;
+      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
-      *hv = signmask;
-      *lv = ((signmask << (2 * HOST_BITS_PER_WIDE_INT - count - 1) << 1)
-             | ((unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT)));
+      *hv = 0;
+      *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
+      *hv = (unsigned HOST_WIDE_INT) h1 >> count;
      *lv = ((l1 >> count)
             | ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
-      *hv = ((signmask << (HOST_BITS_PER_WIDE_INT - count))
-             | ((unsigned HOST_WIDE_INT) h1 >> count));
+    }
+
+  /* Zero / sign extend all bits that are beyond the precision.  */
+
+  if (count >= (HOST_WIDE_INT) prec)
+    {
+      *hv = signmask;
+      *lv = signmask;
+    }
+  else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
+    ;
+  else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
+    {
+      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
+      *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
+    }
+  else
+    {
+      *hv = signmask;
+      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
+      *lv |= signmask << (prec - count);
    }
}
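
The signmask above is both what makes COUNT >= PREC collapse to all-zeros or all-ones, and the standard way to get an arithmetic shift out of C's implementation-defined signed right shift. A standalone sketch on one 64-bit word (my illustration, not the patch's code; the double shift "<< (BITS - count - 1) << 1" is the patch's own idiom for avoiding an undefined shift by the full word size when count == 0):

#include <stdio.h>

typedef unsigned long long UHWI;
#define BITS 64

static UHWI
rshift1 (UHWI x, int count, int arith)
{
  /* All ones if ARITH and the sign bit of X is set, else zero.  */
  UHWI signmask = arith ? -(x >> (BITS - 1)) : (UHWI) 0;

  /* Logical shift, then OR the sign into the vacated high bits.  */
  return (x >> count) | (signmask << (BITS - count - 1) << 1);
}

int
main (void)
{
  UHWI m8 = (UHWI) -8;
  printf ("%lld\n", (long long) rshift1 (m8, 2, 1));   /* prints -2 */
  printf ("%llu\n", rshift1 (m8, 2, 0));   /* prints 4611686018427387902 */
  return 0;
}
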
\f
  enum machine_mode lmode, rmode, nmode;
  int lunsignedp, runsignedp;
  int lvolatilep = 0, rvolatilep = 0;
-  unsigned int alignment;
  tree linner, rinner = NULL_TREE;
  tree mask;
  tree offset;
     do anything if the inner expression is a PLACEHOLDER_EXPR since we
     then will no longer be able to replace it.  */
  linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
-                               &lunsignedp, &lvolatilep, &alignment);
+                               &lunsignedp, &lvolatilep);
  if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
      || offset != 0 || TREE_CODE (linner) == PLACEHOLDER_EXPR)
    return 0;
  /* If this is not a constant, we can only do something if bit positions,
     sizes, and signedness are the same.  */
  rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset, &rmode,
-                               &runsignedp, &rvolatilep, &alignment);
+                               &runsignedp, &rvolatilep);
  if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
      || lunsignedp != runsignedp || offset != 0
  tree mask, inner, offset;
  tree unsigned_type;
  unsigned int precision;
-  unsigned int alignment;
  /* All the optimizations using this function assume integer fields.
     There are problems with FP fields since the type_for_size call
     below can fail for, e.g., XFmode.  */
    }
  inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
-                              punsignedp, pvolatilep, &alignment);
+                              punsignedp, pvolatilep);
  if ((inner == exp && and_mask == 0)
      || *pbitsize < 0 || offset != 0
      || TREE_CODE (inner) == PLACEHOLDER_EXPR)
     multiple of the other, in which case we replace this with either an
     operation of CODE or TCODE.
-     If we have an unsigned type that is not a sizetype, we canot do
+     If we have an unsigned type that is not a sizetype, we cannot do
     this since it will change the result if the original computation
     overflowed.  */
  if ((! TREE_UNSIGNED (ctype)
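
The overflow caveat in that comment is easy to see with a concrete width. A standalone sketch (my example, using 8-bit unsigned arithmetic rather than GCC trees): (x * 16) / 8 and x * (16 / 8) disagree as soon as x * 16 wraps.

#include <stdio.h>

int
main (void)
{
  unsigned char x = 17;

  /* The cast forces the multiply to wrap at 8 bits the way a real
     8-bit unsigned computation would: 17 * 16 == 272 wraps to 16.  */
  unsigned char a = (unsigned char) (x * 16) / 8;   /* 16 / 8 == 2 */
  unsigned char b = x * (16 / 8);                   /* 17 * 2 == 34 */

  printf ("%d %d\n", a, b);                         /* prints "2 34" */
  return 0;
}
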
        }
      return fold_convert (t, arg0);
+    case VIEW_CONVERT_EXPR:
+      if (TREE_CODE (TREE_OPERAND (t, 0)) == VIEW_CONVERT_EXPR)
+        return build1 (VIEW_CONVERT_EXPR, type,
+                       TREE_OPERAND (TREE_OPERAND (t, 0), 0));
+      return t;
+
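The rationale for the new case: VIEW_CONVERT_EXPR only reinterprets a value's bits in another type, so two nested reinterpretations are the same as one direct reinterpretation in the outer type. A hypothetical illustration using memcpy in place of tree nodes (it assumes float, int and unsigned int all have the same size, as the nested VIEW_CONVERT_EXPRs would):

#include <stdio.h>
#include <string.h>

int
main (void)
{
  float f = 1.0f;
  unsigned int mid;   /* the intermediate "view" that folding removes */
  int via_mid, direct;

  memcpy (&mid, &f, sizeof mid);             /* VIEW_CONVERT float -> unsigned */
  memcpy (&via_mid, &mid, sizeof via_mid);   /* VIEW_CONVERT unsigned -> int */
  memcpy (&direct, &f, sizeof direct);       /* VIEW_CONVERT float -> int */

  printf ("%d\n", via_mid == direct);        /* prints 1 */
  return 0;
}
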
#if 0  /* This loses on &"foo"[0].  */
    case ARRAY_REF:
      {
                                fold (build (code, type, imag0, imag1))));
        }
+      /* Optimize comparisons of strlen vs zero to a compare of the
+         first character of the string vs zero.  To wit,
+                strlen(ptr) == 0   =>  *ptr == 0
+                strlen(ptr) != 0   =>  *ptr != 0
+         Other cases should reduce to one of these two (or a constant)
+         due to the return value of strlen being unsigned.  */
+      if ((code == EQ_EXPR || code == NE_EXPR)
+          && integer_zerop (arg1)
+          && TREE_CODE (arg0) == CALL_EXPR
+          && TREE_CODE (TREE_OPERAND (arg0, 0)) == ADDR_EXPR)
+        {
+          tree fndecl = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0);
+          tree arglist;
+
+          if (TREE_CODE (fndecl) == FUNCTION_DECL
+              && DECL_BUILT_IN (fndecl)
+              && DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD
+              && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_STRLEN
+              && (arglist = TREE_OPERAND (arg0, 1))
+              && TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) == POINTER_TYPE
+              && ! TREE_CHAIN (arglist))
+            return fold (build (code, type,
+                                build1 (INDIRECT_REF, char_type_node,
+                                        TREE_VALUE (arglist)),
+                                integer_zero_node));
+        }
+
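At the source level the new fold turns the library call into a single character load. A small self-checking illustration of the equivalences the comment relies on (my example, not part of the patch):

#include <string.h>
#include <assert.h>

int
main (void)
{
  const char *empty = "";
  const char *nonempty = "x";

  /* strlen (p) == 0  <=>  *p == 0, and likewise for !=.  */
  assert ((strlen (empty) == 0) == (*empty == 0));
  assert ((strlen (nonempty) != 0) == (*nonempty != 0));

  /* strlen returns the unsigned size_t, so e.g. 'strlen (p) > 0' is the
     same test as 'strlen (p) != 0'; that is why every other comparison
     against zero reduces to one of the two forms handled above.  */
  assert ((strlen (nonempty) > 0) == (strlen (nonempty) != 0));
  return 0;
}
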
/* From here on, the only cases we handle are when the result is
known to be a constant.