@@ This would also make life easier when this technology is used
@@ for cross-compilers. */
-
/* The entry points in this file are fold, size_int_wide, size_binop
and force_fit_type.
#include "flags.h"
#include "tree.h"
#include "rtl.h"
+#include "expr.h"
#include "tm_p.h"
#include "toplev.h"
#include "ggc.h"
static void encode PARAMS ((HOST_WIDE_INT *,
- HOST_WIDE_INT, HOST_WIDE_INT));
+ unsigned HOST_WIDE_INT,
+ HOST_WIDE_INT));
static void decode PARAMS ((HOST_WIDE_INT *,
- HOST_WIDE_INT *, HOST_WIDE_INT *));
-int div_and_round_double PARAMS ((enum tree_code, int, HOST_WIDE_INT,
- HOST_WIDE_INT, HOST_WIDE_INT,
- HOST_WIDE_INT, HOST_WIDE_INT *,
- HOST_WIDE_INT *, HOST_WIDE_INT *,
+ unsigned HOST_WIDE_INT *,
HOST_WIDE_INT *));
static tree negate_expr PARAMS ((tree));
static tree split_tree PARAMS ((tree, enum tree_code, tree *, tree *,
static tree make_bit_field_ref PARAMS ((tree, tree, int, int, int));
static tree optimize_bit_field_compare PARAMS ((enum tree_code, tree,
tree, tree));
-static tree decode_field_reference PARAMS ((tree, int *, int *,
+static tree decode_field_reference PARAMS ((tree, HOST_WIDE_INT *,
+ HOST_WIDE_INT *,
enum machine_mode *, int *,
int *, tree *, tree *));
static int all_ones_mask_p PARAMS ((tree, int));
#define BRANCH_COST 1
#endif
+#if defined(HOST_EBCDIC)
+/* bit 8 is significant in EBCDIC */
+#define CHARMASK 0xff
+#else
+#define CHARMASK 0x7f
+#endif
+
/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
overflow. Suppose A, B and SUM have the same respective signs as A1, B1,
and SUM1. Then this yields nonzero if overflow occurred during the
static void
encode (words, low, hi)
HOST_WIDE_INT *words;
- HOST_WIDE_INT low, hi;
+ unsigned HOST_WIDE_INT low;
+ HOST_WIDE_INT hi;
{
words[0] = LOWPART (low);
words[1] = HIGHPART (low);
static void
decode (words, low, hi)
HOST_WIDE_INT *words;
- HOST_WIDE_INT *low, *hi;
+ unsigned HOST_WIDE_INT *low;
+ HOST_WIDE_INT *hi;
{
*low = words[0] + words[1] * BASE;
*hi = words[2] + words[3] * BASE;
tree t;
int overflow;
{
- HOST_WIDE_INT low, high;
- register int prec;
+ unsigned HOST_WIDE_INT low;
+ HOST_WIDE_INT high;
+ unsigned int prec;
if (TREE_CODE (t) == REAL_CST)
{
{
TREE_INT_CST_HIGH (t) = 0;
if (prec < HOST_BITS_PER_WIDE_INT)
- TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
+ TREE_INT_CST_LOW (t) &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
}
- /* Unsigned types do not suffer sign extension or overflow. */
- if (TREE_UNSIGNED (TREE_TYPE (t)))
+ /* Unsigned types do not suffer sign extension or overflow unless they
+ are a sizetype. */
+ if (TREE_UNSIGNED (TREE_TYPE (t))
+ && ! (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE
+ && TYPE_IS_SIZETYPE (TREE_TYPE (t))))
return overflow;
/* If the value's sign bit is set, extend the sign. */
if (prec != 2 * HOST_BITS_PER_WIDE_INT
&& (prec > HOST_BITS_PER_WIDE_INT
- ? (TREE_INT_CST_HIGH (t)
- & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
- : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
+ ? 0 != (TREE_INT_CST_HIGH (t)
+ & ((HOST_WIDE_INT) 1
+ << (prec - HOST_BITS_PER_WIDE_INT - 1)))
+ : 0 != (TREE_INT_CST_LOW (t)
+ & ((unsigned HOST_WIDE_INT) 1 << (prec - 1)))))
{
/* Value is negative:
set to 1 all the bits that are outside this type's precision. */
{
TREE_INT_CST_HIGH (t) = -1;
if (prec < HOST_BITS_PER_WIDE_INT)
- TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
+ TREE_INT_CST_LOW (t) |= ((unsigned HOST_WIDE_INT) (-1) << prec);
}
}
int
add_double (l1, h1, l2, h2, lv, hv)
- HOST_WIDE_INT l1, h1, l2, h2;
- HOST_WIDE_INT *lv, *hv;
+ unsigned HOST_WIDE_INT l1, l2;
+ HOST_WIDE_INT h1, h2;
+ unsigned HOST_WIDE_INT *lv;
+ HOST_WIDE_INT *hv;
{
- HOST_WIDE_INT l, h;
+ unsigned HOST_WIDE_INT l;
+ HOST_WIDE_INT h;
l = l1 + l2;
- h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < (unsigned HOST_WIDE_INT) l1);
+ h = h1 + h2 + (l < l1);
*lv = l;
*hv = h;
int
neg_double (l1, h1, lv, hv)
- HOST_WIDE_INT l1, h1;
- HOST_WIDE_INT *lv, *hv;
+ unsigned HOST_WIDE_INT l1;
+ HOST_WIDE_INT h1;
+ unsigned HOST_WIDE_INT *lv;
+ HOST_WIDE_INT *hv;
{
if (l1 == 0)
{
}
else
{
- *lv = - l1;
- *hv = ~ h1;
+ *lv = -l1;
+ *hv = ~h1;
return 0;
}
}
int
mul_double (l1, h1, l2, h2, lv, hv)
- HOST_WIDE_INT l1, h1, l2, h2;
- HOST_WIDE_INT *lv, *hv;
+ unsigned HOST_WIDE_INT l1, l2;
+ HOST_WIDE_INT h1, h2;
+ unsigned HOST_WIDE_INT *lv;
+ HOST_WIDE_INT *hv;
{
HOST_WIDE_INT arg1[4];
HOST_WIDE_INT arg2[4];
HOST_WIDE_INT prod[4 * 2];
register unsigned HOST_WIDE_INT carry;
register int i, j, k;
- HOST_WIDE_INT toplow, tophigh, neglow, neghigh;
+ unsigned HOST_WIDE_INT toplow, neglow;
+ HOST_WIDE_INT tophigh, neghigh;
encode (arg1, l1, h1);
encode (arg2, l2, h2);
- bzero ((char *) prod, sizeof prod);
+ memset ((char *) prod, 0, sizeof prod);
for (i = 0; i < 4; i++)
{
/* Check for overflow by calculating the top half of the answer in full;
it should agree with the low half's sign bit. */
- decode (prod+4, &toplow, &tophigh);
+ decode (prod + 4, &toplow, &tophigh);
if (h1 < 0)
{
neg_double (l2, h2, &neglow, &neghigh);
void
lshift_double (l1, h1, count, prec, lv, hv, arith)
- HOST_WIDE_INT l1, h1, count;
- int prec;
- HOST_WIDE_INT *lv, *hv;
+ unsigned HOST_WIDE_INT l1;
+ HOST_WIDE_INT h1, count;
+ unsigned int prec;
+ unsigned HOST_WIDE_INT *lv;
+ HOST_WIDE_INT *hv;
int arith;
{
if (count < 0)
{
- rshift_double (l1, h1, - count, prec, lv, hv, arith);
+ rshift_double (l1, h1, -count, prec, lv, hv, arith);
return;
}
-
+
#ifdef SHIFT_COUNT_TRUNCATED
if (SHIFT_COUNT_TRUNCATED)
count %= prec;
}
else if (count >= HOST_BITS_PER_WIDE_INT)
{
- *hv = (unsigned HOST_WIDE_INT) l1 << (count - HOST_BITS_PER_WIDE_INT);
+ *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
*lv = 0;
}
else
{
*hv = (((unsigned HOST_WIDE_INT) h1 << count)
- | ((unsigned HOST_WIDE_INT) l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
- *lv = (unsigned HOST_WIDE_INT) l1 << count;
+ | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
+ *lv = l1 << count;
}
}
void
rshift_double (l1, h1, count, prec, lv, hv, arith)
- HOST_WIDE_INT l1, h1, count;
- int prec ATTRIBUTE_UNUSED;
- HOST_WIDE_INT *lv, *hv;
+ unsigned HOST_WIDE_INT l1;
+ HOST_WIDE_INT h1, count;
+ unsigned int prec ATTRIBUTE_UNUSED;
+ unsigned HOST_WIDE_INT *lv;
+ HOST_WIDE_INT *hv;
int arith;
{
unsigned HOST_WIDE_INT signmask;
+
signmask = (arith
? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
: 0);
}
else
{
- *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
+ *lv = ((l1 >> count)
| ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
*hv = ((signmask << (HOST_BITS_PER_WIDE_INT - count))
| ((unsigned HOST_WIDE_INT) h1 >> count));
void
lrotate_double (l1, h1, count, prec, lv, hv)
- HOST_WIDE_INT l1, h1, count;
- int prec;
- HOST_WIDE_INT *lv, *hv;
+ unsigned HOST_WIDE_INT l1;
+ HOST_WIDE_INT h1, count;
+ unsigned int prec;
+ unsigned HOST_WIDE_INT *lv;
+ HOST_WIDE_INT *hv;
{
- HOST_WIDE_INT s1l, s1h, s2l, s2h;
+ unsigned HOST_WIDE_INT s1l, s2l;
+ HOST_WIDE_INT s1h, s2h;
count %= prec;
if (count < 0)
void
rrotate_double (l1, h1, count, prec, lv, hv)
- HOST_WIDE_INT l1, h1, count;
- int prec;
- HOST_WIDE_INT *lv, *hv;
+ unsigned HOST_WIDE_INT l1;
+ HOST_WIDE_INT h1, count;
+ unsigned int prec;
+ unsigned HOST_WIDE_INT *lv;
+ HOST_WIDE_INT *hv;
{
- HOST_WIDE_INT s1l, s1h, s2l, s2h;
+ unsigned HOST_WIDE_INT s1l, s2l;
+ HOST_WIDE_INT s1h, s2h;
count %= prec;
if (count < 0)
lquo, hquo, lrem, hrem)
enum tree_code code;
int uns;
- HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
- HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
- HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
+ unsigned HOST_WIDE_INT lnum_orig; /* num == numerator == dividend */
+ HOST_WIDE_INT hnum_orig;
+ unsigned HOST_WIDE_INT lden_orig; /* den == denominator == divisor */
+ HOST_WIDE_INT hden_orig;
+ unsigned HOST_WIDE_INT *lquo, *lrem;
+ HOST_WIDE_INT *hquo, *hrem;
{
int quo_neg = 0;
HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
HOST_WIDE_INT den[4], quo[4];
register int i, j;
unsigned HOST_WIDE_INT work;
- register unsigned HOST_WIDE_INT carry = 0;
- HOST_WIDE_INT lnum = lnum_orig;
+ unsigned HOST_WIDE_INT carry = 0;
+ unsigned HOST_WIDE_INT lnum = lnum_orig;
HOST_WIDE_INT hnum = hnum_orig;
- HOST_WIDE_INT lden = lden_orig;
+ unsigned HOST_WIDE_INT lden = lden_orig;
HOST_WIDE_INT hden = hden_orig;
int overflow = 0;
- if ((hden == 0) && (lden == 0))
+ if (hden == 0 && lden == 0)
overflow = 1, lden = 1;
/* calculate quotient sign and convert operands to unsigned. */
- if (!uns)
+ if (!uns)
{
if (hnum < 0)
{
quo_neg = ~ quo_neg;
/* (minimum integer) / (-1) is the only overflow case. */
- if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
+ if (neg_double (lnum, hnum, &lnum, &hnum)
+ && ((HOST_WIDE_INT) lden & hden) == -1)
overflow = 1;
}
- if (hden < 0)
+ if (hden < 0)
{
quo_neg = ~ quo_neg;
neg_double (lden, hden, &lden, &hden);
{ /* single precision */
*hquo = *hrem = 0;
/* This unsigned division rounds toward zero. */
- *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
+ *lquo = lnum / lden;
goto finish_up;
}
goto finish_up;
}
- bzero ((char *) quo, sizeof quo);
+ memset ((char *) quo, 0, sizeof quo);
- bzero ((char *) num, sizeof num); /* to zero 9th element */
- bzero ((char *) den, sizeof den);
+ memset ((char *) num, 0, sizeof num); /* to zero 9th element */
+ memset ((char *) den, 0, sizeof den);
- encode (num, lnum, hnum);
+ encode (num, lnum, hnum);
encode (den, lden, hden);
/* Special code for when the divisor < BASE. */
- if (hden == 0 && lden < (HOST_WIDE_INT) BASE)
+ if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
{
/* hnum != 0 already checked. */
for (i = 4 - 1; i >= 0; i--)
{
work = num[i] + carry * BASE;
- quo[i] = work / (unsigned HOST_WIDE_INT) lden;
- carry = work % (unsigned HOST_WIDE_INT) lden;
+ quo[i] = work / lden;
+ carry = work % lden;
}
}
else
{
/* Full double precision division,
with thanks to Don Knuth's "Seminumerical Algorithms". */
- int num_hi_sig, den_hi_sig;
- unsigned HOST_WIDE_INT quo_est, scale;
-
- /* Find the highest non-zero divisor digit. */
- for (i = 4 - 1; ; i--)
- if (den[i] != 0) {
- den_hi_sig = i;
- break;
- }
+ int num_hi_sig, den_hi_sig;
+ unsigned HOST_WIDE_INT quo_est, scale;
- /* Insure that the first digit of the divisor is at least BASE/2.
- This is required by the quotient digit estimation algorithm. */
-
- scale = BASE / (den[den_hi_sig] + 1);
- if (scale > 1) { /* scale divisor and dividend */
- carry = 0;
- for (i = 0; i <= 4 - 1; i++) {
- work = (num[i] * scale) + carry;
- num[i] = LOWPART (work);
- carry = HIGHPART (work);
- } num[4] = carry;
- carry = 0;
- for (i = 0; i <= 4 - 1; i++) {
- work = (den[i] * scale) + carry;
- den[i] = LOWPART (work);
- carry = HIGHPART (work);
- if (den[i] != 0) den_hi_sig = i;
- }
- }
-
- num_hi_sig = 4;
+ /* Find the highest non-zero divisor digit. */
+ for (i = 4 - 1;; i--)
+ if (den[i] != 0)
+ {
+ den_hi_sig = i;
+ break;
+ }
- /* Main loop */
- for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) {
- /* guess the next quotient digit, quo_est, by dividing the first
- two remaining dividend digits by the high order quotient digit.
- quo_est is never low and is at most 2 high. */
- unsigned HOST_WIDE_INT tmp;
+ /* Insure that the first digit of the divisor is at least BASE/2.
+ This is required by the quotient digit estimation algorithm. */
- num_hi_sig = i + den_hi_sig + 1;
- work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
- if (num[num_hi_sig] != den[den_hi_sig])
- quo_est = work / den[den_hi_sig];
- else
- quo_est = BASE - 1;
+ scale = BASE / (den[den_hi_sig] + 1);
+ if (scale > 1)
+ { /* scale divisor and dividend */
+ carry = 0;
+ for (i = 0; i <= 4 - 1; i++)
+ {
+ work = (num[i] * scale) + carry;
+ num[i] = LOWPART (work);
+ carry = HIGHPART (work);
+ }
- /* refine quo_est so it's usually correct, and at most one high. */
- tmp = work - quo_est * den[den_hi_sig];
- if (tmp < BASE
- && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
- quo_est--;
+ num[4] = carry;
+ carry = 0;
+ for (i = 0; i <= 4 - 1; i++)
+ {
+ work = (den[i] * scale) + carry;
+ den[i] = LOWPART (work);
+ carry = HIGHPART (work);
+ if (den[i] != 0) den_hi_sig = i;
+ }
+ }
- /* Try QUO_EST as the quotient digit, by multiplying the
- divisor by QUO_EST and subtracting from the remaining dividend.
- Keep in mind that QUO_EST is the I - 1st digit. */
+ num_hi_sig = 4;
- carry = 0;
- for (j = 0; j <= den_hi_sig; j++)
+ /* Main loop */
+ for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
{
- work = quo_est * den[j] + carry;
- carry = HIGHPART (work);
- work = num[i + j] - LOWPART (work);
- num[i + j] = LOWPART (work);
- carry += HIGHPART (work) != 0;
- }
+ /* Guess the next quotient digit, quo_est, by dividing the first
+ two remaining dividend digits by the high order quotient digit.
+ quo_est is never low and is at most 2 high. */
+ unsigned HOST_WIDE_INT tmp;
+
+ num_hi_sig = i + den_hi_sig + 1;
+ work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
+ if (num[num_hi_sig] != den[den_hi_sig])
+ quo_est = work / den[den_hi_sig];
+ else
+ quo_est = BASE - 1;
- /* if quo_est was high by one, then num[i] went negative and
- we need to correct things. */
+ /* Refine quo_est so it's usually correct, and at most one high. */
+ tmp = work - quo_est * den[den_hi_sig];
+ if (tmp < BASE
+ && (den[den_hi_sig - 1] * quo_est
+ > (tmp * BASE + num[num_hi_sig - 2])))
+ quo_est--;
- if (num[num_hi_sig] < carry)
- {
- quo_est--;
- carry = 0; /* add divisor back in */
+ /* Try QUO_EST as the quotient digit, by multiplying the
+ divisor by QUO_EST and subtracting from the remaining dividend.
+ Keep in mind that QUO_EST is the I - 1st digit. */
+
+ carry = 0;
for (j = 0; j <= den_hi_sig; j++)
{
- work = num[i + j] + den[j] + carry;
+ work = quo_est * den[j] + carry;
carry = HIGHPART (work);
+ work = num[i + j] - LOWPART (work);
num[i + j] = LOWPART (work);
+ carry += HIGHPART (work) != 0;
}
- num [num_hi_sig] += carry;
- }
- /* store the quotient digit. */
- quo[i] = quo_est;
+ /* If quo_est was high by one, then num[i] went negative and
+ we need to correct things. */
+ if (num[num_hi_sig] < carry)
+ {
+ quo_est--;
+ carry = 0; /* add divisor back in */
+ for (j = 0; j <= den_hi_sig; j++)
+ {
+ work = num[i + j] + den[j] + carry;
+ carry = HIGHPART (work);
+ num[i + j] = LOWPART (work);
+ }
+
+ num [num_hi_sig] += carry;
+ }
+
+ /* Store the quotient digit. */
+ quo[i] = quo_est;
+ }
}
- }
decode (quo, lquo, hquo);
add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
lquo, hquo);
}
- else return overflow;
+ else
+ return overflow;
break;
case CEIL_DIV_EXPR:
add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
lquo, hquo);
}
- else return overflow;
+ else
+ return overflow;
break;
-
+
case ROUND_DIV_EXPR:
case ROUND_MOD_EXPR: /* round to closest integer */
{
- HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
- HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;
-
- /* get absolute values */
- if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
- if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);
-
- /* if (2 * abs (lrem) >= abs (lden)) */
+ unsigned HOST_WIDE_INT labs_rem = *lrem;
+ HOST_WIDE_INT habs_rem = *hrem;
+ unsigned HOST_WIDE_INT labs_den = lden, ltwice;
+ HOST_WIDE_INT habs_den = hden, htwice;
+
+ /* Get absolute values */
+ if (*hrem < 0)
+ neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
+ if (hden < 0)
+ neg_double (lden, hden, &labs_den, &habs_den);
+
+ /* If (2 * abs (lrem) >= abs (lden)) */
mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
labs_rem, habs_rem, <wice, &htwice);
+
if (((unsigned HOST_WIDE_INT) habs_den
< (unsigned HOST_WIDE_INT) htwice)
|| (((unsigned HOST_WIDE_INT) habs_den
== (unsigned HOST_WIDE_INT) htwice)
- && ((HOST_WIDE_INT unsigned) labs_den
- < (unsigned HOST_WIDE_INT) ltwice)))
+ && (labs_den < ltwice)))
{
if (*hquo < 0)
/* quo = quo - 1; */
add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
lquo, hquo);
}
- else return overflow;
+ else
+ return overflow;
}
break;
unsigned mantissa1 : 20;
unsigned exponent : 11;
unsigned sign : 1;
- } big_endian;
+ } big_endian;
} u;
u.d = dconstm1;
unsigned mantissa1 : 20;
unsigned exponent : 11;
unsigned sign : 1;
- } big_endian;
+ } big_endian;
} u;
u.d = dconstm1;
unsigned mantissa1 : 20;
unsigned exponent : 11;
unsigned sign : 1;
- } big_endian;
+ } big_endian;
} u;
u.d = dconstm1;
return 1;
}
-/* Convert C9X hexadecimal floating point string constant S. Return
+/* Convert C99 hexadecimal floating point string constant S. Return
real value type in mode MODE. This function uses the host computer's
floating point arithmetic when there is no REAL_ARITHMETIC. */
char *s;
enum machine_mode mode;
{
- REAL_VALUE_TYPE ip;
- char *p = s;
- unsigned HOST_WIDE_INT low, high;
- int shcount, nrmcount, k;
- int sign, expsign, isfloat;
- int lost = 0;/* Nonzero low order bits shifted out and discarded. */
- int frexpon = 0; /* Bits after the decimal point. */
- int expon = 0; /* Value of exponent. */
- int decpt = 0; /* How many decimal points. */
- int gotp = 0; /* How many P's. */
- char c;
-
- isfloat = 0;
- expsign = 1;
- ip = 0.0;
-
- while (*p == ' ' || *p == '\t')
- ++p;
-
- /* Sign, if any, comes first. */
- sign = 1;
- if (*p == '-')
- {
- sign = -1;
- ++p;
- }
-
- /* The string is supposed to start with 0x or 0X . */
- if (*p == '0')
- {
- ++p;
- if (*p == 'x' || *p == 'X')
- ++p;
- else
- abort ();
- }
- else
- abort ();
-
- while (*p == '0')
- ++p;
-
- high = 0;
- low = 0;
- shcount = 0;
- while ((c = *p) != '\0')
- {
- if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F')
- || (c >= 'a' && c <= 'f'))
- {
- k = c & 0x7f;
- if (k >= 'a')
- k = k - 'a' + 10;
- else if (k >= 'A')
- k = k - 'A' + 10;
- else
- k = k - '0';
-
- if ((high & 0xf0000000) == 0)
- {
- high = (high << 4) + ((low >> 28) & 15);
- low = (low << 4) + k;
- shcount += 4;
- if (decpt)
- frexpon += 4;
- }
- else
- {
- /* Record nonzero lost bits. */
- lost |= k;
- if (! decpt)
- frexpon -= 4;
- }
- ++p;
- }
- else if ( c == '.')
- {
- ++decpt;
- ++p;
- }
-
- else if (c == 'p' || c == 'P')
- {
- ++gotp;
- ++p;
- /* Sign of exponent. */
- if (*p == '-')
- {
- expsign = -1;
- ++p;
- }
-
- /* Value of exponent.
- The exponent field is a decimal integer. */
- while (ISDIGIT(*p))
- {
- k = (*p++ & 0x7f) - '0';
- expon = 10 * expon + k;
- }
-
- expon *= expsign;
- /* F suffix is ambiguous in the significand part
- so it must appear after the decimal exponent field. */
- if (*p == 'f' || *p == 'F')
- {
- isfloat = 1;
- ++p;
- break;
- }
- }
-
- else if (c == 'l' || c == 'L')
- {
- ++p;
- break;
- }
- else
- break;
- }
-
- /* Abort if last character read was not legitimate. */
- c = *p;
- if ((c != '\0' && c != ' ' && c != '\n' && c != '\r') || (decpt > 1))
- abort ();
-
- /* There must be either one decimal point or one p. */
- if (decpt == 0 && gotp == 0)
- abort ();
-
- shcount -= 4;
- if (high == 0 && low == 0)
- return dconst0;
-
- /* Normalize. */
- nrmcount = 0;
- if (high == 0)
- {
- high = low;
- low = 0;
- nrmcount += 32;
- }
-
- /* Leave a high guard bit for carry-out. */
- if ((high & 0x80000000) != 0)
- {
- lost |= low & 1;
- low = (low >> 1) | (high << 31);
- high = high >> 1;
- nrmcount -= 1;
- }
-
- if ((high & 0xffff8000) == 0)
- {
- high = (high << 16) + ((low >> 16) & 0xffff);
- low = low << 16;
- nrmcount += 16;
- }
-
- while ((high & 0xc0000000) == 0)
- {
- high = (high << 1) + ((low >> 31) & 1);
- low = low << 1;
- nrmcount += 1;
- }
-
- if (isfloat || GET_MODE_SIZE(mode) == UNITS_PER_WORD)
- {
- /* Keep 24 bits precision, bits 0x7fffff80.
- Rounding bit is 0x40. */
- lost = lost | low | (high & 0x3f);
- low = 0;
- if (high & 0x40)
- {
- if ((high & 0x80) || lost)
- high += 0x40;
- }
- high &= 0xffffff80;
- }
- else
- {
- /* We need real.c to do long double formats, so here default
- to double precision. */
+ REAL_VALUE_TYPE ip;
+ char *p = s;
+ unsigned HOST_WIDE_INT low, high;
+ int shcount, nrmcount, k;
+ int sign, expsign, isfloat;
+ int lost = 0;/* Nonzero low order bits shifted out and discarded. */
+ int frexpon = 0; /* Bits after the decimal point. */
+ int expon = 0; /* Value of exponent. */
+ int decpt = 0; /* How many decimal points. */
+ int gotp = 0; /* How many P's. */
+ char c;
+
+ isfloat = 0;
+ expsign = 1;
+ ip = 0.0;
+
+ while (*p == ' ' || *p == '\t')
+ ++p;
+
+ /* Sign, if any, comes first. */
+ sign = 1;
+ if (*p == '-')
+ {
+ sign = -1;
+ ++p;
+ }
+
+ /* The string is supposed to start with 0x or 0X . */
+ if (*p == '0')
+ {
+ ++p;
+ if (*p == 'x' || *p == 'X')
+ ++p;
+ else
+ abort ();
+ }
+ else
+ abort ();
+
+ while (*p == '0')
+ ++p;
+
+ high = 0;
+ low = 0;
+ shcount = 0;
+ while ((c = *p) != '\0')
+ {
+ if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F')
+ || (c >= 'a' && c <= 'f'))
+ {
+ k = c & CHARMASK;
+ if (k >= 'a' && k <= 'f')
+ k = k - 'a' + 10;
+ else if (k >= 'A')
+ k = k - 'A' + 10;
+ else
+ k = k - '0';
+
+ if ((high & 0xf0000000) == 0)
+ {
+ high = (high << 4) + ((low >> 28) & 15);
+ low = (low << 4) + k;
+ shcount += 4;
+ if (decpt)
+ frexpon += 4;
+ }
+ else
+ {
+ /* Record nonzero lost bits. */
+ lost |= k;
+ if (! decpt)
+ frexpon -= 4;
+ }
+ ++p;
+ }
+ else if (c == '.')
+ {
+ ++decpt;
+ ++p;
+ }
+
+ else if (c == 'p' || c == 'P')
+ {
+ ++gotp;
+ ++p;
+ /* Sign of exponent. */
+ if (*p == '-')
+ {
+ expsign = -1;
+ ++p;
+ }
+
+ /* Value of exponent.
+ The exponent field is a decimal integer. */
+ while (ISDIGIT (*p))
+ {
+ k = (*p++ & CHARMASK) - '0';
+ expon = 10 * expon + k;
+ }
+
+ expon *= expsign;
+ /* F suffix is ambiguous in the significand part
+ so it must appear after the decimal exponent field. */
+ if (*p == 'f' || *p == 'F')
+ {
+ isfloat = 1;
+ ++p;
+ break;
+ }
+ }
+
+ else if (c == 'l' || c == 'L')
+ {
+ ++p;
+ break;
+ }
+ else
+ break;
+ }
+
+ /* Abort if last character read was not legitimate. */
+ c = *p;
+ if ((c != '\0' && c != ' ' && c != '\n' && c != '\r') || (decpt > 1))
+ abort ();
+
+ /* There must be either one decimal point or one p. */
+ if (decpt == 0 && gotp == 0)
+ abort ();
+
+ shcount -= 4;
+ if (high == 0 && low == 0)
+ return dconst0;
+
+ /* Normalize. */
+ nrmcount = 0;
+ if (high == 0)
+ {
+ high = low;
+ low = 0;
+ nrmcount += 32;
+ }
+
+ /* Leave a high guard bit for carry-out. */
+ if ((high & 0x80000000) != 0)
+ {
+ lost |= low & 1;
+ low = (low >> 1) | (high << 31);
+ high = high >> 1;
+ nrmcount -= 1;
+ }
+
+ if ((high & 0xffff8000) == 0)
+ {
+ high = (high << 16) + ((low >> 16) & 0xffff);
+ low = low << 16;
+ nrmcount += 16;
+ }
+
+ while ((high & 0xc0000000) == 0)
+ {
+ high = (high << 1) + ((low >> 31) & 1);
+ low = low << 1;
+ nrmcount += 1;
+ }
+
+ if (isfloat || GET_MODE_SIZE (mode) == UNITS_PER_WORD)
+ {
+ /* Keep 24 bits precision, bits 0x7fffff80.
+ Rounding bit is 0x40. */
+ lost = lost | low | (high & 0x3f);
+ low = 0;
+ if (high & 0x40)
+ {
+ if ((high & 0x80) || lost)
+ high += 0x40;
+ }
+ high &= 0xffffff80;
+ }
+ else
+ {
+ /* We need real.c to do long double formats, so here default
+ to double precision. */
#if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
- /* IEEE double.
- Keep 53 bits precision, bits 0x7fffffff fffffc00.
- Rounding bit is low word 0x200. */
- lost = lost | (low & 0x1ff);
- if (low & 0x200)
- {
- if ((low & 0x400) || lost)
- {
- low = (low + 0x200) & 0xfffffc00;
- if (low == 0)
- high += 1;
- }
- }
- low &= 0xfffffc00;
+ /* IEEE double.
+ Keep 53 bits precision, bits 0x7fffffff fffffc00.
+ Rounding bit is low word 0x200. */
+ lost = lost | (low & 0x1ff);
+ if (low & 0x200)
+ {
+ if ((low & 0x400) || lost)
+ {
+ low = (low + 0x200) & 0xfffffc00;
+ if (low == 0)
+ high += 1;
+ }
+ }
+ low &= 0xfffffc00;
#else
- /* Assume it's a VAX with 56-bit significand,
- bits 0x7fffffff ffffff80. */
- lost = lost | (low & 0x7f);
- if (low & 0x40)
- {
- if ((low & 0x80) || lost)
- {
- low = (low + 0x40) & 0xffffff80;
- if (low == 0)
- high += 1;
- }
- }
- low &= 0xffffff80;
+ /* Assume it's a VAX with 56-bit significand,
+ bits 0x7fffffff ffffff80. */
+ lost = lost | (low & 0x7f);
+ if (low & 0x40)
+ {
+ if ((low & 0x80) || lost)
+ {
+ low = (low + 0x40) & 0xffffff80;
+ if (low == 0)
+ high += 1;
+ }
+ }
+ low &= 0xffffff80;
#endif
- }
+ }
- ip = (double) high;
- ip = REAL_VALUE_LDEXP (ip, 32) + (double) low;
- /* Apply shifts and exponent value as power of 2. */
- ip = REAL_VALUE_LDEXP (ip, expon - (nrmcount + frexpon));
+ ip = (double) high;
+ ip = REAL_VALUE_LDEXP (ip, 32) + (double) low;
+ /* Apply shifts and exponent value as power of 2. */
+ ip = REAL_VALUE_LDEXP (ip, expon - (nrmcount + frexpon));
- if (sign < 0)
- ip = -ip;
- return ip;
+ if (sign < 0)
+ ip = -ip;
+ return ip;
}
#endif /* no REAL_ARITHMETIC */
register tree arg1, arg2;
int notrunc, forsize;
{
- HOST_WIDE_INT int1l, int1h, int2l, int2h;
- HOST_WIDE_INT low, hi;
- HOST_WIDE_INT garbagel, garbageh;
+ unsigned HOST_WIDE_INT int1l, int2l;
+ HOST_WIDE_INT int1h, int2h;
+ unsigned HOST_WIDE_INT low;
+ HOST_WIDE_INT hi;
+ unsigned HOST_WIDE_INT garbagel;
+ HOST_WIDE_INT garbageh;
register tree t;
int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
int overflow = 0;
break;
case RSHIFT_EXPR:
- int2l = - int2l;
+ int2l = -int2l;
case LSHIFT_EXPR:
/* It's unclear from the C standard whether shifts can overflow.
The following code ignores overflow; perhaps a C standard
interpretation ruling is needed. */
- lshift_double (int1l, int1h, int2l,
- TYPE_PRECISION (TREE_TYPE (arg1)),
- &low, &hi,
- !uns);
+ lshift_double (int1l, int1h, int2l, TYPE_PRECISION (TREE_TYPE (arg1)),
+ &low, &hi, !uns);
no_overflow = 1;
break;
case RROTATE_EXPR:
int2l = - int2l;
case LROTATE_EXPR:
- lrotate_double (int1l, int1h, int2l,
- TYPE_PRECISION (TREE_TYPE (arg1)),
+ lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (TREE_TYPE (arg1)),
&low, &hi);
break;
case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
case EXACT_DIV_EXPR:
/* This is a shortcut for a common special case. */
- if (int2h == 0 && int2l > 0
+ if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
&& ! TREE_CONSTANT_OVERFLOW (arg1)
&& ! TREE_CONSTANT_OVERFLOW (arg2)
- && int1h == 0 && int1l >= 0)
+ && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
{
if (code == CEIL_DIV_EXPR)
int1l += int2l - 1;
+
low = int1l / int2l, hi = 0;
break;
}
/* ... fall through ... */
- case ROUND_DIV_EXPR:
+ case ROUND_DIV_EXPR:
if (int2h == 0 && int2l == 1)
{
low = int1l, hi = int1h;
case TRUNC_MOD_EXPR:
case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
/* This is a shortcut for a common special case. */
- if (int2h == 0 && int2l > 0
+ if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
&& ! TREE_CONSTANT_OVERFLOW (arg1)
&& ! TREE_CONSTANT_OVERFLOW (arg2)
- && int1h == 0 && int1l >= 0)
+ && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
{
if (code == CEIL_MOD_EXPR)
int1l += int2l - 1;
/* ... fall through ... */
- case ROUND_MOD_EXPR:
+ case ROUND_MOD_EXPR:
overflow = div_and_round_double (code, uns,
int1l, int1h, int2l, int2h,
&garbagel, &garbageh, &low, &hi);
< (unsigned HOST_WIDE_INT) int2h)
|| (((unsigned HOST_WIDE_INT) int1h
== (unsigned HOST_WIDE_INT) int2h)
- && ((unsigned HOST_WIDE_INT) int1l
- < (unsigned HOST_WIDE_INT) int2l)));
+ && int1l < int2l));
else
- low = ((int1h < int2h)
- || ((int1h == int2h)
- && ((unsigned HOST_WIDE_INT) int1l
- < (unsigned HOST_WIDE_INT) int2l)));
+ low = (int1h < int2h
+ || (int1h == int2h && int1l < int2l));
if (low == (code == MIN_EXPR))
low = int1l, hi = int1h;
abort ();
}
- if (forsize && hi == 0 && low >= 0 && low < 1000)
+ if (forsize && hi == 0 && low < 10000
+ && overflow == 0 && ! TREE_OVERFLOW (arg1) && ! TREE_OVERFLOW (arg2))
return size_int_type_wide (low, TREE_TYPE (arg1));
else
{
/* Define input and output argument for const_binop_1. */
struct cb_args
{
- enum tree_code code; /* Input: tree code for operation*/
- tree type; /* Input: tree type for operation. */
- REAL_VALUE_TYPE d1, d2; /* Input: floating point operands. */
- tree t; /* Output: constant for result. */
+ enum tree_code code; /* Input: tree code for operation. */
+ tree type; /* Input: tree type for operation. */
+ REAL_VALUE_TYPE d1, d2; /* Input: floating point operands. */
+ tree t; /* Output: constant for result. */
};
/* Do the real arithmetic for const_binop while protected by a
static void
const_binop_1 (data)
- PTR data;
+ PTR data;
{
struct cb_args *args = (struct cb_args *) data;
REAL_VALUE_TYPE value;
case PLUS_EXPR:
value = args->d1 + args->d2;
break;
-
+
case MINUS_EXPR:
value = args->d1 - args->d2;
break;
-
+
case MULT_EXPR:
value = args->d1 * args->d2;
break;
-
+
case RDIV_EXPR:
#ifndef REAL_INFINITY
if (args->d2 == 0)
abort ();
#endif
-
+
value = args->d1 / args->d2;
break;
-
+
case MIN_EXPR:
value = MIN (args->d1, args->d2);
break;
-
+
case MAX_EXPR:
value = MAX (args->d1, args->d2);
break;
-
+
default:
abort ();
}
register tree arg1, arg2;
int notrunc;
{
- STRIP_NOPS (arg1); STRIP_NOPS (arg2);
+ STRIP_NOPS (arg1);
+ STRIP_NOPS (arg2);
if (TREE_CODE (arg1) == INTEGER_CST)
return int_const_binop (code, arg1, arg2, notrunc, 0);
args.d1 = d1;
args.d2 = d2;
args.code = code;
-
+
if (do_float_handler (const_binop_1, (PTR) &args))
/* Receive output from const_binop_1. */
t = args.t;
tree type;
{
/* Type-size nodes already made for small sizes. */
- static tree size_table[2 * HOST_BITS_PER_WIDE_INT + 1];
+ static tree size_table[2048 + 1];
static int init_p = 0;
tree t;
-
- if (ggc_p && ! init_p)
+
+ if (! init_p)
{
ggc_add_tree_root ((tree *) size_table,
sizeof size_table / sizeof (tree));
/* If this is a positive number that fits in the table we use to hold
cached entries, see if it is already in the table and put it there
if not. */
- if (number >= 0
- && number < (int) (sizeof size_table / sizeof size_table[0]) / 2)
+ if (number >= 0 && number < (int) ARRAY_SIZE (size_table))
{
if (size_table[number] != 0)
for (t = size_table[number]; t != 0; t = TREE_CHAIN (t))
if (TREE_TYPE (t) == type)
return t;
- if (! ggc_p)
- {
- /* Make this a permanent node. */
- push_obstacks_nochange ();
- end_temporary_allocation ();
- }
-
t = build_int_2 (number, 0);
TREE_TYPE (t) = type;
TREE_CHAIN (t) = size_table[number];
size_table[number] = t;
- if (! ggc_p)
- pop_obstacks ();
-
return t;
}
{
tree type = TREE_TYPE (arg0);
- if (type != TREE_TYPE (arg1)
- || TREE_CODE (type) != INTEGER_TYPE || ! TYPE_IS_SIZETYPE (type))
+ if (TREE_CODE (type) != INTEGER_TYPE || ! TYPE_IS_SIZETYPE (type)
+ || type != TREE_TYPE (arg1))
abort ();
/* Handle the special case of two integer constants faster. */
tree type = TREE_TYPE (arg0);
tree ctype;
- if (TREE_TYPE (arg1) != type || TREE_CODE (type) != INTEGER_TYPE
- || ! TYPE_IS_SIZETYPE (type))
+ if (TREE_CODE (type) != INTEGER_TYPE || ! TYPE_IS_SIZETYPE (type)
+ || type != TREE_TYPE (arg1))
abort ();
/* If the type is already signed, just do the simple thing. */
static void
fold_convert_1 (data)
- PTR data;
+ PTR data;
{
- struct fc_args * args = (struct fc_args *) data;
+ struct fc_args *args = (struct fc_args *) data;
args->t = build_real (args->type,
real_value_truncate (TYPE_MODE (args->type),
/* If we are trying to make a sizetype for a small integer, use
size_int to pick up cached types to reduce duplicate nodes. */
- if (TREE_CODE (type) == INTEGER_CST && TYPE_IS_SIZETYPE (type)
- && TREE_INT_CST_HIGH (arg1) == 0
- && TREE_INT_CST_LOW (arg1) >= 0
- && TREE_INT_CST_LOW (arg1) < 1000)
+ if (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
+ && compare_tree_int (arg1, 10000) < 0)
return size_int_type_wide (TREE_INT_CST_LOW (arg1), type);
/* Given an integer constant, make new constant with new type,
if (TREE_CODE (arg1) == REAL_CST)
{
struct fc_args args;
-
+
if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
{
t = arg1;
/* Setup input for fold_convert_1() */
args.arg1 = arg1;
args.type = type;
-
+
if (do_float_handler (fold_convert_1, (PTR) &args))
{
/* Receive output from fold_convert_1() */
case INTEGER_CST:
return (! TREE_CONSTANT_OVERFLOW (arg0)
&& ! TREE_CONSTANT_OVERFLOW (arg1)
- && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
- && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1));
+ && tree_int_cst_equal (arg0, arg1));
case REAL_CST:
return (! TREE_CONSTANT_OVERFLOW (arg0)
if (TREE_CODE (arg0) == RTL_EXPR)
return rtx_equal_p (RTL_EXPR_RTL (arg0), RTL_EXPR_RTL (arg1));
return 0;
-
+
default:
return 0;
}
}
\f
/* Similar to operand_equal_p, but see if ARG0 might have been made by
- shorten_compare from ARG1 when ARG1 was being compared with OTHER.
+ shorten_compare from ARG1 when ARG1 was being compared with OTHER.
When in doubt, return 0. */
-static int
+static int
operand_equal_for_comparison_p (arg0, arg1, other)
tree arg0, arg1;
tree other;
{
int unsignedp1, unsignedpo;
tree primarg0, primarg1, primother;
- unsigned correct_width;
+ unsigned int correct_width;
if (operand_equal_p (arg0, arg1, 0))
return 1;
and see if the inner values are the same. This removes any
signedness comparison, which doesn't matter here. */
primarg0 = arg0, primarg1 = arg1;
- STRIP_NOPS (primarg0); STRIP_NOPS (primarg1);
+ STRIP_NOPS (primarg0);
+ STRIP_NOPS (primarg1);
if (operand_equal_p (primarg0, primarg1, 0))
return 1;
/* Make sure shorter operand is extended the right way
to match the longer operand. */
primarg1 = convert (signed_or_unsigned_type (unsignedp1,
- TREE_TYPE (primarg1)),
- primarg1);
+ TREE_TYPE (primarg1)),
+ primarg1);
if (operand_equal_p (arg0, convert (type, primarg1), 0))
return 1;
&& twoval_comparison_p (TREE_OPERAND (arg, 2),
cval1, cval2, save_p));
return 0;
-
+
case '<':
/* First see if we can handle the first operand, then the second. For
the second operand, we know *CVAL1 can't be zero. It must be that
return pedantic_non_lvalue (t);
}
-
-
\f
/* Return a simplified tree node for the truth-negation of ARG. This
never alters ARG itself. We assume that ARG is an operation that
switch (code)
{
case INTEGER_CST:
- return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
- && TREE_INT_CST_HIGH (arg) == 0, 0));
+ return convert (type, build_int_2 (integer_zerop (arg), 0));
case TRUTH_AND_EXPR:
return build (TRUTH_OR_EXPR, type,
tree compare_type;
tree lhs, rhs;
{
- int lbitpos, lbitsize, rbitpos, rbitsize, nbitpos, nbitsize;
+ HOST_WIDE_INT lbitpos, lbitsize, rbitpos, rbitsize, nbitpos, nbitsize;
tree type = TREE_TYPE (lhs);
tree signed_type, unsigned_type;
int const_p = TREE_CODE (rhs) == INTEGER_CST;
enum machine_mode lmode, rmode, nmode;
int lunsignedp, runsignedp;
int lvolatilep = 0, rvolatilep = 0;
- int alignment;
+ unsigned int alignment;
tree linner, rinner = NULL_TREE;
tree mask;
tree offset;
error case below. If we didn't, we might generate wrong code.
For unsigned fields, the constant shifted right by the field length should
- be all zero. For signed fields, the high-order bits should agree with
+ be all zero. For signed fields, the high-order bits should agree with
the sign bit. */
if (lunsignedp)
decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
pvolatilep, pmask, pand_mask)
tree exp;
- int *pbitsize, *pbitpos;
+ HOST_WIDE_INT *pbitsize, *pbitpos;
enum machine_mode *pmode;
int *punsignedp, *pvolatilep;
tree *pmask;
tree and_mask = 0;
tree mask, inner, offset;
tree unsigned_type;
- int precision;
- int alignment;
+ unsigned int precision;
+ unsigned int alignment;
- /* All the optimizations using this function assume integer fields.
+ /* All the optimizations using this function assume integer fields.
There are problems with FP fields since the type_for_size call
below can fail for, e.g., XFmode. */
if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
return 0;
}
-
inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
punsignedp, pvolatilep, &alignment);
if ((inner == exp && and_mask == 0)
|| *pbitsize < 0 || offset != 0
|| TREE_CODE (inner) == PLACEHOLDER_EXPR)
return 0;
-
+
/* Compute the mask to access the bitfield. */
unsigned_type = type_for_size (*pbitsize, 1);
precision = TYPE_PRECISION (unsigned_type);
int size;
{
tree type = TREE_TYPE (mask);
- int precision = TYPE_PRECISION (type);
+ unsigned int precision = TYPE_PRECISION (type);
tree tmask;
tmask = build_int_2 (~0, ~0);
TREE_TYPE (tmask) = signed_type (type);
force_fit_type (tmask, 0);
return
- tree_int_cst_equal (mask,
+ tree_int_cst_equal (mask,
const_binop (RSHIFT_EXPR,
const_binop (LSHIFT_EXPR, tmask,
size_int (precision - size),
/* Subroutine for fold_truthop: determine if an operand is simple enough
to be evaluated unconditionally. */
-static int
+static int
simple_operand_p (exp)
tree exp;
{
exp = TREE_OPERAND (exp, 0);
return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
- || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
+ || (DECL_P (exp)
&& ! TREE_ADDRESSABLE (exp)
&& ! TREE_THIS_VOLATILE (exp)
&& ! DECL_NONLOCAL (exp)
try to change a logical combination of comparisons into a range test.
For example, both
- X == 2 && X == 3 && X == 4 && X == 5
+ X == 2 || X == 3 || X == 4 || X == 5
and
X >= 2 && X <= 5
are converted to
return convert (type, result ? integer_one_node : integer_zero_node);
}
-\f
+\f
/* Given EXP, a logical expression, set the range it is testing into
variables denoted by PIN_P, PLOW, and PHIGH. Return the expression
- actually being tested. *PLOW and *PHIGH will have be made the same type
+ actually being tested. *PLOW and *PHIGH will be made of the same type
as the returned expression. If EXP is not a comparison, we will most
likely not be returning a useful value and range. */
if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
{
arg0 = TREE_OPERAND (exp, 0);
- if (TREE_CODE_CLASS (code) == '<'
+ if (TREE_CODE_CLASS (code) == '<'
|| TREE_CODE_CLASS (code) == '1'
|| TREE_CODE_CLASS (code) == '2')
type = TREE_TYPE (arg0);
- if (TREE_CODE_CLASS (code) == '2'
+ if (TREE_CODE_CLASS (code) == '2'
|| TREE_CODE_CLASS (code) == '<'
- || (TREE_CODE_CLASS (code) == 'e'
- && tree_code_length[(int) code] > 1))
+ || (TREE_CODE_CLASS (code) == 'e'
+ && TREE_CODE_LENGTH (code) > 1))
arg1 = TREE_OPERAND (exp, 1);
}
low = range_binop (PLUS_EXPR, type, n_high, 0,
integer_one_node, 0);
high = range_binop (MINUS_EXPR, type, n_low, 0,
- integer_one_node, 0);
- in_p = ! in_p;
+ integer_one_node, 0);
+
+ /* If the range is of the form +/- [ x+1, x ], we won't
+ be able to normalize it. But then, it represents the
+ whole range or the empty set, so make it
+ +/- [ -, - ]. */
+ if (tree_int_cst_equal (n_low, low)
+ && tree_int_cst_equal (n_high, high))
+ low = high = 0;
+ else
+ in_p = ! in_p;
}
else
low = n_low, high = n_high;
high_positive = fold (build (RSHIFT_EXPR, type,
convert (type, high_positive),
convert (type, integer_one_node)));
-
+
/* If the low bound is specified, "and" the range with the
range for which the original unsigned value will be
positive. */
return 0;
}
\f
-/* Given two ranges, see if we can merge them into one. Return 1 if we
+/* Given two ranges, see if we can merge them into one. Return 1 if we
can, 0 if we can't. Set the output range into the specified parameters. */
static int
/* Make range 0 be the range that starts first, or ends last if they
start at the same value. Swap them if it isn't. */
- if (integer_onep (range_binop (GT_EXPR, integer_type_node,
+ if (integer_onep (range_binop (GT_EXPR, integer_type_node,
low0, 0, low1, 0))
|| (lowequal
&& integer_onep (range_binop (GT_EXPR, integer_type_node,
{
in_p = 1, high = high0;
low = range_binop (PLUS_EXPR, NULL_TREE, high1, 0,
- integer_one_node, 0);
+ integer_one_node, 0);
}
else if (! subset || highequal)
{
short-circuited branch and the underlying object on both sides
is the same, make a non-short-circuit operation. */
else if (BRANCH_COST >= 2
+ && lhs != 0 && rhs != 0
&& (TREE_CODE (exp) == TRUTH_ANDIF_EXPR
|| TREE_CODE (exp) == TRUTH_ORIF_EXPR)
&& operand_equal_p (lhs, rhs, 0))
/* We must use a signed type in order to get an arithmetic right shift.
However, we must also avoid introducing accidental overflows, so that
- a subsequent call to integer_zerop will work. Hence we must
+ a subsequent call to integer_zerop will work. Hence we must
do the type conversion here. At this point, the constant is either
zero or one, and the conversion to a signed type can never overflow.
We could get an overflow if this conversion is done anywhere else. */
enum tree_code code;
tree truth_type, lhs, rhs;
{
- /* If this is the "or" of two comparisons, we can do something if we
+ /* If this is the "or" of two comparisons, we can do something if
the comparisons are NE_EXPR. If this is the "and", we can do something
- if the comparisons are EQ_EXPR. I.e.,
+ if the comparisons are EQ_EXPR. I.e.,
(a->b == 2 && a->c == 4) can become (a->new == NEW).
WANTED_CODE is this operation code. For single bit fields, we can
enum tree_code lcode, rcode;
tree ll_arg, lr_arg, rl_arg, rr_arg;
tree ll_inner, lr_inner, rl_inner, rr_inner;
- int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
- int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
- int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
- int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
+ HOST_WIDE_INT ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
+ HOST_WIDE_INT rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
+ HOST_WIDE_INT xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
+ HOST_WIDE_INT lnbitsize, lnbitpos, rnbitsize, rnbitpos;
int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
enum machine_mode lnmode, rnmode;
lr_arg = TREE_OPERAND (lhs, 1);
rl_arg = TREE_OPERAND (rhs, 0);
rr_arg = TREE_OPERAND (rhs, 1);
-
+
/* If the RHS can be evaluated unconditionally and its operands are
simple, it wins to evaluate the RHS unconditionally on machines
with expensive branches. In this case, this isn't a comparison
if (l_const)
{
l_const = convert (lntype, l_const);
- l_const = unextend (l_const, ll_bitsize, ll_unsignedp, ll_and_mask);
+ l_const = unextend (l_const, ll_bitsize, ll_unsignedp, ll_and_mask);
l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0);
if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const,
fold (build1 (BIT_NOT_EXPR,
0)))
{
warning ("comparison is always %d", wanted_code == NE_EXPR);
-
+
return convert (truth_type,
wanted_code == NE_EXPR
? integer_one_node : integer_zero_node);
field containing them both.
Note that we still must mask the lhs/rhs expressions. Furthermore,
- the mask must be shifted to account for the shift done by
+ the mask must be shifted to account for the shift done by
make_bit_field_ref. */
if ((ll_bitsize + ll_bitpos == rl_bitpos
&& lr_bitsize + lr_bitpos == rr_bitpos)
const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
}
\f
-/* Optimize T, which is a comparison of a MIN_EXPR or MAX_EXPR with a
+/* Optimize T, which is a comparison of a MIN_EXPR or MAX_EXPR with a
constant. */
static tree
should be used for the computation if wider than our type.
For example, if we are dividing (X * 8) + (Y + 16) by 4, we can return
- (X * 2) + (Y + 4). We also canonicalize (X + 7) * 4 into X * 4 + 28
- in the hope that either the machine has a multiply-accumulate insn
- or that this is part of an addressing calculation.
+ (X * 2) + (Y + 4). We must, however, be assured that either the original
+ expression would not overflow or that overflow is undefined for the type
+ in the language in question.
+
+ We also canonicalize (X + 7) * 4 into X * 4 + 28 in the hope that either
+ the machine has a multiply-accumulate insn or that this is part of an
+ addressing calculation.
If we return a non-null expression, it is an equivalent form of the
original computation, but need not be in the original type. */
{
tree type = TREE_TYPE (t);
enum tree_code tcode = TREE_CODE (t);
- tree ctype = (wide_type != 0 && (GET_MODE_SIZE (TYPE_MODE (wide_type))
+ tree ctype = (wide_type != 0 && (GET_MODE_SIZE (TYPE_MODE (wide_type))
> GET_MODE_SIZE (TYPE_MODE (type)))
? wide_type : type);
tree t1, t2;
/* Don't deal with constants of zero here; they confuse the code below. */
if (integer_zerop (c))
- return 0;
+ return NULL_TREE;
if (TREE_CODE_CLASS (tcode) == '1')
op0 = TREE_OPERAND (t, 0);
break;
case CONVERT_EXPR: case NON_LVALUE_EXPR: case NOP_EXPR:
+ /* If op0 is an expression, and is unsigned, and the type is
+ smaller than ctype, then we cannot widen the expression. */
+ if ((TREE_CODE_CLASS (TREE_CODE (op0)) == '<'
+ || TREE_CODE_CLASS (TREE_CODE (op0)) == '1'
+ || TREE_CODE_CLASS (TREE_CODE (op0)) == '2'
+ || TREE_CODE_CLASS (TREE_CODE (op0)) == 'e')
+ && TREE_UNSIGNED (TREE_TYPE (op0))
+ && ! (TREE_CODE (TREE_TYPE (op0)) == INTEGER_TYPE
+ && TYPE_IS_SIZETYPE (TREE_TYPE (op0)))
+ && (GET_MODE_SIZE (TYPE_MODE (ctype))
+ > GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (op0)))))
+ break;
/* Pass the constant down and see if we can make a simplification. If
we can, replace this expression with the inner simplification for
break;
case MIN_EXPR: case MAX_EXPR:
+ /* If widening the type changes the signedness, then we can't perform
+ this optimization as that changes the result. */
+ if (TREE_UNSIGNED (ctype) != TREE_UNSIGNED (type))
+ break;
+
/* MIN (a, b) / 5 -> MIN (a / 5, b / 5) */
if ((t1 = extract_muldiv (op0, c, code, wide_type)) != 0
&& (t2 = extract_muldiv (op1, c, code, wide_type)) != 0)
or floor division, by a power of two, so we can treat it that
way unless the multiplier or divisor overflows. */
if (TREE_CODE (op1) == INTEGER_CST
+ /* const_binop may not detect overflow correctly,
+ so check for it explicitly here. */
+ && TYPE_PRECISION (TREE_TYPE (size_one_node)) > TREE_INT_CST_LOW (op1)
+ && TREE_INT_CST_HIGH (op1) == 0
&& 0 != (t1 = convert (ctype,
const_binop (LSHIFT_EXPR, size_one_node,
op1, 0)))
break;
}
- /* Now do the operation and verify it doesn't overflow. */
- op1 = const_binop (code, convert (ctype, op1), convert (ctype, c), 0);
- if (op1 == 0 || TREE_OVERFLOW (op1))
+ /* If it's a multiply or a division/modulus operation of a multiple
+ of our constant, do the operation and verify it doesn't overflow. */
+ if (code == MULT_EXPR
+ || integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0)))
+ {
+ op1 = const_binop (code, convert (ctype, op1), convert (ctype, c), 0);
+ if (op1 == 0 || TREE_OVERFLOW (op1))
+ break;
+ }
+ else
+ break;
+
+ /* If we have an unsigned type that is not a sizetype, we cannot widen
+ the operation since it will change the result if the original
+ computation overflowed. */
+ if (TREE_UNSIGNED (ctype)
+ && ! (TREE_CODE (ctype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (ctype))
+ && ctype != type)
break;
/* If we were able to eliminate our operation from the first side,
/* If these operations "cancel" each other, we have the main
optimizations of this pass, which occur when either constant is a
multiple of the other, in which case we replace this with either an
- operation or CODE or TCODE. If we have an unsigned type that is
- not a sizetype, we canot do this for division since it will change
- the result if the original computation overflowed. */
- if ((code == MULT_EXPR && tcode == EXACT_DIV_EXPR
- && (! TREE_UNSIGNED (ctype)
- || (TREE_CODE (ctype) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (ctype))))
- || (tcode == MULT_EXPR
- && code != TRUNC_MOD_EXPR && code != CEIL_MOD_EXPR
- && code != FLOOR_MOD_EXPR && code != ROUND_MOD_EXPR))
+ operation or CODE or TCODE.
+
+ If we have an unsigned type that is not a sizetype, we cannot do
+ this since it will change the result if the original computation
+ overflowed. */
+ if ((! TREE_UNSIGNED (ctype)
+ || (TREE_CODE (ctype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (ctype)))
+ && ((code == MULT_EXPR && tcode == EXACT_DIV_EXPR)
+ || (tcode == MULT_EXPR
+ && code != TRUNC_MOD_EXPR && code != CEIL_MOD_EXPR
+ && code != FLOOR_MOD_EXPR && code != ROUND_MOD_EXPR)))
{
if (integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0)))
return fold (build (tcode, ctype, convert (ctype, op0),
return value ? integer_one_node : integer_zero_node;
else if (TREE_CODE (type) == BOOLEAN_TYPE)
return truthvalue_conversion (value ? integer_one_node :
- integer_zero_node);
- else
+ integer_zero_node);
+ else
{
tree t = build_int_2 (value, 0);
but we can constant-fold them if they have constant operands. */
tree
-fold (expr)
+fold (expr)
tree expr;
{
register tree t = expr;
if all operands are constant. */
int wins = 1;
- /* Don't try to process an RTL_EXPR since its operands aren't trees.
+ /* Don't try to process an RTL_EXPR since its operands aren't trees.
Likewise for a SAVE_EXPR that's already been evaluated. */
if (code == RTL_EXPR || (code == SAVE_EXPR && SAVE_EXPR_RTL (t)) != 0)
return t;
return DECL_INITIAL (t);
return t;
}
-
+
#ifdef MAX_INTEGER_COMPUTATION_MODE
check_max_integer_computation_mode (expr);
#endif
do arithmetic on them. */
wins = 0;
}
- else if (kind == 'e' || kind == '<'
- || kind == '1' || kind == '2' || kind == 'r')
+ else if (IS_EXPR_CODE_CLASS (kind) || kind == 'r')
{
- register int len = tree_code_length[(int) code];
+ register int len = TREE_CODE_LENGTH (code);
register int i;
for (i = 0; i < len; i++)
{
STRIP_SIGN_NOPS (op);
}
else
- {
- /* Strip any conversions that don't change the mode. */
- STRIP_NOPS (op);
- }
-
+ /* Strip any conversions that don't change the mode. */
+ STRIP_NOPS (op);
+
if (TREE_CODE (op) == COMPLEX_CST)
subop = TREE_REALPART (op);
else
The also optimizes non-constant cases that used to be done in
expand_expr.
- Before we do that, see if this is a BIT_AND_EXPR or a BIT_OR_EXPR,
+ Before we do that, see if this is a BIT_AND_EXPR or a BIT_IOR_EXPR,
one of the operands is a comparison and the other is a comparison, a
BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
code below would make the expression more complex. Change it to a
- TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
+ TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
return t;
}
- else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
+ else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
return fold (build (COND_EXPR, type, arg0,
fold (build1 (code, type, integer_one_node)),
fold (build1 (code, type, integer_zero_node))));
&& TREE_CODE (arg1) == COMPOUND_EXPR)
return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
-
+
switch (code)
{
case INTEGER_CST:
int inside_int = INTEGRAL_TYPE_P (inside_type);
int inside_ptr = POINTER_TYPE_P (inside_type);
int inside_float = FLOAT_TYPE_P (inside_type);
- int inside_prec = TYPE_PRECISION (inside_type);
+ unsigned int inside_prec = TYPE_PRECISION (inside_type);
int inside_unsignedp = TREE_UNSIGNED (inside_type);
int inter_int = INTEGRAL_TYPE_P (inter_type);
int inter_ptr = POINTER_TYPE_P (inter_type);
int inter_float = FLOAT_TYPE_P (inter_type);
- int inter_prec = TYPE_PRECISION (inter_type);
+ unsigned int inter_prec = TYPE_PRECISION (inter_type);
int inter_unsignedp = TREE_UNSIGNED (inter_type);
int final_int = INTEGRAL_TYPE_P (final_type);
int final_ptr = POINTER_TYPE_P (final_type);
int final_float = FLOAT_TYPE_P (final_type);
- int final_prec = TYPE_PRECISION (final_type);
+ unsigned int final_prec = TYPE_PRECISION (final_type);
int final_unsignedp = TREE_UNSIGNED (final_type);
- /* In addition to the cases of two conversions in a row
+ /* In addition to the cases of two conversions in a row
handled below, if we are converting something to its own
type via an object of identical or wider precision, neither
conversion is needed. */
- if (inside_type == final_type
+ if (TYPE_MAIN_VARIANT (inside_type) == TYPE_MAIN_VARIANT (final_type)
&& ((inter_int && final_int) || (inter_float && final_float))
&& inter_prec >= final_prec)
- return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
+ return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
/* Likewise, if the intermediate and final types are either both
float or both integer, we don't need the middle conversion if
and the outermost type is wider than the intermediate, or
- the initial type is a pointer type and the precisions of the
intermediate and final types differ, or
- - the final type is a pointer type and the precisions of the
+ - the final type is a pointer type and the precisions of the
initial and intermediate types differ. */
if (! inside_float && ! inter_float && ! final_float
&& (inter_prec > inside_prec || inter_prec > final_prec)
/* Fold an expression like: "foo"[2] */
if (TREE_CODE (arg0) == STRING_CST
&& TREE_CODE (arg1) == INTEGER_CST
- && !TREE_INT_CST_HIGH (arg1)
- && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
+ && compare_tree_int (arg1, TREE_STRING_LENGTH (arg0)) < 0)
{
- t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
+ t = build_int_2 (TREE_STRING_POINTER (arg0)[TREE_INT_CST_LOW (arg1)], 0);
TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
force_fit_type (t, 0);
}
{
if (TREE_CODE (arg0) == INTEGER_CST)
{
- HOST_WIDE_INT low, high;
+ unsigned HOST_WIDE_INT low;
+ HOST_WIDE_INT high;
int overflow = neg_double (TREE_INT_CST_LOW (arg0),
TREE_INT_CST_HIGH (arg0),
&low, &high);
if (! TREE_UNSIGNED (type)
&& TREE_INT_CST_HIGH (arg0) < 0)
{
- HOST_WIDE_INT low, high;
+ unsigned HOST_WIDE_INT low;
+ HOST_WIDE_INT high;
int overflow = neg_double (TREE_INT_CST_LOW (arg0),
TREE_INT_CST_HIGH (arg0),
&low, &high);
case CONJ_EXPR:
if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
- return arg0;
+ return convert (type, arg0);
else if (TREE_CODE (arg0) == COMPLEX_EXPR)
- return build (COMPLEX_EXPR, TREE_TYPE (arg0),
+ return build (COMPLEX_EXPR, type,
TREE_OPERAND (arg0, 0),
negate_expr (TREE_OPERAND (arg0, 1)));
else if (TREE_CODE (arg0) == COMPLEX_CST)
}
/* Reassociate (plus (plus (mult) (foo)) (mult)) as
- (plus (plus (mult) (mult)) (foo)) so that we can
+ (plus (plus (mult) (mult)) (foo)) so that we can
take advantage of the factoring cases below. */
if ((TREE_CODE (arg0) == PLUS_EXPR
&& TREE_CODE (arg1) == MULT_EXPR)
|| (TREE_CODE (arg1) == PLUS_EXPR
- && TREE_CODE (arg0) == MULT_EXPR))
+ && TREE_CODE (arg0) == MULT_EXPR))
{
tree parg0, parg1, parg, marg;
}
if (same)
- return fold (build (MULT_EXPR, type,
+ return fold (build (MULT_EXPR, type,
fold (build (PLUS_EXPR, type, alt0, alt1)),
same));
}
/* (A << B) + (A >> (Z - B)) if A is unsigned and Z is the size of A
is a rotate of A by B bits. */
{
- register enum tree_code code0, code1;
- code0 = TREE_CODE (arg0);
- code1 = TREE_CODE (arg1);
- if (((code0 == RSHIFT_EXPR && code1 == LSHIFT_EXPR)
- || (code1 == RSHIFT_EXPR && code0 == LSHIFT_EXPR))
+ register enum tree_code code0, code1;
+ code0 = TREE_CODE (arg0);
+ code1 = TREE_CODE (arg1);
+ if (((code0 == RSHIFT_EXPR && code1 == LSHIFT_EXPR)
+ || (code1 == RSHIFT_EXPR && code0 == LSHIFT_EXPR))
&& operand_equal_p (TREE_OPERAND (arg0, 0),
- TREE_OPERAND (arg1,0), 0)
+ TREE_OPERAND (arg1, 0), 0)
&& TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
{
register tree tree01, tree11;
code01 = TREE_CODE (tree01);
code11 = TREE_CODE (tree11);
if (code01 == INTEGER_CST
- && code11 == INTEGER_CST
- && TREE_INT_CST_HIGH (tree01) == 0
- && TREE_INT_CST_HIGH (tree11) == 0
- && ((TREE_INT_CST_LOW (tree01) + TREE_INT_CST_LOW (tree11))
- == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
+ && code11 == INTEGER_CST
+ && TREE_INT_CST_HIGH (tree01) == 0
+ && TREE_INT_CST_HIGH (tree11) == 0
+ && ((TREE_INT_CST_LOW (tree01) + TREE_INT_CST_LOW (tree11))
+ == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
- code0 == LSHIFT_EXPR ? tree01 : tree11);
+ code0 == LSHIFT_EXPR ? tree01 : tree11);
else if (code11 == MINUS_EXPR)
{
- tree tree110, tree111;
- tree110 = TREE_OPERAND (tree11, 0);
- tree111 = TREE_OPERAND (tree11, 1);
- STRIP_NOPS (tree110);
- STRIP_NOPS (tree111);
- if (TREE_CODE (tree110) == INTEGER_CST
- && TREE_INT_CST_HIGH (tree110) == 0
- && (TREE_INT_CST_LOW (tree110)
- == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0))))
+ tree tree110, tree111;
+ tree110 = TREE_OPERAND (tree11, 0);
+ tree111 = TREE_OPERAND (tree11, 1);
+ STRIP_NOPS (tree110);
+ STRIP_NOPS (tree111);
+ if (TREE_CODE (tree110) == INTEGER_CST
+ && 0 == compare_tree_int (tree110,
+ TYPE_PRECISION
+ (TREE_TYPE (TREE_OPERAND
+ (arg0, 0))))
&& operand_equal_p (tree01, tree111, 0))
- return build ((code0 == LSHIFT_EXPR
- ? LROTATE_EXPR
- : RROTATE_EXPR),
- type, TREE_OPERAND (arg0, 0), tree01);
+ return build ((code0 == LSHIFT_EXPR
+ ? LROTATE_EXPR
+ : RROTATE_EXPR),
+ type, TREE_OPERAND (arg0, 0), tree01);
}
else if (code01 == MINUS_EXPR)
{
- tree tree010, tree011;
- tree010 = TREE_OPERAND (tree01, 0);
- tree011 = TREE_OPERAND (tree01, 1);
- STRIP_NOPS (tree010);
- STRIP_NOPS (tree011);
- if (TREE_CODE (tree010) == INTEGER_CST
- && TREE_INT_CST_HIGH (tree010) == 0
- && (TREE_INT_CST_LOW (tree010)
- == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0))))
+ tree tree010, tree011;
+ tree010 = TREE_OPERAND (tree01, 0);
+ tree011 = TREE_OPERAND (tree01, 1);
+ STRIP_NOPS (tree010);
+ STRIP_NOPS (tree011);
+ if (TREE_CODE (tree010) == INTEGER_CST
+ && 0 == compare_tree_int (tree010,
+ TYPE_PRECISION
+ (TREE_TYPE (TREE_OPERAND
+ (arg0, 0))))
&& operand_equal_p (tree11, tree011, 0))
- return build ((code0 != LSHIFT_EXPR
- ? LROTATE_EXPR
- : RROTATE_EXPR),
- type, TREE_OPERAND (arg0, 0), tree11);
+ return build ((code0 != LSHIFT_EXPR
+ ? LROTATE_EXPR
+ : RROTATE_EXPR),
+ type, TREE_OPERAND (arg0, 0), tree11);
}
}
}
-
associate:
/* In most languages, can't associate operations on floats through
parentheses. Rather than remember where the parentheses were, we
/* (-A) - CST -> (-CST) - A for floating point (what about ints ?) */
if (TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == REAL_CST)
return
- fold (build (MINUS_EXPR, type,
+ fold (build (MINUS_EXPR, type,
build_real (TREE_TYPE (arg1),
REAL_VALUE_NEGATE (TREE_REAL_CST (arg1))),
TREE_OPERAND (arg0, 0)));
if (! FLOAT_TYPE_P (type))
{
if (! wins && integer_zerop (arg0))
- return negate_expr (arg1);
+ return negate_expr (convert (type, arg1));
if (integer_zerop (arg1))
return non_lvalue (convert (type, arg0));
{
/* Except with IEEE floating point, 0-x equals -x. */
if (! wins && real_zerop (arg0))
- return negate_expr (arg1);
+ return negate_expr (convert (type, arg1));
/* Except with IEEE floating point, x-0 equals x. */
if (real_zerop (arg1))
return non_lvalue (convert (type, arg0));
}
- /* Fold &x - &x. This can happen from &x.foo - &x.
+ /* Fold &x - &x. This can happen from &x.foo - &x.
This is unsafe for certain floats even in non-IEEE formats.
In IEEE, it is unsafe because it does wrong for NaNs.
Also note that operand_equal_p is always false if an operand
/* (-A) * (-B) -> A * B */
if (TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == NEGATE_EXPR)
return fold (build (MULT_EXPR, type, TREE_OPERAND (arg0, 0),
- TREE_OPERAND (arg1, 0)));
+ TREE_OPERAND (arg1, 0)));
if (! FLOAT_TYPE_P (type))
{
/* Convert (or (not arg0) (not arg1)) to (not (and (arg0) (arg1))).
- This results in more efficient code for machines without a NAND
+ This results in more efficient code for machines without a NAND
instruction. Combine will canonicalize to the first form
which will allow use of NAND instructions provided by the
backend if they exist. */
&& integer_zerop (const_binop (BIT_AND_EXPR,
TREE_OPERAND (arg0, 1),
TREE_OPERAND (arg1, 1), 0)))
- {
- code = BIT_IOR_EXPR;
- goto bit_ior;
- }
+ {
+ code = BIT_IOR_EXPR;
+ goto bit_ior;
+ }
/* See if this can be simplified into a rotate first. If that
is unsuccessful continue in the association code. */
if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
&& TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
{
- int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
+ unsigned int prec
+ = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
+
if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
&& (~TREE_INT_CST_LOW (arg0)
& (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
&& TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
{
- int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
+ unsigned int prec
+ = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
+
if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
&& (~TREE_INT_CST_LOW (arg1)
& (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
/* Convert (and (not arg0) (not arg1)) to (not (or (arg0) (arg1))).
- This results in more efficient code for machines without a NOR
+ This results in more efficient code for machines without a NOR
instruction. Combine will canonicalize to the first form
which will allow use of NOR instructions provided by the
backend if they exist. */
REAL_VALUE_TYPE r;
r = TREE_REAL_CST (arg1);
if (exact_real_inverse (TYPE_MODE(TREE_TYPE(arg0)), &r))
- {
- tem = build_real (type, r);
- return fold (build (MULT_EXPR, type, arg0, tem));
- }
+ {
+ tem = build_real (type, r);
+ return fold (build (MULT_EXPR, type, arg0, tem));
+ }
}
}
goto binary;
&& multiple_of_p (type, arg0, arg1))
return fold (build (EXACT_DIV_EXPR, type, arg0, arg1));
- if (TREE_CODE (arg1) == INTEGER_CST
+ if (TREE_CODE (arg1) == INTEGER_CST
&& 0 != (tem = extract_muldiv (TREE_OPERAND (t, 0), arg1,
code, NULL_TREE)))
return convert (type, tem);
&& TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
&& ((TREE_INT_CST_LOW (arg1)
+ TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)))
- == GET_MODE_BITSIZE (TYPE_MODE (type))))
+ == (unsigned int) GET_MODE_BITSIZE (TYPE_MODE (type))))
return TREE_OPERAND (arg0, 0);
goto binary;
case MIN_EXPR:
if (operand_equal_p (arg0, arg1, 0))
- return arg0;
+ return omit_one_operand (type, arg0, arg1);
if (INTEGRAL_TYPE_P (type)
&& operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
return omit_one_operand (type, arg1, arg0);
case MAX_EXPR:
if (operand_equal_p (arg0, arg1, 0))
- return arg0;
+ return omit_one_operand (type, arg0, arg1);
if (INTEGRAL_TYPE_P (type)
&& TYPE_MAX_VALUE (type)
&& operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
("true" is a fixed value perhaps depending on the language.) */
/* If first arg is constant zero, return it. */
if (integer_zerop (arg0))
- return arg0;
+ return convert (type, arg0);
case TRUTH_AND_EXPR:
/* If either arg is constant true, drop it. */
if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
- return non_lvalue (arg1);
- if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
- return non_lvalue (arg0);
+ return non_lvalue (convert (type, arg1));
+ if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1)
+ /* Preserve sequence points. */
+ && (code != TRUTH_ANDIF_EXPR || ! TREE_SIDE_EFFECTS (arg0)))
+ return non_lvalue (convert (type, arg0));
/* If second arg is constant zero, result is zero, but first arg
must be evaluated. */
if (integer_zerop (arg1))
("true" is a fixed value perhaps depending on the language.) */
/* If first arg is constant true, return it. */
if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
- return arg0;
+ return convert (type, arg0);
case TRUTH_OR_EXPR:
/* If either arg is constant zero, drop it. */
if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
- return non_lvalue (arg1);
- if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
- return non_lvalue (arg0);
+ return non_lvalue (convert (type, arg1));
+ if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1)
+ /* Preserve sequence points. */
+ && (code != TRUTH_ORIF_EXPR || ! TREE_SIDE_EFFECTS (arg0)))
+ return non_lvalue (convert (type, arg0));
/* If second arg is constant true, result is true, but we must
evaluate first arg. */
if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
case TRUTH_XOR_EXPR:
/* If either arg is constant zero, drop it. */
if (integer_zerop (arg0))
- return non_lvalue (arg1);
+ return non_lvalue (convert (type, arg1));
if (integer_zerop (arg1))
- return non_lvalue (arg0);
+ return non_lvalue (convert (type, arg0));
/* If either arg is constant true, this is a logical inversion. */
if (integer_onep (arg0))
- return non_lvalue (invert_truthvalue (arg1));
+ return non_lvalue (convert (type, invert_truthvalue (arg1)));
if (integer_onep (arg1))
- return non_lvalue (invert_truthvalue (arg0));
+ return non_lvalue (convert (type, invert_truthvalue (arg0)));
return t;
case EQ_EXPR:
if (TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == REAL_CST)
return
fold (build
- (swap_tree_comparison (code), type,
- TREE_OPERAND (arg0, 0),
- build_real (TREE_TYPE (arg1),
- REAL_VALUE_NEGATE (TREE_REAL_CST (arg1)))));
+ (swap_tree_comparison (code), type,
+ TREE_OPERAND (arg0, 0),
+ build_real (TREE_TYPE (arg1),
+ REAL_VALUE_NEGATE (TREE_REAL_CST (arg1)))));
/* IEEE doesn't distinguish +0 and -0 in comparisons. */
/* a CMP (-0) -> a CMP 0 */
if (TREE_CODE (arg1) == REAL_CST
build_real (TREE_TYPE (arg1), dconst0)));
}
-
/* If one arg is a constant integer, put it last. */
if (TREE_CODE (arg0) == INTEGER_CST
&& TREE_CODE (arg1) != INTEGER_CST)
tree newconst
= fold (build (PLUS_EXPR, TREE_TYPE (varop),
constop, TREE_OPERAND (varop, 1)));
- TREE_SET_CODE (varop, PREINCREMENT_EXPR);
+
+ /* Do not overwrite the current varop to be a preincrement,
+ create a new node so that we won't confuse our caller who
+ might create trees and throw them away, reusing the
+ arguments that they passed to build. This shows up in
+ the THEN or ELSE parts of ?: being postincrements. */
+ varop = build (PREINCREMENT_EXPR, TREE_TYPE (varop),
+ TREE_OPERAND (varop, 0),
+ TREE_OPERAND (varop, 1));
/* If VAROP is a reference to a bitfield, we must mask
the constant by the width of the field. */
(TREE_OPERAND
(TREE_OPERAND (varop, 0), 1)));
tree mask, unsigned_type;
- int precision;
+ unsigned int precision;
tree folded_compare;
/* First check whether the comparison would come out
convert (TREE_TYPE (varop),
mask)));
}
-
- t = build (code, type, TREE_OPERAND (t, 0),
- TREE_OPERAND (t, 1));
- TREE_OPERAND (t, constopnum) = newconst;
+ t = build (code, type,
+ (constopnum == 0) ? newconst : varop,
+ (constopnum == 1) ? newconst : varop);
return t;
}
}
tree newconst
= fold (build (MINUS_EXPR, TREE_TYPE (varop),
constop, TREE_OPERAND (varop, 1)));
- TREE_SET_CODE (varop, PREDECREMENT_EXPR);
+
+ /* Do not overwrite the current varop to be a predecrement,
+ create a new node so that we won't confuse our caller who
+ might create trees and throw them away, reusing the
+ arguments that they passed to build. This shows up in
+ the THEN or ELSE parts of ?: being postdecrements. */
+ varop = build (PREDECREMENT_EXPR, TREE_TYPE (varop),
+ TREE_OPERAND (varop, 0),
+ TREE_OPERAND (varop, 1));
if (TREE_CODE (TREE_OPERAND (varop, 0)) == COMPONENT_REF
&& DECL_BIT_FIELD(TREE_OPERAND
(TREE_OPERAND
(TREE_OPERAND (varop, 0), 1)));
tree mask, unsigned_type;
- int precision;
+ unsigned int precision;
tree folded_compare;
if (constopnum == 0)
convert (TREE_TYPE (varop),
mask)));
}
-
- t = build (code, type, TREE_OPERAND (t, 0),
- TREE_OPERAND (t, 1));
- TREE_OPERAND (t, constopnum) = newconst;
+ t = build (code, type,
+ (constopnum == 0) ? newconst : varop,
+ (constopnum == 1) ? newconst : varop);
return t;
}
}
|| (TREE_CODE (t1) == INTEGER_CST
&& int_fits_type_p (t1, TREE_TYPE (tem)))))
return fold (build (code, type, tem, convert (TREE_TYPE (tem), t1)));
-
+
/* If this is comparing a constant with a MIN_EXPR or a MAX_EXPR of a
constant, we can simplify it. */
else if (TREE_CODE (arg1) == INTEGER_CST
build (GE_EXPR, type, TREE_OPERAND (arg0, 0), tem),
build (LE_EXPR, type,
TREE_OPERAND (arg0, 0), arg1)));
-
+
/* If this is an EQ or NE comparison with zero and ARG0 is
(1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
two operations, but the latter can be done in one less insn
&& TREE_UNSIGNED (TREE_TYPE (arg0))
&& TREE_CODE (arg1) == LSHIFT_EXPR
&& integer_onep (TREE_OPERAND (arg1, 0)))
- return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
+ return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
TREE_OPERAND (arg1, 1)),
convert (TREE_TYPE (arg0), integer_zero_node));
{
if (TREE_INT_CST_HIGH (arg1) == 0
&& (TREE_INT_CST_LOW (arg1)
- == ((HOST_WIDE_INT) 1 << (width - 1)) - 1)
+ == ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1)
&& ! TREE_UNSIGNED (TREE_TYPE (arg1)))
switch (TREE_CODE (t))
{
else if (TREE_INT_CST_HIGH (arg1) == -1
&& (- TREE_INT_CST_LOW (arg1)
- == ((HOST_WIDE_INT) 1 << (width - 1)))
+ == ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
&& ! TREE_UNSIGNED (TREE_TYPE (arg1)))
switch (TREE_CODE (t))
{
else if (TREE_INT_CST_HIGH (arg1) == 0
&& (TREE_INT_CST_LOW (arg1)
- == ((HOST_WIDE_INT) 1 << (width - 1)) - 1)
+ == ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1)
&& TREE_UNSIGNED (TREE_TYPE (arg1)))
-
+
switch (TREE_CODE (t))
{
case LE_EXPR:
if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
{
if (code == EQ_EXPR)
- t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
- == TREE_INT_CST_LOW (arg1))
- && (TREE_INT_CST_HIGH (arg0)
- == TREE_INT_CST_HIGH (arg1)),
- 0);
+ t1 = build_int_2 (tree_int_cst_equal (arg0, arg1), 0);
else
t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
? INT_CST_LT_UNSIGNED (arg0, arg1)
switch (comp_code)
{
case EQ_EXPR:
- return pedantic_non_lvalue (negate_expr (arg1));
+ return
+ pedantic_non_lvalue
+ (convert (type,
+ negate_expr
+ (convert (TREE_TYPE (TREE_OPERAND (t, 1)),
+ arg1))));
+
case NE_EXPR:
return pedantic_non_lvalue (convert (type, arg1));
case GE_EXPR:
tree comp_op1 = TREE_OPERAND (arg0, 1);
tree comp_type = TREE_TYPE (comp_op0);
+ /* Avoid adding NOP_EXPRs in case this is an lvalue. */
+ if (TYPE_MAIN_VARIANT (comp_type) == TYPE_MAIN_VARIANT (type))
+ comp_type = type;
+
switch (comp_code)
{
case EQ_EXPR:
case LT_EXPR:
/* In C++ a ?: expression can be an lvalue, so put the
operand which will be used if they are equal first
- so that we can convert this back to the
+ so that we can convert this back to the
corresponding COND_EXPR. */
return pedantic_non_lvalue
- (convert (type, (fold (build (MIN_EXPR, comp_type,
- (comp_code == LE_EXPR
- ? comp_op0 : comp_op1),
- (comp_code == LE_EXPR
- ? comp_op1 : comp_op0))))));
+ (convert (type, fold (build (MIN_EXPR, comp_type,
+ (comp_code == LE_EXPR
+ ? comp_op0 : comp_op1),
+ (comp_code == LE_EXPR
+ ? comp_op1 : comp_op0)))));
break;
case GE_EXPR:
case GT_EXPR:
/* If the second operand is simpler than the third, swap them
since that produces better jump optimization results. */
- if ((TREE_CONSTANT (arg1) || TREE_CODE_CLASS (TREE_CODE (arg1)) == 'd'
+ if ((TREE_CONSTANT (arg1) || DECL_P (arg1)
|| TREE_CODE (arg1) == SAVE_EXPR)
&& ! (TREE_CONSTANT (TREE_OPERAND (t, 2))
- || TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, 2))) == 'd'
+ || DECL_P (TREE_OPERAND (t, 2))
|| TREE_CODE (TREE_OPERAND (t, 2)) == SAVE_EXPR))
{
/* See if this can be inverted. If it can't, possibly because
if (integer_onep (TREE_OPERAND (t, 1))
&& integer_zerop (TREE_OPERAND (t, 2))
/* If we try to convert TREE_OPERAND (t, 0) to our type, the
- call to fold will try to move the conversion inside
+ call to fold will try to move the conversion inside
a COND, which will recurse. In that case, the COND_EXPR
is probably the best choice, so leave it alone. */
&& type == TREE_TYPE (arg0))
return t;
/* Don't let (0, 0) be null pointer constant. */
if (integer_zerop (arg1))
- return build1 (NOP_EXPR, TREE_TYPE (arg1), arg1);
- return arg1;
+ return build1 (NOP_EXPR, type, arg1);
+ return convert (type, arg1);
case COMPLEX_EXPR:
if (wins)
tree arg01;
if (kind0 == '1' || code0 == TRUTH_NOT_EXPR)
- return fold (build1 (code0, type,
+ return fold (build1 (code0, type,
fold (build1 (CLEANUP_POINT_EXPR,
TREE_TYPE (arg00), arg00))));
return t;
}
+ case CALL_EXPR:
+ /* Check for a built-in function. */
+ if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
+ && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (expr, 0), 0))
+ == FUNCTION_DECL)
+ && DECL_BUILT_IN (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
+ {
+ tree tmp = fold_builtin (expr);
+ if (tmp)
+ return tmp;
+ }
+ return t;
+
default:
return t;
} /* switch (code) */
return 0;
}
}
+
+/* Return true if `t' is known to be non-negative. */
+
+int
+tree_expr_nonnegative_p (t)
+ tree t;
+{
+ switch (TREE_CODE (t))
+ {
+ case INTEGER_CST:
+ /* A constant is non-negative exactly when its sign is not negative. */
+ return tree_int_cst_sgn (t) >= 0;
+ case COND_EXPR:
+ /* A conditional is known non-negative only if both arms are. */
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 1))
+ && tree_expr_nonnegative_p (TREE_OPERAND (t, 2));
+ case BIND_EXPR:
+ /* The value of a BIND_EXPR is the value of its body, operand 1. */
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 1));
+ case RTL_EXPR:
+ /* Already expanded to RTL; defer to the RTL-level test. */
+ return rtl_expr_nonnegative_p (RTL_EXPR_RTL (t));
+
+ default:
+ if (truth_value_p (TREE_CODE (t)))
+ /* Truth values evaluate to 0 or 1, which is nonnegative. */
+ return 1;
+ else
+ /* We don't know sign of `t', so be conservative and return false. */
+ return 0;
+ }
+}
+
+/* Return true if `r' is known to be non-negative.
+ Only handles constants at the moment. */
+
+int
+rtl_expr_nonnegative_p (r)
+ rtx r;
+{
+ switch (GET_CODE (r))
+ {
+ case CONST_INT:
+ return INTVAL (r) >= 0;
+
+ case CONST_DOUBLE:
+ /* A VOIDmode CONST_DOUBLE holds a double-word integer, so its
+ sign is that of the high word. Non-VOIDmode (floating-point)
+ values fall through: sign unknown, answer conservatively. */
+ if (GET_MODE (r) == VOIDmode)
+ return CONST_DOUBLE_HIGH (r) >= 0;
+ return 0;
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ /* These are always nonnegative. */
+ return 1;
+
+ default:
+ /* Anything non-constant (registers, arithmetic, ...) is unknown;
+ be conservative and say false. */
+ return 0;
+ }
+}