1 /* Copyright (C) 2007 Free Software Foundation, Inc.
3 This file is part of GCC.
5 GCC is free software; you can redistribute it and/or modify it under
6 the terms of the GNU General Public License as published by the Free
7 Software Foundation; either version 2, or (at your option) any later
10 In addition to the permissions in the GNU General Public License, the
11 Free Software Foundation gives you unlimited permission to link the
12 compiled version of this file into combinations with other programs,
13 and to distribute those combinations without any restriction coming
14 from the use of this file. (The General Public License restrictions
15 do apply in other respects; for example, they cover modification of
16 the file, and distribution when not linked into a combine
19 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
20 WARRANTY; without even the implied warranty of MERCHANTABILITY or
21 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
24 You should have received a copy of the GNU General Public License
25 along with GCC; see the file COPYING. If not, write to the Free
26 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
29 #include "bid_internal.h"
31 /*****************************************************************************
32 * BID128_to_int32_rnint
33 ****************************************************************************/
// ---------------------------------------------------------------------------
// __bid128_to_int32_rnint
// Convert a 128-bit BID-encoded decimal floating-point value x to a signed
// 32-bit integer, rounding to nearest; ties are resolved by forcing the
// truncated quotient floor(C*) to be even (see the midpoint correction near
// the end).  NaN, infinity, and values outside the int32 range raise
// INVALID_EXCEPTION; unlike the "_xrnint" variant below, no line visible
// here ever raises INEXACT_EXCEPTION.
// NOTE(review): the embedded line numbers in this listing are non-contiguous,
// i.e. statements have been elided (e.g. the result stores and BID_RETURNs
// that presumably follow each INVALID_EXCEPTION — TODO confirm against the
// full source).  Comments below describe only the code that is visible.
// ---------------------------------------------------------------------------
35 BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE(int, __bid128_to_int32_rnint, x)
40 int exp; // unbiased exponent
41 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
44 unsigned int x_nr_bits;
47 UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
// Unpack sign, biased exponent, and coefficient from the high word of x.
52 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
53 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
54 C1.w[1] = x.w[1] & MASK_COEFF;
57 // check for NaN or Infinity
58 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
60 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
61 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
63 *pfpsf |= INVALID_EXCEPTION;
64 // return Integer Indefinite
68 *pfpsf |= INVALID_EXCEPTION;
69 // return Integer Indefinite
73 } else { // x is not a NaN, so it must be infinity
74 if (!x_sign) { // x is +inf
76 *pfpsf |= INVALID_EXCEPTION;
77 // return Integer Indefinite
81 *pfpsf |= INVALID_EXCEPTION;
82 // return Integer Indefinite
// The largest canonical coefficient is 10^34 - 1 =
// 0x0001ed09bead87c0378d8e63ffffffff; anything above it, or an exponent
// field with both steering bits set, is non-canonical.
88 // check for non-canonical values (after the check for special values)
89 if ((C1.w[1] > 0x0001ed09bead87c0ull)
90 || (C1.w[1] == 0x0001ed09bead87c0ull
91 && (C1.w[0] > 0x378d8e63ffffffffull))
92 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
95 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
99 } else { // x is not special and is not zero
101 // q = nr. of decimal digits in x
102 // determine first the nr. of bits in x
// Bit count is obtained via an exact UINT64 -> double conversion: the
// biased exponent field of the double (bits 62..52, bias 0x3ff) yields
// floor(log2) of the value; the __bid_nr_digits table then maps the bit
// count to a decimal digit count q.
104 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
105 // split the 64-bit value in two 32-bit halves to avoid rounding errors
106 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
107 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
109 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
111 tmp1.d = (double) (C1.w[0]); // exact conversion
113 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
115 } else { // if x < 2^53
116 tmp1.d = (double) C1.w[0]; // exact conversion
118 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
120 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
121 tmp1.d = (double) C1.w[1]; // exact conversion
123 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
125 q = __bid_nr_digits[x_nr_bits - 1].digits;
// A given bit count can correspond to two digit counts; compare against
// the table threshold to pick the larger one.
127 q = __bid_nr_digits[x_nr_bits - 1].digits1;
128 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
129 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
130 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
// q + exp is the number of digits in the integer part of x; classify by
// it: > 10 can never fit in int32, == 10 is screened here, the rest fall
// through to the generic rounding below.
133 exp = (x_exp >> 49) - 6176;
134 if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
136 *pfpsf |= INVALID_EXCEPTION;
137 // return Integer Indefinite
140 } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
141 // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
142 // so x rounded to an integer may or may not fit in a signed 32-bit int
143 // the cases that do not fit are identified here; the ones that fit
144 // fall through and will be handled with other cases further,
145 // under '1 <= q + exp <= 10'
146 if (x_sign) { // if n < 0 and q + exp = 10
147 // if n < -2^31 - 1/2 then n is too large
148 // too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^31+1/2
149 // <=> 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000005, 1<=q<=34
151 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
152 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
153 if (tmp64 > 0x500000005ull) {
155 *pfpsf |= INVALID_EXCEPTION;
156 // return Integer Indefinite
160 // else cases that can be rounded to a 32-bit int fall through
161 // to '1 <= q + exp <= 10'
162 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
163 // 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000005 <=>
164 // C > 0x500000005 * 10^(q-11) where 1 <= q - 11 <= 23
165 // (scale 2^31+1/2 up)
166 tmp64 = 0x500000005ull;
167 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
168 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
169 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
170 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
172 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
174 *pfpsf |= INVALID_EXCEPTION;
175 // return Integer Indefinite
179 // else cases that can be rounded to a 32-bit int fall through
180 // to '1 <= q + exp <= 10'
182 } else { // if n > 0 and q + exp = 10
183 // if n >= 2^31 - 1/2 then n is too large
184 // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31-1/2
185 // too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb, 1<=q<=34
187 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
188 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
189 if (tmp64 >= 0x4fffffffbull) {
191 *pfpsf |= INVALID_EXCEPTION;
192 // return Integer Indefinite
196 // else cases that can be rounded to a 32-bit int fall through
197 // to '1 <= q + exp <= 10'
198 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
199 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb <=>
200 // C >= 0x4fffffffb * 10^(q-11) where 1 <= q - 11 <= 23
201 // (scale 2^31-1/2 up)
202 tmp64 = 0x4fffffffbull;
203 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
204 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
205 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
206 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
208 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1]
209 && C1.w[0] >= C.w[0])) {
211 *pfpsf |= INVALID_EXCEPTION;
212 // return Integer Indefinite
216 // else cases that can be rounded to a 32-bit int fall through
217 // to '1 <= q + exp <= 10'
221 // n is not too large to be converted to int32: -2^31 - 1/2 < n < 2^31 - 1/2
222 // Note: some of the cases tested for above fall through to this point
223 if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
// |x| < 0.5, so the round-to-nearest result is 0 regardless of sign
// (result store elided in this listing — TODO confirm).
227 } else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
228 // if 0.c(0)c(1)...c(q-1) <= 0.5 <=> c(0)c(1)...c(q-1) <= 5 * 10^(q-1)
// Here 0.5 <= |x| < 1: compare the coefficient against the exact
// midpoint 5*10^(q-1) to decide between 0 and +/-1.
233 if (ind <= 18) { // 0 <= ind <= 18
234 if ((C1.w[1] == 0) && (C1.w[0] <= __bid_midpoint64[ind])) {
235 res = 0x00000000; // return 0
236 } else if (x_sign) { // n < 0
237 res = 0xffffffff; // return -1
239 res = 0x00000001; // return +1
241 } else { // 19 <= ind <= 33
242 if ((C1.w[1] < __bid_midpoint128[ind - 19].w[1])
243 || ((C1.w[1] == __bid_midpoint128[ind - 19].w[1])
244 && (C1.w[0] <= __bid_midpoint128[ind - 19].w[0]))) {
245 res = 0x00000000; // return 0
246 } else if (x_sign) { // n < 0
247 res = 0xffffffff; // return -1
249 res = 0x00000001; // return +1
252 } else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
253 // -2^31-1/2 <= x <= -1 or 1 <= x < 2^31-1/2 so x can be rounded
254 // to nearest to a 32-bit signed integer
255 if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
256 ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
257 // chop off ind digits from the lower part of C1
258 // C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
// Add half an ulp (of the digits being discarded) so that truncation
// below implements round-to-nearest; the half-ulp constant comes from
// the 64- or 128-bit midpoint table depending on ind.
261 C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
263 C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
264 C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
268 // calculate C* and f*
269 // C* is actually floor(C*) in this case
270 // C* and f* need shifting and masking, as shown by
271 // __bid_shiftright128[] and __bid_maskhigh128[]
273 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
274 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
275 // the approximation of 10^(-x) was rounded up to 118 bits
276 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
277 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
278 Cstar.w[1] = P256.w[3];
279 Cstar.w[0] = P256.w[2];
281 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
282 fstar.w[1] = P256.w[1];
283 fstar.w[0] = P256.w[0];
284 } else { // 22 <= ind - 1 <= 33
286 Cstar.w[0] = P256.w[3];
287 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
288 fstar.w[2] = P256.w[2];
289 fstar.w[1] = P256.w[1];
290 fstar.w[0] = P256.w[0];
292 // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind], e.g.
293 // if x=1, T*=__bid_ten2mk128trunc[0]=0x19999999999999999999999999999999
294 // if (0 < f* < 10^(-x)) then the result is a midpoint
295 // if floor(C*) is even then C* = floor(C*) - logical right
296 // shift; C* has p decimal digits, correct by Prop. 1)
297 // else if floor(C*) is odd C* = floor(C*)-1 (logical right
298 // shift; C* has p decimal digits, correct by Pr. 1)
300 // C* = floor(C*) (logical right shift; C has p decimal digits,
301 // correct by Property 1)
304 // shift right C* by Ex-128 = __bid_shiftright128[ind]
305 shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 102
306 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
308 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
309 // redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
310 } else { // 22 <= ind - 1 <= 33
311 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
313 // if the result was a midpoint it was rounded away from zero, so
314 // it will need a correction
315 // check for midpoints
// Midpoint detection: because 10^(-x) was rounded UP, an exact tie
// shows up as a tiny nonzero fraction 0 < f* < 10^(-x) (i.e. f* at or
// below the truncated reciprocal T*).
316 if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
317 && (fstar.w[1] || fstar.w[0])
318 && (fstar.w[1] < __bid_ten2mk128trunc[ind - 1].w[1]
319 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
320 && fstar.w[0] <= __bid_ten2mk128trunc[ind - 1].w[0]))) {
321 // the result is a midpoint; round to nearest
322 if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
323 // if floor(C*) is odd C = floor(C*) - 1; the result >= 1
324 Cstar.w[0]--; // Cstar.w[0] is now even
325 } // else MP in [ODD, EVEN]
331 } else if (exp == 0) {
333 // res = +/-C (exact)
338 } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
339 // res = +/-C * 10^exp (exact)
341 res = -C1.w[0] * __bid_ten2k64[exp];
343 res = C1.w[0] * __bid_ten2k64[exp];
350 /*****************************************************************************
351 * BID128_to_int32_xrnint
352 ****************************************************************************/
// ---------------------------------------------------------------------------
// __bid128_to_int32_xrnint
// Same conversion as __bid128_to_int32_rnint above (BID128 -> int32, round
// to nearest with the even-quotient tie correction), except that this "x"
// variant additionally raises INEXACT_EXCEPTION whenever the rounded result
// differs from x — see the INEXACT stores in the small-magnitude cases and
// the f* vs. 1/2 comparisons after the C* computation.
// NOTE(review): the embedded line numbers in this listing are non-contiguous,
// i.e. statements have been elided (e.g. the result stores and BID_RETURNs
// that presumably follow each INVALID_EXCEPTION — TODO confirm against the
// full source).  Comments below describe only the code that is visible.
// ---------------------------------------------------------------------------
354 BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE(int, __bid128_to_int32_xrnint, x)
359 int exp; // unbiased exponent
360 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
361 UINT64 tmp64, tmp64A;
363 unsigned int x_nr_bits;
366 UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
// Unpack sign, biased exponent, and coefficient from the high word of x.
371 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
372 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
373 C1.w[1] = x.w[1] & MASK_COEFF;
376 // check for NaN or Infinity
377 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
379 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
380 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
382 *pfpsf |= INVALID_EXCEPTION;
383 // return Integer Indefinite
385 } else { // x is QNaN
387 *pfpsf |= INVALID_EXCEPTION;
388 // return Integer Indefinite
392 } else { // x is not a NaN, so it must be infinity
393 if (!x_sign) { // x is +inf
395 *pfpsf |= INVALID_EXCEPTION;
396 // return Integer Indefinite
398 } else { // x is -inf
400 *pfpsf |= INVALID_EXCEPTION;
401 // return Integer Indefinite
// The largest canonical coefficient is 10^34 - 1 =
// 0x0001ed09bead87c0378d8e63ffffffff; anything above it, or an exponent
// field with both steering bits set, is non-canonical.
407 // check for non-canonical values (after the check for special values)
408 if ((C1.w[1] > 0x0001ed09bead87c0ull)
409 || (C1.w[1] == 0x0001ed09bead87c0ull
410 && (C1.w[0] > 0x378d8e63ffffffffull))
411 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
414 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
418 } else { // x is not special and is not zero
420 // q = nr. of decimal digits in x
421 // determine first the nr. of bits in x
// Bit count via exact UINT64 -> double conversion (double's biased
// exponent field, bits 62..52, gives floor(log2) + 0x3ff), then map
// bits -> decimal digits q through the __bid_nr_digits table.
423 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
424 // split the 64-bit value in two 32-bit halves to avoid rounding errors
425 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
426 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
428 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
430 tmp1.d = (double) (C1.w[0]); // exact conversion
432 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
434 } else { // if x < 2^53
435 tmp1.d = (double) C1.w[0]; // exact conversion
437 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
439 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
440 tmp1.d = (double) C1.w[1]; // exact conversion
442 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
444 q = __bid_nr_digits[x_nr_bits - 1].digits;
// A given bit count can correspond to two digit counts; compare against
// the table threshold to pick the larger one.
446 q = __bid_nr_digits[x_nr_bits - 1].digits1;
447 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
448 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
449 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
// q + exp = number of digits in the integer part of x; screen the
// values that cannot (or might not) fit in int32 before rounding.
452 exp = (x_exp >> 49) - 6176;
453 if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
455 *pfpsf |= INVALID_EXCEPTION;
456 // return Integer Indefinite
459 } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
460 // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
461 // so x rounded to an integer may or may not fit in a signed 32-bit int
462 // the cases that do not fit are identified here; the ones that fit
463 // fall through and will be handled with other cases further,
464 // under '1 <= q + exp <= 10'
465 if (x_sign) { // if n < 0 and q + exp = 10
466 // if n < -2^31 - 1/2 then n is too large
467 // too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^31+1/2
468 // <=> 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000005, 1<=q<=34
470 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
471 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
472 if (tmp64 > 0x500000005ull) {
474 *pfpsf |= INVALID_EXCEPTION;
475 // return Integer Indefinite
479 // else cases that can be rounded to a 32-bit int fall through
480 // to '1 <= q + exp <= 10'
481 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
482 // 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000005 <=>
483 // C > 0x500000005 * 10^(q-11) where 1 <= q - 11 <= 23
484 // (scale 2^31+1/2 up)
485 tmp64 = 0x500000005ull;
486 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
487 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
488 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
489 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
491 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
493 *pfpsf |= INVALID_EXCEPTION;
494 // return Integer Indefinite
498 // else cases that can be rounded to a 32-bit int fall through
499 // to '1 <= q + exp <= 10'
501 } else { // if n > 0 and q + exp = 10
502 // if n >= 2^31 - 1/2 then n is too large
503 // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31-1/2
504 // too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb, 1<=q<=34
506 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
507 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
508 if (tmp64 >= 0x4fffffffbull) {
510 *pfpsf |= INVALID_EXCEPTION;
511 // return Integer Indefinite
515 // else cases that can be rounded to a 32-bit int fall through
516 // to '1 <= q + exp <= 10'
517 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
518 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb <=>
519 // C >= 0x4fffffffb * 10^(q-11) where 1 <= q - 11 <= 23
520 // (scale 2^31-1/2 up)
521 tmp64 = 0x4fffffffbull;
522 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
523 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
524 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
525 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
527 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
529 *pfpsf |= INVALID_EXCEPTION;
530 // return Integer Indefinite
534 // else cases that can be rounded to a 32-bit int fall through
535 // to '1 <= q + exp <= 10'
539 // n is not too large to be converted to int32: -2^31 - 1/2 < n < 2^31 - 1/2
540 // Note: some of the cases tested for above fall through to this point
541 if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
// |x| < 0.5: result is 0 but nonzero x makes it inexact.
543 *pfpsf |= INEXACT_EXCEPTION;
547 } else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
548 // if 0.c(0)c(1)...c(q-1) <= 0.5 <=> c(0)c(1)...c(q-1) <= 5 * 10^(q-1)
// 0.5 <= |x| < 1: compare the coefficient against the exact midpoint
// 5*10^(q-1) to pick 0 or +/-1; always inexact (flag set below).
553 if (ind <= 18) { // 0 <= ind <= 18
554 if ((C1.w[1] == 0) && (C1.w[0] <= __bid_midpoint64[ind])) {
555 res = 0x00000000; // return 0
556 } else if (x_sign) { // n < 0
557 res = 0xffffffff; // return -1
559 res = 0x00000001; // return +1
561 } else { // 19 <= ind <= 33
562 if ((C1.w[1] < __bid_midpoint128[ind - 19].w[1])
563 || ((C1.w[1] == __bid_midpoint128[ind - 19].w[1])
564 && (C1.w[0] <= __bid_midpoint128[ind - 19].w[0]))) {
565 res = 0x00000000; // return 0
566 } else if (x_sign) { // n < 0
567 res = 0xffffffff; // return -1
569 res = 0x00000001; // return +1
573 *pfpsf |= INEXACT_EXCEPTION;
574 } else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
575 // -2^31-1/2 <= x <= -1 or 1 <= x < 2^31-1/2 so x can be rounded
576 // to nearest to a 32-bit signed integer
577 if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
578 ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
579 // chop off ind digits from the lower part of C1
580 // C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
// Add half an ulp of the discarded digits so truncation below rounds
// to nearest; constant taken from the 64- or 128-bit midpoint table.
583 C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
585 C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
586 C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
590 // calculate C* and f*
591 // C* is actually floor(C*) in this case
592 // C* and f* need shifting and masking, as shown by
593 // __bid_shiftright128[] and __bid_maskhigh128[]
595 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
596 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
597 // the approximation of 10^(-x) was rounded up to 118 bits
598 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
599 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
600 Cstar.w[1] = P256.w[3];
601 Cstar.w[0] = P256.w[2];
603 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
604 fstar.w[1] = P256.w[1];
605 fstar.w[0] = P256.w[0];
606 } else { // 22 <= ind - 1 <= 33
608 Cstar.w[0] = P256.w[3];
609 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
610 fstar.w[2] = P256.w[2];
611 fstar.w[1] = P256.w[1];
612 fstar.w[0] = P256.w[0];
614 // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind], e.g.
615 // if x=1, T*=__bid_ten2mk128trunc[0]=0x19999999999999999999999999999999
616 // if (0 < f* < 10^(-x)) then the result is a midpoint
617 // if floor(C*) is even then C* = floor(C*) - logical right
618 // shift; C* has p decimal digits, correct by Prop. 1)
619 // else if floor(C*) is odd C* = floor(C*)-1 (logical right
620 // shift; C* has p decimal digits, correct by Pr. 1)
622 // C* = floor(C*) (logical right shift; C has p decimal digits,
623 // correct by Property 1)
626 // shift right C* by Ex-128 = __bid_shiftright128[ind]
627 shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 102
628 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
630 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
631 // redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
632 } else { // 22 <= ind - 1 <= 33
633 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
// Inexactness test, unique to this "_xrnint" variant: since half an
// ulp was added before the multiply, the rounding was exact only when
// the residual fraction f* exceeds 1/2 by no more than the truncated
// reciprocal T*.  The three branches differ only in which fstar words
// hold the "1/2" bit for the current shift amount.
635 // determine inexactness of the rounding of C*
636 // if (0 < f* - 1/2 < 10^(-x)) then
637 // the result is exact
638 // else // if (f* - 1/2 > T*) then
639 // the result is inexact
641 if (fstar.w[1] > 0x8000000000000000ull ||
642 (fstar.w[1] == 0x8000000000000000ull &&
643 fstar.w[0] > 0x0ull)) { // f* > 1/2 and the result may be exact
644 tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
645 if ((tmp64 > __bid_ten2mk128trunc[ind - 1].w[1]
646 || (tmp64 == __bid_ten2mk128trunc[ind - 1].w[1]
647 && fstar.w[0] >= __bid_ten2mk128trunc[ind - 1].w[0]))) {
648 // set the inexact flag
649 *pfpsf |= INEXACT_EXCEPTION;
650 } // else the result is exact
651 } else { // the result is inexact; f2* <= 1/2
652 // set the inexact flag
653 *pfpsf |= INEXACT_EXCEPTION;
655 } else if (ind - 1 <= 21) { // if 3 <= ind <= 21
656 if (fstar.w[3] > 0x0 || (fstar.w[3] == 0x0
657 && fstar.w[2] > __bid_one_half128[ind - 1]) || (fstar.w[3] == 0x0
658 && fstar.w[2] == __bid_one_half128[ind - 1] && (fstar.w[1]
660 // f2* > 1/2 and the result may be exact
661 // Calculate f2* - 1/2
662 tmp64 = fstar.w[2] - __bid_one_half128[ind - 1];
664 if (tmp64 > fstar.w[2])
667 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
668 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
669 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
670 // set the inexact flag
671 *pfpsf |= INEXACT_EXCEPTION;
672 } // else the result is exact
673 } else { // the result is inexact; f2* <= 1/2
674 // set the inexact flag
675 *pfpsf |= INEXACT_EXCEPTION;
677 } else { // if 22 <= ind <= 33
678 if (fstar.w[3] > __bid_one_half128[ind - 1]
679 || (fstar.w[3] == __bid_one_half128[ind - 1] && (fstar.w[2]
680 || fstar.w[1] || fstar.w[0]))) {
681 // f2* > 1/2 and the result may be exact
682 // Calculate f2* - 1/2
683 tmp64 = fstar.w[3] - __bid_one_half128[ind - 1];
684 if (tmp64 || fstar.w[2]
685 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
686 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
687 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
688 // set the inexact flag
689 *pfpsf |= INEXACT_EXCEPTION;
690 } // else the result is exact
691 } else { // the result is inexact; f2* <= 1/2
692 // set the inexact flag
693 *pfpsf |= INEXACT_EXCEPTION;
696 // if the result was a midpoint it was rounded away from zero, so
697 // it will need a correction
698 // check for midpoints
// Midpoint detection: because 10^(-x) was rounded UP, an exact tie
// shows up as a tiny nonzero fraction 0 < f* < 10^(-x) (i.e. f* at or
// below the truncated reciprocal T*).
699 if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
700 && (fstar.w[1] || fstar.w[0])
701 && (fstar.w[1] < __bid_ten2mk128trunc[ind - 1].w[1]
702 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
703 && fstar.w[0] <= __bid_ten2mk128trunc[ind - 1].w[0]))) {
704 // the result is a midpoint; round to nearest
705 if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
706 // if floor(C*) is odd C = floor(C*) - 1; the result >= 1
707 Cstar.w[0]--; // Cstar.w[0] is now even
708 } // else MP in [ODD, EVEN]
714 } else if (exp == 0) {
716 // res = +/-C (exact)
721 } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
722 // res = +/-C * 10^exp (exact)
724 res = -C1.w[0] * __bid_ten2k64[exp];
726 res = C1.w[0] * __bid_ten2k64[exp];
733 /*****************************************************************************
734 * BID128_to_int32_floor
735 ****************************************************************************/
737 BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE(int, __bid128_to_int32_floor, x)
742 int exp; // unbiased exponent
743 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
744 UINT64 tmp64, tmp64A;
746 unsigned int x_nr_bits;
749 UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
752 int is_inexact_lt_midpoint = 0;
753 int is_inexact_gt_midpoint = 0;
754 int is_midpoint_lt_even = 0;
755 int is_midpoint_gt_even = 0;
758 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
759 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
760 C1.w[1] = x.w[1] & MASK_COEFF;
763 // check for NaN or Infinity
764 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
766 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
767 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
769 *pfpsf |= INVALID_EXCEPTION;
770 // return Integer Indefinite
772 } else { // x is QNaN
774 *pfpsf |= INVALID_EXCEPTION;
775 // return Integer Indefinite
779 } else { // x is not a NaN, so it must be infinity
780 if (!x_sign) { // x is +inf
782 *pfpsf |= INVALID_EXCEPTION;
783 // return Integer Indefinite
785 } else { // x is -inf
787 *pfpsf |= INVALID_EXCEPTION;
788 // return Integer Indefinite
794 // check for non-canonical values (after the check for special values)
795 if ((C1.w[1] > 0x0001ed09bead87c0ull)
796 || (C1.w[1] == 0x0001ed09bead87c0ull
797 && (C1.w[0] > 0x378d8e63ffffffffull))
798 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
801 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
805 } else { // x is not special and is not zero
807 // q = nr. of decimal digits in x
808 // determine first the nr. of bits in x
810 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
811 // split the 64-bit value in two 32-bit halves to avoid rounding errors
812 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
813 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
815 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
817 tmp1.d = (double) (C1.w[0]); // exact conversion
819 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
821 } else { // if x < 2^53
822 tmp1.d = (double) C1.w[0]; // exact conversion
824 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
826 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
827 tmp1.d = (double) C1.w[1]; // exact conversion
829 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
831 q = __bid_nr_digits[x_nr_bits - 1].digits;
833 q = __bid_nr_digits[x_nr_bits - 1].digits1;
834 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
835 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
836 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
839 exp = (x_exp >> 49) - 6176;
840 if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
842 *pfpsf |= INVALID_EXCEPTION;
843 // return Integer Indefinite
846 } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
847 // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
848 // so x rounded to an integer may or may not fit in a signed 32-bit int
849 // the cases that do not fit are identified here; the ones that fit
850 // fall through and will be handled with other cases further,
851 // under '1 <= q + exp <= 10'
852 if (x_sign) { // if n < 0 and q + exp = 10
853 // if n < -2^31 then n is too large
854 // too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^31
855 // <=> 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000000, 1<=q<=34
857 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
858 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
859 if (tmp64 > 0x500000000ull) {
861 *pfpsf |= INVALID_EXCEPTION;
862 // return Integer Indefinite
866 // else cases that can be rounded to a 32-bit int fall through
867 // to '1 <= q + exp <= 10'
868 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
869 // 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000000 <=>
870 // C > 0x500000000 * 10^(q-11) where 1 <= q - 11 <= 23
872 tmp64 = 0x500000000ull;
873 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
874 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
875 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
876 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
878 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
880 *pfpsf |= INVALID_EXCEPTION;
881 // return Integer Indefinite
885 // else cases that can be rounded to a 32-bit int fall through
886 // to '1 <= q + exp <= 10'
888 } else { // if n > 0 and q + exp = 10
889 // if n >= 2^31 then n is too large
890 // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31
891 // too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000, 1<=q<=34
893 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
894 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
895 if (tmp64 >= 0x500000000ull) {
897 *pfpsf |= INVALID_EXCEPTION;
898 // return Integer Indefinite
902 // else cases that can be rounded to a 32-bit int fall through
903 // to '1 <= q + exp <= 10'
904 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
905 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000 <=>
906 // C >= 0x500000000 * 10^(q-11) where 1 <= q - 11 <= 23
908 tmp64 = 0x500000000ull;
909 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
910 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
911 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
912 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
914 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
916 *pfpsf |= INVALID_EXCEPTION;
917 // return Integer Indefinite
921 // else cases that can be rounded to a 32-bit int fall through
922 // to '1 <= q + exp <= 10'
926 // n is not too large to be converted to int32: -2^31 <= n < 2^31
927 // Note: some of the cases tested for above fall through to this point
928 if ((q + exp) <= 0) {
929 // n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1)
936 } else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
937 // -2^31 <= x <= -1 or 1 <= x < 2^31 so x can be rounded
938 // toward negative infinity to a 32-bit signed integer
939 if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
940 ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
941 // chop off ind digits from the lower part of C1
942 // C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
945 C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
947 C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
948 C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
952 // calculate C* and f*
953 // C* is actually floor(C*) in this case
954 // C* and f* need shifting and masking, as shown by
955 // __bid_shiftright128[] and __bid_maskhigh128[]
957 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
958 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
959 // the approximation of 10^(-x) was rounded up to 118 bits
960 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
961 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
962 Cstar.w[1] = P256.w[3];
963 Cstar.w[0] = P256.w[2];
965 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
966 fstar.w[1] = P256.w[1];
967 fstar.w[0] = P256.w[0];
968 } else { // 22 <= ind - 1 <= 33
970 Cstar.w[0] = P256.w[3];
971 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
972 fstar.w[2] = P256.w[2];
973 fstar.w[1] = P256.w[1];
974 fstar.w[0] = P256.w[0];
976 // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind], e.g.
977 // if x=1, T*=__bid_ten2mk128trunc[0]=0x19999999999999999999999999999999
978 // if (0 < f* < 10^(-x)) then the result is a midpoint
979 // if floor(C*) is even then C* = floor(C*) - logical right
980 // shift; C* has p decimal digits, correct by Prop. 1)
981 // else if floor(C*) is odd C* = floor(C*)-1 (logical right
982 // shift; C* has p decimal digits, correct by Pr. 1)
984 // C* = floor(C*) (logical right shift; C has p decimal digits,
985 // correct by Property 1)
988 // shift right C* by Ex-128 = __bid_shiftright128[ind]
989 shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 102
990 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
992 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
993 // redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
994 } else { // 22 <= ind - 1 <= 33
995 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
997 // determine inexactness of the rounding of C*
998 // if (0 < f* - 1/2 < 10^(-x)) then
999 // the result is exact
1000 // else // if (f* - 1/2 > T*) then
1001 // the result is inexact
1003 if (fstar.w[1] > 0x8000000000000000ull ||
1004 (fstar.w[1] == 0x8000000000000000ull &&
1005 fstar.w[0] > 0x0ull)) { // f* > 1/2 and the result may be exact
1006 tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
1007 if ((tmp64 > __bid_ten2mk128trunc[ind - 1].w[1]
1008 || (tmp64 == __bid_ten2mk128trunc[ind - 1].w[1]
1009 && fstar.w[0] >= __bid_ten2mk128trunc[ind - 1].w[0]))) {
1010 is_inexact_lt_midpoint = 1;
1011 } // else the result is exact
1012 } else { // the result is inexact; f2* <= 1/2
1013 is_inexact_gt_midpoint = 1;
1015 } else if (ind - 1 <= 21) { // if 3 <= ind <= 21
1016 if (fstar.w[3] > 0x0 || (fstar.w[3] == 0x0
1017 && fstar.w[2] > __bid_one_half128[ind - 1]) || (fstar.w[3] == 0x0
1018 && fstar.w[2] == __bid_one_half128[ind - 1] && (fstar.w[1]
1020 // f2* > 1/2 and the result may be exact
1021 // Calculate f2* - 1/2
1022 tmp64 = fstar.w[2] - __bid_one_half128[ind - 1];
1023 tmp64A = fstar.w[3];
1024 if (tmp64 > fstar.w[2])
1027 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
1028 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
1029 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
1030 is_inexact_lt_midpoint = 1;
1031 } // else the result is exact
1032 } else { // the result is inexact; f2* <= 1/2
1033 is_inexact_gt_midpoint = 1;
1035 } else { // if 22 <= ind <= 33
1036 if (fstar.w[3] > __bid_one_half128[ind - 1]
1037 || (fstar.w[3] == __bid_one_half128[ind - 1] && (fstar.w[2]
1038 || fstar.w[1] || fstar.w[0]))) {
1039 // f2* > 1/2 and the result may be exact
1040 // Calculate f2* - 1/2
1041 tmp64 = fstar.w[3] - __bid_one_half128[ind - 1];
1042 if (tmp64 || fstar.w[2]
1043 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
1044 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
1045 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
1046 is_inexact_lt_midpoint = 1;
1047 } // else the result is exact
1048 } else { // the result is inexact; f2* <= 1/2
1049 is_inexact_gt_midpoint = 1;
1053 // if the result was a midpoint it was rounded away from zero, so
1054 // it will need a correction
1055 // check for midpoints
1056 if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
1057 && (fstar.w[1] || fstar.w[0])
1058 && (fstar.w[1] < __bid_ten2mk128trunc[ind - 1].w[1]
1059 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
1060 && fstar.w[0] <= __bid_ten2mk128trunc[ind - 1].w[0]))) {
1061 // the result is a midpoint; round to nearest
1062 if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
1063 // if floor(C*) is odd C = floor(C*) - 1; the result >= 1
1064 Cstar.w[0]--; // Cstar.w[0] is now even
1065 is_midpoint_gt_even = 1;
1066 is_inexact_lt_midpoint = 0;
1067 is_inexact_gt_midpoint = 0;
1068 } else { // else MP in [ODD, EVEN]
1069 is_midpoint_lt_even = 1;
1070 is_inexact_lt_midpoint = 0;
1071 is_inexact_gt_midpoint = 0;
1074 // general correction for RM
1075 if (x_sign && (is_midpoint_gt_even || is_inexact_lt_midpoint)) {
1076 Cstar.w[0] = Cstar.w[0] + 1;
1078 && (is_midpoint_lt_even || is_inexact_gt_midpoint)) {
1079 Cstar.w[0] = Cstar.w[0] - 1;
1081 ; // the result is already correct
1087 } else if (exp == 0) {
1089 // res = +/-C (exact)
1094 } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
1095 // res = +/-C * 10^exp (exact)
1097 res = -C1.w[0] * __bid_ten2k64[exp];
1099 res = C1.w[0] * __bid_ten2k64[exp];
1107 /*****************************************************************************
1108 * BID128_to_int32_xfloor
1109 ****************************************************************************/
1111 BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE(int, __bid128_to_int32_xfloor, x)
1116 int exp; // unbiased exponent
1117 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
1118 UINT64 tmp64, tmp64A;
1119 BID_UI64DOUBLE tmp1;
1120 unsigned int x_nr_bits;
1123 UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
1126 int is_inexact_lt_midpoint = 0;
1127 int is_inexact_gt_midpoint = 0;
1128 int is_midpoint_lt_even = 0;
1129 int is_midpoint_gt_even = 0;
1132 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
1133 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
1134 C1.w[1] = x.w[1] & MASK_COEFF;
1137 // check for NaN or Infinity
1138 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
1140 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
1141 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
1143 *pfpsf |= INVALID_EXCEPTION;
1144 // return Integer Indefinite
1146 } else { // x is QNaN
1148 *pfpsf |= INVALID_EXCEPTION;
1149 // return Integer Indefinite
1153 } else { // x is not a NaN, so it must be infinity
1154 if (!x_sign) { // x is +inf
1156 *pfpsf |= INVALID_EXCEPTION;
1157 // return Integer Indefinite
1159 } else { // x is -inf
1161 *pfpsf |= INVALID_EXCEPTION;
1162 // return Integer Indefinite
1168 // check for non-canonical values (after the check for special values)
1169 if ((C1.w[1] > 0x0001ed09bead87c0ull)
1170 || (C1.w[1] == 0x0001ed09bead87c0ull
1171 && (C1.w[0] > 0x378d8e63ffffffffull))
1172 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
1175 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
1179 } else { // x is not special and is not zero
1181 // q = nr. of decimal digits in x
1182 // determine first the nr. of bits in x
1184 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
1185 // split the 64-bit value in two 32-bit halves to avoid rounding errors
1186 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
1187 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
1189 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1190 } else { // x < 2^32
1191 tmp1.d = (double) (C1.w[0]); // exact conversion
1193 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1195 } else { // if x < 2^53
1196 tmp1.d = (double) C1.w[0]; // exact conversion
1198 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1200 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
1201 tmp1.d = (double) C1.w[1]; // exact conversion
1203 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1205 q = __bid_nr_digits[x_nr_bits - 1].digits;
1207 q = __bid_nr_digits[x_nr_bits - 1].digits1;
1208 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
1209 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
1210 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
1213 exp = (x_exp >> 49) - 6176;
1214 if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
1216 *pfpsf |= INVALID_EXCEPTION;
1217 // return Integer Indefinite
1220 } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
1221 // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
1222 // so x rounded to an integer may or may not fit in a signed 32-bit int
1223 // the cases that do not fit are identified here; the ones that fit
1224 // fall through and will be handled with other cases further,
1225 // under '1 <= q + exp <= 10'
1226 if (x_sign) { // if n < 0 and q + exp = 10
1227 // if n < -2^31 then n is too large
1228 // too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^31
1229 // <=> 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000000, 1<=q<=34
1231 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
1232 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
1233 if (tmp64 > 0x500000000ull) {
1235 *pfpsf |= INVALID_EXCEPTION;
1236 // return Integer Indefinite
1240 // else cases that can be rounded to a 32-bit int fall through
1241 // to '1 <= q + exp <= 10'
1242 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
1243 // 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000000 <=>
1244 // C > 0x500000000 * 10^(q-11) where 1 <= q - 11 <= 23
1246 tmp64 = 0x500000000ull;
1247 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
1248 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
1249 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
1250 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
1252 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
1254 *pfpsf |= INVALID_EXCEPTION;
1255 // return Integer Indefinite
1259 // else cases that can be rounded to a 32-bit int fall through
1260 // to '1 <= q + exp <= 10'
1262 } else { // if n > 0 and q + exp = 10
1263 // if n >= 2^31 then n is too large
1264 // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31
1265 // too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000, 1<=q<=34
1267 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
1268 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
1269 if (tmp64 >= 0x500000000ull) {
1271 *pfpsf |= INVALID_EXCEPTION;
1272 // return Integer Indefinite
1276 // else cases that can be rounded to a 32-bit int fall through
1277 // to '1 <= q + exp <= 10'
1278 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
1279 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000 <=>
1280 // C >= 0x500000000 * 10^(q-11) where 1 <= q - 11 <= 23
1282 tmp64 = 0x500000000ull;
1283 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
1284 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
1285 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
1286 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
1288 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
1290 *pfpsf |= INVALID_EXCEPTION;
1291 // return Integer Indefinite
1295 // else cases that can be rounded to a 32-bit int fall through
1296 // to '1 <= q + exp <= 10'
1300 // n is not too large to be converted to int32: -2^31 <= n < 2^31
1301 // Note: some of the cases tested for above fall through to this point
1302 if ((q + exp) <= 0) {
1303 // n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1)
1305 *pfpsf |= INEXACT_EXCEPTION;
1312 } else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
1313 // -2^31 <= x <= -1 or 1 <= x < 2^31 so x can be rounded
1314 // toward negative infinity to a 32-bit signed integer
1315 if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
1316 ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
1317 // chop off ind digits from the lower part of C1
1318 // C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
1321 C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
1323 C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
1324 C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
1326 if (C1.w[0] < tmp64)
1328 // calculate C* and f*
1329 // C* is actually floor(C*) in this case
1330 // C* and f* need shifting and masking, as shown by
1331 // __bid_shiftright128[] and __bid_maskhigh128[]
1333 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
1334 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
1335 // the approximation of 10^(-x) was rounded up to 118 bits
1336 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
1337 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
1338 Cstar.w[1] = P256.w[3];
1339 Cstar.w[0] = P256.w[2];
1341 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
1342 fstar.w[1] = P256.w[1];
1343 fstar.w[0] = P256.w[0];
1344 } else { // 22 <= ind - 1 <= 33
1346 Cstar.w[0] = P256.w[3];
1347 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
1348 fstar.w[2] = P256.w[2];
1349 fstar.w[1] = P256.w[1];
1350 fstar.w[0] = P256.w[0];
1352 // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind], e.g.
1353 // if x=1, T*=__bid_ten2mk128trunc[0]=0x19999999999999999999999999999999
1354 // if (0 < f* < 10^(-x)) then the result is a midpoint
1355 // if floor(C*) is even then C* = floor(C*) - logical right
1356 // shift; C* has p decimal digits, correct by Prop. 1)
1357 // else if floor(C*) is odd C* = floor(C*)-1 (logical right
1358 // shift; C* has p decimal digits, correct by Pr. 1)
1360 // C* = floor(C*) (logical right shift; C has p decimal digits,
1361 // correct by Property 1)
1362 // n = C* * 10^(e+x)
1364 // shift right C* by Ex-128 = __bid_shiftright128[ind]
1365 shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 102
1366 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
1368 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
1369 // redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
1370 } else { // 22 <= ind - 1 <= 33
1371 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
1373 // determine inexactness of the rounding of C*
1374 // if (0 < f* - 1/2 < 10^(-x)) then
1375 // the result is exact
1376 // else // if (f* - 1/2 > T*) then
1377 // the result is inexact
1379 if (fstar.w[1] > 0x8000000000000000ull ||
1380 (fstar.w[1] == 0x8000000000000000ull &&
1381 fstar.w[0] > 0x0ull)) { // f* > 1/2 and the result may be exact
1382 tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
1383 if ((tmp64 > __bid_ten2mk128trunc[ind - 1].w[1]
1384 || (tmp64 == __bid_ten2mk128trunc[ind - 1].w[1]
1385 && fstar.w[0] >= __bid_ten2mk128trunc[ind - 1].w[0]))) {
1386 // set the inexact flag
1387 *pfpsf |= INEXACT_EXCEPTION;
1388 is_inexact_lt_midpoint = 1;
1389 } // else the result is exact
1390 } else { // the result is inexact; f2* <= 1/2
1391 // set the inexact flag
1392 *pfpsf |= INEXACT_EXCEPTION;
1393 is_inexact_gt_midpoint = 1;
1395 } else if (ind - 1 <= 21) { // if 3 <= ind <= 21
1396 if (fstar.w[3] > 0x0 || (fstar.w[3] == 0x0
1397 && fstar.w[2] > __bid_one_half128[ind - 1]) || (fstar.w[3] == 0x0
1398 && fstar.w[2] == __bid_one_half128[ind - 1] && (fstar.w[1]
1400 // f2* > 1/2 and the result may be exact
1401 // Calculate f2* - 1/2
1402 tmp64 = fstar.w[2] - __bid_one_half128[ind - 1];
1403 tmp64A = fstar.w[3];
1404 if (tmp64 > fstar.w[2])
1407 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
1408 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
1409 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
1410 // set the inexact flag
1411 *pfpsf |= INEXACT_EXCEPTION;
1412 is_inexact_lt_midpoint = 1;
1413 } // else the result is exact
1414 } else { // the result is inexact; f2* <= 1/2
1415 // set the inexact flag
1416 *pfpsf |= INEXACT_EXCEPTION;
1417 is_inexact_gt_midpoint = 1;
1419 } else { // if 22 <= ind <= 33
1420 if (fstar.w[3] > __bid_one_half128[ind - 1]
1421 || (fstar.w[3] == __bid_one_half128[ind - 1] && (fstar.w[2]
1422 || fstar.w[1] || fstar.w[0]))) {
1423 // f2* > 1/2 and the result may be exact
1424 // Calculate f2* - 1/2
1425 tmp64 = fstar.w[3] - __bid_one_half128[ind - 1];
1426 if (tmp64 || fstar.w[2]
1427 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
1428 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
1429 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
1430 // set the inexact flag
1431 *pfpsf |= INEXACT_EXCEPTION;
1432 is_inexact_lt_midpoint = 1;
1433 } // else the result is exact
1434 } else { // the result is inexact; f2* <= 1/2
1435 // set the inexact flag
1436 *pfpsf |= INEXACT_EXCEPTION;
1437 is_inexact_gt_midpoint = 1;
1441 // if the result was a midpoint it was rounded away from zero, so
1442 // it will need a correction
1443 // check for midpoints
1444 if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
1445 && (fstar.w[1] || fstar.w[0])
1446 && (fstar.w[1] < __bid_ten2mk128trunc[ind - 1].w[1]
1447 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
1448 && fstar.w[0] <= __bid_ten2mk128trunc[ind - 1].w[0]))) {
1449 // the result is a midpoint; round to nearest
1450 if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
1451 // if floor(C*) is odd C = floor(C*) - 1; the result >= 1
1452 Cstar.w[0]--; // Cstar.w[0] is now even
1453 is_midpoint_gt_even = 1;
1454 is_inexact_lt_midpoint = 0;
1455 is_inexact_gt_midpoint = 0;
1456 } else { // else MP in [ODD, EVEN]
1457 is_midpoint_lt_even = 1;
1458 is_inexact_lt_midpoint = 0;
1459 is_inexact_gt_midpoint = 0;
1462 // general correction for RM
1463 if (x_sign && (is_midpoint_gt_even || is_inexact_lt_midpoint)) {
1464 Cstar.w[0] = Cstar.w[0] + 1;
1466 && (is_midpoint_lt_even || is_inexact_gt_midpoint)) {
1467 Cstar.w[0] = Cstar.w[0] - 1;
1469 ; // the result is already correct
1475 } else if (exp == 0) {
1477 // res = +/-C (exact)
1482 } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
1483 // res = +/-C * 10^exp (exact)
1485 res = -C1.w[0] * __bid_ten2k64[exp];
1487 res = C1.w[0] * __bid_ten2k64[exp];
1494 /*****************************************************************************
1495 * BID128_to_int32_ceil
1496 ****************************************************************************/
// __bid128_to_int32_ceil: convert a BID128 decimal floating-point value to a
// signed 32-bit integer, rounding toward positive infinity.  Unlike the
// 'x'-suffixed variants, this body raises no INEXACT_EXCEPTION — only
// INVALID_EXCEPTION for NaN, infinity, and out-of-range magnitudes (with the
// Integer Indefinite result).  The range checks are asymmetric versus the
// floor variant: a negative operand may be as small as just above -2^31-1
// (it ceils up into range), while a positive one must not exceed 2^31-1.
// NOTE(review): this extract elides interior source lines (result stores,
// brace closings, some assignment targets); comments annotate only what the
// visible lines establish.
1498 BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE(int, __bid128_to_int32_ceil, x)
1503 int exp; // unbiased exponent
1504 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
1505 UINT64 tmp64, tmp64A;
1506 BID_UI64DOUBLE tmp1;
1507 unsigned int x_nr_bits;
1510 UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
1513 int is_inexact_lt_midpoint = 0;
1514 int is_inexact_gt_midpoint = 0;
1515 int is_midpoint_lt_even = 0;
1516 int is_midpoint_gt_even = 0;
// unpack the BID128 operand: sign bit, biased exponent field, coefficient
1519 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
1520 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
1521 C1.w[1] = x.w[1] & MASK_COEFF;
1524 // check for NaN or Infinity
1525 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
1527 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
1528 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
1530 *pfpsf |= INVALID_EXCEPTION;
1531 // return Integer Indefinite
1533 } else { // x is QNaN
1535 *pfpsf |= INVALID_EXCEPTION;
1536 // return Integer Indefinite
1540 } else { // x is not a NaN, so it must be infinity
1541 if (!x_sign) { // x is +inf
1543 *pfpsf |= INVALID_EXCEPTION;
1544 // return Integer Indefinite
1546 } else { // x is -inf
1548 *pfpsf |= INVALID_EXCEPTION;
1549 // return Integer Indefinite
1555 // check for non-canonical values (after the check for special values)
1556 if ((C1.w[1] > 0x0001ed09bead87c0ull)
1557 || (C1.w[1] == 0x0001ed09bead87c0ull
1558 && (C1.w[0] > 0x378d8e63ffffffffull))
1559 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
1562 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
1566 } else { // x is not special and is not zero
1568 // q = nr. of decimal digits in x
1569 // determine first the nr. of bits in x
1571 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
1572 // split the 64-bit value in two 32-bit halves to avoid rounding errors
1573 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
1574 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
// bit count read out of the double's binary exponent field (bits 52..62)
1576 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1577 } else { // x < 2^32
1578 tmp1.d = (double) (C1.w[0]); // exact conversion
1580 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1582 } else { // if x < 2^53
1583 tmp1.d = (double) C1.w[0]; // exact conversion
1585 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1587 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
1588 tmp1.d = (double) C1.w[1]; // exact conversion
1590 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1592 q = __bid_nr_digits[x_nr_bits - 1].digits;
1594 q = __bid_nr_digits[x_nr_bits - 1].digits1;
1595 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
1596 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
1597 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
// unbiased decimal exponent: exponent field (shifted down 49) minus bias 6176
1600 exp = (x_exp >> 49) - 6176;
1601 if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
1603 *pfpsf |= INVALID_EXCEPTION;
1604 // return Integer Indefinite
1607 } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
1608 // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
1609 // so x rounded to an integer may or may not fit in a signed 32-bit int
1610 // the cases that do not fit are identified here; the ones that fit
1611 // fall through and will be handled with other cases further,
1612 // under '1 <= q + exp <= 10'
1613 if (x_sign) { // if n < 0 and q + exp = 10
1614 // if n <= -2^31-1 then n is too large
1615 // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31+1
1616 // <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a, 1<=q<=34
1618 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
1619 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
1620 if (tmp64 >= 0x50000000aull) {
1622 *pfpsf |= INVALID_EXCEPTION;
1623 // return Integer Indefinite
1627 // else cases that can be rounded to a 32-bit int fall through
1628 // to '1 <= q + exp <= 10'
1629 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
1630 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a <=>
1631 // C >= 0x50000000a * 10^(q-11) where 1 <= q - 11 <= 23
1632 // (scale 2^31+1 up)
1633 tmp64 = 0x50000000aull;
1634 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
1635 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
1636 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
1637 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
1639 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
1641 *pfpsf |= INVALID_EXCEPTION;
1642 // return Integer Indefinite
1646 // else cases that can be rounded to a 32-bit int fall through
1647 // to '1 <= q + exp <= 10'
1649 } else { // if n > 0 and q + exp = 10
1650 // if n > 2^31 - 1 then n is too large
1651 // too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^31 - 1
1652 // too large if 0.c(0)c(1)...c(q-1) * 10^11 > 0x4fffffff6, 1<=q<=34
1654 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
1655 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
1656 if (tmp64 > 0x4fffffff6ull) {
1658 *pfpsf |= INVALID_EXCEPTION;
1659 // return Integer Indefinite
1663 // else cases that can be rounded to a 32-bit int fall through
1664 // to '1 <= q + exp <= 10'
1665 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
1666 // 0.c(0)c(1)...c(q-1) * 10^11 > 0x4fffffff6 <=>
1667 // C > 0x4fffffff6 * 10^(q-11) where 1 <= q - 11 <= 23
1669 tmp64 = 0x4fffffff6ull;
1670 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
1671 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
1672 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
1673 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
1675 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
1677 *pfpsf |= INVALID_EXCEPTION;
1678 // return Integer Indefinite
1682 // else cases that can be rounded to a 32-bit int fall through
1683 // to '1 <= q + exp <= 10'
1687 // n is not too large to be converted to int32: -2^31-1 < n <= 2^31-1
1688 // Note: some of the cases tested for above fall through to this point
// NOTE(review): q + exp <= 0 means |x| < 1; the result store for this
// branch is among the elided lines.
1689 if ((q + exp) <= 0) {
1690 // n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1)
1697 } else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
1698 // -2^31-1 < x <= -1 or 1 <= x <= 2^31-1 so x can be rounded
1699 // toward positive infinity to a 32-bit signed integer
1700 if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
1701 ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
1702 // chop off ind digits from the lower part of C1
1703 // C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
1706 C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
1708 C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
1709 C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
// NOTE(review): carry propagation out of the low 64-bit word of C1;
// tmp64 presumably saved the pre-add C1.w[0] on an elided line — confirm
// against the full source.
1711 if (C1.w[0] < tmp64)
1713 // calculate C* and f*
1714 // C* is actually floor(C*) in this case
1715 // C* and f* need shifting and masking, as shown by
1716 // __bid_shiftright128[] and __bid_maskhigh128[]
1718 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
1719 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
1720 // the approximation of 10^(-x) was rounded up to 118 bits
1721 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
1722 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
1723 Cstar.w[1] = P256.w[3];
1724 Cstar.w[0] = P256.w[2];
1726 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
1727 fstar.w[1] = P256.w[1];
1728 fstar.w[0] = P256.w[0];
1729 } else { // 22 <= ind - 1 <= 33
1731 Cstar.w[0] = P256.w[3];
1732 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
1733 fstar.w[2] = P256.w[2];
1734 fstar.w[1] = P256.w[1];
1735 fstar.w[0] = P256.w[0];
1737 // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind], e.g.
1738 // if x=1, T*=__bid_ten2mk128trunc[0]=0x19999999999999999999999999999999
1739 // if (0 < f* < 10^(-x)) then the result is a midpoint
1740 // if floor(C*) is even then C* = floor(C*) - logical right
1741 // shift; C* has p decimal digits, correct by Prop. 1)
1742 // else if floor(C*) is odd C* = floor(C*)-1 (logical right
1743 // shift; C* has p decimal digits, correct by Pr. 1)
1745 // C* = floor(C*) (logical right shift; C has p decimal digits,
1746 // correct by Property 1)
1747 // n = C* * 10^(e+x)
1749 // shift right C* by Ex-128 = __bid_shiftright128[ind]
1750 shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 102
1751 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
// NOTE(review): the assignment target of this expression (Cstar.w[0] =)
// is on an elided line.
1753 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
1754 // redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
1755 } else { // 22 <= ind - 1 <= 33
1756 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
1758 // determine inexactness of the rounding of C*
1759 // if (0 < f* - 1/2 < 10^(-x)) then
1760 // the result is exact
1761 // else // if (f* - 1/2 > T*) then
1762 // the result is inexact
1764 if (fstar.w[1] > 0x8000000000000000ull ||
1765 (fstar.w[1] == 0x8000000000000000ull &&
1766 fstar.w[0] > 0x0ull)) { // f* > 1/2 and the result may be exact
1767 tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
1768 if ((tmp64 > __bid_ten2mk128trunc[ind - 1].w[1]
1769 || (tmp64 == __bid_ten2mk128trunc[ind - 1].w[1]
1770 && fstar.w[0] >= __bid_ten2mk128trunc[ind - 1].w[0]))) {
1771 is_inexact_lt_midpoint = 1;
1772 } // else the result is exact
1773 } else { // the result is inexact; f2* <= 1/2
1774 is_inexact_gt_midpoint = 1;
1776 } else if (ind - 1 <= 21) { // if 3 <= ind <= 21
1777 if (fstar.w[3] > 0x0 || (fstar.w[3] == 0x0
1778 && fstar.w[2] > __bid_one_half128[ind - 1]) || (fstar.w[3] == 0x0
1779 && fstar.w[2] == __bid_one_half128[ind - 1] && (fstar.w[1]
1781 // f2* > 1/2 and the result may be exact
1782 // Calculate f2* - 1/2
1783 tmp64 = fstar.w[2] - __bid_one_half128[ind - 1];
1784 tmp64A = fstar.w[3];
1785 if (tmp64 > fstar.w[2])
1788 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
1789 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
1790 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
1791 is_inexact_lt_midpoint = 1;
1792 } // else the result is exact
1793 } else { // the result is inexact; f2* <= 1/2
1794 is_inexact_gt_midpoint = 1;
1796 } else { // if 22 <= ind <= 33
1797 if (fstar.w[3] > __bid_one_half128[ind - 1]
1798 || (fstar.w[3] == __bid_one_half128[ind - 1] && (fstar.w[2]
1801 // f2* > 1/2 and the result may be exact
1802 // Calculate f2* - 1/2
1803 tmp64 = fstar.w[3] - __bid_one_half128[ind - 1];
1804 if (tmp64 || fstar.w[2]
1805 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
1806 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
1807 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
1808 is_inexact_lt_midpoint = 1;
1809 } // else the result is exact
1810 } else { // the result is inexact; f2* <= 1/2
1811 is_inexact_gt_midpoint = 1;
1815 // if the result was a midpoint it was rounded away from zero, so
1816 // it will need a correction
1817 // check for midpoints
1818 if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
1819 && (fstar.w[1] || fstar.w[0])
1820 && (fstar.w[1] < __bid_ten2mk128trunc[ind - 1].w[1]
1821 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
1822 && fstar.w[0] <= __bid_ten2mk128trunc[ind - 1].w[0]))) {
1823 // the result is a midpoint; round to nearest
1824 if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
1825 // if floor(C*) is odd C = floor(C*) - 1; the result >= 1
1826 Cstar.w[0]--; // Cstar.w[0] is now even
1827 is_midpoint_gt_even = 1;
1828 is_inexact_lt_midpoint = 0;
1829 is_inexact_gt_midpoint = 0;
1830 } else { // else MP in [ODD, EVEN]
1831 is_midpoint_lt_even = 1;
1832 is_inexact_lt_midpoint = 0;
1833 is_inexact_gt_midpoint = 0;
// general correction for RP (rounding toward +infinity) — the sign tests
// below are mirrored relative to the floor variant: negative values are
// pulled back toward zero, positive ones bumped up.  NOTE(review): the
// "RM" in the original label looks copied from the floor routine.
1836 // general correction for RM
1837 if (x_sign && (is_midpoint_lt_even || is_inexact_gt_midpoint)) {
1838 Cstar.w[0] = Cstar.w[0] - 1;
1840 && (is_midpoint_gt_even || is_inexact_lt_midpoint)) {
1841 Cstar.w[0] = Cstar.w[0] + 1;
1843 ; // the result is already correct
1849 } else if (exp == 0) {
1851 // res = +/-C (exact)
1856 } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
1857 // res = +/-C * 10^exp (exact)
1859 res = -C1.w[0] * __bid_ten2k64[exp];
1861 res = C1.w[0] * __bid_ten2k64[exp];
1868 /*****************************************************************************
1869 * BID128_to_int32_xceil
1870 ****************************************************************************/
1872 BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE(int, __bid128_to_int32_xceil, x)
1877 int exp; // unbiased exponent
1878 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
1879 UINT64 tmp64, tmp64A;
1880 BID_UI64DOUBLE tmp1;
1881 unsigned int x_nr_bits;
1884 UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
1887 int is_inexact_lt_midpoint = 0;
1888 int is_inexact_gt_midpoint = 0;
1889 int is_midpoint_lt_even = 0;
1890 int is_midpoint_gt_even = 0;
1893 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
1894 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
1895 C1.w[1] = x.w[1] & MASK_COEFF;
1898 // check for NaN or Infinity
1899 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
1901 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
1902 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
1904 *pfpsf |= INVALID_EXCEPTION;
1905 // return Integer Indefinite
1907 } else { // x is QNaN
1909 *pfpsf |= INVALID_EXCEPTION;
1910 // return Integer Indefinite
1914 } else { // x is not a NaN, so it must be infinity
1915 if (!x_sign) { // x is +inf
1917 *pfpsf |= INVALID_EXCEPTION;
1918 // return Integer Indefinite
1920 } else { // x is -inf
1922 *pfpsf |= INVALID_EXCEPTION;
1923 // return Integer Indefinite
1929 // check for non-canonical values (after the check for special values)
1930 if ((C1.w[1] > 0x0001ed09bead87c0ull)
1931 || (C1.w[1] == 0x0001ed09bead87c0ull
1932 && (C1.w[0] > 0x378d8e63ffffffffull))
1933 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
1936 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
1940 } else { // x is not special and is not zero
1942 // q = nr. of decimal digits in x
1943 // determine first the nr. of bits in x
1945 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
1946 // split the 64-bit value in two 32-bit halves to avoid rounding errors
1947 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
1948 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
1950 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1951 } else { // x < 2^32
1952 tmp1.d = (double) (C1.w[0]); // exact conversion
1954 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1956 } else { // if x < 2^53
1957 tmp1.d = (double) C1.w[0]; // exact conversion
1959 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1961 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
1962 tmp1.d = (double) C1.w[1]; // exact conversion
1964 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
1966 q = __bid_nr_digits[x_nr_bits - 1].digits;
1968 q = __bid_nr_digits[x_nr_bits - 1].digits1;
1969 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
1970 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
1971 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
1974 exp = (x_exp >> 49) - 6176;
1975 if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
1977 *pfpsf |= INVALID_EXCEPTION;
1978 // return Integer Indefinite
1981 } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
1982 // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
1983 // so x rounded to an integer may or may not fit in a signed 32-bit int
1984 // the cases that do not fit are identified here; the ones that fit
1985 // fall through and will be handled with other cases further,
1986 // under '1 <= q + exp <= 10'
1987 if (x_sign) { // if n < 0 and q + exp = 10
1988 // if n <= -2^31-1 then n is too large
1989 // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31+1
1990 // <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a, 1<=q<=34
1992 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
1993 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
1994 if (tmp64 >= 0x50000000aull) {
1996 *pfpsf |= INVALID_EXCEPTION;
1997 // return Integer Indefinite
2001 // else cases that can be rounded to a 32-bit int fall through
2002 // to '1 <= q + exp <= 10'
2003 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
2004 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a <=>
2005 // C >= 0x50000000a * 10^(q-11) where 1 <= q - 11 <= 23
2006 // (scale 2^31+1 up)
2007 tmp64 = 0x50000000aull;
2008 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
2009 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
2010 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
2011 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
2013 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
2015 *pfpsf |= INVALID_EXCEPTION;
2016 // return Integer Indefinite
2020 // else cases that can be rounded to a 32-bit int fall through
2021 // to '1 <= q + exp <= 10'
2023 } else { // if n > 0 and q + exp = 10
2024 // if n > 2^31 - 1 then n is too large
2025 // too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^31 - 1
2026 // too large if 0.c(0)c(1)...c(q-1) * 10^11 > 0x4fffffff6, 1<=q<=34
2028 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
2029 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
2030 if (tmp64 > 0x4fffffff6ull) {
2032 *pfpsf |= INVALID_EXCEPTION;
2033 // return Integer Indefinite
2037 // else cases that can be rounded to a 32-bit int fall through
2038 // to '1 <= q + exp <= 10'
2039 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
2040 // 0.c(0)c(1)...c(q-1) * 10^11 > 0x4fffffff6 <=>
2041 // C > 0x4fffffff6 * 10^(q-11) where 1 <= q - 11 <= 23
2043 tmp64 = 0x4fffffff6ull;
2044 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
2045 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
2046 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
2047 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
2049 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
2051 *pfpsf |= INVALID_EXCEPTION;
2052 // return Integer Indefinite
2056 // else cases that can be rounded to a 32-bit int fall through
2057 // to '1 <= q + exp <= 10'
2061 // n is not too large to be converted to int32: -2^31-1 < n <= 2^31-1
2062 // Note: some of the cases tested for above fall through to this point
2063 if ((q + exp) <= 0) {
2064 // n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1)
2066 *pfpsf |= INEXACT_EXCEPTION;
2073 } else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
2074 // -2^31-1 < x <= -1 or 1 <= x <= 2^31-1 so x can be rounded
2075 // toward positive infinity to a 32-bit signed integer
2076 if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
2077 ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
2078 // chop off ind digits from the lower part of C1
2079 // C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
2082 C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
2084 C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
2085 C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
2087 if (C1.w[0] < tmp64)
2089 // calculate C* and f*
2090 // C* is actually floor(C*) in this case
2091 // C* and f* need shifting and masking, as shown by
2092 // __bid_shiftright128[] and __bid_maskhigh128[]
2094 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
2095 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
2096 // the approximation of 10^(-x) was rounded up to 118 bits
2097 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
2098 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
2099 Cstar.w[1] = P256.w[3];
2100 Cstar.w[0] = P256.w[2];
2102 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
2103 fstar.w[1] = P256.w[1];
2104 fstar.w[0] = P256.w[0];
2105 } else { // 22 <= ind - 1 <= 33
2107 Cstar.w[0] = P256.w[3];
2108 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
2109 fstar.w[2] = P256.w[2];
2110 fstar.w[1] = P256.w[1];
2111 fstar.w[0] = P256.w[0];
2113 // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind], e.g.
2114 // if x=1, T*=__bid_ten2mk128trunc[0]=0x19999999999999999999999999999999
2115 // if (0 < f* < 10^(-x)) then the result is a midpoint
2116 // if floor(C*) is even then C* = floor(C*) - logical right
2117 // shift; C* has p decimal digits, correct by Prop. 1)
2118 // else if floor(C*) is odd C* = floor(C*)-1 (logical right
2119 // shift; C* has p decimal digits, correct by Pr. 1)
2121 // C* = floor(C*) (logical right shift; C has p decimal digits,
2122 // correct by Property 1)
2123 // n = C* * 10^(e+x)
2125 // shift right C* by Ex-128 = __bid_shiftright128[ind]
2126 shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 102
2127 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
2129 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
2130 // redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
2131 } else { // 22 <= ind - 1 <= 33
2132 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
2134 // determine inexactness of the rounding of C*
2135 // if (0 < f* - 1/2 < 10^(-x)) then
2136 // the result is exact
2137 // else // if (f* - 1/2 > T*) then
2138 // the result is inexact
2140 if (fstar.w[1] > 0x8000000000000000ull ||
2141 (fstar.w[1] == 0x8000000000000000ull &&
2142 fstar.w[0] > 0x0ull)) { // f* > 1/2 and the result may be exact
2143 tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
2144 if ((tmp64 > __bid_ten2mk128trunc[ind - 1].w[1]
2145 || (tmp64 == __bid_ten2mk128trunc[ind - 1].w[1]
2146 && fstar.w[0] >= __bid_ten2mk128trunc[ind - 1].w[0]))) {
2147 // set the inexact flag
2148 *pfpsf |= INEXACT_EXCEPTION;
2149 is_inexact_lt_midpoint = 1;
2150 } // else the result is exact
2151 } else { // the result is inexact; f2* <= 1/2
2152 // set the inexact flag
2153 *pfpsf |= INEXACT_EXCEPTION;
2154 is_inexact_gt_midpoint = 1;
2156 } else if (ind - 1 <= 21) { // if 3 <= ind <= 21
2157 if (fstar.w[3] > 0x0 || (fstar.w[3] == 0x0
2158 && fstar.w[2] > __bid_one_half128[ind - 1]) || (fstar.w[3] == 0x0
2159 && fstar.w[2] == __bid_one_half128[ind - 1] && (fstar.w[1]
2161 // f2* > 1/2 and the result may be exact
2162 // Calculate f2* - 1/2
2163 tmp64 = fstar.w[2] - __bid_one_half128[ind - 1];
2164 tmp64A = fstar.w[3];
2165 if (tmp64 > fstar.w[2])
2168 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
2169 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
2170 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
2171 // set the inexact flag
2172 *pfpsf |= INEXACT_EXCEPTION;
2173 is_inexact_lt_midpoint = 1;
2174 } // else the result is exact
2175 } else { // the result is inexact; f2* <= 1/2
2176 // set the inexact flag
2177 *pfpsf |= INEXACT_EXCEPTION;
2178 is_inexact_gt_midpoint = 1;
2180 } else { // if 22 <= ind <= 33
2181 if (fstar.w[3] > __bid_one_half128[ind - 1]
2182 || (fstar.w[3] == __bid_one_half128[ind - 1] && (fstar.w[2]
2185 // f2* > 1/2 and the result may be exact
2186 // Calculate f2* - 1/2
2187 tmp64 = fstar.w[3] - __bid_one_half128[ind - 1];
2188 if (tmp64 || fstar.w[2]
2189 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
2190 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
2191 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
2192 // set the inexact flag
2193 *pfpsf |= INEXACT_EXCEPTION;
2194 is_inexact_lt_midpoint = 1;
2195 } // else the result is exact
2196 } else { // the result is inexact; f2* <= 1/2
2197 // set the inexact flag
2198 *pfpsf |= INEXACT_EXCEPTION;
2199 is_inexact_gt_midpoint = 1;
2203 // if the result was a midpoint it was rounded away from zero, so
2204 // it will need a correction
2205 // check for midpoints
2206 if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
2207 && (fstar.w[1] || fstar.w[0])
2208 && (fstar.w[1] < __bid_ten2mk128trunc[ind - 1].w[1]
2209 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
2210 && fstar.w[0] <= __bid_ten2mk128trunc[ind - 1].w[0]))) {
2211 // the result is a midpoint; round to nearest
2212 if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
2213 // if floor(C*) is odd C = floor(C*) - 1; the result >= 1
2214 Cstar.w[0]--; // Cstar.w[0] is now even
2215 is_midpoint_gt_even = 1;
2216 is_inexact_lt_midpoint = 0;
2217 is_inexact_gt_midpoint = 0;
2218 } else { // else MP in [ODD, EVEN]
2219 is_midpoint_lt_even = 1;
2220 is_inexact_lt_midpoint = 0;
2221 is_inexact_gt_midpoint = 0;
2224 // general correction for RM
2225 if (x_sign && (is_midpoint_lt_even || is_inexact_gt_midpoint)) {
2226 Cstar.w[0] = Cstar.w[0] - 1;
2228 && (is_midpoint_gt_even || is_inexact_lt_midpoint)) {
2229 Cstar.w[0] = Cstar.w[0] + 1;
2231 ; // the result is already correct
2237 } else if (exp == 0) {
2239 // res = +/-C (exact)
2244 } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
2245 // res = +/-C * 10^exp (exact)
2247 res = -C1.w[0] * __bid_ten2k64[exp];
2249 res = C1.w[0] * __bid_ten2k64[exp];
2256 /*****************************************************************************
2257 * BID128_to_int32_int
2258 ****************************************************************************/
2260 BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE(int, __bid128_to_int32_int, x)
2265 int exp; // unbiased exponent
2266 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
2267 UINT64 tmp64, tmp64A;
2268 BID_UI64DOUBLE tmp1;
2269 unsigned int x_nr_bits;
2272 UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
2275 int is_inexact_gt_midpoint = 0;
2276 int is_midpoint_lt_even = 0;
2279 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
2280 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
2281 C1.w[1] = x.w[1] & MASK_COEFF;
2284 // check for NaN or Infinity
2285 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
2287 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
2288 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
2290 *pfpsf |= INVALID_EXCEPTION;
2291 // return Integer Indefinite
2293 } else { // x is QNaN
2295 *pfpsf |= INVALID_EXCEPTION;
2296 // return Integer Indefinite
2300 } else { // x is not a NaN, so it must be infinity
2301 if (!x_sign) { // x is +inf
2303 *pfpsf |= INVALID_EXCEPTION;
2304 // return Integer Indefinite
2306 } else { // x is -inf
2308 *pfpsf |= INVALID_EXCEPTION;
2309 // return Integer Indefinite
2315 // check for non-canonical values (after the check for special values)
2316 if ((C1.w[1] > 0x0001ed09bead87c0ull)
2317 || (C1.w[1] == 0x0001ed09bead87c0ull
2318 && (C1.w[0] > 0x378d8e63ffffffffull))
2319 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
2322 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
2326 } else { // x is not special and is not zero
2328 // q = nr. of decimal digits in x
2329 // determine first the nr. of bits in x
2331 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
2332 // split the 64-bit value in two 32-bit halves to avoid rounding errors
2333 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
2334 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
2336 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
2337 } else { // x < 2^32
2338 tmp1.d = (double) (C1.w[0]); // exact conversion
2340 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
2342 } else { // if x < 2^53
2343 tmp1.d = (double) C1.w[0]; // exact conversion
2345 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
2347 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
2348 tmp1.d = (double) C1.w[1]; // exact conversion
2350 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
2352 q = __bid_nr_digits[x_nr_bits - 1].digits;
2354 q = __bid_nr_digits[x_nr_bits - 1].digits1;
2355 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
2356 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
2357 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
2360 exp = (x_exp >> 49) - 6176;
2361 if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
2363 *pfpsf |= INVALID_EXCEPTION;
2364 // return Integer Indefinite
2367 } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
2368 // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
2369 // so x rounded to an integer may or may not fit in a signed 32-bit int
2370 // the cases that do not fit are identified here; the ones that fit
2371 // fall through and will be handled with other cases further,
2372 // under '1 <= q + exp <= 10'
2373 if (x_sign) { // if n < 0 and q + exp = 10
2374 // if n <= -2^31 - 1 then n is too large
2375 // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31+1
2376 // <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a, 1<=q<=34
2378 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
2379 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
2380 if (tmp64 >= 0x50000000aull) {
2382 *pfpsf |= INVALID_EXCEPTION;
2383 // return Integer Indefinite
2387 // else cases that can be rounded to a 32-bit int fall through
2388 // to '1 <= q + exp <= 10'
2389 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
2390 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a <=>
2391 // C >= 0x50000000a * 10^(q-11) where 1 <= q - 11 <= 23
2392 // (scale 2^31+1 up)
2393 tmp64 = 0x50000000aull;
2394 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
2395 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
2396 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
2397 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
2399 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
2401 *pfpsf |= INVALID_EXCEPTION;
2402 // return Integer Indefinite
2406 // else cases that can be rounded to a 32-bit int fall through
2407 // to '1 <= q + exp <= 10'
2409 } else { // if n > 0 and q + exp = 10
2410 // if n >= 2^31 then n is too large
2411 // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31
2412 // too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000, 1<=q<=34
2414 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
2415 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
2416 if (tmp64 >= 0x500000000ull) {
2418 *pfpsf |= INVALID_EXCEPTION;
2419 // return Integer Indefinite
2423 // else cases that can be rounded to a 32-bit int fall through
2424 // to '1 <= q + exp <= 10'
2425 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
2426 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000 <=>
2427 // C >= 0x500000000 * 10^(q-11) where 1 <= q - 11 <= 23
2428 // (scale 2^31-1/2 up)
2429 tmp64 = 0x500000000ull;
2430 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
2431 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
2432 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
2433 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
2435 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
2437 *pfpsf |= INVALID_EXCEPTION;
2438 // return Integer Indefinite
2442 // else cases that can be rounded to a 32-bit int fall through
2443 // to '1 <= q + exp <= 10'
2447 // n is not too large to be converted to int32: -2^31 - 1 < n < 2^31
2448 // Note: some of the cases tested for above fall through to this point
2449 if ((q + exp) <= 0) {
2450 // n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1)
2454 } else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
2455 // -2^31-1 < x <= -1 or 1 <= x < 2^31 so x can be rounded
2456 // toward zero to a 32-bit signed integer
2457 if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
2458 ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
2459 // chop off ind digits from the lower part of C1
2460 // C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
2463 C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
2465 C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
2466 C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
2468 if (C1.w[0] < tmp64)
2470 // calculate C* and f*
2471 // C* is actually floor(C*) in this case
2472 // C* and f* need shifting and masking, as shown by
2473 // __bid_shiftright128[] and __bid_maskhigh128[]
2475 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
2476 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
2477 // the approximation of 10^(-x) was rounded up to 118 bits
2478 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
2479 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
2480 Cstar.w[1] = P256.w[3];
2481 Cstar.w[0] = P256.w[2];
2483 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
2484 fstar.w[1] = P256.w[1];
2485 fstar.w[0] = P256.w[0];
2486 } else { // 22 <= ind - 1 <= 33
2488 Cstar.w[0] = P256.w[3];
2489 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
2490 fstar.w[2] = P256.w[2];
2491 fstar.w[1] = P256.w[1];
2492 fstar.w[0] = P256.w[0];
2494 // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind], e.g.
2495 // if x=1, T*=__bid_ten2mk128trunc[0]=0x19999999999999999999999999999999
2496 // if (0 < f* < 10^(-x)) then the result is a midpoint
2497 // if floor(C*) is even then C* = floor(C*) - logical right
2498 // shift; C* has p decimal digits, correct by Prop. 1)
2499 // else if floor(C*) is odd C* = floor(C*)-1 (logical right
2500 // shift; C* has p decimal digits, correct by Pr. 1)
2502 // C* = floor(C*) (logical right shift; C has p decimal digits,
2503 // correct by Property 1)
2504 // n = C* * 10^(e+x)
2506 // shift right C* by Ex-128 = __bid_shiftright128[ind]
2507 shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 102
2508 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
2510 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
2511 // redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
2512 } else { // 22 <= ind - 1 <= 33
2513 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
2515 // determine inexactness of the rounding of C*
2516 // if (0 < f* - 1/2 < 10^(-x)) then
2517 // the result is exact
2518 // else // if (f* - 1/2 > T*) then
2519 // the result is inexact
2521 if (fstar.w[1] > 0x8000000000000000ull ||
2522 (fstar.w[1] == 0x8000000000000000ull &&
2523 fstar.w[0] > 0x0ull)) { // f* > 1/2 and the result may be exact
2524 tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
2525 if ((tmp64 > __bid_ten2mk128trunc[ind - 1].w[1]
2526 || (tmp64 == __bid_ten2mk128trunc[ind - 1].w[1]
2527 && fstar.w[0] >= __bid_ten2mk128trunc[ind - 1].w[0]))) {
2528 } // else the result is exact
2529 } else { // the result is inexact; f2* <= 1/2
2530 is_inexact_gt_midpoint = 1;
2532 } else if (ind - 1 <= 21) { // if 3 <= ind <= 21
2533 if (fstar.w[3] > 0x0 || (fstar.w[3] == 0x0
2534 && fstar.w[2] > __bid_one_half128[ind - 1]) || (fstar.w[3] == 0x0
2535 && fstar.w[2] == __bid_one_half128[ind - 1] && (fstar.w[1]
2537 // f2* > 1/2 and the result may be exact
2538 // Calculate f2* - 1/2
2539 tmp64 = fstar.w[2] - __bid_one_half128[ind - 1];
2540 tmp64A = fstar.w[3];
2541 if (tmp64 > fstar.w[2])
2544 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
2545 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
2546 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
2547 } // else the result is exact
2548 } else { // the result is inexact; f2* <= 1/2
2549 is_inexact_gt_midpoint = 1;
2551 } else { // if 22 <= ind <= 33
2552 if (fstar.w[3] > __bid_one_half128[ind - 1]
2553 || (fstar.w[3] == __bid_one_half128[ind - 1] && (fstar.w[2]
2556 // f2* > 1/2 and the result may be exact
2557 // Calculate f2* - 1/2
2558 tmp64 = fstar.w[3] - __bid_one_half128[ind - 1];
2559 if (tmp64 || fstar.w[2]
2560 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
2561 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
2562 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
2563 } // else the result is exact
2564 } else { // the result is inexact; f2* <= 1/2
2565 is_inexact_gt_midpoint = 1;
2569 // if the result was a midpoint it was rounded away from zero, so
2570 // it will need a correction
2571 // check for midpoints
2572 if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
2573 && (fstar.w[1] || fstar.w[0])
2574 && (fstar.w[1] < __bid_ten2mk128trunc[ind - 1].w[1]
2575 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
2576 && fstar.w[0] <= __bid_ten2mk128trunc[ind - 1].w[0]))) {
2577 // the result is a midpoint; round to nearest
2578 if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
2579 // if floor(C*) is odd C = floor(C*) - 1; the result >= 1
2580 Cstar.w[0]--; // Cstar.w[0] is now even
2581 is_inexact_gt_midpoint = 0;
2582 } else { // else MP in [ODD, EVEN]
2583 is_midpoint_lt_even = 1;
2584 is_inexact_gt_midpoint = 0;
2587 // general correction for RZ
2588 if (is_midpoint_lt_even || is_inexact_gt_midpoint) {
2589 Cstar.w[0] = Cstar.w[0] - 1;
2591 ; // exact, the result is already correct
2597 } else if (exp == 0) {
2599 // res = +/-C (exact)
2604 } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
2605 // res = +/-C * 10^exp (exact)
2607 res = -C1.w[0] * __bid_ten2k64[exp];
2609 res = C1.w[0] * __bid_ten2k64[exp];
2616 /*****************************************************************************
2617 * BID128_to_int32_xint
2618 ****************************************************************************/
2620 BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE(int, __bid128_to_int32_xint, x)
2625 int exp; // unbiased exponent
2626 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
2627 UINT64 tmp64, tmp64A;
2628 BID_UI64DOUBLE tmp1;
2629 unsigned int x_nr_bits;
2632 UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
2635 int is_inexact_gt_midpoint = 0;
2636 int is_midpoint_lt_even = 0;
2639 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
2640 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
2641 C1.w[1] = x.w[1] & MASK_COEFF;
2644 // check for NaN or Infinity
2645 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
2647 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
2648 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
2650 *pfpsf |= INVALID_EXCEPTION;
2651 // return Integer Indefinite
2653 } else { // x is QNaN
2655 *pfpsf |= INVALID_EXCEPTION;
2656 // return Integer Indefinite
2660 } else { // x is not a NaN, so it must be infinity
2661 if (!x_sign) { // x is +inf
2663 *pfpsf |= INVALID_EXCEPTION;
2664 // return Integer Indefinite
2666 } else { // x is -inf
2668 *pfpsf |= INVALID_EXCEPTION;
2669 // return Integer Indefinite
2675 // check for non-canonical values (after the check for special values)
2676 if ((C1.w[1] > 0x0001ed09bead87c0ull)
2677 || (C1.w[1] == 0x0001ed09bead87c0ull
2678 && (C1.w[0] > 0x378d8e63ffffffffull))
2679 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
2682 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
2686 } else { // x is not special and is not zero
2688 // q = nr. of decimal digits in x
2689 // determine first the nr. of bits in x
2691 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
2692 // split the 64-bit value in two 32-bit halves to avoid rounding errors
2693 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
2694 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
2696 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
2697 } else { // x < 2^32
2698 tmp1.d = (double) (C1.w[0]); // exact conversion
2700 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
2702 } else { // if x < 2^53
2703 tmp1.d = (double) C1.w[0]; // exact conversion
2705 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
2707 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
2708 tmp1.d = (double) C1.w[1]; // exact conversion
2710 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
2712 q = __bid_nr_digits[x_nr_bits - 1].digits;
2714 q = __bid_nr_digits[x_nr_bits - 1].digits1;
2715 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
2716 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
2717 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
2720 exp = (x_exp >> 49) - 6176;
2721 if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
2723 *pfpsf |= INVALID_EXCEPTION;
2724 // return Integer Indefinite
2727 } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
2728 // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
2729 // so x rounded to an integer may or may not fit in a signed 32-bit int
2730 // the cases that do not fit are identified here; the ones that fit
2731 // fall through and will be handled with other cases further,
2732 // under '1 <= q + exp <= 10'
2733 if (x_sign) { // if n < 0 and q + exp = 10
2734 // if n <= -2^31 - 1 then n is too large
2735 // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31+1
2736 // <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a, 1<=q<=34
2738 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
2739 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
2740 if (tmp64 >= 0x50000000aull) {
2742 *pfpsf |= INVALID_EXCEPTION;
2743 // return Integer Indefinite
2747 // else cases that can be rounded to a 32-bit int fall through
2748 // to '1 <= q + exp <= 10'
2749 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
2750 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a <=>
2751 // C >= 0x50000000a * 10^(q-11) where 1 <= q - 11 <= 23
2752 // (scale 2^31+1 up)
2753 tmp64 = 0x50000000aull;
2754 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
2755 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
2756 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
2757 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
2759 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
2761 *pfpsf |= INVALID_EXCEPTION;
2762 // return Integer Indefinite
2766 // else cases that can be rounded to a 32-bit int fall through
2767 // to '1 <= q + exp <= 10'
2769 } else { // if n > 0 and q + exp = 10
2770 // if n >= 2^31 then n is too large
2771 // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31
2772 // too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000, 1<=q<=34
2774 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
2775 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
2776 if (tmp64 >= 0x500000000ull) {
2778 *pfpsf |= INVALID_EXCEPTION;
2779 // return Integer Indefinite
2783 // else cases that can be rounded to a 32-bit int fall through
2784 // to '1 <= q + exp <= 10'
2785 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
2786 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000 <=>
2787 // C >= 0x500000000 * 10^(q-11) where 1 <= q - 11 <= 23
2788 // (scale 2^31-1/2 up)
2789 tmp64 = 0x500000000ull;
2790 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
2791 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
2792 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
2793 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
2795 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
2797 *pfpsf |= INVALID_EXCEPTION;
2798 // return Integer Indefinite
2802 // else cases that can be rounded to a 32-bit int fall through
2803 // to '1 <= q + exp <= 10'
2807 // n is not too large to be converted to int32: -2^31 - 1 < n < 2^31
2808 // Note: some of the cases tested for above fall through to this point
2809 if ((q + exp) <= 0) {
2810 // n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1)
2812 *pfpsf |= INEXACT_EXCEPTION;
2816 } else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
2817 // -2^31-1 < x <= -1 or 1 <= x < 2^31 so x can be rounded
2818 // toward zero to a 32-bit signed integer
2819 if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
2820 ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
2821 // chop off ind digits from the lower part of C1
2822 // C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
2825 C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
2827 C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
2828 C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
2830 if (C1.w[0] < tmp64)
2832 // calculate C* and f*
2833 // C* is actually floor(C*) in this case
2834 // C* and f* need shifting and masking, as shown by
2835 // __bid_shiftright128[] and __bid_maskhigh128[]
2837 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
2838 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
2839 // the approximation of 10^(-x) was rounded up to 118 bits
2840 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
2841 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
2842 Cstar.w[1] = P256.w[3];
2843 Cstar.w[0] = P256.w[2];
2845 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
2846 fstar.w[1] = P256.w[1];
2847 fstar.w[0] = P256.w[0];
2848 } else { // 22 <= ind - 1 <= 33
2850 Cstar.w[0] = P256.w[3];
2851 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
2852 fstar.w[2] = P256.w[2];
2853 fstar.w[1] = P256.w[1];
2854 fstar.w[0] = P256.w[0];
// NOTE(review): this is the tail of __bid128_to_int32_rnint (convert
// BID128 -> int32, rounding to nearest-even). The function prologue, the
// midpoint addition C1 += 1/2 * 10^ind, and several interior lines are
// elided from this excerpt — do not edit without the full original.
2856 // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind], e.g.
2857 // if x=1, T*=__bid_ten2mk128trunc[0]=0x19999999999999999999999999999999
2858 // if (0 < f* < 10^(-x)) then the result is a midpoint
2859 // if floor(C*) is even then C* = floor(C*) - logical right
2860 // shift; C* has p decimal digits, correct by Prop. 1)
2861 // else if floor(C*) is odd C* = floor(C*)-1 (logical right
2862 // shift; C* has p decimal digits, correct by Pr. 1)
2864 // C* = floor(C*) (logical right shift; C has p decimal digits,
2865 // correct by Property 1)
2866 // n = C* * 10^(e+x)
2868 // shift right C* by Ex-128 = __bid_shiftright128[ind]
// C* currently carries extra fraction bits from the 128x128 product; the
// table-driven shift discards them, leaving floor(C*)
2869 shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 102
2870 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
2872 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
2873 // redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
2874 } else { // 22 <= ind - 1 <= 33
2875 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
2877 // determine inexactness of the rounding of C*
2878 // if (0 < f* - 1/2 < 10^(-x)) then
2879 // the result is exact
2880 // else // if (f* - 1/2 > T*) then
2881 // the result is inexact
// The three branches below test f* against 1/2 for the three table
// layouts (presumably ind-1 <= 2, 3..21, 22..33 — the opening guard for
// the first range is elided from this view; TODO confirm against original)
2883 if (fstar.w[1] > 0x8000000000000000ull ||
2884 (fstar.w[1] == 0x8000000000000000ull &&
2885 fstar.w[0] > 0x0ull)) { // f* > 1/2 and the result may be exact
2886 tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
2887 if ((tmp64 > __bid_ten2mk128trunc[ind - 1].w[1]
2888 || (tmp64 == __bid_ten2mk128trunc[ind - 1].w[1]
2889 && fstar.w[0] >= __bid_ten2mk128trunc[ind - 1].w[0]))) {
2890 // set the inexact flag
2891 *pfpsf |= INEXACT_EXCEPTION;
2892 } // else the result is exact
2893 } else { // the result is inexact; f2* <= 1/2
2894 // set the inexact flag
2895 *pfpsf |= INEXACT_EXCEPTION;
2896 is_inexact_gt_midpoint = 1;
2898 } else if (ind - 1 <= 21) { // if 3 <= ind <= 21
2899 if (fstar.w[3] > 0x0 || (fstar.w[3] == 0x0
2900 && fstar.w[2] > __bid_one_half128[ind - 1]) || (fstar.w[3] == 0x0
2901 && fstar.w[2] == __bid_one_half128[ind - 1] && (fstar.w[1]
2903 // f2* > 1/2 and the result may be exact
2904 // Calculate f2* - 1/2
2905 tmp64 = fstar.w[2] - __bid_one_half128[ind - 1];
2906 tmp64A = fstar.w[3];
// borrow out of the high word if the subtraction wrapped
2907 if (tmp64 > fstar.w[2])
2910 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
2911 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
2912 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
2913 // set the inexact flag
2914 *pfpsf |= INEXACT_EXCEPTION;
2915 } // else the result is exact
2916 } else { // the result is inexact; f2* <= 1/2
2917 // set the inexact flag
2918 *pfpsf |= INEXACT_EXCEPTION;
2919 is_inexact_gt_midpoint = 1;
2921 } else { // if 22 <= ind <= 33
2922 if (fstar.w[3] > __bid_one_half128[ind - 1]
2923 || (fstar.w[3] == __bid_one_half128[ind - 1] && (fstar.w[2]
2926 // f2* > 1/2 and the result may be exact
2927 // Calculate f2* - 1/2
2928 tmp64 = fstar.w[3] - __bid_one_half128[ind - 1];
2929 if (tmp64 || fstar.w[2]
2930 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
2931 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
2932 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
2933 // set the inexact flag
2934 *pfpsf |= INEXACT_EXCEPTION;
2935 } // else the result is exact
2936 } else { // the result is inexact; f2* <= 1/2
2937 // set the inexact flag
2938 *pfpsf |= INEXACT_EXCEPTION;
2939 is_inexact_gt_midpoint = 1;
2943 // if the result was a midpoint it was rounded away from zero, so
2944 // it will need a correction
2945 // check for midpoints
// A midpoint is detected when f* is nonzero but no larger than
// T* = __bid_ten2mk128trunc[ind-1], i.e. 0 < f* <= 10^(-x)
2946 if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
2947 && (fstar.w[1] || fstar.w[0])
2948 && (fstar.w[1] < __bid_ten2mk128trunc[ind - 1].w[1]
2949 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
2950 && fstar.w[0] <= __bid_ten2mk128trunc[ind - 1].w[0]))) {
2951 // the result is a midpoint; round to nearest
2952 if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
2953 // if floor(C*) is odd C = floor(C*) - 1; the result >= 1
2954 Cstar.w[0]--; // Cstar.w[0] is now even
2955 is_inexact_gt_midpoint = 0;
2956 } else { // else MP in [ODD, EVEN]
2957 is_midpoint_lt_even = 1;
2958 is_inexact_gt_midpoint = 0;
2961 // general correction for RZ
// Undo the away-from-zero pre-rounding when ties-to-even requires the
// smaller magnitude (midpoint below an even, or inexact past a midpoint)
2962 if (is_midpoint_lt_even || is_inexact_gt_midpoint) {
2963 Cstar.w[0] = Cstar.w[0] - 1;
2965 ; // exact, the result is already correct
2971 } else if (exp == 0) {
2973 // res = +/-C (exact)
2978 } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
2979 // res = +/-C * 10^exp (exact)
// q + exp <= 10 was established above, so C1.w[0] * 10^exp cannot
// exceed the int32 range screened for earlier
2981 res = -C1.w[0] * __bid_ten2k64[exp];
2983 res = C1.w[0] * __bid_ten2k64[exp];
2990 /*****************************************************************************
2991 * BID128_to_int32_rninta
2992 ****************************************************************************/
// Convert a 128-bit decimal (BID128) value x to a signed 32-bit integer,
// rounding to nearest with ties away from zero. NaN, infinity, and values
// outside the int32 range raise INVALID_EXCEPTION and return the Integer
// Indefinite value. Non-quiet rounding status is reported through *pfpsf.
// NOTE(review): several interior lines of this function (returns, braces,
// q adjustment) are elided from this excerpt.
2994 BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE(int, __bid128_to_int32_rninta, x)
2999 int exp; // unbiased exponent
3000 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
3002 BID_UI64DOUBLE tmp1;
3003 unsigned int x_nr_bits;
3006 UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
3010 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
3011 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
3012 C1.w[1] = x.w[1] & MASK_COEFF;
3015 // check for NaN or Infinity
3016 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
3018 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
3019 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
3021 *pfpsf |= INVALID_EXCEPTION;
3022 // return Integer Indefinite
3024 } else { // x is QNaN
3026 *pfpsf |= INVALID_EXCEPTION;
3027 // return Integer Indefinite
3031 } else { // x is not a NaN, so it must be infinity
3032 if (!x_sign) { // x is +inf
3034 *pfpsf |= INVALID_EXCEPTION;
3035 // return Integer Indefinite
3037 } else { // x is -inf
3039 *pfpsf |= INVALID_EXCEPTION;
3040 // return Integer Indefinite
3046 // check for non-canonical values (after the check for special values)
// 0x0001ed09bead87c0_378d8e63ffffffff is 10^34 - 1, the largest canonical
// BID128 coefficient; larger coefficients are treated as zero
3047 if ((C1.w[1] > 0x0001ed09bead87c0ull)
3048 || (C1.w[1] == 0x0001ed09bead87c0ull
3049 && (C1.w[0] > 0x378d8e63ffffffffull))
3050 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
3053 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
3057 } else { // x is not special and is not zero
3059 // q = nr. of decimal digits in x
3060 // determine first the nr. of bits in x
// The bit count is extracted from the biased exponent field of an exact
// double conversion; the digit count then comes from __bid_nr_digits[]
3062 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
3063 // split the 64-bit value in two 32-bit halves to avoid rounding errors
3064 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
3065 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
3067 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
3068 } else { // x < 2^32
3069 tmp1.d = (double) (C1.w[0]); // exact conversion
3071 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
3073 } else { // if x < 2^53
3074 tmp1.d = (double) C1.w[0]; // exact conversion
3076 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
3078 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
3079 tmp1.d = (double) C1.w[1]; // exact conversion
3081 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
3083 q = __bid_nr_digits[x_nr_bits - 1].digits;
// when the table entry is ambiguous, compare against the threshold to
// decide between digits1 and digits1 + 1 (adjustment line elided here)
3085 q = __bid_nr_digits[x_nr_bits - 1].digits1;
3086 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
3087 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
3088 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
3091 exp = (x_exp >> 49) - 6176;
// q + exp is the number of digits left of the decimal point; it decides
// whether n can possibly fit in a signed 32-bit integer
3092 if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
3094 *pfpsf |= INVALID_EXCEPTION;
3095 // return Integer Indefinite
3098 } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
3099 // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
3100 // so x rounded to an integer may or may not fit in a signed 32-bit int
3101 // the cases that do not fit are identified here; the ones that fit
3102 // fall through and will be handled with other cases further,
3103 // under '1 <= q + exp <= 10'
3104 if (x_sign) { // if n < 0 and q + exp = 10
3105 // if n <= -2^31 - 1/2 then n is too large
3106 // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31+1/2
3107 // <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000005, 1<=q<=34
3109 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
3110 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
// 0x500000005 = (2^31 + 1/2) * 10, i.e. 2^31+1/2 scaled to 11 digits
3111 if (tmp64 >= 0x500000005ull) {
3113 *pfpsf |= INVALID_EXCEPTION;
3114 // return Integer Indefinite
3118 // else cases that can be rounded to a 32-bit int fall through
3119 // to '1 <= q + exp <= 10'
3120 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
3121 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000005 <=>
3122 // C >= 0x500000005 * 10^(q-11) where 1 <= q - 11 <= 23
3123 // (scale 2^31+1/2 up)
3124 tmp64 = 0x500000005ull;
3125 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
3126 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
3127 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
// __bid_ten2k128[] starts at 10^20, hence the index (q - 11) - 20 = q - 31
3128 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
3130 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
3132 *pfpsf |= INVALID_EXCEPTION;
3133 // return Integer Indefinite
3137 // else cases that can be rounded to a 32-bit int fall through
3138 // to '1 <= q + exp <= 10'
3140 } else { // if n > 0 and q + exp = 10
3141 // if n >= 2^31 - 1/2 then n is too large
3142 // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31-1/2
3143 // too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb, 1<=q<=34
3145 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
3146 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
// 0x4fffffffb = (2^31 - 1/2) * 10, the positive-side cutoff
3147 if (tmp64 >= 0x4fffffffbull) {
3149 *pfpsf |= INVALID_EXCEPTION;
3150 // return Integer Indefinite
3154 // else cases that can be rounded to a 32-bit int fall through
3155 // to '1 <= q + exp <= 10'
3156 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
3157 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb <=>
3158 // C >= 0x4fffffffb * 10^(q-11) where 1 <= q - 11 <= 23
3159 // (scale 2^31-1/2 up)
3160 tmp64 = 0x4fffffffbull;
3161 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
3162 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
3163 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
3164 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
3166 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
3168 *pfpsf |= INVALID_EXCEPTION;
3169 // return Integer Indefinite
3173 // else cases that can be rounded to a 32-bit int fall through
3174 // to '1 <= q + exp <= 10'
3178 // n is not too large to be converted to int32: -2^31 - 1/2 < n < 2^31 - 1/2
3179 // Note: some of the cases tested for above fall through to this point
3180 if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
3184 } else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
3185 // if 0.c(0)c(1)...c(q-1) < 0.5 <=> c(0)c(1)...c(q-1) < 5 * 10^(q-1)
// compare the coefficient against 1/2 ulp (__bid_midpoint tables hold
// 5 * 10^ind); below the midpoint rounds to 0, at/above rounds to +/-1
3190 if (ind <= 18) { // 0 <= ind <= 18
3191 if ((C1.w[1] == 0) && (C1.w[0] < __bid_midpoint64[ind])) {
3192 res = 0x00000000; // return 0
3193 } else if (x_sign) { // n < 0
3194 res = 0xffffffff; // return -1
3196 res = 0x00000001; // return +1
3198 } else { // 19 <= ind <= 33
3199 if ((C1.w[1] < __bid_midpoint128[ind - 19].w[1])
3200 || ((C1.w[1] == __bid_midpoint128[ind - 19].w[1])
3201 && (C1.w[0] < __bid_midpoint128[ind - 19].w[0]))) {
3202 res = 0x00000000; // return 0
3203 } else if (x_sign) { // n < 0
3204 res = 0xffffffff; // return -1
3206 res = 0x00000001; // return +1
3209 } else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
3210 // -2^31-1/2 < x <= -1 or 1 <= x < 2^31-1/2 so x can be rounded
3211 // to nearest-away to a 32-bit signed integer
3212 if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
3213 ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
3214 // chop off ind digits from the lower part of C1
3215 // C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
// adding 1/2 ulp up front makes truncation below equivalent to
// rounding to nearest-away (ties land on the away side)
3218 C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
3220 C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
3221 C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
// propagate the carry out of the low word into the high word
3223 if (C1.w[0] < tmp64)
3225 // calculate C* and f*
3226 // C* is actually floor(C*) in this case
3227 // C* and f* need shifting and masking, as shown by
3228 // __bid_shiftright128[] and __bid_maskhigh128[]
3230 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
3231 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
3232 // the approximation of 10^(-x) was rounded up to 118 bits
3233 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
3234 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
3235 Cstar.w[1] = P256.w[3];
3236 Cstar.w[0] = P256.w[2];
3237 } else { // 22 <= ind - 1 <= 33
3239 Cstar.w[0] = P256.w[3];
3241 // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind], e.g.
3242 // if x=1, T*=__bid_ten2mk128trunc[0]=0x19999999999999999999999999999999
3243 // if (0 < f* < 10^(-x)) then the result is a midpoint
3244 // if floor(C*) is even then C* = floor(C*) - logical right
3245 // shift; C* has p decimal digits, correct by Prop. 1)
3246 // else if floor(C*) is odd C* = floor(C*)-1 (logical right
3247 // shift; C* has p decimal digits, correct by Pr. 1)
3249 // C* = floor(C*) (logical right shift; C has p decimal digits,
3250 // correct by Property 1)
3251 // n = C* * 10^(e+x)
3253 // shift right C* by Ex-128 = __bid_shiftright128[ind]
3254 shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 102
3255 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
3257 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
3258 // redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
3259 } else { // 22 <= ind - 1 <= 33
3260 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
3262 // if the result was a midpoint, it was already rounded away from zero
3267 // no need to check for midpoints - already rounded away from zero!
3268 } else if (exp == 0) {
3270 // res = +/-C (exact)
3275 } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
3276 // res = +/-C * 10^exp (exact)
// q + exp <= 10 (verified above) keeps the scaled value inside int32 range
3278 res = -C1.w[0] * __bid_ten2k64[exp];
3280 res = C1.w[0] * __bid_ten2k64[exp];
3287 /*****************************************************************************
3288 * BID128_to_int32_xrninta
3289 ****************************************************************************/
// Convert a 128-bit decimal (BID128) value x to a signed 32-bit integer,
// rounding to nearest with ties away from zero, and additionally signal
// INEXACT_EXCEPTION whenever the conversion is not exact (the 'x' prefix
// in the name). NaN, infinity, and out-of-range values raise
// INVALID_EXCEPTION and return the Integer Indefinite value.
// NOTE(review): interior lines and the end of this function are elided
// from this excerpt; the logic mirrors __bid128_to_int32_rninta above
// plus the inexactness determination on f*.
3291 BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE(int, __bid128_to_int32_xrninta, x)
3296 int exp; // unbiased exponent
3297 // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
3298 UINT64 tmp64, tmp64A;
3299 BID_UI64DOUBLE tmp1;
3300 unsigned int x_nr_bits;
3303 UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
3308 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
3309 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
3310 C1.w[1] = x.w[1] & MASK_COEFF;
3313 // check for NaN or Infinity
3314 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
3316 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
3317 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
3319 *pfpsf |= INVALID_EXCEPTION;
3320 // return Integer Indefinite
3322 } else { // x is QNaN
3324 *pfpsf |= INVALID_EXCEPTION;
3325 // return Integer Indefinite
3329 } else { // x is not a NaN, so it must be infinity
3330 if (!x_sign) { // x is +inf
3332 *pfpsf |= INVALID_EXCEPTION;
3333 // return Integer Indefinite
3335 } else { // x is -inf
3337 *pfpsf |= INVALID_EXCEPTION;
3338 // return Integer Indefinite
3344 // check for non-canonical values (after the check for special values)
// 0x0001ed09bead87c0_378d8e63ffffffff is 10^34 - 1, the largest canonical
// BID128 coefficient; larger coefficients are treated as zero
3345 if ((C1.w[1] > 0x0001ed09bead87c0ull)
3346 || (C1.w[1] == 0x0001ed09bead87c0ull
3347 && (C1.w[0] > 0x378d8e63ffffffffull))
3348 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
3351 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
3355 } else { // x is not special and is not zero
3357 // q = nr. of decimal digits in x
3358 // determine first the nr. of bits in x
// bit count is read off the biased exponent of an exact double conversion;
// digit count then comes from the __bid_nr_digits[] lookup table
3360 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
3361 // split the 64-bit value in two 32-bit halves to avoid rounding errors
3362 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
3363 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
3365 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
3366 } else { // x < 2^32
3367 tmp1.d = (double) (C1.w[0]); // exact conversion
3369 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
3371 } else { // if x < 2^53
3372 tmp1.d = (double) C1.w[0]; // exact conversion
3374 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
3376 } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
3377 tmp1.d = (double) C1.w[1]; // exact conversion
3379 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
3381 q = __bid_nr_digits[x_nr_bits - 1].digits;
// ambiguous entries are resolved by a threshold compare (adjustment elided)
3383 q = __bid_nr_digits[x_nr_bits - 1].digits1;
3384 if (C1.w[1] > __bid_nr_digits[x_nr_bits - 1].threshold_hi
3385 || (C1.w[1] == __bid_nr_digits[x_nr_bits - 1].threshold_hi
3386 && C1.w[0] >= __bid_nr_digits[x_nr_bits - 1].threshold_lo))
3389 exp = (x_exp >> 49) - 6176;
// q + exp is the count of digits left of the decimal point; it screens
// values that cannot fit in a signed 32-bit integer
3390 if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
3392 *pfpsf |= INVALID_EXCEPTION;
3393 // return Integer Indefinite
3396 } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
3397 // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
3398 // so x rounded to an integer may or may not fit in a signed 32-bit int
3399 // the cases that do not fit are identified here; the ones that fit
3400 // fall through and will be handled with other cases further,
3401 // under '1 <= q + exp <= 10'
3402 if (x_sign) { // if n < 0 and q + exp = 10
3403 // if n <= -2^31 - 1/2 then n is too large
3404 // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31+1/2
3405 // <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000005, 1<=q<=34
3407 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
3408 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
// 0x500000005 = (2^31 + 1/2) * 10, i.e. 2^31+1/2 scaled to 11 digits
3409 if (tmp64 >= 0x500000005ull) {
3411 *pfpsf |= INVALID_EXCEPTION;
3412 // return Integer Indefinite
3416 // else cases that can be rounded to a 32-bit int fall through
3417 // to '1 <= q + exp <= 10'
3418 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
3419 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000005 <=>
3420 // C >= 0x500000005 * 10^(q-11) where 1 <= q - 11 <= 23
3421 // (scale 2^31+1/2 up)
3422 tmp64 = 0x500000005ull;
3423 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
3424 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
3425 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
// __bid_ten2k128[] starts at 10^20, hence the index (q - 11) - 20 = q - 31
3426 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
3428 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
3430 *pfpsf |= INVALID_EXCEPTION;
3431 // return Integer Indefinite
3435 // else cases that can be rounded to a 32-bit int fall through
3436 // to '1 <= q + exp <= 10'
3438 } else { // if n > 0 and q + exp = 10
3439 // if n >= 2^31 - 1/2 then n is too large
3440 // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31-1/2
3441 // too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb, 1<=q<=34
3443 tmp64 = C1.w[0] * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
3444 // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
// 0x4fffffffb = (2^31 - 1/2) * 10, the positive-side cutoff
3445 if (tmp64 >= 0x4fffffffbull) {
3447 *pfpsf |= INVALID_EXCEPTION;
3448 // return Integer Indefinite
3452 // else cases that can be rounded to a 32-bit int fall through
3453 // to '1 <= q + exp <= 10'
3454 } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
3455 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb <=>
3456 // C >= 0x4fffffffb * 10^(q-11) where 1 <= q - 11 <= 23
3457 // (scale 2^31-1/2 up)
3458 tmp64 = 0x4fffffffbull;
3459 if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
3460 __mul_64x64_to_128MACH (C, tmp64, __bid_ten2k64[q - 11]);
3461 } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
3462 __mul_128x64_to_128 (C, tmp64, __bid_ten2k128[q - 31]);
3464 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
3466 *pfpsf |= INVALID_EXCEPTION;
3467 // return Integer Indefinite
3471 // else cases that can be rounded to a 32-bit int fall through
3472 // to '1 <= q + exp <= 10'
3476 // n is not too large to be converted to int32: -2^31 - 1/2 < n < 2^31 - 1/2
3477 // Note: some of the cases tested for above fall through to this point
3478 if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
// |n| < 1/2 here: result is 0 and always inexact
3480 *pfpsf |= INEXACT_EXCEPTION;
3484 } else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
3485 // if 0.c(0)c(1)...c(q-1) < 0.5 <=> c(0)c(1)...c(q-1) < 5 * 10^(q-1)
// compare the coefficient against 1/2 ulp (__bid_midpoint tables hold
// 5 * 10^ind); below the midpoint rounds to 0, at/above rounds to +/-1
3490 if (ind <= 18) { // 0 <= ind <= 18
3491 if ((C1.w[1] == 0) && (C1.w[0] < __bid_midpoint64[ind])) {
3492 res = 0x00000000; // return 0
3493 } else if (x_sign) { // n < 0
3494 res = 0xffffffff; // return -1
3496 res = 0x00000001; // return +1
3498 } else { // 19 <= ind <= 33
3499 if ((C1.w[1] < __bid_midpoint128[ind - 19].w[1])
3500 || ((C1.w[1] == __bid_midpoint128[ind - 19].w[1])
3501 && (C1.w[0] < __bid_midpoint128[ind - 19].w[0]))) {
3502 res = 0x00000000; // return 0
3503 } else if (x_sign) { // n < 0
3504 res = 0xffffffff; // return -1
3506 res = 0x00000001; // return +1
// x was nonzero with no integer part, so this case is always inexact
3510 *pfpsf |= INEXACT_EXCEPTION;
3511 } else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
3512 // -2^31-1/2 < x <= -1 or 1 <= x < 2^31-1/2 so x can be rounded
3513 // to nearest-away to a 32-bit signed integer
3514 if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
3515 ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
3516 // chop off ind digits from the lower part of C1
3517 // C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
// adding 1/2 ulp up front makes the truncation below equivalent to
// rounding to nearest-away (ties land on the away side)
3520 C1.w[0] = C1.w[0] + __bid_midpoint64[ind - 1];
3522 C1.w[0] = C1.w[0] + __bid_midpoint128[ind - 20].w[0];
3523 C1.w[1] = C1.w[1] + __bid_midpoint128[ind - 20].w[1];
// propagate the carry out of the low word into the high word
3525 if (C1.w[0] < tmp64)
3527 // calculate C* and f*
3528 // C* is actually floor(C*) in this case
3529 // C* and f* need shifting and masking, as shown by
3530 // __bid_shiftright128[] and __bid_maskhigh128[]
3532 // kx = 10^(-x) = __bid_ten2mk128[ind - 1]
3533 // C* = (C1 + 1/2 * 10^x) * 10^(-x)
3534 // the approximation of 10^(-x) was rounded up to 118 bits
3535 __mul_128x128_to_256 (P256, C1, __bid_ten2mk128[ind - 1]);
// split the 256-bit product into integer part C* and fraction f*,
// using the table-dependent word layout
3536 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
3537 Cstar.w[1] = P256.w[3];
3538 Cstar.w[0] = P256.w[2];
3540 fstar.w[2] = P256.w[2] & __bid_maskhigh128[ind - 1];
3541 fstar.w[1] = P256.w[1];
3542 fstar.w[0] = P256.w[0];
3543 } else { // 22 <= ind - 1 <= 33
3545 Cstar.w[0] = P256.w[3];
3546 fstar.w[3] = P256.w[3] & __bid_maskhigh128[ind - 1];
3547 fstar.w[2] = P256.w[2];
3548 fstar.w[1] = P256.w[1];
3549 fstar.w[0] = P256.w[0];
3551 // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind], e.g.
3552 // if x=1, T*=__bid_ten2mk128trunc[0]=0x19999999999999999999999999999999
3553 // if (0 < f* < 10^(-x)) then the result is a midpoint
3554 // if floor(C*) is even then C* = floor(C*) - logical right
3555 // shift; C* has p decimal digits, correct by Prop. 1)
3556 // else if floor(C*) is odd C* = floor(C*)-1 (logical right
3557 // shift; C* has p decimal digits, correct by Pr. 1)
3559 // C* = floor(C*) (logical right shift; C has p decimal digits,
3560 // correct by Property 1)
3561 // n = C* * 10^(e+x)
3563 // shift right C* by Ex-128 = __bid_shiftright128[ind]
3564 shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 102
3565 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
3567 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
3568 // redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
3569 } else { // 22 <= ind - 1 <= 33
3570 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
3572 // if the result was a midpoint, it was already rounded away from zero
3577 // determine inexactness of the rounding of C*
3578 // if (0 < f* - 1/2 < 10^(-x)) then
3579 // the result is exact
3580 // else // if (f* - 1/2 > T*) then
3581 // the result is inexact
// The three branches below test f* against 1/2 for the three table
// layouts (presumably ind-1 <= 2, 3..21, 22..33 — the opening guard for
// the first range is elided from this view; TODO confirm against original)
3583 if (fstar.w[1] > 0x8000000000000000ull ||
3584 (fstar.w[1] == 0x8000000000000000ull &&
3585 fstar.w[0] > 0x0ull)) { // f* > 1/2 and the result may be exact
3586 tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
3587 if ((tmp64 > __bid_ten2mk128trunc[ind - 1].w[1]
3588 || (tmp64 == __bid_ten2mk128trunc[ind - 1].w[1]
3589 && fstar.w[0] >= __bid_ten2mk128trunc[ind - 1].w[0]))) {
3590 // set the inexact flag
3591 *pfpsf |= INEXACT_EXCEPTION;
3592 } // else the result is exact
3593 } else { // the result is inexact; f2* <= 1/2
3594 // set the inexact flag
3595 *pfpsf |= INEXACT_EXCEPTION;
3597 } else if (ind - 1 <= 21) { // if 3 <= ind <= 21
3598 if (fstar.w[3] > 0x0 || (fstar.w[3] == 0x0
3599 && fstar.w[2] > __bid_one_half128[ind - 1]) || (fstar.w[3] == 0x0
3600 && fstar.w[2] == __bid_one_half128[ind - 1] && (fstar.w[1]
3602 // f2* > 1/2 and the result may be exact
3603 // Calculate f2* - 1/2
3604 tmp64 = fstar.w[2] - __bid_one_half128[ind - 1];
3605 tmp64A = fstar.w[3];
// borrow out of the high word if the subtraction wrapped
3606 if (tmp64 > fstar.w[2])
3609 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
3610 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
3611 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
3612 // set the inexact flag
3613 *pfpsf |= INEXACT_EXCEPTION;
3614 } // else the result is exact
3615 } else { // the result is inexact; f2* <= 1/2
3616 // set the inexact flag
3617 *pfpsf |= INEXACT_EXCEPTION;
3619 } else { // if 22 <= ind <= 33
3620 if (fstar.w[3] > __bid_one_half128[ind - 1]
3621 || (fstar.w[3] == __bid_one_half128[ind - 1] && (fstar.w[2]
3624 // f2* > 1/2 and the result may be exact
3625 // Calculate f2* - 1/2
3626 tmp64 = fstar.w[3] - __bid_one_half128[ind - 1];
3627 if (tmp64 || fstar.w[2]
3628 || fstar.w[1] > __bid_ten2mk128trunc[ind - 1].w[1]
3629 || (fstar.w[1] == __bid_ten2mk128trunc[ind - 1].w[1]
3630 && fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[0])) {
3631 // set the inexact flag
3632 *pfpsf |= INEXACT_EXCEPTION;
3633 } // else the result is exact
3634 } else { // the result is inexact; f2* <= 1/2
3635 // set the inexact flag
3636 *pfpsf |= INEXACT_EXCEPTION;
3639 // no need to check for midpoints - already rounded away from zero!
3640 } else if (exp == 0) {
3642 // res = +/-C (exact)
3647 } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
3648 // res = +/-C * 10^exp (exact)
// q + exp <= 10 (verified above) keeps the scaled value inside int32 range
3650 res = -C1.w[0] * __bid_ten2k64[exp];
3652 res = C1.w[0] * __bid_ten2k64[exp];