/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
   Copyright (C) 1991, 1992 Free Software Foundation, Inc.

   This definition file is free software; you can redistribute it
   and/or modify it under the terms of the GNU General Public
   License as published by the Free Software Foundation; either
   version 2, or (at your option) any later version.

   This definition file is distributed in the hope that it will be
   useful, but WITHOUT ANY WARRANTY; without even the implied
   warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   See the GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  */
#ifndef SI_TYPE_SIZE
#define SI_TYPE_SIZE 32
#endif

#define __BITS4 (SI_TYPE_SIZE / 4)
#define __ll_B (1L << (SI_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
#define __ll_highpart(t) ((USItype) (t) / __ll_B)
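
/* For example, with SI_TYPE_SIZE == 32 these split a word at bit 16:
   __ll_B is 0x10000, so __ll_lowpart (0x12345678) is 0x5678 and
   __ll_highpart (0x12345678) is 0x1234.  */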
/* Define auxiliary asm macros.

   1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand)
   multiplies two USItype integers MULTIPLIER and MULTIPLICAND,
   and generates a two-part USItype product in HIGH_PROD and
   LOW_PROD.

   2) __umulsidi3(a,b) multiplies two USItype integers A and B,
   and returns a UDItype product.  This is just a variant of umul_ppmm.

   3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator) divides a two-word unsigned integer, composed of the
   integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
   places the quotient in QUOTIENT and the remainder in REMAINDER.
   HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
   If the macro in addition requires the most significant bit of
   DENOMINATOR to be 1, the pre-processor symbol
   UDIV_NEEDS_NORMALIZATION is defined to 1.

   4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator).  Like udiv_qrnnd but the numbers are signed.  The
   quotient is rounded towards 0.

   5) count_leading_zeros(count, x) counts the number of zero-bits from
   the msb to the first non-zero bit.  This is the number of steps X
   needs to be shifted left to set the msb.  Undefined for X == 0.

   6) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
   high_addend_2, low_addend_2) adds two two-word unsigned integers,
   composed of HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
   LOW_ADDEND_2 respectively.  The result is placed in HIGH_SUM and
   LOW_SUM.  Overflow (i.e. carry out) is not stored anywhere, and is
   lost.

   7) sub_ddmmss(high_difference, low_difference, high_minuend,
   low_minuend, high_subtrahend, low_subtrahend) subtracts two
   two-word unsigned integers, composed of HIGH_MINUEND and
   LOW_MINUEND, and HIGH_SUBTRAHEND and LOW_SUBTRAHEND
   respectively.  The result is placed in HIGH_DIFFERENCE and
   LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere,
   and is lost.

   If any of these macros are left undefined for a particular CPU,
   C macros are used.  (A usage sketch for the division macros
   follows this comment.)  */
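
/* For illustration only: a hypothetical helper (not part of the macro
   set above) showing how count_leading_zeros and udiv_qrnnd typically
   compose.  When UDIV_NEEDS_NORMALIZATION is 1 the divisor must be
   shifted left until its msb is set, and the remainder shifted back.  */
#if 0
static USItype
__example_udiv (USItype n1, USItype n0, USItype d, USItype *rp)
{
  USItype q, r, bm;

  count_leading_zeros (bm, d);
  if (bm != 0)
    {
      d <<= bm;			/* most significant bit of d is now set */
      n1 = (n1 << bm) | (n0 >> (SI_TYPE_SIZE - bm));
      n0 <<= bm;
    }
  udiv_qrnnd (q, r, n1, n0, d);	/* requires n1 < d */
  *rp = r >> bm;		/* undo the normalization */
  return q;
}
#endif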
/* The CPUs come in alphabetical order below.

   Please add support for more CPUs here, or improve the current support
   for the CPUs below!
   (E.g. WE32100, i960, IBM360.)  */
#if defined (__GNUC__) && !defined (NO_ASM)

/* We sometimes need to clobber "cc" with gcc2, but that would not be
   understood by gcc1.  Use cpp to avoid major code duplication.  */
#if __GNUC__ < 2
#define __CLOBBER_CC
#define __AND_CLOBBER_CC
#else /* __GNUC__ >= 2 */
#define __CLOBBER_CC : "cc"
#define __AND_CLOBBER_CC , "cc"
#endif /* __GNUC__ < 2 */
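
/* E.g. a clobber list written as  : "%g1" __AND_CLOBBER_CC  expands to
   : "%g1", "cc"  under gcc2, and to just  : "%g1"  under gcc1.  */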
#if defined (__a29k__) || defined (___AM29K__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %1,%4,%5
	addc %0,%2,%3"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "%r" ((USItype)(ah)),					\
	     "rI" ((USItype)(bh)),					\
	     "%r" ((USItype)(al)),					\
	     "rI" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %1,%4,%5
	subc %0,%2,%3"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "r" ((USItype)(ah)),					\
	     "rI" ((USItype)(bh)),					\
	     "r" ((USItype)(al)),					\
	     "rI" ((USItype)(bl)))
#define umul_ppmm(xh, xl, m0, m1) \
  do {									\
    USItype __m0 = (m0), __m1 = (m1);					\
    __asm__ ("multiplu %0,%1,%2"					\
	     : "=r" ((USItype)(xl))					\
	     : "r" (__m0),						\
	       "r" (__m1));						\
    __asm__ ("multmu %0,%1,%2"						\
	     : "=r" ((USItype)(xh))					\
	     : "r" (__m0),						\
	       "r" (__m1));						\
  } while (0)
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("dividu %0,%3,%4"						\
	   : "=r" ((USItype)(q)),					\
	     "=q" ((USItype)(r))					\
	   : "1" ((USItype)(n1)),					\
	     "r" ((USItype)(n0)),					\
	     "r" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("clz %0,%1"							\
	   : "=r" ((USItype)(count))					\
	   : "r" ((USItype)(x)))
#endif /* __a29k__ */
#if defined (__arm__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("adds %1,%4,%5
	adc %0,%2,%3"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "%r" ((USItype)(ah)),					\
	     "rI" ((USItype)(bh)),					\
	     "%r" ((USItype)(al)),					\
	     "rI" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subs %1,%4,%5
	sbc %0,%2,%3"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "r" ((USItype)(ah)),					\
	     "rI" ((USItype)(bh)),					\
	     "r" ((USItype)(al)),					\
	     "rI" ((USItype)(bl)))
#endif /* __arm__ */
#if defined (__gmicro__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add.w %5,%1
	addx %3,%0"							\
	   : "=g" ((USItype)(sh)),					\
	     "=&g" ((USItype)(sl))					\
	   : "%0" ((USItype)(ah)),					\
	     "g" ((USItype)(bh)),					\
	     "%1" ((USItype)(al)),					\
	     "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub.w %5,%1
	subx %3,%0"							\
	   : "=g" ((USItype)(sh)),					\
	     "=&g" ((USItype)(sl))					\
	   : "0" ((USItype)(ah)),					\
	     "g" ((USItype)(bh)),					\
	     "1" ((USItype)(al)),					\
	     "g" ((USItype)(bl)))
#define umul_ppmm(ph, pl, m0, m1) \
  __asm__ ("mulx %3,%0,%1"						\
	   : "=g" ((USItype)(ph)),					\
	     "=r" ((USItype)(pl))					\
	   : "%0" ((USItype)(m0)),					\
	     "g" ((USItype)(m1)))
#define udiv_qrnnd(q, r, nh, nl, d) \
  __asm__ ("divx %4,%0,%1"						\
	   : "=g" ((USItype)(q)),					\
	     "=r" ((USItype)(r))					\
	   : "1" ((USItype)(nh)),					\
	     "0" ((USItype)(nl)),					\
	     "g" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("bsch/1 %1,%0"						\
	   : "=g" (count)						\
	   : "g" ((USItype)(x)),					\
	     "0" ((USItype)0))
#endif /* __gmicro__ */
#if defined (__hppa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %4,%5,%1
	addc %2,%3,%0"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "%rM" ((USItype)(ah)),					\
	     "rM" ((USItype)(bh)),					\
	     "%rM" ((USItype)(al)),					\
	     "rM" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %4,%5,%1
	subb %2,%3,%0"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "rM" ((USItype)(ah)),					\
	     "rM" ((USItype)(bh)),					\
	     "rM" ((USItype)(al)),					\
	     "rM" ((USItype)(bl)))
#if defined (_PA_RISC1_1)
#define umul_ppmm(w1, w0, u, v) \
  do {									\
    union								\
      {									\
	UDItype __f;							\
	struct {USItype __w1, __w0;} __w1w0;				\
      } __t;								\
    __asm__ ("xmpyu %1,%2,%0"						\
	     : "=x" (__t.__f)						\
	     : "x" ((USItype)(u)),					\
	       "x" ((USItype)(v)));					\
    (w1) = __t.__w1w0.__w1;						\
    (w0) = __t.__w1w0.__w0;						\
  } while (0)
#define UMUL_TIME 8
#define UDIV_TIME 60
#else
#define UMUL_TIME 40
#define UDIV_TIME 80
#endif /* _PA_RISC1_1 */
#endif /* __hppa */
#if defined (__i386__) || defined (__i486__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl %5,%1
	adcl %3,%0"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "%0" ((USItype)(ah)),					\
	     "g" ((USItype)(bh)),					\
	     "%1" ((USItype)(al)),					\
	     "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl %5,%1
	sbbl %3,%0"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "0" ((USItype)(ah)),					\
	     "g" ((USItype)(bh)),					\
	     "1" ((USItype)(al)),					\
	     "g" ((USItype)(bl)))
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mull %3"							\
	   : "=a" ((USItype)(w0)),					\
	     "=d" ((USItype)(w1))					\
	   : "%0" ((USItype)(u)),					\
	     "rm" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divl %4"							\
	   : "=a" ((USItype)(q)),					\
	     "=d" ((USItype)(r))					\
	   : "0" ((USItype)(n0)),					\
	     "1" ((USItype)(n1)),					\
	     "rm" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  do {									\
    USItype __cbtmp;							\
    __asm__ ("bsrl %1,%0"						\
	     : "=r" (__cbtmp) : "rm" ((USItype)(x)));			\
    (count) = __cbtmp ^ 31;						\
  } while (0)
#define UMUL_TIME 40
#define UDIV_TIME 40
#endif /* 80x86 */
#if defined (__i860__)
#if 0
/* Make sure these patterns really improve the code before
   switching them on.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do {									\
    union								\
      {									\
	DItype __ll;							\
	struct {USItype __l, __h;} __i;					\
      } __a, __b, __s;							\
    __a.__i.__l = (al);							\
    __a.__i.__h = (ah);							\
    __b.__i.__l = (bl);							\
    __b.__i.__h = (bh);							\
    __asm__ ("fiadd.dd %1,%2,%0"					\
	     : "=f" (__s.__ll)						\
	     : "%f" (__a.__ll), "f" (__b.__ll));			\
    (sh) = __s.__i.__h;							\
    (sl) = __s.__i.__l;							\
  } while (0)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do {									\
    union								\
      {									\
	DItype __ll;							\
	struct {USItype __l, __h;} __i;					\
      } __a, __b, __s;							\
    __a.__i.__l = (al);							\
    __a.__i.__h = (ah);							\
    __b.__i.__l = (bl);							\
    __b.__i.__h = (bh);							\
    __asm__ ("fisub.dd %1,%2,%0"					\
	     : "=f" (__s.__ll)						\
	     : "%f" (__a.__ll), "f" (__b.__ll));			\
    (sh) = __s.__i.__h;							\
    (sl) = __s.__i.__l;							\
  } while (0)
#endif
#endif /* __i860__ */
#if defined (___IBMR2__) /* IBM RS6000 */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("a%I5 %1,%4,%5
	ae %0,%2,%3"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "%r" ((USItype)(ah)),					\
	     "r" ((USItype)(bh)),					\
	     "%r" ((USItype)(al)),					\
	     "rI" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sf%I4 %1,%5,%4
	sfe %0,%3,%2"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "r" ((USItype)(ah)),					\
	     "r" ((USItype)(bh)),					\
	     "rI" ((USItype)(al)),					\
	     "r" ((USItype)(bl)))
#define umul_ppmm(xh, xl, m0, m1) \
  do {									\
    USItype __m0 = (m0), __m1 = (m1);					\
    __asm__ ("mul %0,%2,%3"						\
	     : "=r" ((USItype)(xh)),					\
	       "=q" ((USItype)(xl))					\
	     : "r" (__m0),						\
	       "r" (__m1));						\
    (xh) += ((((SItype) __m0 >> 31) & __m1)				\
	     + (((SItype) __m1 >> 31) & __m0));				\
  } while (0)
#define smul_ppmm(xh, xl, m0, m1) \
  __asm__ ("mul %0,%2,%3"						\
	   : "=r" ((USItype)(xh)),					\
	     "=q" ((USItype)(xl))					\
	   : "r" ((USItype)(m0)),					\
	     "r" ((USItype)(m1)))
#define sdiv_qrnnd(q, r, nh, nl, d) \
  __asm__ ("div %0,%2,%4"						\
	   : "=r" ((USItype)(q)), "=q" ((USItype)(r))			\
	   : "r" ((USItype)(nh)), "1" ((USItype)(nl)), "r" ((USItype)(d)))
#define UDIV_NEEDS_NORMALIZATION 1
#define count_leading_zeros(count, x) \
  __asm__ ("cntlz %0,%1"						\
	   : "=r" ((USItype)(count))					\
	   : "r" ((USItype)(x)))
#endif /* ___IBMR2__ */
#if defined (__mc68000__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add%.l %5,%1
	addx%.l %3,%0"							\
	   : "=d" ((USItype)(sh)),					\
	     "=&d" ((USItype)(sl))					\
	   : "%0" ((USItype)(ah)),					\
	     "d" ((USItype)(bh)),					\
	     "%1" ((USItype)(al)),					\
	     "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub%.l %5,%1
	subx%.l %3,%0"							\
	   : "=d" ((USItype)(sh)),					\
	     "=&d" ((USItype)(sl))					\
	   : "0" ((USItype)(ah)),					\
	     "d" ((USItype)(bh)),					\
	     "1" ((USItype)(al)),					\
	     "g" ((USItype)(bl)))
#if defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mulu%.l %3,%1:%0"						\
	   : "=d" ((USItype)(w0)),					\
	     "=d" ((USItype)(w1))					\
	   : "%0" ((USItype)(u)),					\
	     "dmi" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divu%.l %4,%1:%0"						\
	   : "=d" ((USItype)(q)),					\
	     "=d" ((USItype)(r))					\
	   : "0" ((USItype)(n0)),					\
	     "1" ((USItype)(n1)),					\
	     "dmi" ((USItype)(d)))
#define sdiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divs%.l %4,%1:%0"						\
	   : "=d" ((USItype)(q)),					\
	     "=d" ((USItype)(r))					\
	   : "0" ((USItype)(n0)),					\
	     "1" ((USItype)(n1)),					\
	     "dmi" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("bfffo %1{%b2:%b2},%0"					\
	   : "=d" ((USItype)(count))					\
	   : "od" ((USItype)(x)), "n" (0))
#else /* not mc68020 */
/* %/ inserts REGISTER_PREFIX.  */
#define umul_ppmm(xh, xl, a, b) \
  __asm__ ("| Inlined umul_ppmm
	move%.l	%2,%/d0
	move%.l	%3,%/d1
	move%.l	%/d0,%/d2
	swap	%/d0
	move%.l	%/d1,%/d3
	swap	%/d1
	move%.w	%/d2,%/d4
	mulu	%/d3,%/d4
	mulu	%/d1,%/d2
	mulu	%/d0,%/d3
	mulu	%/d0,%/d1
	move%.l	%/d4,%/d0
	eor%.w	%/d0,%/d0
	swap	%/d0
	add%.l	%/d0,%/d2
	add%.l	%/d3,%/d2
	jcc	1f
	add%.l	%#65536,%/d1
1:	swap	%/d2
	moveq	%#0,%/d0
	move%.w	%/d2,%/d0
	move%.w	%/d4,%/d2
	move%.l	%/d2,%1
	add%.l	%/d1,%/d0
	move%.l	%/d0,%0"						\
	   : "=g" ((USItype)(xh)),					\
	     "=g" ((USItype)(xl))					\
	   : "g" ((USItype)(a)),					\
	     "g" ((USItype)(b))						\
	   : "d0", "d1", "d2", "d3", "d4")
#define UMUL_TIME 100
#define UDIV_TIME 400
#endif /* not mc68020 */
#endif /* mc68000 */
#if defined (__m88000__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addu.co %1,%r4,%r5
	addu.ci %0,%r2,%r3"						\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "%rJ" ((USItype)(ah)),					\
	     "rJ" ((USItype)(bh)),					\
	     "%rJ" ((USItype)(al)),					\
	     "rJ" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subu.co %1,%r4,%r5
	subu.ci %0,%r2,%r3"						\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "rJ" ((USItype)(ah)),					\
	     "rJ" ((USItype)(bh)),					\
	     "rJ" ((USItype)(al)),					\
	     "rJ" ((USItype)(bl)))
#define UMUL_TIME 17
#define UDIV_TIME 150
#define count_leading_zeros(count, x) \
  do {									\
    USItype __cbtmp;							\
    __asm__ ("ff1 %0,%1"						\
	     : "=r" (__cbtmp)						\
	     : "r" ((USItype)(x)));					\
    (count) = __cbtmp ^ 31;						\
  } while (0)
#if defined (__mc88110__)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mulu.d r10,%2,%3
	or %0,r10,0
	or %1,r11,0"							\
	   : "=r" ((USItype)(w1)),					\
	     "=r" ((USItype)(w0))					\
	   : "r" ((USItype)(u)),					\
	     "r" ((USItype)(v))						\
	   : "r10", "r11")
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("or r10,%2,0
	or r11,%3,0
	divu.d r10,r10,%4
	mulu %1,%4,r11
	subu %1,%3,%1
	or %0,r11,0"							\
	   : "=r" ((USItype)(q)),					\
	     "=&r" ((USItype)(r))					\
	   : "r" ((USItype)(n1)),					\
	     "r" ((USItype)(n0)),					\
	     "r" ((USItype)(d))						\
	   : "r10", "r11")
#endif /* __mc88110__ */
#endif /* __m88000__ */
#if defined (__mips__)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("multu %2,%3
	mflo %0
	mfhi %1"							\
	   : "=d" ((USItype)(w0)),					\
	     "=d" ((USItype)(w1))					\
	   : "d" ((USItype)(u)),					\
	     "d" ((USItype)(v)))
#define UMUL_TIME 10
#define UDIV_TIME 100
#endif /* __mips__ */
#if defined (__ns32000__)
#define __umulsidi3(u, v) \
  ({UDItype __w;							\
    __asm__ ("meid %2,%0"						\
	     : "=g" (__w)						\
	     : "%0" ((USItype)(u)),					\
	       "g" ((USItype)(v)));					\
    __w; })
#define div_qrnnd(q, r, n1, n0, d) \
  __asm__ ("movd %2,r0
	movd %3,r1
	deid %4,r0
	movd r1,%0
	movd r0,%1"							\
	   : "=g" ((USItype)(q)),					\
	     "=g" ((USItype)(r))					\
	   : "g" ((USItype)(n0)),					\
	     "g" ((USItype)(n1)),					\
	     "g" ((USItype)(d))						\
	   : "r0", "r1")
#endif /* __ns32000__ */
#if defined (__pyr__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addw %5,%1
	addwc %3,%0"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "%0" ((USItype)(ah)),					\
	     "g" ((USItype)(bh)),					\
	     "%1" ((USItype)(al)),					\
	     "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subw %5,%1
	subwb %3,%0"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "0" ((USItype)(ah)),					\
	     "g" ((USItype)(bh)),					\
	     "1" ((USItype)(al)),					\
	     "g" ((USItype)(bl)))
/* This insn doesn't work on ancient pyramids.  */
#define umul_ppmm(w1, w0, u, v) \
  ({union {								\
	UDItype __ll;							\
	struct {USItype __h, __l;} __i;					\
     } __xx;								\
  __xx.__i.__l = u;							\
  __asm__ ("uemul %3,%0"						\
	   : "=r" (__xx.__i.__h),					\
	     "=r" (__xx.__i.__l)					\
	   : "1" (__xx.__i.__l),					\
	     "g" ((UDItype)(v)));					\
  (w1) = __xx.__i.__h;							\
  (w0) = __xx.__i.__l;})
#endif /* __pyr__ */
#if defined (__ibm032__) /* RT/ROMP */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("a %1,%5
	ae %0,%3"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "%0" ((USItype)(ah)),					\
	     "r" ((USItype)(bh)),					\
	     "%1" ((USItype)(al)),					\
	     "r" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("s %1,%5
	se %0,%3"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "0" ((USItype)(ah)),					\
	     "r" ((USItype)(bh)),					\
	     "1" ((USItype)(al)),					\
	     "r" ((USItype)(bl)))
#define umul_ppmm(ph, pl, m0, m1) \
  do {									\
    USItype __m0 = (m0), __m1 = (m1);					\
    __asm__ (								\
       "s	r2,r2
	mts	r10,%2
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	cas	%0,r2,r0
	mfs	r10,%1"							\
	     : "=r" ((USItype)(ph)),					\
	       "=r" ((USItype)(pl))					\
	     : "%r" (__m0),						\
	       "r" (__m1)						\
	     : "r2");							\
    (ph) += ((((SItype) __m0 >> 31) & __m1)				\
	     + (((SItype) __m1 >> 31) & __m0));				\
  } while (0)
#define UMUL_TIME 20
#define UDIV_TIME 200
#define count_leading_zeros(count, x) \
  do {									\
    if ((x) >= 0x10000)							\
      __asm__ ("clz %0,%1"						\
	       : "=r" ((USItype)(count))				\
	       : "r" ((USItype)(x) >> 16));				\
    else								\
      {									\
	__asm__ ("clz %0,%1"						\
		 : "=r" ((USItype)(count))				\
		 : "r" ((USItype)(x)));					\
	(count) += 16;							\
      }									\
  } while (0)
#endif /* __ibm032__ */
#if defined (__sparc__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addcc %4,%5,%1
	addx %2,%3,%0"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "%r" ((USItype)(ah)),					\
	     "rI" ((USItype)(bh)),					\
	     "%r" ((USItype)(al)),					\
	     "rI" ((USItype)(bl))					\
	   __CLOBBER_CC)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subcc %4,%5,%1
	subx %2,%3,%0"							\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "r" ((USItype)(ah)),					\
	     "rI" ((USItype)(bh)),					\
	     "r" ((USItype)(al)),					\
	     "rI" ((USItype)(bl))					\
	   __CLOBBER_CC)
#if defined (__sparc_v8__)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("umul %2,%3,%1;rd %%y,%0"					\
	   : "=r" ((USItype)(w1)),					\
	     "=r" ((USItype)(w0))					\
	   : "r" ((USItype)(u)),					\
	     "r" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
	   : "=&r" ((USItype)(q)),					\
	     "=&r" ((USItype)(r))					\
	   : "r" ((USItype)(n1)),					\
	     "r" ((USItype)(n0)),					\
	     "r" ((USItype)(d)))
#else
#if defined (__sparclite__)
/* This has hardware multiply but not divide.  It also has two additional
   instructions scan (ffs from high bit) and divscc.  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("umul %2,%3,%1;rd %%y,%0"					\
	   : "=r" ((USItype)(w1)),					\
	     "=r" ((USItype)(w0))					\
	   : "r" ((USItype)(u)),					\
	     "r" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("! Inlined udiv_qrnnd
	wr	%%g0,%2,%%y	! Not a delayed write for sparclite
	tst	%%g0
	divscc	%3,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%%g1
	divscc	%%g1,%4,%0
	rd	%%y,%1		! get remainder (rounded down)
	bl,a	1f		! if the remainder is negative,
	 add	%1,%4,%1	! add the divisor back (delay slot)
1:	! End of inline udiv_qrnnd"					\
	   : "=r" ((USItype)(q)),					\
	     "=r" ((USItype)(r))					\
	   : "r" ((USItype)(n1)),					\
	     "r" ((USItype)(n0)),					\
	     "rI" ((USItype)(d))					\
	   : "%g1" __AND_CLOBBER_CC)
#define UDIV_TIME 37
#define count_leading_zeros(count, x) \
  __asm__ ("scan %1,0,%0"						\
	   : "=r" ((USItype)(count))					\
	   : "r" ((USItype)(x)))
#else
/* SPARC without integer multiplication and divide instructions.
   (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("! Inlined umul_ppmm
	wr	%%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr
	sra	%3,31,%%g2	! Don't move this insn
	and	%2,%%g2,%%g2	! Don't move this insn
	andcc	%%g0,0,%%g1	! Don't move this insn
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,0,%%g1
	add	%%g1,%%g2,%0
	rd	%%y,%1"							\
	   : "=r" ((USItype)(w1)),					\
	     "=r" ((USItype)(w0))					\
	   : "%rI" ((USItype)(u)),					\
	     "r" ((USItype)(v))						\
	   : "%g1", "%g2" __AND_CLOBBER_CC)
#define UMUL_TIME 39		/* 39 instructions */
/* It's quite necessary to add this much assembler for the sparc.
   The default udiv_qrnnd (in C) is more than 10 times slower!  */
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("! Inlined udiv_qrnnd
	mov	32,%%g1
	subcc	%1,%2,%%g0
1:	bcs	5f
	 addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb
	sub	%1,%2,%1	! this kills msb of n
	addx	%1,%1,%1	! so this can't give carry
	subcc	%%g1,1,%%g1
2:	bne	1b
	 subcc	%1,%2,%%g0
	bcs	3f
	 addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb
	b	3f
	 sub	%1,%2,%1	! this kills msb of n
4:	sub	%1,%2,%1
5:	addxcc	%1,%1,%1
	bcc	2b
	 subcc	%%g1,1,%%g1
! Got carry from n.  Subtract next step to cancel this carry.
	bne	4b
	 addcc	%0,%0,%0	! shift n1n0 and a 0-bit in lsb
	sub	%1,%2,%1
3:	xnor	%0,0,%0
	! End of inline udiv_qrnnd"					\
	   : "=&r" ((USItype)(q)),					\
	     "=&r" ((USItype)(r))					\
	   : "r" ((USItype)(d)),					\
	     "1" ((USItype)(n1)),					\
	     "0" ((USItype)(n0)) : "%g1" __AND_CLOBBER_CC)
#define UDIV_TIME (3+7*32)	/* 7 instructions/iteration. 32 iterations. */
#endif /* __sparclite__ */
#endif /* __sparc_v8__ */
#endif /* __sparc__ */
#if defined (__vax__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl2 %5,%1
	adwc %3,%0"							\
	   : "=g" ((USItype)(sh)),					\
	     "=&g" ((USItype)(sl))					\
	   : "%0" ((USItype)(ah)),					\
	     "g" ((USItype)(bh)),					\
	     "%1" ((USItype)(al)),					\
	     "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl2 %5,%1
	sbwc %3,%0"							\
	   : "=g" ((USItype)(sh)),					\
	     "=&g" ((USItype)(sl))					\
	   : "0" ((USItype)(ah)),					\
	     "g" ((USItype)(bh)),					\
	     "1" ((USItype)(al)),					\
	     "g" ((USItype)(bl)))
#define umul_ppmm(xh, xl, m0, m1) \
  do {									\
    union {								\
	UDItype __ll;							\
	struct {USItype __l, __h;} __i;					\
      } __xx;								\
    USItype __m0 = (m0), __m1 = (m1);					\
    __asm__ ("emul %1,%2,$0,%0"						\
	     : "=r" (__xx.__ll)						\
	     : "g" (__m0),						\
	       "g" (__m1));						\
    (xh) = __xx.__i.__h;						\
    (xl) = __xx.__i.__l;						\
    (xh) += ((((SItype) __m0 >> 31) & __m1)				\
	     + (((SItype) __m1 >> 31) & __m0));				\
  } while (0)
#endif /* __vax__ */

#endif /* __GNUC__ */
/* If this machine has no inline assembler, use C macros.  */

#if !defined (add_ssaaaa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do {									\
    USItype __x;							\
    __x = (al) + (bl);							\
    (sh) = (ah) + (bh) + (__x < (al));					\
    (sl) = __x;								\
  } while (0)
#endif

#if !defined (sub_ddmmss)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do {									\
    USItype __x;							\
    __x = (al) - (bl);							\
    (sh) = (ah) - (bh) - (__x > (al));					\
    (sl) = __x;								\
  } while (0)
#endif
#if !defined (umul_ppmm)
#define umul_ppmm(w1, w0, u, v) \
  do {									\
    USItype __x0, __x1, __x2, __x3;					\
    USItype __ul, __vl, __uh, __vh;					\
									\
    __ul = __ll_lowpart (u);						\
    __uh = __ll_highpart (u);						\
    __vl = __ll_lowpart (v);						\
    __vh = __ll_highpart (v);						\
									\
    __x0 = (USItype) __ul * __vl;					\
    __x1 = (USItype) __ul * __vh;					\
    __x2 = (USItype) __uh * __vl;					\
    __x3 = (USItype) __uh * __vh;					\
									\
    __x1 += __ll_highpart (__x0);/* this can't give carry */		\
    __x1 += __x2;		/* but this indeed can */		\
    if (__x1 < __x2)		/* did we get it? */			\
      __x3 += __ll_B;		/* yes, add it in the proper pos. */	\
									\
    (w1) = __x3 + __ll_highpart (__x1);					\
    (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0);		\
  } while (0)
#endif
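
/* A worked instance of the scheme above, with __ll_B = 2^16:
   u = 0x00020003, v = 0x00040005 gives __x0 = 15, __x1 = 12,
   __x2 = 10, __x3 = 8; after folding the cross terms,
   (w1) = 0x00000008 and (w0) = 0x0016000F, i.e. the 64-bit
   product 8*2^32 + 22*2^16 + 15.  */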
#if !defined (__umulsidi3)
#define __umulsidi3(u, v) \
  ({DIunion __w;							\
    umul_ppmm (__w.s.high, __w.s.low, u, v);				\
    __w.ll; })
#endif
/* Define this unconditionally, so it can be used for debugging.  */
#define __udiv_qrnnd_c(q, r, n1, n0, d) \
  do {									\
    USItype __d1, __d0, __q1, __q0;					\
    USItype __r1, __r0, __m;						\
    __d1 = __ll_highpart (d);						\
    __d0 = __ll_lowpart (d);						\
									\
    __r1 = (n1) % __d1;							\
    __q1 = (n1) / __d1;							\
    __m = (USItype) __q1 * __d0;					\
    __r1 = __r1 * __ll_B | __ll_highpart (n0);				\
    if (__r1 < __m)							\
      {									\
	__q1--, __r1 += (d);						\
	if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
	  if (__r1 < __m)						\
	    __q1--, __r1 += (d);					\
      }									\
    __r1 -= __m;							\
									\
    __r0 = __r1 % __d1;							\
    __q0 = __r1 / __d1;							\
    __m = (USItype) __q0 * __d0;					\
    __r0 = __r0 * __ll_B | __ll_lowpart (n0);				\
    if (__r0 < __m)							\
      {									\
	__q0--, __r0 += (d);						\
	if (__r0 >= (d))						\
	  if (__r0 < __m)						\
	    __q0--, __r0 += (d);					\
      }									\
    __r0 -= __m;							\
									\
    (q) = (USItype) __q1 * __ll_B | __q0;				\
    (r) = __r0;								\
  } while (0)
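
#if 0
/* A minimal self-check sketch for __udiv_qrnnd_c (editorial, not part
   of the macro set above): it assumes `unsigned long long' is available
   and that the divisor is pre-normalized (msb set), as required.  */
#include <assert.h>
static void
__check_udiv_qrnnd_c (void)
{
  USItype q, r;
  USItype n1 = 0x00000001, n0 = 0x23456789, d = 0x80000003;

  __udiv_qrnnd_c (q, r, n1, n0, d);	/* requires n1 < d */
  assert ((unsigned long long) q * d + r
	  == (((unsigned long long) n1 << 32) | n0));
}
#endif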
/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
   __udiv_w_sdiv (defined in libgcc or elsewhere).  */
#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
#define udiv_qrnnd(q, r, nh, nl, d) \
  do {									\
    USItype __r;							\
    (q) = __udiv_w_sdiv (&__r, nh, nl, d);				\
    (r) = __r;								\
  } while (0)
#endif
/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c.  */
#if !defined (udiv_qrnnd)
#define UDIV_NEEDS_NORMALIZATION 1
#define udiv_qrnnd __udiv_qrnnd_c
#endif
#if !defined (count_leading_zeros)
extern const UQItype __clz_tab[];
#define count_leading_zeros(count, x) \
  do {									\
    USItype __xr = (x);							\
    USItype __a;							\
									\
    if (SI_TYPE_SIZE <= 32)						\
      {									\
	__a = __xr < (1<<2*__BITS4)					\
	  ? (__xr < (1<<__BITS4) ? 0 : __BITS4)				\
	  : (__xr < (1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4);		\
      }									\
    else								\
      {									\
	for (__a = SI_TYPE_SIZE - 8; __a > 0; __a -= 8)			\
	  if (((__xr >> __a) & 0xff) != 0)				\
	    break;							\
      }									\
									\
    (count) = SI_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a);		\
  } while (0)
#endif
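
/* E.g. count_leading_zeros (count, 0x00010000) sets count to 15,
   since 0x00010000 must be shifted left 15 bits to set the msb.  */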
#ifndef UDIV_NEEDS_NORMALIZATION
#define UDIV_NEEDS_NORMALIZATION 0
#endif