1 /* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
2 Copyright (C) 1991, 1992 Free Software Foundation, Inc.
4 This definition file is free software; you can redistribute it
5 and/or modify it under the terms of the GNU General Public
6 License as published by the Free Software Foundation; either
7 version 2, or (at your option) any later version.
9 This definition file is distributed in the hope that it will be
10 useful, but WITHOUT ANY WARRANTY; without even the implied
11 warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12 See the GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
18 #ifndef LONG_TYPE_SIZE
19 #define LONG_TYPE_SIZE 32
22 #define __BITS4 (LONG_TYPE_SIZE / 4)
23 #define __ll_B (1L << (LONG_TYPE_SIZE / 2))
24 #define __ll_lowpart(t) ((unsigned long int) (t) % __ll_B)
25 #define __ll_highpart(t) ((unsigned long int) (t) / __ll_B)
27 /* Define auxiliary asm macros.
29 1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand)
30 multiplies two unsigned long integers MULTIPLIER and MULTIPLICAND,
31 and generates a two-word unsigned product in HIGH_PROD and
34 2) __umulsidi3(a,b) multiplies two unsigned long integers A and B,
35 and returns a long long product. This is just a variant of umul_ppmm.
37 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
38 denominator) divides a two-word unsigned integer, composed by the
39 integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
40 places the quotient in QUOTIENT and the remainder in REMAINDER.
41 HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
42 If, in addition, the most significant bit of DENOMINATOR is required to be 1,
43 then the pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.
45 4) count_leading_zeros(count, x) counts the number of zero-bits from
46 the msb to the first non-zero bit. This is the number of steps X
47 needs to be shifted left to set the msb. Undefined for X == 0.
49 5) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
50 high_addend_2, low_addend_2) adds two two-word unsigned integers,
51 composed by HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
52 LOW_ADDEND_2 respectively. The result is placed in HIGH_SUM and
53 LOW_SUM. Overflow (i.e. carry out) is not stored anywhere, and is
56 6) sub_ddmmss(high_difference, low_difference, high_minuend,
57 low_minuend, high_subtrahend, low_subtrahend) subtracts two
58 two-word unsigned integers, composed by HIGH_MINUEND and
59 LOW_MINUEND, and HIGH_SUBTRAHEND and LOW_SUBTRAHEND
60 respectively. The result is placed in HIGH_DIFFERENCE and
61 LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
64 If any of these macros are left undefined for a particular CPU,
67 /* The CPUs come in alphabetical order below.
69 Please add support for more CPUs here, or improve the current support
71 (E.g. WE32100, i960, IBM360.) */
73 #if defined (__GNUC__) && !defined (NO_ASM)
75 /* We sometimes need to clobber "cc" with gcc2, but that would not be
76 understood by gcc1. Use cpp to avoid major code duplication. */
79 #define __AND_CLOBBER_CC
80 #else /* __GNUC__ >= 2 */
81 #define __CLOBBER_CC : "cc"
82 #define __AND_CLOBBER_CC , "cc"
83 #endif /* __GNUC__ < 2 */
85 #if defined (__a29k__) || defined (___AM29K__)
86 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
87 __asm__ ("add %1,%4,%5
89 : "=r" ((unsigned long int)(sh)), \
90 "=&r" ((unsigned long int)(sl)) \
91 : "%r" ((unsigned long int)(ah)), \
92 "rI" ((unsigned long int)(bh)), \
93 "%r" ((unsigned long int)(al)), \
94 "rI" ((unsigned long int)(bl)))
95 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
96 __asm__ ("sub %1,%4,%5
98 : "=r" ((unsigned long int)(sh)), \
99 "=&r" ((unsigned long int)(sl)) \
100 : "r" ((unsigned long int)(ah)), \
101 "rI" ((unsigned long int)(bh)), \
102 "r" ((unsigned long int)(al)), \
103 "rI" ((unsigned long int)(bl)))
104 #define umul_ppmm(xh, xl, m0, m1) \
106 unsigned long int __m0 = (m0), __m1 = (m1); \
107 __asm__ ("multiplu %0,%1,%2" \
108 : "=r" ((unsigned long int)(xl)) \
111 __asm__ ("multmu %0,%1,%2" \
112 : "=r" ((unsigned long int)(xh)) \
116 #define udiv_qrnnd(q, r, n1, n0, d) \
117 __asm__ ("dividu %0,%3,%4" \
118 : "=r" ((unsigned long int)(q)), \
119 "=q" ((unsigned long int)(r)) \
120 : "1" ((unsigned long int)(n1)), \
121 "r" ((unsigned long int)(n0)), \
122 "r" ((unsigned long int)(d)))
123 #define count_leading_zeros(count, x) \
124 __asm__ ("clz %0,%1" \
125 : "=r" ((unsigned long int)(count)) \
126 : "r" ((unsigned long int)(x)))
127 #endif /* __a29k__ */
129 #if defined (__arm__)
130 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
131 __asm__ ("adds %1,%4,%5
133 : "=r" ((unsigned long int)(sh)), \
134 "=&r" ((unsigned long int)(sl)) \
135 : "%r" ((unsigned long int)(ah)), \
136 "rI" ((unsigned long int)(bh)), \
137 "%r" ((unsigned long int)(al)), \
138 "rI" ((unsigned long int)(bl)))
139 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
140 __asm__ ("subs %1,%4,%5
142 : "=r" ((unsigned long int)(sh)), \
143 "=&r" ((unsigned long int)(sl)) \
144 : "r" ((unsigned long int)(ah)), \
145 "rI" ((unsigned long int)(bh)), \
146 "r" ((unsigned long int)(al)), \
147 "rI" ((unsigned long int)(bl)))
150 #if defined (__gmicro__)
151 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
152 __asm__ ("add.w %5,%1
154 : "=g" ((unsigned long int)(sh)), \
155 "=&g" ((unsigned long int)(sl)) \
156 : "%0" ((unsigned long int)(ah)), \
157 "g" ((unsigned long int)(bh)), \
158 "%1" ((unsigned long int)(al)), \
159 "g" ((unsigned long int)(bl)))
160 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
161 __asm__ ("sub.w %5,%1
163 : "=g" ((unsigned long int)(sh)), \
164 "=&g" ((unsigned long int)(sl)) \
165 : "0" ((unsigned long int)(ah)), \
166 "g" ((unsigned long int)(bh)), \
167 "1" ((unsigned long int)(al)), \
168 "g" ((unsigned long int)(bl)))
169 #define umul_ppmm(ph, pl, m0, m1) \
170 __asm__ ("mulx %3,%0,%1" \
171 : "=g" ((unsigned long int)(ph)), \
172 "=r" ((unsigned long int)(pl)) \
173 : "%0" ((unsigned long int)(m0)), \
174 "g" ((unsigned long int)(m1)))
175 #define udiv_qrnnd(q, r, nh, nl, d) \
176 __asm__ ("divx %4,%0,%1" \
177 : "=g" ((unsigned long int)(q)), \
178 "=r" ((unsigned long int)(r)) \
179 : "1" ((unsigned long int)(nh)), \
180 "0" ((unsigned long int)(nl)), \
181 "g" ((unsigned long int)(d)))
182 #define count_leading_zeros(count, x) \
183 __asm__ ("bsch/1 %1,%0" \
185 : "g" ((unsigned long int)(x)), \
190 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
191 __asm__ ("add %4,%5,%1
193 : "=r" ((unsigned long int)(sh)), \
194 "=&r" ((unsigned long int)(sl)) \
195 : "%rM" ((unsigned long int)(ah)), \
196 "rM" ((unsigned long int)(bh)), \
197 "%rM" ((unsigned long int)(al)), \
198 "rM" ((unsigned long int)(bl)))
199 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
200 __asm__ ("sub %5,%4,%1
202 : "=r" ((unsigned long int)(sh)), \
203 "=&r" ((unsigned long int)(sl)) \
204 : "rM" ((unsigned long int)(ah)), \
205 "rM" ((unsigned long int)(bh)), \
206 "rM" ((unsigned long int)(al)), \
207 "rM" ((unsigned long int)(bl)))
208 #if defined (_PA_RISC1_1)
209 #define umul_ppmm(w1, w0, u, v) \
214 struct {unsigned long int __w1, __w0;} __w1w0; \
216 __asm__ ("xmpyu %1,%2,%0" \
220 (w1) = __t.__w1w0.__w1; \
221 (w0) = __t.__w1w0.__w0; \
230 #if defined (__i386__) || defined (__i486__)
231 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
234 : "=r" ((unsigned long int)(sh)), \
235 "=&r" ((unsigned long int)(sl)) \
236 : "%0" ((unsigned long int)(ah)), \
237 "g" ((unsigned long int)(bh)), \
238 "%1" ((unsigned long int)(al)), \
239 "g" ((unsigned long int)(bl)))
240 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
243 : "=r" ((unsigned long int)(sh)), \
244 "=&r" ((unsigned long int)(sl)) \
245 : "0" ((unsigned long int)(ah)), \
246 "g" ((unsigned long int)(bh)), \
247 "1" ((unsigned long int)(al)), \
248 "g" ((unsigned long int)(bl)))
249 #define umul_ppmm(w1, w0, u, v) \
251 : "=a" ((unsigned long int)(w0)), \
252 "=d" ((unsigned long int)(w1)) \
253 : "%0" ((unsigned long int)(u)), \
254 "rm" ((unsigned long int)(v)))
255 #define udiv_qrnnd(q, r, n1, n0, d) \
257 : "=a" ((unsigned long int)(q)), \
258 "=d" ((unsigned long int)(r)) \
259 : "0" ((unsigned long int)(n0)), \
260 "1" ((unsigned long int)(n1)), \
261 "rm" ((unsigned long int)(d)))
262 #define count_leading_zeros(count, x) \
264 unsigned long int __cbtmp; \
265 __asm__ ("bsrl %1,%0" \
266 : "=r" (__cbtmp) : "rm" ((unsigned long int)(x))); \
267 (count) = __cbtmp ^ 31; \
273 #if defined (__i860__)
275 /* Make sure these patterns really improve the code before
276 switching them on. */
277 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
282 struct {unsigned long int l, h;} i; \
284 __a.i.l = (al); __a.i.h = (ah); \
285 __b.i.l = (bl); __b.i.h = (bh); \
286 __asm__ ("fiadd.dd %1,%2,%0" \
288 : "%f" (__a.ll), "f" (__b.ll)); \
289 (sh) = __s.i.h; (sl) = __s.i.l; \
291 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
296 struct {unsigned long int l, h;} i; \
298 __a.i.l = (al); __a.i.h = (ah); \
299 __b.i.l = (bl); __b.i.h = (bh); \
300 __asm__ ("fisub.dd %1,%2,%0" \
302 : "%f" (__a.ll), "f" (__b.ll)); \
303 (sh) = __s.i.h; (sl) = __s.i.l; \
306 #endif /* __i860__ */
308 #if defined (___IBMR2__) /* IBM RS6000 */
309 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
310 __asm__ ("a%I5 %1,%4,%5
312 : "=r" ((unsigned long int)(sh)), \
313 "=&r" ((unsigned long int)(sl)) \
314 : "%r" ((unsigned long int)(ah)), \
315 "r" ((unsigned long int)(bh)), \
316 "%r" ((unsigned long int)(al)), \
317 "rI" ((unsigned long int)(bl)))
318 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
319 __asm__ ("sf%I4 %1,%5,%4
321 : "=r" ((unsigned long int)(sh)), \
322 "=&r" ((unsigned long int)(sl)) \
323 : "r" ((unsigned long int)(ah)), \
324 "r" ((unsigned long int)(bh)), \
325 "rI" ((unsigned long int)(al)), \
326 "r" ((unsigned long int)(bl)))
327 #define umul_ppmm(xh, xl, m0, m1) \
329 unsigned long int __m0 = (m0), __m1 = (m1); \
330 __asm__ ("mul %0,%2,%3" \
331 : "=r" ((unsigned long int)(xh)), \
332 "=q" ((unsigned long int)(xl)) \
335 (xh) += ((((signed long int) __m0 >> 31) & __m1) \
336 + (((signed long int) __m1 >> 31) & __m0)); \
338 #define smul_ppmm(xh, xl, m0, m1) \
339 __asm__ ("mul %0,%2,%3" \
340 : "=r" ((unsigned long int)(xh)), \
341 "=q" ((unsigned long int)(xl)) \
345 #define sdiv_qrnnd(q, r, nh, nl, d) \
346 __asm__ ("div %0,%2,%4" \
347 : "=r" (q), "=q" (r) \
348 : "r" (nh), "1" (nl), "r" (d))
350 #define UDIV_NEEDS_NORMALIZATION 1
351 #define count_leading_zeros(count, x) \
352 __asm__ ("cntlz %0,%1" \
353 : "=r" ((unsigned long int)(count)) \
354 : "r" ((unsigned long int)(x)))
355 #endif /* ___IBMR2__ */
357 #if defined (__mc68000__)
358 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
359 __asm__ ("add%.l %5,%1
361 : "=d" ((unsigned long int)(sh)), \
362 "=&d" ((unsigned long int)(sl)) \
363 : "%0" ((unsigned long int)(ah)), \
364 "d" ((unsigned long int)(bh)), \
365 "%1" ((unsigned long int)(al)), \
366 "g" ((unsigned long int)(bl)))
367 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
368 __asm__ ("sub%.l %5,%1
370 : "=d" ((unsigned long int)(sh)), \
371 "=&d" ((unsigned long int)(sl)) \
372 : "0" ((unsigned long int)(ah)), \
373 "d" ((unsigned long int)(bh)), \
374 "1" ((unsigned long int)(al)), \
375 "g" ((unsigned long int)(bl)))
376 #if defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)
377 #define umul_ppmm(w1, w0, u, v) \
378 __asm__ ("mulu%.l %3,%1:%0" \
379 : "=d" ((unsigned long int)(w0)), \
380 "=d" ((unsigned long int)(w1)) \
381 : "%0" ((unsigned long int)(u)), \
382 "dmi" ((unsigned long int)(v)))
384 #define udiv_qrnnd(q, r, n1, n0, d) \
385 __asm__ ("divu%.l %4,%1:%0" \
386 : "=d" ((unsigned long int)(q)), \
387 "=d" ((unsigned long int)(r)) \
388 : "0" ((unsigned long int)(n0)), \
389 "1" ((unsigned long int)(n1)), \
390 "dmi" ((unsigned long int)(d)))
392 #define sdiv_qrnnd(q, r, n1, n0, d) \
393 __asm__ ("divs%.l %4,%1:%0" \
394 : "=d" ((unsigned long int)(q)), \
395 "=d" ((unsigned long int)(r)) \
396 : "0" ((unsigned long int)(n0)), \
397 "1" ((unsigned long int)(n1)), \
398 "dmi" ((unsigned long int)(d)))
399 #define count_leading_zeros(count, x) \
400 __asm__ ("bfffo %1{%b2:%b2},%0" \
401 : "=d" ((unsigned long int)(count)) \
402 : "od" ((unsigned long int)(x)), "n" (0))
403 #else /* not mc68020 */
404 #define umul_ppmm(xh, xl, a, b) \
405 __asm__ ("| Inlined umul_ppmm
431 : "=g" ((unsigned long int)(xh)), \
432 "=g" ((unsigned long int)(xl)) \
433 : "g" ((unsigned long int)(a)), \
434 "g" ((unsigned long int)(b)) \
435 : "d0", "d1", "d2", "d3", "d4")
436 #define UMUL_TIME 100
437 #define UDIV_TIME 400
438 #endif /* not mc68020 */
441 #if defined (__m88000__)
442 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
443 __asm__ ("addu.co %1,%r4,%r5
444 addu.ci %0,%r2,%r3" \
445 : "=r" ((unsigned long int)(sh)), \
446 "=&r" ((unsigned long int)(sl)) \
447 : "%rJ" ((unsigned long int)(ah)), \
448 "rJ" ((unsigned long int)(bh)), \
449 "%rJ" ((unsigned long int)(al)), \
450 "rJ" ((unsigned long int)(bl)))
451 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
452 __asm__ ("subu.co %1,%r4,%r5
453 subu.ci %0,%r2,%r3" \
454 : "=r" ((unsigned long int)(sh)), \
455 "=&r" ((unsigned long int)(sl)) \
456 : "rJ" ((unsigned long int)(ah)), \
457 "rJ" ((unsigned long int)(bh)), \
458 "rJ" ((unsigned long int)(al)), \
459 "rJ" ((unsigned long int)(bl)))
461 #define UDIV_TIME 150
462 #define count_leading_zeros(count, x) \
464 unsigned long int __cbtmp; \
465 __asm__ ("ff1 %0,%1" \
467 : "r" ((unsigned long int)(x))); \
468 (count) = __cbtmp ^ 31; \
470 #if defined (__mc88110__)
471 #define umul_ppmm(w1, w0, u, v) \
472 __asm__ ("mulu.d r10,%2,%3
480 #define udiv_qrnnd(q, r, n1, n0, d) \
481 __asm__ ("or r10,%2,0
494 #endif /* __m88000__ */
496 #if defined (__mips__)
497 #define umul_ppmm(w1, w0, u, v) \
498 __asm__ ("multu %2,%3
501 : "=d" ((unsigned long int)(w0)), \
502 "=d" ((unsigned long int)(w1)) \
503 : "d" ((unsigned long int)(u)), \
504 "d" ((unsigned long int)(v)))
506 #define UDIV_TIME 100
507 #endif /* __mips__ */
509 #if defined (__ns32000__)
510 #define __umulsidi3(u, v) \
511 ({long long int __w; \
512 __asm__ ("meid %2,%0" \
514 : "%0" ((unsigned long int)(u)), \
515 "g" ((unsigned long int)(v))); \
517 #define div_qrnnd(q, r, n1, n0, d) \
523 : "=g" ((unsigned long int)(q)), \
524 "=g" ((unsigned long int)(r)) \
525 : "g" ((unsigned long int)(n0)), \
526 "g" ((unsigned long int)(n1)), \
527 "g" ((unsigned long int)(d)) \
529 #endif /* __ns32000__ */
531 #if defined (__pyr__)
532 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
535 : "=r" ((unsigned long int)(sh)), \
536 "=&r" ((unsigned long int)(sl)) \
537 : "%0" ((unsigned long int)(ah)), \
538 "g" ((unsigned long int)(bh)), \
539 "%1" ((unsigned long int)(al)), \
540 "g" ((unsigned long int)(bl)))
541 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
544 : "=r" ((unsigned long int)(sh)), \
545 "=&r" ((unsigned long int)(sl)) \
546 : "0" ((unsigned long int)(ah)), \
547 "g" ((unsigned long int)(bh)), \
548 "1" ((unsigned long int)(al)), \
549 "g" ((unsigned long int)(bl)))
550 /* This insn doesn't work on ancient pyramids. */
551 #define umul_ppmm(w1, w0, u, v) \
552 ({union {long long int ll;struct {unsigned long int h, l;} i;} __xx; \
554 __asm__ ("uemul %3,%0" \
559 (w1) = __xx.i.h; (w0) = __xx.i.l;})
562 #if defined (__ibm032__) /* RT/ROMP */
563 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
566 : "=r" ((unsigned long int)(sh)), \
567 "=&r" ((unsigned long int)(sl)) \
568 : "%0" ((unsigned long int)(ah)), \
569 "r" ((unsigned long int)(bh)), \
570 "%1" ((unsigned long int)(al)), \
571 "r" ((unsigned long int)(bl)))
572 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
575 : "=r" ((unsigned long int)(sh)), \
576 "=&r" ((unsigned long int)(sl)) \
577 : "0" ((unsigned long int)(ah)), \
578 "r" ((unsigned long int)(bh)), \
579 "1" ((unsigned long int)(al)), \
580 "r" ((unsigned long int)(bl)))
581 #define umul_ppmm(ph, pl, m0, m1) \
583 unsigned long int __m0 = (m0), __m1 = (m1); \
605 : "=r" ((unsigned long int)(ph)), \
606 "=r" ((unsigned long int)(pl)) \
610 (ph) += ((((signed long int) __m0 >> 31) & __m1) \
611 + (((signed long int) __m1 >> 31) & __m0)); \
614 #define UDIV_TIME 200
615 #define count_leading_zeros(count, x) \
617 if ((x) >= 0x10000) \
618 __asm__ ("clz %0,%1" \
619 : "=r" ((unsigned long int)(count)) \
620 : "r" ((unsigned long int)(x) >> 16)); \
623 __asm__ ("clz %0,%1" \
624 : "=r" ((unsigned long int)(count)) \
625 : "r" ((unsigned long int)(x))); \
631 #if defined (__sparc__)
632 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
633 __asm__ ("addcc %4,%5,%1
635 : "=r" ((unsigned long int)(sh)), \
636 "=&r" ((unsigned long int)(sl)) \
637 : "%r" ((unsigned long int)(ah)), \
638 "rI" ((unsigned long int)(bh)), \
639 "%r" ((unsigned long int)(al)), \
640 "rI" ((unsigned long int)(bl)) \
642 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
643 __asm__ ("subcc %4,%5,%1
645 : "=r" ((unsigned long int)(sh)), \
646 "=&r" ((unsigned long int)(sl)) \
647 : "r" ((unsigned long int)(ah)), \
648 "rI" ((unsigned long int)(bh)), \
649 "r" ((unsigned long int)(al)), \
650 "rI" ((unsigned long int)(bl)) \
652 #if defined (__sparc8__) /* How do we recog. version 8 SPARC? */
653 #define umul_ppmm(w1, w0, u, v) \
654 __asm__ ("umul %2,%3,%1;rd %%y,%0" \
655 : "=r" ((unsigned long int)(w1)), \
656 "=r" ((unsigned long int)(w0)) \
657 : "r" ((unsigned long int)(u)), \
658 "r" ((unsigned long int)(v)))
659 #define udiv_qrnnd(q, r, n1, n0, d) \
660 __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
661 : "=&r" ((unsigned long int)(q)), \
662 "=&r" ((unsigned long int)(r)) \
663 : "r" ((unsigned long int)(n1)), \
664 "r" ((unsigned long int)(n0)), \
665 "r" ((unsigned long int)(d)))
667 /* SPARC without integer multiplication and divide instructions.
668 (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
669 #define umul_ppmm(w1, w0, u, v) \
670 __asm__ ("! Inlined umul_ppmm
671 wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr
672 sra %3,31,%%g2 ! Don't move this insn
673 and %2,%%g2,%%g2 ! Don't move this insn
674 andcc %%g0,0,%%g1 ! Don't move this insn
710 : "=r" ((unsigned long int)(w1)), \
711 "=r" ((unsigned long int)(w0)) \
712 : "%rI" ((unsigned long int)(u)), \
713 "r" ((unsigned long int)(v)) \
714 : "%g1", "%g2" __AND_CLOBBER_CC)
715 #define UMUL_TIME 39 /* 39 instructions */
716 /* It's quite necessary to add this much assembler for the sparc.
717 The default udiv_qrnnd (in C) is more than 10 times slower! */
718 #define udiv_qrnnd(q, r, n1, n0, d) \
719 __asm__ ("! Inlined udiv_qrnnd
723 addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
724 sub %1,%2,%1 ! this kills msb of n
725 addx %1,%1,%1 ! so this can't give carry
730 addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
732 sub %1,%2,%1 ! this kills msb of n
737 ! Got carry from n. Subtract next step to cancel this carry.
739 addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb
742 ! End of inline udiv_qrnnd" \
743 : "=&r" ((unsigned long int)(q)), \
744 "=&r" ((unsigned long int)(r)) \
745 : "r" ((unsigned long int)(d)), \
746 "1" ((unsigned long int)(n1)), \
747 "0" ((unsigned long int)(n0)) : "%g1" __AND_CLOBBER_CC)
748 #define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */
749 #endif /* __sparc8__ */
750 #endif /* __sparc__ */
752 #if defined (__vax__)
753 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
754 __asm__ ("addl2 %5,%1
756 : "=g" ((unsigned long int)(sh)), \
757 "=&g" ((unsigned long int)(sl)) \
758 : "%0" ((unsigned long int)(ah)), \
759 "g" ((unsigned long int)(bh)), \
760 "%1" ((unsigned long int)(al)), \
761 "g" ((unsigned long int)(bl)))
762 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
763 __asm__ ("subl2 %5,%1
765 : "=g" ((unsigned long int)(sh)), \
766 "=&g" ((unsigned long int)(sl)) \
767 : "0" ((unsigned long int)(ah)), \
768 "g" ((unsigned long int)(bh)), \
769 "1" ((unsigned long int)(al)), \
770 "g" ((unsigned long int)(bl)))
771 #define umul_ppmm(xh, xl, m0, m1) \
773 union {long long int ll;struct {unsigned long int l, h;} i;} __xx; \
774 unsigned long int __m0 = (m0), __m1 = (m1); \
775 __asm__ ("emul %1,%2,$0,%0" \
779 (xh) = __xx.i.h; (xl) = __xx.i.l; \
780 (xh) += ((((signed long int) __m0 >> 31) & __m1) \
781 + (((signed long int) __m1 >> 31) & __m0)); \
785 #endif /* __GNUC__ */
787 /* If this machine has no inline assembler, use C macros. */
789 #if !defined (add_ssaaaa)
790 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
792 unsigned long int __x; \
794 (sh) = (ah) + (bh) + (__x < (al)); \
799 #if !defined (sub_ddmmss)
800 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
802 unsigned long int __x; \
804 (sh) = (ah) - (bh) - (__x > (al)); \
809 #if !defined (umul_ppmm)
810 #define umul_ppmm(w1, w0, u, v) \
812 unsigned long int __x0, __x1, __x2, __x3; \
813 unsigned int __ul, __vl, __uh, __vh; \
815 __ul = __ll_lowpart (u); \
816 __uh = __ll_highpart (u); \
817 __vl = __ll_lowpart (v); \
818 __vh = __ll_highpart (v); \
820 __x0 = (unsigned long int) __ul * __vl; \
821 __x1 = (unsigned long int) __ul * __vh; \
822 __x2 = (unsigned long int) __uh * __vl; \
823 __x3 = (unsigned long int) __uh * __vh; \
825 __x1 += __ll_highpart (__x0);/* this can't give carry */ \
826 __x1 += __x2; /* but this indeed can */ \
827 if (__x1 < __x2) /* did we get it? */ \
828 __x3 += __ll_B; /* yes, add it in the proper pos. */ \
830 (w1) = __x3 + __ll_highpart (__x1); \
831 (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
835 #if !defined (__umulsidi3)
836 #define __umulsidi3(u, v) \
838 umul_ppmm (__w.s.high, __w.s.low, u, v); \
842 /* Define this unconditionally, so it can be used for debugging. */
843 #define __udiv_qrnnd_c(q, r, n1, n0, d) \
845 unsigned int __d1, __d0, __q1, __q0; \
846 unsigned long int __r1, __r0, __m; \
847 __d1 = __ll_highpart (d); \
848 __d0 = __ll_lowpart (d); \
850 __r1 = (n1) % __d1; \
851 __q1 = (n1) / __d1; \
852 __m = (unsigned long int) __q1 * __d0; \
853 __r1 = __r1 * __ll_B | __ll_highpart (n0); \
856 __q1--, __r1 += (d); \
857 if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
859 __q1--, __r1 += (d); \
863 __r0 = __r1 % __d1; \
864 __q0 = __r1 / __d1; \
865 __m = (unsigned long int) __q0 * __d0; \
866 __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
869 __q0--, __r0 += (d); \
872 __q0--, __r0 += (d); \
876 (q) = (unsigned long int) __q1 * __ll_B | __q0; \
879 /* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
880 #if !defined (udiv_qrnnd)
881 #define UDIV_NEEDS_NORMALIZATION 1
882 #define udiv_qrnnd __udiv_qrnnd_c
885 #if !defined (count_leading_zeros)
886 extern const unsigned char __clz_tab[];
887 #define count_leading_zeros(count, x) \
889 unsigned long int __xr = (x); \
892 if (LONG_TYPE_SIZE <= 32) \
894 __a = __xr < (1<<2*__BITS4) \
895 ? (__xr < (1<<__BITS4) ? 0 : __BITS4) \
896 : (__xr < (1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
900 for (__a = LONG_TYPE_SIZE - 8; __a > 0; __a -= 8) \
901 if (((__xr >> __a) & 0xff) != 0) \
905 (count) = LONG_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
909 #ifndef UDIV_NEEDS_NORMALIZATION
910 #define UDIV_NEEDS_NORMALIZATION 0