1 /* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
2    Copyright (C) 1991, 1992 Free Software Foundation, Inc.
3
4    This definition file is free software; you can redistribute it
5    and/or modify it under the terms of the GNU General Public
6    License as published by the Free Software Foundation; either
7    version 2, or (at your option) any later version.
8
9    This definition file is distributed in the hope that it will be
10    useful, but WITHOUT ANY WARRANTY; without even the implied
11    warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12    See the GNU General Public License for more details.
13
14    You should have received a copy of the GNU General Public License
15    along with this program; if not, write to the Free Software
16    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  */
17
18 #ifndef LONG_TYPE_SIZE
19 #define LONG_TYPE_SIZE 32
20 #endif
21
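/* Helpers for splitting a word into half-words: __ll_B is the half-word
   base 2**(LONG_TYPE_SIZE/2), and __ll_lowpart/__ll_highpart extract the
   low and high halves of their argument.  */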
22 #define __BITS4 (LONG_TYPE_SIZE / 4)
23 #define __ll_B (1L << (LONG_TYPE_SIZE / 2))
24 #define __ll_lowpart(t) ((unsigned long int) (t) % __ll_B)
25 #define __ll_highpart(t) ((unsigned long int) (t) / __ll_B)
26
27 /* Define auxiliary asm macros.
28
29    1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand)
30    multiplies two unsigned long integers MULTIPLIER and MULTIPLICAND,
31    and generates a two-word unsigned product in HIGH_PROD and
32    LOW_PROD.
33
34    2) __umulsidi3(a,b) multiplies two unsigned long integers A and B,
35    and returns a long long product.  This is just a variant of umul_ppmm.
36
37    3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
38    denominator) divides a two-word unsigned integer, composed of the
39    integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
40    places the quotient in QUOTIENT and the remainder in REMAINDER.
41    HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
42    If, in addition, the macro requires the most significant bit of DENOMINATOR
43    to be 1, then the pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.
44
45    4) count_leading_zeros(count, x) counts the number of zero-bits from
46    the msb to the first non-zero bit.  This is the number of steps X
47    needs to be shifted left to set the msb.  Undefined for X == 0.
48
49    5) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
50    high_addend_2, low_addend_2) adds two two-word unsigned integers,
51    composed of HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
52    LOW_ADDEND_2 respectively.  The result is placed in HIGH_SUM and
53    LOW_SUM.  Overflow (i.e. carry out) is not stored anywhere, and is
54    lost.
55
56    6) sub_ddmmss(high_difference, low_difference, high_minuend,
57    low_minuend, high_subtrahend, low_subtrahend) subtracts two
58    two-word unsigned integers, composed of HIGH_MINUEND and
59    LOW_MINUEND, and HIGH_SUBTRAHEND and LOW_SUBTRAHEND
60    respectively.  The result is placed in HIGH_DIFFERENCE and
61    LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere,
62    and is lost.
63
64    If any of these macros are left undefined for a particular CPU,
65    C macros are used.  */
66
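/* A minimal usage sketch of the macros documented above, guarded by
   #if 0 so that it is never compiled.  The example_* function names are
   illustrative only and are not part of this file.  */
#if 0
/* Form the double-word product PH:PL of the single-word operands U and V.  */
static void
example_umul (unsigned long int *ph, unsigned long int *pl,
              unsigned long int u, unsigned long int v)
{
  unsigned long int hi, lo;
  umul_ppmm (hi, lo, u, v);     /* hi:lo = u * v */
  *ph = hi;
  *pl = lo;
}

/* Divide the double-word value N1:N0 by D.  N1 must be less than D, and
   D must have its most significant bit set when UDIV_NEEDS_NORMALIZATION
   is 1.  */
static unsigned long int
example_udiv (unsigned long int n1, unsigned long int n0,
              unsigned long int d, unsigned long int *rem)
{
  unsigned long int q, r;
  udiv_qrnnd (q, r, n1, n0, d); /* q = n1:n0 / d, r = n1:n0 % d */
  *rem = r;
  return q;
}
#endif
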
67 /* The CPUs come in alphabetical order below.
68
69    Please add support for more CPUs here, or improve the current support
70    for the CPUs below!
71    (E.g. WE32100, i960, IBM360.)  */
72
73 #if defined (__GNUC__) && !defined (NO_ASM)
74
75 /* We sometimes need to clobber "cc" with gcc2, but that would not be
76    understood by gcc1.  Use cpp to avoid major code duplication.  */
77 #if __GNUC__ < 2
78 #define __CLOBBER_CC
79 #define __AND_CLOBBER_CC
80 #else /* __GNUC__ >= 2 */
81 #define __CLOBBER_CC : "cc"
82 #define __AND_CLOBBER_CC , "cc"
83 #endif /* __GNUC__ < 2 */
84
85 #if defined (__a29k__) || defined (___AM29K__)
86 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
87   __asm__ ("add %1,%4,%5
88         addc %0,%2,%3"                                                  \
89            : "=r" ((unsigned long int)(sh)),                            \
90             "=&r" ((unsigned long int)(sl))                             \
91            : "%r" ((unsigned long int)(ah)),                            \
92              "rI" ((unsigned long int)(bh)),                            \
93              "%r" ((unsigned long int)(al)),                            \
94              "rI" ((unsigned long int)(bl)))
95 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
96   __asm__ ("sub %1,%4,%5
97         subc %0,%2,%3"                                                  \
98            : "=r" ((unsigned long int)(sh)),                            \
99              "=&r" ((unsigned long int)(sl))                            \
100            : "r" ((unsigned long int)(ah)),                             \
101              "rI" ((unsigned long int)(bh)),                            \
102              "r" ((unsigned long int)(al)),                             \
103              "rI" ((unsigned long int)(bl)))
104 #define umul_ppmm(xh, xl, m0, m1) \
105   do {                                                                  \
106     unsigned long int __m0 = (m0), __m1 = (m1);                         \
107     __asm__ ("multiplu %0,%1,%2"                                        \
108              : "=r" ((unsigned long int)(xl))                           \
109              : "r" (__m0),                                              \
110                "r" (__m1));                                             \
111     __asm__ ("multmu %0,%1,%2"                                          \
112              : "=r" ((unsigned long int)(xh))                           \
113              : "r" (__m0),                                              \
114                "r" (__m1));                                             \
115   } while (0)
116 #define udiv_qrnnd(q, r, n1, n0, d) \
117   __asm__ ("dividu %0,%3,%4"                                            \
118            : "=r" ((unsigned long int)(q)),                             \
119              "=q" ((unsigned long int)(r))                              \
120            : "1" ((unsigned long int)(n1)),                             \
121              "r" ((unsigned long int)(n0)),                             \
122              "r" ((unsigned long int)(d)))
123 #define count_leading_zeros(count, x) \
124     __asm__ ("clz %0,%1"                                                \
125              : "=r" ((unsigned long int)(count))                        \
126              : "r" ((unsigned long int)(x)))
127 #endif /* __a29k__ */
128
129 #if defined (__arm__)
130 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
131   __asm__ ("adds %1,%4,%5
132         adc %0,%2,%3"                                                   \
133            : "=r" ((unsigned long int)(sh)),                            \
134              "=&r" ((unsigned long int)(sl))                            \
135            : "%r" ((unsigned long int)(ah)),                            \
136              "rI" ((unsigned long int)(bh)),                            \
137              "%r" ((unsigned long int)(al)),                            \
138              "rI" ((unsigned long int)(bl)))
139 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
140   __asm__ ("subs %1,%4,%5
141         sbc %0,%2,%3"                                                   \
142            : "=r" ((unsigned long int)(sh)),                            \
143              "=&r" ((unsigned long int)(sl))                            \
144            : "r" ((unsigned long int)(ah)),                             \
145              "rI" ((unsigned long int)(bh)),                            \
146              "r" ((unsigned long int)(al)),                             \
147              "rI" ((unsigned long int)(bl)))
148 #endif /* __arm__ */
149
150 #if defined (__gmicro__)
151 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
152   __asm__ ("add.w %5,%1
153         addx %3,%0"                                                     \
154            : "=g" ((unsigned long int)(sh)),                            \
155              "=&g" ((unsigned long int)(sl))                            \
156            : "%0" ((unsigned long int)(ah)),                            \
157              "g" ((unsigned long int)(bh)),                             \
158              "%1" ((unsigned long int)(al)),                            \
159              "g" ((unsigned long int)(bl)))
160 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
161   __asm__ ("sub.w %5,%1
162         subx %3,%0"                                                     \
163            : "=g" ((unsigned long int)(sh)),                            \
164              "=&g" ((unsigned long int)(sl))                            \
165            : "0" ((unsigned long int)(ah)),                             \
166              "g" ((unsigned long int)(bh)),                             \
167              "1" ((unsigned long int)(al)),                             \
168              "g" ((unsigned long int)(bl)))
169 #define umul_ppmm(ph, pl, m0, m1) \
170   __asm__ ("mulx %3,%0,%1"                                              \
171            : "=g" ((unsigned long int)(ph)),                            \
172              "=r" ((unsigned long int)(pl))                             \
173            : "%0" ((unsigned long int)(m0)),                            \
174              "g" ((unsigned long int)(m1)))
175 #define udiv_qrnnd(q, r, nh, nl, d) \
176   __asm__ ("divx %4,%0,%1"                                              \
177            : "=g" ((unsigned long int)(q)),                             \
178              "=r" ((unsigned long int)(r))                              \
179            : "1" ((unsigned long int)(nh)),                             \
180              "0" ((unsigned long int)(nl)),                             \
181              "g" ((unsigned long int)(d)))
182 #define count_leading_zeros(count, x) \
183   __asm__ ("bsch/1 %1,%0"                                               \
184            : "=g" (count)                                               \
185            : "g" ((unsigned long int)(x)),                              \
186              "0" (0UL))
187 #endif /* __gmicro__ */
188
189 #if defined (__hppa)
190 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
191   __asm__ ("add %4,%5,%1
192         addc %2,%3,%0"                                                  \
193            : "=r" ((unsigned long int)(sh)),                            \
194              "=&r" ((unsigned long int)(sl))                            \
195            : "%rM" ((unsigned long int)(ah)),                           \
196              "rM" ((unsigned long int)(bh)),                            \
197              "%rM" ((unsigned long int)(al)),                           \
198              "rM" ((unsigned long int)(bl)))
199 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
200   __asm__ ("sub %5,%4,%1
201         subb %3,%2,%0"                                                  \
202            : "=r" ((unsigned long int)(sh)),                            \
203              "=&r" ((unsigned long int)(sl))                            \
204            : "rM" ((unsigned long int)(ah)),                            \
205              "rM" ((unsigned long int)(bh)),                            \
206              "rM" ((unsigned long int)(al)),                            \
207              "rM" ((unsigned long int)(bl)))
208 #if defined (_PA_RISC1_1)
209 #define umul_ppmm(w1, w0, u, v) \
210   do {                                                                  \
211     union                                                               \
212       {                                                                 \
213         long long __f;                                                  \
214         struct {unsigned long int __w1, __w0;} __w1w0;                  \
215       } __t;                                                            \
216     __asm__ ("xmpyu %1,%2,%0"                                           \
217              : "=x" (__t.__f)                                           \
218              : "x" ((u)),                                               \
219                "x" ((v)));                                              \
220     (w1) = __t.__w1w0.__w1;                                             \
221     (w0) = __t.__w1w0.__w0;                                             \
222      } while (0)
223 #define UMUL_TIME 8
224 #else
225 #define UMUL_TIME 30
226 #endif
227 #define UDIV_TIME 40
228 #endif
229
230 #if defined (__i386__) || defined (__i486__)
231 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
232   __asm__ ("addl %5,%1
233         adcl %3,%0"                                                     \
234            : "=r" ((unsigned long int)(sh)),                            \
235              "=&r" ((unsigned long int)(sl))                            \
236            : "%0" ((unsigned long int)(ah)),                            \
237              "g" ((unsigned long int)(bh)),                             \
238              "%1" ((unsigned long int)(al)),                            \
239              "g" ((unsigned long int)(bl)))
240 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
241   __asm__ ("subl %5,%1
242         sbbl %3,%0"                                                     \
243            : "=r" ((unsigned long int)(sh)),                            \
244              "=&r" ((unsigned long int)(sl))                            \
245            : "0" ((unsigned long int)(ah)),                             \
246              "g" ((unsigned long int)(bh)),                             \
247              "1" ((unsigned long int)(al)),                             \
248              "g" ((unsigned long int)(bl)))
249 #define umul_ppmm(w1, w0, u, v) \
250   __asm__ ("mull %3"                                                    \
251            : "=a" ((unsigned long int)(w0)),                            \
252              "=d" ((unsigned long int)(w1))                             \
253            : "%0" ((unsigned long int)(u)),                             \
254              "rm" ((unsigned long int)(v)))
255 #define udiv_qrnnd(q, r, n1, n0, d) \
256   __asm__ ("divl %4"                                                    \
257            : "=a" ((unsigned long int)(q)),                             \
258              "=d" ((unsigned long int)(r))                              \
259            : "0" ((unsigned long int)(n0)),                             \
260              "1" ((unsigned long int)(n1)),                             \
261              "rm" ((unsigned long int)(d)))
262 #define count_leading_zeros(count, x) \
263   do {                                                                  \
264     unsigned long int __cbtmp;                                          \
265     __asm__ ("bsrl %1,%0"                                               \
266              : "=r" (__cbtmp) : "rm" ((unsigned long int)(x)));         \
267     (count) = __cbtmp ^ 31;                                             \
268   } while (0)
269 #define UMUL_TIME 40
270 #define UDIV_TIME 40
271 #endif /* 80x86 */
272
273 #if defined (__i860__)
274 #if 0
275 /* Make sure these patterns really improve the code before
276    switching them on.  */
277 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
278   do {                                                                  \
279     union                                                               \
280       {                                                                 \
281         long long int ll;                                               \
282         struct {unsigned long int l, h;} i;                             \
283       }  __a, __b, __s;                                                 \
284     __a.i.l = (al); __a.i.h = (ah);                                     \
285     __b.i.l = (bl); __b.i.h = (bh);                                     \
286     __asm__ ("fiadd.dd %1,%2,%0"                                        \
287              : "=f" (__s.ll)                                            \
288              : "%f" (__a.ll), "f" (__b.ll));                            \
289     (sh) = __s.i.h; (sl) = __s.i.l;                                     \
290     } while (0)
291 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
292   do {                                                                  \
293     union                                                               \
294       {                                                                 \
295         long long int ll;                                               \
296         struct {unsigned long int l, h;} i;                             \
297       }  __a, __b, __s;                                                 \
298     __a.i.l = (al); __a.i.h = (ah);                                     \
299     __b.i.l = (bl); __b.i.h = (bh);                                     \
300     __asm__ ("fisub.dd %1,%2,%0"                                        \
301              : "=f" (__s.ll)                                            \
302              : "%f" (__a.ll), "f" (__b.ll));                            \
303     (sh) = __s.i.h; (sl) = __s.i.l;                                     \
304     } while (0)
305 #endif
306 #endif /* __i860__ */
307
308 #if defined (___IBMR2__) /* IBM RS6000 */
309 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
310   __asm__ ("a%I5 %1,%4,%5
311         ae %0,%2,%3"                                                    \
312            : "=r" ((unsigned long int)(sh)),                            \
313              "=&r" ((unsigned long int)(sl))                            \
314            : "%r" ((unsigned long int)(ah)),                            \
315              "r" ((unsigned long int)(bh)),                             \
316              "%r" ((unsigned long int)(al)),                            \
317              "rI" ((unsigned long int)(bl)))
318 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
319   __asm__ ("sf%I4 %1,%5,%4
320         sfe %0,%3,%2"                                                   \
321            : "=r" ((unsigned long int)(sh)),                            \
322              "=&r" ((unsigned long int)(sl))                            \
323            : "r" ((unsigned long int)(ah)),                             \
324              "r" ((unsigned long int)(bh)),                             \
325              "rI" ((unsigned long int)(al)),                            \
326              "r" ((unsigned long int)(bl)))
327 #define umul_ppmm(xh, xl, m0, m1) \
328   do {                                                                  \
329     unsigned long int __m0 = (m0), __m1 = (m1);                         \
330     __asm__ ("mul %0,%2,%3"                                             \
331              : "=r" ((unsigned long int)(xh)),                          \
332                "=q" ((unsigned long int)(xl))                           \
333              : "r" (__m0),                                              \
334                "r" (__m1));                                             \
335     (xh) += ((((signed long int) __m0 >> 31) & __m1)                    \
336              + (((signed long int) __m1 >> 31) & __m0));                \
337   } while (0)
338 #define smul_ppmm(xh, xl, m0, m1) \
339   __asm__ ("mul %0,%2,%3"                                               \
340            : "=r" ((unsigned long int)(xh)),                            \
341              "=q" ((unsigned long int)(xl))                             \
342            : "r" (m0),                                                  \
343              "r" (m1))
344 #define UMUL_TIME 8
345 #define sdiv_qrnnd(q, r, nh, nl, d) \
346   __asm__ ("div %0,%2,%4"                                               \
347            : "=r" (q), "=q" (r)                                         \
348            : "r" (nh), "1" (nl), "r" (d))
349 #define UDIV_TIME 40
350 #define UDIV_NEEDS_NORMALIZATION 1
351 #define count_leading_zeros(count, x) \
352   __asm__ ("cntlz %0,%1"                                                \
353            : "=r" ((unsigned long int)(count))                          \
354            : "r" ((unsigned long int)(x)))
355 #endif /* ___IBMR2__ */
356
357 #if defined (__mc68000__)
358 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
359   __asm__ ("add%.l %5,%1
360         addx%.l %3,%0"                                                  \
361            : "=d" ((unsigned long int)(sh)),                            \
362              "=&d" ((unsigned long int)(sl))                            \
363            : "%0" ((unsigned long int)(ah)),                            \
364              "d" ((unsigned long int)(bh)),                             \
365              "%1" ((unsigned long int)(al)),                            \
366              "g" ((unsigned long int)(bl)))
367 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
368   __asm__ ("sub%.l %5,%1
369         subx%.l %3,%0"                                                  \
370            : "=d" ((unsigned long int)(sh)),                            \
371              "=&d" ((unsigned long int)(sl))                            \
372            : "0" ((unsigned long int)(ah)),                             \
373              "d" ((unsigned long int)(bh)),                             \
374              "1" ((unsigned long int)(al)),                             \
375              "g" ((unsigned long int)(bl)))
376 #if defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)
377 #define umul_ppmm(w1, w0, u, v) \
378   __asm__ ("mulu%.l %3,%1:%0"                                           \
379            : "=d" ((unsigned long int)(w0)),                            \
380              "=d" ((unsigned long int)(w1))                             \
381            : "%0" ((unsigned long int)(u)),                             \
382              "dmi" ((unsigned long int)(v)))
383 #define UMUL_TIME 45
384 #define udiv_qrnnd(q, r, n1, n0, d) \
385   __asm__ ("divu%.l %4,%1:%0"                                           \
386            : "=d" ((unsigned long int)(q)),                             \
387              "=d" ((unsigned long int)(r))                              \
388            : "0" ((unsigned long int)(n0)),                             \
389              "1" ((unsigned long int)(n1)),                             \
390              "dmi" ((unsigned long int)(d)))
391 #define UDIV_TIME 90
392 #define sdiv_qrnnd(q, r, n1, n0, d) \
393   __asm__ ("divs%.l %4,%1:%0"                                           \
394            : "=d" ((unsigned long int)(q)),                             \
395              "=d" ((unsigned long int)(r))                              \
396            : "0" ((unsigned long int)(n0)),                             \
397              "1" ((unsigned long int)(n1)),                             \
398              "dmi" ((unsigned long int)(d)))
399 #define count_leading_zeros(count, x) \
400   __asm__ ("bfffo %1{%b2:%b2},%0"                                       \
401            : "=d" ((unsigned long int)(count))                          \
402            : "od" ((unsigned long int)(x)), "n" (0))
403 #else /* not mc68020 */
404 #define umul_ppmm(xh, xl, a, b) \
405   __asm__ ("| Inlined umul_ppmm
406         movel   %2,d0
407         movel   %3,d1
408         movel   d0,d2
409         swap    d0
410         movel   d1,d3
411         swap    d1
412         movew   d2,d4
413         mulu    d3,d4
414         mulu    d1,d2
415         mulu    d0,d3
416         mulu    d0,d1
417         movel   d4,d0
418         eorw    d0,d0
419         swap    d0
420         addl    d0,d2
421         addl    d3,d2
422         jcc     1f
423         addl    #65536,d1
424 1:      swap    d2
425         moveq   #0,d0
426         movew   d2,d0
427         movew   d4,d2
428         movel   d2,%1
429         addl    d1,d0
430         movel   d0,%0"                                                  \
431            : "=g" ((unsigned long int)(xh)),                            \
432              "=g" ((unsigned long int)(xl))                             \
433            : "g" ((unsigned long int)(a)),                              \
434              "g" ((unsigned long int)(b))                               \
435            : "d0", "d1", "d2", "d3", "d4")
436 #define UMUL_TIME 100
437 #define UDIV_TIME 400
438 #endif /* not mc68020 */
439 #endif /* mc68000 */
440
441 #if defined (__m88000__)
442 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
443   __asm__ ("addu.co %1,%r4,%r5
444         addu.ci %0,%r2,%r3"                                             \
445            : "=r" ((unsigned long int)(sh)),                            \
446              "=&r" ((unsigned long int)(sl))                            \
447            : "%rJ" ((unsigned long int)(ah)),                           \
448              "rJ" ((unsigned long int)(bh)),                            \
449              "%rJ" ((unsigned long int)(al)),                           \
450              "rJ" ((unsigned long int)(bl)))
451 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
452   __asm__ ("subu.co %1,%r4,%r5
453         subu.ci %0,%r2,%r3"                                             \
454            : "=r" ((unsigned long int)(sh)),                            \
455              "=&r" ((unsigned long int)(sl))                            \
456            : "rJ" ((unsigned long int)(ah)),                            \
457              "rJ" ((unsigned long int)(bh)),                            \
458              "rJ" ((unsigned long int)(al)),                            \
459              "rJ" ((unsigned long int)(bl)))
460 #define UMUL_TIME 17
461 #define UDIV_TIME 150
462 #define count_leading_zeros(count, x) \
463   do {                                                                  \
464     unsigned long int __cbtmp;                                          \
465     __asm__ ("ff1 %0,%1"                                                \
466              : "=r" (__cbtmp)                                           \
467              : "r" ((unsigned long int)(x)));                           \
468     (count) = __cbtmp ^ 31;                                             \
469   } while (0)
470 #if defined (__mc88110__)
471 #define umul_ppmm(w1, w0, u, v) \
472   __asm__ ("mulu.d      r10,%2,%3
473         or      %0,r10,0
474         or      %1,r11,0"                                               \
475            : "=r" (w1),                                                 \
476              "=r" (w0)                                                  \
477            : "r" (u),                                                   \
478              "r" (v)                                                    \
479            : "r10", "r11")
480 #define udiv_qrnnd(q, r, n1, n0, d) \
481   __asm__ ("or  r10,%2,0
482         or      r11,%3,0
483         divu.d  r10,r10,%4
484         mulu    %1,%4,r11
485         subu    %1,%3,%1
486         or      %0,r11,0"                                               \
487            : "=r" (q),                                                  \
488              "=&r" (r)                                                  \
489            : "r" (n1),                                                  \
490              "r" (n0),                                                  \
491              "r" (d)                                                    \
492            : "r10", "r11")
493 #endif
494 #endif /* __m88000__ */
495
496 #if defined (__mips__)
497 #define umul_ppmm(w1, w0, u, v) \
498   __asm__ ("multu %2,%3
499         mflo %0
500         mfhi %1"                                                        \
501            : "=d" ((unsigned long int)(w0)),                            \
502              "=d" ((unsigned long int)(w1))                             \
503            : "d" ((unsigned long int)(u)),                              \
504              "d" ((unsigned long int)(v)))
505 #define UMUL_TIME 5
506 #define UDIV_TIME 100
507 #endif /* __mips__ */
508
509 #if defined (__ns32000__)
510 #define __umulsidi3(u, v) \
511   ({long long int __w;                                                  \
512     __asm__ ("meid %2,%0"                                               \
513              : "=g" (__w)                                               \
514              : "%0" ((unsigned long int)(u)),                           \
515                "g" ((unsigned long int)(v)));                           \
516     __w; })
517 #define div_qrnnd(q, r, n1, n0, d) \
518   __asm__ ("movd %2,r0
519         movd %3,r1
520         deid %4,r0
521         movd r1,%0
522         movd r0,%1"                                                     \
523            : "=g" ((unsigned long int)(q)),                             \
524              "=g" ((unsigned long int)(r))                              \
525            : "g" ((unsigned long int)(n0)),                             \
526              "g" ((unsigned long int)(n1)),                             \
527              "g" ((unsigned long int)(d))                               \
528            : "r0", "r1")
529 #endif /* __ns32000__ */
530
531 #if defined (__pyr__)
532 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
533   __asm__ ("addw        %5,%1
534         addwc   %3,%0"                                                  \
535            : "=r" ((unsigned long int)(sh)),                            \
536              "=&r" ((unsigned long int)(sl))                            \
537            : "%0" ((unsigned long int)(ah)),                            \
538              "g" ((unsigned long int)(bh)),                             \
539              "%1" ((unsigned long int)(al)),                            \
540              "g" ((unsigned long int)(bl)))
541 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
542   __asm__ ("subw        %5,%1
543         subwb   %3,%0"                                                  \
544            : "=r" ((unsigned long int)(sh)),                            \
545              "=&r" ((unsigned long int)(sl))                            \
546            : "0" ((unsigned long int)(ah)),                             \
547              "g" ((unsigned long int)(bh)),                             \
548              "1" ((unsigned long int)(al)),                             \
549              "g" ((unsigned long int)(bl)))
550 /* This insn doesn't work on ancient pyramids.  */
551 #define umul_ppmm(w1, w0, u, v) \
552   ({union {long long int ll;struct {unsigned long int h, l;} i;} __xx;  \
553   __xx.i.l = u;                                                         \
554   __asm__ ("uemul %3,%0"                                                \
555            : "=r" (__xx.i.h),                                           \
556              "=r" (__xx.i.l)                                            \
557            : "1" (__xx.i.l),                                            \
558              "g" (v));                                                  \
559   (w1) = __xx.i.h; (w0) = __xx.i.l;})
560 #endif /* __pyr__ */
561
562 #if defined (__ibm032__) /* RT/ROMP */
563 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
564   __asm__ ("a %1,%5
565         ae %0,%3"                                                       \
566            : "=r" ((unsigned long int)(sh)),                            \
567              "=&r" ((unsigned long int)(sl))                            \
568            : "%0" ((unsigned long int)(ah)),                            \
569              "r" ((unsigned long int)(bh)),                             \
570              "%1" ((unsigned long int)(al)),                            \
571              "r" ((unsigned long int)(bl)))
572 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
573   __asm__ ("s %1,%5
574         se %0,%3"                                                       \
575            : "=r" ((unsigned long int)(sh)),                            \
576              "=&r" ((unsigned long int)(sl))                            \
577            : "0" ((unsigned long int)(ah)),                             \
578              "r" ((unsigned long int)(bh)),                             \
579              "1" ((unsigned long int)(al)),                             \
580              "r" ((unsigned long int)(bl)))
581 #define umul_ppmm(ph, pl, m0, m1) \
582   do {                                                                  \
583     unsigned long int __m0 = (m0), __m1 = (m1);                         \
584     __asm__ (                                                           \
585        "s       r2,r2
586         mts     r10,%2
587         m       r2,%3
588         m       r2,%3
589         m       r2,%3
590         m       r2,%3
591         m       r2,%3
592         m       r2,%3
593         m       r2,%3
594         m       r2,%3
595         m       r2,%3
596         m       r2,%3
597         m       r2,%3
598         m       r2,%3
599         m       r2,%3
600         m       r2,%3
601         m       r2,%3
602         m       r2,%3
603         cas     %0,r2,r0
604         mfs     r10,%1"                                                 \
605              : "=r" ((unsigned long int)(ph)),                          \
606                "=r" ((unsigned long int)(pl))                           \
607              : "%r" (__m0),                                             \
608                 "r" (__m1)                                              \
609              : "r2");                                                   \
610     (ph) += ((((signed long int) __m0 >> 31) & __m1)                    \
611              + (((signed long int) __m1 >> 31) & __m0));                \
612   } while (0)
613 #define UMUL_TIME 20
614 #define UDIV_TIME 200
615 #define count_leading_zeros(count, x) \
616   do {                                                                  \
617     if ((x) >= 0x10000)                                                 \
618       __asm__ ("clz     %0,%1"                                          \
619                : "=r" ((unsigned long int)(count))                      \
620                : "r" ((unsigned long int)(x) >> 16));                   \
621     else                                                                \
622       {                                                                 \
623         __asm__ ("clz   %0,%1"                                          \
624                  : "=r" ((unsigned long int)(count))                    \
625                  : "r" ((unsigned long int)(x)));                       \
626         (count) += 16;                                                  \
627       }                                                                 \
628   } while (0)
629 #endif /* __ibm032__ */
630
631 #if defined (__sparc__)
632 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
633   __asm__ ("addcc %4,%5,%1
634         addx %2,%3,%0"                                                  \
635            : "=r" ((unsigned long int)(sh)),                            \
636              "=&r" ((unsigned long int)(sl))                            \
637            : "%r" ((unsigned long int)(ah)),                            \
638              "rI" ((unsigned long int)(bh)),                            \
639              "%r" ((unsigned long int)(al)),                            \
640              "rI" ((unsigned long int)(bl))                             \
641            __CLOBBER_CC)
642 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
643   __asm__ ("subcc %4,%5,%1
644         subx %2,%3,%0"                                                  \
645            : "=r" ((unsigned long int)(sh)),                            \
646              "=&r" ((unsigned long int)(sl))                            \
647            : "r" ((unsigned long int)(ah)),                             \
648              "rI" ((unsigned long int)(bh)),                            \
649              "r" ((unsigned long int)(al)),                             \
650              "rI" ((unsigned long int)(bl))                             \
651            __CLOBBER_CC)
652 #if defined (__sparcv8__)
653 #define umul_ppmm(w1, w0, u, v) \
654   __asm__ ("umul %2,%3,%1;rd %%y,%0"                                    \
655            : "=r" ((unsigned long int)(w1)),                            \
656              "=r" ((unsigned long int)(w0))                             \
657            : "r" ((unsigned long int)(u)),                              \
658              "r" ((unsigned long int)(v)))
659 #define udiv_qrnnd(q, r, n1, n0, d) \
660   __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
661            : "=&r" ((unsigned long int)(q)),                            \
662              "=&r" ((unsigned long int)(r))                             \
663            : "r" ((unsigned long int)(n1)),                             \
664              "r" ((unsigned long int)(n0)),                             \
665              "r" ((unsigned long int)(d)))
666 #else
667 /* SPARC without integer multiplication and divide instructions.
668    (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
669 #define umul_ppmm(w1, w0, u, v) \
670   __asm__ ("! Inlined umul_ppmm
671         wr      %%g0,%2,%%y     ! SPARC has 0-3 delay insn after a wr
672         sra     %3,31,%%g2      ! Don't move this insn
673         and     %2,%%g2,%%g2    ! Don't move this insn
674         andcc   %%g0,0,%%g1     ! Don't move this insn
675         mulscc  %%g1,%3,%%g1
676         mulscc  %%g1,%3,%%g1
677         mulscc  %%g1,%3,%%g1
678         mulscc  %%g1,%3,%%g1
679         mulscc  %%g1,%3,%%g1
680         mulscc  %%g1,%3,%%g1
681         mulscc  %%g1,%3,%%g1
682         mulscc  %%g1,%3,%%g1
683         mulscc  %%g1,%3,%%g1
684         mulscc  %%g1,%3,%%g1
685         mulscc  %%g1,%3,%%g1
686         mulscc  %%g1,%3,%%g1
687         mulscc  %%g1,%3,%%g1
688         mulscc  %%g1,%3,%%g1
689         mulscc  %%g1,%3,%%g1
690         mulscc  %%g1,%3,%%g1
691         mulscc  %%g1,%3,%%g1
692         mulscc  %%g1,%3,%%g1
693         mulscc  %%g1,%3,%%g1
694         mulscc  %%g1,%3,%%g1
695         mulscc  %%g1,%3,%%g1
696         mulscc  %%g1,%3,%%g1
697         mulscc  %%g1,%3,%%g1
698         mulscc  %%g1,%3,%%g1
699         mulscc  %%g1,%3,%%g1
700         mulscc  %%g1,%3,%%g1
701         mulscc  %%g1,%3,%%g1
702         mulscc  %%g1,%3,%%g1
703         mulscc  %%g1,%3,%%g1
704         mulscc  %%g1,%3,%%g1
705         mulscc  %%g1,%3,%%g1
706         mulscc  %%g1,%3,%%g1
707         mulscc  %%g1,0,%%g1
708         add     %%g1,%%g2,%0
709         rd      %%y,%1"                                                 \
710            : "=r" ((unsigned long int)(w1)),                            \
711              "=r" ((unsigned long int)(w0))                             \
712            : "%rI" ((unsigned long int)(u)),                            \
713              "r" ((unsigned long int)(v))                               \
714            : "%g1", "%g2" __AND_CLOBBER_CC)
715 #define UMUL_TIME 39            /* 39 instructions */
716 /* It's quite necessary to add this much assembler for the sparc.
717    The default udiv_qrnnd (in C) is more than 10 times slower!  */
718 #define udiv_qrnnd(q, r, n1, n0, d) \
719   __asm__ ("! Inlined udiv_qrnnd
720         mov     32,%%g1
721         subcc   %1,%2,%%g0
722 1:      bcs     5f
723          addxcc %0,%0,%0        ! shift n1n0 and a q-bit in lsb
724         sub     %1,%2,%1        ! this kills msb of n
725         addx    %1,%1,%1        ! so this can't give carry
726         subcc   %%g1,1,%%g1
727 2:      bne     1b
728          subcc  %1,%2,%%g0
729         bcs     3f
730          addxcc %0,%0,%0        ! shift n1n0 and a q-bit in lsb
731         b       3f
732          sub    %1,%2,%1        ! this kills msb of n
733 4:      sub     %1,%2,%1
734 5:      addxcc  %1,%1,%1
735         bcc     2b
736          subcc  %%g1,1,%%g1
737 ! Got carry from n.  Subtract next step to cancel this carry.
738         bne     4b
739          addcc  %0,%0,%0        ! shift n1n0 and a 0-bit in lsb
740         sub     %1,%2,%1
741 3:      xnor    %0,0,%0
742         ! End of inline udiv_qrnnd"                                     \
743            : "=&r" ((unsigned long int)(q)),                            \
744              "=&r" ((unsigned long int)(r))                             \
745            : "r" ((unsigned long int)(d)),                              \
746              "1" ((unsigned long int)(n1)),                             \
747              "0" ((unsigned long int)(n0)) : "%g1" __AND_CLOBBER_CC)
748 #define UDIV_TIME (3+7*32)      /* 7 instructions/iteration. 32 iterations. */
749 #endif /* __sparcv8__ */
750 #endif /* __sparc__ */
751
752 #if defined (__vax__)
753 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
754   __asm__ ("addl2 %5,%1
755         adwc %3,%0"                                                     \
756            : "=g" ((unsigned long int)(sh)),                            \
757              "=&g" ((unsigned long int)(sl))                            \
758            : "%0" ((unsigned long int)(ah)),                            \
759              "g" ((unsigned long int)(bh)),                             \
760              "%1" ((unsigned long int)(al)),                            \
761              "g" ((unsigned long int)(bl)))
762 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
763   __asm__ ("subl2 %5,%1
764         sbwc %3,%0"                                                     \
765            : "=g" ((unsigned long int)(sh)),                            \
766              "=&g" ((unsigned long int)(sl))                            \
767            : "0" ((unsigned long int)(ah)),                             \
768              "g" ((unsigned long int)(bh)),                             \
769              "1" ((unsigned long int)(al)),                             \
770              "g" ((unsigned long int)(bl)))
771 #define umul_ppmm(xh, xl, m0, m1) \
772   do {                                                                  \
773     union {long long int ll;struct {unsigned long int l, h;} i;} __xx;  \
774     unsigned long int __m0 = (m0), __m1 = (m1);                         \
775     __asm__ ("emul %1,%2,$0,%0"                                         \
776              : "=r" (__xx.ll)                                           \
777              : "g" (__m0),                                              \
778                "g" (__m1));                                             \
779     (xh) = __xx.i.h; (xl) = __xx.i.l;                                   \
780     (xh) += ((((signed long int) __m0 >> 31) & __m1)                    \
781              + (((signed long int) __m1 >> 31) & __m0));                \
782   } while (0)
783 #endif /* __vax__ */
784
785 #endif /* __GNUC__ */
786
787 /* If this machine has no inline assembler, use C macros.  */
788
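/* In the C fall-backs below, carry out of the low-word addition is
   detected with the unsigned comparison (__x < (al)), and borrow out of
   the low-word subtraction with (__x > (al)); the extra bit is then
   folded into the high word.  */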
789 #if !defined (add_ssaaaa)
790 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
791   do {                                                                  \
792     unsigned long int __x;                                              \
793     __x = (al) + (bl);                                                  \
794     (sh) = (ah) + (bh) + (__x < (al));                                  \
795     (sl) = __x;                                                         \
796   } while (0)
797 #endif
798
799 #if !defined (sub_ddmmss)
800 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
801   do {                                                                  \
802     unsigned long int __x;                                              \
803     __x = (al) - (bl);                                                  \
804     (sh) = (ah) - (bh) - (__x > (al));                                  \
805     (sl) = __x;                                                         \
806   } while (0)
807 #endif
808
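/* The generic umul_ppmm splits each operand into half-words, u = uh*B + ul
   and v = vh*B + vl with B = 2**(LONG_TYPE_SIZE/2), and forms
   u*v = uh*vh*B*B + (uh*vl + ul*vh)*B + ul*vl from four single-word
   partial products, propagating the carry from the middle terms by hand.  */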
809 #if !defined (umul_ppmm)
810 #define umul_ppmm(w1, w0, u, v)                                         \
811   do {                                                                  \
812     unsigned long int __x0, __x1, __x2, __x3;                           \
813     unsigned int __ul, __vl, __uh, __vh;                                \
814                                                                         \
815     __ul = __ll_lowpart (u);                                            \
816     __uh = __ll_highpart (u);                                           \
817     __vl = __ll_lowpart (v);                                            \
818     __vh = __ll_highpart (v);                                           \
819                                                                         \
820     __x0 = (unsigned long int) __ul * __vl;                             \
821     __x1 = (unsigned long int) __ul * __vh;                             \
822     __x2 = (unsigned long int) __uh * __vl;                             \
823     __x3 = (unsigned long int) __uh * __vh;                             \
824                                                                         \
825     __x1 += __ll_highpart (__x0);/* this can't give carry */            \
826     __x1 += __x2;               /* but this indeed can */               \
827     if (__x1 < __x2)            /* did we get it? */                    \
828       __x3 += __ll_B;           /* yes, add it in the proper pos. */    \
829                                                                         \
830     (w1) = __x3 + __ll_highpart (__x1);                                 \
831     (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0);          \
832   } while (0)
833 #endif
834
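/* Note: the type `long_long' used below is assumed to be supplied by the
   file that includes this header, as a union with an `ll' member holding
   the full long long value and an `s' struct with `high' and `low' words.  */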
835 #if !defined (__umulsidi3)
836 #define __umulsidi3(u, v) \
837   ({long_long __w;                                                      \
838     umul_ppmm (__w.s.high, __w.s.low, u, v);                            \
839     __w.ll; })
840 #endif
841
842 /* Define this unconditionally, so it can be used for debugging.  */
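/* __udiv_qrnnd_c does schoolbook division with half-word digits: each of
   the two quotient half-words is estimated from the high half of the
   divisor and then corrected, at most twice, against the full divisor.
   The estimates are only safe when D is normalized, i.e. has its most
   significant bit set, which is why UDIV_NEEDS_NORMALIZATION is defined
   to 1 whenever this fall-back is used as udiv_qrnnd.  */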
843 #define __udiv_qrnnd_c(q, r, n1, n0, d) \
844   do {                                                                  \
845     unsigned int __d1, __d0, __q1, __q0;                                \
846     unsigned long int __r1, __r0, __m;                                  \
847     __d1 = __ll_highpart (d);                                           \
848     __d0 = __ll_lowpart (d);                                            \
849                                                                         \
850     __r1 = (n1) % __d1;                                                 \
851     __q1 = (n1) / __d1;                                                 \
852     __m = (unsigned long int) __q1 * __d0;                              \
853     __r1 = __r1 * __ll_B | __ll_highpart (n0);                          \
854     if (__r1 < __m)                                                     \
855       {                                                                 \
856         __q1--, __r1 += (d);                                            \
857         if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
858           if (__r1 < __m)                                               \
859             __q1--, __r1 += (d);                                        \
860       }                                                                 \
861     __r1 -= __m;                                                        \
862                                                                         \
863     __r0 = __r1 % __d1;                                                 \
864     __q0 = __r1 / __d1;                                                 \
865     __m = (unsigned long int) __q0 * __d0;                              \
866     __r0 = __r0 * __ll_B | __ll_lowpart (n0);                           \
867     if (__r0 < __m)                                                     \
868       {                                                                 \
869         __q0--, __r0 += (d);                                            \
870         if (__r0 >= (d))                                                \
871           if (__r0 < __m)                                               \
872             __q0--, __r0 += (d);                                        \
873       }                                                                 \
874     __r0 -= __m;                                                        \
875                                                                         \
876     (q) = (unsigned long int) __q1 * __ll_B | __q0;                     \
877     (r) = __r0;                                                         \
878   } while (0)
879 /* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c.  */
880 #if !defined (udiv_qrnnd)
881 #define UDIV_NEEDS_NORMALIZATION 1
882 #define udiv_qrnnd __udiv_qrnnd_c
883 #endif
884
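/* __clz_tab (defined elsewhere) is expected to map each byte value to the
   number of bits needed to represent it; the macro below locates the
   highest non-zero chunk of X and combines the table value with the
   chunk's position to obtain the leading-zero count.  */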
885 #if !defined (count_leading_zeros)
886 extern const unsigned char __clz_tab[];
887 #define count_leading_zeros(count, x) \
888   do {                                                                  \
889     unsigned long int __xr = (x);                                       \
890     unsigned int __a;                                                   \
891                                                                         \
892     if (LONG_TYPE_SIZE <= 32)                                           \
893       {                                                                 \
894         __a = __xr < (1<<2*__BITS4)                                     \
895           ? (__xr < (1<<__BITS4) ? 0 : __BITS4)                         \
896           : (__xr < (1<<3*__BITS4) ?  2*__BITS4 : 3*__BITS4);           \
897       }                                                                 \
898     else                                                                \
899       {                                                                 \
900         for (__a = LONG_TYPE_SIZE - 8; __a > 0; __a -= 8)               \
901           if (((__xr >> __a) & 0xff) != 0)                              \
902             break;                                                      \
903       }                                                                 \
904                                                                         \
905     (count) = LONG_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a);          \
906   } while (0)
907 #endif
908
909 #ifndef UDIV_NEEDS_NORMALIZATION
910 #define UDIV_NEEDS_NORMALIZATION 0
911 #endif