OSDN Git Service

Initial revision
[pf3gnuchains/gcc-fork.git] / gcc / longlong.h
1 /* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
2    Copyright (C) 1991 Free Software Foundation, Inc.
3
4    This definition file is free software; you can redistribute it
5    and/or modify it under the terms of the GNU General Public
6    License as published by the Free Software Foundation; either
7    version 2, or (at your option) any later version.
8
9    This definition file is distributed in the hope that it will be
10    useful, but WITHOUT ANY WARRANTY; without even the implied
11    warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12    See the GNU General Public License for more details.
13
14    You should have received a copy of the GNU General Public License
15    along with this program; if not, write to the Free Software
16    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  */
17
/* Word size in bits; may be overridden by the including compiler.  */
#ifndef LONG_TYPE_SIZE
#define LONG_TYPE_SIZE 32
#endif

/* Helpers for splitting a full word into half-words:
   __BITS4 is a quarter-word bit count, __ll_B is the half-word base
   (2^(LONG_TYPE_SIZE/2)), and __ll_lowpart/__ll_highpart extract the
   low/high half-word of T as an unsigned value.  */
#define __BITS4 (LONG_TYPE_SIZE / 4)
#define __ll_B (1L << (LONG_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((unsigned long int) (t) % __ll_B)
#define __ll_highpart(t) ((unsigned long int) (t) / __ll_B)
26
/* Define auxiliary asm macros.

   1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand)
   multiplies two unsigned long integers MULTIPLIER and MULTIPLICAND,
   and generates a two-word unsigned product in HIGH_PROD and
   LOW_PROD.
33
34    2) __umulsidi3(a,b) multiplies two unsigned long integers A and B,
35    and returns a long long product.  This is just a variant of umul_ppmm.
36
37    3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
38    denominator) divides a two-word unsigned integer, composed by the
39    integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
40    places the quotient in QUOTIENT and the remainder in REMAINDER.
41    HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
   If, in addition, the most significant bit of DENOMINATOR is required
   to be 1, then the pre-processor symbol UDIV_NEEDS_NORMALIZATION is
   defined to 1.
44
45    4) count_leading_zeros(count, x) counts the number of zero-bits from
46    the msb to the first non-zero bit.  This is the number of steps X
47    needs to be shifted left to set the msb.  Undefined for X == 0.
48
49    5) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
50    high_addend_2, low_addend_2) adds two two-word unsigned integers,
51    composed by HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
52    LOW_ADDEND_2 respectively.  The result is placed in HIGH_SUM and
53    LOW_SUM.  Overflow (i.e. carry out) is not stored anywhere, and is
54    lost.
55
   6) sub_ddmmss(high_difference, low_difference, high_minuend,
   low_minuend, high_subtrahend, low_subtrahend) subtracts two
   two-word unsigned integers, composed by HIGH_MINUEND and
   LOW_MINUEND, and HIGH_SUBTRAHEND and LOW_SUBTRAHEND
   respectively.  The result is placed in HIGH_DIFFERENCE and
   LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere,
   and is lost.
63
64    If any of these macros are left undefined for a particular CPU,
65    C macros are used.  */
66
67 /* The CPUs come in alphabetical order below.
68
69    Please add support for more CPUs here, or improve the current support
70    for the CPUs below!
71    (E.g. WE32100, HP-PA (xmpyu?), i960, IBM360, TRON.)  */
72
73 #if defined (__GNUC__) && !defined (NO_ASM)
74
/* AMD Am29000 family.  */
#if defined (__a29k__) || defined (___AM29K__)
/* Two-word add: low halves with "add" (sets carry), high halves with
   "addc" (consumes carry).  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %1,%4,%5
	addc %0,%2,%3"							\
	 : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	 : "%r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)), \
	   "%r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %1,%4,%5
	subc %0,%2,%3"							\
	 : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	 : "r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)), \
	   "r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
/* "multiplu" yields the low word and "multmu" the high word of the
   unsigned 64-bit product; both take the same two operands.  */
#define umul_ppmm(xh, xl, m0, m1) \
  do {									\
    unsigned long int __m0 = (m0), __m1 = (m1);				\
    __asm__ ("multiplu %0,%1,%2" : "=r" ((unsigned long int)(xl))	\
	     : "r" (__m0), "r" (__m1));					\
    __asm__ ("multmu %0,%1,%2" : "=r" ((unsigned long int)(xh))		\
	     : "r" (__m0), "r" (__m1));					\
  } while (0)
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("dividu %0,%3,%4"						\
	: "=r" ((unsigned long int)(q)), "=q" ((unsigned long int)(r))	\
	: "1" ((unsigned long int)(n1)), "r" ((unsigned long int)(n0)), \
	  "r" ((unsigned long int)(d)))
#define count_leading_zeros(count, x) \
    __asm__ ("clz %0,%1" : "=r" ((unsigned long int)(count))		\
	     : "r" ((unsigned long int)(x)))
#endif /* __a29k__ */
105
/* ARM.  Only two-word add/subtract are provided; multiply/divide fall
   back to the generic C versions.  */
#if defined (__arm__)
/* "adds" sets the carry flag from the low-word add; "adc" folds it
   into the high-word add.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("adds %1,%4,%5
	adc %0,%2,%3"							\
	: "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	: "%r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)), \
	  "%r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subs %1,%4,%5
	sbc %0,%2,%3"							\
	: "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	: "r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)), \
	  "r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
#endif /* __arm__ */
120
/* Gmicro (TRON architecture).  */
#if defined (__gmicro__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add.w %5,%1
	addx %3,%0"							\
       : "=g" ((unsigned long int)(sh)), "=&g" ((unsigned long int)(sl))\
       : "%0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
	 "%1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub.w %5,%1
	subx %3,%0"							\
       : "=g" ((unsigned long int)(sh)), "=&g" ((unsigned long int)(sl))\
       : "0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)),  \
	 "1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
#define umul_ppmm(ph, pl, m0, m1) \
  __asm__ ("mulx %3,%0,%1"						\
	: "=g" ((unsigned long int)(ph)), "=r" ((unsigned long int)(pl))\
	: "%0" ((unsigned long int)(m0)), "g" ((unsigned long int)(m1)))
#define udiv_qrnnd(q, r, nh, nl, d) \
  __asm__ ("divx %4,%0,%1"						\
	: "=g" ((unsigned long int)(q)), "=r" ((unsigned long int)(r))	\
	: "1" ((unsigned long int)(nh)), "0" ((unsigned long int)(nl)), \
	  "g" ((unsigned long int)(d)))
/* NOTE(review): "bsch/1" presumably scans for the first 1-bit starting
   from the offset seeded by the "0" (0UL) tied operand -- confirm
   against the Gmicro manual.  */
#define count_leading_zeros(count, x) \
  __asm__ ("bsch/1 %1,%0"						\
	: "=g" (count)							\
	: "g" ((unsigned long int)(x)), "0" (0UL))
#endif /* __gmicro__ */
148
/* HP Precision Architecture (PA-RISC).  Only add/subtract here.  */
#if defined (__hppa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %4,%5,%1
	addc %2,%3,%0"							\
	: "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
	: "%r" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)),\
	  "%r" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %5,%4,%1
	subb %3,%2,%0"							\
	: "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
	: "r" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)), \
	  "r" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
#endif /* __hppa */
163
/* Intel 80386/80486.  */
#if defined (__i386__) || defined (__i486__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl %5,%1
	adcl %3,%0"							\
       : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
       : "%0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
	 "%1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl %5,%1
	sbbl %3,%0"							\
       : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
       : "0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)),  \
	 "1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
/* "mull" leaves the 64-bit product in edx:eax ("=d":"=a").  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mull %3"							\
	: "=a" ((unsigned long int)(w0)), "=d" ((unsigned long int)(w1))\
	: "%0" ((unsigned long int)(u)), "rm" ((unsigned long int)(v)))
/* "divl" divides edx:eax by %4; quotient to eax, remainder to edx.  */
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divl %4"							\
	: "=a" ((unsigned long int)(q)), "=d" ((unsigned long int)(r))	\
	: "0" ((unsigned long int)(n0)), "1" ((unsigned long int)(n1)), \
	  "rm" ((unsigned long int)(d)))
/* "bsrl" gives the index of the highest set bit; XOR with 31 turns
   that index into the leading-zero count.  */
#define count_leading_zeros(count, x) \
  do {									\
    unsigned long int __cbtmp;						\
    __asm__ ("bsrl %1,%0"						\
	     : "=r" (__cbtmp) : "rm" ((unsigned long int)(x)));		\
    (count) = __cbtmp ^ 31;						\
  } while (0)
#endif /* 80x86 */
194
/* Intel i860.  Two-word add/subtract via the FP unit's 64-bit integer
   fiadd.dd/fisub.dd, type-punned through a union.  The whole section
   is disabled (#if 0) pending evidence that it improves code.  */
#if defined (__i860__)
#if 0
/* Make sure these patterns really improve the code before
   switching them on.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do {									\
    union								\
      {									\
	long long int ll;						\
	struct {unsigned long int l, h;} i;				\
      }  __a, __b, __s;							\
    __a.i.l = (al); __a.i.h = (ah);					\
    __b.i.l = (bl); __b.i.h = (bh);					\
    __asm__ ("fiadd.dd %1,%2,%0"					\
	     : "=f" (__s.ll)						\
	     : "%f" (__a.ll), "f" (__b.ll));				\
    (sh) = __s.i.h; (sl) = __s.i.l;					\
    } while (0)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do {									\
    union								\
      {									\
	long long int ll;						\
	struct {unsigned long int l, h;} i;				\
      }  __a, __b, __s;							\
    __a.i.l = (al); __a.i.h = (ah);					\
    __b.i.l = (bl); __b.i.h = (bh);					\
    __asm__ ("fisub.dd %1,%2,%0"					\
	     : "=f" (__s.ll)						\
	     : "%f" (__a.ll), "f" (__b.ll));				\
    (sh) = __s.i.h; (sl) = __s.i.l;					\
    } while (0)
#endif
#endif /* __i860__ */
229
/* IBM RS/6000 (POWER).  */
#if defined (___IBMR2__) /* IBM RS6000 */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("a %1,%4,%5
	ae %0,%2,%3"							\
	: "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
	: "%r" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)),\
	  "%r" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sf %1,%5,%4
	sfe %0,%3,%2"							\
	: "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
	: "r" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)), \
	  "r" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
/* "mul" is a signed 32x32 multiply (high word to %0, low word to the
   "q"-constrained register).  The addition below converts the high
   word of the signed product into the unsigned high word.  */
#define umul_ppmm(xh, xl, m0, m1) \
  do {									\
    unsigned long int __m0 = (m0), __m1 = (m1);				\
    __asm__ ("mul %0,%2,%3"						\
	: "=r" ((unsigned long int)(xh)), "=q" ((unsigned long int)(xl))\
	: "r" (__m0), "r" (__m1));					\
    (xh) += ((((signed long int) __m0 >> 31) & __m1)			\
	     + (((signed long int) __m1 >> 31) & __m0));		\
  } while (0)
#define UMUL_TIME 8
#define udiv_qrnnd(q, r, nh, nl, d) \
  do { /* Use the signed "div" insn, and adjust the result. */		\
    unsigned long int __q, __r, __nh, __nl, __d, __xh, __xl;		\
    /* Pre-shift numerator right by 2 and denominator right by 1 so	\
       the signed divide cannot overflow; the loops below correct the	\
       resulting approximate quotient.  */				\
    __nl = (((unsigned long int)(nh)) << 30) | ((unsigned long int)(nl) >> 2);\
    __nh = (unsigned long int)(nh) >> 2;				\
    __d = ((unsigned long int)(d) >> 1);				\
    __asm__ ("div %0,%2,%4"						\
	     : "=r" (__q), "=q" (__r)					\
	     : "r" (__nh), "1" (__nl), "r" (__d));			\
    __q <<= 1;								\
    __asm__ ("mul %0,%2,%3"						\
	     : "=r" (__xh), "=q" (__xl)					\
	     : "r" (__q), "r" ((unsigned long int)(d)));		\
    __xh += (((signed long int) __q >> 31) & (d)) + __q;		\
    /* __xh:__xl = __q * d (unsigned).  Adjust __q until it is the	\
       true quotient, leaving the remainder in __xl.  */		\
    if ((nh) < __xh || ((nh) == __xh && (nl) < __xl))			\
      {									\
	do								\
	  {								\
	    sub_ddmmss (__xh, __xl, __xh, __xl, 0, (d));		\
	    __q--;							\
	  }								\
	while ((nh) < __xh || ((nh) == __xh && (nl) < __xl));		\
	__xl = (nl) - __xl;						\
      }									\
    else								\
      {									\
	sub_ddmmss (__xh, __xl, (nh), (nl), __xh, __xl);		\
	if (__xh != 0)							\
	  {								\
	    do								\
	      {								\
		sub_ddmmss (__xh, __xl, __xh, __xl, 0, (d));		\
		__q++;							\
	      }								\
	    while (__xh != 0);						\
	  }								\
	if (__xl >= (d))						\
	  {								\
	    __xl -= (d);						\
	    __q++;							\
	  }								\
      }									\
    (q) = __q;								\
    (r) = __xl;								\
  } while (0)
#define UDIV_TIME 40
#define UDIV_NEEDS_NORMALIZATION 1
#define count_leading_zeros(count, x) \
  __asm__ ("cntlz %0,%1"						\
	: "=r" ((unsigned long int)(count)) : "r" ((unsigned long int)(x)))
#endif /* ___IBMR2__ */
305
/* Motorola 68000 family.  */
#if defined (__mc68000__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add%.l %5,%1
	addx%.l %3,%0"							\
       : "=d" ((unsigned long int)(sh)), "=&d" ((unsigned long int)(sl))\
       : "%0" ((unsigned long int)(ah)), "d" ((unsigned long int)(bh)), \
	 "%1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub%.l %5,%1
	subx%.l %3,%0"							\
       : "=d" ((unsigned long int)(sh)), "=&d" ((unsigned long int)(sl))\
       : "0" ((unsigned long int)(ah)), "d" ((unsigned long int)(bh)),  \
	 "1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
/* 68020 and later have 32x32->64 multiply and 64/32 divide insns.  */
#if defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mulu%.l %3,%1:%0"						\
	: "=d" ((unsigned long int)(w0)), "=d" ((unsigned long int)(w1))\
	: "%0" ((unsigned long int)(u)), "dmi" ((unsigned long int)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divu%.l %4,%1:%0"						\
	: "=d" ((unsigned long int)(q)), "=d" ((unsigned long int)(r))	\
	: "0" ((unsigned long int)(n0)), "1" ((unsigned long int)(n1)), \
	  "dmi" ((unsigned long int)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("bfffo %1{%b2:%b2},%0"					\
	: "=d" ((unsigned long int)(count))				\
	: "od" ((unsigned long int)(x)), "n" (0))
#else /* not mc68020 */
/* Plain 68000: build the 32x32->64 product from four 16x16 "mulu"
   multiplies plus carry fixups.  Clobbers d0-d4.  */
#define umul_ppmm(xh, xl, a, b) \
  __asm__ ("| Inlined umul_ppmm
	movel	%2,d0
	movel	%3,d1
	movel	d0,d2
	swap	d0
	movel	d1,d3
	swap	d1
	movew	d2,d4
	mulu	d3,d4
	mulu	d1,d2
	mulu	d0,d3
	mulu	d0,d1
	movel	d4,d0
	eorw	d0,d0
	swap	d0
	addl	d0,d2
	addl	d3,d2
	jcc	1f
	addl	#65536,d1
1:	swap	d2
	moveq	#0,d0
	movew	d2,d0
	movew	d4,d2
	movel	d2,%1
	addl	d1,d0
	movel	d0,%0"							\
       : "=g" ((unsigned long int)(xh)), "=g" ((unsigned long int)(xl)) \
       :"g" ((unsigned long int)(a)), "g" ((unsigned long int)(b))	\
       : "d0", "d1", "d2", "d3", "d4")
#endif /* not mc68020 */
#endif /* mc68000 */
366
/* Motorola 88000.  */
#if defined (__m88000__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addu.co %1,%r4,%r5
	addu.ci %0,%r2,%r3"						\
	: "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
	: "%rJ" ((unsigned long int)(ah)), "rJ" ((unsigned long int)(bh)),\
	  "%rJ" ((unsigned long int)(al)), "rJ" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subu.co %1,%r4,%r5
	subu.ci %0,%r2,%r3"						\
	: "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
	: "rJ" ((unsigned long int)(ah)), "rJ" ((unsigned long int)(bh)),\
	  "rJ" ((unsigned long int)(al)), "rJ" ((unsigned long int)(bl)))
#define UMUL_TIME 17
#define UDIV_TIME 150
/* "ff1" returns the bit index of the highest set bit; XOR with 31
   converts that index into the leading-zero count (cf. the i386
   "bsrl" version).  */
#define count_leading_zeros(count, x) \
  do {									\
    unsigned long int __cbtmp;						\
    __asm__ ("ff1 %0,%1"						\
	     : "=r" (__cbtmp) : "r" ((unsigned long int)(x)));		\
    (count) = __cbtmp ^ 31;						\
  } while (0)
#endif /* __m88000__ */
390
/* MIPS.  "multu" leaves the 64-bit unsigned product in the HI:LO
   register pair, read back with mfhi/mflo.  */
#if defined (__mips__)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("multu %2,%3
	mflo %0
	mfhi %1"							\
	: "=r" ((unsigned long int)(w0)), "=r" ((unsigned long int)(w1))\
	: "r" ((unsigned long int)(u)), "r" ((unsigned long int)(v)))
#define UMUL_TIME 5
#define UDIV_TIME 100
#endif /* __mips__ */
401
/* National Semiconductor 32000 series.  */
#if defined (__ns32000__)
/* "meid" extended multiply: 32x32 -> 64-bit product.  */
#define __umulsidi3(u, v) \
  ({long long int __w;							\
      __asm__ ("meid %2,%0" : "=g" (__w)				\
	: "%0" ((unsigned long int)(u)), "g" ((unsigned long int)(v))); \
      __w; })
/* "deid" extended divide: 64-bit numerator (in r0:r1) by %4, leaving
   remainder in r0 and quotient in r1.  This macro was originally
   (mis)named div_qrnnd, which nothing uses -- the interface documented
   at the top of this file is udiv_qrnnd -- so the asm version was dead
   code.  Define it under the correct name; the old name is kept as an
   alias for any stray user.  */
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("movd %2,r0
	movd %3,r1
	deid %4,r0
	movd r1,%0
	movd r0,%1"							\
	: "=g" ((unsigned long int)(q)), "=g" ((unsigned long int)(r))	\
	: "g" ((unsigned long int)(n0)), "g" ((unsigned long int)(n1)), \
	  "g" ((unsigned long int)(d)) : "r0", "r1")
#define div_qrnnd(q, r, n1, n0, d) udiv_qrnnd (q, r, n1, n0, d)
#endif /* __ns32000__ */
418
/* Pyramid.  */
#if defined (__pyr__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addw	%5,%1
	addwc	%3,%0"							\
	: "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
	: "%0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)),\
	  "%1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subw	%5,%1
	subwb	%3,%0"							\
	: "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
	: "0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
	  "1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
/* This insn doesn't work on ancient pyramids.  "uemul" multiplies
   into the tr10:tr11 temporary-register pair.  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("movw %2,tr11
	uemul %3,tr10
	movw tr10,%0
	movw tr11,%1"							\
	: "=r" ((unsigned long int)(w1)), "=r" ((unsigned long int)(w0))\
	: "r" ((unsigned long int)(u)), "r" ((unsigned long int)(v))	\
	: "tr10", "tr11")
#endif /* __pyr__ */
442
/* IBM RT PC (ROMP).  */
#if defined (__ibm032__) /* RT/ROMP */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("a %1,%5
	ae %0,%3"							\
	: "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
	: "%0" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)),\
	  "%1" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("s %1,%5
	se %0,%3"							\
	: "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
	: "0" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)), \
	  "1" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
/* ROMP has no full multiply insn; sixteen "m" multiply-step insns
   (presumably two multiplier bits per step for a 32-bit multiply --
   verify against the ROMP manual) build the product via MQ (r10).
   The addition after the asm converts the signed product's high word
   into the unsigned high word, as in the RS/6000 version.  */
#define umul_ppmm(ph, pl, m0, m1) \
  do {									\
    unsigned long int __m0 = (m0), __m1 = (m1);				\
    __asm__ (								\
       "s	r2,r2
	mts	r10,%2
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	cas	%0,r2,r0
	mfs	r10,%1"							\
       : "=r" ((unsigned long int)(ph)), "=r" ((unsigned long int)(pl)) \
       : "%r" (__m0), "r" (__m1)					\
       : "r2");								\
    (ph) += ((((signed long int) __m0 >> 31) & __m1)			\
	     + (((signed long int) __m1 >> 31) & __m0));		\
  } while (0)
/* "clz" counts within a halfword only, so dispatch on whether any of
   the upper 16 bits are set, and bias by 16 in the low-half case.  */
#define count_leading_zeros(count, x) \
  do {									\
    if ((x) >= 0x10000)							\
      __asm__ ("clz	%0,%1"						\
	       : "=r" ((unsigned long int)(count))			\
	       : "r" ((unsigned long int)(x) >> 16));			\
    else								\
      {									\
	__asm__ ("clz	%0,%1"						\
		 : "=r" ((unsigned long int)(count))			\
		 : "r" ((unsigned long int)(x)));			\
	(count) += 16;							\
      }									\
  } while (0)
#endif /* __ibm032__ */
501
/* SPARC.  */
#if defined (__sparc__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addcc %4,%5,%1
	addx %2,%3,%0"							\
	: "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
	: "%r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)),\
	  "%r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subcc %4,%5,%1
	subx %2,%3,%0"							\
	: "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl))\
	: "r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)),\
	  "r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
#if defined (__sparc8__)	/* How do we recog. version 8 SPARC?  */
/* V8 has real umul/udiv; the high product and the remainder come via
   the Y register.  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("umul %2,%3,%1;rd %%y,%0"					\
	: "=r" ((unsigned long int)(w1)), "=r" ((unsigned long int)(w0))\
	: "r" ((unsigned long int)(u)), "r" ((unsigned long int)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
	: "=&r" ((unsigned long int)(q)), "=&r" ((unsigned long int)(r))\
	: "r" ((unsigned long int)(n1)), "r" ((unsigned long int)(n0)), \
	  "r" ((unsigned long int)(d)))
#else
/* SPARC without integer multiplication and divide instructions.
   (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("! Inlined umul_ppmm
	wr	%%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr
	sra	%3,31,%%g2	! Don't move this insn
	and	%2,%%g2,%%g2	! Don't move this insn
	andcc	%%g0,0,%%g1	! Don't move this insn
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,0,%%g1
	add	%%g1,%%g2,%0
	rd	%%y,%1"							\
	: "=r" ((unsigned long int)(w1)), "=r" ((unsigned long int)(w0))\
	: "%rI" ((unsigned long int)(u)), "r" ((unsigned long int)(v))	\
       : "%g1", "%g2")
#define UMUL_TIME 39		/* 39 instructions */
/* It's quite necessary to add this much assembler for the sparc.
   The default udiv_qrnnd (in C) is more than 10 times slower!  */
/* Output constraints fixed from "=r&" to "=&r": "=" must come first
   and the earlyclobber "&" follows it.  */
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("! Inlined udiv_qrnnd
	mov	32,%%g1
	subcc	%1,%2,%%g0
1:	bcs	5f
	 addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb
	sub	%1,%2,%1	! this kills msb of n
	addx	%1,%1,%1	! so this can't give carry
	subcc	%%g1,1,%%g1
2:	bne	1b
	 subcc	%1,%2,%%g0
	bcs	3f
	 addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb
	b	3f
	 sub	%1,%2,%1	! this kills msb of n
4:	sub	%1,%2,%1
5:	addxcc	%1,%1,%1
	bcc	2b
	 subcc	%%g1,1,%%g1
! Got carry from n.  Subtract next step to cancel this carry.
	bne	4b
	 addcc	%0,%0,%0	! shift n1n0 and a 0-bit in lsb
	sub	%1,%2,%1
3:	xnor	%0,0,%0
	! End of inline udiv_qrnnd"					\
	: "=&r" ((unsigned long int)(q)), "=&r" ((unsigned long int)(r))\
	: "r" ((unsigned long int)(d)), "1" ((unsigned long int)(n1)),	\
	  "0" ((unsigned long int)(n0)) : "%g1")
#define UDIV_TIME (3+7*32)	/* 7 instructions/iteration. 32 iterations. */
#endif /* __sparc8__ */
#endif /* __sparc__ */
606
#if defined (__vax__)
/* Two-word add, SH:SL = AH:AL + BH:BL.  addl2 adds the low words and
   sets carry; adwc then adds the high words plus that carry.  The
   "%0"/"%1" matching constraints tie AH/AL to the output operands SH/SL,
   and the '%' modifier marks each operand pair as commutative.
   NOTE(review): the multi-line asm string is an old GNU C extension --
   keep the line break exactly as it is.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl2 %5,%1
        adwc %3,%0"                                                     \
        : "=g" ((unsigned long int)(sh)), "=&g" ((unsigned long int)(sl))\
        : "%0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)),\
          "%1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
/* Two-word subtract, SH:SL = AH:AL - BH:BL.  subl2 subtracts the low
   words and sets borrow; sbwc subtracts the high words with borrow.
   (No '%' here: subtraction is not commutative.)  */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl2 %5,%1
        sbwc %3,%0"                                                     \
        : "=g" ((unsigned long int)(sh)), "=&g" ((unsigned long int)(sl))\
        : "0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
          "1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
/* Unsigned 32x32 -> 64 multiply.  emul produces a (presumably signed)
   64-bit product; the final statement converts it to the unsigned
   product by adding (M0 < 0 ? M1 : 0) + (M1 < 0 ? M0 : 0) into the
   high word via sign-mask arithmetic.  The union lays the low word
   out first -- NOTE(review): assumes VAX little-endian word order in
   the long long; confirm.  Also relies on '>>' performing an
   arithmetic shift on signed long, which is implementation-defined in
   ISO C -- presumably true for the compilers this branch targets.  */
#define umul_ppmm(xh, xl, m0, m1) \
  do {                                                                  \
    union {long long int ll;struct {unsigned long int l, h;} i;} __xx;  \
    unsigned long int __m0 = (m0), __m1 = (m1);                         \
    __asm__ ("emul %1,%2,$0,%0"                                         \
         : "=r" (__xx.ll) : "g" (__m0), "g" (__m1));                    \
    (xh) = __xx.i.h; (xl) = __xx.i.l;                                   \
    (xh) += ((((signed long int) __m0 >> 31) & __m1)                    \
             + (((signed long int) __m1 >> 31) & __m0));                \
  } while (0)
#endif /* __vax__ */
631
632 #endif /* __GNUC__ */
633
634 /* If this machine has no inline assembler, use C macros.  */
635
#if !defined (add_ssaaaa)
/* SH:SL = AH:AL + BH:BL, two-word unsigned addition in plain C.
   Carry out of the low word is detected portably: the low-word sum
   wrapped around iff it is smaller than one of its addends.
   Fix: AL is captured in a temporary so the macro evaluates it exactly
   once -- the original read (al) twice, a multiple-evaluation hazard
   for an argument with side effects.  AH, BH and BL were already
   evaluated once each.  SH is written before SL, as before.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do {                                                                  \
    unsigned long int __al = (al);                                      \
    unsigned long int __x = __al + (bl);                                \
    (sh) = (ah) + (bh) + (__x < __al);                                  \
    (sl) = __x;                                                         \
  } while (0)
#endif
645
#if !defined (sub_ddmmss)
/* SH:SL = AH:AL - BH:BL, two-word unsigned subtraction in plain C.
   Borrow out of the low word is detected by wraparound: AL - BL is
   greater than AL exactly when the subtraction underflowed.
   Fix: AL is captured in a temporary so the macro evaluates it exactly
   once -- the original read (al) twice, a multiple-evaluation hazard
   for an argument with side effects.  */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do {                                                                  \
    unsigned long int __al = (al);                                      \
    unsigned long int __x = __al - (bl);                                \
    (sh) = (ah) - (bh) - (__x > __al);                                  \
    (sl) = __x;                                                         \
  } while (0)
#endif
655
#if !defined (umul_ppmm)
/* W1:W0 = U * V, the full double-word product, in plain C.  Each
   operand is split into half-words with __ll_lowpart/__ll_highpart;
   the four partial products are then recombined, propagating the one
   possible carry from the middle terms into the high word.  */
#define umul_ppmm(w1, w0, u, v)                                         \
  do {                                                                  \
    unsigned int __ulo, __uhi, __vlo, __vhi;                            \
    unsigned long int __p0, __p1, __p2, __p3;                           \
                                                                        \
    __ulo = __ll_lowpart (u);                                           \
    __uhi = __ll_highpart (u);                                          \
    __vlo = __ll_lowpart (v);                                           \
    __vhi = __ll_highpart (v);                                          \
                                                                        \
    __p0 = (unsigned long int) __ulo * __vlo;                           \
    __p1 = (unsigned long int) __ulo * __vhi;                           \
    __p2 = (unsigned long int) __uhi * __vlo;                           \
    __p3 = (unsigned long int) __uhi * __vhi;                           \
                                                                        \
    __p1 += __ll_highpart (__p0);       /* cannot overflow */           \
    __p1 += __p2;                       /* but this sum can wrap */     \
    if (__p1 < __p2)                    /* wrapped: carry out */        \
      __p3 += __ll_B;                   /* propagate into high word */  \
                                                                        \
    (w1) = __p3 + __ll_highpart (__p1);                                 \
    (w0) = __ll_lowpart (__p1) * __ll_B + __ll_lowpart (__p0);          \
  } while (0)
#endif
681
#if !defined (__umulsidi3)
/* Multiply two unsigned long ints U and V and yield the full product
   as a long long, assembled from the two words that umul_ppmm
   produces.  Implemented as a GNU C statement expression.  The
   `long_long' type -- presumably a union overlaying a long long with
   a high/low word struct -- is declared elsewhere in this file;
   confirm against its definition.  */
#define __umulsidi3(u, v) \
  ({long_long __w;                                                      \
    umul_ppmm (__w.s.high, __w.s.low, u, v);                            \
    __w.ll; })
#endif
688
#if !defined (udiv_qrnnd)  || defined (__LLDEBUG__)
/* The C fallback divider requires a normalized divisor; see the
   description of udiv_qrnnd in the header comment of this file.  */
#define UDIV_NEEDS_NORMALIZATION 1
#ifndef __LLDEBUG__
#define udiv_qrnnd udiv_qrnnd_c
#endif
/* Divide the two-word number N1:N0 by D, storing the quotient in Q
   and the remainder in R.  N1 < D is required.  Schoolbook division
   by half-words: each of the two symmetric steps estimates a
   half-word quotient digit by dividing by the high half of D (__d1),
   then corrects the estimate downward -- at most twice, as the two
   nested "__rX < __m" tests show.  After adding D back, "__rX >= (d)"
   means the addition did not carry, so a second correction may still
   be needed; if it carried, the estimate is already right.  Statement
   order is load-bearing here: do not reorder.  */
#define udiv_qrnnd_c(q, r, n1, n0, d) \
  do {                                                                  \
    unsigned int __d1, __d0, __q1, __q0;                                \
    unsigned long int __r1, __r0, __m;                                  \
    __d1 = __ll_highpart (d);                                           \
    __d0 = __ll_lowpart (d);                                            \
                                                                        \
    __r1 = (n1) % __d1;                                                 \
    __q1 = (n1) / __d1;                                                 \
    __m = (unsigned long int) __q1 * __d0;                              \
    __r1 = __r1 * __ll_B | __ll_highpart (n0);                          \
    if (__r1 < __m)                                                     \
      {                                                                 \
        __q1--, __r1 += (d);                                            \
        if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
          if (__r1 < __m)                                               \
            __q1--, __r1 += (d);                                        \
      }                                                                 \
    __r1 -= __m;                                                        \
                                                                        \
    __r0 = __r1 % __d1;                                                 \
    __q0 = __r1 / __d1;                                                 \
    __m = (unsigned long int) __q0 * __d0;                              \
    __r0 = __r0 * __ll_B | __ll_lowpart (n0);                           \
    if (__r0 < __m)                                                     \
      {                                                                 \
        __q0--, __r0 += (d);                                            \
        if (__r0 >= (d))                                                \
          if (__r0 < __m)                                               \
            __q0--, __r0 += (d);                                        \
      }                                                                 \
    __r0 -= __m;                                                        \
                                                                        \
    (q) = (unsigned long int) __q1 * __ll_B | __q0;                     \
    (r) = __r0;                                                         \
  } while (0)
#endif
731
#if !defined (count_leading_zeros)
extern const unsigned char __clz_tab[];

/* Store in COUNT the number of leading zero bits in X, on a
   4*__BITS4-bit (i.e. LONG_TYPE_SIZE-bit) word.  A three-way compare
   locates the highest nonzero __BITS4-bit chunk; __clz_tab, indexed
   by the bits above that chunk boundary, then gives the significant
   bit count inside it.
   Fix: the temporaries are named __xr/__a (reserved namespace)
   instead of the original xr/a, so that a caller writing, say,
   count_leading_zeros (count, xr) no longer expands into the
   self-initialization `unsigned long int xr = (xr);'.  */
#define count_leading_zeros(count, x) \
  do {                                                                  \
    unsigned long int __xr = (x);                                       \
    unsigned int __a;                                                   \
                                                                        \
    __a = __xr < (1<<2*__BITS4)                                         \
      ? (__xr < (1<<__BITS4) ? 0 : __BITS4)                             \
      : (__xr < (1<<3*__BITS4) ?  2*__BITS4 : 3*__BITS4);               \
                                                                        \
    (count) = 4*__BITS4 - (__clz_tab[__xr >> __a] + __a);               \
  } while (0)
#endif
747
/* Default: if no udiv_qrnnd definition above declared that it needs a
   pre-normalized divisor, record that none is needed.  */
#ifndef UDIV_NEEDS_NORMALIZATION
#define UDIV_NEEDS_NORMALIZATION 0
#endif