-/* libgcc1 routines for 68000 w/o floating-point hardware.
- Copyright (C) 1994, 1996, 1997 Free Software Foundation, Inc.
+/* libgcc routines for 68000 w/o floating-point hardware.
+ Copyright (C) 1994, 1996, 1997, 1998 Free Software Foundation, Inc.
This file is part of GNU CC.
| void __clear_sticky_bits(void);
SYM (__clear_sticky_bit):
lea SYM (_fpCCR),a0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
movew IMM (0),a0@(STICK)
#else
clr.w a0@(STICK)
$_exception_handler:
lea SYM (_fpCCR),a0
movew d7,a0@(EBITS) | set __exception_bits
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
orw d7,a0@(STICK) | and __sticky_bits
#else
movew a0@(STICK),d4
movew d5,a0@(LASTO) | and __last_operation
| Now put the operands in place:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (SINGLE_FLOAT),d6
#else
cmpl IMM (SINGLE_FLOAT),d6
movel a6@(12),a0@(OPER2)
2:
| And check whether the exception is trap-enabled:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
andw a0@(TRAPE),d7 | is exception trap-enabled?
#else
clrl d6
beq 1f | no, exit
pea SYM (_fpCCR) | yes, push address of _fpCCR
trap IMM (FPTRAP) | and trap
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
1: moveml sp@+,d2-d7 | restore data registers
#else
1: moveml sp@,d2-d7
muluw sp@(10), d0 /* x0*y1 */
movew sp@(6), d1 /* x1 -> d1 */
muluw sp@(8), d1 /* x1*y0 */
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
addw d1, d0
#else
addl d1, d0
.proc
.globl SYM (__udivsi3)
SYM (__udivsi3):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
movel d2, sp@-
movel sp@(12), d1 /* d1 = divisor */
movel sp@(8), d0 /* d0 = dividend */
L6: movel sp@+, d2
rts
-#else /* __mcf5200__ */
+#else /* __mcoldfire__ */
/* Coldfire implementation of non-restoring division algorithm from
Hennessy & Patterson, Appendix A. */
addxl d2,d2
movl d2,d3 | subtract b from p, store in tmp.
subl d1,d3
- jmi L2 | if the result is not is negative, set the
- bset IMM (0),d0 | low order bit of a to 1 and store tmp in p.
- movl d3,d2
+ jcs L2 | if no carry,
+ bset IMM (0),d0 | set the low order bit of a to 1,
+ movl d3,d2 | and store tmp in p.
L2: subql IMM (1),d4
jcc L1
- moveml sp@+,d2-d4 | restore data registers
+ moveml sp@,d2-d4 | restore data registers
unlk a6 | and return
rts
-#endif /* __mcf5200__ */
+#endif /* __mcoldfire__ */
#endif /* L_udivsi3 */
movel sp@(12), d1 /* d1 = divisor */
jpl L1
negl d1
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
negb d2 /* change sign because divisor <0 */
#else
negl d2 /* change sign because divisor <0 */
L1: movel sp@(8), d0 /* d0 = dividend */
jpl L2
negl d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
negb d2
#else
negl d2
jbsr SYM (__udivsi3)
addql IMM (8), sp
movel sp@(8), d1 /* d1 = divisor */
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
movel d1, sp@-
movel d0, sp@-
jbsr SYM (__mulsi3) /* d0 = (a/b)*b */
jbsr SYM (__divsi3)
addql IMM (8), sp
movel sp@(8), d1 /* d1 = divisor */
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
movel d1, sp@-
movel d0, sp@-
jbsr SYM (__mulsi3) /* d0 = (a/b)*b */
| double __adddf3(double, double);
SYM (__adddf3):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0) | everything will be done in registers
moveml d2-d7,sp@- | save all data registers and a2 (but d0-d1)
#else
andl IMM (0x80000000),d7 | isolate a's sign bit '
swap d6 | and also b's sign bit '
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
andw IMM (0x8000),d6 |
orw d6,d7 | and combine them into d7, so that a's sign '
| bit is in the high word and b's is in the '
orl d7,d0 | and put hidden bit back
Ladddf$1:
swap d4 | shift right exponent so that it starts
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (5),d4 | in bit 0 and not bit 20
#else
lsrl IMM (5),d4 | in bit 0 and not bit 20
orl d7,d2 | and put hidden bit back
Ladddf$2:
swap d5 | shift right exponent so that it starts
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (5),d5 | in bit 0 and not bit 20
#else
lsrl IMM (5),d5 | in bit 0 and not bit 20
| and d4-d5-d6-d7 for the second. To do this we store (temporarily) the
| exponents in a2-a3.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml a2-a3,sp@- | save the address registers
#else
movel a2,sp@-
| Here we shift the numbers until the exponents are the same, and put
| the largest exponent in a2.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d4,a2 | get exponents back
exg d5,a3 |
cmpw d4,d5 | compare the exponents
| Here we have a's exponent larger than b's, so we have to shift b. We do
| this by using as counter d2:
1: movew d4,d2 | move largest exponent to d2
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw d5,d2 | and subtract second exponent
exg d4,a2 | get back the longs we saved
exg d5,a3 |
movel a4,a3
#endif
| if difference is too large we don't shift (actually, we can just exit) '
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (DBL_MANT_DIG+2),d2
#else
cmpl IMM (DBL_MANT_DIG+2),d2
#endif
bge Ladddf$b$small
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (32),d2 | if difference >= 32, shift by longs
#else
cmpl IMM (32),d2 | if difference >= 32, shift by longs
#endif
bge 5f
2:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (16),d2 | if difference >= 16, shift by words
#else
cmpl IMM (16),d2 | if difference >= 16, shift by words
bra 3f | enter dbra loop
4:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d4
roxrl IMM (1),d5
roxrl IMM (1),d6
12: lsrl IMM (1),d4
#endif
3:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d2,4b
#else
subql IMM (1),d2
movel d5,d6
movel d4,d5
movel IMM (0),d4
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (32),d2
#else
subl IMM (32),d2
swap d5
movew IMM (0),d4
swap d4
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (16),d2
#else
subl IMM (16),d2
bra 3b
9:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d4,d5
movew d4,d6
subw d5,d6 | keep d5 (largest exponent) in d4
movel a4,a3
#endif
| if difference is too large we don't shift (actually, we can just exit) '
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (DBL_MANT_DIG+2),d6
#else
cmpl IMM (DBL_MANT_DIG+2),d6
#endif
bge Ladddf$a$small
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (32),d6 | if difference >= 32, shift by longs
#else
cmpl IMM (32),d6 | if difference >= 32, shift by longs
#endif
bge 5f
2:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (16),d6 | if difference >= 16, shift by words
#else
cmpl IMM (16),d6 | if difference >= 16, shift by words
bra 3f | enter dbra loop
4:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
12: lsrl IMM (1),d0
#endif
3:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d6,4b
#else
subql IMM (1),d6
movel d1,d2
movel d0,d1
movel IMM (0),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (32),d6
#else
subl IMM (32),d6
swap d1
movew IMM (0),d0
swap d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (16),d6
#else
subl IMM (16),d6
#endif
bra 3b
Ladddf$3:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d4,a2
exg d5,a3
#else
| the signs in a4.
| Here we have to decide whether to add or subtract the numbers:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a0 | get the signs
exg d6,a3 | a3 is free to be used
#else
eorl d7,d6 | compare the signs
bmi Lsubdf$0 | if the signs are different we have
| to subtract
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a0 | else we add the numbers
exg d6,a3 |
#else
movel a0,d7 |
andl IMM (0x80000000),d7 | d7 now has the sign
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
| one more bit we check this:
btst IMM (DBL_MANT_DIG+1),d0
beq 1f
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
1:
lea Ladddf$5,a0 | to return from rounding routine
lea SYM (_fpCCR),a1 | check the rounding mode
-#ifdef __mcf5200__
+#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
bra Lround$to$plus
Ladddf$5:
| Put back the exponent and check for overflow
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (0x7ff),d4 | is the exponent big?
#else
cmpl IMM (0x7ff),d4 | is the exponent big?
#endif
bge 1f
bclr IMM (DBL_MANT_DIG-1),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lslw IMM (4),d4 | put exponent back into position
#else
lsll IMM (4),d4 | put exponent back into position
#endif
swap d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
orw d4,d0 |
#else
orl d4,d0 |
Lsubdf$0:
| Here we do the subtraction.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a0 | put sign back in a0
exg d6,a3 |
#else
movel a2,d4 | return exponent to d4
movel a0,d7
andl IMM (0x80000000),d7 | isolate sign bit
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,a2-a3 |
#else
movel sp@+,a4
| one more bit we check this:
btst IMM (DBL_MANT_DIG+1),d0
beq 1f
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
1:
lea Lsubdf$1,a0 | to return from rounding routine
lea SYM (_fpCCR),a1 | check the rounding mode
-#ifdef __mcf5200__
+#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
Lsubdf$1:
| Put back the exponent and sign (we don't have overflow). '
bclr IMM (DBL_MANT_DIG-1),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lslw IMM (4),d4 | put exponent back into position
#else
lsll IMM (4),d4 | put exponent back into position
#endif
swap d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
orw d4,d0 |
#else
orl d4,d0 |
| DBL_MANT_DIG+1) we return the other (and now we don't have to '
| check for finiteness or zero).
Ladddf$a$small:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
movel a6@(20),d1
lea SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
rts
Ladddf$b$small:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
movel a6@(12),d1
lea SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
bra Ld$infty |
Ladddf$ret$1:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,a2-a3 | restore regs and exit
#else
movel sp@+,a4
lea SYM (_fpCCR),a0
movew IMM (0),a0@
orl d7,d0 | put sign bit back
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
Ladddf$ret$den:
| Return a denormalized number.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0 | shift right once more
roxrl IMM (1),d1 |
#else
| double __muldf3(double, double);
SYM (__muldf3):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
andl d6,d0 | isolate fraction
orl IMM (0x00100000),d0 | and put hidden bit back
swap d4 | I like exponents in the first byte
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (4),d4 |
#else
lsrl IMM (4),d4 |
andl d6,d2 |
orl IMM (0x00100000),d2 | and put hidden bit back
swap d5 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (4),d5 |
#else
lsrl IMM (4),d5 |
#endif
Lmuldf$2: |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
addw d5,d4 | add exponents
subw IMM (D_BIAS+1),d4 | and subtract bias (plus one)
#else
| enough to keep everything in them. So we use the address registers to keep
| some intermediate data.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml a2-a3,sp@- | save a2 and a3 for temporary use
#else
movel a2,sp@-
movel d4,a3 | and a3 will preserve the exponent
| First, shift d2-d3 so bit 20 becomes bit 31:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
rorl IMM (5),d2 | rotate d2 5 places right
swap d2 | and swap it
rorl IMM (5),d3 | do the same thing with d3
| We use a1 as counter:
movel IMM (DBL_MANT_DIG-1),a1
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a1
#else
movel d7,a4
#endif
1:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a1 | put counter back in a1
#else
movel d7,a4
addl d7,d7 |
addxl d6,d6 |
bcc 2f | if bit clear skip the following
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a2 |
#else
movel d7,a4
addxl d4,d2 |
addxl d7,d1 |
addxl d7,d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a2 |
#else
movel d7,a4
movel a4,a2
#endif
2:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a1 | put counter in d7
dbf d7,1b | decrement and branch
#else
#endif
movel a3,d4 | restore exponent
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
swap d3
movew d3,d2
movew IMM (0),d3
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
btst IMM (DBL_MANT_DIG+1-32),d0
beq Lround$exit
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
addw IMM (1),d4
| NaN, in which case we return NaN.
Lmuldf$b$0:
movew IMM (MULTIPLY),d5
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d2,d0 | put b (==0) into d0-d1
exg d3,d1 | and a (with sign bit cleared) into d2-d3
#else
bge Ld$inop | in case NaN or +/-INFINITY return NaN
lea SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
andl d6,d0
1: addl d1,d1 | shift a left until bit 20 is set
addxl d0,d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d4 | and adjust exponent
#else
subl IMM (1),d4 | and adjust exponent
andl d6,d2
1: addl d3,d3 | shift b left until bit 20 is set
addxl d2,d2 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d5 | and adjust exponent
#else
subql IMM (1),d5 | and adjust exponent
| double __divdf3(double, double);
SYM (__divdf3):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
andl d6,d0 | and isolate fraction
orl IMM (0x00100000),d0 | and put hidden bit back
swap d4 | I like exponents in the first byte
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (4),d4 |
#else
lsrl IMM (4),d4 |
andl d6,d2 |
orl IMM (0x00100000),d2
swap d5 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (4),d5 |
#else
lsrl IMM (4),d5 |
#endif
Ldivdf$2: |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw d5,d4 | subtract exponents
addw IMM (D_BIAS),d4 | and add bias
#else
bset d5,d6 | set the corresponding bit in d6
3: addl d1,d1 | shift a by 1
addxl d0,d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d5,1b | and branch back
#else
subql IMM (1), d5
bset d5,d7 | set the corresponding bit in d7
3: addl d1,d1 | shift a by 1
addxl d0,d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d5,1b | and branch back
#else
subql IMM (1), d5
beq 3f | if d0==d2 check d1 and d3
2: addl d1,d1 | shift a by 1
addxl d0,d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d5,1b | and branch back
#else
subql IMM (1), d5
| to it; if you don't do this the algorithm loses in some cases). '
movel IMM (0),d2
movel d2,d3
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (DBL_MANT_DIG),d5
addw IMM (63),d5
cmpw IMM (31),d5
bhi 2f
1: bset d5,d3
bra 5f
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (32),d5
#else
subl IMM (32),d5
| not set:
btst IMM (DBL_MANT_DIG-32+1),d0
beq 1f
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
movel d0,d1 |
lea SYM (_fpCCR),a0 | clear exception flags
movew IMM (0),a0@ |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 |
#else
moveml sp@,d2-d7 |
andl d6,d0
1: addl d1,d1 | shift a left until bit 20 is set
addxl d0,d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d4 | and adjust exponent
#else
subl IMM (1),d4 | and adjust exponent
andl d6,d2
1: addl d3,d3 | shift b left until bit 20 is set
addxl d2,d2
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d5 | and adjust exponent
#else
subql IMM (1),d5 | and adjust exponent
| so that 2^21 <= d0 < 2^22, and the exponent is in the lower byte of d4.
| First check for underflow in the exponent:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (-DBL_MANT_DIG-1),d4
#else
cmpl IMM (-DBL_MANT_DIG-1),d4
movel d7,a0 |
movel IMM (0),d6 | use d6-d7 to collect bits flushed right
movel d6,d7 | use d6-d7 to collect bits flushed right
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (1),d4 | if the exponent is less than 1 we
#else
cmpl IMM (1),d4 | if the exponent is less than 1 we
#endif
bge 2f | have to shift right (denormalize)
1:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
addw IMM (1),d4 | adjust the exponent
lsrl IMM (1),d0 | shift right once
roxrl IMM (1),d1 |
| Now call the rounding routine (which takes care of denormalized numbers):
lea Lround$0,a0 | to return from rounding routine
lea SYM (_fpCCR),a1 | check the rounding mode
-#ifdef __mcf5200__
+#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
| check again for underflow!). We have to check for overflow or for a
| denormalized number (which also signals underflow).
| Check for overflow (i.e., exponent >= 0x7ff).
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (0x07ff),d4
#else
cmpl IMM (0x07ff),d4
beq Ld$den
1:
| Put back the exponents and sign and return.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lslw IMM (4),d4 | exponent back to fourth byte
#else
lsll IMM (4),d4 | exponent back to fourth byte
#endif
bclr IMM (DBL_MANT_DIG-32-1),d0
swap d0 | and put back exponent
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
orw d4,d0 |
#else
orl d4,d0 |
lea SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| double __negdf2(double, double);
SYM (__negdf2):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
bra Ld$infty
1: lea SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| int __cmpdf2(double, double);
SYM (__cmpdf2):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@- | save registers
#else
tstl d6
bpl 1f
| If both are negative exchange them
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d0,d2
exg d1,d3
#else
bne Lcmpdf$a$gt$b | |b| < |a|
| If we got here a == b.
movel IMM (EQUAL),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
rts
Lcmpdf$a$gt$b:
movel IMM (GREATER),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
rts
Lcmpdf$b$gt$a:
movel IMM (LESS),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| Normalize shifting left until bit #DBL_MANT_DIG-32 is set or the exponent
| is one (remember that a denormalized number corresponds to an
| exponent of -D_BIAS+1).
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (1),d4 | remember that the exponent is at least one
#else
cmpl IMM (1),d4 | remember that the exponent is at least one
addxl d2,d2 |
addxl d1,d1 |
addxl d0,d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d4,1b |
#else
subql IMM (1), d4
addxl d2,d0
| Shift right once (because we used bit #DBL_MANT_DIG-32!).
2:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
#else
| 'fraction overflow' ...).
btst IMM (DBL_MANT_DIG-32),d0
beq 1f
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
addw IMM (1),d4
| float __addsf3(float, float);
SYM (__addsf3):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0) | everything will be done in registers
moveml d2-d7,sp@- | save all data registers but d0-d1
#else
| same, and put the largest exponent in d6. Note that we are using two
| registers for each number (see the discussion by D. Knuth in "Seminumerical
| Algorithms").
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw d6,d7 | compare exponents
#else
cmpl d6,d7 | compare exponents
1:
subl d6,d7 | keep the largest exponent
negl d7
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (8),d7 | put difference in lower byte
#else
lsrl IMM (8),d7 | put difference in lower byte
#endif
| if difference is too large we don't shift (actually, we can just exit) '
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (FLT_MANT_DIG+2),d7
#else
cmpl IMM (FLT_MANT_DIG+2),d7
#endif
bge Laddsf$b$small
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (16),d7 | if difference >= 16 swap
#else
cmpl IMM (16),d7 | if difference >= 16 swap
#endif
bge 4f
2:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d7
#else
subql IMM (1), d7
#endif
3:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d2 | shift right second operand
roxrl IMM (1),d3
dbra d7,3b
swap d3
movew d3,d2
swap d2
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (16),d7
#else
subl IMM (16),d7
bne 2b | if still more bits, go back to normal case
bra Laddsf$3
5:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d6,d7 | exchange the exponents
#else
eorl d6,d7
#endif
subl d6,d7 | keep the largest exponent
negl d7 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (8),d7 | put difference in lower byte
#else
lsrl IMM (8),d7 | put difference in lower byte
#endif
| if difference is too large we don't shift (and exit!) '
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (FLT_MANT_DIG+2),d7
#else
cmpl IMM (FLT_MANT_DIG+2),d7
#endif
bge Laddsf$a$small
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (16),d7 | if difference >= 16 swap
#else
cmpl IMM (16),d7 | if difference >= 16 swap
#endif
bge 8f
6:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d7
#else
subl IMM (1),d7
#endif
7:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0 | shift right first operand
roxrl IMM (1),d1
dbra d7,7b
swap d1
movew d1,d0
swap d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (16),d7
#else
subl IMM (16),d7
Laddsf$3:
| Here we have to decide whether to add or subtract the numbers
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d6,a0 | get signs back
exg d7,a1 | and save the exponents
#else
movel d6,d4
movel a0,d6
- movel d4,d6
+ movel d4,a0
movel d7,d4
- movel a1,d4
+ movel a1,d7
movel d4,a1
#endif
eorl d6,d7 | combine sign bits
| numbers
| Here we have both positive or both negative
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d6,a0 | now we have the exponent in d6
#else
movel d6,d4
| Put the exponent, in the first byte, in d2, to use the "standard" rounding
| routines:
movel d6,d2
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (8),d2
#else
lsrl IMM (8),d2
| one more bit we check this:
btst IMM (FLT_MANT_DIG+1),d0
beq 1f
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
#else
1:
lea Laddsf$4,a0 | to return from rounding routine
lea SYM (_fpCCR),a1 | check the rounding mode
-#ifdef __mcf5200__
+#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
bra Lround$to$plus
Laddsf$4:
| Put back the exponent, but check for overflow.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (0xff),d2
#else
cmpl IMM (0xff),d2
#endif
bhi 1f
bclr IMM (FLT_MANT_DIG-1),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lslw IMM (7),d2
#else
lsll IMM (7),d2
negl d1
negxl d0
1:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d2,a0 | now we have the exponent in d2
lsrw IMM (8),d2 | put it in the first byte
#else
| the rounding routines themselves.
lea Lsubsf$1,a0 | to return from rounding routine
lea SYM (_fpCCR),a1 | check the rounding mode
-#ifdef __mcf5200__
+#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
Lsubsf$1:
| Put back the exponent (we can't have overflow!). '
bclr IMM (FLT_MANT_DIG-1),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lslw IMM (7),d2
#else
lsll IMM (7),d2
movel a6@(12),d0
lea SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
movel a6@(8),d0
lea SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
lea SYM (_fpCCR),a0
movew IMM (0),a0@
orl d7,d0 | put sign bit
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| float __mulsf3(float, float);
SYM (__mulsf3):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
andl d5,d0 | and isolate fraction
orl d4,d0 | and put hidden bit back
swap d2 | I like exponents in the first byte
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (7),d2 |
#else
lsrl IMM (7),d2 |
andl d5,d1 |
orl d4,d1 |
swap d3 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (7),d3 |
#else
lsrl IMM (7),d3 |
#endif
Lmulsf$2: |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
addw d3,d2 | add exponents
subw IMM (F_BIAS+1),d2 | and subtract bias (plus one)
#else
addl d5,d1 | add a
addxl d4,d0
2:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbf d3,1b | loop back
#else
subql IMM (1),d3
| Now we have the product in d0-d1, with bit (FLT_MANT_DIG - 1) + FLT_MANT_DIG
| (mod 32) of d0 set. The first thing to do now is to normalize it so bit
| FLT_MANT_DIG is set (to do the rounding).
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
rorl IMM (6),d1
swap d1
movew d1,d3
lsll IMM (8),d0
addl d0,d0
addl d0,d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
orw d3,d0
#else
orl d3,d0
btst IMM (FLT_MANT_DIG+1),d0
beq Lround$exit
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
addw IMM (1),d2
bge Lf$inop | if b is +/-INFINITY or NaN return NaN
lea SYM (_fpCCR),a0 | else return zero
movew IMM (0),a0@ |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 |
#else
moveml sp@,d2-d7
movel IMM (1),d2
andl d5,d0
1: addl d0,d0 | shift a left (until bit 23 is set)
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d2 | and adjust exponent
#else
subql IMM (1),d2 | and adjust exponent
movel IMM (1),d3
andl d5,d1
1: addl d1,d1 | shift b left until bit 23 is set
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d3 | and adjust exponent
#else
subl IMM (1),d3 | and adjust exponent
| float __divsf3(float, float);
SYM (__divsf3):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
andl d5,d0 | and isolate fraction
orl d4,d0 | and put hidden bit back
swap d2 | I like exponents in the first byte
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (7),d2 |
#else
lsrl IMM (7),d2 |
andl d5,d1 |
orl d4,d1 |
swap d3 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (7),d3 |
#else
lsrl IMM (7),d3 |
#endif
Ldivsf$2: |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw d3,d2 | subtract exponents
addw IMM (F_BIAS),d2 | and add bias
#else
subl d1,d0 | if a >= b a <-- a-b
beq 3f | if a is zero, exit
2: addl d0,d0 | multiply a by 2
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d3,1b
#else
subql IMM (1),d3
1: cmpl d0,d1
ble 2f
addl d0,d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d3,1b
#else
subql IMM(1),d3
movel IMM (0),d1
bra 3f
2: movel IMM (0),d1
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (FLT_MANT_DIG),d3
addw IMM (31),d3
#else
btst IMM (FLT_MANT_DIG+1),d0
beq 1f | if it is not set, then bit 24 is set
lsrl IMM (1),d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
addw IMM (1),d2 |
#else
addl IMM (1),d2 |
movel IMM (0),d0 | else return zero
lea SYM (_fpCCR),a0 |
movew IMM (0),a0@ |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 |
#else
moveml sp@,d2-d7 |
movel IMM (1),d2
andl d5,d0
1: addl d0,d0 | shift a left until bit FLT_MANT_DIG-1 is set
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d2 | and adjust exponent
#else
subl IMM (1),d2 | and adjust exponent
movel IMM (1),d3
andl d5,d1
1: addl d1,d1 | shift b left until bit FLT_MANT_DIG is set
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d3 | and adjust exponent
#else
subl IMM (1),d3 | and adjust exponent
| This is a common exit point for __mulsf3 and __divsf3.
| First check for underflow in the exponent:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (-FLT_MANT_DIG-1),d2
#else
cmpl IMM (-FLT_MANT_DIG-1),d2
| exponent until it becomes 1 or the fraction is zero (in the latter case
| we signal underflow and return zero).
movel IMM (0),d6 | d6 is used temporarily
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (1),d2 | if the exponent is less than 1 we
#else
cmpl IMM (1),d2 | if the exponent is less than 1 we
#endif
bge 2f | have to shift right (denormalize)
1:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
addw IMM (1),d2 | adjust the exponent
lsrl IMM (1),d0 | shift right once
roxrl IMM (1),d1 |
| Now call the rounding routine (which takes care of denormalized numbers):
lea Lround$0,a0 | to return from rounding routine
lea SYM (_fpCCR),a1 | check the rounding mode
-#ifdef __mcf5200__
+#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
| check again for underflow!). We have to check for overflow or for a
| denormalized number (which also signals underflow).
| Check for overflow (i.e., exponent >= 255).
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (0x00ff),d2
#else
cmpl IMM (0x00ff),d2
beq Lf$den
1:
| Put back the exponents and sign and return.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lslw IMM (7),d2 | exponent back to fourth byte
#else
lsll IMM (7),d2 | exponent back to fourth byte
#endif
bclr IMM (FLT_MANT_DIG-1),d0
swap d0 | and put back exponent
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
orw d2,d0 |
#else
orl d2,d0
lea SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| float __negsf2(float);
SYM (__negsf2):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
bra Lf$infty
1: lea SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| int __cmpsf2(float, float);
SYM (__cmpsf2):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@- | save registers
#else
tstl d6
bpl 1f
| If both are negative exchange them
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d0,d1
#else
movel d0,d7
bne Lcmpsf$a$gt$b | |b| < |a|
| If we got here a == b.
movel IMM (EQUAL),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
rts
Lcmpsf$a$gt$b:
movel IMM (GREATER),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
rts
Lcmpsf$b$gt$a:
movel IMM (LESS),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| Normalize shifting left until bit #FLT_MANT_DIG is set or the exponent
| is one (remember that a denormalized number corresponds to an
| exponent of -F_BIAS+1).
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (1),d2 | remember that the exponent is at least one
#else
cmpl IMM (1),d2 | remember that the exponent is at least one
beq 2f | an exponent of one means denormalized
addl d1,d1 | else shift and adjust the exponent
addxl d0,d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d2,1b |
#else
subql IMM (1),d2
btst IMM (FLT_MANT_DIG),d0
beq 1f
lsrl IMM (1),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
addw IMM (1),d2
#else
addql IMM (1),d2