1 /* ieee754-df.S double-precision floating point support for ARM
3 Copyright (C) 2003, 2004, 2005, 2007, 2008, 2009 Free Software Foundation, Inc.
4 Contributed by Nicolas Pitre (nico@cam.org)
6 This file is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
11 This file is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
16 Under Section 7 of GPL version 3, you are granted additional
17 permissions described in the GCC Runtime Library Exception, version
18 3.1, as published by the Free Software Foundation.
20 You should have received a copy of the GNU General Public License and
21 a copy of the GCC Runtime Library Exception along with this program;
22 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 <http://www.gnu.org/licenses/>. */
28 * The goal of this code is to be as fast as possible. This is
29 * not meant to be easy to understand for the casual reader.
30 * For slightly simpler code please see the single precision version
33 * Only the default rounding mode is intended for best performances.
34 * Exceptions aren't supported yet, but that can be added quite easily
35 * if necessary without impacting performances.
39 @ For FPA, float words are always big-endian.
40 @ For VFP, floats words follow the memory system mode.
41 #if defined(__VFP_FP__) && !defined(__ARMEB__)
57 ARM_FUNC_ALIAS aeabi_dneg negdf2
60 eor xh, xh, #0x80000000
68 #ifdef L_arm_addsubdf3
70 ARM_FUNC_START aeabi_drsub
72 eor xh, xh, #0x80000000 @ flip sign bit of first arg
76 ARM_FUNC_ALIAS aeabi_dsub subdf3
78 eor yh, yh, #0x80000000 @ flip sign bit of second arg
79 #if defined(__INTERWORKING_STUBS__)
80 b 1f @ Skip Thumb-code prologue
84 ARM_FUNC_ALIAS aeabi_dadd adddf3
86 1: do_push {r4, r5, lr}
88 @ Look for zeroes, equal values, INF, or NAN.
89 shift1 lsl, r4, xh, #1
90 shift1 lsl, r5, yh, #1
95 COND(orr,s,ne) ip, r4, xl
96 COND(orr,s,ne) ip, r5, yl
97 COND(mvn,s,ne) ip, r4, asr #21
98 COND(mvn,s,ne) ip, r5, asr #21
101 @ Compute exponent difference. Make largest exponent in r4,
102 @ corresponding arg in xh-xl, and positive exponent difference in r5.
103 shift1 lsr, r4, r4, #21
104 rsbs r5, r4, r5, lsr #21
116 @ If exponent difference is too large, return largest argument
117 @ already in xh-xl. We need up to 54 bit to handle proper rounding
123 @ Convert mantissa to signed integer.
127 orr xh, ip, xh, lsr #12
129 #if defined(__thumb2__)
131 sbc xh, xh, xh, lsl #1
139 orr yh, ip, yh, lsr #12
141 #if defined(__thumb2__)
143 sbc yh, yh, yh, lsl #1
149 @ If exponent == difference, one or both args were denormalized.
150 @ Since this is not a common case, rescale them off line.
155 @ Compensate for the exponent overlapping the mantissa MSB added later
158 @ Shift yh-yl right per r5, add to xh-xl, keep leftover bits into ip.
161 shift1 lsl, ip, yl, lr
162 shiftop adds xl xl yl lsr r5 yl
164 shiftop adds xl xl yh lsl lr yl
165 shiftop adcs xh xh yh asr r5 yh
170 shift1 lsl,ip, yh, lr
172 orrcs ip, ip, #2 @ 2 not 1, to allow lsr #1 later
173 shiftop adds xl xl yh asr r5 yh
174 adcs xh, xh, yh, asr #31
176 @ We now have a result in xh-xl-ip.
177 @ Keep absolute value in xh-xl-ip, sign in r5 (the n bit was set above)
178 and r5, xh, #0x80000000
180 #if defined(__thumb2__)
191 @ Determine how to normalize the result.
198 @ Result needs to be shifted right.
204 @ Make sure we did not bust our exponent.
209 @ Our result is now properly aligned into xh-xl, remaining bits in ip.
210 @ Round with MSB of ip. If halfway between two numbers, round towards
212 @ Pack final result together.
216 COND(mov,s,eq) ip, xl, lsr #1
218 adc xh, xh, r4, lsl #20
222 @ Result must be shifted left and exponent adjusted.
231 @ No rounding necessary since ip will always be 0 at this point.
243 movhs r2, r2, lsr #16
253 sublo r3, r3, r2, lsr #1
254 sub r3, r3, r2, lsr #3
269 @ determine how to shift the value.
275 @ shift value left 21 to 31 bits, or actually right 11 to 1 bits
276 @ since a register switch happened above.
279 shift1 lsl, xl, xh, ip
280 shift1 lsr, xh, xh, r2
283 @ actually shift value left 1 to 20 bits, which might also represent
284 @ 32 to 52 bits if counting the register switch that happened earlier.
288 shift1 lsl, xh, xh, r2
289 #if defined(__thumb2__)
295 orrle xh, xh, xl, lsr ip
299 @ adjust exponent accordingly.
302 addge xh, xh, r4, lsl #20
306 @ Exponent too small, denormalize result.
307 @ Find out proper shift value.
314 @ shift result right of 1 to 20 bits, sign is in r5.
317 shift1 lsr, xl, xl, r4
318 shiftop orr xl xl xh lsl r2 yh
319 shiftop orr xh r5 xh lsr r4 yh
322 @ shift result right of 21 to 31 bits, or left 11 to 1 bits after
323 @ a register switch from xh to xl.
326 shift1 lsr, xl, xl, r2
327 shiftop orr xl xl xh lsl r4 yh
331 @ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch
333 2: shift1 lsr, xl, xh, r4
337 @ Adjust exponents for denormalized arguments.
338 @ Note that r4 must not remain equal to 0.
341 eor yh, yh, #0x00100000
343 eoreq xh, xh, #0x00100000
352 COND(mvn,s,ne) ip, r5, asr #21
360 @ Result is x + 0.0 = x or 0.0 + y = y.
369 @ Result is x - x = 0.
375 @ Result is x + x = 2x.
381 orrcs xh, xh, #0x80000000
383 2: adds r4, r4, #(2 << 21)
385 addcc xh, xh, #(1 << 20)
387 and r5, xh, #0x80000000
389 @ Overflow: return INF.
391 orr xh, r5, #0x7f000000
392 orr xh, xh, #0x00f00000
396 @ At least one of x or y is INF/NAN.
397 @ if xh-xl != INF/NAN: return yh-yl (which is INF/NAN)
398 @ if yh-yl != INF/NAN: return xh-xl (which is INF/NAN)
399 @ if either is NAN: return NAN
400 @ if opposite sign: return NAN
401 @ otherwise return xh-xl (which is INF or -INF)
407 COND(mvn,s,eq) ip, r5, asr #21
411 orrs r4, xl, xh, lsl #12
413 COND(orr,s,eq) r5, yl, yh, lsl #12
415 orrne xh, xh, #0x00080000 @ quiet NAN
423 ARM_FUNC_START floatunsidf
424 ARM_FUNC_ALIAS aeabi_ui2d floatunsidf
431 mov r4, #0x400 @ initial exponent
432 add r4, r4, #(52-1 - 1)
433 mov r5, #0 @ sign bit is 0
443 ARM_FUNC_START floatsidf
444 ARM_FUNC_ALIAS aeabi_i2d floatsidf
451 mov r4, #0x400 @ initial exponent
452 add r4, r4, #(52-1 - 1)
453 ands r5, r0, #0x80000000 @ sign bit in r5
455 rsbmi r0, r0, #0 @ absolute value
465 ARM_FUNC_START extendsfdf2
466 ARM_FUNC_ALIAS aeabi_f2d extendsfdf2
468 movs r2, r0, lsl #1 @ toss sign bit
469 mov xh, r2, asr #3 @ stretch exponent
470 mov xh, xh, rrx @ retrieve sign bit
471 mov xl, r2, lsl #28 @ retrieve remaining bits
473 COND(and,s,ne) r3, r2, #0xff000000 @ isolate exponent
474 teqne r3, #0xff000000 @ if not 0, check if INF or NAN
475 eorne xh, xh, #0x38000000 @ fixup exponent otherwise.
476 RETc(ne) @ and return it.
478 teq r2, #0 @ if actually 0
480 teqne r3, #0xff000000 @ or INF or NAN
481 RETc(eq) @ we are done already.
483 @ value was denormalized. We can normalize it now.
485 mov r4, #0x380 @ setup corresponding exponent
486 and r5, xh, #0x80000000 @ move sign bit in r5
487 bic xh, xh, #0x80000000
493 ARM_FUNC_START floatundidf
494 ARM_FUNC_ALIAS aeabi_ul2d floatundidf
497 #if !defined (__VFP_FP__) && !defined(__SOFTFP__)
505 #if !defined (__VFP_FP__) && !defined(__SOFTFP__)
506 @ For hard FPA code we want to return via the tail below so that
507 @ we can return the result in f0 as well as in r0/r1 for backwards
510 @ Push pc as well so that RETLDM works correctly.
511 do_push {r4, r5, ip, lr, pc}
519 ARM_FUNC_START floatdidf
520 ARM_FUNC_ALIAS aeabi_l2d floatdidf
523 #if !defined (__VFP_FP__) && !defined(__SOFTFP__)
531 #if !defined (__VFP_FP__) && !defined(__SOFTFP__)
532 @ For hard FPA code we want to return via the tail below so that
533 @ we can return the result in f0 as well as in r0/r1 for backwards
536 @ Push pc as well so that RETLDM works correctly.
537 do_push {r4, r5, ip, lr, pc}
542 ands r5, ah, #0x80000000 @ sign bit in r5
544 #if defined(__thumb2__)
546 sbc ah, ah, ah, lsl #1
552 mov r4, #0x400 @ initial exponent
553 add r4, r4, #(52-1 - 1)
555 @ FPA little-endian: must swap the word order.
565 @ The value is too big. Scale it down a bit...
573 add r2, r2, ip, lsr #3
576 shift1 lsl, ip, xl, r3
577 shift1 lsr, xl, xl, r2
578 shiftop orr xl xl xh lsl r3 lr
579 shift1 lsr, xh, xh, r2
583 #if !defined (__VFP_FP__) && !defined(__SOFTFP__)
585 @ Legacy code expects the result to be returned in f0. Copy it
599 #endif /* L_addsubdf3 */
601 #ifdef L_arm_muldivdf3
603 ARM_FUNC_START muldf3
604 ARM_FUNC_ALIAS aeabi_dmul muldf3
605 do_push {r4, r5, r6, lr}
607 @ Mask out exponents, trap any zero/denormal/INF/NAN.
610 ands r4, ip, xh, lsr #20
612 COND(and,s,ne) r5, ip, yh, lsr #20
617 @ Add exponents together
620 @ Determine final sign.
623 @ Convert mantissa to unsigned integer.
624 @ If power of two, branch to a separate path.
625 bic xh, xh, ip, lsl #21
626 bic yh, yh, ip, lsl #21
627 orrs r5, xl, xh, lsl #12
629 COND(orr,s,ne) r5, yl, yh, lsl #12
630 orr xh, xh, #0x00100000
631 orr yh, yh, #0x00100000
636 @ Put sign bit in r6, which will be restored in yl later.
637 and r6, r6, #0x80000000
639 @ Well, no way to make it shorter without the umull instruction.
640 stmfd sp!, {r6, r7, r8, r9, sl, fp}
645 bic xl, xl, r7, lsl #16
646 bic yl, yl, r8, lsl #16
647 bic xh, xh, r9, lsl #16
648 bic yh, yh, sl, lsl #16
652 adds ip, ip, fp, lsl #16
653 adc lr, lr, fp, lsr #16
655 adds ip, ip, fp, lsl #16
656 adc lr, lr, fp, lsr #16
659 adds lr, lr, fp, lsl #16
660 adc r5, r5, fp, lsr #16
662 adds lr, lr, fp, lsl #16
663 adc r5, r5, fp, lsr #16
665 adds lr, lr, fp, lsl #16
666 adc r5, r5, fp, lsr #16
668 adds lr, lr, fp, lsl #16
669 adc r5, r5, fp, lsr #16
672 adds r5, r5, fp, lsl #16
673 adc r6, r6, fp, lsr #16
675 adds r5, r5, fp, lsl #16
676 adc r6, r6, fp, lsr #16
692 ldmfd sp!, {yl, r7, r8, r9, sl, fp}
696 @ Here is the actual multiplication.
700 and yl, r6, #0x80000000
707 @ The LSBs in ip are only significant for the final rounding.
713 @ Adjust result upon the MSB position.
715 cmp r6, #(1 << (20-11))
722 @ Shift to final position, add sign to result.
723 orr xh, yl, r6, lsl #11
724 orr xh, xh, r5, lsr #21
726 orr xl, xl, lr, lsr #21
729 @ Check exponent range for under/overflow.
730 subs ip, r4, #(254 - 1)
735 @ Round the result, merge final exponent.
738 COND(mov,s,eq) lr, xl, lsr #1
740 adc xh, xh, r4, lsl #20
743 @ Multiplication by 0x1p*: let's shortcut a lot of code.
745 and r6, r6, #0x80000000
749 subs r4, r4, ip, lsr #1
751 COND(rsb,s,gt) r5, r4, ip
752 orrgt xh, xh, r4, lsl #20
753 RETLDM "r4, r5, r6" gt
755 @ Under/overflow: fix things up for the code below.
756 orr xh, xh, #0x00100000
764 @ Check if denormalized result is possible, otherwise return signed 0.
768 bicle xh, xh, #0x7fffffff
769 RETLDM "r4, r5, r6" le
771 @ Find out proper shift value.
778 @ shift result right of 1 to 20 bits, preserve sign bit, round, etc.
781 shift1 lsl, r3, xl, r5
782 shift1 lsr, xl, xl, r4
783 shiftop orr xl xl xh lsl r5 r2
784 and r2, xh, #0x80000000
785 bic xh, xh, #0x80000000
786 adds xl, xl, r3, lsr #31
787 shiftop adc xh r2 xh lsr r4 r6
788 orrs lr, lr, r3, lsl #1
790 biceq xl, xl, r3, lsr #31
793 @ shift result right of 21 to 31 bits, or left 11 to 1 bits after
794 @ a register switch from xh to xl. Then round.
797 shift1 lsl, r3, xl, r4
798 shift1 lsr, xl, xl, r5
799 shiftop orr xl xl xh lsl r4 r2
800 bic xh, xh, #0x7fffffff
801 adds xl, xl, r3, lsr #31
803 orrs lr, lr, r3, lsl #1
805 biceq xl, xl, r3, lsr #31
808 @ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch
809 @ from xh to xl. Leftover bits are in r3-r6-lr for rounding.
811 shiftop orr lr lr xl lsl r5 r2
812 shift1 lsr, r3, xl, r4
813 shiftop orr r3 r3 xh lsl r5 r2
814 shift1 lsr, xl, xh, r4
815 bic xh, xh, #0x7fffffff
816 shiftop bic xl xl xh lsr r4 r2
817 add xl, xl, r3, lsr #31
818 orrs lr, lr, r3, lsl #1
820 biceq xl, xl, r3, lsr #31
823 @ One or both arguments are denormalized.
824 @ Scale them leftwards and preserve sign bit.
828 and r6, xh, #0x80000000
829 1: movs xl, xl, lsl #1
839 2: and r6, yh, #0x80000000
840 3: movs yl, yl, lsl #1
850 @ Isolate the INF and NAN cases away
852 and r5, ip, yh, lsr #20
857 @ Here, one or more arguments are either denormalized or zero.
858 orrs r6, xl, xh, lsl #1
860 COND(orr,s,ne) r6, yl, yh, lsl #1
863 @ Result is 0, but determine sign anyway.
866 and xh, xh, #0x80000000
870 1: @ One or both args are INF or NAN.
871 orrs r6, xl, xh, lsl #1
875 COND(orr,s,ne) r6, yl, yh, lsl #1
876 beq LSYM(Lml_n) @ 0 * INF or INF * 0 -> NAN
879 orrs r6, xl, xh, lsl #12
880 bne LSYM(Lml_n) @ NAN * <anything> -> NAN
883 orrs r6, yl, yh, lsl #12
887 bne LSYM(Lml_n) @ <anything> * NAN -> NAN
889 @ Result is INF, but we need to determine its sign.
893 @ Overflow: return INF (sign already in xh).
895 and xh, xh, #0x80000000
896 orr xh, xh, #0x7f000000
897 orr xh, xh, #0x00f00000
901 @ Return a quiet NAN.
903 orr xh, xh, #0x7f000000
904 orr xh, xh, #0x00f80000
910 ARM_FUNC_START divdf3
911 ARM_FUNC_ALIAS aeabi_ddiv divdf3
913 do_push {r4, r5, r6, lr}
915 @ Mask out exponents, trap any zero/denormal/INF/NAN.
918 ands r4, ip, xh, lsr #20
920 COND(and,s,ne) r5, ip, yh, lsr #20
925 @ Subtract divisor exponent from dividend's.
928 @ Preserve final sign into lr.
931 @ Convert mantissa to unsigned integer.
932 @ Dividend -> r5-r6, divisor -> yh-yl.
933 orrs r5, yl, yh, lsl #12
938 orr yh, r5, yh, lsr #4
939 orr yh, yh, yl, lsr #24
941 orr r5, r5, xh, lsr #4
942 orr r5, r5, xl, lsr #24
945 @ Initialize xh with final sign bit.
946 and xh, lr, #0x80000000
948 @ Ensure result will land to known bit position.
949 @ Apply exponent bias accordingly.
953 adc r4, r4, #(255 - 2)
959 @ Perform first subtraction to align result to a nibble.
967 @ The actual division loop.
981 orrcs xl, xl, ip, lsr #1
989 orrcs xl, xl, ip, lsr #2
997 orrcs xl, xl, ip, lsr #3
1002 orr r5, r5, r6, lsr #28
1005 orr yh, yh, yl, lsr #29
1010 @ We are done with a word of the result.
1011 @ Loop again for the low word if this pass was for the high word.
1019 @ Be sure result starts in the high word.
1025 @ Check exponent range for under/overflow.
1026 subs ip, r4, #(254 - 1)
1031 @ Round the result, merge final exponent.
1034 COND(sub,s,eq) ip, r6, yl
1035 COND(mov,s,eq) ip, xl, lsr #1
1037 adc xh, xh, r4, lsl #20
1040 @ Division by 0x1p*: shortcut a lot of code.
1042 and lr, lr, #0x80000000
1043 orr xh, lr, xh, lsr #12
1044 adds r4, r4, ip, lsr #1
1046 COND(rsb,s,gt) r5, r4, ip
1047 orrgt xh, xh, r4, lsl #20
1048 RETLDM "r4, r5, r6" gt
1050 orr xh, xh, #0x00100000
1055 @ Result might need to be denormalized: put remainder bits
1056 @ in lr for rounding considerations.
1061 @ One or both arguments is either INF, NAN or zero.
1063 and r5, ip, yh, lsr #20
1067 beq LSYM(Lml_n) @ INF/NAN / INF/NAN -> NAN
1070 orrs r4, xl, xh, lsl #12
1071 bne LSYM(Lml_n) @ NAN / <anything> -> NAN
1073 bne LSYM(Lml_i) @ INF / <anything> -> INF
1076 b LSYM(Lml_n) @ INF / (INF or NAN) -> NAN
1079 orrs r5, yl, yh, lsl #12
1080 beq LSYM(Lml_z) @ <anything> / INF -> 0
1083 b LSYM(Lml_n) @ <anything> / NAN -> NAN
1084 2: @ If both are nonzero, we need to normalize and resume above.
1085 orrs r6, xl, xh, lsl #1
1087 COND(orr,s,ne) r6, yl, yh, lsl #1
1089 @ One or both arguments are 0.
1090 orrs r4, xl, xh, lsl #1
1091 bne LSYM(Lml_i) @ <non_zero> / 0 -> INF
1092 orrs r5, yl, yh, lsl #1
1093 bne LSYM(Lml_z) @ 0 / <non_zero> -> 0
1094 b LSYM(Lml_n) @ 0 / 0 -> NAN
1099 #endif /* L_muldivdf3 */
1103 @ Note: only r0 (return value) and ip are clobbered here.
1105 ARM_FUNC_START gtdf2
1106 ARM_FUNC_ALIAS gedf2 gtdf2
1110 ARM_FUNC_START ltdf2
1111 ARM_FUNC_ALIAS ledf2 ltdf2
1115 ARM_FUNC_START cmpdf2
1116 ARM_FUNC_ALIAS nedf2 cmpdf2
1117 ARM_FUNC_ALIAS eqdf2 cmpdf2
1118 mov ip, #1 @ how should we specify unordered here?
1120 1: str ip, [sp, #-4]!
1122 @ Trap any INF/NAN first.
1124 mvns ip, ip, asr #21
1127 COND(mvn,s,ne) ip, ip, asr #21
1130 @ Test for equality.
1131 @ Note that 0.0 is equal to -0.0.
1133 orrs ip, xl, xh, lsl #1 @ if x == 0.0 or -0.0
1135 COND(orr,s,eq) ip, yl, yh, lsl #1 @ and y == 0.0 or -0.0
1136 teqne xh, yh @ or xh == yh
1138 teqeq xl, yl @ and xl == yl
1139 moveq r0, #0 @ then equal.
1148 @ Compare values if same sign
1156 movcs r0, yh, asr #31
1157 mvncc r0, yh, asr #31
1162 3: mov ip, xh, lsl #1
1163 mvns ip, ip, asr #21
1165 orrs ip, xl, xh, lsl #12
1167 4: mov ip, yh, lsl #1
1168 mvns ip, ip, asr #21
1170 orrs ip, yl, yh, lsl #12
1171 beq 2b @ y is not NAN
1172 5: ldr r0, [sp], #4 @ unordered return code
1183 ARM_FUNC_START aeabi_cdrcmple
1193 ARM_FUNC_START aeabi_cdcmpeq
1194 ARM_FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq
1196 @ The status-returning routines are required to preserve all
1197 @ registers except ip, lr, and cpsr.
1200 @ Set the Z flag correctly, and the C flag unconditionally.
1202 @ Clear the C flag if the return value was -1, indicating
1203 @ that the first operand was smaller than the second.
1208 FUNC_END aeabi_cdcmple
1209 FUNC_END aeabi_cdcmpeq
1210 FUNC_END aeabi_cdrcmple
1212 ARM_FUNC_START aeabi_dcmpeq
1215 ARM_CALL aeabi_cdcmple
1217 moveq r0, #1 @ Equal to.
1218 movne r0, #0 @ Less than, greater than, or unordered.
1221 FUNC_END aeabi_dcmpeq
1223 ARM_FUNC_START aeabi_dcmplt
1226 ARM_CALL aeabi_cdcmple
1228 movcc r0, #1 @ Less than.
1229 movcs r0, #0 @ Equal to, greater than, or unordered.
1232 FUNC_END aeabi_dcmplt
1234 ARM_FUNC_START aeabi_dcmple
1237 ARM_CALL aeabi_cdcmple
1239 movls r0, #1 @ Less than or equal to.
1240 movhi r0, #0 @ Greater than or unordered.
1243 FUNC_END aeabi_dcmple
1245 ARM_FUNC_START aeabi_dcmpge
1248 ARM_CALL aeabi_cdrcmple
1250 movls r0, #1 @ Operand 2 is less than or equal to operand 1.
1251 movhi r0, #0 @ Operand 2 greater than operand 1, or unordered.
1254 FUNC_END aeabi_dcmpge
1256 ARM_FUNC_START aeabi_dcmpgt
1259 ARM_CALL aeabi_cdrcmple
1261 movcc r0, #1 @ Operand 2 is less than operand 1.
1262 movcs r0, #0 @ Operand 2 is greater than or equal to operand 1,
1263 @ or they are unordered.
1266 FUNC_END aeabi_dcmpgt
1268 #endif /* L_cmpdf2 */
1270 #ifdef L_arm_unorddf2
1272 ARM_FUNC_START unorddf2
1273 ARM_FUNC_ALIAS aeabi_dcmpun unorddf2
1276 mvns ip, ip, asr #21
1278 orrs ip, xl, xh, lsl #12
1280 1: mov ip, yh, lsl #1
1281 mvns ip, ip, asr #21
1283 orrs ip, yl, yh, lsl #12
1285 2: mov r0, #0 @ arguments are ordered.
1288 3: mov r0, #1 @ arguments are unordered.
1291 FUNC_END aeabi_dcmpun
1294 #endif /* L_unorddf2 */
1296 #ifdef L_arm_fixdfsi
1298 ARM_FUNC_START fixdfsi
1299 ARM_FUNC_ALIAS aeabi_d2iz fixdfsi
1301 @ check exponent range.
1303 adds r2, r2, #(1 << 21)
1304 bcs 2f @ value is INF or NAN
1305 bpl 1f @ value is too small
1306 mov r3, #(0xfffffc00 + 31)
1307 subs r2, r3, r2, asr #21
1308 bls 3f @ value is too large
1312 orr r3, r3, #0x80000000
1313 orr r3, r3, xl, lsr #21
1314 tst xh, #0x80000000 @ the sign bit
1315 shift1 lsr, r0, r3, r2
1323 2: orrs xl, xl, xh, lsl #12
1325 3: ands r0, xh, #0x80000000 @ the sign bit
1327 moveq r0, #0x7fffffff @ maximum signed positive si
1330 4: mov r0, #0 @ How should we convert NAN?
1336 #endif /* L_fixdfsi */
1338 #ifdef L_arm_fixunsdfsi
1340 ARM_FUNC_START fixunsdfsi
1341 ARM_FUNC_ALIAS aeabi_d2uiz fixunsdfsi
1343 @ check exponent range.
1345 bcs 1f @ value is negative
1346 adds r2, r2, #(1 << 21)
1347 bcs 2f @ value is INF or NAN
1348 bpl 1f @ value is too small
1349 mov r3, #(0xfffffc00 + 31)
1350 subs r2, r3, r2, asr #21
1351 bmi 3f @ value is too large
1355 orr r3, r3, #0x80000000
1356 orr r3, r3, xl, lsr #21
1357 shift1 lsr, r0, r3, r2
1363 2: orrs xl, xl, xh, lsl #12
1364 bne 4f @ value is NAN.
1365 3: mov r0, #0xffffffff @ maximum unsigned si
1368 4: mov r0, #0 @ How should we convert NAN?
1371 FUNC_END aeabi_d2uiz
1374 #endif /* L_fixunsdfsi */
1376 #ifdef L_arm_truncdfsf2
1378 ARM_FUNC_START truncdfsf2
1379 ARM_FUNC_ALIAS aeabi_d2f truncdfsf2
1381 @ check exponent range.
1383 subs r3, r2, #((1023 - 127) << 21)
1385 COND(sub,s,cs) ip, r3, #(1 << 21)
1386 COND(rsb,s,cs) ip, ip, #(254 << 21)
1387 bls 2f @ value is out of range
1389 1: @ shift and round mantissa
1390 and ip, xh, #0x80000000
1392 orr xl, ip, xl, lsr #29
1394 adc r0, xl, r3, lsl #2
1399 2: @ either overflow or underflow
1403 @ check if denormalized value is possible
1404 adds r2, r3, #(23 << 21)
1406 andlt r0, xh, #0x80000000 @ too small, return signed 0.
1409 @ denormalize value so we can resume with the code above afterwards.
1410 orr xh, xh, #0x00100000
1414 #if defined(__thumb2__)
1419 shift1 lsr, xl, xl, r2
1421 orrne xl, xl, #1 @ fold r3 for rounding considerations.
1424 shiftop orr xl xl r3 lsl ip ip
1425 shift1 lsr, r3, r3, r2
1430 mvns r3, r2, asr #21
1431 bne 5f @ simple overflow
1432 orrs r3, xl, xh, lsl #12
1434 movne r0, #0x7f000000
1435 orrne r0, r0, #0x00c00000
1436 RETc(ne) @ return NAN
1438 5: @ return INF with sign
1439 and r0, xh, #0x80000000
1440 orr r0, r0, #0x7f000000
1441 orr r0, r0, #0x00800000
1447 #endif /* L_truncdfsf2 */