@ libgcc routines for ARM CPU.
@ Division routines, written by Richard Earnshaw (rearnsha@armltd.co.uk)

/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005, 2007
   Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
/* An executable stack is *not* required for these functions.  */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
/* ------------------------------------------------------------------------ */

/* We need to know what prefix to add to function names.  */

#ifndef __USER_LABEL_PREFIX__
#error  __USER_LABEL_PREFIX__ not defined

/* ANSI concatenation macros.  */

#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

/* Use the right prefix for global labels.  */

#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)

#define __PLT__  /* Not supported in Thumb assembler (for now).  */
#elif defined __vxworks && !defined __PIC__
#define __PLT__  /* Not supported by the kernel loader.  */

#define TYPE(x) .type SYM(x),function
#define SIZE(x) .size SYM(x), . - SYM(x)
/* Function end macros.  Variants for interworking.  */

#if defined(__ARM_ARCH_2__)
# define __ARM_ARCH__ 2

#if defined(__ARM_ARCH_3__)
# define __ARM_ARCH__ 3

#if defined(__ARM_ARCH_3M__) || defined(__ARM_ARCH_4__) \
        || defined(__ARM_ARCH_4T__)
/* We use __ARM_ARCH__ set to 4 here, but in reality it's any processor with
   long multiply instructions.  That includes v3M.  */
# define __ARM_ARCH__ 4

#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
        || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
        || defined(__ARM_ARCH_5TEJ__)
# define __ARM_ARCH__ 5

#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
        || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
        || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__)
# define __ARM_ARCH__ 6

#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
        || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__)
# define __ARM_ARCH__ 7

#error Unable to determine architecture.
/* How to return from a function call depends on the architecture variant.  */

#if (__ARM_ARCH__ > 4) || defined(__ARM_ARCH_4T__)

# define RETc(x)        bx##x   lr

/* Special precautions for interworking on armv4t.  */
# if (__ARM_ARCH__ == 4)

/* Always use bx, not ldr pc.  */
#  if (defined(__thumb__) || defined(__THUMB_INTERWORK__))
#   define __INTERWORKING__
#  endif /* __thumb__ || __THUMB_INTERWORK__ */

/* Include thumb stub before arm mode code.  */
#  if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
#   define __INTERWORKING_STUBS__
#  endif /* __thumb__ && !__THUMB_INTERWORK__ */

#endif /* __ARM_ARCH__ == 4 */

# define RET            mov     pc, lr
# define RETc(x)        mov##x  pc, lr
.macro cfi_pop advance, reg, cfa_offset
        .pushsection .debug_frame
        .byte   0x4             /* DW_CFA_advance_loc4 */
        .byte   (0xc0 | \reg)   /* DW_CFA_restore */
        .byte   0xe             /* DW_CFA_def_cfa_offset */

.macro cfi_push advance, reg, offset, cfa_offset
        .pushsection .debug_frame
        .byte   0x4             /* DW_CFA_advance_loc4 */
        .byte   (0x80 | \reg)   /* DW_CFA_offset */
        .uleb128 (\offset / -4)
        .byte   0xe             /* DW_CFA_def_cfa_offset */

.macro cfi_start start_label, end_label
        .pushsection .debug_frame
        .4byte  LSYM(Lend_cie) - LSYM(Lstart_cie) @ Length of CIE
        .4byte  0xffffffff      @ CIE Identifier Tag
        .byte   0x1             @ CIE Version
        .ascii  "\0"            @ CIE Augmentation
        .uleb128 0x1            @ CIE Code Alignment Factor
        .sleb128 -4             @ CIE Data Alignment Factor
        .byte   0xe             @ CIE RA Column
        .byte   0xc             @ DW_CFA_def_cfa
        .4byte  LSYM(Lend_fde)-LSYM(Lstart_fde) @ FDE Length
        .4byte  LSYM(Lstart_frame)      @ FDE CIE offset
        .4byte  \start_label    @ FDE initial location
        .4byte  \end_label-\start_label @ FDE address range

.macro cfi_end end_label
        .pushsection .debug_frame

/* Don't pass dirn, it's there just to get token pasting right.  */
.macro RETLDM regs=, cond=, unwind=, dirn=ia
#if defined (__INTERWORKING__)
        ldr\cond        lr, [sp], #8
# if defined(__thumb2__)
        ldm\cond\dirn   sp!, {\regs, lr}

        /* Mark LR as restored.  */
97:     cfi_pop 97b - \unwind, 0xe, 0x0

        /* Caller is responsible for providing the IT instruction.  */
        ldr\cond        pc, [sp], #8
# if defined(__thumb2__)
        ldm\cond\dirn   sp!, {\regs, pc}
/* The Unified assembly syntax allows the same code to be assembled for both
   ARM and Thumb-2.  However, this is only supported by recent versions of
   gas, so define a set of macros to allow ARM code on older assemblers.  */
#if defined(__thumb2__)
.macro do_it cond, suffix=""

.macro shift1 op, arg0, arg1, arg2
        \op     \arg0, \arg1, \arg2

#define COND(op1, op2, cond) op1 ## op2 ## cond

/* Perform an arithmetic operation with a variable shift operand.  This
   requires two instructions and a scratch register on Thumb-2 (e.g. the
   ARM "add r0, r1, r2, lsl r3" becomes "lsl tmp, r2, r3" followed by
   "add r0, r1, tmp").  */
.macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
        \shiftop \tmp, \src2, \shiftreg
        \name \dest, \src1, \tmp
.macro do_it cond, suffix=""

.macro shift1 op, arg0, arg1, arg2
        mov     \arg0, \arg1, \op \arg2

#define do_push stmfd sp!,
#define do_pop  ldmfd sp!,
#define COND(op1, op2, cond) op1 ## cond ## op2
.macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
        \name \dest, \src1, \src2, \shiftop \shiftreg
.macro ARM_LDIV0 name
98:     cfi_push 98b - __\name, 0xe, -0x8, 0x8
        bl      SYM (__div0) __PLT__
        mov     r0, #0                  @ About as wrong as it could be.

.macro THUMB_LDIV0 name
98:     cfi_push 98b - __\name, 0xe, -0x4, 0x8
        mov     r0, #0                  @ About as wrong as it could be.
#if defined (__INTERWORKING__)

.macro DIV_FUNC_END name
        cfi_start       __\name, LSYM(Lend_div0)
        cfi_end LSYM(Lend_div0)
.macro THUMB_FUNC_START name

/* Function start macros.  Variants for ARM and Thumb.  */

#define THUMB_FUNC .thumb_func
#define THUMB_CODE .force_thumb
# if defined(__thumb2__)
#define THUMB_SYNTAX .syntax divided
.macro FUNC_START name

/* Special function that will always be coded in ARM assembly, even
   for Thumb-only compilations.  */

#if defined(__thumb2__)

/* For Thumb-2 we build everything in thumb mode.  */
.macro ARM_FUNC_START name
#define EQUIV .thumb_set

#elif defined(__INTERWORKING_STUBS__)

.macro ARM_FUNC_START name

/* A hook to tell gdb that we've switched to ARM mode.  Also used to call
   directly from other local arm routines.  */
#define EQUIV .thumb_set

/* Branch directly to a function declared with ARM_FUNC_START.
   Must be called in arm mode.  */

#else /* !(__INTERWORKING_STUBS__ || __thumb2__) */

.macro ARM_FUNC_START name
.macro FUNC_ALIAS new old
#if defined (__thumb__)
        .thumb_set      SYM (__\new), SYM (__\old)
        .set    SYM (__\new), SYM (__\old)

.macro ARM_FUNC_ALIAS new old
        EQUIV   SYM (__\new), SYM (__\old)
#if defined(__INTERWORKING_STUBS__)
        .set    SYM (_L__\new), SYM (_L__\old)
/* Register aliases.  */

work            .req    r4      @ XXXX is this safe?

/* ------------------------------------------------------------------------ */
/* Bodies of the division and modulo routines.  */
/* ------------------------------------------------------------------------ */
.macro ARM_DIV_BODY dividend, divisor, result, curbit

#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)

        clz     \curbit, \dividend
        clz     \result, \divisor
        sub     \curbit, \result, \curbit
        rsbs    \curbit, \curbit, #31
        addne   \curbit, \curbit, \curbit, lsl #1
        addne   pc, pc, \curbit, lsl #2

        .set    shift, shift - 1
        cmp     \dividend, \divisor, lsl #shift
        adc     \result, \result, \result
        subcs   \dividend, \dividend, \divisor, lsl #shift

#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
#if __ARM_ARCH__ >= 5

        clz     \curbit, \divisor
        clz     \result, \dividend
        sub     \result, \curbit, \result
        mov     \divisor, \divisor, lsl \result
        mov     \curbit, \curbit, lsl \result

#else /* __ARM_ARCH__ < 5 */
        @ Initially shift the divisor left 3 bits if possible,
        @ set curbit accordingly.  This allows for curbit to be located
        @ at the left end of each 4-bit nibble in the division loop
        @ to save one loop iteration in most cases.
        tst     \divisor, #0xe0000000
        moveq   \divisor, \divisor, lsl #3

        @ Unless the divisor is very big, shift it up in multiples of
        @ four bits, since this is the amount of unwinding in the main
        @ division loop.  Continue shifting until the divisor is
        @ larger than the dividend.
1:      cmp     \divisor, #0x10000000
        cmplo   \divisor, \dividend
        movlo   \divisor, \divisor, lsl #4
        movlo   \curbit, \curbit, lsl #4
        @ For a very big divisor, we must shift it one bit at a time, or
        @ we will be in danger of overflowing.
1:      cmp     \divisor, #0x80000000
        cmplo   \divisor, \dividend
        movlo   \divisor, \divisor, lsl #1
        movlo   \curbit, \curbit, lsl #1

#endif /* __ARM_ARCH__ < 5 */

1:      cmp     \dividend, \divisor
        subhs   \dividend, \dividend, \divisor
        orrhs   \result, \result, \curbit
        cmp     \dividend, \divisor, lsr #1
        subhs   \dividend, \dividend, \divisor, lsr #1
        orrhs   \result, \result, \curbit, lsr #1
        cmp     \dividend, \divisor, lsr #2
        subhs   \dividend, \dividend, \divisor, lsr #2
        orrhs   \result, \result, \curbit, lsr #2
        cmp     \dividend, \divisor, lsr #3
        subhs   \dividend, \dividend, \divisor, lsr #3
        orrhs   \result, \result, \curbit, lsr #3
        cmp     \dividend, #0                   @ Early termination?
        movnes  \curbit, \curbit, lsr #4        @ No, any more bits to do?
        movne   \divisor, \divisor, lsr #4

#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
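
/* Illustrative sketch (not part of libgcc): a minimal C rendering of the
   shift-and-subtract algorithm that ARM_DIV_BODY implements on the
   pre-ARMv5 / optimize-for-size path, under the assumption divisor != 0.
   The function name is hypothetical.

        unsigned udiv_sketch (unsigned dividend, unsigned divisor)
        {
          unsigned curbit = 1, result = 0;

          // Align the divisor below the dividend, four bits at a time,
          // then one bit at a time near the top to avoid overflow.
          while (divisor < 0x10000000 && divisor < dividend)
            { divisor <<= 4; curbit <<= 4; }
          while (divisor < 0x80000000 && divisor < dividend)
            { divisor <<= 1; curbit <<= 1; }

          // Main loop, mirroring the 4-way unrolling above: try divisor,
          // divisor/2, divisor/4 and divisor/8 on each pass.  On the last
          // pass some subtractions may be bogus, but the corresponding
          // quotient bits are zero, so the result is still correct.
          do
            {
              for (int shift = 0; shift < 4; shift++)
                if (dividend >= (divisor >> shift))
                  {
                    dividend -= divisor >> shift;
                    result |= curbit >> shift;
                  }
              curbit >>= 4;
              divisor >>= 4;
            }
          while (curbit != 0 && dividend != 0);

          return result;        // quotient only; the remainder is not kept
        }
   */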
/* ------------------------------------------------------------------------ */
.macro ARM_DIV2_ORDER divisor, order

#if __ARM_ARCH__ >= 5

        rsb     \order, \order, #31

        cmp     \divisor, #(1 << 16)
        movhs   \divisor, \divisor, lsr #16

        cmp     \divisor, #(1 << 8)
        movhs   \divisor, \divisor, lsr #8
        addhs   \order, \order, #8

        cmp     \divisor, #(1 << 4)
        movhs   \divisor, \divisor, lsr #4
        addhs   \order, \order, #4

        cmp     \divisor, #(1 << 2)
        addhi   \order, \order, #3
        addls   \order, \order, \divisor, lsr #1
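
/* Illustrative sketch (not part of libgcc): ARM_DIV2_ORDER computes the
   base-2 log of a power-of-two divisor.  On ARMv5+ it is simply
   31 - clz(divisor); the pre-v5 path is the branchless binary search
   sketched below in C (hypothetical function name).

        unsigned div2_order_sketch (unsigned divisor)
        {
          unsigned order = 0;
          if (divisor >= (1u << 16)) { divisor >>= 16; order += 16; }
          if (divisor >= (1u << 8))  { divisor >>= 8;  order += 8;  }
          if (divisor >= (1u << 4))  { divisor >>= 4;  order += 4;  }
          // divisor is now 1, 2, 4 or 8; finish without a lookup table.
          if (divisor > (1u << 2))
            order += 3;                 // divisor == 8
          else
            order += divisor >> 1;      // maps 1 -> 0, 2 -> 1, 4 -> 2
          return order;
        }
   */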
/* ------------------------------------------------------------------------ */
.macro ARM_MOD_BODY dividend, divisor, order, spare

#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)

        clz     \spare, \dividend
        sub     \order, \order, \spare
        rsbs    \order, \order, #31
        addne   pc, pc, \order, lsl #3

        .set    shift, shift - 1
        cmp     \dividend, \divisor, lsl #shift
        subcs   \dividend, \dividend, \divisor, lsl #shift

#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
#if __ARM_ARCH__ >= 5

        clz     \spare, \dividend
        sub     \order, \order, \spare
        mov     \divisor, \divisor, lsl \order

#else /* __ARM_ARCH__ < 5 */
        @ Unless the divisor is very big, shift it up in multiples of
        @ four bits, since this is the amount of unwinding in the main
        @ division loop.  Continue shifting until the divisor is
        @ larger than the dividend.
1:      cmp     \divisor, #0x10000000
        cmplo   \divisor, \dividend
        movlo   \divisor, \divisor, lsl #4
        addlo   \order, \order, #4
        @ For a very big divisor, we must shift it one bit at a time, or
        @ we will be in danger of overflowing.
1:      cmp     \divisor, #0x80000000
        cmplo   \divisor, \dividend
        movlo   \divisor, \divisor, lsl #1
        addlo   \order, \order, #1

#endif /* __ARM_ARCH__ < 5 */
        @ Perform all needed subtractions to keep only the remainder.
        @ Do the comparisons in batches of 4 first.
        subs    \order, \order, #3              @ yes, 3 is intended here
1:      cmp     \dividend, \divisor
        subhs   \dividend, \dividend, \divisor
        cmp     \dividend, \divisor, lsr #1
        subhs   \dividend, \dividend, \divisor, lsr #1
        cmp     \dividend, \divisor, lsr #2
        subhs   \dividend, \dividend, \divisor, lsr #2
        cmp     \dividend, \divisor, lsr #3
        subhs   \dividend, \dividend, \divisor, lsr #3
        mov     \divisor, \divisor, lsr #4
        subges  \order, \order, #4
        @ Either 1, 2 or 3 comparison/subtraction steps are left.
        cmp     \dividend, \divisor
        subhs   \dividend, \dividend, \divisor
        mov     \divisor, \divisor, lsr #1
3:      cmp     \dividend, \divisor
        subhs   \dividend, \dividend, \divisor
        mov     \divisor, \divisor, lsr #1
4:      cmp     \dividend, \divisor
        subhs   \dividend, \dividend, \divisor

#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
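
/* Illustrative sketch (not part of libgcc): a minimal C rendering of the
   remainder-only algorithm behind ARM_MOD_BODY on the pre-ARMv5 path,
   assuming divisor != 0.  The macro unrolls the second loop four steps at
   a time and fixes up the remaining 1-3 steps separately; the sketch folds
   that into one loop.  The function name is hypothetical.

        unsigned umod_sketch (unsigned dividend, unsigned divisor)
        {
          unsigned order = 0;

          // Align the divisor just below the dividend, remembering how
          // far it was shifted (the "order").
          while (divisor < 0x10000000 && divisor < dividend)
            { divisor <<= 4; order += 4; }
          while (divisor < 0x80000000 && divisor < dividend)
            { divisor <<= 1; order += 1; }

          // One compare/subtract step per quotient bit, walking the
          // divisor back down to its original value.
          for (int i = (int) order; i >= 0; i--)
            {
              if (dividend >= divisor)
                dividend -= divisor;
              divisor >>= 1;
            }
          return dividend;      // only the remainder is kept
        }
   */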
/* ------------------------------------------------------------------------ */
.macro THUMB_DIV_MOD_BODY modulo
        @ Load the constant 0x10000000 into our work register.

        @ Unless the divisor is very big, shift it up in multiples of
        @ four bits, since this is the amount of unwinding in the main
        @ division loop.  Continue shifting until the divisor is
        @ larger than the dividend.
        cmp     divisor, dividend

        @ Set work to 0x80000000

        @ For a very big divisor, we must shift it one bit at a time, or
        @ we will be in danger of overflowing.
        cmp     divisor, dividend

        @ Test for possible subtractions ...
        @ ... On the final pass, this may subtract too much from the
        @ dividend, so keep track of which subtractions are done so that
        @ we can fix them up afterwards.
        cmp     dividend, divisor
        sub     dividend, dividend, divisor
        lsr     work, divisor, #1
        sub     dividend, dividend, work
        lsr     work, divisor, #2
        sub     dividend, dividend, work
        lsr     work, divisor, #3
        sub     dividend, dividend, work
        @ ... and note which bits are done in the result.  On the final pass,
        @ this may subtract too much from the dividend, but the result will
        @ be OK, since the "bit" will have been shifted out at the bottom.
        cmp     dividend, divisor
        sub     dividend, dividend, divisor
        orr     result, result, curbit
        lsr     work, divisor, #1
        sub     dividend, dividend, work
        lsr     work, divisor, #2
        sub     dividend, dividend, work
        lsr     work, divisor, #3
        sub     dividend, dividend, work

        cmp     dividend, #0                    @ Early termination?
        lsr     curbit, #4                      @ No, any more bits to do?
        @ Any subtractions that we should not have done will be recorded in
        @ the top three bits of "overdone".  Exactly which ones were not
        @ needed is determined by the position of the bit, stored in ip.
        beq     LSYM(Lgot_result)
        @ If we terminated early because the dividend became zero, then the
        @ bit in ip will not be in the bottom nibble, and we should not
        @ perform the additions below.  We must test for this though (rather
        @ than relying upon the TSTs to prevent the additions) since the bit
        @ in ip could be in the top two bits, which might then match with
        @ one of the smaller RORs.
        beq     LSYM(Lgot_result)
        lsr     work, divisor, #3
        lsr     work, divisor, #2
        beq     LSYM(Lgot_result)
        lsr     work, divisor, #1
/* ------------------------------------------------------------------------ */
/* Start of the Real Functions */
/* ------------------------------------------------------------------------ */

        FUNC_ALIAS aeabi_uidiv udivsi3

        cmp     dividend, divisor
        blo     LSYM(Lgot_result)

#else /* ARM version.  */

        ARM_DIV_BODY r0, r1, r2, r3

12:     ARM_DIV2_ORDER r1, r2

#endif /* ARM version */

        FUNC_START aeabi_uidivmod

        stmfd   sp!, { r0, r1, lr }
        ldmfd   sp!, { r1, r2, lr }

        FUNC_END aeabi_uidivmod

#endif /* L_udivsi3 */
/* ------------------------------------------------------------------------ */

        cmp     dividend, divisor

#else /* ARM version.  */

        subs    r2, r1, #1                      @ compare divisor with 1
        cmpne   r0, r1                          @ compare dividend with divisor
        tsthi   r1, r2                          @ see if divisor is power of 2

        ARM_MOD_BODY r0, r1, r2, r3

#endif /* ARM version.  */

#endif /* L_umodsi3 */
/* ------------------------------------------------------------------------ */

        FUNC_ALIAS aeabi_idiv divsi3

        eor     work, divisor                   @ Save the sign of the result.
        neg     divisor, divisor                @ Loops below use unsigned.
        neg     dividend, dividend
        cmp     dividend, divisor
        blo     LSYM(Lgot_result)

#else /* ARM version.  */

        eor     ip, r0, r1                      @ save the sign of the result.
        rsbmi   r1, r1, #0                      @ loops below use unsigned.
        subs    r2, r1, #1                      @ division by 1 or -1 ?
        rsbmi   r3, r0, #0                      @ positive dividend value
        tst     r1, r2                          @ divisor is power of 2 ?

        ARM_DIV_BODY r3, r1, r0, r2

10:     teq     ip, r0                          @ same sign?
        moveq   r0, ip, asr #31

12:     ARM_DIV2_ORDER r1, r2

#endif /* ARM version */

        FUNC_START aeabi_idivmod

        stmfd   sp!, { r0, r1, lr }
        ldmfd   sp!, { r1, r2, lr }

        FUNC_END aeabi_idivmod

#endif /* L_divsi3 */
/* ------------------------------------------------------------------------ */

        neg     divisor, divisor                @ Loops below use unsigned.

        @ We need to save the sign of the dividend; unfortunately, we need
        @ the work register later on.  Must do this after saving the original
        @ value of the work register, because we will pop this value off
        @ first.
        neg     dividend, dividend
        cmp     dividend, divisor
        blo     LSYM(Lgot_result)

        THUMB_DIV_MOD_BODY 1

        neg     dividend, dividend

#else /* ARM version.  */
        rsbmi   r1, r1, #0                      @ loops below use unsigned.
        movs    ip, r0                          @ preserve sign of dividend
        rsbmi   r0, r0, #0                      @ if negative make positive
        subs    r2, r1, #1                      @ compare divisor with 1
        cmpne   r0, r1                          @ compare dividend with divisor
        tsthi   r1, r2                          @ see if divisor is power of 2

        ARM_MOD_BODY r0, r1, r2, r3

#endif /* ARM version */

#endif /* L_modsi3 */
/* ------------------------------------------------------------------------ */

        FUNC_ALIAS aeabi_idiv0 div0
        FUNC_ALIAS aeabi_ldiv0 div0

        FUNC_END aeabi_ldiv0
        FUNC_END aeabi_idiv0

#endif /* L_divmodsi_tools */
/* ------------------------------------------------------------------------ */
@ GNU/Linux division-by-zero handler.  Used in place of L_dvmd_tls.

/* Constant taken from <asm/signal.h>.  */

        bl      SYM(raise) __PLT__

#endif /* L_dvmd_lnx */
/* ------------------------------------------------------------------------ */
/* Dword shift operations.  */
/* All the following Dword shift variants rely on the fact that a shift
   by a register is performed as
        shft xxx, (Reg & 255)
   so for Reg values in (32...63) and (-1...-31) we will get zero (in the
   case of logical shifts) or the sign (for asr).  */
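
/* Illustrative sketch (not part of libgcc): how a 64-bit logical right
   shift is composed from 32-bit operations, as in the lshrdi3 body below.
   In ARM assembly, register-specified shift amounts of 32..255 simply
   yield zero, so no branches are needed; C shifts of >= 32 bits are
   undefined, so this hypothetical C rendering branches instead.

        unsigned long long lshrdi3_sketch (unsigned long long x, unsigned n)
        {
          unsigned al = (unsigned) x;           // low word
          unsigned ah = (unsigned) (x >> 32);   // high word
          unsigned lo, hi;

          if (n < 32)
            {
              // movmi al, al, lsr r2 / orrmi al, al, ah, lsl ip
              lo = (al >> n) | (n ? ah << (32 - n) : 0);
              hi = ah >> n;
            }
          else
            {
              // movpl al, ah, lsr r3, with r3 = n - 32; high word is 0
              lo = ah >> (n - 32);
              hi = 0;
            }
          return ((unsigned long long) hi << 32) | lo;
        }
   */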
/* Prevent __aeabi double-word shifts from being produced on SymbianOS.  */

        FUNC_ALIAS aeabi_llsr lshrdi3

        movmi   al, al, lsr r2
        movpl   al, ah, lsr r3
        orrmi   al, al, ah, lsl ip

        FUNC_ALIAS aeabi_lasr ashrdi3

        @ If r2 is negative at this point the following step would OR
        @ the sign bit into all of AL.  That's not what we want...
        movmi   al, al, lsr r2
        movpl   al, ah, asr r3
        orrmi   al, al, ah, lsl ip

        FUNC_ALIAS aeabi_llsl ashldi3

        movmi   ah, ah, lsl r2
        movpl   ah, al, lsl r3
        orrmi   ah, ah, al, lsr ip

#endif /* __symbian__ */
/* ------------------------------------------------------------------------ */
/* These next two sections are here despite the fact that they contain Thumb
   assembler because their presence allows interworked code to be linked even
   when the GCC library is this one.  */

/* Do not build the interworking functions when the target architecture does
   not support Thumb instructions.  (This can be a multilib option).  */
#if defined __ARM_ARCH_4T__ || defined __ARM_ARCH_5T__ \
        || defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_5TEJ__ \
        || __ARM_ARCH__ >= 6
#if defined L_call_via_rX

/* These labels & instructions are used by the Arm/Thumb interworking code.
   The address of the function to be called is loaded into a register and
   then one of these labels is called via a BL instruction.  This puts the
   return address into the link register with the bottom bit set, and the
   code here switches to the correct mode before executing the function.  */

.macro call_via register
        THUMB_FUNC_START _call_via_\register

        SIZE    (_call_via_\register)

#endif /* L_call_via_rX */
/* Don't bother with the old interworking routines for Thumb-2.  */
/* ??? Maybe only omit these on v7m.  */
#if defined L_interwork_call_via_rX

/* These labels & instructions are used by the Arm/Thumb interworking code,
   when the target address is in an unknown instruction set.  The address
   of the function to be called is loaded into a register and then one of
   these labels is called via a BL instruction.  This puts the return
   address into the link register with the bottom bit set, and the code
   here switches to the correct mode before executing the function.
   Unfortunately the target code cannot be relied upon to return via a BX
   instruction, so instead we have to store the return address on the
   stack and allow the called function to return here instead.  Upon
   return we recover the real return address and use a BX to get back to
   Thumb mode.

   There are three variations of this code.  The first,
   _interwork_call_via_rN(), will push the return address onto the
   stack and pop it in _arm_return().  It should only be used if all
   arguments are passed in registers.

   The second, _interwork_r7_call_via_rN(), instead stores the return
   address at [r7, #-4].  It is the caller's responsibility to ensure
   that this address is valid and contains no useful data.

   The third, _interwork_r11_call_via_rN(), works in the same way but
   uses r11 instead of r7.  It is useful if the caller does not really
   need a frame pointer.  */
LSYM(Lstart_arm_return):
        cfi_start       LSYM(Lstart_arm_return) LSYM(Lend_arm_return)
        cfi_push        0, 0xe, -0x8, 0x8
        nop     @ This nop is for the benefit of debuggers, so that
                @ backtraces will use the correct unwind information.
        RETLDM  unwind=LSYM(Lstart_arm_return)
        cfi_end LSYM(Lend_arm_return)

        .globl _arm_return_r7
        .globl _arm_return_r11

.macro interwork_with_frame frame, register, name, return
        THUMB_FUNC_START \name

        streq   lr, [\frame, #-4]
        adreq   lr, _arm_return_\frame

.macro interwork register
        THUMB_FUNC_START _interwork_call_via_\register

        .globl LSYM(Lchange_\register)
LSYM(Lchange_\register):
        streq   lr, [sp, #-8]!
        adreq   lr, _arm_return

        SIZE    (_interwork_call_via_\register)

        interwork_with_frame r7,\register,_interwork_r7_call_via_\register
        interwork_with_frame r11,\register,_interwork_r11_call_via_\register
/* The LR case has to be handled a little differently...  */

        THUMB_FUNC_START _interwork_call_via_lr

        stmeqdb r13!, {lr, pc}
        adreq   lr, _arm_return

        SIZE    (_interwork_call_via_lr)

#endif /* L_interwork_call_via_rX */
#endif /* !__thumb2__ */
#endif /* Arch supports thumb.  */

#include "ieee754-df.S"
#include "ieee754-sf.S"

#endif /* __symbian__ */