@ libgcc routines for ARM cpu.
@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)

/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005, 2007
   Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
/* An executable stack is *not* required for these functions.  */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
/* ------------------------------------------------------------------------ */

/* We need to know what prefix to add to function names.  */

#ifndef __USER_LABEL_PREFIX__
#error __USER_LABEL_PREFIX__ not defined
#endif

/* ANSI concatenation macros.  */

#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

/* Use the right prefix for global labels.  */

#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)

#define __PLT__  /* Not supported in Thumb assembler (for now).  */

#define TYPE(x) .type SYM(x),function
#define SIZE(x) .size SYM(x), . - SYM(x)
/* Function end macros.  Variants for interworking.  */

#if defined(__ARM_ARCH_2__)
# define __ARM_ARCH__ 2
#endif

#if defined(__ARM_ARCH_3__)
# define __ARM_ARCH__ 3
#endif

#if defined(__ARM_ARCH_3M__) || defined(__ARM_ARCH_4__) \
        || defined(__ARM_ARCH_4T__)
/* We use __ARM_ARCH__ set to 4 here, but in reality it's any processor with
   long multiply instructions.  That includes v3M.  */
# define __ARM_ARCH__ 4
#endif

#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
        || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
        || defined(__ARM_ARCH_5TEJ__)
# define __ARM_ARCH__ 5
#endif

#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
        || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
        || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__)
# define __ARM_ARCH__ 6
#endif

#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
        || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__)
# define __ARM_ARCH__ 7
#endif

#ifndef __ARM_ARCH__
#error Unable to determine architecture.
#endif
/* How to return from a function call depends on the architecture variant.  */

#if (__ARM_ARCH__ > 4) || defined(__ARM_ARCH_4T__)

# define RET            bx      lr
# define RETc(x)        bx##x   lr

/* Special precautions for interworking on armv4t.  */
# if (__ARM_ARCH__ == 4)

/* Always use bx, not ldr pc.  */
# if (defined(__thumb__) || defined(__THUMB_INTERWORK__))
# define __INTERWORKING__
# endif /* __THUMB__ || __THUMB_INTERWORK__ */

/* Include thumb stub before arm mode code.  */
# if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
# define __INTERWORKING_STUBS__
# endif /* __thumb__ && !__THUMB_INTERWORK__ */

#endif /* __ARM_ARCH == 4 */

#else

# define RET            mov     pc, lr
# define RETc(x)        mov##x  pc, lr

#endif
.macro cfi_pop advance, reg, cfa_offset
        .pushsection .debug_frame
        .byte   0x4             /* DW_CFA_advance_loc4 */
        .byte   (0xc0 | \reg)   /* DW_CFA_restore */
        .byte   0xe             /* DW_CFA_def_cfa_offset */

.macro cfi_push advance, reg, offset, cfa_offset
        .pushsection .debug_frame
        .byte   0x4             /* DW_CFA_advance_loc4 */
        .byte   (0x80 | \reg)   /* DW_CFA_offset */
        .uleb128 (\offset / -4)
        .byte   0xe             /* DW_CFA_def_cfa_offset */

.macro cfi_start start_label, end_label
        .pushsection .debug_frame
        .4byte  LSYM(Lend_cie) - LSYM(Lstart_cie) @ Length of CIE
        .4byte  0xffffffff      @ CIE Identifier Tag
        .byte   0x1             @ CIE Version
        .ascii  "\0"            @ CIE Augmentation
        .uleb128 0x1            @ CIE Code Alignment Factor
        .sleb128 -4             @ CIE Data Alignment Factor
        .byte   0xe             @ CIE RA Column
        .byte   0xc             @ DW_CFA_def_cfa
        .4byte  LSYM(Lend_fde)-LSYM(Lstart_fde) @ FDE Length
        .4byte  LSYM(Lstart_frame)      @ FDE CIE offset
        .4byte  \start_label    @ FDE initial location
        .4byte  \end_label-\start_label @ FDE address range

.macro cfi_end end_label
        .pushsection .debug_frame
/* Don't pass dirn, it's there just to get token pasting right.  */

.macro RETLDM regs=, cond=, unwind=, dirn=ia
#if defined (__INTERWORKING__)
        ldr\cond        lr, [sp], #8
# if defined(__thumb2__)
        ldm\cond\dirn   sp!, {\regs, lr}

        /* Mark LR as restored.  */
97:     cfi_pop 97b - \unwind, 0xe, 0x0

        /* Caller is responsible for providing IT instruction.  */
        ldr\cond        pc, [sp], #8
# if defined(__thumb2__)
        ldm\cond\dirn   sp!, {\regs, pc}
/* The Unified assembly syntax allows the same code to be assembled for both
   ARM and Thumb-2.  However, this is only supported by recent gas, so define
   a set of macros to allow ARM code on older assemblers.  */
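/* For example (illustrative, using the shiftop macros defined below):
   in ARM mode

       shiftop orr, r0, r0, r1, lsl, r2, ip

   expands to the single instruction "orr r0, r0, r1, lsl r2", while on
   Thumb-2, which lacks register-shifted-register operands, it becomes
   "lsl ip, r1, r2" followed by "orr r0, r0, ip".  */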
#if defined(__thumb2__)
.macro do_it cond, suffix=""

.macro shift1 op, arg0, arg1, arg2
        \op     \arg0, \arg1, \arg2

#define COND(op1, op2, cond) op1 ## op2 ## cond

/* Perform an arithmetic operation with a variable shift operand.  This
   requires two instructions and a scratch register on Thumb-2.  */
.macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
        \shiftop \tmp, \src2, \shiftreg
        \name   \dest, \src1, \tmp

#else

.macro do_it cond, suffix=""

.macro shift1 op, arg0, arg1, arg2
        mov     \arg0, \arg1, \op \arg2

#define do_push stmfd sp!,
#define do_pop  ldmfd sp!,
#define COND(op1, op2, cond) op1 ## cond ## op2

.macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
        \name   \dest, \src1, \src2, \shiftop \shiftreg
.macro ARM_LDIV0 name
98:     cfi_push 98b - __\name, 0xe, -0x8, 0x8
        bl      SYM (__div0) __PLT__
        mov     r0, #0                  @ About as wrong as it could be.

.macro THUMB_LDIV0 name
98:     cfi_push 98b - __\name, 0xe, -0x4, 0x8
        mov     r0, #0                  @ About as wrong as it could be.
#if defined (__INTERWORKING__)

.macro DIV_FUNC_END name
        cfi_start       __\name, LSYM(Lend_div0)
        cfi_end LSYM(Lend_div0)
.macro THUMB_FUNC_START name

/* Function start macros.  Variants for ARM and Thumb.  */

#define THUMB_FUNC .thumb_func
#define THUMB_CODE .force_thumb
# if defined(__thumb2__)
#define THUMB_SYNTAX .syntax divided

.macro FUNC_START name
/* Special function that will always be coded in ARM assembly, even if
   in Thumb-only compilation.  */

#if defined(__thumb2__)

/* For Thumb-2 we build everything in thumb mode.  */
.macro ARM_FUNC_START name
#define EQUIV .thumb_set

#elif defined(__INTERWORKING_STUBS__)

.macro ARM_FUNC_START name
/* A hook to tell gdb that we've switched to ARM mode.  Also used to call
   directly from other local arm routines.  */
#define EQUIV .thumb_set
/* Branch directly to a function declared with ARM_FUNC_START.
   Must be called in arm mode.  */

#else /* !(__INTERWORKING_STUBS__ || __thumb2__) */

.macro ARM_FUNC_START name

.macro FUNC_ALIAS new old
#if defined (__thumb__)
        .thumb_set SYM (__\new), SYM (__\old)
#else
        .set    SYM (__\new), SYM (__\old)
#endif

.macro ARM_FUNC_ALIAS new old
        EQUIV   SYM (__\new), SYM (__\old)
#if defined(__INTERWORKING_STUBS__)
        .set    SYM (_L__\new), SYM (_L__\old)
/* Register aliases.  */

work            .req    r4      @ XXXX is this safe?

/* ------------------------------------------------------------------------ */
/* Bodies of the division and modulo routines.  */
/* ------------------------------------------------------------------------ */
.macro ARM_DIV_BODY dividend, divisor, result, curbit

#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)

        clz     \curbit, \dividend
        clz     \result, \divisor
        sub     \curbit, \result, \curbit
        rsbs    \curbit, \curbit, #31
        addne   \curbit, \curbit, \curbit, lsl #1
        addne   pc, pc, \curbit, lsl #2
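        @ Illustrative note: \curbit now holds 3 * (31 - bit difference
        @ between dividend and divisor).  Each unrolled step below is three
        @ 4-byte instructions, so adding \curbit * 4 to pc skips the steps
        @ that cannot contribute quotient bits.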
        .set    shift, shift - 1
        cmp     \dividend, \divisor, lsl #shift
        adc     \result, \result, \result
        subcs   \dividend, \dividend, \divisor, lsl #shift
#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */

#if __ARM_ARCH__ >= 5

        clz     \curbit, \divisor
        clz     \result, \dividend
        sub     \result, \curbit, \result
        mov     \curbit, #1
        mov     \divisor, \divisor, lsl \result
        mov     \curbit, \curbit, lsl \result
        mov     \result, #0

#else /* __ARM_ARCH__ < 5 */

        @ Initially shift the divisor left 3 bits if possible,
        @ set curbit accordingly.  This allows for curbit to be located
        @ at the left end of each 4-bit nibble in the division loop
        @ to save one loop in most cases.
        tst     \divisor, #0xe0000000
        moveq   \divisor, \divisor, lsl #3
        moveq   \curbit, #8
        movne   \curbit, #1

        @ Unless the divisor is very big, shift it up in multiples of
        @ four bits, since this is the amount of unwinding in the main
        @ division loop.  Continue shifting until the divisor is
        @ larger than the dividend.
1:      cmp     \divisor, #0x10000000
        cmplo   \divisor, \dividend
        movlo   \divisor, \divisor, lsl #4
        movlo   \curbit, \curbit, lsl #4
        blo     1b

        @ For very big divisors, we must shift it a bit at a time, or
        @ we will be in danger of overflowing.
1:      cmp     \divisor, #0x80000000
        cmplo   \divisor, \dividend
        movlo   \divisor, \divisor, lsl #1
        movlo   \curbit, \curbit, lsl #1
        blo     1b

        mov     \result, #0

#endif /* __ARM_ARCH__ < 5 */
1:      cmp     \dividend, \divisor
        subhs   \dividend, \dividend, \divisor
        orrhs   \result, \result, \curbit
        cmp     \dividend, \divisor, lsr #1
        subhs   \dividend, \dividend, \divisor, lsr #1
        orrhs   \result, \result, \curbit, lsr #1
        cmp     \dividend, \divisor, lsr #2
        subhs   \dividend, \dividend, \divisor, lsr #2
        orrhs   \result, \result, \curbit, lsr #2
        cmp     \dividend, \divisor, lsr #3
        subhs   \dividend, \dividend, \divisor, lsr #3
        orrhs   \result, \result, \curbit, lsr #3
        cmp     \dividend, #0                   @ Early termination?
        movnes  \curbit, \curbit, lsr #4        @ No, any more bits to do?
        movne   \divisor, \divisor, lsr #4

#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
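/* Illustrative only: a minimal C sketch of the shift-and-subtract scheme
   that ARM_DIV_BODY implements (hypothetical names; assumes divisor != 0;
   the macro above unrolls this loop four bits per iteration):

       unsigned udiv32 (unsigned dividend, unsigned divisor)
       {
         unsigned result = 0, curbit = 1;
         while (divisor < dividend && !(divisor & 0x80000000))
           { divisor <<= 1; curbit <<= 1; }    // align divisor with dividend
         while (curbit)
           {
             if (dividend >= divisor)          // trial subtraction
               { dividend -= divisor; result |= curbit; }
             divisor >>= 1; curbit >>= 1;
           }
         return result;                        // remainder is left in dividend
       }
*/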
/* ------------------------------------------------------------------------ */
.macro ARM_DIV2_ORDER divisor, order

#if __ARM_ARCH__ >= 5

        clz     \order, \divisor
        rsb     \order, \order, #31

#else

        cmp     \divisor, #(1 << 16)
        movhs   \divisor, \divisor, lsr #16
        movhs   \order, #16
        movlo   \order, #0

        cmp     \divisor, #(1 << 8)
        movhs   \divisor, \divisor, lsr #8
        addhs   \order, \order, #8

        cmp     \divisor, #(1 << 4)
        movhs   \divisor, \divisor, lsr #4
        addhs   \order, \order, #4

        cmp     \divisor, #(1 << 2)
        addhi   \order, \order, #3
        addls   \order, \order, \divisor, lsr #1
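/* Illustrative only: a hypothetical C equivalent of the binary search
   above, valid for the power-of-two divisors this macro is used with:

       unsigned order = 0;
       if (d >= 1 << 16) { d >>= 16; order += 16; }
       if (d >= 1 << 8)  { d >>= 8;  order += 8; }
       if (d >= 1 << 4)  { d >>= 4;  order += 4; }
       order += (d > 4) ? 3 : (d >> 1);        // d is now 1, 2, 4 or 8
*/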
/* ------------------------------------------------------------------------ */
.macro ARM_MOD_BODY dividend, divisor, order, spare

#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)

        clz     \order, \divisor
        clz     \spare, \dividend
        sub     \order, \order, \spare
        rsbs    \order, \order, #31
        addne   pc, pc, \order, lsl #3
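        @ Illustrative note: each unrolled step below is two 4-byte
        @ instructions, so "\order, lsl #3" (order * 8 bytes) jumps past
        @ the steps that cannot affect the remainder.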
        .set    shift, shift - 1
        cmp     \dividend, \divisor, lsl #shift
        subcs   \dividend, \dividend, \divisor, lsl #shift

#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */

#if __ARM_ARCH__ >= 5

        clz     \order, \divisor
        clz     \spare, \dividend
        sub     \order, \order, \spare
        mov     \divisor, \divisor, lsl \order

#else /* __ARM_ARCH__ < 5 */

        mov     \order, #0

        @ Unless the divisor is very big, shift it up in multiples of
        @ four bits, since this is the amount of unwinding in the main
        @ division loop.  Continue shifting until the divisor is
        @ larger than the dividend.
1:      cmp     \divisor, #0x10000000
        cmplo   \divisor, \dividend
        movlo   \divisor, \divisor, lsl #4
        addlo   \order, \order, #4
        blo     1b

        @ For very big divisors, we must shift it a bit at a time, or
        @ we will be in danger of overflowing.
1:      cmp     \divisor, #0x80000000
        cmplo   \divisor, \dividend
        movlo   \divisor, \divisor, lsl #1
        addlo   \order, \order, #1
        blo     1b

#endif /* __ARM_ARCH__ < 5 */
        @ Perform all needed subtractions to keep only the remainder.
        @ Do comparisons in batches of 4 first.
        subs    \order, \order, #3              @ yes, 3 is intended here

1:      cmp     \dividend, \divisor
        subhs   \dividend, \dividend, \divisor
        cmp     \dividend, \divisor, lsr #1
        subhs   \dividend, \dividend, \divisor, lsr #1
        cmp     \dividend, \divisor, lsr #2
        subhs   \dividend, \dividend, \divisor, lsr #2
        cmp     \dividend, \divisor, lsr #3
        subhs   \dividend, \dividend, \divisor, lsr #3
        mov     \divisor, \divisor, lsr #4
        subges  \order, \order, #4

        @ Either 1, 2 or 3 comparisons/subtractions are left.
        cmp     \dividend, \divisor
        subhs   \dividend, \dividend, \divisor
        mov     \divisor, \divisor, lsr #1
3:      cmp     \dividend, \divisor
        subhs   \dividend, \dividend, \divisor
        mov     \divisor, \divisor, lsr #1
4:      cmp     \dividend, \divisor
        subhs   \dividend, \dividend, \divisor

#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
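/* Illustrative only: a minimal C sketch of the remainder-only variant
   implemented by ARM_MOD_BODY (hypothetical names; assumes divisor != 0):

       unsigned umod32 (unsigned dividend, unsigned divisor)
       {
         int order = 0;                        // how far divisor was shifted
         while (divisor < dividend && !(divisor & 0x80000000))
           { divisor <<= 1; order++; }
         for (; order >= 0; order--)
           {
             if (dividend >= divisor)          // trial subtraction
               dividend -= divisor;
             divisor >>= 1;
           }
         return dividend;
       }
*/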
/* ------------------------------------------------------------------------ */
.macro THUMB_DIV_MOD_BODY modulo
        @ Load the constant 0x10000000 into our work register.

        @ Unless the divisor is very big, shift it up in multiples of
        @ four bits, since this is the amount of unwinding in the main
        @ division loop.  Continue shifting until the divisor is
        @ larger than the dividend.
        cmp     divisor, dividend

        @ Set work to 0x80000000

        @ For very big divisors, we must shift it a bit at a time, or
        @ we will be in danger of overflowing.
        cmp     divisor, dividend

        @ Test for possible subtractions ...
        @ ... On the final pass, this may subtract too much from the dividend,
        @ so keep track of which subtractions are done; we can fix them up
        @ afterwards.
        cmp     dividend, divisor
        sub     dividend, dividend, divisor
        lsr     work, divisor, #1
        sub     dividend, dividend, work
        lsr     work, divisor, #2
        sub     dividend, dividend, work
        lsr     work, divisor, #3
        sub     dividend, dividend, work
        @ ... and note which bits are done in the result.  On the final pass,
        @ this may subtract too much from the dividend, but the result will be
        @ ok, since the "bit" will have been shifted out at the bottom.
        cmp     dividend, divisor
        sub     dividend, dividend, divisor
        orr     result, result, curbit
        lsr     work, divisor, #1
        sub     dividend, dividend, work
        lsr     work, divisor, #2
        sub     dividend, dividend, work
        lsr     work, divisor, #3
        sub     dividend, dividend, work

        cmp     dividend, #0                    @ Early termination?
        lsr     curbit, #4                      @ No, any more bits to do?
        @ Any subtractions that we should not have done will be recorded in
        @ the top three bits of "overdone".  Exactly which were not needed
        @ are governed by the position of the bit, stored in ip.
        beq     LSYM(Lgot_result)

        @ If we terminated early, because dividend became zero, then the
        @ bit in ip will not be in the bottom nibble, and we should not
        @ perform the additions below.  We must test for this though
        @ (rather than relying upon the TSTs to prevent the additions) since
        @ the bit in ip could be in the top two bits, which might then match
        @ with one of the smaller RORs.
        beq     LSYM(Lgot_result)
        lsr     work, divisor, #3
        lsr     work, divisor, #2
        beq     LSYM(Lgot_result)
        lsr     work, divisor, #1
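        @ Illustrative summary: the 4-way unrolled loop above may apply the
        @ divisor>>1, divisor>>2 or divisor>>3 subtractions even after the
        @ corresponding quotient bit has been shifted out at the bottom.
        @ Each such subtraction sets a flag bit in "overdone", and the code
        @ above adds the flagged amounts back to leave a correct remainder.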
/* ------------------------------------------------------------------------ */
/* Start of the Real Functions */
/* ------------------------------------------------------------------------ */

        FUNC_ALIAS aeabi_uidiv udivsi3

        cmp     dividend, divisor
        blo     LSYM(Lgot_result)

#else /* ARM version.  */

        ARM_DIV_BODY r0, r1, r2, r3

12:     ARM_DIV2_ORDER r1, r2

#endif /* ARM version */

FUNC_START aeabi_uidivmod

        stmfd   sp!, { r0, r1, lr }
        ldmfd   sp!, { r1, r2, lr }
        FUNC_END aeabi_uidivmod

#endif /* L_udivsi3 */
/* ------------------------------------------------------------------------ */

        cmp     dividend, divisor

#else /* ARM version.  */

        subs    r2, r1, #1              @ compare divisor with 1
        cmpne   r0, r1                  @ compare dividend with divisor
        tsthi   r1, r2                  @ see if divisor is power of 2
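        @ Illustrative note: r2 = divisor - 1 here, so a power-of-two
        @ divisor satisfies (divisor & r2) == 0, and the remainder is then
        @ simply dividend & r2, since x % 2^n == x & (2^n - 1).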
        ARM_MOD_BODY r0, r1, r2, r3

#endif /* ARM version.  */

#endif /* L_umodsi3 */
/* ------------------------------------------------------------------------ */

        FUNC_ALIAS aeabi_idiv divsi3

        eor     work, divisor           @ Save the sign of the result.
        neg     divisor, divisor        @ Loops below use unsigned.
        neg     dividend, dividend
        cmp     dividend, divisor
        blo     LSYM(Lgot_result)
#else /* ARM version.  */

        eor     ip, r0, r1              @ save the sign of the result.
        rsbmi   r1, r1, #0              @ loops below use unsigned.
        subs    r2, r1, #1              @ division by 1 or -1?
        rsbmi   r3, r0, #0              @ positive dividend value
        tst     r1, r2                  @ divisor is power of 2?
        ARM_DIV_BODY r3, r1, r0, r2

10:     teq     ip, r0                  @ same sign?
        moveq   r0, ip, asr #31

12:     ARM_DIV2_ORDER r1, r2

#endif /* ARM version */
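/* Illustrative only: a hypothetical C view of the sign handling used by
   both variants above; the quotient is negative exactly when the operand
   signs differ, which the code tracks with a single EOR (udiv32 refers to
   the sketch given earlier):

       int sdiv32 (int a, int b)
       {
         unsigned q = udiv32 (a < 0 ? -(unsigned) a : (unsigned) a,
                              b < 0 ? -(unsigned) b : (unsigned) b);
         return ((a ^ b) < 0) ? -(int) q : (int) q;
       }
*/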
FUNC_START aeabi_idivmod

        stmfd   sp!, { r0, r1, lr }
        ldmfd   sp!, { r1, r2, lr }
        FUNC_END aeabi_idivmod

#endif /* L_divsi3 */
/* ------------------------------------------------------------------------ */

        neg     divisor, divisor        @ Loops below use unsigned.

        @ Need to save the sign of the dividend; unfortunately, we need the
        @ work register later on.  Must do this after saving the original
        @ value of the work register, because we will pop this value off first.
        neg     dividend, dividend
        cmp     dividend, divisor
        blo     LSYM(Lgot_result)

        THUMB_DIV_MOD_BODY 1

        neg     dividend, dividend
#else /* ARM version.  */

        rsbmi   r1, r1, #0              @ loops below use unsigned.
        movs    ip, r0                  @ preserve sign of dividend
        rsbmi   r0, r0, #0              @ if negative make positive
        subs    r2, r1, #1              @ compare divisor with 1
        cmpne   r0, r1                  @ compare dividend with divisor
        tsthi   r1, r2                  @ see if divisor is power of 2
        ARM_MOD_BODY r0, r1, r2, r3

#endif /* ARM version */

#endif /* L_modsi3 */
/* ------------------------------------------------------------------------ */

        FUNC_ALIAS aeabi_idiv0 div0
        FUNC_ALIAS aeabi_ldiv0 div0
        FUNC_END aeabi_ldiv0
        FUNC_END aeabi_idiv0

#endif /* L_divmodsi_tools */
/* ------------------------------------------------------------------------ */

@ GNU/Linux division-by-zero handler.  Used in place of L_dvmd_tls.

/* Constant taken from <asm/signal.h>.  */

        bl      SYM(raise) __PLT__

#endif /* L_dvmd_lnx */
/* ------------------------------------------------------------------------ */
/* Dword shift operations.  */
/* All the following Dword shift variants rely on the fact that
        shft xxx, Reg
   is in fact done as
        shft xxx, (Reg & 255)
   so for Reg value in (32...63) and (-1...-31) we will get zero (in the
   case of logical shifts) or the sign (for asr).  */
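/* Illustrative only: hypothetical C for the 64-bit logical right shift
   below, split into the same two cases the MI/PL conditional code selects:

       unsigned long long llsr64 (unsigned al, unsigned ah, unsigned n)
       {
         unsigned lo, hi;
         if (n == 0)
           { lo = al; hi = ah; }      // the asm gets this free: lsl by 32 == 0
         else if (n < 32)
           { lo = (al >> n) | (ah << (32 - n)); hi = ah >> n; }
         else                         // 32 <= n <= 63
           { lo = ah >> (n - 32); hi = 0; }
         return ((unsigned long long) hi << 32) | lo;
       }
*/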
/* Prevent __aeabi double-word shifts from being produced on SymbianOS.  */

        FUNC_ALIAS aeabi_llsr lshrdi3

        movmi   al, al, lsr r2
        movpl   al, ah, lsr r3
        orrmi   al, al, ah, lsl ip
        FUNC_ALIAS aeabi_lasr ashrdi3

        @ If r2 is negative at this point the following step would OR
        @ the sign bit into all of AL.  That's not what we want...
        movmi   al, al, lsr r2
        movpl   al, ah, asr r3
        orrmi   al, al, ah, lsl ip
        FUNC_ALIAS aeabi_llsl ashldi3

        movmi   ah, ah, lsl r2
        movpl   ah, al, lsl r3
        orrmi   ah, ah, al, lsr ip

#endif /* __symbian__ */
/* ------------------------------------------------------------------------ */
/* These next two sections are here despite the fact that they contain Thumb
   assembler because their presence allows interworked code to be linked even
   when the GCC library is this one.  */

/* Do not build the interworking functions when the target architecture does
   not support Thumb instructions.  (This can be a multilib option.)  */
#if defined __ARM_ARCH_4T__ || defined __ARM_ARCH_5T__ \
        || defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_5TEJ__ \
        || __ARM_ARCH__ >= 6
#if defined L_call_via_rX

/* These labels & instructions are used by the Arm/Thumb interworking code.
   The address of the function to be called is loaded into a register and
   then one of these labels is called via a BL instruction.  This puts the
   return address into the link register with the bottom bit set, and the
   code here switches to the correct mode before executing the function.  */
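/* Illustrative (hypothetical) call sequence from Thumb code:

       ldr  r4, =some_function   @ target may be ARM or Thumb code
       bl   _call_via_r4         @ LR = return address, bottom bit set

   On targets with BX, _call_via_r4 is essentially a single "bx r4"; the
   BX honours the mode bit of the address in r4, and the callee's own
   return sequence takes us back to Thumb state via the saved LR.  */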
.macro call_via register
        THUMB_FUNC_START _call_via_\register

        SIZE    (_call_via_\register)

#endif /* L_call_via_rX */

/* Don't bother with the old interworking routines for Thumb-2.  */
/* ??? Maybe only omit these on v7m.  */
#if defined L_interwork_call_via_rX

/* These labels & instructions are used by the Arm/Thumb interworking code,
   when the target address is in an unknown instruction set.  The address
   of the function to be called is loaded into a register and then one of
   these labels is called via a BL instruction.  This puts the return address
   into the link register with the bottom bit set, and the code here
   switches to the correct mode before executing the function.  Unfortunately
   the target code cannot be relied upon to return via a BX instruction, so
   instead we have to store the return address on the stack and allow the
   called function to return here instead.  Upon return we recover the real
   return address and use a BX to get back to Thumb mode.

   There are three variations of this code.  The first,
   _interwork_call_via_rN(), will push the return address onto the
   stack and pop it in _arm_return().  It should only be used if all
   arguments are passed in registers.

   The second, _interwork_r7_call_via_rN(), instead stores the return
   address at [r7, #-4].  It is the caller's responsibility to ensure
   that this address is valid and contains no useful data.

   The third, _interwork_r11_call_via_rN(), works in the same way but
   uses r11 instead of r7.  It is useful if the caller does not really
   need a frame pointer.  */
LSYM(Lstart_arm_return):
        cfi_start       LSYM(Lstart_arm_return) LSYM(Lend_arm_return)
        cfi_push        0, 0xe, -0x8, 0x8
        nop     @ This nop is for the benefit of debuggers, so that
                @ backtraces will use the correct unwind information.
        RETLDM  unwind=LSYM(Lstart_arm_return)
        cfi_end LSYM(Lend_arm_return)

        .globl _arm_return_r7
        .globl _arm_return_r11
.macro interwork_with_frame frame, register, name, return
        THUMB_FUNC_START \name

        streq   lr, [\frame, #-4]
        adreq   lr, _arm_return_\frame

.macro interwork register
        THUMB_FUNC_START _interwork_call_via_\register

        .globl  LSYM(Lchange_\register)
LSYM(Lchange_\register):
        streq   lr, [sp, #-8]!
        adreq   lr, _arm_return
        SIZE    (_interwork_call_via_\register)

        interwork_with_frame r7,  \register, _interwork_r7_call_via_\register
        interwork_with_frame r11, \register, _interwork_r11_call_via_\register
/* The LR case has to be handled a little differently...  */

        THUMB_FUNC_START _interwork_call_via_lr

        stmeqdb r13!, {lr, pc}
        adreq   lr, _arm_return
        SIZE    (_interwork_call_via_lr)

#endif /* L_interwork_call_via_rX */
#endif /* !__thumb2__ */
#endif /* Arch supports thumb.  */
#include "ieee754-df.S"
#include "ieee754-sf.S"

#endif /* __symbian__ */