1 /* -*- Mode: Asm -*- */
2 /* Copyright (C) 1998, 1999, 2000, 2007, 2008, 2009
3 Free Software Foundation, Inc.
4 Contributed by Denis Chertykov <chertykov@gmail.com>
6 This file is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
11 This file is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
16 Under Section 7 of GPL version 3, you are granted additional
17 permissions described in the GCC Runtime Library Exception, version
18 3.1, as published by the Free Software Foundation.
20 You should have received a copy of the GNU General Public License and
21 a copy of the GCC Runtime Library Exception along with this program;
22 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 <http://www.gnu.org/licenses/>. */
/* Register conventions used throughout this file: r1 is avr-gcc's
   fixed zero register, r0 its scratch register; 0x3B is the I/O
   address of the RAMPZ register (high flash byte for ELPM).  */
25 #define __zero_reg__ r1
26 #define __tmp_reg__ r0
30 #define __RAMPZ__ 0x3B
32 /* Most of the functions here are called directly from avr.md
33 patterns, instead of using the standard libcall mechanisms.
34 This can make better code because GCC knows exactly which
35 of the call-used registers (not all of them) are clobbered. */
37 .section .text.libgcc, "ax", @progbits
/* mov_l / mov_h: copy the low resp. high byte of a 16-bit register
   pair; on MOVW-capable cores the copy is presumably done with a
   single movw (NOTE(review): macro bodies are not visible in this
   view -- confirm against the full file).  */
39 .macro mov_l r_dest, r_src
40 #if defined (__AVR_HAVE_MOVW__)
47 .macro mov_h r_dest, r_src
48 #if defined (__AVR_HAVE_MOVW__)
/* Presumably selects long (jmp/call) vs. relative (rjmp/rcall) forms
   for the XJMP/XCALL helpers used below -- TODO confirm.  */
55 #if defined (__AVR_HAVE_JMP_CALL__)
75 /* Note: mulqi3, mulhi3 are open-coded on the enhanced core. */
76 #if !defined (__AVR_HAVE_MUL__)
77 /*******************************************************
   Multiplication 8 x 8 (shift-and-add, cores without MUL)
79 *******************************************************/
80 #if defined (L_mulqi3)
82 #define r_arg2 r22 /* multiplicand */
83 #define r_arg1 r24 /* multiplier */
84 #define r_res __tmp_reg__ /* result */
/* __mulqi3: r24 = r24 * r22 (low 8 bits).  Classic shift-and-add:
   accumulate the progressively doubled multiplicand for each set
   bit of the multiplier.  */
89 clr r_res ; result = 0
93 add r_arg2,r_arg2 ; multiplicand <<= 1
94 breq __mulqi3_exit ; done once multiplicand becomes 0
96 brne __mulqi3_loop ; keep looping while multiplier != 0
98 mov r_arg1,r_res ; move result into return register r24
106 #endif /* defined (L_mulqi3) */
/* Widening 8 x 8 -> 16 multiplies (signed / unsigned); bodies are
   not visible in this view.  */
108 #if defined (L_mulqihi3)
120 #endif /* defined (L_mulqihi3) */
122 #if defined (L_umulqihi3)
130 #endif /* defined (L_umulqihi3) */
132 /*******************************************************
133 Multiplication 16 x 16
134 *******************************************************/
135 #if defined (L_mulhi3)
136 #define r_arg1L r24 /* multiplier Low */
137 #define r_arg1H r25 /* multiplier High */
138 #define r_arg2L r22 /* multiplicand Low */
139 #define r_arg2H r23 /* multiplicand High */
140 #define r_resL __tmp_reg__ /* result Low */
141 #define r_resH r21 /* result High */
/* __mulhi3: r25:r24 = r25:r24 * r23:r22 (low 16 bits), same
   shift-and-add scheme as __mulqi3 extended to register pairs.  */
146 clr r_resH ; result = 0 (high byte)
147 clr r_resL ; result = 0 (low byte)
151 add r_resL,r_arg2L ; result += multiplicand
154 add r_arg2L,r_arg2L ; multiplicand <<= 1
/* 16-bit test of the shifted multiplicand against zero.  */
157 cp r_arg2L,__zero_reg__
158 cpc r_arg2H,__zero_reg__
159 breq __mulhi3_exit ; done once multiplicand becomes 0
161 lsr r_arg1H ; shift next LSB of multiplier into carry
164 brne __mulhi3_loop ; keep looping while multiplier != 0
166 mov r_arg1H,r_resH ; move result into return registers
178 #endif /* defined (L_mulhi3) */
179 #endif /* !defined (__AVR_HAVE_MUL__) */
181 /*******************************************************
182 Widening Multiplication 32 = 16 x 16
183 *******************************************************/
/* Signed widening multiply, built from hardware MUL partial
   products; shares its tail with __umulhisi3 below.  */
185 #if defined (L_mulhisi3)
187 #if defined (__AVR_HAVE_MUL__)
189 ;; r25:r22 = r19:r18 * r21:r20
/* Partial products: A = r19:r18, B = r21:r20, C = 32-bit result.  */
201 ; C = (signed)A1 * (signed)B1
209 ; C += (signed)A1 * B0
217 ; C += (signed)B1 * A0
220 XJMP __xmulhisi3_exit
231 #else /* !__AVR_HAVE_MUL__ */
232 ;;; FIXME: This is dead code (no one calls it)
244 #endif /* __AVR_HAVE_MUL__ */
246 #endif /* defined (L_mulhisi3) */
/* Unsigned widening multiply; same register interface.  */
248 #if defined (L_umulhisi3)
250 #if defined (__AVR_HAVE_MUL__)
252 ;; r25:r22 = r19:r18 * r21:r20
281 XJMP __xmulhisi3_exit
292 #else /* !__AVR_HAVE_MUL__ */
293 ;;; FIXME: This is dead code (no one calls it)
301 #endif /* __AVR_HAVE_MUL__ */
303 #endif /* defined (L_umulhisi3) */
305 #if defined (L_xmulhisi3_exit)
307 ;;; Helper for __mulhisi3 resp. __umulhisi3.
314 DEFUN __xmulhisi3_exit
320 ENDF __xmulhisi3_exit
327 #endif /* defined (L_xmulhisi3_exit) */
329 #if defined (L_mulsi3)
330 /*******************************************************
331 Multiplication 32 x 32
332 *******************************************************/
/* __mulsi3: low 32 bits of a 32 x 32 product.  MUL variant combines
   hardware partial products; the fallback uses shift-and-add.  */
333 #define r_arg1L r22 /* multiplier Low */
336 #define r_arg1HH r25 /* multiplier High */
339 #define r_arg2L r18 /* multiplicand Low */
342 #define r_arg2HH r21 /* multiplicand High */
344 #define r_resL r26 /* result Low */
347 #define r_resHH r31 /* result High */
353 #if defined (__AVR_HAVE_MUL__)
/* Cross partial products; each MUL leaves its 16-bit product in
   r1:r0, which is accumulated into the result bytes (accumulation
   lines elided in this view).  */
358 mul r_arg1HL, r_arg2L
361 mul r_arg1L, r_arg2HL
364 mul r_arg1HH, r_arg2L
366 mul r_arg1HL, r_arg2H
368 mul r_arg1H, r_arg2HL
370 mul r_arg1L, r_arg2HH
372 clr r_arg1HH ; use instead of __zero_reg__ to add carry
376 adc r_resHH, r_arg1HH ; add carry
380 adc r_resHH, r_arg1HH ; add carry
382 movw r_arg1HL, r_resHL
383 clr r1 ; __zero_reg__ clobbered by "mul"
/* Fallback: 32-bit shift-and-add, mirroring __mulhi3.  */
386 clr r_resHH ; result = 0
387 clr r_resHL ; clear result
388 clr r_resH ; clear result
389 clr r_resL ; clear result
393 add r_resL,r_arg2L ; result += multiplicand
398 add r_arg2L,r_arg2L ; multiplicand <<= 1 (32-bit shift)
400 adc r_arg2HL,r_arg2HL
401 adc r_arg2HH,r_arg2HH
403 lsr r_arg1HH ; shift next LSB of multiplier into carry
410 brne __mulsi3_loop ; keep looping while multiplier != 0
412 mov_h r_arg1HH,r_resHH ; move result into return registers
413 mov_l r_arg1HL,r_resHL
417 #endif /* defined (__AVR_HAVE_MUL__) */
435 #endif /* defined (L_mulsi3) */
437 /*******************************************************
438 Division 8 / 8 => (result + remainder)
439 *******************************************************/
440 #define r_rem r25 /* remainder */
441 #define r_arg1 r24 /* dividend, quotient */
442 #define r_arg2 r22 /* divisor */
443 #define r_cnt r23 /* loop count */
/* __udivmodqi4: unsigned restoring division.  Quotient bits are
   accumulated (complemented) in r24 as the dividend shifts out;
   the final COM fixes the complemented carry bits.  */
445 #if defined (L_udivmodqi4)
449 sub r_rem,r_rem ; clear remainder and carry
450 ldi r_cnt,9 ; 8 bits + 1 priming iteration
451 rjmp __udivmodqi4_ep ; jump to entry point
453 rol r_rem ; shift dividend bit into remainder
454 cp r_rem,r_arg2 ; compare remainder & divisor
455 brcs __udivmodqi4_ep ; remainder < divisor: skip subtraction
456 sub r_rem,r_arg2 ; remainder -= divisor
458 rol r_arg1 ; shift dividend (with CARRY = quotient bit)
459 dec r_cnt ; decrement loop counter
460 brne __udivmodqi4_loop
461 com r_arg1 ; complement result
462 ; because C flag was complemented in loop
465 #endif /* defined (L_udivmodqi4) */
/* __divmodqi4: signed wrapper.  T flag holds the dividend's sign,
   r0 bit 7 the sign of the quotient; operands are negated as needed
   before calling the unsigned routine, results fixed up after.  */
467 #if defined (L_divmodqi4)
471 bst r_arg1,7 ; store sign of dividend in T
472 mov __tmp_reg__,r_arg1
473 eor __tmp_reg__,r_arg2; r0.7 is sign of result
475 neg r_arg1 ; dividend negative : negate
477 neg r_arg2 ; divisor negative : negate
478 rcall __udivmodqi4 ; do the unsigned div/mod
480 neg r_rem ; correct remainder sign
483 neg r_arg1 ; correct result sign
487 #endif /* defined (L_divmodqi4) */
495 /*******************************************************
496 Division 16 / 16 => (result + remainder)
497 *******************************************************/
498 #define r_remL r26 /* remainder Low */
499 #define r_remH r27 /* remainder High */
501 /* return: remainder */
502 #define r_arg1L r24 /* dividend Low */
503 #define r_arg1H r25 /* dividend High */
505 /* return: quotient */
506 #define r_arg2L r22 /* divisor Low */
507 #define r_arg2H r23 /* divisor High */
509 #define r_cnt r21 /* loop count */
/* __udivmodhi4: 16-bit unsigned restoring division; same scheme as
   __udivmodqi4 widened to register pairs.  */
511 #if defined (L_udivmodhi4)
516 sub r_remH,r_remH ; clear remainder and carry
517 ldi r_cnt,17 ; 16 bits + 1 priming iteration
518 rjmp __udivmodhi4_ep ; jump to entry point
520 rol r_remL ; shift dividend bit into remainder
522 cp r_remL,r_arg2L ; compare remainder & divisor
524 brcs __udivmodhi4_ep ; remainder < divisor: skip subtraction
525 sub r_remL,r_arg2L ; remainder -= divisor
528 rol r_arg1L ; shift dividend (with CARRY = quotient bit)
530 dec r_cnt ; decrement loop counter
531 brne __udivmodhi4_loop
534 ; div/mod results to return registers, as for the div() function
535 mov_l r_arg2L, r_arg1L ; quotient
536 mov_h r_arg2H, r_arg1H
537 mov_l r_arg1L, r_remL ; remainder
538 mov_h r_arg1H, r_remH
541 #endif /* defined (L_udivmodhi4) */
/* __divmodhi4: signed wrapper; sign handling mirrors __divmodqi4,
   with the negations done by small helper subroutines.  */
543 #if defined (L_divmodhi4)
549 bst r_arg1H,7 ; store sign of dividend in T
550 mov __tmp_reg__,r_arg1H
551 eor __tmp_reg__,r_arg2H ; r0.7 is sign of result
552 rcall __divmodhi4_neg1 ; dividend negative : negate
554 rcall __divmodhi4_neg2 ; divisor negative : negate
555 rcall __udivmodhi4 ; do the unsigned div/mod
556 rcall __divmodhi4_neg1 ; correct remainder sign
558 brpl __divmodhi4_exit
561 neg r_arg2L ; correct divisor/result sign
566 brtc __divmodhi4_exit
568 neg r_arg1L ; correct dividend/remainder sign
572 #endif /* defined (L_divmodhi4) */
585 /*******************************************************
586 Division 32 / 32 => (result + remainder)
587 *******************************************************/
588 #define r_remHH r31 /* remainder High */
591 #define r_remL r26 /* remainder Low */
593 /* return: remainder */
594 #define r_arg1HH r25 /* dividend High */
597 #define r_arg1L r22 /* dividend Low */
599 /* return: quotient */
600 #define r_arg2HH r21 /* divisor High */
603 #define r_arg2L r18 /* divisor Low */
605 #define r_cnt __zero_reg__ /* loop count (0 after the loop!) */
/* __udivmodsi4: 32-bit unsigned restoring division.  Reuses
   __zero_reg__ as the loop counter; it counts down to 0, so the
   zero register is automatically valid again after the loop.  */
607 #if defined (L_udivmodsi4)
611 ldi r_remL, 33 ; 32 bits + 1 priming iteration
614 sub r_remH,r_remH ; clear remainder and carry
615 mov_l r_remHL, r_remL
616 mov_h r_remHH, r_remH
617 rjmp __udivmodsi4_ep ; jump to entry point
619 rol r_remL ; shift dividend bit into remainder
623 cp r_remL,r_arg2L ; compare remainder & divisor
627 brcs __udivmodsi4_ep ; remainder < divisor: skip subtraction
628 sub r_remL,r_arg2L ; remainder -= divisor
633 rol r_arg1L ; shift dividend (with CARRY = quotient bit)
637 dec r_cnt ; decrement loop counter
638 brne __udivmodsi4_loop
639 ; __zero_reg__ now restored (r_cnt == 0)
644 ; div/mod results to return registers, as for the ldiv() function
645 mov_l r_arg2L, r_arg1L ; quotient
646 mov_h r_arg2H, r_arg1H
647 mov_l r_arg2HL, r_arg1HL
648 mov_h r_arg2HH, r_arg1HH
649 mov_l r_arg1L, r_remL ; remainder
650 mov_h r_arg1H, r_remH
651 mov_l r_arg1HL, r_remHL
652 mov_h r_arg1HH, r_remHH
655 #endif /* defined (L_udivmodsi4) */
/* __divmodsi4: signed wrapper; sign handling mirrors __divmodhi4.  */
657 #if defined (L_divmodsi4)
661 bst r_arg1HH,7 ; store sign of dividend in T
662 mov __tmp_reg__,r_arg1HH
663 eor __tmp_reg__,r_arg2HH ; r0.7 is sign of result
664 rcall __divmodsi4_neg1 ; dividend negative : negate
666 rcall __divmodsi4_neg2 ; divisor negative : negate
667 rcall __udivmodsi4 ; do the unsigned div/mod
668 rcall __divmodsi4_neg1 ; correct remainder sign
670 brcc __divmodsi4_exit
675 neg r_arg2L ; correct divisor/quotient sign
682 brtc __divmodsi4_exit
686 neg r_arg1L ; correct dividend/remainder sign
692 #endif /* defined (L_divmodsi4) */
694 /**********************************
695 * This is a prologue subroutine
696 **********************************/
/* __prologue_saves__ / __epilogue_restores__: out-of-line register
   save/restore sequences called from function prologues/epilogues
   (-mcall-prologues style).  SREG is saved around what is presumably
   an interrupt-atomic stack-pointer update -- bodies are largely not
   visible in this view; confirm against the full file.  */
697 #if defined (L_prologue)
699 .global __prologue_saves__
700 .func __prologue_saves__
724 in __tmp_reg__,__SREG__ ; save status register (incl. I flag)
727 out __SREG__,__tmp_reg__ ; restore status register
729 #if defined (__AVR_HAVE_EIJMP_EICALL__)
736 #endif /* defined (L_prologue) */
739 * This is an epilogue subroutine
741 #if defined (L_epilogue)
743 .global __epilogue_restores__
744 .func __epilogue_restores__
745 __epilogue_restores__:
766 in __tmp_reg__,__SREG__ ; save status register (incl. I flag)
769 out __SREG__,__tmp_reg__ ; restore status register
775 #endif /* defined (L_epilogue) */
/* Shutdown code: .fini9 runs first, user .fini8..1 code is linked in
   between, and .fini0 ends the program.  */
778 .section .fini9,"ax",@progbits
786 /* Code from .fini8 ... .fini1 sections inserted by ld script. */
788 .section .fini0,"ax",@progbits
792 #endif /* defined (L_exit) */
800 #endif /* defined (L_cleanup) */
/* __tablejump__/__tablejump2__: indirect jump through a flash-resident
   jump table addressed via Z (bodies not visible in this view).  */
803 .global __tablejump2__
808 .global __tablejump__
810 #if defined (__AVR_HAVE_LPMX__)
815 #if defined (__AVR_HAVE_EIJMP_EICALL__)
827 #if defined (__AVR_HAVE_EIJMP_EICALL__)
833 #endif /* defined (L_tablejump) */
/* __do_copy_data: startup code that copies the initialized .data
   image from flash (__data_load_start) to RAM (__data_start ..
   __data_end).  X = r27:r26 is the RAM destination, Z = r31:r30 the
   flash source; r17 caches hi8(__data_end) for the end-of-loop
   compare; r16 holds the high flash-address byte for ELPM parts.
   Three variants, chosen by the flash-read instructions available.  */
836 .section .init4,"ax",@progbits
837 .global __do_copy_data
839 #if defined(__AVR_HAVE_ELPMX__)
840 ldi r17, hi8(__data_end)
841 ldi r26, lo8(__data_start)
842 ldi r27, hi8(__data_start)
843 ldi r30, lo8(__data_load_start)
844 ldi r31, hi8(__data_load_start)
845 ldi r16, hh8(__data_load_start)
847 rjmp .L__do_copy_data_start
848 .L__do_copy_data_loop:
851 .L__do_copy_data_start:
852 cpi r26, lo8(__data_end) ; reached end of .data?
854 brne .L__do_copy_data_loop
855 #elif !defined(__AVR_HAVE_ELPMX__) && defined(__AVR_HAVE_ELPM__)
/* ELPM without post-increment: RAMPZ:Z must be advanced manually,
   with a carry path to bump the high byte.  */
856 ldi r17, hi8(__data_end)
857 ldi r26, lo8(__data_start)
858 ldi r27, hi8(__data_start)
859 ldi r30, lo8(__data_load_start)
860 ldi r31, hi8(__data_load_start)
861 ldi r16, hh8(__data_load_start - 0x10000)
862 .L__do_copy_data_carry:
865 rjmp .L__do_copy_data_start
866 .L__do_copy_data_loop:
870 brcs .L__do_copy_data_carry ; Z wrapped: bump high flash byte
871 .L__do_copy_data_start:
872 cpi r26, lo8(__data_end) ; reached end of .data?
874 brne .L__do_copy_data_loop
875 #elif !defined(__AVR_HAVE_ELPMX__) && !defined(__AVR_HAVE_ELPM__)
/* Plain LPM parts: flash image must be within the first 64 KiB.  */
876 ldi r17, hi8(__data_end)
877 ldi r26, lo8(__data_start)
878 ldi r27, hi8(__data_start)
879 ldi r30, lo8(__data_load_start)
880 ldi r31, hi8(__data_load_start)
881 rjmp .L__do_copy_data_start
882 .L__do_copy_data_loop:
883 #if defined (__AVR_HAVE_LPMX__)
890 .L__do_copy_data_start:
891 cpi r26, lo8(__data_end) ; reached end of .data?
893 brne .L__do_copy_data_loop
894 #endif /* !defined(__AVR_HAVE_ELPMX__) && !defined(__AVR_HAVE_ELPM__) */
895 #endif /* L_copy_data */
897 /* __do_clear_bss is only necessary if there is anything in .bss section. */
/* Startup code: zero-fill __bss_start .. __bss_end.  X = r27:r26 is
   the write pointer; r17 caches hi8(__bss_end) for the compare.  */
900 .section .init4,"ax",@progbits
901 .global __do_clear_bss
903 ldi r17, hi8(__bss_end)
904 ldi r26, lo8(__bss_start)
905 ldi r27, hi8(__bss_start)
906 rjmp .do_clear_bss_start
910 cpi r26, lo8(__bss_end) ; reached end of .bss?
912 brne .do_clear_bss_loop
913 #endif /* L_clear_bss */
915 /* __do_global_ctors and __do_global_dtors are only necessary
916 if there are any constructors/destructors. */
/* __do_global_ctors: walk the .ctors table from __ctors_end down to
   __ctors_start, dispatching each entry via the tablejump helper.
   Y = r29:r28 is the table pointer; on RAMPZ parts r16 carries the
   high flash-address byte and the 24-bit pointer is decremented with
   an sbc against __zero_reg__.  */
919 .section .init6,"ax",@progbits
920 .global __do_global_ctors
921 #if defined(__AVR_HAVE_RAMPZ__)
923 ldi r17, hi8(__ctors_start)
924 ldi r28, lo8(__ctors_end)
925 ldi r29, hi8(__ctors_end)
926 ldi r16, hh8(__ctors_end)
927 rjmp .L__do_global_ctors_start
928 .L__do_global_ctors_loop:
930 sbc r16, __zero_reg__ ; propagate borrow into high flash byte
934 XCALL __tablejump_elpm__
935 .L__do_global_ctors_start:
936 cpi r28, lo8(__ctors_start) ; reached start of table?
938 ldi r24, hh8(__ctors_start)
940 brne .L__do_global_ctors_loop
/* Non-RAMPZ variant: 16-bit table pointer only.  */
943 ldi r17, hi8(__ctors_start)
944 ldi r28, lo8(__ctors_end)
945 ldi r29, hi8(__ctors_end)
946 rjmp .L__do_global_ctors_start
947 .L__do_global_ctors_loop:
952 .L__do_global_ctors_start:
953 cpi r28, lo8(__ctors_start) ; reached start of table?
955 brne .L__do_global_ctors_loop
956 #endif /* defined(__AVR_HAVE_RAMPZ__) */
/* __do_global_dtors: same walk over .dtors, but from __dtors_start
   up to __dtors_end.  */
960 .section .fini6,"ax",@progbits
961 .global __do_global_dtors
962 #if defined(__AVR_HAVE_RAMPZ__)
964 ldi r17, hi8(__dtors_end)
965 ldi r28, lo8(__dtors_start)
966 ldi r29, hi8(__dtors_start)
967 ldi r16, hh8(__dtors_start)
968 rjmp .L__do_global_dtors_start
969 .L__do_global_dtors_loop:
971 sbc r16, __zero_reg__ ; propagate borrow into high flash byte
975 XCALL __tablejump_elpm__
976 .L__do_global_dtors_start:
977 cpi r28, lo8(__dtors_end) ; reached end of table?
979 ldi r24, hh8(__dtors_end)
981 brne .L__do_global_dtors_loop
/* Non-RAMPZ variant: 16-bit table pointer only.  */
984 ldi r17, hi8(__dtors_end)
985 ldi r28, lo8(__dtors_start)
986 ldi r29, hi8(__dtors_start)
987 rjmp .L__do_global_dtors_start
988 .L__do_global_dtors_loop:
993 .L__do_global_dtors_start:
994 cpi r28, lo8(__dtors_end) ; reached end of table?
996 brne .L__do_global_dtors_loop
997 #endif /* defined(__AVR_HAVE_RAMPZ__) */
/* __tablejump_elpm__: like __tablejump__ but reads the jump-table
   entry from extended flash with ELPM (RAMPZ:Z addressing), then
   jumps to it.  Much of the body is not visible in this view.  */
1000 #ifdef L_tablejump_elpm
1001 .global __tablejump_elpm__
1002 .func __tablejump_elpm__
1004 #if defined (__AVR_HAVE_ELPM__)
1005 #if defined (__AVR_HAVE_LPMX__)
1006 elpm __tmp_reg__, Z+ ; fetch low byte of target address
1008 mov r30, __tmp_reg__ ; assemble target address in Z
1009 #if defined (__AVR_HAVE_EIJMP_EICALL__)
1021 #if defined (__AVR_HAVE_EIJMP_EICALL__)
1026 #endif /* defined (__AVR_HAVE_ELPM__) */
1028 #endif /* defined (L_tablejump_elpm) */
1031 /**********************************
1032 * Find first set Bit (ffs)
1033 **********************************/
1035 #if defined (L_ffssi2)
1036 ;; find first set bit
1037 ;; r25:r24 = ffs32 (r25:r22)
1038 ;; clobbers: r22, r26
1056 #endif /* defined (L_ffssi2) */
1058 #if defined (L_ffshi2)
1059 ;; find first set bit
1060 ;; r25:r24 = ffs16 (r25:r24)
1064 #ifdef __AVR_HAVE_JMP_CALL__
1065 ;; Some cores have a problem skipping a 2-word instruction
1069 cpse r24, __zero_reg__ ; skip next insn if low byte is zero
1070 #endif /* __AVR_HAVE_JMP_CALL__ */
1071 1: XJMP __loop_ffsqi2
1077 #endif /* defined (L_ffshi2) */
1079 #if defined (L_loop_ffsqi2)
1080 ;; Helper for ffshi2, ffssi2
1081 ;; r25:r24 = r26 + zero_extend16 (ffs8(r24))
1092 #endif /* defined (L_loop_ffsqi2) */
1095 /**********************************
1096 * Count trailing Zeros (ctz)
1097 **********************************/
1099 #if defined (L_ctzsi2)
1100 ;; count trailing zeros
1101 ;; r25:r24 = ctz32 (r25:r22)
1102 ;; clobbers: r26, r22
1104 ;; Note that ctz(0) is undefined for GCC
1110 #endif /* defined (L_ctzsi2) */
1112 #if defined (L_ctzhi2)
1113 ;; count trailing zeros
1114 ;; r25:r24 = ctz16 (r25:r24)
1117 ;; Note that ctz(0) is undefined for GCC
1123 #endif /* defined (L_ctzhi2) */
1126 /**********************************
1127 * Count leading Zeros (clz)
1128 **********************************/
1130 #if defined (L_clzdi2)
1131 ;; count leading zeros
1132 ;; r25:r24 = clz64 (r25:r18)
1133 ;; clobbers: r22, r23, r26
1146 #endif /* defined (L_clzdi2) */
1148 #if defined (L_clzsi2)
1149 ;; count leading zeros
1150 ;; r25:r24 = clz32 (r25:r22)
1162 #endif /* defined (L_clzsi2) */
1164 #if defined (L_clzhi2)
1165 ;; count leading zeros
1166 ;; r25:r24 = clz16 (r25:r24)
1188 #endif /* defined (L_clzhi2) */
1191 /**********************************
 * Parity
1193 **********************************/
1195 #if defined (L_paritydi2)
1196 ;; r25:r24 = parity64 (r25:r18)
1197 ;; clobbers: __tmp_reg__
1205 #endif /* defined (L_paritydi2) */
1207 #if defined (L_paritysi2)
1208 ;; r25:r24 = parity32 (r25:r22)
1209 ;; clobbers: __tmp_reg__
1215 #endif /* defined (L_paritysi2) */
1217 #if defined (L_parityhi2)
1218 ;; r25:r24 = parity16 (r25:r24)
1219 ;; clobbers: __tmp_reg__
/* 8-bit core: fold the byte onto itself with shrinking shifts until
   the parity ends up in bit 0 of r24.  */
1225 ;; r25:r24 = parity8 (r24)
1226 ;; clobbers: __tmp_reg__
1228 ;; parity is in r24[0..7]
1229 mov __tmp_reg__, r24
1231 eor r24, __tmp_reg__ ; fold: XOR halves together
1232 ;; parity is in r24[0..3]
1236 ;; parity is in r24[0,3]
1239 ;; parity is in r24[0]
1244 #endif /* defined (L_parityhi2) */
1247 /**********************************
 * Population count
1249 **********************************/
1251 #if defined (L_popcounthi2)
1253 ;; r25:r24 = popcount16 (r25:r24)
1254 ;; clobbers: __tmp_reg__
/* Shared tail: add the second byte's count (in __tmp_reg__) and
   zero-extend the 8-bit sum to 16 bits.  */
1264 DEFUN __popcounthi2_tail
1266 add r24, __tmp_reg__
1268 ENDF __popcounthi2_tail
1269 #endif /* defined (L_popcounthi2) */
1271 #if defined (L_popcountsi2)
1273 ;; r25:r24 = popcount32 (r25:r22)
1274 ;; clobbers: __tmp_reg__
1281 XJMP __popcounthi2_tail
1283 #endif /* defined (L_popcountsi2) */
1285 #if defined (L_popcountdi2)
1287 ;; r25:r24 = popcount64 (r25:r18)
1288 ;; clobbers: r22, r23, __tmp_reg__
1297 XJMP __popcounthi2_tail
1299 #endif /* defined (L_popcountdi2) */
1301 #if defined (L_popcountqi2)
1303 ;; r24 = popcount8 (r24)
1304 ;; clobbers: __tmp_reg__
/* Count bits by repeatedly shifting r24 and accumulating the carry
   (the shift instructions between these adcs are elided here).  */
1306 mov __tmp_reg__, r24
1310 adc r24, __zero_reg__ ; accumulate next carried-out bit
1312 adc r24, __zero_reg__
1314 adc r24, __zero_reg__
1316 adc r24, __zero_reg__
1318 adc r24, __zero_reg__
1320 adc r24, __tmp_reg__
1323 #endif /* defined (L_popcountqi2) */
1326 /**********************************
 * Byte swap and 64-bit shifts
1328 **********************************/
1330 ;; swap two registers with different register number
1337 #if defined (L_bswapsi2)
1339 ;; r25:r22 = bswap32 (r25:r22)
1345 #endif /* defined (L_bswapsi2) */
1347 #if defined (L_bswapdi2)
1349 ;; r25:r18 = bswap64 (r25:r18)
1357 #endif /* defined (L_bswapdi2) */
1360 /**********************************
 * 64-bit shifts
1362 **********************************/
1364 #if defined (L_ashrdi3)
1365 ;; Arithmetic shift right
1366 ;; r25:r18 = ashr64 (r25:r18, r17:r16)
1384 #endif /* defined (L_ashrdi3) */
1386 #if defined (L_lshrdi3)
1387 ;; Logic shift right
1388 ;; r25:r18 = lshr64 (r25:r18, r17:r16)
1406 #endif /* defined (L_lshrdi3) */
1408 #if defined (L_ashldi3)
 ;; Arithmetic shift left
1410 ;; r25:r18 = ashl64 (r25:r18, r17:r16)
1428 #endif /* defined (L_ashldi3) */
1431 /***********************************************************/
1432 ;;; Softmul versions of FMUL, FMULS and FMULSU to implement
1433 ;;; __builtin_avr_fmul* if !AVR_HAVE_MUL
1434 /***********************************************************/
/* Fixed-point fractional multiplies (1.7 x 1.7 -> 1.15 format, as in
   the hardware FMUL family).  A0 records whether the signed variants
   must negate the product.  Bodies continue past this view.  */
1440 #define A0 __tmp_reg__
1443 ;;; r23:r22 = fmuls (r24, r25) like in FMULS instruction
1444 ;;; Clobbers: r24, r25, __tmp_reg__
1446 ;; A0.7 = negate result?
1454 #endif /* L_fmuls */
1457 ;;; r23:r22 = fmulsu (r24, r25) like in FMULSU instruction
1458 ;;; Clobbers: r24, r25, __tmp_reg__
1460 ;; A0.7 = negate result?
1465 ;; Helper for __fmuls and __fmulsu
1470 #ifdef __AVR_HAVE_JMP_CALL__
1471 ;; Some cores have a problem skipping a 2-word instruction
1476 #endif /* __AVR_HAVE_JMP_CALL__ */
1479 ;; C = -C iff A0.7 = 1
1485 #endif /* L_fmulsu */
1489 ;;; r22:r23 = fmul (r24, r25) like in FMUL instruction
1490 ;;; Clobbers: r24, r25, __tmp_reg__
1497 ;; 1.0 = 0x80, so test for bit 7 of B to see if A must to be added to C.