1 /* ARM EABI compliant unwinding routines.
2 Copyright (C) 2004, 2005, 2009 Free Software Foundation, Inc.
3 Contributed by Paul Brook
5 This file is free software; you can redistribute it and/or modify it
6 under the terms of the GNU General Public License as published by the
7 Free Software Foundation; either version 3, or (at your option) any later version.
10 This file is distributed in the hope that it will be useful, but
11 WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 General Public License for more details.
15 Under Section 7 of GPL version 3, you are granted additional
16 permissions described in the GCC Runtime Library Exception, version
17 3.1, as published by the Free Software Foundation.
19 You should have received a copy of the GNU General Public License and
20 a copy of the GCC Runtime Library Exception along with this program;
21 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
22 <http://www.gnu.org/licenses/>. */
26 /* We add a prototype for abort here to avoid creating a dependency on
   libc (comment truncated in this dump). */
28 extern void abort (void);
30 /* Definitions for C++ runtime support routines. We make these weak
31 declarations to avoid pulling in libsupc++ unnecessarily. */
/* NOTE(review): the embedded numbering jumps (32 -> 34, 35 -> 39, 46 -> 48),
   so lines are missing here: the enum's opening brace and first enumerators,
   at minimum.  Recover the complete text from upstream before editing.  */
32 typedef unsigned char bool;
34 typedef struct _ZSt9type_info type_info; /* This names C++ type_info type */
/* Result codes for __cxa_type_match: only the value-2 enumerator is visible
   in this dump; ctm_failed / ctm_succeeded presumably precede it — verify. */
35 enum __cxa_type_match_result
39 ctm_succeeded_with_ptr_to_base = 2
/* Weak references into the C++ runtime: if libsupc++ is not linked in, these
   resolve to null and the unwinder must not call them. */
42 void __attribute__((weak)) __cxa_call_unexpected(_Unwind_Control_Block *ucbp);
43 bool __attribute__((weak)) __cxa_begin_cleanup(_Unwind_Control_Block *ucbp);
44 enum __cxa_type_match_result __attribute__((weak)) __cxa_type_match
45 (_Unwind_Control_Block *ucbp, const type_info *rttip,
46 bool is_reference, void **matched_object);
/* Weak hook letting the runtime override exception-index-table lookup
   (used below in get_eit_entry when non-null). */
48 _Unwind_Ptr __attribute__((weak))
49 __gnu_Unwind_Find_exidx (_Unwind_Ptr, int *);
/* EXIDX_CANTUNWIND marks an index-table entry whose frame cannot be unwound
   (tested against eitp->content in get_eit_entry below). */
57 #define EXIDX_CANTUNWIND 1
58 #define uint32_highbit (((_uw) 1) << 31)
/* The ARM unwinder is allowed four private words in the UCB
   (unwinder_cache.reserved1..4); these macros name their uses. */
60 #define UCB_FORCED_STOP_FN(ucbp) ((ucbp)->unwinder_cache.reserved1)
61 #define UCB_PR_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved2)
62 #define UCB_SAVED_CALLSITE_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved3)
63 #define UCB_FORCED_STOP_ARG(ucbp) ((ucbp)->unwinder_cache.reserved4)
/* NOTE(review): this whole region is heavily gapped in the dump (numbering
   jumps 71 -> 80, 81 -> 105, 125 -> 130, 138 -> 141, 154 -> 158): the struct
   declarations for core_regs, vfp_regs, vfpv3_regs, wmmxd_regs, wmmxc_regs,
   the phase1_vrs/phase2_vrs headers and all closing braces are missing.
   Do not hand-edit; restore the complete structs from upstream first.  */
70 /* We use normal integer types here to avoid the compiler generating
71 coprocessor instructions. */
80 /* Always populated via VSTM, so no need for the "pad" field from
81 vfp_regs (which is used to store the format word for FSTMX). */
105 /* Unwind descriptors. */
119 /* The ABI specifies that the unwind routines may only use core registers,
120 except when actually manipulating coprocessor state. This allows
121 us to write one implementation that works on all platforms by
122 demand-saving coprocessor registers.
124 During unwinding we hold the coprocessor state in the actual hardware
125 registers and allocate demand-save areas for use during phase1
/* Fields below belong to phase1_vrs (header line missing from dump). */
130 /* The first fields must be the same as a phase2_vrs. */
131 _uw demand_save_flags;
132 struct core_regs core;
133 _uw prev_sp; /* Only valid during forced unwinding. */
135 struct vfpv3_regs vfp_regs_16_to_31;
137 struct wmmxd_regs wmmxd;
138 struct wmmxc_regs wmmxc;
/* Bits of demand_save_flags; note inverted sense for the "saved" bits:
   a SET bit means the state has NOT yet been demand-saved. */
141 #define DEMAND_SAVE_VFP 1 /* VFP state has been saved if not set */
142 #define DEMAND_SAVE_VFP_D 2 /* VFP state is for FLDMD/FSTMD if set */
143 #define DEMAND_SAVE_VFP_V3 4 /* VFPv3 state for regs 16 .. 31 has
144 been saved if not set */
145 #define DEMAND_SAVE_WMMXD 8 /* iWMMXt data registers have been
147 #define DEMAND_SAVE_WMMXC 16 /* iWMMXt control registers have been
/* Fields below belong to phase2_vrs (header line missing from dump). */
150 /* This must match the structure created by the assembly wrappers. */
153 _uw demand_save_flags;
154 struct core_regs core;
158 /* An exception index table entry. */
/* Body (fnoffset/content fields) missing from this dump. */
160 typedef struct __EIT_entry
166 /* Assembly helper functions. */
168 /* Restore core register state. Never returns. */
169 void __attribute__((noreturn)) restore_core_regs (struct core_regs *);
172 /* Coprocessor register state manipulation functions. */
174 /* Routines for FLDMX/FSTMX format... */
175 void __gnu_Unwind_Save_VFP (struct vfp_regs * p);
176 void __gnu_Unwind_Restore_VFP (struct vfp_regs * p);
177 void __gnu_Unwind_Save_WMMXD (struct wmmxd_regs * p);
178 void __gnu_Unwind_Restore_WMMXD (struct wmmxd_regs * p);
179 void __gnu_Unwind_Save_WMMXC (struct wmmxc_regs * p);
180 void __gnu_Unwind_Restore_WMMXC (struct wmmxc_regs * p);
182 /* ...and those for FLDMD/FSTMD format... */
183 void __gnu_Unwind_Save_VFP_D (struct vfp_regs * p);
184 void __gnu_Unwind_Restore_VFP_D (struct vfp_regs * p);
/* Comment below is truncated in this dump — presumably "only registers
   16 through 31" per the function names; confirm against upstream. */
186 /* ...and those for VLDM/VSTM format, saving/restoring only registers
188 void __gnu_Unwind_Save_VFP_D_16_to_31 (struct vfpv3_regs * p);
189 void __gnu_Unwind_Restore_VFP_D_16_to_31 (struct vfpv3_regs * p);
191 /* Restore coprocessor state after phase1 unwinding. */
/* NOTE(review): the return-type line, opening/closing braces and the `else`
   keyword between the two VFP restores are missing from this dump
   (numbering jumps 193 -> 195, 198 -> 200).  Reads each demand-save flag:
   a CLEAR "saved" bit means the state was demand-saved and must be
   restored; DEMAND_SAVE_VFP_D selects FLDMD vs FLDMX format.  */
193 restore_non_core_regs (phase1_vrs * vrs)
195 if ((vrs->demand_save_flags & DEMAND_SAVE_VFP) == 0)
197 if (vrs->demand_save_flags & DEMAND_SAVE_VFP_D)
198 __gnu_Unwind_Restore_VFP_D (&vrs->vfp)
200 __gnu_Unwind_Restore_VFP (&vrs->vfp);
203 if ((vrs->demand_save_flags & DEMAND_SAVE_VFP_V3) == 0)
204 __gnu_Unwind_Restore_VFP_D_16_to_31 (&vrs->vfp_regs_16_to_31);
206 if ((vrs->demand_save_flags & DEMAND_SAVE_WMMXD) == 0)
207 __gnu_Unwind_Restore_WMMXD (&vrs->wmmxd);
208 if ((vrs->demand_save_flags & DEMAND_SAVE_WMMXC) == 0)
209 __gnu_Unwind_Restore_WMMXC (&vrs->wmmxc);
212 /* A better way to do this would probably be to compare the absolute address
213 with a segment relative relocation of the same symbol. */
215 extern int __text_start;
216 extern int __data_start;
218 /* The exception index table location. */
219 extern __EIT_entry __exidx_start;
220 extern __EIT_entry __exidx_end;
222 /* ABI defined personality routines. */
/* NOTE(review): pr0's weak attribute is commented out while pr1/pr2 keep
   theirs — looks like a deliberate local modification (pr0 always linked?),
   but it is inconsistent; confirm intent before touching.  */
223 extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr0 (_Unwind_State,
224 _Unwind_Control_Block *, _Unwind_Context *);// __attribute__((weak));
225 extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr1 (_Unwind_State,
226 _Unwind_Control_Block *, _Unwind_Context *) __attribute__((weak));
227 extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr2 (_Unwind_State,
228 _Unwind_Control_Block *, _Unwind_Context *) __attribute__((weak));
230 /* ABI defined routine to store a virtual register to memory. */
/* NOTE(review): interior lines are missing (numbering jumps 233 -> 235,
   238 -> 243, 246 -> 253): the regno/valuep parameters, braces, and the
   switch over regclass are not visible.  What remains shows: core-register
   reads require _UVRSD_UINT32, copy vrs->core.r[regno] out through valuep,
   unhandled classes return _UVRSR_NOT_IMPLEMENTED, and the fallthrough
   returns _UVRSR_FAILED.  */
232 _Unwind_VRS_Result _Unwind_VRS_Get (_Unwind_Context *context,
233 _Unwind_VRS_RegClass regclass,
235 _Unwind_VRS_DataRepresentation representation,
/* The context is really a phase1_vrs laid out by the assembly wrappers. */
238 phase1_vrs *vrs = (phase1_vrs *) context;
243 if (representation != _UVRSD_UINT32
245 return _UVRSR_FAILED;
246 *(_uw *) valuep = vrs->core.r[regno];
253 return _UVRSR_NOT_IMPLEMENTED;
256 return _UVRSR_FAILED;
261 /* ABI defined function to load a virtual register from memory. */
/* NOTE(review): mirror image of _Unwind_VRS_Get above, with the same lines
   missing from this dump (parameters regno/valuep, braces, regclass
   switch).  Writes *valuep into vrs->core.r[regno] for _UVRSD_UINT32.  */
263 _Unwind_VRS_Result _Unwind_VRS_Set (_Unwind_Context *context,
264 _Unwind_VRS_RegClass regclass,
266 _Unwind_VRS_DataRepresentation representation,
269 phase1_vrs *vrs = (phase1_vrs *) context;
274 if (representation != _UVRSD_UINT32
276 return _UVRSR_FAILED;
278 vrs->core.r[regno] = *(_uw *) valuep;
285 return _UVRSR_NOT_IMPLEMENTED;
288 return _UVRSR_FAILED;
293 /* ABI defined function to pop registers off the stack. */
/* NOTE(review): this dump omits many interior lines of the regclass switch
   (local declarations, braces, case labels, several copy loops).  The
   visible logic: pop core registers by mask; pop VFP/VFPv3 registers by
   (start,count) discriminator with demand-save of the live coprocessor
   state on first touch; pop iWMMXt data/control registers similarly.
   Recover the full text from upstream before modifying.  */
295 _Unwind_VRS_Result _Unwind_VRS_Pop (_Unwind_Context *context,
296 _Unwind_VRS_RegClass regclass,
298 _Unwind_VRS_DataRepresentation representation)
300 phase1_vrs *vrs = (phase1_vrs *) context;
/* --- core registers: discriminator low 16 bits is a register mask --- */
310 if (representation != _UVRSD_UINT32)
311 return _UVRSR_FAILED;
313 mask = discriminator & 0xffff;
314 ptr = (_uw *) vrs->core.r[R_SP];
315 /* Pop the requested registers. */
316 for (i = 0; i < 16; i++)
/* Mask test on bit i presumably guards this load — line missing. */
319 vrs->core.r[i] = *(ptr++);
321 /* Writeback the stack pointer value if it wasn't restored. */
322 if ((mask & (1 << R_SP)) == 0)
323 vrs->core.r[R_SP] = (_uw) ptr;
/* --- VFP/VFPv3 registers: discriminator is (start << 16) | count --- */
329 _uw start = discriminator >> 16;
330 _uw count = discriminator & 0xffff;
332 struct vfpv3_regs tmp_16_to_31;
336 int num_vfpv3_regs = 0;
338 /* We use an approximation here by bounding _UVRSD_DOUBLE
339 register numbers at 32 always, since we can't detect if
340 VFPv3 isn't present (in such a case the upper limit is 16). */
341 if ((representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE)
342 || start + count > (representation == _UVRSD_VFPX ? 16 : 32)
343 || (representation == _UVRSD_VFPX && start >= 16))
344 return _UVRSR_FAILED;
346 /* Check if we're being asked to pop VFPv3-only registers
347 (numbers 16 through 31). */
/* Condition for the first branch (start >= 16) is missing from dump. */
349 num_vfpv3_regs = count;
350 else if (start + count > 16)
351 num_vfpv3_regs = start + count - 16;
353 if (num_vfpv3_regs && representation != _UVRSD_DOUBLE)
354 return _UVRSR_FAILED;
356 /* Demand-save coprocessor registers for stage1. */
357 if (start < 16 && (vrs->demand_save_flags & DEMAND_SAVE_VFP))
359 vrs->demand_save_flags &= ~DEMAND_SAVE_VFP;
361 if (representation == _UVRSD_DOUBLE)
363 /* Save in FLDMD/FSTMD format. */
364 vrs->demand_save_flags |= DEMAND_SAVE_VFP_D;
365 __gnu_Unwind_Save_VFP_D (&vrs->vfp);
369 /* Save in FLDMX/FSTMX format. */
370 vrs->demand_save_flags &= ~DEMAND_SAVE_VFP_D;
371 __gnu_Unwind_Save_VFP (&vrs->vfp);
375 if (num_vfpv3_regs > 0
376 && (vrs->demand_save_flags & DEMAND_SAVE_VFP_V3))
378 vrs->demand_save_flags &= ~DEMAND_SAVE_VFP_V3;
379 __gnu_Unwind_Save_VFP_D_16_to_31 (&vrs->vfp_regs_16_to_31);
382 /* Restore the registers from the stack. Do this by saving the
383 current VFP registers to a memory area, moving the in-memory
384 values into that area, and restoring from the whole area.
385 For _UVRSD_VFPX we assume FSTMX standard format 1. */
386 if (representation == _UVRSD_VFPX)
387 __gnu_Unwind_Save_VFP (&tmp);
390 /* Save registers 0 .. 15 if required. */
392 __gnu_Unwind_Save_VFP_D (&tmp);
394 /* Save VFPv3 registers 16 .. 31 if required. */
396 __gnu_Unwind_Save_VFP_D_16_to_31 (&tmp_16_to_31);
399 /* Work out how many registers below register 16 need popping. */
400 tmp_count = num_vfpv3_regs > 0 ? 16 - start : count;
402 /* Copy registers below 16, if needed.
403 The stack address is only guaranteed to be word aligned, so
404 we can't use doubleword copies. */
405 sp = (_uw *) vrs->core.r[R_SP];
/* The word-by-word copy loop for regs < 16 is missing from this dump. */
409 dest = (_uw *) &tmp.d[start];
414 /* Copy VFPv3 registers numbered >= 16, if needed. */
415 if (num_vfpv3_regs > 0)
417 /* num_vfpv3_regs is needed below, so copy it. */
418 int tmp_count_2 = num_vfpv3_regs * 2;
419 int vfpv3_start = start < 16 ? 16 : start;
421 dest = (_uw *) &tmp_16_to_31.d[vfpv3_start - 16];
/* Loop body (the *dest++ = *sp++ copy) missing from this dump. */
422 while (tmp_count_2--)
426 /* Skip the format word space if using FLDMX/FSTMX format. */
427 if (representation == _UVRSD_VFPX)
430 /* Set the new stack pointer. */
431 vrs->core.r[R_SP] = (_uw) sp;
433 /* Reload the registers. */
434 if (representation == _UVRSD_VFPX)
435 __gnu_Unwind_Restore_VFP (&tmp);
438 /* Restore registers 0 .. 15 if required. */
440 __gnu_Unwind_Restore_VFP_D (&tmp);
442 /* Restore VFPv3 registers 16 .. 31 if required. */
443 if (num_vfpv3_regs > 0)
444 __gnu_Unwind_Restore_VFP_D_16_to_31 (&tmp_16_to_31);
450 return _UVRSR_NOT_IMPLEMENTED;
/* --- iWMMXt data registers (_UVRSD_UINT64, (start << 16) | count) --- */
454 _uw start = discriminator >> 16;
455 _uw count = discriminator & 0xffff;
456 struct wmmxd_regs tmp;
460 if ((representation != _UVRSD_UINT64) || start + count > 16)
461 return _UVRSR_FAILED;
463 if (vrs->demand_save_flags & DEMAND_SAVE_WMMXD)
465 /* Demand-save resisters for stage1. */
466 vrs->demand_save_flags &= ~DEMAND_SAVE_WMMXD;
467 __gnu_Unwind_Save_WMMXD (&vrs->wmmxd);
470 /* Restore the registers from the stack. Do this by saving the
471 current WMMXD registers to a memory area, moving the in-memory
472 values into that area, and restoring from the whole area. */
473 __gnu_Unwind_Save_WMMXD (&tmp);
475 /* The stack address is only guaranteed to be word aligned, so
476 we can't use doubleword copies. */
477 sp = (_uw *) vrs->core.r[R_SP];
478 dest = (_uw *) &tmp.wd[start];
/* Copy loop missing from this dump. */
483 /* Set the new stack pointer. */
484 vrs->core.r[R_SP] = (_uw) sp;
486 /* Reload the registers. */
487 __gnu_Unwind_Restore_WMMXD (&tmp);
/* --- iWMMXt control registers (_UVRSD_UINT32, discriminator = mask) --- */
494 struct wmmxc_regs tmp;
497 if ((representation != _UVRSD_UINT32) || discriminator > 16)
498 return _UVRSR_FAILED;
500 if (vrs->demand_save_flags & DEMAND_SAVE_WMMXC)
502 /* Demand-save resisters for stage1. */
503 vrs->demand_save_flags &= ~DEMAND_SAVE_WMMXC;
504 __gnu_Unwind_Save_WMMXC (&vrs->wmmxc);
507 /* Restore the registers from the stack. Do this by saving the
508 current WMMXC registers to a memory area, moving the in-memory
509 values into that area, and restoring from the whole area. */
510 __gnu_Unwind_Save_WMMXC (&tmp);
512 sp = (_uw *) vrs->core.r[R_SP];
513 for (i = 0; i < 4; i++)
/* Body of the masked copy (tmp.wc[i] = *(sp++)?) missing from dump. */
514 if (discriminator & (1 << i))
517 /* Set the new stack pointer. */
518 vrs->core.r[R_SP] = (_uw) sp;
520 /* Reload the registers. */
521 __gnu_Unwind_Restore_WMMXC (&tmp);
526 return _UVRSR_FAILED;
531 /* Core unwinding functions. */
533 /* Calculate the address encoded by a 31-bit self-relative offset at address
   P (comment truncated in this dump). */
/* NOTE(review): the return-type line, braces, the declaration/load of
   `offset` (= *p), and the sign-extension `if` body are missing from this
   dump.  Visible logic: bit 30 decides the sign; the top bit is cleared for
   positive offsets; result is offset plus the address of the word itself. */
536 selfrel_offset31 (const _uw *p)
541 /* Sign extend to 32 bits. */
542 if (offset & (1 << 30))
545 offset &= ~(1u << 31);
547 return offset + (_uw) p;
551 /* Perform a binary search for RETURN_ADDRESS in TABLE. The table contains
   NREC entries (comment truncated in this dump).  Returns the matching
   entry or NULL.  */
/* NOTE(review): braces, the left/right variable setup, the nrec==0 guard
   condition, and the loop/narrowing statements are missing from this dump
   (numbering jumps 555 -> 562 -> 569).  Each entry covers addresses from
   its own fnoffset up to (but not including) the next entry's fnoffset;
   the last entry extends to the top of the address space.  */
554 static const __EIT_entry *
555 search_EIT_table (const __EIT_entry * table, int nrec, _uw return_address)
562 return (__EIT_entry *) 0;
569 n = (left + right) / 2;
570 this_fn = selfrel_offset31 (&table[n].fnoffset);
/* Guard `if (n != nrec - 1)` presumably precedes this — line missing. */
572 next_fn = selfrel_offset31 (&table[n + 1].fnoffset) - 1;
574 next_fn = (_uw)0 - 1;
576 if (return_address < this_fn)
/* Left-boundary failure: address precedes the first covered function. */
579 return (__EIT_entry *) 0;
582 else if (return_address <= next_fn)
589 /* Find the exception index table eintry for the given address.
590 Fill in the relevant fields of the UCB.
591 Returns _URC_FAILURE if an error occurred, _URC_OK on success. */
/* NOTE(review): braces, several locals (nrec), the `- 2` adjustment of
   return_address described below, failure returns after the null-eitp
   checks, and the idx dispatch conditions are missing from this dump. */
593 static _Unwind_Reason_Code
594 get_eit_entry (_Unwind_Control_Block *ucbp, _uw return_address)
596 const __EIT_entry * eitp;
599 /* The return address is the address of the instruction following the
600 call instruction (plus one in thumb mode). If this was the last
601 instruction in the function the address will lie in the following
602 function. Subtract 2 from the address so that it points within the call
603 instruction itself. */
/* Weak symbol: non-null only if the runtime provides its own lookup. */
606 if (__gnu_Unwind_Find_exidx)
608 eitp = (const __EIT_entry *) __gnu_Unwind_Find_exidx (return_address,
612 UCB_PR_ADDR (ucbp) = 0;
/* Fallback: use the linker-provided __exidx_start/__exidx_end table. */
618 eitp = &__exidx_start;
619 nrec = &__exidx_end - &__exidx_start;
622 eitp = search_EIT_table (eitp, nrec, return_address);
/* Null-result check presumably guards this — line missing from dump. */
626 UCB_PR_ADDR (ucbp) = 0;
629 ucbp->pr_cache.fnstart = selfrel_offset31 (&eitp->fnoffset);
631 /* Can this frame be unwound at all? */
632 if (eitp->content == EXIDX_CANTUNWIND)
634 UCB_PR_ADDR (ucbp) = 0;
635 return _URC_END_OF_STACK;
638 /* Obtain the address of the "real" __EHT_Header word. */
640 if (eitp->content & uint32_highbit)
642 /* It is immediate data. */
643 ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)&eitp->content;
644 ucbp->pr_cache.additional = 1;
648 /* The low 31 bits of the content field are a self-relative
649 offset to an _Unwind_EHT_Entry structure. */
650 ucbp->pr_cache.ehtp =
651 (_Unwind_EHT_Header *) selfrel_offset31 (&eitp->content);
652 ucbp->pr_cache.additional = 0;
655 /* Discover the personality routine address. */
656 if (*ucbp->pr_cache.ehtp & (1u << 31))
658 /* One of the predefined standard routines. */
659 _uw idx = (*(_uw *) ucbp->pr_cache.ehtp >> 24) & 0xf;
/* idx == 0/1/2 selection conditions are missing from this dump. */
661 UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr0;
663 UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr1;
665 UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr2;
/* Unknown idx: fail. */
668 UCB_PR_ADDR (ucbp) = 0;
674 /* Execute region offset to PR */
675 UCB_PR_ADDR (ucbp) = selfrel_offset31 (ucbp->pr_cache.ehtp);
681 /* Perform phase2 unwinding. VRS is the initial virtual register state. */
/* NOTE(review): braces, the do/while opener, and the abort() calls on the
   failure paths are missing from this dump.  Loops calling the cached
   personality routine with _US_UNWIND_FRAME_STARTING until it stops
   returning _URC_CONTINUE_UNWIND, then jumps into the landing pad via
   restore_core_regs (which never returns).  */
683 static void __attribute__((noreturn))
684 unwind_phase2 (_Unwind_Control_Block * ucbp, phase2_vrs * vrs)
686 _Unwind_Reason_Code pr_result;
690 /* Find the entry for this routine. */
691 if (get_eit_entry (ucbp, vrs->core.r[R_PC]) != _URC_OK)
/* Record the call-site so _Unwind_Resume can recover the PC later. */
694 UCB_SAVED_CALLSITE_ADDR (ucbp) = vrs->core.r[R_PC];
696 /* Call the pr to decide what to do. */
697 pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
698 (_US_UNWIND_FRAME_STARTING, ucbp, (_Unwind_Context *) vrs);
700 while (pr_result == _URC_CONTINUE_UNWIND);
702 if (pr_result != _URC_INSTALL_CONTEXT)
705 restore_core_regs (&vrs->core);
708 /* Perform phase2 forced unwinding. */
/* NOTE(review): the third parameter (resume flag), braces, the do opener,
   the stop_code -> _URC_FAILURE translation, and the loop-exit handling
   are missing from this dump.  Single-pass unwind: each iteration looks up
   the frame, calls the personality routine on a scratch copy of the VRS,
   then calls the caller-supplied stop function; unwinding ends when the
   stop function returns something other than _URC_NO_REASON or the PR
   installs a context.  */
710 static _Unwind_Reason_Code
711 unwind_phase2_forced (_Unwind_Control_Block *ucbp, phase2_vrs *entry_vrs,
714 _Unwind_Stop_Fn stop_fn = (_Unwind_Stop_Fn) UCB_FORCED_STOP_FN (ucbp);
715 void *stop_arg = (void *)UCB_FORCED_STOP_ARG (ucbp);
716 _Unwind_Reason_Code pr_result = 0;
717 /* We use phase1_vrs here even though we do not demand save, for the
719 phase1_vrs saved_vrs, next_vrs;
721 /* Save the core registers. */
722 saved_vrs.core = entry_vrs->core;
723 /* We don't need to demand-save the non-core registers, because we
724 unwind in a single pass. */
725 saved_vrs.demand_save_flags = 0;
727 /* Unwind until we reach a propagation barrier. */
730 _Unwind_State action;
731 _Unwind_Reason_Code entry_code;
732 _Unwind_Reason_Code stop_code;
734 /* Find the entry for this routine. */
735 entry_code = get_eit_entry (ucbp, saved_vrs.core.r[R_PC]);
/* Resume-vs-first-frame selection condition is missing from this dump. */
739 action = _US_UNWIND_FRAME_RESUME | _US_FORCE_UNWIND;
743 action = _US_UNWIND_FRAME_STARTING | _US_FORCE_UNWIND;
745 if (entry_code == _URC_OK)
747 UCB_SAVED_CALLSITE_ADDR (ucbp) = saved_vrs.core.r[R_PC];
/* Dry-run the PR on a copy so the stop function still sees this frame. */
749 next_vrs = saved_vrs;
751 /* Call the pr to decide what to do. */
752 pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
753 (action, ucbp, (void *) &next_vrs);
755 saved_vrs.prev_sp = next_vrs.core.r[R_SP];
759 /* Treat any failure as the end of unwinding, to cope more
760 gracefully with missing EH information. Mixed EH and
761 non-EH within one object will usually result in failure,
762 because the .ARM.exidx tables do not indicate the end
763 of the code to which they apply; but mixed EH and non-EH
764 shared objects should return an unwind failure at the
765 entry of a non-EH shared object. */
766 action |= _US_END_OF_STACK;
768 saved_vrs.prev_sp = saved_vrs.core.r[R_SP];
771 stop_code = stop_fn (1, action, ucbp->exception_class, ucbp,
772 (void *)&saved_vrs, stop_arg);
773 if (stop_code != _URC_NO_REASON)
776 if (entry_code != _URC_OK)
/* Commit the dry-run state and continue to the next frame. */
779 saved_vrs = next_vrs;
781 while (pr_result == _URC_CONTINUE_UNWIND);
783 if (pr_result != _URC_INSTALL_CONTEXT)
785 /* Some sort of failure has occurred in the pr and probably the
786 pr returned _URC_FAILURE. */
790 restore_core_regs (&saved_vrs.core);
793 /* This is a very limited implementation of _Unwind_GetCFA. It returns
794 the stack pointer as it is about to be unwound, and is only valid
795 while calling the stop function during forced unwinding. If the
796 current personality routine result is going to run a cleanup, this
797 will not be the CFA; but when the frame is really unwound, it will
   be (comment truncated in this dump). */
/* NOTE(review): the return-type line and braces are missing from dump. */
801 _Unwind_GetCFA (_Unwind_Context *context)
803 return ((phase1_vrs *) context)->prev_sp;
806 /* Perform phase1 unwinding. UCBP is the exception being thrown, and
807 entry_VRS is the register state on entry to _Unwind_RaiseException. */
/* NOTE(review): return-type lines, braces, the do opener, and the
   _URC_FAILURE returns on the two failure paths are missing from this
   dump.  Phase 1: virtually unwind (demand-saving coprocessor state) until
   a PR reports _URC_HANDLER_FOUND, then restore the coprocessor state and
   run the real phase-2 unwind from the entry state.  */
810 __gnu_Unwind_RaiseException (_Unwind_Control_Block *, phase2_vrs *);
813 __gnu_Unwind_RaiseException (_Unwind_Control_Block * ucbp,
814 phase2_vrs * entry_vrs)
816 phase1_vrs saved_vrs;
817 _Unwind_Reason_Code pr_result;
819 /* Set the pc to the call site. */
820 entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];
822 /* Save the core registers. */
823 saved_vrs.core = entry_vrs->core;
824 /* Set demand-save flags. */
825 saved_vrs.demand_save_flags = ~(_uw) 0;
827 /* Unwind until we reach a propagation barrier. */
830 /* Find the entry for this routine. */
831 if (get_eit_entry (ucbp, saved_vrs.core.r[R_PC]) != _URC_OK)
834 /* Call the pr to decide what to do. */
835 pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
836 (_US_VIRTUAL_UNWIND_FRAME, ucbp, (void *) &saved_vrs);
838 while (pr_result == _URC_CONTINUE_UNWIND);
840 /* We've unwound as far as we want to go, so restore the original
842 restore_non_core_regs (&saved_vrs);
843 if (pr_result != _URC_HANDLER_FOUND)
845 /* Some sort of failure has occurred in the pr and probably the
846 pr returned _URC_FAILURE. */
850 unwind_phase2 (ucbp, entry_vrs);
853 /* Resume unwinding after a cleanup has been run. UCBP is the exception
854 being thrown and ENTRY_VRS is the register state on entry to
   _Unwind_ForcedUnwind (comment truncated in this dump; note the comment
   text appears to describe Resume — presumably a dump artifact, verify). */
/* NOTE(review): the return-type line and braces are missing from dump.
   Records the stop function/argument in the UCB's reserved words so a
   later _Unwind_Resume can detect forced unwinding, then runs single-pass
   forced phase 2 directly.  */
857 __gnu_Unwind_ForcedUnwind (_Unwind_Control_Block *,
858 _Unwind_Stop_Fn, void *, phase2_vrs *);
861 __gnu_Unwind_ForcedUnwind (_Unwind_Control_Block *ucbp,
862 _Unwind_Stop_Fn stop_fn, void *stop_arg,
863 phase2_vrs *entry_vrs)
865 UCB_FORCED_STOP_FN (ucbp) = (_uw) stop_fn;
866 UCB_FORCED_STOP_ARG (ucbp) = (_uw) stop_arg;
868 /* Set the pc to the call site. */
869 entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];
871 return unwind_phase2_forced (ucbp, entry_vrs, 0);
/* Resume unwinding after a cleanup has run.  NOTE(review): return-type
   lines, braces, the abort() on the can't-fail path, and the switch
   default are missing from this dump.  */
875 __gnu_Unwind_Resume (_Unwind_Control_Block *, phase2_vrs *);
878 __gnu_Unwind_Resume (_Unwind_Control_Block * ucbp, phase2_vrs * entry_vrs)
880 _Unwind_Reason_Code pr_result;
882 /* Recover the saved address. */
883 entry_vrs->core.r[R_PC] = UCB_SAVED_CALLSITE_ADDR (ucbp);
/* A recorded stop function means we are inside a forced unwind: resume it
   (third argument 1 = resume) instead of calling the cached PR.  */
885 if (UCB_FORCED_STOP_FN (ucbp))
887 unwind_phase2_forced (ucbp, entry_vrs, 1);
889 /* We can't return failure at this point. */
893 /* Call the cached PR. */
894 pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
895 (_US_UNWIND_FRAME_RESUME, ucbp, (_Unwind_Context *) entry_vrs);
899 case _URC_INSTALL_CONTEXT:
900 /* Upload the registers to enter the landing pad. */
901 restore_core_regs (&entry_vrs->core);
903 case _URC_CONTINUE_UNWIND:
904 /* Continue unwinding the next frame. */
905 unwind_phase2 (ucbp, entry_vrs);
/* Resume propagation of an existing exception, or re-run the forced
   unwind if one was in progress.  NOTE(review): return-type lines and
   braces are missing from this dump.  */
913 __gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block *, phase2_vrs *);
916 __gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block * ucbp,
917 phase2_vrs * entry_vrs)
/* No stop function recorded => this is an ordinary (re)throw. */
919 if (!UCB_FORCED_STOP_FN (ucbp))
920 return __gnu_Unwind_RaiseException (ucbp, entry_vrs);
922 /* Set the pc to the call site. */
923 entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];
924 /* Continue unwinding the next frame. */
925 return unwind_phase2_forced (ucbp, entry_vrs, 0);
928 /* Clean up an exception object when unwinding is complete. */
/* NOTE(review): return type, braces and the (empty) body are missing from
   this dump; the visible signature suggests an intentional no-op. */
930 _Unwind_Complete (_Unwind_Control_Block * ucbp __attribute__((unused)))
935 /* Get the _Unwind_Control_Block from an _Unwind_Context. */
/* The UCB pointer is stashed in the virtual IP register (R_IP / r12) —
   see the matching _Unwind_SetGR(..., 12, ucbp) in the backtrace code. */
937 static inline _Unwind_Control_Block *
938 unwind_UCB_from_context (_Unwind_Context * context)
940 return (_Unwind_Control_Block *) _Unwind_GetGR (context, R_IP);
944 /* Free an exception. */
/* NOTE(review): return type and braces are missing from this dump.
   Invokes the exception's own cleanup callback, if any, with the
   foreign-exception-caught reason code. */
947 _Unwind_DeleteException (_Unwind_Exception * exc)
949 if (exc->exception_cleanup)
950 (*exc->exception_cleanup) (_URC_FOREIGN_EXCEPTION_CAUGHT, exc);
954 /* Perform stack backtrace through unwind data. */
/* NOTE(review): return-type lines, braces, the do opener, the loop-break
   handling after the trace callback, and the final return are missing
   from this dump.  Walks frames like phase 1 (virtual unwind with a local
   throw-away UCB), invoking the user trace callback once per frame.  */
956 __gnu_Unwind_Backtrace(_Unwind_Trace_Fn trace, void * trace_argument,
957 phase2_vrs * entry_vrs);
959 __gnu_Unwind_Backtrace(_Unwind_Trace_Fn trace, void * trace_argument,
960 phase2_vrs * entry_vrs)
962 phase1_vrs saved_vrs;
963 _Unwind_Reason_Code code;
965 _Unwind_Control_Block ucb;
966 _Unwind_Control_Block *ucbp = &ucb;
968 /* Set the pc to the call site. */
969 entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];
971 /* Save the core registers. */
972 saved_vrs.core = entry_vrs->core;
973 /* Set demand-save flags. */
974 saved_vrs.demand_save_flags = ~(_uw) 0;
978 /* Find the entry for this routine. */
979 if (get_eit_entry (ucbp, saved_vrs.core.r[R_PC]) != _URC_OK)
985 /* The dwarf unwinder assumes the context structure holds things
986 like the function and LSDA pointers. The ARM implementation
987 caches these in the exception header (UCB). To avoid
988 rewriting everything we make the virtual IP register point at
   the UCB (comment truncated in this dump). */
990 _Unwind_SetGR((_Unwind_Context *)&saved_vrs, 12, (_Unwind_Ptr) ucbp);
992 /* Call trace function. */
/* The callback's failure comparison/branch target is missing from dump. */
993 if ((*trace) ((_Unwind_Context *) &saved_vrs, trace_argument)
1000 /* Call the pr to decide what to do. */
1001 code = ((personality_routine) UCB_PR_ADDR (ucbp))
1002 (_US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND,
1003 ucbp, (void *) &saved_vrs);
1005 while (code != _URC_END_OF_STACK
1006 && code != _URC_FAILURE);
1008 restore_non_core_regs (&saved_vrs);
1013 /* Common implementation for ARM ABI defined personality routines.
1014 ID is the index of the personality routine, other arguments are as defined
1015 by __aeabi_unwind_cpp_pr{0,1,2}. */
/* NOTE(review): this function is the most heavily gapped in the dump —
   the `int id` parameter, most local declarations (data, lp, len, offset,
   fnstart, addr, in_range, matched, rtti, i), the per-id header decode,
   all case labels of the descriptor switch, and nearly every brace are
   missing.  Visible structure: decode the EHT header, optionally restore
   the saved descriptor pointer on resume, walk cleanup / catch /
   exception-spec descriptors, then run the unwind opcodes via
   __gnu_unwind_execute.  Recover the full upstream text before editing. */
1017 static _Unwind_Reason_Code
1018 __gnu_unwind_pr_common (_Unwind_State state,
1019 _Unwind_Control_Block *ucbp,
1020 _Unwind_Context *context,
1023 __gnu_unwind_state uws;
1028 int phase2_call_unexpected_after_unwind = 0;
1030 int forced_unwind = state & _US_FORCE_UNWIND;
1032 state &= _US_ACTION_MASK;
1034 data = (_uw *) ucbp->pr_cache.ehtp;
1035 uws.data = *(data++);
/* Header decode for pr1/pr2 (id-dependent); pr0 path missing from dump. */
1045 uws.words_left = (uws.data >> 16) & 0xff;
1048 data += uws.words_left;
1051 /* Restore the saved pointer. */
1052 if (state == _US_UNWIND_FRAME_RESUME)
1053 data = (_uw *) ucbp->cleanup_cache.bitpattern[0];
1055 if ((ucbp->pr_cache.additional & 1) == 0)
1057 /* Process descriptors. */
/* 32-bit vs 16-bit descriptor decode (id-dependent) — selector missing. */
1065 len = ((EHT32 *) data)->length;
1066 offset = ((EHT32 *) data)->offset;
1071 len = ((EHT16 *) data)->length;
1072 offset = ((EHT16 *) data)->offset;
1076 fnstart = ucbp->pr_cache.fnstart + (offset & ~1);
1077 addr = _Unwind_GetGR (context, R_PC);
1078 in_range = (fnstart <= addr && addr < fnstart + (len & ~1));
/* Low bits of offset/len encode the descriptor kind (0 = cleanup,
   1 = catch, 2 = exception spec — case labels missing from dump). */
1080 switch (((offset & 1) << 1) | (len & 1))
1084 if (state != _US_VIRTUAL_UNWIND_FRAME
1087 /* Cleanup in range, and we are running cleanups. */
1090 /* Landing pad address is 31-bit pc-relative offset. */
1091 lp = selfrel_offset31 (data);
1093 /* Save the exception data pointer. */
1094 ucbp->cleanup_cache.bitpattern[0] = (_uw) data;
1095 if (!__cxa_begin_cleanup (ucbp))
1096 return _URC_FAILURE;
1097 /* Setup the VRS to enter the landing pad. */
1098 _Unwind_SetGR (context, R_PC, lp);
1099 return _URC_INSTALL_CONTEXT;
1101 /* Cleanup not in range, or we are in stage 1. */
1106 /* Catch handler. */
1107 if (state == _US_VIRTUAL_UNWIND_FRAME)
1111 /* Check for a barrier. */
1113 bool is_reference = (data[0] & uint32_highbit) != 0;
1115 enum __cxa_type_match_result match_type;
1117 /* Check for no-throw areas. */
1118 if (data[1] == (_uw) -2)
1119 return _URC_FAILURE;
1121 /* The thrown object immediately follows the ECB. */
1122 matched = (void *)(ucbp + 1);
1123 if (data[1] != (_uw) -1)
1125 /* Match a catch specification. */
1126 rtti = _Unwind_decode_target2 ((_uw) &data[1]);
/* Remaining __cxa_type_match arguments missing from this dump. */
1127 match_type = __cxa_type_match (ucbp,
/* catch(...) case: data[1] == -1 matches anything. */
1133 match_type = ctm_succeeded;
1137 ucbp->barrier_cache.sp =
1138 _Unwind_GetGR (context, R_SP);
1139 // ctm_succeeded_with_ptr_to_base really
1140 // means _c_t_m indirected the pointer
1141 // object. We have to reconstruct the
1142 // additional pointer layer by using a temporary.
1143 if (match_type == ctm_succeeded_with_ptr_to_base)
1145 ucbp->barrier_cache.bitpattern[2]
1147 ucbp->barrier_cache.bitpattern[0]
1148 = (_uw) &ucbp->barrier_cache.bitpattern[2];
1151 ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
1152 ucbp->barrier_cache.bitpattern[1] = (_uw) data;
1153 return _URC_HANDLER_FOUND;
1156 /* Handler out of range, or not matched. */
1158 else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
1159 && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
1161 /* Matched a previous propagation barrier. */
1164 /* Setup for entry to the handler. */
1165 lp = selfrel_offset31 (data);
1166 _Unwind_SetGR (context, R_PC, lp);
1167 _Unwind_SetGR (context, 0, (_uw) ucbp);
1168 return _URC_INSTALL_CONTEXT;
1170 /* Catch handler not matched. Advance to the next descriptor. */
1175 rtti_count = data[0] & 0x7fffffff;
1176 /* Exception specification. */
1177 if (state == _US_VIRTUAL_UNWIND_FRAME)
1179 if (in_range && (!forced_unwind || !rtti_count))
1181 /* Match against the exception specification. */
1186 for (i = 0; i < rtti_count; i++)
1188 matched = (void *)(ucbp + 1);
1189 rtti = _Unwind_decode_target2 ((_uw) &data[i + 1]);
/* Match-found break out of the loop is missing from this dump. */
1190 if (__cxa_type_match (ucbp, (type_info *) rtti, 0,
1195 if (i == rtti_count)
1197 /* Exception does not match the spec. */
1198 ucbp->barrier_cache.sp =
1199 _Unwind_GetGR (context, R_SP);
1200 ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
1201 ucbp->barrier_cache.bitpattern[1] = (_uw) data;
1202 return _URC_HANDLER_FOUND;
1205 /* Handler out of range, or exception is permitted. */
1207 else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
1208 && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
1210 /* Matched a previous propagation barrier. */
1212 /* Record the RTTI list for __cxa_call_unexpected. */
1213 ucbp->barrier_cache.bitpattern[1] = rtti_count;
1214 ucbp->barrier_cache.bitpattern[2] = 0;
1215 ucbp->barrier_cache.bitpattern[3] = 4;
1216 ucbp->barrier_cache.bitpattern[4] = (_uw) &data[1];
/* High bit of data[0] flags an attached landing pad to enter directly. */
1218 if (data[0] & uint32_highbit)
1220 data += rtti_count + 1;
1221 /* Setup for entry to the handler. */
1222 lp = selfrel_offset31 (data);
1224 _Unwind_SetGR (context, R_PC, lp);
1225 _Unwind_SetGR (context, 0, (_uw) ucbp);
1226 return _URC_INSTALL_CONTEXT;
/* No landing pad: defer to __cxa_call_unexpected after the frame unwinds. */
1229 phase2_call_unexpected_after_unwind = 1;
1231 if (data[0] & uint32_highbit)
1233 data += rtti_count + 1;
1237 /* Should never happen. */
1238 return _URC_FAILURE;
1240 /* Finished processing this descriptor. */
/* Run the frame's unwind opcodes to virtually/really pop the frame. */
1244 if (__gnu_unwind_execute (context, &uws) != _URC_OK)
1245 return _URC_FAILURE;
1247 if (phase2_call_unexpected_after_unwind)
1249 /* Enter __cxa_unexpected as if called from the call site. */
1250 _Unwind_SetGR (context, R_LR, _Unwind_GetGR (context, R_PC));
1251 _Unwind_SetGR (context, R_PC, (_uw) &__cxa_call_unexpected);
1252 return _URC_INSTALL_CONTEXT;
1255 return _URC_CONTINUE_UNWIND;
1259 /* ABI defined personality routine entry points. */
/* pr0: short-frame unwinding, 16-bit descriptors.  NOTE(review): the
   return-type line and braces are missing from this dump. */
1262 __aeabi_unwind_cpp_pr0 (_Unwind_State state,
1263 _Unwind_Control_Block *ucbp,
1264 _Unwind_Context *context)
1266 return __gnu_unwind_pr_common (state, ucbp, context, 0);
/* pr1: long-frame unwinding, 16-bit descriptors.  NOTE(review): the
   return-type line and braces are missing from this dump. */
1270 __aeabi_unwind_cpp_pr1 (_Unwind_State state,
1271 _Unwind_Control_Block *ucbp,
1272 _Unwind_Context *context)
1274 return __gnu_unwind_pr_common (state, ucbp, context, 1);
/* pr2: long-frame unwinding, 32-bit descriptors.  NOTE(review): the
   return-type line and braces are missing from this dump. */
1278 __aeabi_unwind_cpp_pr2 (_Unwind_State state,
1279 _Unwind_Control_Block *ucbp,
1280 _Unwind_Context *context)
1282 return __gnu_unwind_pr_common (state, ucbp, context, 2);