1 /* Subroutines needed for unwinding stack frames for exception handling. */
2 /* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008,
3 2009, 2010, 2011 Free Software Foundation, Inc.
4 Contributed by Jason Merrill <jason@cygnus.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 Under Section 7 of GPL version 3, you are granted additional
19 permissions described in the GCC Runtime Library Exception, version
20 3.1, as published by the Free Software Foundation.
22 You should have received a copy of the GNU General Public License and
23 a copy of the GCC Runtime Library Exception along with this program;
24 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
25 <http://www.gnu.org/licenses/>. */
27 #ifndef _Unwind_Find_FDE
30 #include "coretypes.h"
32 #include "libgcc_tm.h"
35 #define NO_BASE_OF_ENCODED_VALUE
36 #include "unwind-pe.h"
37 #include "unwind-dw2-fde.h"
41 /* The unseen_objects list contains objects that have been registered
42 but not yet categorized in any way. The seen_objects list has had
43 its pc_begin and count fields initialized at minimum, and is sorted
44 by decreasing value of pc_begin. */
/* Both lists are guarded by object_mutex: every traversal or mutation
   in the register/deregister/search entry points happens between
   __gthread_mutex_lock and __gthread_mutex_unlock.  */
45 static struct object *unseen_objects;
46 static struct object *seen_objects;
/* Mutex serializing access to the unseen_objects/seen_objects lists.
   If the target provides a static initializer, use it; otherwise fall
   back to one-time lazy initialization via __gthread_once.  */
48 #ifdef __GTHREAD_MUTEX_INIT
49 static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
51 static __gthread_mutex_t object_mutex;
54 #ifdef __GTHREAD_MUTEX_INIT_FUNCTION
/* Run exactly once (through __gthread_once below) to initialize the
   mutex on targets lacking a static initializer.  */
56 init_object_mutex (void)
58 __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
62 init_object_mutex_once (void)
64 static __gthread_once_t once = __GTHREAD_ONCE_INIT;
65 __gthread_once (&once, init_object_mutex);
/* Neither init mechanism available: initialization is a no-op.  */
68 #define init_object_mutex_once()
/* NOTE(review): this excerpt elides several lines (missing #else/#endif
   pairs, return types, braces) — verify against the full source before
   editing.  */
71 /* Called from crtbegin.o to register the unwind info for an object. */
/* BEGIN points at the object's .eh_frame data; OB is caller-provided
   storage for the bookkeeping record.  TBASE/DBASE become the text/data
   base addresses used later by base_from_object for DW_EH_PE_textrel /
   DW_EH_PE_datarel encoded pointers.  The object is pushed onto the
   unseen_objects list under object_mutex; classification is deferred
   until the first _Unwind_Find_FDE lookup.  */
74 __register_frame_info_bases (const void *begin, struct object *ob,
75 void *tbase, void *dbase)
77 /* If .eh_frame is empty, don't register at all. */
78 if ((const uword *) begin == 0 || *(const uword *) begin == 0)
/* pc_begin starts at the maximum address; classification lowers it.  */
81 ob->pc_begin = (void *)-1;
86 ob->s.b.encoding = DW_EH_PE_omit;
87 #ifdef DWARF2_OBJECT_END_PTR_EXTENSION
91 init_object_mutex_once ();
92 __gthread_mutex_lock (&object_mutex);
94 ob->next = unseen_objects;
97 __gthread_mutex_unlock (&object_mutex);
/* NOTE(review): several body lines (field initializations, list-head
   store, braces) are elided in this excerpt.  */
/* Compatibility entry point: register BEGIN with null text/data bases.  */
101 __register_frame_info (const void *begin, struct object *ob)
103 __register_frame_info_bases (begin, ob, 0, 0);
/* Convenience wrapper: heap-allocate the object record and register
   BEGIN.  The matching free happens in __deregister_frame.  */
107 __register_frame (void *begin)
111 /* If .eh_frame is empty, don't register at all. */
112 if (*(uword *) begin == 0)
115 ob = malloc (sizeof (struct object));
116 __register_frame_info (begin, ob);
/* NOTE(review): the malloc result is not visibly NULL-checked in this
   excerpt — confirm handling in the full source.  */
119 /* Similar, but BEGIN is actually a pointer to a table of unwind entries
120 for different translation units. Called from the file generated by
124 __register_frame_info_table_bases (void *begin, struct object *ob,
125 void *tbase, void *dbase)
127 ob->pc_begin = (void *)-1;
/* from_array marks u.array as a NULL-terminated table of FDE blocks,
   consumed per-entry by init_object/search_object.  */
132 ob->s.b.from_array = 1;
133 ob->s.b.encoding = DW_EH_PE_omit;
135 init_object_mutex_once ();
136 __gthread_mutex_lock (&object_mutex);
138 ob->next = unseen_objects;
141 __gthread_mutex_unlock (&object_mutex);
/* Table variant of __register_frame_info: null text/data bases.  */
145 __register_frame_info_table (void *begin, struct object *ob)
147 __register_frame_info_table_bases (begin, ob, 0, 0);
/* Heap-allocating wrapper for the table variant.  */
151 __register_frame_table (void *begin)
153 struct object *ob = malloc (sizeof (struct object));
154 __register_frame_info_table (begin, ob);
/* NOTE(review): malloc result not visibly NULL-checked here — confirm
   against the full source.  */
157 /* Called from crtbegin.o to deregister the unwind info for an object. */
158 /* ??? Glibc has for a while now exported __register_frame_info and
159 __deregister_frame_info. If we call __register_frame_info_bases
160 from crtbegin (wherein it is declared weak), and this object does
161 not get pulled from libgcc.a for other reasons, then the
162 invocation of __deregister_frame_info will be resolved from glibc.
163 Since the registration did not happen there, we'll die.
165 Therefore, declare a new deregistration entry point that does the
166 exact same thing, but will resolve to the same library as
167 implements __register_frame_info_bases. */
/* Unlinks (and returns, in the full source) the object record whose
   registration key matches BEGIN.  The key is u.single for unsorted
   objects and u.sort->orig_data once a sorted vector replaced it.  */
170 __deregister_frame_info_bases (const void *begin)
173 struct object *ob = 0;
175 /* If .eh_frame is empty, we haven't registered. */
176 if ((const uword *) begin == 0 || *(const uword *) begin == 0)
179 init_object_mutex_once ();
180 __gthread_mutex_lock (&object_mutex);
/* First look among objects never yet classified.  */
182 for (p = &unseen_objects; *p ; p = &(*p)->next)
183 if ((*p)->u.single == begin)
/* Then among classified objects; a sorted object keeps its original
   fde pointer in u.sort->orig_data for exactly this lookup.  */
190 for (p = &seen_objects; *p ; p = &(*p)->next)
191 if ((*p)->s.b.sorted)
193 if ((*p)->u.sort->orig_data == begin)
203 if ((*p)->u.single == begin)
212 __gthread_mutex_unlock (&object_mutex);
/* NOTE(review): unlink/return statements and braces are elided in this
   excerpt.  */
/* Compatibility entry point; forwards to the _bases variant.  */
218 __deregister_frame_info (const void *begin)
220 return __deregister_frame_info_bases (begin);
/* Counterpart of __register_frame: deregister and free the object
   record that __register_frame malloc'd.  */
224 __deregister_frame (void *begin)
226 /* If .eh_frame is empty, we haven't registered. */
227 if (*(uword *) begin != 0)
228 free (__deregister_frame_info (begin));
232 /* Like base_of_encoded_value, but take the base from a struct object
233 instead of an _Unwind_Context. */
236 base_from_object (unsigned char encoding, struct object *ob)
238 if (encoding == DW_EH_PE_omit)
/* 0x70 masks the base-selection bits of a DWARF EH pointer encoding;
   the low nibble (value format) is irrelevant here.  */
241 switch (encoding & 0x70)
243 case DW_EH_PE_absptr:
245 case DW_EH_PE_aligned:
248 case DW_EH_PE_textrel:
249 return (_Unwind_Ptr) ob->tbase;
250 case DW_EH_PE_datarel:
251 return (_Unwind_Ptr) ob->dbase;
/* NOTE(review): returns for the omit/absptr/aligned cases and the
   funcrel/default handling are elided in this excerpt.  */
257 /* Return the FDE pointer encoding from the CIE. */
258 /* ??? This is a subset of extract_cie_info from unwind-dw2.c. */
/* Walks the CIE header fields in order (augmentation string, optional
   v4 address/segment sizes, code/data alignment, return-address column,
   then 'z'/'P'/'L'/'R' augmentation data) until the FDE encoding is
   found; defaults to DW_EH_PE_absptr when no 'R' augmentation exists.  */
261 get_cie_encoding (const struct dwarf_cie *cie)
263 const unsigned char *aug, *p;
268 aug = cie->augmentation;
269 p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string. */
270 if (__builtin_expect (cie->version >= 4, 0))
272 if (p[0] != sizeof (void *) || p[1] != 0)
273 return DW_EH_PE_omit; /* We are not prepared to handle unexpected
274 address sizes or segment selectors. */
275 p += 2; /* Skip address size and segment size. */
279 return DW_EH_PE_absptr;
281 p = read_uleb128 (p, &utmp); /* Skip code alignment. */
282 p = read_sleb128 (p, &stmp); /* Skip data alignment. */
283 if (cie->version == 1) /* Skip return address column. */
286 p = read_uleb128 (p, &utmp);
288 aug++; /* Skip 'z' */
289 p = read_uleb128 (p, &utmp); /* Skip augmentation length. */
/* 'R' augmentation: the FDE pointer encoding itself.  */
292 /* This is what we're looking for. */
295 /* Personality encoding and pointer. */
296 else if (*aug == 'P')
298 /* ??? Avoid dereferencing indirect pointers, since we're
299 faking the base address. Gotta keep DW_EH_PE_aligned
301 p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
304 else if (*aug == 'L')
306 /* Otherwise end of string, or unknown augmentation. */
308 return DW_EH_PE_absptr;
/* Convenience: look up F's CIE and return its FDE pointer encoding.  */
314 get_fde_encoding (const struct dwarf_fde *f)
316 return get_cie_encoding (get_cie (f));
320 /* Sorting an array of FDEs by address.
321 (Ideally we would have the linker sort the FDEs so we don't have to do
322 it at run time. But the linkers are not yet prepared for this.) */
324 /* Comparison routines. Three variants of increasing complexity. */
/* Fast path: all FDEs use DW_EH_PE_absptr, so pc_begin holds a raw
   pointer value.  */
327 fde_unencoded_compare (struct object *ob __attribute__((unused)),
328 const fde *x, const fde *y)
330 _Unwind_Ptr x_ptr, y_ptr;
/* memcpy sidesteps alignment and aliasing assumptions when reading
   pc_begin out of the packed FDE data.  */
331 memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr));
332 memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr));
/* NOTE(review): the comparison/return lines are elided in this
   excerpt.  */
/* Middle path: one uniform (non-absptr) encoding for the whole object;
   decode both pc_begin values against the object's single base.  */
342 fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
344 _Unwind_Ptr base, x_ptr, y_ptr;
346 base = base_from_object (ob->s.b.encoding, ob);
347 read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
348 read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);
/* Slow path: per-FDE encoding must be fetched from each FDE's CIE
   before its pc_begin can be decoded.  */
358 fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
360 int x_encoding, y_encoding;
361 _Unwind_Ptr x_ptr, y_ptr;
363 x_encoding = get_fde_encoding (x);
364 read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
365 x->pc_begin, &x_ptr);
367 y_encoding = get_fde_encoding (y);
368 read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
369 y->pc_begin, &y_ptr);
/* Common signature for the three comparison routines above.  */
378 typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);
381 /* This is a special mix of insertion sort and heap sort, optimized for
382 the data sets that actually occur. They look like
383 101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
384 I.e. a linearly increasing sequence (coming from functions in the text
385 section), with additionally a few unordered elements (coming from functions
386 in gnu_linkonce sections) whose values are higher than the values in the
387 surrounding linear sequence (but not necessarily higher than the values
388 at the end of the linear sequence!).
389 The worst-case total run time is O(N) + O(n log (n)), where N is the
390 total number of FDEs and n is the number of erratic ones. */
392 struct fde_accumulator
/* linear: the ascending run kept in order; erratic: the out-of-order
   entries, heap-sorted and merged back in end_fde_sort.  */
394 struct fde_vector *linear;
395 struct fde_vector *erratic;
/* Allocate the two COUNT-sized fde vectors.  Either allocation may
   fail; end_fde_sort copes with a missing erratic vector by heap
   sorting the linear one, and a missing linear vector means the caller
   falls back to linear search.  */
399 start_fde_sort (struct fde_accumulator *accu, size_t count)
405 size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
406 if ((accu->linear = malloc (size)))
408 accu->linear->count = 0;
409 if ((accu->erratic = malloc (size)))
410 accu->erratic->count = 0;
/* Append THIS_FDE to the linear vector (capacity was reserved by
   start_fde_sort).  */
418 fde_insert (struct fde_accumulator *accu, const fde *this_fde)
421 accu->linear->array[accu->linear->count++] = this_fde;
424 /* Split LINEAR into a linear sequence with low values and an erratic
425 sequence with high values, put the linear one (of longest possible
426 length) into LINEAR and the erratic one into ERRATIC. This is O(N).
428 Because the longest linear sequence we are trying to locate within the
429 incoming LINEAR array can be interspersed with (high valued) erratic
430 entries. We construct a chain indicating the sequenced entries.
431 To avoid having to allocate this chain, we overlay it onto the space of
432 the ERRATIC array during construction. A final pass iterates over the
433 chain to determine what should be placed in the ERRATIC array, and
434 what is the linear sequence. This overlay is safe from aliasing. */
437 fde_split (struct object *ob, fde_compare_t fde_compare,
438 struct fde_vector *linear, struct fde_vector *erratic)
/* marker's address is the chain terminator (distinct from any array
   slot and from NULL).  */
440 static const fde *marker;
441 size_t count = linear->count;
/* NOTE(review): the initializer below is garbled in this excerpt
   ("▮"); upstream it is "&marker;" — verify before editing.  */
442 const fde *const *chain_end = ▮
445 /* This should optimize out, but it is wise to make sure this assumption
446 is correct. Should these have different sizes, we cannot cast between
447 them and the overlaying onto ERRATIC will not work. */
448 gcc_assert (sizeof (const fde *) == sizeof (const fde **));
450 for (i = 0; i < count; i++)
452 const fde *const *probe;
/* Pop chain entries that compare greater than the new element; they
   are demoted out of the candidate linear sequence.  */
454 for (probe = chain_end;
455 probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
458 chain_end = (const fde *const*) erratic->array[probe - linear->array];
459 erratic->array[probe - linear->array] = NULL;
/* Link this element onto the chain, storing the previous chain head
   in the overlaid ERRATIC slot.  */
461 erratic->array[i] = (const fde *) chain_end;
462 chain_end = &linear->array[i];
465 /* Each entry in LINEAR which is part of the linear sequence we have
466 discovered will correspond to a non-NULL entry in the chain we built in
467 the ERRATIC array. */
468 for (i = j = k = 0; i < count; i++)
469 if (erratic->array[i])
470 linear->array[j++] = linear->array[i];
472 erratic->array[k++] = linear->array[i];
/* NOTE(review): final count assignments and braces are elided in this
   excerpt.  */
/* Exchange two fde pointers; used by the heap-sort routines below.  */
477 #define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)
479 /* Convert a semi-heap to a heap. A semi-heap is a heap except possibly
480 for the first (root) node; push it down to its rightful place. */
483 frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
/* Standard sift-down: j walks to the larger child (2i+1 / 2i+2) until
   the heap property holds.  */
488 for (i = lo, j = 2*i+1;
492 if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
495 if (fde_compare (ob, a[i], a[j]) < 0)
505 /* This is O(n log(n)). BSD/OS defines heapsort in stdlib.h, so we must
506 use a name that does not conflict. */
509 frame_heapsort (struct object *ob, fde_compare_t fde_compare,
510 struct fde_vector *erratic)
512 /* For a description of this algorithm, see:
513 Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
515 const fde ** a = erratic->array;
516 /* A portion of the array is called a "heap" if for all i>=0:
517 If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
518 If i and 2i+2 are valid indices, then a[i] >= a[2i+2]. */
519 size_t n = erratic->count;
522 /* Expand our heap incrementally from the end of the array, heapifying
523 each resulting semi-heap as we go. After each step, a[m] is the top
525 for (m = n/2-1; m >= 0; --m)
526 frame_downheap (ob, fde_compare, a, m, n);
528 /* Shrink our heap incrementally from the end of the array, first
529 swapping out the largest element a[0] and then re-heapifying the
530 resulting semi-heap. After each step, a[0..m) is a heap. */
531 for (m = n-1; m >= 1; --m)
534 frame_downheap (ob, fde_compare, a, 0, m);
/* NOTE(review): loop-variable declaration and SWAP call are elided in
   this excerpt.  */
539 /* Merge V1 and V2, both sorted, and put the result into V1. */
/* Works backwards through V1 in place: each V2 element is inserted by
   shifting larger V1 tail elements up, so no scratch buffer is
   needed.  */
541 fde_merge (struct object *ob, fde_compare_t fde_compare,
542 struct fde_vector *v1, struct fde_vector *v2)
554 fde2 = v2->array[i2];
555 while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
557 v1->array[i1+i2] = v1->array[i1-1];
560 v1->array[i1+i2] = fde2;
563 v1->count += v2->count;
/* NOTE(review): the surrounding loop over i2 and index declarations are
   elided in this excerpt.  */
/* Finish the sort started by start_fde_sort: pick the comparison
   routine matching the object's encoding, split out the erratic
   entries, heap-sort them, and merge back into the linear vector.  */
568 end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
570 fde_compare_t fde_compare;
572 gcc_assert (!accu->linear || accu->linear->count == count);
574 if (ob->s.b.mixed_encoding)
575 fde_compare = fde_mixed_encoding_compare;
576 else if (ob->s.b.encoding == DW_EH_PE_absptr)
577 fde_compare = fde_unencoded_compare;
579 fde_compare = fde_single_encoding_compare;
583 fde_split (ob, fde_compare, accu->linear, accu->erratic);
584 gcc_assert (accu->linear->count + accu->erratic->count == count);
585 frame_heapsort (ob, fde_compare, accu->erratic);
586 fde_merge (ob, fde_compare, accu->linear, accu->erratic);
587 free (accu->erratic);
591 /* We've not managed to malloc an erratic array,
592 so heap sort in the linear one. */
593 frame_heapsort (ob, fde_compare, accu->linear);
598 /* Update encoding, mixed_encoding, and pc_begin for OB for the
599 fde array beginning at THIS_FDE. Return the number of fdes
600 encountered along the way. */
603 classify_object_over_fdes (struct object *ob, const fde *this_fde)
605 const struct dwarf_cie *last_cie = 0;
607 int encoding = DW_EH_PE_absptr;
608 _Unwind_Ptr base = 0;
610 for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
612 const struct dwarf_cie *this_cie;
613 _Unwind_Ptr mask, pc_begin;
/* CIE_delta == 0 marks a CIE, not an FDE; skip it.  */
616 if (this_fde->CIE_delta == 0)
619 /* Determine the encoding for this FDE. Note mixed encoded
620 objects for later. */
/* Caching last_cie avoids re-parsing the CIE for consecutive FDEs
   that share it (the common case).  */
621 this_cie = get_cie (this_fde)<
622 if (this_cie != last_cie)
625 encoding = get_cie_encoding (this_cie);
626 if (encoding == DW_EH_PE_omit)
628 base = base_from_object (encoding, ob);
629 if (ob->s.b.encoding == DW_EH_PE_omit)
630 ob->s.b.encoding = encoding;
631 else if (ob->s.b.encoding != encoding)
632 ob->s.b.mixed_encoding = 1;
635 read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
638 /* Take care to ignore link-once functions that were removed.
639 In these cases, the function address will be NULL, but if
640 the encoding is smaller than a pointer a true NULL may not
641 be representable. Assume 0 in the representable bits is NULL. */
642 mask = size_of_encoded_value (encoding);
643 if (mask < sizeof (void *))
644 mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
648 if ((pc_begin & mask) == 0)
/* Track the lowest function start seen: pc_begin was initialized to
   (void *)-1 at registration.  */
652 if ((void *) pc_begin < ob->pc_begin)
653 ob->pc_begin = (void *) pc_begin;
/* Append every real (non-CIE, non-removed) FDE starting at THIS_FDE to
   the accumulator.  Encoding/base are per-object unless the object was
   classified as mixed-encoding, in which case they are re-derived per
   CIE.  */
660 add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
662 const struct dwarf_cie *last_cie = 0;
663 int encoding = ob->s.b.encoding;
664 _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);
666 for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
668 const struct dwarf_cie *this_cie;
/* Skip CIEs.  */
671 if (this_fde->CIE_delta == 0)
674 if (ob->s.b.mixed_encoding)
676 /* Determine the encoding for this FDE. Note mixed encoded
677 objects for later. */
678 this_cie = get_cie (this_fde);
679 if (this_cie != last_cie)
682 encoding = get_cie_encoding (this_cie);
683 base = base_from_object (encoding, ob);
687 if (encoding == DW_EH_PE_absptr)
/* Raw pointer: memcpy avoids alignment/aliasing assumptions.  */
690 memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
696 _Unwind_Ptr pc_begin, mask;
698 read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
701 /* Take care to ignore link-once functions that were removed.
702 In these cases, the function address will be NULL, but if
703 the encoding is smaller than a pointer a true NULL may not
704 be representable. Assume 0 in the representable bits is NULL. */
705 mask = size_of_encoded_value (encoding);
706 if (mask < sizeof (void *))
707 mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
711 if ((pc_begin & mask) == 0)
715 fde_insert (accu, this_fde);
719 /* Set up a sorted array of pointers to FDEs for a loaded object. We
720 count up the entries before allocating the array because it's likely to
721 be faster. We can be called multiple times, should we have failed to
722 allocate a sorted fde array on a previous occasion. */
725 init_object (struct object* ob)
727 struct fde_accumulator accu;
730 count = ob->s.b.count;
/* Pass 1: classify and count.  For table objects, walk each entry of
   the NULL-terminated u.array.  */
733 if (ob->s.b.from_array)
735 fde **p = ob->u.array;
736 for (count = 0; *p; ++p)
738 size_t cur_count = classify_object_over_fdes (ob, *p);
739 if (cur_count == (size_t) -1)
746 count = classify_object_over_fdes (ob, ob->u.single);
747 if (count == (size_t) -1)
/* (size_t)-1 signals corrupt data: neutralize the object with an
   empty terminator so later lookups find nothing.  */
749 static const fde terminator;
752 ob->s.b.encoding = DW_EH_PE_omit;
753 ob->u.single = &terminator;
758 /* The count field we have in the main struct object is somewhat
759 limited, but should suffice for virtually all cases. If the
760 counted value doesn't fit, re-write a zero. The worst that
761 happens is that we re-count next time -- admittedly non-trivial
762 in that this implies some 2M fdes, but at least we function. */
763 ob->s.b.count = count;
764 if (ob->s.b.count != count)
/* Pass 2: allocate, collect, and sort.  On allocation failure we
   simply return; search_object will fall back to linear search.  */
768 if (!start_fde_sort (&accu, count))
771 if (ob->s.b.from_array)
774 for (p = ob->u.array; *p; ++p)
775 add_fdes (ob, &accu, *p);
778 add_fdes (ob, &accu, ob->u.single);
780 end_fde_sort (ob, &accu, count);
782 /* Save the original fde pointer, since this is the key by which the
783 DSO will deregister the object. */
784 accu.linear->orig_data = ob->u.single;
785 ob->u.sort = accu.linear;
790 /* A linear search through a set of FDEs for the given PC. This is
791 used when there was insufficient memory to allocate and sort an
795 linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
797 const struct dwarf_cie *last_cie = 0;
798 int encoding = ob->s.b.encoding;
799 _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);
801 for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
803 const struct dwarf_cie *this_cie;
804 _Unwind_Ptr pc_begin, pc_range;
/* Skip CIEs.  */
807 if (this_fde->CIE_delta == 0)
810 if (ob->s.b.mixed_encoding)
812 /* Determine the encoding for this FDE. Note mixed encoded
813 objects for later. */
814 this_cie = get_cie (this_fde);
815 if (this_cie != last_cie)
818 encoding = get_cie_encoding (this_cie);
819 base = base_from_object (encoding, ob);
823 if (encoding == DW_EH_PE_absptr)
/* Raw pointers: pc_begin and pc_range sit consecutively.  */
825 const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
826 pc_begin = pc_array[0];
827 pc_range = pc_array[1];
834 const unsigned char *p;
836 p = read_encoded_value_with_base (encoding, base,
837 this_fde->pc_begin, &pc_begin);
/* pc_range uses the value format of the encoding but no base
   (it is a length, not an address).  */
838 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
840 /* Take care to ignore link-once functions that were removed.
841 In these cases, the function address will be NULL, but if
842 the encoding is smaller than a pointer a true NULL may not
843 be representable. Assume 0 in the representable bits is NULL. */
844 mask = size_of_encoded_value (encoding);
845 if (mask < sizeof (void *))
846 mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
850 if ((pc_begin & mask) == 0)
/* Unsigned compare doubles as a pc >= pc_begin check.  */
854 if ((_Unwind_Ptr) pc - pc_begin < pc_range)
861 /* Binary search for an FDE containing the given PC. Here are three
862 implementations of increasing complexity. */
/* Variant for DW_EH_PE_absptr objects: pc_begin/pc_range are raw
   values read straight out of the sorted vector's FDEs.  */
864 static inline const fde *
865 binary_search_unencoded_fdes (struct object *ob, void *pc)
867 struct fde_vector *vec = ob->u.sort;
870 for (lo = 0, hi = vec->count; lo < hi; )
872 size_t i = (lo + hi) / 2;
873 const fde *const f = vec->array[i];
/* memcpy avoids alignment/aliasing assumptions on the FDE data.  */
876 memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
877 memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));
881 else if (pc >= pc_begin + pc_range)
/* NOTE(review): the hit-return and lo/hi updates are elided in this
   excerpt.  */
/* Variant for objects with one uniform non-absptr encoding: decode
   each probed FDE's pc_begin/pc_range against the object's base.  */
890 static inline const fde *
891 binary_search_single_encoding_fdes (struct object *ob, void *pc)
893 struct fde_vector *vec = ob->u.sort;
894 int encoding = ob->s.b.encoding;
895 _Unwind_Ptr base = base_from_object (encoding, ob);
898 for (lo = 0, hi = vec->count; lo < hi; )
900 size_t i = (lo + hi) / 2;
901 const fde *f = vec->array[i];
902 _Unwind_Ptr pc_begin, pc_range;
903 const unsigned char *p;
905 p = read_encoded_value_with_base (encoding, base, f->pc_begin,
/* Range is a length: same value format, no base.  */
907 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
909 if ((_Unwind_Ptr) pc < pc_begin)
911 else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
/* Variant for mixed-encoding objects: each probe must fetch the FDE's
   own encoding from its CIE before decoding.  */
920 static inline const fde *
921 binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
923 struct fde_vector *vec = ob->u.sort;
926 for (lo = 0, hi = vec->count; lo < hi; )
928 size_t i = (lo + hi) / 2;
929 const fde *f = vec->array[i];
930 _Unwind_Ptr pc_begin, pc_range;
931 const unsigned char *p;
934 encoding = get_fde_encoding (f);
935 p = read_encoded_value_with_base (encoding,
936 base_from_object (encoding, ob),
937 f->pc_begin, &pc_begin);
/* Range is a length: same value format, no base.  */
938 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
940 if ((_Unwind_Ptr) pc < pc_begin)
942 else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
/* Find the FDE covering PC inside OB.  First try to build the sorted
   vector (init_object); once sorted, dispatch to the binary-search
   variant matching the object's encoding.  If sorting failed for lack
   of memory, fall back to linear search over the raw FDE data.  */
952 search_object (struct object* ob, void *pc)
954 /* If the data hasn't been sorted, try to do this now. We may have
955 more memory available than last time we tried. */
956 if (! ob->s.b.sorted)
960 /* Despite the above comment, the normal reason to get here is
961 that we've not processed this object before. A quick range
962 check is in order. */
963 if (pc < ob->pc_begin)
969 if (ob->s.b.mixed_encoding)
970 return binary_search_mixed_encoding_fdes (ob, pc);
971 else if (ob->s.b.encoding == DW_EH_PE_absptr)
972 return binary_search_unencoded_fdes (ob, pc);
974 return binary_search_single_encoding_fdes (ob, pc);
978 /* Long slow laborious linear search, cos we've no memory. */
979 if (ob->s.b.from_array)
982 for (p = ob->u.array; *p ; p++)
984 const fde *f = linear_search_fdes (ob, *p, pc);
991 return linear_search_fdes (ob, ob->u.single, pc);
996 _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
1001 init_object_mutex_once ();
1002 __gthread_mutex_lock (&object_mutex);
1004 /* Linear search through the classified objects, to find the one
1005 containing the pc. Note that pc_begin is sorted descending, and
1006 we expect objects to be non-overlapping. */
1007 for (ob = seen_objects; ob; ob = ob->next)
1008 if (pc >= ob->pc_begin)
1010 f = search_object (ob, pc);
1016 /* Classify and search the objects we've not yet processed. */
1017 while ((ob = unseen_objects))
1021 unseen_objects = ob->next;
1022 f = search_object (ob, pc);
1024 /* Insert the object into the classified list. */
1025 for (p = &seen_objects; *p ; p = &(*p)->next)
1026 if ((*p)->pc_begin < ob->pc_begin)
1036 __gthread_mutex_unlock (&object_mutex);
1043 bases->tbase = ob->tbase;
1044 bases->dbase = ob->dbase;
1046 encoding = ob->s.b.encoding;
1047 if (ob->s.b.mixed_encoding)
1048 encoding = get_fde_encoding (f);
1049 read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
1050 f->pc_begin, &func);
1051 bases->func = (void *) func;