1 /* Subroutines needed for unwinding stack frames for exception handling. */
2 /* Copyright (C) 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
3 Contributed by Jason Merrill <jason@cygnus.com>.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 In addition to the permissions in the GNU General Public License, the
13 Free Software Foundation gives you unlimited permission to link the
14 compiled version of this file into combinations with other programs,
15 and to distribute those combinations without any restriction coming
16 from the use of this file. (The General Public License restrictions
17 do apply in other respects; for example, they cover modification of
18 the file, and distribution when not linked into a combined executable.)
21 GNU CC is distributed in the hope that it will be useful,
22 but WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 GNU General Public License for more details.
26 You should have received a copy of the GNU General Public License
27 along with GNU CC; see the file COPYING. If not, write to
28 the Free Software Foundation, 59 Temple Place - Suite 330,
29 Boston, MA 02111-1307, USA. */
35 #include "unwind-pe.h"
36 #include "unwind-dw2-fde.h"
39 /* The unseen_objects list contains objects that have been registered
40 but not yet categorized in any way. The seen_objects list has had
41 its pc_begin and count fields initialized at minimum, and is sorted
42 by decreasing value of pc_begin. */
/* Both lists below are protected by object_mutex.  */
43 static struct object *unseen_objects;
44 static struct object *seen_objects;
46 #ifdef __GTHREAD_MUTEX_INIT
47 static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
/* NOTE(review): fallback declaration for ports with no static mutex
   initializer; the #else directive itself is elided in this listing.  */
49 static __gthread_mutex_t object_mutex;
52 #ifdef __GTHREAD_MUTEX_INIT_FUNCTION
/* Helper handed to __gthread_once: run the port's mutex init hook.  */
54 init_object_mutex (void)
56 __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
/* Initialize object_mutex exactly once, safe against concurrent callers.  */
60 init_object_mutex_once (void)
62 static __gthread_once_t once = __GTHREAD_ONCE_INIT;
63 __gthread_once (&once, init_object_mutex);
/* No init function provided by this gthread port: nothing to do.  */
66 #define init_object_mutex_once()
69 /* Called from crtbegin.o to register the unwind info for an object. */
/* Register the EH frame data starting at BEGIN using caller-supplied
   storage OB.  TBASE/DBASE are the text/data segment bases used later
   for DW_EH_PE_textrel/datarel encodings.  The object is pushed onto
   the unseen_objects list under object_mutex; classification (count,
   pc_begin, encoding) is deferred until the first unwind lookup.  */
72 __register_frame_info_bases (void *begin, struct object *ob,
73 void *tbase, void *dbase)
75 ob->pc_begin = (void *)-1;
80 ob->s.b.encoding = DW_EH_PE_omit;
82 init_object_mutex_once ();
83 __gthread_mutex_lock (&object_mutex);
85 ob->next = unseen_objects;
88 __gthread_mutex_unlock (&object_mutex);
/* Convenience wrapper: register with no text/data bases (absptr case).  */
92 __register_frame_info (void *begin, struct object *ob)
94 __register_frame_info_bases (begin, ob, 0, 0);
/* As above, but allocate the struct object ourselves; the matching
   __deregister_frame frees it.  NOTE(review): the malloc result is not
   checked before use in the visible lines — confirm upstream intent.  */
98 __register_frame (void *begin)
100 struct object *ob = (struct object *) malloc (sizeof (struct object));
101 __register_frame_info (begin, ob);
104 /* Similar, but BEGIN is actually a pointer to a table of unwind entries
105 for different translation units. Called from the file generated by
109 __register_frame_info_table_bases (void *begin, struct object *ob,
110 void *tbase, void *dbase)
112 ob->pc_begin = (void *)-1;
/* from_array marks BEGIN as a table of pointers to FDE arrays rather
   than a single contiguous FDE array.  */
117 ob->s.b.from_array = 1;
118 ob->s.b.encoding = DW_EH_PE_omit;
120 init_object_mutex_once ();
121 __gthread_mutex_lock (&object_mutex);
123 ob->next = unseen_objects;
126 __gthread_mutex_unlock (&object_mutex);
/* Table variant with no text/data base addresses.  */
130 __register_frame_info_table (void *begin, struct object *ob)
132 __register_frame_info_table_bases (begin, ob, 0, 0);
/* Table variant that allocates the struct object itself.  NOTE(review):
   malloc result unchecked in the visible lines, as in __register_frame.  */
136 __register_frame_table (void *begin)
138 struct object *ob = (struct object *) malloc (sizeof (struct object));
139 __register_frame_info_table (begin, ob);
142 /* Called from crtbegin.o to deregister the unwind info for an object. */
/* Unlink and return the object registered for BEGIN so the caller can
   free any associated storage.  Unseen objects match on u.single; seen
   objects match on u.sort->orig_data once sorted, else on u.single.  */
145 __deregister_frame_info (void *begin)
148 struct object *ob = 0;
150 init_object_mutex_once ();
151 __gthread_mutex_lock (&object_mutex);
/* First scan the not-yet-classified list.  */
153 for (p = &unseen_objects; *p ; p = &(*p)->next)
154 if ((*p)->u.single == begin)
/* Then the classified list; a sorted object stores the original FDE
   pointer in u.sort->orig_data.  */
161 for (p = &seen_objects; *p ; p = &(*p)->next)
162 if ((*p)->s.b.sorted)
164 if ((*p)->u.sort->orig_data == begin)
174 if ((*p)->u.single == begin)
182 __gthread_mutex_unlock (&object_mutex);
186 __gthread_mutex_unlock (&object_mutex);
/* Deregister and release an object created by __register_frame.  */
191 __deregister_frame (void *begin)
193 free (__deregister_frame_info (begin));
197 /* Like base_of_encoded_value, but take the base from a struct object
198 instead of an _Unwind_Context. */
201 base_from_object (unsigned char encoding, struct object *ob)
/* An omitted encoding needs no base value.  */
203 if (encoding == DW_EH_PE_omit)
/* 0x70 masks out the base-application bits of the DW_EH_PE encoding.  */
206 switch (encoding & 0x70)
208 case DW_EH_PE_absptr:
212 case DW_EH_PE_textrel:
213 return (_Unwind_Ptr) ob->tbase;
214 case DW_EH_PE_datarel:
215 return (_Unwind_Ptr) ob->dbase;
220 /* Return the FDE pointer encoding from the CIE. */
221 /* ??? This is a subset of extract_cie_info from unwind-dw2.c. */
224 get_cie_encoding (struct dwarf_cie *cie)
226 const unsigned char *aug, *p;
229 aug = cie->augmentation;
/* Without a 'z' augmentation, FDE pointers are plain absolute values.  */
231 return DW_EH_PE_absptr;
233 p = aug + strlen (aug) + 1; /* Skip the augmentation string. */
234 p = read_uleb128 (p, &dummy); /* Skip code alignment. */
235 p = read_sleb128 (p, &dummy); /* Skip data alignment. */
236 p++; /* Skip return address column. */
238 aug++; /* Skip 'z' */
239 p = read_uleb128 (p, &dummy); /* Skip augmentation length. */
/* Walk the remaining augmentation characters until the 'R' (FDE
   pointer-encoding) entry is found or the set is exhausted.  */
242 /* This is what we're looking for. */
245 /* Personality encoding and pointer. */
246 else if (*aug == 'P')
247 p = read_encoded_value_with_base (*p & 0xF, 0, p + 1, &dummy);
/* LSDA encoding byte: skip over it.  */
249 else if (*aug == 'L')
251 /* Otherwise end of string, or unknown augmentation. */
253 return DW_EH_PE_absptr;
/* Return the pc_begin encoding for FDE F, taken from its CIE.  */
259 get_fde_encoding (struct dwarf_fde *f)
261 return get_cie_encoding (get_cie (f));
265 /* Sorting an array of FDEs by address.
266 (Ideally we would have the linker sort the FDEs so we don't have to do
267 it at run time. But the linkers are not yet prepared for this.) */
269 /* Comparison routines. Three variants of increasing complexity. */
/* Cheapest comparator: pc_begin values are raw pointers, compare the
   stored words directly.  OB is unused here.  */
272 fde_unencoded_compare (struct object *ob __attribute__((unused)),
275 return *(saddr *)x->pc_begin - *(saddr *)y->pc_begin;
/* Compare two FDEs that share the object's single pointer encoding;
   decode both pc_begin values against the same base.  */
279 fde_single_encoding_compare (struct object *ob, fde *x, fde *y)
281 _Unwind_Ptr base, x_ptr, y_ptr;
283 base = base_from_object (ob->s.b.encoding, ob);
284 read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
285 read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);
287 return x_ptr - y_ptr;
/* Most general comparator: each FDE may use a different encoding, so
   look up each one's CIE encoding and base before decoding.  */
291 fde_mixed_encoding_compare (struct object *ob, fde *x, fde *y)
293 int x_encoding, y_encoding;
294 _Unwind_Ptr x_ptr, y_ptr;
296 x_encoding = get_fde_encoding (x);
297 read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
298 x->pc_begin, &x_ptr);
300 y_encoding = get_fde_encoding (y);
301 read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
302 y->pc_begin, &y_ptr);
304 return x_ptr - y_ptr;
/* Common signature for the three comparison routines above.  */
307 typedef saddr (*fde_compare_t) (struct object *, fde *, fde *);
310 /* This is a special mix of insertion sort and heap sort, optimized for
311 the data sets that actually occur. They look like
312 101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
313 I.e. a linearly increasing sequence (coming from functions in the text
314 section), with additionally a few unordered elements (coming from functions
315 in gnu_linkonce sections) whose values are higher than the values in the
316 surrounding linear sequence (but not necessarily higher than the values
317 at the end of the linear sequence!).
318 The worst-case total run time is O(N) + O(n log (n)), where N is the
319 total number of FDEs and n is the number of erratic ones. */
/* Scratch state for sorting: LINEAR holds the mostly-ordered run,
   ERRATIC collects out-of-place entries (see the algorithm note above).  */
321 struct fde_accumulator
323 struct fde_vector *linear;
324 struct fde_vector *erratic;
/* Allocate both vectors sized for COUNT fde pointers.  NOTE(review):
   end_fde_sort tolerates a failed erratic allocation (falls back to
   heapsorting linear); the return convention here is in elided lines.  */
328 start_fde_sort (struct fde_accumulator *accu, size_t count)
334 size = sizeof (struct fde_vector) + sizeof (fde *) * count;
335 if ((accu->linear = (struct fde_vector *) malloc (size)))
337 accu->linear->count = 0;
338 if ((accu->erratic = (struct fde_vector *) malloc (size)))
339 accu->erratic->count = 0;
/* Append THIS_FDE to the linear accumulator.  */
347 fde_insert (struct fde_accumulator *accu, fde *this_fde)
350 accu->linear->array[accu->linear->count++] = this_fde;
353 /* Split LINEAR into a linear sequence with low values and an erratic
354 sequence with high values, put the linear one (of longest possible
355 length) into LINEAR and the erratic one into ERRATIC. This is O(N).
357 Because the longest linear sequence we are trying to locate within the
358 incoming LINEAR array can be interspersed with (high valued) erratic
359 entries. We construct a chain indicating the sequenced entries.
360 To avoid having to allocate this chain, we overlay it onto the space of
361 the ERRATIC array during construction. A final pass iterates over the
362 chain to determine what should be placed in the ERRATIC array, and
363 what is the linear sequence. This overlay is safe from aliasing. */
366 fde_split (struct object *ob, fde_compare_t fde_compare,
367 struct fde_vector *linear, struct fde_vector *erratic)
370 size_t count = linear->count;
/* NOTE(review): the initializer below is garbled in this listing;
   from the chain walk at the loop below, chain_end starts at &marker
   (the sentinel terminating the overlaid chain) — confirm upstream.  */
371 fde **chain_end = ▮
374 /* This should optimize out, but it is wise to make sure this assumption
375 is correct. Should these have different sizes, we cannot cast between
376 them and the overlaying onto ERRATIC will not work. */
377 if (sizeof (fde *) != sizeof (fde **))
/* Pass 1: build (overlaid in ERRATIC) a chain linking the entries of
   the ordered subsequence, unlinking members that compare too high.  */
380 for (i = 0; i < count; i++)
384 for (probe = chain_end;
385 probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
388 chain_end = (fde **)erratic->array[probe - linear->array];
389 erratic->array[probe - linear->array] = NULL;
391 erratic->array[i] = (fde *)chain_end;
392 chain_end = &linear->array[i];
395 /* Each entry in LINEAR which is part of the linear sequence we have
396 discovered will correspond to a non-NULL entry in the chain we built in
397 the ERRATIC array. */
398 for (i = j = k = 0; i < count; i++)
399 if (erratic->array[i])
400 linear->array[j++] = linear->array[i];
402 erratic->array[k++] = linear->array[i];
407 /* This is O(n log(n)). BSD/OS defines heapsort in stdlib.h, so we must
408 use a name that does not conflict. */
411 frame_heapsort (struct object *ob, fde_compare_t fde_compare,
412 struct fde_vector *erratic)
414 /* For a description of this algorithm, see:
415 Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
417 fde ** a = erratic->array;
418 /* A portion of the array is called a "heap" if for all i>=0:
419 If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
420 If i and 2i+2 are valid indices, then a[i] >= a[2i+2]. */
421 #define SWAP(x,y) do { fde * tmp = x; x = y; y = tmp; } while (0)
422 size_t n = erratic->count;
/* Phase 1: sift elements down to establish the heap property.  */
428 /* Invariant: a[m..n-1] is a heap. */
430 for (i = m; 2*i+1 < n; )
433 && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
434 && fde_compare (ob, a[2*i+2], a[i]) > 0)
436 SWAP (a[i], a[2*i+2]);
439 else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
441 SWAP (a[i], a[2*i+1]);
/* Phase 2: repeatedly move the maximum to the end and re-sift.  */
450 /* Invariant: a[0..n-1] is a heap. */
453 for (i = 0; 2*i+1 < n; )
456 && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
457 && fde_compare (ob, a[2*i+2], a[i]) > 0)
459 SWAP (a[i], a[2*i+2]);
462 else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
464 SWAP (a[i], a[2*i+1]);
474 /* Merge V1 and V2, both sorted, and put the result into V1. */
476 fde_merge (struct object *ob, fde_compare_t fde_compare,
477 struct fde_vector *v1, struct fde_vector *v2)
488 fde2 = v2->array[i2];
/* Shift v1 entries upward until FDE2's insertion slot is exposed.  */
489 while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
491 v1->array[i1+i2] = v1->array[i1-1];
494 v1->array[i1+i2] = fde2;
496 v1->count += v2->count;
/* Finish sorting the accumulated FDEs: choose a comparator, then
   split/heapsort/merge when both vectors exist, otherwise heapsort
   the linear vector in place.  */
501 end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
503 fde_compare_t fde_compare;
505 if (accu->linear && accu->linear->count != count)
/* Pick the cheapest comparator that is still correct for OB.  */
508 if (ob->s.b.mixed_encoding)
509 fde_compare = fde_mixed_encoding_compare;
510 else if (ob->s.b.encoding == DW_EH_PE_absptr)
511 fde_compare = fde_unencoded_compare;
513 fde_compare = fde_single_encoding_compare;
517 fde_split (ob, fde_compare, accu->linear, accu->erratic);
518 if (accu->linear->count + accu->erratic->count != count)
520 frame_heapsort (ob, fde_compare, accu->erratic);
521 fde_merge (ob, fde_compare, accu->linear, accu->erratic);
522 free (accu->erratic);
526 /* We've not managed to malloc an erratic array,
527 so heap sort in the linear one. */
528 frame_heapsort (ob, fde_compare, accu->linear);
533 /* Update encoding, mixed_encoding, and pc_begin for OB for the
534 fde array beginning at THIS_FDE. Return the number of fdes
535 encountered along the way. */
538 classify_object_over_fdes (struct object *ob, fde *this_fde)
540 struct dwarf_cie *last_cie = 0;
542 int encoding = DW_EH_PE_absptr;
543 _Unwind_Ptr base = 0;
/* Walk the FDE array; a zero length entry terminates it.  */
545 for (; this_fde->length != 0; this_fde = next_fde (this_fde))
547 struct dwarf_cie *this_cie;
548 _Unwind_Ptr mask, pc_begin;
/* CIE_delta == 0 marks a CIE, not an FDE: skip it.  */
551 if (this_fde->CIE_delta == 0)
554 /* Determine the encoding for this FDE. Note mixed encoded
555 objects for later. */
556 this_cie = get_cie (this_fde);
557 if (this_cie != last_cie)
560 encoding = get_cie_encoding (this_cie);
561 base = base_from_object (encoding, ob);
562 if (ob->s.b.encoding == DW_EH_PE_omit)
563 ob->s.b.encoding = encoding;
564 else if (ob->s.b.encoding != encoding)
565 ob->s.b.mixed_encoding = 1;
568 read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
571 /* Take care to ignore link-once functions that were removed.
572 In these cases, the function address will be NULL, but if
573 the encoding is smaller than a pointer a true NULL may not
574 be representable. Assume 0 in the representable bits is NULL. */
575 mask = size_of_encoded_value (encoding);
576 if (mask < sizeof (void *))
577 mask = (1L << (mask << 3)) - 1;
581 if ((pc_begin & mask) == 0)
/* Track the lowest pc_begin seen across the whole object.  */
585 if ((void *)pc_begin < ob->pc_begin)
586 ob->pc_begin = (void *)pc_begin;
/* Second pass after classification: insert every live FDE from the
   array at THIS_FDE into the sort accumulator ACCU.  */
593 add_fdes (struct object *ob, struct fde_accumulator *accu, fde *this_fde)
595 struct dwarf_cie *last_cie = 0;
596 int encoding = ob->s.b.encoding;
597 _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);
599 for (; this_fde->length != 0; this_fde = next_fde (this_fde))
601 struct dwarf_cie *this_cie;
/* Skip CIEs (CIE_delta == 0).  */
604 if (this_fde->CIE_delta == 0)
/* Mixed-encoding objects must re-derive encoding/base per CIE.  */
607 if (ob->s.b.mixed_encoding)
609 /* Determine the encoding for this FDE. Note mixed encoded
610 objects for later. */
611 this_cie = get_cie (this_fde);
612 if (this_cie != last_cie)
615 encoding = get_cie_encoding (this_cie);
616 base = base_from_object (encoding, ob);
/* Fast path for raw pointers: a NULL pc_begin means the function
   was deleted (e.g. discarded link-once section).  */
620 if (encoding == DW_EH_PE_absptr)
622 if (*(_Unwind_Ptr *)this_fde->pc_begin == 0)
627 _Unwind_Ptr pc_begin, mask;
629 read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
632 /* Take care to ignore link-once functions that were removed.
633 In these cases, the function address will be NULL, but if
634 the encoding is smaller than a pointer a true NULL may not
635 be representable. Assume 0 in the representable bits is NULL. */
636 mask = size_of_encoded_value (encoding);
637 if (mask < sizeof (void *))
638 mask = (1L << (mask << 3)) - 1;
642 if ((pc_begin & mask) == 0)
646 fde_insert (accu, this_fde);
650 /* Set up a sorted array of pointers to FDEs for a loaded object. We
651 count up the entries before allocating the array because it's likely to
652 be faster. We can be called multiple times, should we have failed to
653 allocate a sorted fde array on a previous occasion. */
656 init_object (struct object* ob)
658 struct fde_accumulator accu;
661 count = ob->s.b.count;
/* Count (and classify) the FDEs first so the sort vectors can be
   allocated at the right size.  */
664 if (ob->s.b.from_array)
666 fde **p = ob->u.array;
667 for (count = 0; *p; ++p)
668 count += classify_object_over_fdes (ob, *p);
671 count = classify_object_over_fdes (ob, ob->u.single);
673 /* The count field we have in the main struct object is somewhat
674 limited, but should suffice for virtually all cases. If the
675 counted value doesn't fit, re-write a zero. The worst that
676 happens is that we re-count next time -- admittedly non-trivial
677 in that this implies some 2M fdes, but at least we function. */
678 ob->s.b.count = count;
679 if (ob->s.b.count != count)
/* If allocation fails we stay unsorted and fall back to linear
   search; a later call may retry.  */
683 if (!start_fde_sort (&accu, count))
686 if (ob->s.b.from_array)
689 for (p = ob->u.array; *p; ++p)
690 add_fdes (ob, &accu, *p);
693 add_fdes (ob, &accu, ob->u.single);
695 end_fde_sort (ob, &accu, count);
697 /* Save the original fde pointer, since this is the key by which the
698 DSO will deregister the object. */
699 accu.linear->orig_data = ob->u.single;
700 ob->u.sort = accu.linear;
705 /* A linear search through a set of FDEs for the given PC. This is
706 used when there was insufficient memory to allocate and sort an
710 linear_search_fdes (struct object *ob, fde *this_fde, void *pc)
712 struct dwarf_cie *last_cie = 0;
713 int encoding = ob->s.b.encoding;
714 _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);
716 for (; this_fde->length != 0; this_fde = next_fde (this_fde))
718 struct dwarf_cie *this_cie;
719 _Unwind_Ptr pc_begin, pc_range;
/* Skip CIEs (CIE_delta == 0).  */
722 if (this_fde->CIE_delta == 0)
/* Mixed-encoding objects re-derive encoding/base per CIE.  */
725 if (ob->s.b.mixed_encoding)
727 /* Determine the encoding for this FDE. Note mixed encoded
728 objects for later. */
729 this_cie = get_cie (this_fde);
730 if (this_cie != last_cie)
733 encoding = get_cie_encoding (this_cie);
734 base = base_from_object (encoding, ob);
/* Raw pointers: pc_begin/pc_range are stored back to back.  */
738 if (encoding == DW_EH_PE_absptr)
740 pc_begin = ((_Unwind_Ptr *)this_fde->pc_begin)[0];
741 pc_range = ((_Unwind_Ptr *)this_fde->pc_begin)[1];
/* Encoded: read pc_begin with the base, then pc_range as a plain
   value (only the format bits of the encoding apply).  */
750 p = read_encoded_value_with_base (encoding, base,
751 this_fde->pc_begin, &pc_begin);
752 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
754 /* Take care to ignore link-once functions that were removed.
755 In these cases, the function address will be NULL, but if
756 the encoding is smaller than a pointer a true NULL may not
757 be representable. Assume 0 in the representable bits is NULL. */
758 mask = size_of_encoded_value (encoding);
759 if (mask < sizeof (void *))
760 mask = (1L << (mask << 3)) - 1;
764 if ((pc_begin & mask) == 0)
/* Unsigned compare handles both bounds in one test.  */
768 if ((_Unwind_Ptr)pc - pc_begin < pc_range)
775 /* Binary search for an FDE containing the given PC. Here are three
776 implementations of increasing complexity. */
/* Binary search over the sorted vector when pc_begin values are raw
   pointers (DW_EH_PE_absptr).  */
779 binary_search_unencoded_fdes (struct object *ob, void *pc)
781 struct fde_vector *vec = ob->u.sort;
784 for (lo = 0, hi = vec->count; lo < hi; )
786 size_t i = (lo + hi) / 2;
787 fde *f = vec->array[i];
/* pc_begin and pc_range are stored back to back in the FDE.  */
791 pc_begin = ((void **)f->pc_begin)[0];
792 pc_range = ((uaddr *)f->pc_begin)[1];
796 else if (pc >= pc_begin + pc_range)
/* Binary search when all FDEs share one (non-absptr) encoding: decode
   each probe's pc_begin/pc_range against the object's single base.  */
806 binary_search_single_encoding_fdes (struct object *ob, void *pc)
808 struct fde_vector *vec = ob->u.sort;
809 int encoding = ob->s.b.encoding;
810 _Unwind_Ptr base = base_from_object (encoding, ob);
813 for (lo = 0, hi = vec->count; lo < hi; )
815 size_t i = (lo + hi) / 2;
816 fde *f = vec->array[i];
817 _Unwind_Ptr pc_begin, pc_range;
/* pc_range is read with only the format bits of the encoding.  */
820 p = read_encoded_value_with_base (encoding, base, f->pc_begin,
822 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
824 if ((_Unwind_Ptr)pc < pc_begin)
826 else if ((_Unwind_Ptr)pc >= pc_begin + pc_range)
/* Binary search when FDEs use differing encodings: re-derive encoding
   and base per probe from the FDE's own CIE.  */
836 binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
838 struct fde_vector *vec = ob->u.sort;
841 for (lo = 0, hi = vec->count; lo < hi; )
843 size_t i = (lo + hi) / 2;
844 fde *f = vec->array[i];
845 _Unwind_Ptr pc_begin, pc_range;
849 encoding = get_fde_encoding (f);
850 p = read_encoded_value_with_base (encoding,
851 base_from_object (encoding, ob),
852 f->pc_begin, &pc_begin);
853 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
855 if ((_Unwind_Ptr)pc < pc_begin)
857 else if ((_Unwind_Ptr)pc >= pc_begin + pc_range)
/* Find the FDE covering PC within object OB, sorting the object's
   FDEs on first use when memory allows, else searching linearly.  */
867 search_object (struct object* ob, void *pc)
869 /* If the data hasn't been sorted, try to do this now. We may have
870 more memory available than last time we tried. */
871 if (! ob->s.b.sorted)
875 /* Despite the above comment, the normal reason to get here is
876 that we've not processed this object before. A quick range
877 check is in order. */
878 if (pc < ob->pc_begin)
/* Sorted: dispatch to the cheapest applicable binary search.  */
884 if (ob->s.b.mixed_encoding)
885 return binary_search_mixed_encoding_fdes (ob, pc);
886 else if (ob->s.b.encoding == DW_EH_PE_absptr)
887 return binary_search_unencoded_fdes (ob, pc);
889 return binary_search_single_encoding_fdes (ob, pc);
893 /* Long slow laborious linear search, cos we've no memory. */
894 if (ob->s.b.from_array)
897 for (p = ob->u.array; *p ; p++)
899 fde *f = linear_search_fdes (ob, *p, pc);
906 return linear_search_fdes (ob, ob->u.single, pc);
911 _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
916 init_object_mutex_once ();
917 __gthread_mutex_lock (&object_mutex);
919 /* Linear search through the classified objects, to find the one
920 containing the pc. Note that pc_begin is sorted descending, and
921 we expect objects to be non-overlapping. */
922 for (ob = seen_objects; ob; ob = ob->next)
923 if (pc >= ob->pc_begin)
925 f = search_object (ob, pc);
931 /* Classify and search the objects we've not yet processed. */
932 while ((ob = unseen_objects))
936 unseen_objects = ob->next;
937 f = search_object (ob, pc);
939 /* Insert the object into the classified list. */
940 for (p = &seen_objects; *p ; p = &(*p)->next)
941 if ((*p)->pc_begin < ob->pc_begin)
951 __gthread_mutex_unlock (&object_mutex);
957 bases->tbase = ob->tbase;
958 bases->dbase = ob->dbase;
960 encoding = ob->s.b.encoding;
961 if (ob->s.b.mixed_encoding)
962 encoding = get_fde_encoding (f);
963 read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
964 f->pc_begin, (_Unwind_Ptr *)&bases->func);