1 /* Subroutines needed for unwinding stack frames for exception handling. */
2 /* Copyright (C) 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
3 Contributed by Jason Merrill <jason@cygnus.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 In addition to the permissions in the GNU General Public License, the
13 Free Software Foundation gives you unlimited permission to link the
14 compiled version of this file into combinations with other programs,
15 and to distribute those combinations without any restriction coming
16 from the use of this file. (The General Public License restrictions
17 do apply in other respects; for example, they cover modification of
18 the file, and distribution when not linked into a combine
21 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
22 WARRANTY; without even the implied warranty of MERCHANTABILITY or
23 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
26 You should have received a copy of the GNU General Public License
27 along with GCC; see the file COPYING. If not, write to the Free
28 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
35 #define NO_BASE_OF_ENCODED_VALUE
36 #include "unwind-pe.h"
37 #include "unwind-dw2-fde.h"
/* NOTE(review): this listing is elided -- the left-column line numbers
   jump, so braces and several statements of the original file are
   missing from view.  Comments below describe only what is visible;
   do not treat this text as compilable.  */
40 /* The unseen_objects list contains objects that have been registered
41 but not yet categorized in any way. The seen_objects list has had
42 it's pc_begin and count fields initialized at minimum, and is sorted
43 by decreasing value of pc_begin. */
44 static struct object *unseen_objects;
45 static struct object *seen_objects;
/* The single lock guarding both lists above.  When the gthread port
   supplies a static initializer, use it ...  */
47 #ifdef __GTHREAD_MUTEX_INIT
48 static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
/* ... otherwise fall back to run-time initialization (elided #else
   presumably sits between these two definitions).  */
50 static __gthread_mutex_t object_mutex;
53 #ifdef __GTHREAD_MUTEX_INIT_FUNCTION
/* One-time run-time initializer for object_mutex, driven through
   __gthread_once so concurrent first callers are safe.  */
55 init_object_mutex (void)
57 __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
61 init_object_mutex_once (void)
63 static __gthread_once_t once = __GTHREAD_ONCE_INIT;
64 __gthread_once (&once, init_object_mutex);
/* No init function available: the macro expands to nothing.  */
67 #define init_object_mutex_once()
70 /* Called from crtbegin.o to register the unwind info for an object. */
/* Records BEGIN (the .eh_frame start) plus text/data base addresses in
   OB and pushes OB onto the unseen_objects list under object_mutex.
   NOTE(review): lines 75, 77-80, 82, 85, 87-88, 90 of the original are
   elided here (field assignments, braces, list-head update).  */
73 __register_frame_info_bases (void *begin, struct object *ob,
74 void *tbase, void *dbase)
/* pc_begin starts at all-ones so classification can minimize into it.  */
76 ob->pc_begin = (void *)-1;
/* DW_EH_PE_omit marks the encoding as not-yet-determined.  */
81 ob->s.b.encoding = DW_EH_PE_omit;
83 init_object_mutex_once ();
84 __gthread_mutex_lock (&object_mutex);
86 ob->next = unseen_objects;
89 __gthread_mutex_unlock (&object_mutex);
/* Compatibility entry point: register with no text/data bases.  */
93 __register_frame_info (void *begin, struct object *ob)
95 __register_frame_info_bases (begin, ob, 0, 0);
/* Register BEGIN, allocating the bookkeeping object ourselves.
   NOTE(review): the malloc result is passed on unchecked, as visible;
   whether the elided original guards it cannot be told from here.  */
99 __register_frame (void *begin)
101 struct object *ob = (struct object *) malloc (sizeof (struct object));
102 __register_frame_info (begin, ob);
105 /* Similar, but BEGIN is actually a pointer to a table of unwind entries
106 for different translation units. Called from the file generated by
/* Same shape as __register_frame_info_bases, but from_array is set so
   later passes treat u.array as a NULL-terminated table of fde arrays.
   NOTE(review): several original lines are elided (braces, tbase/dbase
   stores, list-head update).  */
110 __register_frame_info_table_bases (void *begin, struct object *ob,
111 void *tbase, void *dbase)
113 ob->pc_begin = (void *)-1;
118 ob->s.b.from_array = 1;
119 ob->s.b.encoding = DW_EH_PE_omit;
121 init_object_mutex_once ();
122 __gthread_mutex_lock (&object_mutex);
124 ob->next = unseen_objects;
127 __gthread_mutex_unlock (&object_mutex);
/* Table registration with no text/data bases.  */
131 __register_frame_info_table (void *begin, struct object *ob)
133 __register_frame_info_table_bases (begin, ob, 0, 0);
/* Register a table of unwind-entry arrays, allocating the object here.
   NOTE(review): malloc result unchecked in the visible text.  */
137 __register_frame_table (void *begin)
139 struct object *ob = (struct object *) malloc (sizeof (struct object));
140 __register_frame_info_table (begin, ob);
143 /* Called from crtbegin.o to deregister the unwind info for an object. */
144 /* ??? Glibc has for a while now exported __register_frame_info and
145 __deregister_frame_info. If we call __register_frame_info_bases
146 from crtbegin (wherein it is declared weak), and this object does
147 not get pulled from libgcc.a for other reasons, then the
148 invocation of __deregister_frame_info will be resolved from glibc.
149 Since the registration did not happen there, we'll abort.
151 Therefore, declare a new deregistration entry point that does the
152 exact same thing, but will resolve to the same library as
153 implements __register_frame_info_bases. */
/* Unlink and return the object registered for BEGIN, searching first
   the unseen list, then the seen list (where a sorted object is keyed
   by u.sort->orig_data instead of u.single).
   NOTE(review): the unlink/return statements between the matches and
   the unlock calls are elided from this listing, as is the tail of the
   function (presumably the not-found path).  */
156 __deregister_frame_info_bases (void *begin)
159 struct object *ob = 0;
161 init_object_mutex_once ();
162 __gthread_mutex_lock (&object_mutex);
/* Pass 1: objects never classified -- compare the raw u.single key.  */
164 for (p = &unseen_objects; *p ; p = &(*p)->next)
165 if ((*p)->u.single == begin)
/* Pass 2: classified objects; sorted ones stashed the original fde
   pointer in u.sort->orig_data.  */
172 for (p = &seen_objects; *p ; p = &(*p)->next)
173 if ((*p)->s.b.sorted)
175 if ((*p)->u.sort->orig_data == begin)
185 if ((*p)->u.single == begin)
193 __gthread_mutex_unlock (&object_mutex);
197 __gthread_mutex_unlock (&object_mutex);
/* Compatibility wrapper; see the glibc note above the _bases variant.  */
202 __deregister_frame_info (void *begin)
204 return __deregister_frame_info_bases (begin);
/* Counterpart of __register_frame: frees the object malloc'd there,
   using the pointer returned by deregistration.  */
208 __deregister_frame (void *begin)
210 free (__deregister_frame_info (begin));
214 /* Like base_of_encoded_value, but take the base from a struct object
215 instead of an _Unwind_Context. */
/* Maps the DW_EH_PE_* base-selector bits (mask 0x70) of ENCODING to a
   base address taken from OB.  NOTE(review): the returns for the
   absptr/aligned cases and any pcrel/funcrel/default handling are
   elided from this listing.  */
218 base_from_object (unsigned char encoding, struct object *ob)
220 if (encoding == DW_EH_PE_omit)
223 switch (encoding & 0x70)
225 case DW_EH_PE_absptr:
227 case DW_EH_PE_aligned:
230 case DW_EH_PE_textrel:
231 return (_Unwind_Ptr) ob->tbase;
232 case DW_EH_PE_datarel:
233 return (_Unwind_Ptr) ob->dbase;
238 /* Return the FDE pointer encoding from the CIE. */
239 /* ??? This is a subset of extract_cie_info from unwind-dw2.c. */
/* Walks the CIE header: skips the augmentation string, code/data
   alignment ULEB/SLEB values and the return-address column, then scans
   the 'z' augmentation data for the 'R' entry (the FDE encoding),
   stepping over 'P' (personality) and 'L' (LSDA) entries on the way.
   Falls back to DW_EH_PE_absptr when no 'z' augmentation is present or
   the augmentation is unknown.  NOTE(review): the loop framing, the
   'R' return and several declarations are elided from this listing.  */
242 get_cie_encoding (struct dwarf_cie *cie)
244 const unsigned char *aug, *p;
249 aug = cie->augmentation;
251 return DW_EH_PE_absptr;
253 p = aug + strlen (aug) + 1; /* Skip the augmentation string. */
254 p = read_uleb128 (p, &utmp); /* Skip code alignment. */
255 p = read_sleb128 (p, &stmp); /* Skip data alignment. */
256 p++; /* Skip return address column. */
258 aug++; /* Skip 'z' */
259 p = read_uleb128 (p, &utmp); /* Skip augmentation length. */
262 /* This is what we're looking for. */
265 /* Personality encoding and pointer. */
266 else if (*aug == 'P')
268 /* ??? Avoid dereferencing indirect pointers, since we're
269 faking the base address. Gotta keep DW_EH_PE_aligned
271 p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
/* LSDA encoding byte -- skipped (its handling is elided here).  */
274 else if (*aug == 'L')
276 /* Otherwise end of string, or unknown augmentation. */
278 return DW_EH_PE_absptr;
/* Encoding of an FDE's pointers = encoding declared by its CIE.  */
284 get_fde_encoding (struct dwarf_fde *f)
286 return get_cie_encoding (get_cie (f));
290 /* Sorting an array of FDEs by address.
291 (Ideally we would have the linker sort the FDEs so we don't have to do
292 it at run time. But the linkers are not yet prepared for this.) */
294 /* Comparison routines. Three variants of increasing complexity. */
/* Fast path: pc_begin is a raw native pointer (DW_EH_PE_absptr), so a
   signed subtraction of the two addresses orders them directly.  */
297 fde_unencoded_compare (struct object *ob __attribute__((unused)),
300 return *(saddr *)x->pc_begin - *(saddr *)y->pc_begin;
/* All FDEs in OB share one encoding: decode both pc_begin values with
   the object-wide base, then order by the decoded addresses.  */
304 fde_single_encoding_compare (struct object *ob, fde *x, fde *y)
306 _Unwind_Ptr base, x_ptr, y_ptr;
308 base = base_from_object (ob->s.b.encoding, ob);
309 read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
310 read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);
312 return x_ptr - y_ptr;
/* Slowest variant: each FDE may use a different encoding, so the CIE
   must be consulted per FDE before decoding its pc_begin.  */
316 fde_mixed_encoding_compare (struct object *ob, fde *x, fde *y)
318 int x_encoding, y_encoding;
319 _Unwind_Ptr x_ptr, y_ptr;
321 x_encoding = get_fde_encoding (x);
322 read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
323 x->pc_begin, &x_ptr);
325 y_encoding = get_fde_encoding (y);
326 read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
327 y->pc_begin, &y_ptr);
329 return x_ptr - y_ptr;
/* Common signature for the three comparators above.  */
332 typedef saddr (*fde_compare_t) (struct object *, fde *, fde *);
335 /* This is a special mix of insertion sort and heap sort, optimized for
336 the data sets that actually occur. They look like
337 101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
338 I.e. a linearly increasing sequence (coming from functions in the text
339 section), with additionally a few unordered elements (coming from functions
340 in gnu_linkonce sections) whose values are higher than the values in the
341 surrounding linear sequence (but not necessarily higher than the values
342 at the end of the linear sequence!).
343 The worst-case total run time is O(N) + O(n log (n)), where N is the
344 total number of FDEs and n is the number of erratic ones. */
/* Scratch state for the sort: the already-ordered run and the
   out-of-order leftovers, kept in two parallel vectors.  */
346 struct fde_accumulator
348 struct fde_vector *linear;
349 struct fde_vector *erratic;
/* Allocate the two COUNT-element vectors for a sort.  NOTE(review):
   the elided lines presumably include the zero-count early-out and the
   return value indicating whether the linear vector was obtained (the
   caller init_object tests start_fde_sort's result).  */
353 start_fde_sort (struct fde_accumulator *accu, size_t count)
359 size = sizeof (struct fde_vector) + sizeof (fde *) * count;
360 if ((accu->linear = (struct fde_vector *) malloc (size)))
362 accu->linear->count = 0;
/* The erratic vector is optional -- end_fde_sort copes without it.  */
363 if ((accu->erratic = (struct fde_vector *) malloc (size)))
364 accu->erratic->count = 0;
/* Append THIS_FDE to the linear vector (capacity reserved up front).  */
372 fde_insert (struct fde_accumulator *accu, fde *this_fde)
375 accu->linear->array[accu->linear->count++] = this_fde;
378 /* Split LINEAR into a linear sequence with low values and an erratic
379 sequence with high values, put the linear one (of longest possible
380 length) into LINEAR and the erratic one into ERRATIC. This is O(N).
382 Because the longest linear sequence we are trying to locate within the
383 incoming LINEAR array can be interspersed with (high valued) erratic
384 entries. We construct a chain indicating the sequenced entries.
385 To avoid having to allocate this chain, we overlay it onto the space of
386 the ERRATIC array during construction. A final pass iterates over the
387 chain to determine what should be placed in the ERRATIC array, and
388 what is the linear sequence. This overlay is safe from aliasing. */
391 fde_split (struct object *ob, fde_compare_t fde_compare,
392 struct fde_vector *linear, struct fde_vector *erratic)
395 size_t count = linear->count;
/* NOTE(review): the token after '=' is garbled in this listing ('▮');
   from the loop below, chain_end starts as the address of the 'marker'
   sentinel -- restore from the upstream source, do not guess.  */
396 fde **chain_end = ▮
399 /* This should optimize out, but it is wise to make sure this assumption
400 is correct. Should these have different sizes, we cannot cast between
401 them and the overlaying onto ERRATIC will not work. */
402 if (sizeof (fde *) != sizeof (fde **))
/* Pass 1: walk backwards along the chain, dropping chained entries that
   compare greater than linear->array[i], then link i in.  */
405 for (i = 0; i < count; i++)
409 for (probe = chain_end;
410 probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
413 chain_end = (fde **)erratic->array[probe - linear->array];
414 erratic->array[probe - linear->array] = NULL;
416 erratic->array[i] = (fde *)chain_end;
417 chain_end = &linear->array[i];
420 /* Each entry in LINEAR which is part of the linear sequence we have
421 discovered will correspond to a non-NULL entry in the chain we built in
422 the ERRATIC array. */
/* Pass 2: compact -- chained entries stay in LINEAR, the rest move to
   ERRATIC.  NOTE(review): the trailing count assignments are elided.  */
423 for (i = j = k = 0; i < count; i++)
424 if (erratic->array[i])
425 linear->array[j++] = linear->array[i];
427 erratic->array[k++] = linear->array[i];
432 /* This is O(n log(n)). BSD/OS defines heapsort in stdlib.h, so we must
433 use a name that does not conflict. */
436 frame_heapsort (struct object *ob, fde_compare_t fde_compare,
437 struct fde_vector *erratic)
439 /* For a description of this algorithm, see:
440 Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
442 fde ** a = erratic->array;
443 /* A portion of the array is called a "heap" if for all i>=0:
444 If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
445 If i and 2i+2 are valid indices, then a[i] >= a[2i+2]. */
446 #define SWAP(x,y) do { fde * tmp = x; x = y; y = tmp; } while (0)
447 size_t n = erratic->count;
/* Phase 1 (heapify): sift each node down until a[m..n-1] is a heap.
   NOTE(review): the enclosing loop over m, branch framing, and the
   child-index updates are elided from this listing.  */
453 /* Invariant: a[m..n-1] is a heap. */
455 for (i = m; 2*i+1 < n; )
458 && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
459 && fde_compare (ob, a[2*i+2], a[i]) > 0)
461 SWAP (a[i], a[2*i+2]);
464 else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
466 SWAP (a[i], a[2*i+1]);
/* Phase 2 (extract): repeatedly swap the max to the end and re-sift
   the root; same elisions as above apply.  */
475 /* Invariant: a[0..n-1] is a heap. */
478 for (i = 0; 2*i+1 < n; )
481 && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
482 && fde_compare (ob, a[2*i+2], a[i]) > 0)
484 SWAP (a[i], a[2*i+2]);
487 else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
489 SWAP (a[i], a[2*i+1]);
499 /* Merge V1 and V2, both sorted, and put the result into V1. */
/* Merges from the high end: each V2 element is placed after shifting
   larger V1 elements upward, so V1 must have room for both counts.
   NOTE(review): the outer loop over i2 and the index decrements are
   elided from this listing.  */
501 fde_merge (struct object *ob, fde_compare_t fde_compare,
502 struct fde_vector *v1, struct fde_vector *v2)
513 fde2 = v2->array[i2];
514 while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
516 v1->array[i1+i2] = v1->array[i1-1];
519 v1->array[i1+i2] = fde2;
521 v1->count += v2->count;
/* Finish the sort started by start_fde_sort: pick the comparator that
   matches OB's encoding situation, split/heapsort/merge when the
   erratic scratch vector exists, else heapsort the linear vector in
   place.  NOTE(review): the sanity-check bodies (presumably aborts)
   and branch framing are elided from this listing.  */
526 end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
528 fde_compare_t fde_compare;
530 if (accu->linear && accu->linear->count != count)
/* Comparator selection mirrors search_object's dispatch below.  */
533 if (ob->s.b.mixed_encoding)
534 fde_compare = fde_mixed_encoding_compare;
535 else if (ob->s.b.encoding == DW_EH_PE_absptr)
536 fde_compare = fde_unencoded_compare;
538 fde_compare = fde_single_encoding_compare;
542 fde_split (ob, fde_compare, accu->linear, accu->erratic);
543 if (accu->linear->count + accu->erratic->count != count)
545 frame_heapsort (ob, fde_compare, accu->erratic);
546 fde_merge (ob, fde_compare, accu->linear, accu->erratic);
547 free (accu->erratic);
551 /* We've not managed to malloc an erratic array,
552 so heap sort in the linear one. */
553 frame_heapsort (ob, fde_compare, accu->linear);
558 /* Update encoding, mixed_encoding, and pc_begin for OB for the
559 fde array beginning at THIS_FDE. Return the number of fdes
560 encountered along the way. */
563 classify_object_over_fdes (struct object *ob, fde *this_fde)
565 struct dwarf_cie *last_cie = 0;
567 int encoding = DW_EH_PE_absptr;
568 _Unwind_Ptr base = 0;
/* Walk the FDE array; next_fde steps by each entry's length field and
   a zero length terminates the array.  NOTE(review): the count
   variable, CIE-skip 'continue', and the return are elided here.  */
570 for (; this_fde->length != 0; this_fde = next_fde (this_fde))
572 struct dwarf_cie *this_cie;
573 _Unwind_Ptr mask, pc_begin;
/* CIE_delta == 0 means this entry is itself a CIE, not an FDE.  */
576 if (this_fde->CIE_delta == 0)
579 /* Determine the encoding for this FDE. Note mixed encoded
580 objects for later. */
581 this_cie = get_cie (this_fde);
582 if (this_cie != last_cie)
585 encoding = get_cie_encoding (this_cie);
586 base = base_from_object (encoding, ob);
587 if (ob->s.b.encoding == DW_EH_PE_omit)
588 ob->s.b.encoding = encoding;
589 else if (ob->s.b.encoding != encoding)
590 ob->s.b.mixed_encoding = 1;
593 read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
596 /* Take care to ignore link-once functions that were removed.
597 In these cases, the function address will be NULL, but if
598 the encoding is smaller than a pointer a true NULL may not
599 be representable. Assume 0 in the representable bits is NULL. */
600 mask = size_of_encoded_value (encoding);
601 if (mask < sizeof (void *))
602 mask = (1L << (mask << 3)) - 1;
606 if ((pc_begin & mask) == 0)
/* Track the lowest function address seen for the range check in
   search_object.  */
610 if ((void *)pc_begin < ob->pc_begin)
611 ob->pc_begin = (void *)pc_begin;
/* Second pass over an fde array: insert every real, non-deleted FDE
   into the accumulator.  Mirrors classify_object_over_fdes, but the
   object-wide encoding is already known unless mixed.  NOTE(review):
   skip 'continue's and brace lines are elided from this listing.  */
618 add_fdes (struct object *ob, struct fde_accumulator *accu, fde *this_fde)
620 struct dwarf_cie *last_cie = 0;
621 int encoding = ob->s.b.encoding;
622 _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);
624 for (; this_fde->length != 0; this_fde = next_fde (this_fde))
626 struct dwarf_cie *this_cie;
/* Skip CIEs embedded in the array.  */
629 if (this_fde->CIE_delta == 0)
632 if (ob->s.b.mixed_encoding)
634 /* Determine the encoding for this FDE. Note mixed encoded
635 objects for later. */
636 this_cie = get_cie (this_fde);
637 if (this_cie != last_cie)
640 encoding = get_cie_encoding (this_cie);
641 base = base_from_object (encoding, ob);
/* Deleted link-once function: absptr case can test the pointer
   directly ...  */
645 if (encoding == DW_EH_PE_absptr)
647 if (*(_Unwind_Ptr *)this_fde->pc_begin == 0)
/* ... otherwise decode and test the representable bits.  */
652 _Unwind_Ptr pc_begin, mask;
654 read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
657 /* Take care to ignore link-once functions that were removed.
658 In these cases, the function address will be NULL, but if
659 the encoding is smaller than a pointer a true NULL may not
660 be representable. Assume 0 in the representable bits is NULL. */
661 mask = size_of_encoded_value (encoding);
662 if (mask < sizeof (void *))
663 mask = (1L << (mask << 3)) - 1;
667 if ((pc_begin & mask) == 0)
671 fde_insert (accu, this_fde);
675 /* Set up a sorted array of pointers to FDEs for a loaded object. We
676 count up the entries before allocating the array because it's likely to
677 be faster. We can be called multiple times, should we have failed to
678 allocate a sorted fde array on a previous occasion. */
681 init_object (struct object* ob)
683 struct fde_accumulator accu;
/* Reuse a previously computed count when available (nonzero).  */
686 count = ob->s.b.count;
689 if (ob->s.b.from_array)
691 fde **p = ob->u.array;
692 for (count = 0; *p; ++p)
693 count += classify_object_over_fdes (ob, *p);
696 count = classify_object_over_fdes (ob, ob->u.single);
698 /* The count field we have in the main struct object is somewhat
699 limited, but should suffice for virtually all cases. If the
700 counted value doesn't fit, re-write a zero. The worst that
701 happens is that we re-count next time -- admittedly non-trivial
702 in that this implies some 2M fdes, but at least we function. */
703 ob->s.b.count = count;
704 if (ob->s.b.count != count)
/* On allocation failure, bail out and leave the object unsorted;
   search_object falls back to linear search.  NOTE(review): the
   returns/brace lines are elided from this listing.  */
708 if (!start_fde_sort (&accu, count))
711 if (ob->s.b.from_array)
714 for (p = ob->u.array; *p; ++p)
715 add_fdes (ob, &accu, *p);
718 add_fdes (ob, &accu, ob->u.single);
720 end_fde_sort (ob, &accu, count);
722 /* Save the original fde pointer, since this is the key by which the
723 DSO will deregister the object. */
724 accu.linear->orig_data = ob->u.single;
725 ob->u.sort = accu.linear;
730 /* A linear search through a set of FDEs for the given PC. This is
731 used when there was insufficient memory to allocate and sort an
735 linear_search_fdes (struct object *ob, fde *this_fde, void *pc)
737 struct dwarf_cie *last_cie = 0;
738 int encoding = ob->s.b.encoding;
739 _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);
/* Same walk/skip structure as add_fdes; returns the matching FDE
   (return statements elided from this listing).  */
741 for (; this_fde->length != 0; this_fde = next_fde (this_fde))
743 struct dwarf_cie *this_cie;
744 _Unwind_Ptr pc_begin, pc_range;
747 if (this_fde->CIE_delta == 0)
750 if (ob->s.b.mixed_encoding)
752 /* Determine the encoding for this FDE. Note mixed encoded
753 objects for later. */
754 this_cie = get_cie (this_fde);
755 if (this_cie != last_cie)
758 encoding = get_cie_encoding (this_cie);
759 base = base_from_object (encoding, ob);
/* absptr layout: [0] = start address, [1] = byte range.  */
763 if (encoding == DW_EH_PE_absptr)
765 pc_begin = ((_Unwind_Ptr *)this_fde->pc_begin)[0];
766 pc_range = ((_Unwind_Ptr *)this_fde->pc_begin)[1];
/* Encoded layout: the range uses the value format only (low nibble),
   with no base applied.  */
775 p = read_encoded_value_with_base (encoding, base,
776 this_fde->pc_begin, &pc_begin);
777 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
779 /* Take care to ignore link-once functions that were removed.
780 In these cases, the function address will be NULL, but if
781 the encoding is smaller than a pointer a true NULL may not
782 be representable. Assume 0 in the representable bits is NULL. */
783 mask = size_of_encoded_value (encoding);
784 if (mask < sizeof (void *))
785 mask = (1L << (mask << 3)) - 1;
789 if ((pc_begin & mask) == 0)
/* Unsigned compare doubles as a 'pc >= pc_begin' test.  */
793 if ((_Unwind_Ptr)pc - pc_begin < pc_range)
800 /* Binary search for an FDE containing the given PC. Here are three
801 implementations of increasing complexity. */
/* absptr variant: pc_begin/pc_range are raw words in each FDE.
   NOTE(review): the hi/lo adjustments and the hit/miss returns are
   elided from this listing.  */
804 binary_search_unencoded_fdes (struct object *ob, void *pc)
806 struct fde_vector *vec = ob->u.sort;
809 for (lo = 0, hi = vec->count; lo < hi; )
811 size_t i = (lo + hi) / 2;
812 fde *f = vec->array[i];
816 pc_begin = ((void **)f->pc_begin)[0];
817 pc_range = ((uaddr *)f->pc_begin)[1];
821 else if (pc >= pc_begin + pc_range)
/* Single-encoding variant: decode each probe's pc_begin/pc_range with
   the object-wide encoding and base.  NOTE(review): the hi/lo updates
   and returns are elided from this listing.  */
831 binary_search_single_encoding_fdes (struct object *ob, void *pc)
833 struct fde_vector *vec = ob->u.sort;
834 int encoding = ob->s.b.encoding;
835 _Unwind_Ptr base = base_from_object (encoding, ob);
838 for (lo = 0, hi = vec->count; lo < hi; )
840 size_t i = (lo + hi) / 2;
841 fde *f = vec->array[i];
842 _Unwind_Ptr pc_begin, pc_range;
845 p = read_encoded_value_with_base (encoding, base, f->pc_begin,
847 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
849 if ((_Unwind_Ptr)pc < pc_begin)
851 else if ((_Unwind_Ptr)pc >= pc_begin + pc_range)
/* Mixed-encoding variant: look up each probe FDE's own encoding via
   its CIE before decoding.  NOTE(review): hi/lo updates and returns
   are elided from this listing.  */
861 binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
863 struct fde_vector *vec = ob->u.sort;
866 for (lo = 0, hi = vec->count; lo < hi; )
868 size_t i = (lo + hi) / 2;
869 fde *f = vec->array[i];
870 _Unwind_Ptr pc_begin, pc_range;
874 encoding = get_fde_encoding (f);
875 p = read_encoded_value_with_base (encoding,
876 base_from_object (encoding, ob),
877 f->pc_begin, &pc_begin);
878 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
880 if ((_Unwind_Ptr)pc < pc_begin)
882 else if ((_Unwind_Ptr)pc >= pc_begin + pc_range)
/* Find the FDE covering PC within OB: sort first if possible and
   dispatch to the matching binary search, otherwise fall back to the
   linear searches.  NOTE(review): the init_object call, range-check
   return, and loop framing are elided from this listing.  */
892 search_object (struct object* ob, void *pc)
894 /* If the data hasn't been sorted, try to do this now. We may have
895 more memory available than last time we tried. */
896 if (! ob->s.b.sorted)
900 /* Despite the above comment, the normal reason to get here is
901 that we've not processed this object before. A quick range
902 check is in order. */
903 if (pc < ob->pc_begin)
/* Sorted path: same three-way dispatch as end_fde_sort.  */
909 if (ob->s.b.mixed_encoding)
910 return binary_search_mixed_encoding_fdes (ob, pc);
911 else if (ob->s.b.encoding == DW_EH_PE_absptr)
912 return binary_search_unencoded_fdes (ob, pc);
914 return binary_search_single_encoding_fdes (ob, pc);
918 /* Long slow labourious linear search, cos we've no memory. */
919 if (ob->s.b.from_array)
922 for (p = ob->u.array; *p ; p++)
924 fde *f = linear_search_fdes (ob, *p, pc);
931 return linear_search_fdes (ob, ob->u.single, pc);
936 _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
941 init_object_mutex_once ();
942 __gthread_mutex_lock (&object_mutex);
944 /* Linear search through the classified objects, to find the one
945 containing the pc. Note that pc_begin is sorted descending, and
946 we expect objects to be non-overlapping. */
947 for (ob = seen_objects; ob; ob = ob->next)
948 if (pc >= ob->pc_begin)
950 f = search_object (ob, pc);
956 /* Classify and search the objects we've not yet processed. */
957 while ((ob = unseen_objects))
961 unseen_objects = ob->next;
962 f = search_object (ob, pc);
964 /* Insert the object into the classified list. */
965 for (p = &seen_objects; *p ; p = &(*p)->next)
966 if ((*p)->pc_begin < ob->pc_begin)
976 __gthread_mutex_unlock (&object_mutex);
982 bases->tbase = ob->tbase;
983 bases->dbase = ob->dbase;
985 encoding = ob->s.b.encoding;
986 if (ob->s.b.mixed_encoding)
987 encoding = get_fde_encoding (f);
988 read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
989 f->pc_begin, (_Unwind_Ptr *)&bases->func);