X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=libjava%2Fboehm.cc;h=855d23cb024a83180e289c3a9e104a35b42be351;hb=2ab9be2b4e373b344d25f143372fd157ff8ce853;hp=af5f979197d3214f68422d21747dac9d06b32f25;hpb=f2941b1f970c1c9551e8ed6ae869a9f6e7c0b2cc;p=pf3gnuchains%2Fgcc-fork.git diff --git a/libjava/boehm.cc b/libjava/boehm.cc index af5f979197d..855d23cb024 100644 --- a/libjava/boehm.cc +++ b/libjava/boehm.cc @@ -1,6 +1,7 @@ // boehm.cc - interface between libjava and Boehm GC. -/* Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003 Free Software Foundation +/* Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 + Free Software Foundation This file is part of libgcj. @@ -25,32 +26,51 @@ details. */ #undef TRUE #undef FALSE +// We include two autoconf headers. Avoid multiple definition warnings. +#undef PACKAGE_NAME +#undef PACKAGE_STRING +#undef PACKAGE_TARNAME +#undef PACKAGE_VERSION + +#ifdef HAVE_DLFCN_H +#undef _GNU_SOURCE +#define _GNU_SOURCE +#include +#endif + extern "C" { -#include +#include + +// Set GC_DEBUG before including gc.h! +#ifdef LIBGCJ_GC_DEBUG +# define GC_DEBUG +#endif + +#include #include +#include // GC_finalize_all declaration. #ifdef THREAD_LOCAL_ALLOC # define GC_REDIRECT_TO_LOCAL # include #endif - // These aren't declared in any Boehm GC header. - void GC_finalize_all (void); - ptr_t GC_debug_generic_malloc (size_t size, int k, GC_EXTRA_PARAMS); + // From boehm's misc.c + void GC_enable(); + void GC_disable(); }; -#define MAYBE_MARK(Obj, Top, Limit, Source, Exit) \ - Top=GC_MARK_AND_PUSH((GC_PTR)Obj, Top, Limit, (GC_PTR *)Source) +#define MAYBE_MARK(Obj, Top, Limit, Source) \ + Top=GC_MARK_AND_PUSH((GC_PTR) Obj, Top, Limit, (GC_PTR *) Source) // `kind' index used when allocating Java arrays. static int array_kind_x; // Freelist used for Java arrays. -static ptr_t *array_free_list; +static void **array_free_list; -// Lock used to protect access to Boehm's GC_enable/GC_disable functions. -static _Jv_Mutex_t disable_gc_mutex; +static int _Jv_GC_has_static_roots (const char *filename, void *, size_t); @@ -58,15 +78,14 @@ static _Jv_Mutex_t disable_gc_mutex; // object. We use `void *' arguments and return, and not what the // Boehm GC wants, to avoid pollution in our headers. void * -_Jv_MarkObj (void *addr, void *msp, void *msl, void * /* env */) +_Jv_MarkObj (void *addr, void *msp, void *msl, void *env) { - mse *mark_stack_ptr = (mse *) msp; - mse *mark_stack_limit = (mse *) msl; - jobject obj = (jobject) addr; + struct GC_ms_entry *mark_stack_ptr = (struct GC_ms_entry *)msp; + struct GC_ms_entry *mark_stack_limit = (struct GC_ms_entry *)msl; - // FIXME: if env is 1, this object was allocated through the debug - // interface, and addr points to the beginning of the debug header. - // In that case, we should really add the size of the header to addr. + if (env == (void *)1) /* Object allocated with debug allocator. */ + addr = (GC_PTR)GC_USR_PTR_FROM_BASE(addr); + jobject obj = (jobject) addr; _Jv_VTable *dt = *(_Jv_VTable **) addr; // The object might not yet have its vtable set, or it might @@ -78,16 +97,16 @@ _Jv_MarkObj (void *addr, void *msp, void *msl, void * /* env */) if (__builtin_expect (! dt || !(dt -> get_finalizer()), false)) return mark_stack_ptr; jclass klass = dt->clas; - ptr_t p; + GC_PTR p; + + p = (GC_PTR) dt; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, obj); # ifndef JV_HASH_SYNCHRONIZATION // Every object has a sync_info pointer. 
- p = (ptr_t) obj->sync_info; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, obj, o1label); + p = (GC_PTR) obj->sync_info; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, obj); # endif - // Mark the object's class. - p = (ptr_t) klass; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, obj, o2label); if (__builtin_expect (klass == &java::lang::Class::class$, false)) { @@ -105,149 +124,57 @@ _Jv_MarkObj (void *addr, void *msp, void *msl, void * /* env */) // of our root set. - HB jclass c = (jclass) addr; - p = (ptr_t) c->name; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c3label); - p = (ptr_t) c->superclass; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c4label); - for (int i = 0; i < c->constants.size; ++i) - { - /* FIXME: We could make this more precise by using the tags -KKT */ - p = (ptr_t) c->constants.data[i].p; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c5label); - } + p = (GC_PTR) c->name; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); + p = (GC_PTR) c->superclass; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); -#ifdef INTERPRETER - if (_Jv_IsInterpretedClass (c)) - { - p = (ptr_t) c->constants.tags; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c5alabel); - p = (ptr_t) c->constants.data; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c5blabel); - p = (ptr_t) c->vtable; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c5clabel); - } -#endif + p = (GC_PTR) c->constants.tags; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); + p = (GC_PTR) c->constants.data; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); // If the class is an array, then the methods field holds a // pointer to the element class. If the class is primitive, // then the methods field holds a pointer to the array class. - p = (ptr_t) c->methods; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c6label); - - // The vtable might have been set, but the rest of the class - // could still be uninitialized. If this is the case, then - // c.isArray will SEGV. We check for this, and if it is the - // case we just return. - if (__builtin_expect (c->name == NULL, false)) - return mark_stack_ptr; - - if (! c->isArray() && ! c->isPrimitive()) - { - // Scan each method in the cases where `methods' really - // points to a methods structure. - for (int i = 0; i < c->method_count; ++i) - { - p = (ptr_t) c->methods[i].name; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, - cm1label); - p = (ptr_t) c->methods[i].signature; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, - cm2label); - } - } - - // Mark all the fields. - p = (ptr_t) c->fields; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c8label); - for (int i = 0; i < c->field_count; ++i) - { - _Jv_Field* field = &c->fields[i]; - -#ifndef COMPACT_FIELDS - p = (ptr_t) field->name; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c8alabel); -#endif - p = (ptr_t) field->type; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c8blabel); - - // For the interpreter, we also need to mark the memory - // containing static members - if ((field->flags & java::lang::reflect::Modifier::STATIC)) - { - p = (ptr_t) field->u.addr; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c8clabel); - - // also, if the static member is a reference, - // mark also the value pointed to. We check for isResolved - // since marking can happen before memory is allocated for - // static members. 
- if (JvFieldIsRef (field) && field->isResolved()) - { - jobject val = *(jobject*) field->u.addr; - p = (ptr_t) val; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, - c, c8elabel); - } - } - } - - p = (ptr_t) c->vtable; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c9label); - p = (ptr_t) c->interfaces; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, cAlabel); - for (int i = 0; i < c->interface_count; ++i) - { - p = (ptr_t) c->interfaces[i]; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, cClabel); - } - p = (ptr_t) c->loader; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, cBlabel); - p = (ptr_t) c->arrayclass; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, cDlabel); - p = (ptr_t) c->protectionDomain; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, cPlabel); - -#ifdef INTERPRETER - if (_Jv_IsInterpretedClass (c)) - { - _Jv_InterpClass* ic = (_Jv_InterpClass*) c; - - p = (ptr_t) ic->interpreted_methods; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, ic, cElabel); - - for (int i = 0; i < c->method_count; i++) - { - p = (ptr_t) ic->interpreted_methods[i]; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, ic, \ - cFlabel); - - // Mark the direct-threaded code. - if ((c->methods[i].accflags - & java::lang::reflect::Modifier::NATIVE) == 0) - { - _Jv_InterpMethod *im - = (_Jv_InterpMethod *) ic->interpreted_methods[i]; - if (im) - { - p = (ptr_t) im->prepared; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, ic, \ - cFlabel); - } - } - - // The interpreter installs a heap-allocated trampoline - // here, so we'll mark it. - p = (ptr_t) c->methods[i].ncode; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, - cm3label); - } - - p = (ptr_t) ic->field_initializers; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, ic, cGlabel); - - } -#endif - + p = (GC_PTR) c->methods; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); + + p = (GC_PTR) c->fields; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); + + // The vtable might be allocated even for compiled code. + p = (GC_PTR) c->vtable; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); + + p = (GC_PTR) c->interfaces; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); + p = (GC_PTR) c->loader; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); + + // The dispatch tables can be allocated at runtime. + p = (GC_PTR) c->ancestors; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); + + p = (GC_PTR) c->idt; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); + + p = (GC_PTR) c->arrayclass; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); + p = (GC_PTR) c->protectionDomain; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); + p = (GC_PTR) c->hack_signers; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); + p = (GC_PTR) c->aux_info; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); + + p = (GC_PTR) c->reflection_data; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); + + // The class chain must be marked for runtime-allocated Classes + // loaded by the bootstrap ClassLoader. 
+ p = (GC_PTR) c->next_or_version; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c); } else { @@ -269,9 +196,8 @@ _Jv_MarkObj (void *addr, void *msp, void *msl, void * /* env */) if (JvFieldIsRef (field)) { jobject val = JvGetObjectField (obj, field); - p = (ptr_t) val; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, - obj, elabel); + p = (GC_PTR) val; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, obj); } field = field->getNextField (); } @@ -286,10 +212,13 @@ _Jv_MarkObj (void *addr, void *msp, void *msl, void * /* env */) // array (of objects). We use `void *' arguments and return, and not // what the Boehm GC wants, to avoid pollution in our headers. void * -_Jv_MarkArray (void *addr, void *msp, void *msl, void * /*env*/) +_Jv_MarkArray (void *addr, void *msp, void *msl, void *env) { - mse *mark_stack_ptr = (mse *) msp; - mse *mark_stack_limit = (mse *) msl; + struct GC_ms_entry *mark_stack_ptr = (struct GC_ms_entry *)msp; + struct GC_ms_entry *mark_stack_limit = (struct GC_ms_entry *)msl; + + if (env == (void *)1) /* Object allocated with debug allocator. */ + addr = (void *)GC_USR_PTR_FROM_BASE(addr); jobjectArray array = (jobjectArray) addr; _Jv_VTable *dt = *(_Jv_VTable **) addr; @@ -298,23 +227,22 @@ _Jv_MarkArray (void *addr, void *msp, void *msl, void * /*env*/) // we may need to round up the size. if (__builtin_expect (! dt || !(dt -> get_finalizer()), false)) return mark_stack_ptr; - jclass klass = dt->clas; - ptr_t p; + GC_PTR p; + + p = (GC_PTR) dt; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, array); # ifndef JV_HASH_SYNCHRONIZATION // Every object has a sync_info pointer. - p = (ptr_t) array->sync_info; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, array, e1label); + p = (GC_PTR) array->sync_info; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, array); # endif - // Mark the object's class. - p = (ptr_t) klass; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, &(dt -> clas), o2label); for (int i = 0; i < JvGetArrayLength (array); ++i) { jobject obj = elements (array)[i]; - p = (ptr_t) obj; - MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, array, e2label); + p = (GC_PTR) obj; + MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, array); } return mark_stack_ptr; @@ -362,7 +290,7 @@ _Jv_BuildGCDescr(jclass self) // If we find a field outside the range of our bitmap, // fall back to procedure marker. The bottom 2 bits are // reserved. - if (off >= bits_per_word - 2) + if (off >= (unsigned) bits_per_word - 2) return (void *) (GCJ_DEFAULT_DESCR); desc |= 1ULL << (bits_per_word - off - 1); } @@ -390,12 +318,44 @@ _Jv_AllocBytes (jsize size) return r; } +#ifdef LIBGCJ_GC_DEBUG + +void * +_Jv_AllocObj (jsize size, jclass klass) +{ + return GC_GCJ_MALLOC (size, klass->vtable); +} + +void * +_Jv_AllocPtrFreeObj (jsize size, jclass klass) +{ +#ifdef JV_HASH_SYNCHRONIZATION + void * obj = GC_MALLOC_ATOMIC(size); + *((_Jv_VTable **) obj) = klass->vtable; +#else + void * obj = GC_GCJ_MALLOC(size, klass->vtable); +#endif + return obj; +} + +#endif /* LIBGCJ_GC_DEBUG */ +// In the non-debug case, the above two functions are defined +// as inline functions in boehm-gc.h. In the debug case we +// really want to take advantage of the definitions in gc_gcj.h. + // Allocate space for a new Java array. // Used only for arrays of objects. void * _Jv_AllocArray (jsize size, jclass klass) { void *obj; + +#ifdef LIBGCJ_GC_DEBUG + // There isn't much to lose by scanning this conservatively. 
+ // If we didn't, the mark proc would have to understand that + // it needed to skip the header. + obj = GC_MALLOC(size); +#else const jsize min_heap_addr = 16*1024; // A heuristic. If size is less than this value, the size // stored in the array can't possibly be misinterpreted as @@ -403,12 +363,6 @@ _Jv_AllocArray (jsize size, jclass klass) // completely conservatively, since no misidentification can // take place. -#ifdef GC_DEBUG - // There isn't much to lose by scanning this conservatively. - // If we didn't, the mark proc would have to understand that - // it needed to skip the header. - obj = GC_MALLOC(size); -#else if (size < min_heap_addr) obj = GC_MALLOC(size); else @@ -423,8 +377,33 @@ _Jv_AllocArray (jsize size, jclass klass) void * _Jv_AllocRawObj (jsize size) { - return (void *) GC_MALLOC (size); + return (void *) GC_MALLOC (size ? size : 1); +} + +#ifdef INTERPRETER +typedef _Jv_ClosureList *closure_list_pointer; + +/* Release closures in a _Jv_ClosureList. */ +static void +finalize_closure_list (GC_PTR obj, GC_PTR) +{ + _Jv_ClosureList **clpp = (_Jv_ClosureList **)obj; + _Jv_ClosureList::releaseClosures (clpp); +} + +/* Allocate a double-indirect pointer to a _Jv_ClosureList that will + get garbage-collected after this double-indirect pointer becomes + unreachable by any other objects, including finalizable ones. */ +_Jv_ClosureList ** +_Jv_ClosureListFinalizer () +{ + _Jv_ClosureList **clpp; + clpp = (_Jv_ClosureList **)_Jv_AllocBytes (sizeof (*clpp)); + GC_REGISTER_FINALIZER_UNREACHABLE (clpp, finalize_closure_list, + NULL, NULL, NULL); + return clpp; } +#endif // INTERPRETER static void call_finalizer (GC_PTR obj, GC_PTR client_data) @@ -486,24 +465,22 @@ _Jv_GCSetMaximumHeapSize (size_t size) GC_set_max_heap_size ((GC_word) size); } -// From boehm's misc.c -extern "C" void GC_enable(); -extern "C" void GC_disable(); +int +_Jv_SetGCFreeSpaceDivisor (int div) +{ + return (int)GC_set_free_space_divisor ((GC_word)div); +} void _Jv_DisableGC (void) { - _Jv_MutexLock (&disable_gc_mutex); GC_disable(); - _Jv_MutexUnlock (&disable_gc_mutex); } void _Jv_EnableGC (void) { - _Jv_MutexLock (&disable_gc_mutex); GC_enable(); - _Jv_MutexUnlock (&disable_gc_mutex); } static void * handle_out_of_memory(size_t) @@ -511,16 +488,52 @@ static void * handle_out_of_memory(size_t) _Jv_ThrowNoMemory(); } +static void +gcj_describe_type_fn(void *obj, char *out_buf) +{ + _Jv_VTable *dt = *(_Jv_VTable **) obj; + + if (! dt /* Shouldn't happen */) + { + strcpy(out_buf, "GCJ (bad)"); + return; + } + jclass klass = dt->clas; + if (!klass /* shouldn't happen */) + { + strcpy(out_buf, "GCJ (bad)"); + return; + } + jstring name = klass -> getName(); + size_t len = name -> length(); + if (len >= GC_TYPE_DESCR_LEN) len = GC_TYPE_DESCR_LEN - 1; + JvGetStringUTFRegion (name, 0, len, out_buf); + out_buf[len] = '\0'; +} + void _Jv_InitGC (void) { int proc; + static bool gc_initialized; + + if (gc_initialized) + return; + + gc_initialized = 1; // Ignore pointers that do not point to the start of an object. GC_all_interior_pointers = 0; +#if defined (HAVE_DLFCN_H) && defined (HAVE_DLADDR) + // Tell the collector to ask us before scanning DSOs. + GC_register_has_static_roots_callback (_Jv_GC_has_static_roots); +#endif + // Configure the collector to use the bitmap marking descriptors that we // stash in the class vtable. + // We always use mark proc descriptor 0, since the compiler knows + // about it. 
GC_init_gcj_malloc (0, (void *) _Jv_MarkObj); // Cause an out of memory error to be thrown from the allocators, @@ -531,23 +544,14 @@ _Jv_InitGC (void) // We use a different mark procedure for object arrays. This code // configures a different object `kind' for object array allocation and - // marking. FIXME: see above. - array_free_list = (ptr_t *) GC_generic_malloc_inner ((MAXOBJSZ + 1) - * sizeof (ptr_t), - PTRFREE); - memset (array_free_list, 0, (MAXOBJSZ + 1) * sizeof (ptr_t)); - - proc = GC_n_mark_procs++; - GC_mark_procs[proc] = (GC_mark_proc) _Jv_MarkArray; - - array_kind_x = GC_n_kinds++; - GC_obj_kinds[array_kind_x].ok_freelist = array_free_list; - GC_obj_kinds[array_kind_x].ok_reclaim_list = 0; - GC_obj_kinds[array_kind_x].ok_descriptor = GC_MAKE_PROC (proc, 0); - GC_obj_kinds[array_kind_x].ok_relocate_descr = FALSE; - GC_obj_kinds[array_kind_x].ok_init = TRUE; - - _Jv_MutexInit (&disable_gc_mutex); + // marking. + array_free_list = GC_new_free_list(); + proc = GC_new_proc((GC_mark_proc)_Jv_MarkArray); + array_kind_x = GC_new_kind(array_free_list, GC_MAKE_PROC (proc, 0), 0, 1); + + // Arrange to have the GC print Java class names in backtraces, etc. + GC_register_describe_type_fn(GC_gcj_kind, gcj_describe_type_fn); + GC_register_describe_type_fn(GC_gcj_debug_kind, gcj_describe_type_fn); } #ifdef JV_HASH_SYNCHRONIZATION @@ -600,7 +604,11 @@ _Jv_GCInitializeFinalizers (void (*notifier) (void)) void _Jv_GCRegisterDisappearingLink (jobject *objp) { - GC_general_register_disappearing_link ((GC_PTR *) objp, (GC_PTR) *objp); + // This test helps to ensure that we meet a precondition of + // GC_general_register_disappearing_link, viz. "Obj must be a + // pointer to the first word of an object we allocated." + if (GC_base(*objp)) + GC_general_register_disappearing_link ((GC_PTR *) objp, (GC_PTR) *objp); } jboolean @@ -609,3 +617,145 @@ _Jv_GCCanReclaimSoftReference (jobject) // For now, always reclaim soft references. FIXME. return true; } + + + +#if defined (HAVE_DLFCN_H) && defined (HAVE_DLADDR) + +// We keep a store of the filenames of DSOs that need to be +// conservatively scanned by the garbage collector. During collection +// the gc calls _Jv_GC_has_static_roots() to see if the data segment +// of a DSO should be scanned. +typedef struct filename_node +{ + char *name; + struct filename_node *link; +} filename_node; + +#define FILENAME_STORE_SIZE 17 +static filename_node *filename_store[FILENAME_STORE_SIZE]; + +// Find a filename in filename_store. +static filename_node ** +find_file (const char *filename) +{ + int index = strlen (filename) % FILENAME_STORE_SIZE; + filename_node **node = &filename_store[index]; + + while (*node) + { + if (strcmp ((*node)->name, filename) == 0) + return node; + node = &(*node)->link; + } + + return node; +} + +// Print the store of filenames of DSOs that need collection. +void +_Jv_print_gc_store (void) +{ + for (int i = 0; i < FILENAME_STORE_SIZE; i++) + { + filename_node *node = filename_store[i]; + while (node) + { + fprintf (stderr, "%s\n", node->name); + node = node->link; + } + } +} + +// Create a new node in the store of libraries to collect. +static filename_node * +new_node (const char *filename) +{ + filename_node *node = (filename_node*)_Jv_Malloc (sizeof (filename_node)); + node->name = (char *)_Jv_Malloc (strlen (filename) + 1); + node->link = NULL; + strcpy (node->name, filename); + + return node; +} + +// Nonzero if the gc should scan this lib. 
+static int +_Jv_GC_has_static_roots (const char *filename, void *, size_t) +{ + if (filename == NULL || strlen (filename) == 0) + // No filename; better safe than sorry. + return 1; + + filename_node **node = find_file (filename); + if (*node) + return 1; + + return 0; +} + +#endif + +// Register the DSO that contains p for collection. +void +_Jv_RegisterLibForGc (const void *p __attribute__ ((__unused__))) +{ +#if defined (HAVE_DLFCN_H) && defined (HAVE_DLADDR) + Dl_info info; + + if (dladdr (const_cast(p), &info) != 0) + { + filename_node **node = find_file (info.dli_fname); + if (! *node) + *node = new_node (info.dli_fname); + } +#endif +} + +void +_Jv_SuspendThread (_Jv_Thread_t *thread) +{ +#if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \ + && !defined(GC_WIN32_THREADS) && !defined(GC_DARWIN_THREADS) + GC_suspend_thread (_Jv_GetPlatformThreadID (thread)); +#endif +} + +void +_Jv_ResumeThread (_Jv_Thread_t *thread) +{ +#if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \ + && !defined(GC_WIN32_THREADS) && !defined(GC_DARWIN_THREADS) + GC_resume_thread (_Jv_GetPlatformThreadID (thread)); +#endif +} + +int +_Jv_IsThreadSuspended (_Jv_Thread_t *thread) +{ +#if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \ + && !defined(GC_WIN32_THREADS) && !defined(GC_DARWIN_THREADS) + return GC_is_thread_suspended (_Jv_GetPlatformThreadID (thread)); +#else + return 0; +#endif +} + +void +_Jv_GCAttachThread () +{ + // The registration interface is only defined on posixy systems and + // only actually works if pthread_getattr_np is defined. + // FIXME: until gc7 it is simpler to disable this on solaris. +#if defined(HAVE_PTHREAD_GETATTR_NP) && !defined(GC_SOLARIS_THREADS) + GC_register_my_thread (); +#endif +} + +void +_Jv_GCDetachThread () +{ +#if defined(HAVE_PTHREAD_GETATTR_NP) && !defined(GC_SOLARIS_THREADS) + GC_unregister_my_thread (); +#endif +}
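
The reworked _Jv_InitGC above registers the object-array kind through the collector's public API (GC_new_free_list, GC_new_proc, GC_new_kind) instead of poking at GC_n_mark_procs and GC_obj_kinds directly. The standalone sketch below shows that same pattern outside libgcj; it is not part of this patch. The `node' type, mark_node(), and the include paths are illustrative assumptions about a typical installed bdwgc that provides gc.h and gc_mark.h.

// Minimal sketch: register a custom object kind with its own mark
// procedure, the way _Jv_InitGC does for Java object arrays.
// Build command is an assumption about the local GC install:
//   g++ kind_sketch.cc -lgc
#include <cstdio>
#include <gc.h>
#include <gc_mark.h>

struct node
{
  node *next;    // the only field the mark procedure must trace
  int payload;   // pointer-free data, ignored by the marker
};

// Mark procedure, analogous to _Jv_MarkArray: push each known pointer
// field with GC_MARK_AND_PUSH and return the updated mark-stack pointer.
static struct GC_ms_entry *
mark_node (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
           struct GC_ms_entry *mark_stack_limit, GC_word /* env */)
{
  node *n = (node *) addr;
  mark_stack_ptr = GC_MARK_AND_PUSH ((void *) n->next, mark_stack_ptr,
                                     mark_stack_limit, (void **) &n->next);
  return mark_stack_ptr;
}

int
main ()
{
  GC_INIT ();

  // The same three calls the patch introduces in _Jv_InitGC.
  void **free_list = GC_new_free_list ();
  int proc = GC_new_proc (mark_node);
  int node_kind = GC_new_kind (free_list, GC_MAKE_PROC (proc, 0),
                               0 /* add_size_to_descr */,
                               1 /* clear_new_objects */);

  // Objects of the new kind are allocated with GC_generic_malloc.
  node *head = (node *) GC_generic_malloc (sizeof (node), node_kind);
  head->next = (node *) GC_generic_malloc (sizeof (node), node_kind);
  head->next->payload = 42;

  GC_gcollect ();   // head->next stays live because mark_node traces it
  std::printf ("%d\n", head->next->payload);
  return 0;
}

Going through GC_new_kind and GC_new_proc keeps the client code off the collector's private headers (gc_priv.h, GC_obj_kinds, MAXOBJSZ), which is the point of the _Jv_InitGC hunk that replaces the hand-rolled free-list and mark-proc setup.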