// boehm.cc - interface between libjava and Boehm GC.
-/* Copyright (C) 1998, 1999, 2000 Free Software Foundation
+/* Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003 Free Software Foundation
This file is part of libgcj.
#include <config.h>
+extern "C"
+{
+#include <gc_config.h>
+
+// Set GC_DEBUG before including gc.h!
+#ifdef LIBGCJ_GC_DEBUG
+# define GC_DEBUG
+#endif
+
+#include <gc_mark.h>
+#include <gc_gcj.h>
+#include <javaxfc.h> // GC_finalize_all declaration.
+
+#ifdef THREAD_LOCAL_ALLOC
+# define GC_REDIRECT_TO_LOCAL
+# include <gc_local_alloc.h>
+#endif
+};
+
#include <stdio.h>
+#include <limits.h>
#include <jvm.h>
#include <gcj/cni.h>
#include <java/lang/reflect/Modifier.h>
#include <java-interp.h>
-// More nastiness: the GC wants to define TRUE and FALSE. We don't
-// need the Java definitions (themselves a hack), so we undefine them.
-#undef TRUE
-#undef FALSE
-
-extern "C"
-{
-#include <gc_priv.h>
-#include <gc_mark.h>
-#include <include/gc_gcj.h>
-
- // These aren't declared in any Boehm GC header.
- void GC_finalize_all (void);
- ptr_t GC_debug_generic_malloc (size_t size, int k, GC_EXTRA_PARAMS);
-};
-
-// FIXME: this should probably be defined in some GC header.
-#ifdef GC_DEBUG
-# define GC_GENERIC_MALLOC(Size, Type) \
- GC_debug_generic_malloc (Size, Type, GC_EXTRAS)
-#else
-# define GC_GENERIC_MALLOC(Size, Type) GC_generic_malloc (Size, Type)
-#endif
-
-// We must check for plausibility ourselves.
#define MAYBE_MARK(Obj, Top, Limit, Source, Exit) \
- if ((ptr_t) (Obj) >= GC_least_plausible_heap_addr \
- && (ptr_t) (Obj) <= GC_greatest_plausible_heap_addr) \
- PUSH_CONTENTS (Obj, Top, Limit, Source, Exit)
-
-\f
-
-// Nonzero if this module has been initialized.
-static int initialized = 0;
-
-#if 0
-// `kind' index used when allocating Java objects.
-static int obj_kind_x;
-
-// Freelist used for Java objects.
-static ptr_t *obj_free_list;
-#endif /* 0 */
+ Top=GC_MARK_AND_PUSH((GC_PTR)Obj, Top, Limit, (GC_PTR *)Source)
// `kind' index used when allocating Java arrays.
static int array_kind_x;
// Freelist used for Java arrays.
-static ptr_t *array_free_list;
-
-// Lock used to protect access to Boehm's GC_enable/GC_disable functions.
-static _Jv_Mutex_t disable_gc_mutex;
+static void * *array_free_list;
\f
// object. We use `void *' arguments and return, and not what the
// Boehm GC wants, to avoid pollution in our headers.
void *
-_Jv_MarkObj (void *addr, void *msp, void *msl, void * /* env */)
+_Jv_MarkObj (void *addr, void *msp, void *msl, void * env)
{
- mse *mark_stack_ptr = (mse *) msp;
- mse *mark_stack_limit = (mse *) msl;
- jobject obj = (jobject) addr;
+ struct GC_ms_entry *mark_stack_ptr = (struct GC_ms_entry *)msp;
+ struct GC_ms_entry *mark_stack_limit = (struct GC_ms_entry *)msl;
- // FIXME: if env is 1, this object was allocated through the debug
- // interface, and addr points to the beginning of the debug header.
- // In that case, we should really add the size of the header to addr.
+ if (env == (void *)1) /* Object allocated with debug allocator. */
+ addr = (GC_PTR)GC_USR_PTR_FROM_BASE(addr);
+ jobject obj = (jobject) addr;
_Jv_VTable *dt = *(_Jv_VTable **) addr;
// The object might not yet have its vtable set, or it might
if (__builtin_expect (! dt || !(dt -> get_finalizer()), false))
return mark_stack_ptr;
jclass klass = dt->clas;
+ GC_PTR p;
- // Every object has a sync_info pointer.
- ptr_t p = (ptr_t) obj->sync_info;
- MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, obj, o1label);
+# ifndef JV_HASH_SYNCHRONIZATION
+ // Every object has a sync_info pointer.
+ p = (GC_PTR) obj->sync_info;
+ MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, obj, o1label);
+# endif
// Mark the object's class.
- p = (ptr_t) klass;
+ p = (GC_PTR) klass;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, obj, o2label);
if (__builtin_expect (klass == &java::lang::Class::class$, false))
// of our root set. - HB
jclass c = (jclass) addr;
- p = (ptr_t) c->name;
+ p = (GC_PTR) c->name;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c3label);
- p = (ptr_t) c->superclass;
+ p = (GC_PTR) c->superclass;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c4label);
for (int i = 0; i < c->constants.size; ++i)
{
/* FIXME: We could make this more precise by using the tags -KKT */
- p = (ptr_t) c->constants.data[i].p;
+ p = (GC_PTR) c->constants.data[i].p;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c5label);
}
#ifdef INTERPRETER
if (_Jv_IsInterpretedClass (c))
{
- p = (ptr_t) c->constants.tags;
+ p = (GC_PTR) c->constants.tags;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c5alabel);
- p = (ptr_t) c->constants.data;
+ p = (GC_PTR) c->constants.data;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c5blabel);
- p = (ptr_t) c->vtable;
+ p = (GC_PTR) c->vtable;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c5clabel);
}
#endif
// If the class is an array, then the methods field holds a
// pointer to the element class. If the class is primitive,
// then the methods field holds a pointer to the array class.
- p = (ptr_t) c->methods;
+ p = (GC_PTR) c->methods;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c6label);
+ // The vtable might have been set, but the rest of the class
+ // could still be uninitialized. If this is the case, then
+ // c.isArray will SEGV. We check for this, and if it is the
+ // case we just return.
+ if (__builtin_expect (c->name == NULL, false))
+ return mark_stack_ptr;
if (! c->isArray() && ! c->isPrimitive())
{
// points to a methods structure.
for (int i = 0; i < c->method_count; ++i)
{
- p = (ptr_t) c->methods[i].name;
+ p = (GC_PTR) c->methods[i].name;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c,
cm1label);
- p = (ptr_t) c->methods[i].signature;
+ p = (GC_PTR) c->methods[i].signature;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c,
cm2label);
-
- // FIXME: `ncode' entry?
-
-#ifdef INTERPRETER
- // The interpreter installs a heap-allocated
- // trampoline here, so we'll mark it.
- if (_Jv_IsInterpretedClass (c))
- {
- p = (ptr_t) c->methods[i].ncode;
- MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c,
- cm3label);
- }
-#endif
}
}
// Mark all the fields.
- p = (ptr_t) c->fields;
+ p = (GC_PTR) c->fields;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c8label);
for (int i = 0; i < c->field_count; ++i)
{
_Jv_Field* field = &c->fields[i];
#ifndef COMPACT_FIELDS
- p = (ptr_t) field->name;
+ p = (GC_PTR) field->name;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c8alabel);
#endif
- p = (ptr_t) field->type;
+ p = (GC_PTR) field->type;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c8blabel);
// For the interpreter, we also need to mark the memory
// containing static members
if ((field->flags & java::lang::reflect::Modifier::STATIC))
{
- p = (ptr_t) field->u.addr;
+ p = (GC_PTR) field->u.addr;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c8clabel);
// also, if the static member is a reference,
if (JvFieldIsRef (field) && field->isResolved())
{
jobject val = *(jobject*) field->u.addr;
- p = (ptr_t) val;
+ p = (GC_PTR) val;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit,
c, c8elabel);
}
}
}
- p = (ptr_t) c->vtable;
+ p = (GC_PTR) c->vtable;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, c9label);
- p = (ptr_t) c->interfaces;
+ p = (GC_PTR) c->interfaces;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, cAlabel);
for (int i = 0; i < c->interface_count; ++i)
{
- p = (ptr_t) c->interfaces[i];
+ p = (GC_PTR) c->interfaces[i];
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, cClabel);
}
- p = (ptr_t) c->loader;
+ p = (GC_PTR) c->loader;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, cBlabel);
- p = (ptr_t) c->arrayclass;
+ p = (GC_PTR) c->arrayclass;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, cDlabel);
+ p = (GC_PTR) c->protectionDomain;
+ MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, cPlabel);
+ p = (GC_PTR) c->hack_signers;
+ MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, cSlabel);
+ p = (GC_PTR) c->aux_info;
+ MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c, cTlabel);
#ifdef INTERPRETER
if (_Jv_IsInterpretedClass (c))
{
- _Jv_InterpClass* ic = (_Jv_InterpClass*)c;
+ _Jv_InterpClass* ic = (_Jv_InterpClass*) c->aux_info;
- p = (ptr_t) ic->interpreted_methods;
+ p = (GC_PTR) ic->interpreted_methods;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, ic, cElabel);
for (int i = 0; i < c->method_count; i++)
{
- p = (ptr_t) ic->interpreted_methods[i];
+ p = (GC_PTR) ic->interpreted_methods[i];
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, ic, \
cFlabel);
+
+ // Mark the direct-threaded code.
+ if ((c->methods[i].accflags
+ & java::lang::reflect::Modifier::NATIVE) == 0)
+ {
+ _Jv_InterpMethod *im
+ = (_Jv_InterpMethod *) ic->interpreted_methods[i];
+ if (im)
+ {
+ p = (GC_PTR) im->prepared;
+ MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, ic, \
+ cFlabel);
+ }
+ }
+
+ // The interpreter installs a heap-allocated trampoline
+ // here, so we'll mark it.
+ p = (GC_PTR) c->methods[i].ncode;
+ MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, c,
+ cm3label);
}
- p = (ptr_t) ic->field_initializers;
+ p = (GC_PTR) ic->field_initializers;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, ic, cGlabel);
}
if (JvFieldIsRef (field))
{
jobject val = JvGetObjectField (obj, field);
- p = (ptr_t) val;
+ p = (GC_PTR) val;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit,
obj, elabel);
}
// array (of objects). We use `void *' arguments and return, and not
// what the Boehm GC wants, to avoid pollution in our headers.
void *
-_Jv_MarkArray (void *addr, void *msp, void *msl, void * /*env*/)
+_Jv_MarkArray (void *addr, void *msp, void *msl, void * env)
{
- mse *mark_stack_ptr = (mse *) msp;
- mse *mark_stack_limit = (mse *) msl;
+ struct GC_ms_entry *mark_stack_ptr = (struct GC_ms_entry *)msp;
+ struct GC_ms_entry *mark_stack_limit = (struct GC_ms_entry *)msl;
+
+ if (env == (void *)1) /* Object allocated with debug allocator. */
+ addr = (void *)GC_USR_PTR_FROM_BASE(addr);
jobjectArray array = (jobjectArray) addr;
_Jv_VTable *dt = *(_Jv_VTable **) addr;
if (__builtin_expect (! dt || !(dt -> get_finalizer()), false))
return mark_stack_ptr;
jclass klass = dt->clas;
+ GC_PTR p;
- // Every object has a sync_info pointer.
- ptr_t p = (ptr_t) array->sync_info;
- MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, array, e1label);
+# ifndef JV_HASH_SYNCHRONIZATION
+ // Every object has a sync_info pointer.
+ p = (GC_PTR) array->sync_info;
+ MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, array, e1label);
+# endif
// Mark the object's class.
- p = (ptr_t) klass;
- MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, obj, o2label);
+ p = (GC_PTR) klass;
+ MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, &(dt -> clas), o2label);
for (int i = 0; i < JvGetArrayLength (array); ++i)
{
jobject obj = elements (array)[i];
- p = (ptr_t) obj;
+ p = (GC_PTR) obj;
MAYBE_MARK (p, mark_stack_ptr, mark_stack_limit, array, e2label);
}
return mark_stack_ptr;
}
-// Return GC descriptor for interpreted class
-#ifdef INTERPRETER
-
+// Generate a GC marking descriptor for a class.
+//
// We assume that the gcj mark proc has index 0. This is a dubious assumption,
// since another one could be registered first. But the compiler also
// knows this, so in that case everything else will break, too.
-#define GCJ_DEFAULT_DESCR MAKE_PROC(GCJ_RESERVED_MARK_PROC_INDEX,0)
+#define GCJ_DEFAULT_DESCR GC_MAKE_PROC(GC_GCJ_RESERVED_MARK_PROC_INDEX,0)
+
void *
-_Jv_BuildGCDescr(jclass klass)
+_Jv_BuildGCDescr(jclass self)
{
- /* FIXME: We should really look at the class and build the descriptor. */
- return (void *)(GCJ_DEFAULT_DESCR);
-}
+ jlong desc = 0;
+ jint bits_per_word = CHAR_BIT * sizeof (void *);
+
+ // Note: for now we only consider a bitmap mark descriptor. We
+ // could also handle the case where the first N fields of a type are
+ // references. However, this is not very likely to be used by many
+ // classes, and it is easier to compute things this way.
+
+ // The vtable pointer.
+ desc |= 1ULL << (bits_per_word - 1);
+#ifndef JV_HASH_SYNCHRONIZATION
+ // The sync_info field.
+ desc |= 1ULL << (bits_per_word - 2);
#endif
-// Allocate space for a new Java object.
+ for (jclass klass = self; klass != NULL; klass = klass->getSuperclass())
+ {
+ jfieldID field = JvGetFirstInstanceField(klass);
+ int count = JvNumInstanceFields(klass);
+
+ for (int i = 0; i < count; ++i)
+ {
+ if (field->isRef())
+ {
+ unsigned int off = field->getOffset();
+ // If we run into a weird situation, we bail.
+ if (off % sizeof (void *) != 0)
+ return (void *) (GCJ_DEFAULT_DESCR);
+ off /= sizeof (void *);
+ // If we find a field outside the range of our bitmap,
+ // fall back to procedure marker. The bottom 2 bits are
+ // reserved.
+ if (off >= (unsigned)bits_per_word - 2)
+ return (void *) (GCJ_DEFAULT_DESCR);
+ desc |= 1ULL << (bits_per_word - off - 1);
+ }
+
+ field = field->getNextField();
+ }
+ }
+
+ // For bitmap mark type, bottom bits are 01.
+ desc |= 1;
+ // Bogus warning avoidance (on many platforms).
+ return (void *) (unsigned long) desc;
+}
+
+// Allocate some space that is known to be pointer-free.
+void *
+_Jv_AllocBytes (jsize size)
+{
+ void *r = GC_MALLOC_ATOMIC (size);
+ // We have to explicitly zero memory here, as the GC doesn't
+ // guarantee that PTRFREE allocations are zeroed. Note that we
+ // don't have to do this for other allocation types because we set
+ // the `ok_init' flag in the type descriptor.
+ memset (r, 0, size);
+ return r;
+}
+
+#ifdef LIBGCJ_GC_DEBUG
+
void *
_Jv_AllocObj (jsize size, jclass klass)
{
return GC_GCJ_MALLOC (size, klass->vtable);
}
+void *
+_Jv_AllocPtrFreeObj (jsize size, jclass klass)
+{
+#ifdef JV_HASH_SYNCHRONIZATION
+ void * obj = GC_MALLOC_ATOMIC(size);
+ *((_Jv_VTable **) obj) = klass->vtable;
+#else
+ void * obj = GC_GCJ_MALLOC(size, klass->vtable);
+#endif
+ return obj;
+}
+
+#endif /* LIBGCJ_GC_DEBUG */
+// In the non-debug case, the above two functions are defined
+// as inline functions in boehm-gc.h. In the debug case we
+// really want to take advantage of the definitions in gc_gcj.h.
+
// Allocate space for a new Java array.
// Used only for arrays of objects.
void *
_Jv_AllocArray (jsize size, jclass klass)
{
void *obj;
+
+#ifdef LIBGCJ_GC_DEBUG
+ // There isn't much to lose by scanning this conservatively.
+ // If we didn't, the mark proc would have to understand that
+ // it needed to skip the header.
+ obj = GC_MALLOC(size);
+#else
const jsize min_heap_addr = 16*1024;
// A heuristic. If size is less than this value, the size
// stored in the array can't possibly be misinterpreted as
// completely conservatively, since no misidentification can
// take place.
-#ifdef GC_DEBUG
- // There isn't much to lose by scanning this conservatively.
- // If we didn't, the mark proc would have to understand that
- // it needed to skip the header.
- obj = GC_MALLOC(size);
-#else
if (size < min_heap_addr)
obj = GC_MALLOC(size);
else
- obj = GC_GENERIC_MALLOC (size, array_kind_x);
+ obj = GC_generic_malloc (size, array_kind_x);
#endif
*((_Jv_VTable **) obj) = klass->vtable;
return obj;
}
-// Allocate some space that is known to be pointer-free.
+/* Allocate space for a new non-Java object, which does not have the usual
+ Java object header but may contain pointers to other GC'ed objects. */
void *
-_Jv_AllocBytes (jsize size)
+_Jv_AllocRawObj (jsize size)
{
- void *r = GC_MALLOC_ATOMIC (size);
- // We have to explicitly zero memory here, as the GC doesn't
- // guarantee that PTRFREE allocations are zeroed. Note that we
- // don't have to do this for other allocation types because we set
- // the `ok_init' flag in the type descriptor.
- if (__builtin_expect (r != NULL, !NULL))
- memset (r, 0, size);
- return r;
+ return (void *) GC_MALLOC (size);
}
static void
void
_Jv_DisableGC (void)
{
- _Jv_MutexLock (&disable_gc_mutex);
GC_disable();
- _Jv_MutexUnlock (&disable_gc_mutex);
}
void
_Jv_EnableGC (void)
{
- _Jv_MutexLock (&disable_gc_mutex);
GC_enable();
- _Jv_MutexUnlock (&disable_gc_mutex);
}
-void
-_Jv_InitGC (void)
+static void * handle_out_of_memory(size_t)
{
- int proc;
- DCL_LOCK_STATE;
+ _Jv_ThrowNoMemory();
+}
- DISABLE_SIGNALS ();
- LOCK ();
+static void
+gcj_describe_type_fn(void *obj, char *out_buf)
+{
+ _Jv_VTable *dt = *(_Jv_VTable **) obj;
- if (initialized)
+ if (! dt /* Shouldn't happen */)
+ {
+ strcpy(out_buf, "GCJ (bad)");
+ return;
+ }
+ jclass klass = dt->clas;
+ if (!klass /* shouldn't happen */)
{
- UNLOCK ();
- ENABLE_SIGNALS ();
+ strcpy(out_buf, "GCJ (bad)");
return;
}
- initialized = 1;
- UNLOCK ();
+ jstring name = klass -> getName();
+ size_t len = name -> length();
+ if (len >= GC_TYPE_DESCR_LEN) len = GC_TYPE_DESCR_LEN - 1;
+ JvGetStringUTFRegion (name, 0, len, out_buf);
+ out_buf[len] = '\0';
+}
+
+void
+_Jv_InitGC (void)
+{
+ int proc;
+
+ // Ignore pointers that do not point to the start of an object.
+ GC_all_interior_pointers = 0;
// Configure the collector to use the bitmap marking descriptors that we
// stash in the class vtable.
+ // We always use mark proc descriptor 0, since the compiler knows
+ // about it.
GC_init_gcj_malloc (0, (void *) _Jv_MarkObj);
- LOCK ();
+ // Cause an out of memory error to be thrown from the allocators,
+ // instead of returning 0. This is cheaper than checking on allocation.
+ GC_oom_fn = handle_out_of_memory;
+
GC_java_finalization = 1;
// We use a different mark procedure for object arrays. This code
// configures a different object `kind' for object array allocation and
- // marking. FIXME: see above.
- array_free_list = (ptr_t *) GC_generic_malloc_inner ((MAXOBJSZ + 1)
- * sizeof (ptr_t),
- PTRFREE);
- memset (array_free_list, 0, (MAXOBJSZ + 1) * sizeof (ptr_t));
-
- proc = GC_n_mark_procs++;
- GC_mark_procs[proc] = (mark_proc) _Jv_MarkArray;
-
- array_kind_x = GC_n_kinds++;
- GC_obj_kinds[array_kind_x].ok_freelist = array_free_list;
- GC_obj_kinds[array_kind_x].ok_reclaim_list = 0;
- GC_obj_kinds[array_kind_x].ok_descriptor = MAKE_PROC (proc, 0);
- GC_obj_kinds[array_kind_x].ok_relocate_descr = FALSE;
- GC_obj_kinds[array_kind_x].ok_init = TRUE;
-
- _Jv_MutexInit (&disable_gc_mutex);
-
- UNLOCK ();
- ENABLE_SIGNALS ();
+ // marking.
+ array_free_list = GC_new_free_list();
+ proc = GC_new_proc((GC_mark_proc)_Jv_MarkArray);
+ array_kind_x = GC_new_kind(array_free_list, GC_MAKE_PROC (proc, 0), 0, 1);
+
+ /* Arrange to have the GC print Java class names in backtraces, etc. */
+ GC_register_describe_type_fn(GC_gcj_kind, gcj_describe_type_fn);
+ GC_register_describe_type_fn(GC_gcj_debug_kind, gcj_describe_type_fn);
}
-#if 0
-void
-_Jv_InitGC (void)
+#ifdef JV_HASH_SYNCHRONIZATION
+// Allocate an object with a fake vtable pointer, which causes only
+// the first field (beyond the fake vtable pointer) to be traced.
+// Eventually this should probably be generalized.
+
+static _Jv_VTable trace_one_vtable = {
+ 0, // class pointer
+ (void *)(2 * sizeof(void *)),
+ // descriptor; scan 2 words incl. vtable ptr.
+ // Least significant bits must be zero to
+ // identify this as a length descriptor
+ {0} // First method
+};
+
+void *
+_Jv_AllocTraceOne (jsize size /* includes vtable slot */)
{
- int proc;
- DCL_LOCK_STATE;
+ return GC_GCJ_MALLOC (size, &trace_one_vtable);
+}
- DISABLE_SIGNALS ();
- LOCK ();
+// Ditto for two words: a fake vtable pointer that causes only the
+// first two fields (beyond the fake vtable pointer) to be traced.
+// Eventually this should probably be generalized.
- if (initialized)
- {
- UNLOCK ();
- ENABLE_SIGNALS ();
- return;
- }
- initialized = 1;
+static _Jv_VTable trace_two_vtable =
+{
+ 0, // class pointer
+ (void *)(3 * sizeof(void *)),
+ // descriptor; scan 3 words incl. vtable ptr.
+ {0} // First method
+};
- GC_java_finalization = 1;
+void *
+_Jv_AllocTraceTwo (jsize size /* includes vtable slot */)
+{
+ return GC_GCJ_MALLOC (size, &trace_two_vtable);
+}
- // Set up state for marking and allocation of Java objects.
- obj_free_list = (ptr_t *) GC_generic_malloc_inner ((MAXOBJSZ + 1)
- * sizeof (ptr_t),
- PTRFREE);
- memset (obj_free_list, 0, (MAXOBJSZ + 1) * sizeof (ptr_t));
-
- proc = GC_n_mark_procs++;
- GC_mark_procs[proc] = (mark_proc) _Jv_MarkObj;
-
- obj_kind_x = GC_n_kinds++;
- GC_obj_kinds[obj_kind_x].ok_freelist = obj_free_list;
- GC_obj_kinds[obj_kind_x].ok_reclaim_list = 0;
- GC_obj_kinds[obj_kind_x].ok_descriptor = MAKE_PROC (proc, 0);
- GC_obj_kinds[obj_kind_x].ok_relocate_descr = FALSE;
- GC_obj_kinds[obj_kind_x].ok_init = TRUE;
-
- // Set up state for marking and allocation of arrays of Java
- // objects.
- array_free_list = (ptr_t *) GC_generic_malloc_inner ((MAXOBJSZ + 1)
- * sizeof (ptr_t),
- PTRFREE);
- memset (array_free_list, 0, (MAXOBJSZ + 1) * sizeof (ptr_t));
-
- proc = GC_n_mark_procs++;
- GC_mark_procs[proc] = (mark_proc) _Jv_MarkArray;
-
- array_kind_x = GC_n_kinds++;
- GC_obj_kinds[array_kind_x].ok_freelist = array_free_list;
- GC_obj_kinds[array_kind_x].ok_reclaim_list = 0;
- GC_obj_kinds[array_kind_x].ok_descriptor = MAKE_PROC (proc, 0);
- GC_obj_kinds[array_kind_x].ok_relocate_descr = FALSE;
- GC_obj_kinds[array_kind_x].ok_init = TRUE;
-
- _Jv_MutexInit (&disable_gc_mutex);
-
- UNLOCK ();
- ENABLE_SIGNALS ();
+#endif /* JV_HASH_SYNCHRONIZATION */
+
+void
+_Jv_GCInitializeFinalizers (void (*notifier) (void))
+{
+ GC_finalize_on_demand = 1;
+ GC_finalizer_notifier = notifier;
+}
+
+void
+_Jv_GCRegisterDisappearingLink (jobject *objp)
+{
+ GC_general_register_disappearing_link ((GC_PTR *) objp, (GC_PTR) *objp);
+}
+
+jboolean
+_Jv_GCCanReclaimSoftReference (jobject)
+{
+ // For now, always reclaim soft references. FIXME.
+ return true;
}
-#endif /* 0 */