}
}
-# if defined(REDIRECT_MALLOC) || defined(REDIRECT_REALLOC)
+# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
+# define REDIRECT_REALLOC GC_realloc
+# endif
+
+# ifdef REDIRECT_REALLOC
+
+/* As with malloc, avoid two levels of extra calls here. */
+# ifdef GC_ADD_CALLER
+# define RA GC_RETURN_ADDR,
+# else
+# define RA
+# endif
+# define GC_debug_realloc_replacement(p, lb) \
+ GC_debug_realloc(p, lb, RA "unknown", 0)
+
# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p, lb)
    GC_PTR p;
    size_t lb;
# endif
{
-# ifdef REDIRECT_REALLOC
- return(REDIRECT_REALLOC(p, lb));
-# else
- return(GC_realloc(p, lb));
-# endif
+ return(REDIRECT_REALLOC(p, lb));
}
-# endif /* REDIRECT_MALLOC */
+
+# undef GC_debug_realloc_replacement
+# endif /* REDIRECT_REALLOC */
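
/* Illustration (not part of the patch): a minimal client-side sketch,   */
/* assuming the collector was built with -DREDIRECT_MALLOC=GC_malloc and */
/* -DREDIRECT_REALLOC=GC_realloc, so that calls to the system realloc    */
/* are served by the stub above.                                         */

#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *buf = malloc(16);          /* redirected to GC_malloc */
    if (buf == 0) return 1;
    strcpy(buf, "hello");
    buf = realloc(buf, 1024);        /* redirected to GC_realloc */
    /* No free() is required: unreachable memory is reclaimed by the GC. */
    return buf == 0;
}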
-/* The same thing, except caller does not hold allocation lock. */
+/* Allocate memory such that only pointers to locations near the */
+/* beginning of the object are considered by the collector.      */
/* We avoid holding allocation lock while we clear memory. */
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
lw = ROUNDED_UP_WORDS(lb);
n_blocks = OBJ_SZ_TO_BLOCKS(lw);
init = GC_obj_kinds[k].ok_init;
+ if (GC_have_errors) GC_print_all_errors();
GC_INVOKE_FINALIZERS();
DISABLE_SIGNALS();
LOCK();
if (0 == result) {
return((*GC_oom_fn)(lb));
} else {
- if (init & !GC_debugging_started) {
+ if (init && !GC_debugging_started) {
BZERO(result, n_blocks * HBLKSIZE);
}
return(result);
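
/* Illustration (not part of the patch): the client-visible entry points */
/* built on this routine include GC_malloc_ignore_off_page (declared in  */
/* gc.h). A hedged usage sketch: for a large object the caller promises  */
/* to keep a pointer to (near) the object's start, so the collector need */
/* not honor interior pointers into its tail.                            */

#include "gc.h"

double *make_big_array(size_t n)
{
    /* Keep this base pointer reachable while the array is live;      */
    /* pointers only into the middle would not keep the object alive. */
    return (double *)GC_malloc_ignore_off_page(n * sizeof(double));
}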
register ptr_t op;
DCL_LOCK_STATE;
+ if (GC_have_errors) GC_print_all_errors();
GC_INVOKE_FINALIZERS();
DISABLE_SIGNALS();
LOCK();
/* GC_malloc_many or friends to replenish it. (We do not round up */
/* object sizes, since a call indicates the intention to consume many */
/* objects of exactly this size.) */
+/* We return the free list by assigning it to *result, since it is   */
+/* not safe to return, e.g., a linked list of pointer-free objects:  */
+/* the collector would not retain the entire list if it were invoked */
+/* just as we were returning.                                        */
/* Note that the client should usually clear the link field. */
-ptr_t GC_generic_malloc_many(lb, k)
+void GC_generic_malloc_many(lb, k, result)
register word lb;
register int k;
+ptr_t *result;
{
ptr_t op;
ptr_t p;
if (!SMALL_OBJ(lb)) {
op = GC_generic_malloc(lb, k);
if(0 != op) obj_link(op) = 0;
- return(op);
+ *result = op;
+ return;
}
lw = ALIGNED_WORDS(lb);
+ if (GC_have_errors) GC_print_all_errors();
GC_INVOKE_FINALIZERS();
DISABLE_SIGNALS();
LOCK();
if (!GC_is_initialized) GC_init_inner();
+ /* Do our share of marking work */
+ if (GC_incremental && !GC_dont_gc) {
+ ENTER_GC();
+ GC_collect_a_little_inner(1);
+ EXIT_GC();
+ }
/* First see if we can reclaim a page of objects waiting to be */
/* reclaimed. */
{
while ((hbp = *rlh) != 0) {
hhdr = HDR(hbp);
*rlh = hhdr -> hb_next;
+ hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
# ifdef PARALLEL_MARK
{
signed_word my_words_allocd_tmp = GC_words_allocd_tmp;
GC_mem_found += my_words_allocd;
# endif
# ifdef PARALLEL_MARK
+ *result = op;
(void)GC_atomic_add(
(volatile GC_word *)(&GC_words_allocd_tmp),
(GC_word)(my_words_allocd));
-- GC_fl_builder_count;
if (GC_fl_builder_count == 0) GC_notify_all_builder();
GC_release_mark_lock();
- return GC_clear_stack(op);
+ (void) GC_clear_stack(0);
+ return;
# else
GC_words_allocd += my_words_allocd;
goto out;
op = GC_build_fl(h, lw, ok -> ok_init, 0);
# ifdef PARALLEL_MARK
+ *result = op;
GC_acquire_mark_lock();
-- GC_fl_builder_count;
if (GC_fl_builder_count == 0) GC_notify_all_builder();
GC_release_mark_lock();
- return GC_clear_stack(op);
+ (void) GC_clear_stack(0);
+ return;
# else
goto out;
# endif
if (0 != op) obj_link(op) = 0;
out:
+ *result = op;
UNLOCK();
ENABLE_SIGNALS();
- return(GC_clear_stack(op));
+ (void) GC_clear_stack(0);
}
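
/* Illustration (not part of the patch): the "share of marking work"     */
/* above runs only when incremental collection is enabled. A client opts */
/* in with GC_enable_incremental (declared in gc.h); afterwards,         */
/* allocation calls such as this one each perform a little marking.      */

#include "gc.h"

int main(void)
{
    int i;
    GC_enable_incremental();      /* request incremental/generational GC */
    for (i = 0; i < 100000; ++i) {
        (void)GC_malloc(64);      /* each call may do some marking work */
    }
    return 0;
}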
GC_PTR GC_malloc_many(size_t lb)
{
- return(GC_generic_malloc_many(lb, NORMAL));
+ ptr_t result;
+ GC_generic_malloc_many(lb, NORMAL, &result);
+ return result;
}
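
/* Illustration (not part of the patch): consuming a GC_malloc_many()    */
/* result. gc.h links the returned objects through their first word and  */
/* provides GC_NEXT for traversal; per the comment above, the client     */
/* should clear the link field before using each object.                 */

#include "gc.h"

void *take_one(void **fl, size_t lb)
{
    void *p;
    if (*fl == 0) *fl = GC_malloc_many(lb);   /* refill the local list */
    p = *fl;
    if (p != 0) {
        *fl = GC_NEXT(p);   /* unlink the head object */
        GC_NEXT(p) = 0;     /* clear the link field, as advised above */
    }
    return p;
}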
/* Note that the "atomic" version of this would be unsafe, since the */
}
}
+#ifdef __STDC__
+/* Not well tested or integrated. */
+/* Debug version is tricky and currently missing. */
+#include <limits.h>
+
+GC_PTR GC_memalign(size_t align, size_t lb)
+{
+ size_t new_lb;
+ size_t offset;
+ ptr_t result;
+
+# ifdef ALIGN_DOUBLE
+ if (align <= WORDS_TO_BYTES(2) && lb > align) return GC_malloc(lb);
+# endif
+ if (align <= WORDS_TO_BYTES(1)) return GC_malloc(lb);
+ if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
+ if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
+ return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
+ /* Will be HBLKSIZE aligned. */
+ }
+ /* We could also try to make sure that the real rounded-up object size */
+ /* is a multiple of align. That would be correct up to HBLKSIZE. */
+ new_lb = lb + align - 1;
+ result = GC_malloc(new_lb);
+ offset = (word)result % align;
+ if (offset != 0) {
+ offset = align - offset;
+ if (!GC_all_interior_pointers) {
+ if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
+ GC_register_displacement(offset);
+ }
+ }
+ result = (GC_PTR) ((ptr_t)result + offset);
+ GC_ASSERT((word)result % align == 0);
+ return result;
+}
+#endif
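
/* Illustration (not part of the patch): a hedged use of GC_memalign,    */
/* e.g. to get cache-line alignment. Since the routine is "not well      */
/* integrated" per the note above, we assume it may lack a prototype in  */
/* gc.h and declare it here for the sketch.                              */

#include <stddef.h>
#include <assert.h>
#include "gc.h"

GC_PTR GC_memalign(size_t align, size_t lb);   /* assumed not in gc.h yet */

int main(void)
{
    void *p = GC_memalign(64, 1000);   /* 1000 bytes, 64-byte aligned */
    assert(((GC_word)p & (GC_word)63) == 0);
    return p == 0;
}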
+
# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointer-free, untraced, uncollectable data. */
/* This is normally roughly equivalent to the system malloc. */
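
/* Illustration (not part of the patch): the entry point for this kind   */
/* is GC_malloc_atomic_uncollectable (declared in gc.h). The memory is   */
/* neither scanned for pointers nor collected, so, as with the system    */
/* malloc, it must be released explicitly with GC_free.                  */

#include <string.h>
#include "gc.h"

char *copy_string(const char *s)
{
    char *p = (char *)GC_malloc_atomic_uncollectable(strlen(s) + 1);
    if (p != 0) strcpy(p, s);
    return p;   /* caller must eventually call GC_free(p) */
}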