2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
6 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 * Permission is hereby granted to use or copy this program
10 * for any purpose, provided the above notices are retained on all copies.
11 * Permission to modify the code and to distribute modified code is granted,
12 * provided the above notices are retained, and a notice that the code was
13 * modified is included with the above copyright notice.
15 /* An incomplete test for the garbage collector. */
16 /* Some more obscure entry points are not tested at all. */
17 /* This must be compiled with the same flags used to build the */
18 /* GC. It uses GC internals to allow more precise results */
19 /* checking for some of the tests. */
27 # if defined(mips) && defined(SYSTYPE_BSD43)
35 # define assert ASSERT
37 # include <assert.h> /* Not normally used, but handy for debugging. */
39 # include <assert.h> /* Not normally used, but handy for debugging. */
41 # include "gc_typed.h"
42 # ifdef THREAD_LOCAL_ALLOC
43 # include "gc_local_alloc.h"
45 # include "private/gc_priv.h" /* For output, locking, MIN_WORDS, */
46 /* and some statistics. */
47 # include "private/gcconfig.h"
49 # if defined(MSWIN32) || defined(MSWINCE)
54 # include "th/PCR_ThCrSec.h"
55 # include "th/PCR_Th.h"
57 # define GC_printf0 printf
59 # define GC_printf1 printf
62 # if defined(GC_SOLARIS_THREADS) && !defined(GC_SOLARIS_PTHREADS)
67 # if defined(GC_PTHREADS)
71 # ifdef GC_WIN32_THREADS
74 # define GC_CreateThread(a,b,c,d,e,f) ((HANDLE) _beginthreadex(a,b,c,d,e,f))
76 static CRITICAL_SECTION incr_cs;
80 /* Allocation Statistics */
81 int stubborn_count = 0;
82 int uncollectable_count = 0;
83 int collectable_count = 0;
85 int realloc_count = 0;
87 #if defined(GC_AMIGA_FASTALLOC) && defined(AMIGA)
89 extern void GC_amiga_free_all_mem(void);
90 void Amiga_Fail(void){GC_amiga_free_all_mem();abort();}
91 # define FAIL (void)Amiga_Fail()
92 void *GC_amiga_gctest_malloc_explicitly_typed(size_t lb, GC_descr d){
93 void *ret=GC_malloc_explicitly_typed(lb,d);
97 ret=GC_malloc_explicitly_typed(lb,d);
100 GC_printf0("Out of memory, (typed allocations are not directly "
101 "supported with the GC_AMIGA_FASTALLOC option.)\n");
107 void *GC_amiga_gctest_calloc_explicitly_typed(size_t a,size_t lb, GC_descr d){
108 void *ret=GC_calloc_explicitly_typed(a,lb,d);
112 ret=GC_calloc_explicitly_typed(a,lb,d);
115 GC_printf0("Out of memory, (typed allocations are not directly "
116 "supported with the GC_AMIGA_FASTALLOC option.)\n");
122 # define GC_malloc_explicitly_typed(a,b) GC_amiga_gctest_malloc_explicitly_typed(a,b)
123 # define GC_calloc_explicitly_typed(a,b,c) GC_amiga_gctest_calloc_explicitly_typed(a,b,c)
125 #else /* !AMIGA_FASTALLOC */
128 # define FAIL (void)abort()
131 # define FAIL DebugBreak()
133 # define FAIL GC_abort("Test failed");
137 #endif /* !AMIGA_FASTALLOC */
139 /* AT_END may be defined to exercise the interior pointer test */
140 /* if the collector is configured with ALL_INTERIOR_POINTERS. */
141 /* As it stands, this test should succeed with either */
142 /* configuration. In the FIND_LEAK configuration, it should */
143 /* find lots of leaks, since we free almost nothing. */
146 struct SEXPR * sexpr_car;
147 struct SEXPR * sexpr_cdr;
151 typedef struct SEXPR * sexpr;
153 # define INT_TO_SEXPR(x) ((sexpr)(unsigned long)(x))
156 # define nil (INT_TO_SEXPR(0))
157 # define car(x) ((x) -> sexpr_car)
158 # define cdr(x) ((x) -> sexpr_cdr)
159 # define is_nil(x) ((x) == nil)
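/* Note: INT_TO_SEXPR stores a small integer directly in a pointer-sized   */
/* field, so the "lists of ints" built below carry boxed-as-pointer        */
/* integers in their car fields, e.g. (illustrative only):                 */
/*                                                                         */
/*     sexpr one = INT_TO_SEXPR(1);          encode                        */
/*     int   i   = (int)(GC_word)one;        decode, as in check_ints()    */
/*                                                                         */
/* A conservative collector must tolerate such non-pointer values in       */
/* scanned fields; at worst they cause some excess retention.              */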
162 int extra_count = 0; /* Amount of space wasted in cons node */
164 /* Silly implementation of Lisp cons. Intentionally wastes lots of space */
165 /* to test the collector. */
166 # ifdef VERY_SMALL_CONFIG
167 # define cons small_cons
175 register int my_extra = extra_count;
178 r = (sexpr) GC_MALLOC_STUBBORN(sizeof(struct SEXPR) + my_extra);
180 (void)GC_printf0("Out of memory\n");
184 ((char *)p) < ((char *)r) + my_extra + sizeof(struct SEXPR); p++) {
186 (void)GC_printf1("Found nonzero at 0x%lx - allocator is broken\n",
193 r = (sexpr)((char *)r + (my_extra & ~7));
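/* Under AT_END the cons cell thus ends up my_extra bytes (rounded down    */
/* to a multiple of 8) past the start of the allocated block, so it is     */
/* reachable only through an interior pointer; see the comment on          */
/* ALL_INTERIOR_POINTERS near the top of this file.                        */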
198 if (my_extra >= 5000) {
201 extra_count = my_extra;
203 GC_END_STUBBORN_CHANGE((char *)r);
208 sexpr small_cons (x, y)
215 r = (sexpr) GC_MALLOC(sizeof(struct SEXPR));
217 (void)GC_printf0("Out of memory\n");
225 sexpr small_cons_uncollectable (x, y)
231 uncollectable_count++;
232 r = (sexpr) GC_MALLOC_UNCOLLECTABLE(sizeof(struct SEXPR));
234 (void)GC_printf0("Out of memory\n");
238 r -> sexpr_cdr = (sexpr)(~(unsigned long)y);
242 #ifdef GC_GCJ_SUPPORT
245 #include "private/dbg_mlc.h" /* For USR_PTR_FROM_BASE */
248 /* The following struct emulates the vtable in gcj. */
249 /* This assumes the default value of MARK_DESCR_OFFSET. */
251 void * dummy; /* class pointer in real gcj. */
255 struct fake_vtable gcj_class_struct1 = { 0, sizeof(struct SEXPR)
256 + sizeof(struct fake_vtable *) };
257 /* Length-based descriptor. */
258 struct fake_vtable gcj_class_struct2 =
259 { 0, (3l << (CPP_WORDSZ - 3)) | GC_DS_BITMAP};
260 /* Bitmap-based descriptor. */
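/* The two descriptors illustrate both styles from gc_mark.h:              */
/* gcj_class_struct1 holds a plain length-in-bytes descriptor (the         */
/* GC_DS_LENGTH convention), telling the collector to scan the whole       */
/* object conservatively, while gcj_class_struct2 holds a GC_DS_BITMAP     */
/* descriptor whose set bits are intended to mark the words that may       */
/* contain pointers -- here the sexpr_car and sexpr_cdr fields that        */
/* follow the vtable pointer.                                              */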
262 struct GC_ms_entry * fake_gcj_mark_proc(word * addr,
263 struct GC_ms_entry *mark_stack_ptr,
264 struct GC_ms_entry *mark_stack_limit,
269 /* Object allocated with debug allocator. */
270 addr = (word *)USR_PTR_FROM_BASE(addr);
272 x = (sexpr)(addr + 1); /* Skip the vtable pointer. */
273 mark_stack_ptr = GC_MARK_AND_PUSH(
274 (GC_PTR)(x -> sexpr_cdr), mark_stack_ptr,
275 mark_stack_limit, (GC_PTR *)&(x -> sexpr_cdr));
276 mark_stack_ptr = GC_MARK_AND_PUSH(
277 (GC_PTR)(x -> sexpr_car), mark_stack_ptr,
278 mark_stack_limit, (GC_PTR *)&(x -> sexpr_car));
279 return(mark_stack_ptr);
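/* A gcj mark procedure returns the (possibly advanced) mark stack         */
/* pointer.  GC_MARK_AND_PUSH only pushes values that look like pointers   */
/* into the collected heap, so the integer-valued car fields produced by   */
/* INT_TO_SEXPR are simply skipped.  run_one_test() later installs this    */
/* procedure with GC_init_gcj_malloc(0, (void *)fake_gcj_mark_proc).       */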
288 static int count = 0;
291 # ifdef USE_MARK_BYTES
292 r = (GC_word *) GC_GCJ_FAST_MALLOC(4, &gcj_class_struct1);
294 r = (GC_word *) GC_GCJ_FAST_MALLOC(3, &gcj_class_struct1);
297 r = (GC_word *) GC_GCJ_MALLOC(sizeof(struct SEXPR)
298 + sizeof(struct fake_vtable*),
302 (void)GC_printf0("Out of memory\n");
305 result = (sexpr)(r + 1);
306 result -> sexpr_car = x;
307 result -> sexpr_cdr = y;
312 /* Return reverse(x) concatenated with y */
319 return( reverse1(cdr(x), cons(car(x), y)) );
326 return( reverse1(x, nil) );
335 return(small_cons(small_cons(INT_TO_SEXPR(low), nil), ints(low+1, up)));
339 #ifdef GC_GCJ_SUPPORT
340 /* Return reverse(x) concatenated with y */
341 sexpr gcj_reverse1(x, y)
347 return( gcj_reverse1(cdr(x), gcj_cons(car(x), y)) );
354 return( gcj_reverse1(x, nil) );
357 sexpr gcj_ints(low, up)
363 return(gcj_cons(gcj_cons(INT_TO_SEXPR(low), nil), gcj_ints(low+1, up)));
366 #endif /* GC_GCJ_SUPPORT */
368 /* To check uncollectable allocation we build lists with disguised cdr */
369 /* pointers, and make sure they don't go away. */
370 sexpr uncollectable_ints(low, up)
376 return(small_cons_uncollectable(small_cons(INT_TO_SEXPR(low), nil),
377 uncollectable_ints(low+1, up)));
381 void check_ints(list, low, up)
385 if ((int)(GC_word)(car(car(list))) != low) {
387 "List reversal produced incorrect list - collector is broken\n");
391 if (cdr(list) != nil) {
392 (void)GC_printf0("List too long - collector is broken\n");
396 check_ints(cdr(list), low+1, up);
400 # define UNCOLLECTABLE_CDR(x) (sexpr)(~(unsigned long)(cdr(x)))
402 void check_uncollectable_ints(list, low, up)
406 if ((int)(GC_word)(car(car(list))) != low) {
408 "Uncollectable list corrupted - collector is broken\n");
412 if (UNCOLLECTABLE_CDR(list) != nil) {
413 (void)GC_printf0("Uncollectable list too long - collector is broken\n");
417 check_uncollectable_ints(UNCOLLECTABLE_CDR(list), low+1, up);
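/* Illustrative sketch (not from the original): the "disguise" is just a   */
/* bitwise complement of the pointer, so the collector's scan never sees   */
/* a valid reference, yet the value can be recovered exactly:              */
/*                                                                         */
/*     sexpr hidden    = (sexpr)(~(unsigned long)p);        store this     */
/*     sexpr recovered = (sexpr)(~(unsigned long)hidden);   == p again     */
/*                                                                         */
/* The uncollectable nodes survive regardless of reachability, which is    */
/* what the check above relies on; a collectable node referenced only      */
/* through a disguised pointer would eventually be reclaimed.              */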
421 /* Not used, but useful for debugging: */
422 void print_int_list(x)
426 (void)GC_printf0("NIL\n");
428 (void)GC_printf1("(%ld)", (long)(car(car(x))));
429 if (!is_nil(cdr(x))) {
430 (void)GC_printf0(", ");
431 (void)print_int_list(cdr(x));
433 (void)GC_printf0("\n");
438 /* Try to force a to be strangely aligned */
446 * A tiny list reversal test to check thread creation.
450 # ifdef GC_WIN32_THREADS
451 unsigned __stdcall tiny_reverse_test(void * arg)
453 void * tiny_reverse_test(void * arg)
456 check_ints(reverse(reverse(ints(1,10))), 1, 10);
460 # if defined(GC_PTHREADS)
465 if ((code = pthread_create(&t, 0, tiny_reverse_test, 0)) != 0) {
466 (void)GC_printf1("Small thread creation failed %lu\n",
467 (unsigned long)code);
470 if ((code = pthread_join(t, 0)) != 0) {
471 (void)GC_printf1("Small thread join failed %lu\n",
472 (unsigned long)code);
477 # elif defined(GC_WIN32_THREADS)
482 h = GC_CreateThread(NULL, 0, tiny_reverse_test, 0, 0, &thread_id);
483 if (h == (HANDLE)NULL) {
484 (void)GC_printf1("Small thread creation failed %lu\n",
485 (unsigned long)GetLastError());
488 if (WaitForSingleObject(h, INFINITE) != WAIT_OBJECT_0) {
489 (void)GC_printf1("Small thread wait failed %lu\n",
490 (unsigned long)GetLastError());
495 /* # elif defined(GC_SOLARIS_THREADS) */
499 # define fork_a_thread()
505 # define fork_a_thread()
510 * Repeatedly reverse lists built out of very differently sized cons cells.
511 * Check that we didn't lose anything.
521 # if defined(MSWIN32) || defined(MACOS)
522 /* Win32S only allows 128K stacks */
526 /* PCR default stack is 100K. Stack frames are up to 120 bytes. */
530 /* WinCE only allows 64K stacks */
534 /* OSF has limited stack space by default, and large frames. */
547 d = uncollectable_ints(1, 100);
548 e = uncollectable_ints(1, 1);
549 /* Check that realloc updates object descriptors correctly */
551 f = (sexpr *)GC_MALLOC(4 * sizeof(sexpr));
553 f = (sexpr *)GC_REALLOC((GC_PTR)f, 6 * sizeof(sexpr));
556 g = (sexpr *)GC_MALLOC(513 * sizeof(sexpr));
558 g = (sexpr *)GC_REALLOC((GC_PTR)g, 800 * sizeof(sexpr));
561 h = (sexpr *)GC_MALLOC(1025 * sizeof(sexpr));
563 h = (sexpr *)GC_REALLOC((GC_PTR)h, 2000 * sizeof(sexpr));
564 # ifdef GC_GCJ_SUPPORT
565 h[1999] = gcj_ints(1,200);
566 h[1999] = gcj_reverse(h[1999]);
568 h[1999] = ints(1,200);
570 /* Try to force some collections and reuse of small list elements */
571 for (i = 0; i < 10; i++) {
574 /* Superficially test interior pointer recognition on stack */
575 c = (sexpr)((char *)c + sizeof(char *));
576 d = (sexpr)((char *)d + sizeof(char *));
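/* The displaced copies of c and d are shifted back by the same amount     */
/* below, before check_ints()/check_uncollectable_ints() run, so the       */
/* lists are verified through their true base pointers; the displacement   */
/* only has to keep them alive in the meantime.                            */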
585 for (i = 0; i < 50; i++) {
587 b = reverse(reverse(b));
591 for (i = 0; i < 60; i++) {
592 if (i % 10 == 0) fork_a_thread();
593 /* This maintains the invariant that a always points to a list of */
594 /* 49 integers. Thus this is thread safe without locks, */
595 /* assuming atomic pointer assignments. */
596 a = reverse(reverse(a));
597 # if !defined(AT_END) && !defined(THREADS)
598 /* This is not thread safe, since realloc explicitly deallocates */
600 a = (sexpr)GC_REALLOC((GC_PTR)a, 500);
602 a = (sexpr)GC_REALLOC((GC_PTR)a, 8200);
608 c = (sexpr)((char *)c - sizeof(char *));
609 d = (sexpr)((char *)d - sizeof(char *));
611 check_uncollectable_ints(d, 1, 100);
612 check_ints(f[5], 1,17);
613 check_ints(g[799], 1,18);
614 # ifdef GC_GCJ_SUPPORT
615 h[1999] = gcj_reverse(h[1999]);
617 check_ints(h[1999], 1,200);
625 * The rest of this builds balanced binary trees, checks that they don't
626 * disappear, and tests finalization.
628 typedef struct treenode {
630 struct treenode * lchild;
631 struct treenode * rchild;
634 int finalizable_count = 0;
635 int finalized_count = 0;
636 VOLATILE int dropped_something = 0;
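#if 0
/* Illustrative sketch, not part of the test: the finalization machinery   */
/* exercised below boils down to this pattern -- register a finalizer,     */
/* drop the object, collect, and expect the count to move eventually.      */
/* (sketch_finalizer and sketch_finalization are hypothetical names.)      */
void sketch_finalizer(void * obj, void * client_data)
{
    ++*(int *)client_data;        /* record that obj was found unreachable */
}

void sketch_finalization(void)
{
    static int fired = 0;
    void * obj = GC_MALLOC(16);

    GC_REGISTER_FINALIZER(obj, sketch_finalizer, (void *)&fired,
                          (GC_finalization_proc *)0, (GC_PTR *)0);
    obj = 0;                      /* drop the only reference               */
    GC_gcollect();                /* the finalizer may run now, or only    */
                                  /* at some later collection              */
}
#endif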
639 void finalizer(void * obj, void * client_data)
641 void finalizer(obj, client_data)
649 PCR_ThCrSec_EnterSys();
651 # if defined(GC_SOLARIS_THREADS) && !defined(GC_SOLARIS_PTHREADS)
652 static mutex_t incr_lock;
653 mutex_lock(&incr_lock);
655 # if defined(GC_PTHREADS)
656 static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
657 pthread_mutex_lock(&incr_lock);
659 # ifdef GC_WIN32_THREADS
660 EnterCriticalSection(&incr_cs);
662 if ((int)(GC_word)client_data != t -> level) {
663 (void)GC_printf0("Wrong finalization data - collector is broken\n");
668 PCR_ThCrSec_ExitSys();
670 # if defined(GC_SOLARIS_THREADS) && !defined(GC_SOLARIS_PTHREADS)
671 mutex_unlock(&incr_lock);
673 # if defined(GC_PTHREADS)
674 pthread_mutex_unlock(&incr_lock);
676 # ifdef GC_WIN32_THREADS
677 LeaveCriticalSection(&incr_cs);
683 # define MAX_FINALIZED 8000
686 GC_FAR GC_word live_indicators[MAX_FINALIZED] = {0};
688 /* Too big for THINK_C; have to allocate it dynamically. */
689 GC_word *live_indicators = 0;
692 int live_indicators_count = 0;
697 # ifdef THREAD_LOCAL_ALLOC
698 tn * result = (tn *)GC_LOCAL_MALLOC(sizeof(tn));
700 tn * result = (tn *)GC_MALLOC(sizeof(tn));
704 # ifdef THREAD_LOCAL_ALLOC
705 /* Minimally exercise thread local allocation */
707 char * result = (char *)GC_LOCAL_MALLOC_ATOMIC(17);
708 memset(result, 'a', 17);
710 # endif /* THREAD_LOCAL_ALLOC */
712 /* Get around static data limitations. */
713 if (!live_indicators)
715 (GC_word*)NewPtrClear(MAX_FINALIZED * sizeof(GC_word));
716 if (!live_indicators) {
717 (void)GC_printf0("Out of memory\n");
721 if (n == 0) return(0);
723 (void)GC_printf0("Out of memory\n");
727 result -> lchild = mktree(n-1);
728 result -> rchild = mktree(n-1);
729 if (counter++ % 17 == 0 && n >= 2) {
730 tn * tmp = result -> lchild -> rchild;
732 result -> lchild -> rchild = result -> rchild -> lchild;
733 result -> rchild -> lchild = tmp;
735 if (counter++ % 119 == 0) {
740 PCR_ThCrSec_EnterSys();
742 # if defined(GC_SOLARIS_THREADS) && !defined(GC_SOLARIS_PTHREADS)
743 static mutex_t incr_lock;
744 mutex_lock(&incr_lock);
746 # if defined(GC_PTHREADS)
747 static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
748 pthread_mutex_lock(&incr_lock);
750 # ifdef GC_WIN32_THREADS
751 EnterCriticalSection(&incr_cs);
753 /* Losing a count here causes an erroneous report of failure. */
755 my_index = live_indicators_count++;
757 PCR_ThCrSec_ExitSys();
759 # if defined(GC_SOLARIS_THREADS) && !defined(GC_SOLARIS_PTHREADS)
760 mutex_unlock(&incr_lock);
762 # if defined(GC_PTHREADS)
763 pthread_mutex_unlock(&incr_lock);
765 # ifdef GC_WIN32_THREADS
766 LeaveCriticalSection(&incr_cs);
770 GC_REGISTER_FINALIZER((GC_PTR)result, finalizer, (GC_PTR)(GC_word)n,
771 (GC_finalization_proc *)0, (GC_PTR *)0);
772 if (my_index >= MAX_FINALIZED) {
773 GC_printf0("live_indicators overflowed\n");
776 live_indicators[my_index] = 13;
777 if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
778 (GC_PTR *)(&(live_indicators[my_index])),
779 (GC_PTR)result) != 0) {
780 GC_printf0("GC_general_register_disappearing_link failed\n");
783 if (GC_unregister_disappearing_link(
785 (&(live_indicators[my_index]))) == 0) {
786 GC_printf0("GC_unregister_disappearing_link failed\n");
789 if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
790 (GC_PTR *)(&(live_indicators[my_index])),
791 (GC_PTR)result) != 0) {
792 GC_printf0("GC_general_register_disappearing_link failed 2\n");
803 if (n == 0 && t != 0) {
804 (void)GC_printf0("Clobbered a leaf - collector is broken\n");
808 if (t -> level != n) {
809 (void)GC_printf1("Lost a node at level %lu - collector is broken\n",
813 if (counter++ % 373 == 0) {
815 (void) GC_MALLOC(counter%5001);
817 chktree(t -> lchild, n-1);
818 if (counter++ % 73 == 0) {
820 (void) GC_MALLOC(counter%373);
822 chktree(t -> rchild, n-1);
825 # if defined(GC_SOLARIS_THREADS) && !defined(GC_SOLARIS_PTHREADS)
830 # if defined(SMALL_CONFIG) || defined(GC_DEBUG)
832 return(GC_MALLOC(8));
834 void ** my_free_list_ptr;
837 if (thr_getspecific(fl_key, (void **)(&my_free_list_ptr)) != 0) {
838 (void)GC_printf0("thr_getspecific failed\n");
841 if (my_free_list_ptr == 0) {
842 uncollectable_count++;
843 my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
844 if (thr_setspecific(fl_key, my_free_list_ptr) != 0) {
845 (void)GC_printf0("thr_setspecific failed\n");
849 my_free_list = *my_free_list_ptr;
850 if (my_free_list == 0) {
852 my_free_list = GC_malloc_many(8);
853 if (my_free_list == 0) {
854 (void)GC_printf0("alloc8bytes out of memory\n");
858 *my_free_list_ptr = GC_NEXT(my_free_list);
859 GC_NEXT(my_free_list) = 0;
860 return(my_free_list);
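/* GC_malloc_many(lb) returns a whole list of lb-byte objects linked       */
/* through their first words, and GC_NEXT extracts that link.  The code    */
/* above pops one object per call and caches the remainder in the          */
/* thread-specific free-list pointer, so most calls can avoid taking the   */
/* allocation lock.                                                        */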
866 # if defined(GC_PTHREADS)
867 pthread_key_t fl_key;
871 # if defined(SMALL_CONFIG) || defined(GC_DEBUG)
873 return(GC_MALLOC(8));
875 void ** my_free_list_ptr;
878 my_free_list_ptr = (void **)pthread_getspecific(fl_key);
879 if (my_free_list_ptr == 0) {
880 uncollectable_count++;
881 my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
882 if (pthread_setspecific(fl_key, my_free_list_ptr) != 0) {
883 (void)GC_printf0("pthread_setspecific failed\n");
887 my_free_list = *my_free_list_ptr;
888 if (my_free_list == 0) {
889 my_free_list = GC_malloc_many(8);
890 if (my_free_list == 0) {
891 (void)GC_printf0("alloc8bytes out of memory\n");
895 *my_free_list_ptr = GC_NEXT(my_free_list);
896 GC_NEXT(my_free_list) = 0;
898 return(my_free_list);
903 # define alloc8bytes() GC_MALLOC_ATOMIC(8)
912 for (i = 0; i < n; i += 8) {
914 if (alloc8bytes() == 0) {
915 (void)GC_printf0("Out of memory\n");
921 # if defined(THREADS) && defined(GC_DEBUG)
922 # ifdef VERY_SMALL_CONFIG
923 # define TREE_HEIGHT 12
925 # define TREE_HEIGHT 15
928 # ifdef VERY_SMALL_CONFIG
929 # define TREE_HEIGHT 13
931 # define TREE_HEIGHT 16
939 root = mktree(TREE_HEIGHT);
940 # ifndef VERY_SMALL_CONFIG
941 alloc_small(5000000);
943 chktree(root, TREE_HEIGHT);
944 if (finalized_count && ! dropped_something) {
945 (void)GC_printf0("Premature finalization - collector is broken\n");
948 dropped_something = 1;
949 GC_noop(root); /* Root needs to remain live until */
950 /* dropped_something is set. */
951 root = mktree(TREE_HEIGHT);
952 chktree(root, TREE_HEIGHT);
953 for (i = TREE_HEIGHT; i >= 0; i--) {
957 # ifndef VERY_SMALL_CONFIG
958 alloc_small(5000000);
962 unsigned n_tests = 0;
964 GC_word bm_huge[10] = {
977 /* A very simple test of explicitly typed allocation */
980 GC_word * old, * new;
983 GC_word bm_large = 0xf7ff7fff;
984 GC_descr d1 = GC_make_descriptor(&bm3, 2);
985 GC_descr d2 = GC_make_descriptor(&bm2, 2);
987 GC_descr dummy = GC_make_descriptor(&bm_large, 32);
989 GC_descr d3 = GC_make_descriptor(&bm_large, 32);
990 GC_descr d4 = GC_make_descriptor(bm_huge, 320);
991 GC_word * x = (GC_word *)GC_malloc_explicitly_typed(2000, d4);
996 for (i = 0; i < 4000; i++) {
998 new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d1);
999 if (0 != new[0] || 0 != new[1]) {
1000 GC_printf0("Bad initialization by GC_malloc_explicitly_typed\n");
1004 new[1] = (GC_word)old;
1006 collectable_count++;
1007 new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d2);
1009 new[1] = (GC_word)old;
1011 collectable_count++;
1012 new = (GC_word *) GC_malloc_explicitly_typed(33 * sizeof(GC_word), d3);
1014 new[1] = (GC_word)old;
1016 collectable_count++;
1017 new = (GC_word *) GC_calloc_explicitly_typed(4, 2 * sizeof(GC_word),
1020 new[1] = (GC_word)old;
1022 collectable_count++;
1024 new = (GC_word *) GC_calloc_explicitly_typed(7, 3 * sizeof(GC_word),
1027 new = (GC_word *) GC_calloc_explicitly_typed(1001,
1028 3 * sizeof(GC_word),
1030 if (0 != new[0] || 0 != new[1]) {
1031 GC_printf0("Bad initialization by GC_malloc_explicitly_typed\n");
1036 new[1] = (GC_word)old;
1039 for (i = 0; i < 20000; i++) {
1041 (void)GC_printf1("typed alloc failed at %lu\n",
1047 new = (GC_word *)(old[1]);
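#if 0
/* Illustrative sketch, not part of the test: GC_make_descriptor takes a   */
/* bitmap in which (as used here) bit i set means "word i of the object    */
/* may hold a pointer", plus the number of words described.  A two-word    */
/* node whose first word is a pointer and whose second is a plain integer  */
/* could be allocated like this (sketch_typed_alloc is a hypothetical      */
/* name):                                                                  */
void sketch_typed_alloc(void)
{
    GC_word layout = 0x1;                /* only word 0 may hold a pointer */
    GC_descr d = GC_make_descriptor(&layout, 2);
    GC_word * p = (GC_word *)
                  GC_malloc_explicitly_typed(2 * sizeof(GC_word), d);

    p[0] = (GC_word)GC_MALLOC(8);        /* traced by the collector        */
    p[1] = 42;                           /* ignored by the collector       */
}
#endif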
1066 void fail_proc1(GC_PTR x)
1071 #endif /* __STDC__ */
1074 # define TEST_FAIL_COUNT(n) 1
1076 # define TEST_FAIL_COUNT(n) (fail_count >= (n))
1085 char *y = (char *)(size_t)fail_proc1;
1091 "This test program is not designed for leak detection mode\n");
1092 (void)GC_printf0("Expect lots of problems.\n");
1095 # ifndef DBG_HDRS_ALL
1096 collectable_count += 3;
1097 if ((GC_size(GC_malloc(7)) != 8 &&
1098 GC_size(GC_malloc(7)) != MIN_WORDS * sizeof(GC_word))
1099 || GC_size(GC_malloc(15)) != 16) {
1100 (void)GC_printf0("GC_size produced unexpected results\n");
1103 collectable_count += 1;
1104 if (GC_size(GC_malloc(0)) != MIN_WORDS * sizeof(GC_word)) {
1105 (void)GC_printf1("GC_malloc(0) failed: GC_size returns %ld\n",
1106 GC_size(GC_malloc(0)));
1109 collectable_count += 1;
1110 if (GC_size(GC_malloc_uncollectable(0)) != MIN_WORDS * sizeof(GC_word)) {
1111 (void)GC_printf0("GC_malloc_uncollectable(0) failed\n");
1114 GC_is_valid_displacement_print_proc = fail_proc1;
1115 GC_is_visible_print_proc = fail_proc1;
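/* With these print procs installed, the deliberately bogus arguments      */
/* passed below are reported through fail_proc1 (which evidently bumps     */
/* fail_count, as TEST_FAIL_COUNT assumes) rather than aborting, so the    */
/* expected number of failures can simply be checked afterwards.           */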
1116 collectable_count += 1;
1118 if (GC_base(x + 13) != x) {
1119 (void)GC_printf0("GC_base(heap ptr) produced incorrect result\n");
1123 if (GC_base(y) != 0) {
1124 (void)GC_printf0("GC_base(fn_ptr) produced incorrect result\n");
1128 if (GC_same_obj(x+5, x) != x + 5) {
1129 (void)GC_printf0("GC_same_obj produced incorrect result\n");
1132 if (GC_is_visible(y) != y || GC_is_visible(x) != x) {
1133 (void)GC_printf0("GC_is_visible produced incorrect result\n");
1136 if (!TEST_FAIL_COUNT(1)) {
1137 # if !(defined(RS6000) || defined(POWERPC) || defined(IA64))
1138 /* On RS6000s, function pointers point to a descriptor in the */
1139 /* data segment, so there should have been no failures. */
1140 (void)GC_printf0("GC_is_visible produced wrong failure indication\n");
1144 if (GC_is_valid_displacement(y) != y
1145 || GC_is_valid_displacement(x) != x
1146 || GC_is_valid_displacement(x + 3) != x + 3) {
1148 "GC_is_valid_displacement produced incorrect result\n");
1151 # ifndef ALL_INTERIOR_POINTERS
1152 # if defined(RS6000) || defined(POWERPC)
1153 if (!TEST_FAIL_COUNT(1)) {
1155 if (GC_all_interior_pointers && !TEST_FAIL_COUNT(1)
1156 || !GC_all_interior_pointers && !TEST_FAIL_COUNT(2)) {
1158 (void)GC_printf0("GC_is_valid_displacement produced wrong failure indication\n");
1162 # endif /* DBG_HDRS_ALL */
1163 /* Test floating point alignment */
1164 collectable_count += 2;
1165 *(double *)GC_MALLOC(sizeof(double)) = 1.0;
1166 *(double *)GC_MALLOC(sizeof(double)) = 1.0;
1167 # ifdef GC_GCJ_SUPPORT
1168 GC_REGISTER_DISPLACEMENT(sizeof(struct fake_vtable *));
1169 GC_init_gcj_malloc(0, (void *)fake_gcj_mark_proc);
1171 /* Repeated list reversal test. */
1174 GC_printf0("-------------Finished reverse_test\n");
1176 # ifndef DBG_HDRS_ALL
1179 GC_printf0("-------------Finished typed_test\n");
1181 # endif /* DBG_HDRS_ALL */
1186 /* GC_printf1("Finished %x\n", pthread_self()); */
1189 void check_heap_stats()
1191 unsigned long max_heap_sz;
1194 int late_finalize_count = 0;
1196 # ifdef VERY_SMALL_CONFIG
1197 /* These are something of a guess. */
1198 if (sizeof(char *) > 4) {
1199 max_heap_sz = 4500000;
1201 max_heap_sz = 2800000;
1204 if (sizeof(char *) > 4) {
1205 max_heap_sz = 15000000;
1207 max_heap_sz = 11000000;
1212 # ifdef SAVE_CALL_CHAIN
1214 # ifdef SAVE_CALL_COUNT
1215 max_heap_sz *= SAVE_CALL_COUNT/4;
1219 /* Garbage collect repeatedly so that all inaccessible objects */
1220 /* can be finalized. */
1221 while (GC_collect_a_little()) { }
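/* GC_collect_a_little() performs a small, bounded amount of collection    */
/* work and returns nonzero while more remains, so the loop above          */
/* finishes any collection already in progress.  GC_invoke_finalizers(),   */
/* used below, runs the finalizers that are ready and returns how many     */
/* it ran, which is what late_finalize_count accumulates.                  */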
1222 for (i = 0; i < 16; i++) {
1224 late_finalize_count += GC_invoke_finalizers();
1226 (void)GC_printf1("Completed %lu tests\n", (unsigned long)n_tests);
1227 (void)GC_printf1("Allocated %lu collectable objects\n", (unsigned long)collectable_count);
1228 (void)GC_printf1("Allocated %lu uncollectable objects\n", (unsigned long)uncollectable_count);
1229 (void)GC_printf1("Allocated %lu atomic objects\n", (unsigned long)atomic_count);
1230 (void)GC_printf1("Allocated %lu stubborn objects\n", (unsigned long)stubborn_count);
1231 (void)GC_printf2("Finalized %lu/%lu objects - ",
1232 (unsigned long)finalized_count,
1233 (unsigned long)finalizable_count);
1234 # ifdef FINALIZE_ON_DEMAND
1235 if (finalized_count != late_finalize_count) {
1236 (void)GC_printf0("Demand finalization error\n");
1240 if (finalized_count > finalizable_count
1241 || finalized_count < finalizable_count/2) {
1242 (void)GC_printf0("finalization is probably broken\n");
1245 (void)GC_printf0("finalization is probably ok\n");
1248 for (i = 0; i < MAX_FINALIZED; i++) {
1249 if (live_indicators[i] != 0) {
1253 i = finalizable_count - finalized_count - still_live;
1256 ("%lu disappearing links remain and %ld more objects were not finalized\n",
1257 (unsigned long) still_live, (long)i);
1259 GC_printf0("\tVery suspicious!\n");
1261 GC_printf0("\tSlightly suspicious, but probably OK.\n");
1264 (void)GC_printf1("Total number of bytes allocated is %lu\n",
1266 WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc));
1267 (void)GC_printf1("Final heap size is %lu bytes\n",
1268 (unsigned long)GC_get_heap_size());
1269 if (WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc)
1270 # ifdef VERY_SMALL_CONFIG
1271 < 2700000*n_tests) {
1273 < 33500000*n_tests) {
1275 (void)GC_printf0("Incorrect execution - missed some allocations\n");
1278 if (GC_get_heap_size() > max_heap_sz*n_tests) {
1279 (void)GC_printf0("Unexpected heap growth - collector may be broken\n");
1282 (void)GC_printf0("Collector appears to work\n");
1286 void SetMinimumStack(long minSize)
1290 if (minSize > LMGetDefltStack())
1292 newApplLimit = (long) GetApplLimit()
1293 - (minSize - LMGetDefltStack());
1294 SetApplLimit((Ptr) newApplLimit);
1299 #define cMinStackSpace (512L * 1024L)
1304 void warn_proc(char *msg, GC_word p)
1306 void warn_proc(msg, p)
1311 GC_printf1(msg, (unsigned long)p);
1316 #if !defined(PCR) && !defined(GC_SOLARIS_THREADS) \
1317 && !defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS) \
1319 #if defined(MSWIN32) && !defined(__MINGW32__)
1320 int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPTSTR cmd, int n)
1331 /* No good way to determine stack base from library; do it */
1332 /* manually on this platform. */
1333 GC_stackbottom = (GC_PTR)(&dummy);
1336 /* Make sure we have lots and lots of stack space. */
1337 SetMinimumStack(cMinStackSpace);
1338 /* Cheat and let stdio initialize toolbox for us. */
1339 printf("Testing GC Macintosh port.\n");
1341 GC_INIT(); /* Only needed if gc is dynamic library. */
1342 (void) GC_set_warn_proc(warn_proc);
1343 # if defined(MPROTECT_VDB) || defined(PROC_VDB)
1344 GC_enable_incremental();
1345 (void) GC_printf0("Switched to incremental mode\n");
1346 # if defined(MPROTECT_VDB)
1347 (void)GC_printf0("Emulating dirty bits with mprotect/signals\n");
1349 (void)GC_printf0("Reading dirty bits from /proc\n");
1355 (void)fflush(stdout);
1358 /* Entry points we should be testing, but aren't. */
1359 /* Some can be tested by defining GC_DEBUG at the top of this file. */
1360 /* This is a bit SunOS4-specific. */
1361 GC_noop(GC_expand_hp, GC_add_roots, GC_clear_roots,
1362 GC_register_disappearing_link,
1363 GC_register_finalizer_ignore_self,
1364 GC_debug_register_displacement,
1365 GC_print_obj, GC_debug_change_stubborn,
1366 GC_debug_end_stubborn_change, GC_debug_malloc_uncollectable,
1367 GC_debug_free, GC_debug_realloc, GC_generic_malloc_words_small,
1368 GC_init, GC_make_closure, GC_debug_invoke_finalizer,
1369 GC_page_was_ever_dirty, GC_is_fresh,
1370 GC_malloc_ignore_off_page, GC_malloc_atomic_ignore_off_page,
1371 GC_set_max_heap_size, GC_get_bytes_since_gc,
1372 GC_get_total_bytes, GC_pre_incr, GC_post_incr);
1375 GC_win32_free_heap();
1381 #ifdef GC_WIN32_THREADS
1383 unsigned __stdcall thr_run_one_test(void *arg)
1390 HANDLE win_created_h;
1393 LRESULT CALLBACK window_proc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
1398 GC_printf0("Received WM_HIBERNATE, calling GC_gcollect\n");
1402 GC_printf0("Received WM_CLOSE, closing window\n");
1403 DestroyWindow(hwnd);
1409 ret = DefWindowProc(hwnd, uMsg, wParam, lParam);
1415 unsigned __stdcall thr_window(void *arg)
1417 WNDCLASS win_class = {
1422 GetModuleHandle(NULL),
1425 (HBRUSH)(COLOR_APPWORKSPACE+1),
1431 if (!RegisterClass(&win_class))
1434 win_handle = CreateWindowEx(
1439 CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT,
1442 GetModuleHandle(NULL),
1445 if (win_handle == NULL)
1448 SetEvent(win_created_h);
1450 ShowWindow(win_handle, SW_SHOW);
1451 UpdateWindow(win_handle);
1453 while (GetMessage(&msg, NULL, 0, 0)) {
1454 TranslateMessage(&msg);
1455 DispatchMessage(&msg);
1465 int APIENTRY GC_WinMain(HINSTANCE instance, HINSTANCE prev, LPWSTR cmd, int n)
1467 int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)
1479 GC_enable_incremental();
1481 InitializeCriticalSection(&incr_cs);
1482 (void) GC_set_warn_proc(warn_proc);
1484 win_created_h = CreateEvent(NULL, FALSE, FALSE, NULL);
1485 if (win_created_h == (HANDLE)NULL) {
1486 (void)GC_printf1("Event creation failed %lu\n", (unsigned long)GetLastError());
1489 win_thr_h = GC_CreateThread(NULL, 0, thr_window, 0, 0, &thread_id);
1490 if (win_thr_h == (HANDLE)NULL) {
1491 (void)GC_printf1("Thread creation failed %lu\n", (unsigned long)GetLastError());
1494 if (WaitForSingleObject(win_created_h, INFINITE) != WAIT_OBJECT_0)
1496 CloseHandle(win_created_h);
1499 for (i = 0; i < NTEST; i++) {
1500 h[i] = GC_CreateThread(NULL, 0, thr_run_one_test, 0, 0, &thread_id);
1501 if (h[i] == (HANDLE)NULL) {
1502 (void)GC_printf1("Thread creation failed %lu\n", (unsigned long)GetLastError());
1506 # endif /* NTEST > 0 */
1509 for (i = 0; i < NTEST; i++) {
1510 if (WaitForSingleObject(h[i], INFINITE) != WAIT_OBJECT_0) {
1511 (void)GC_printf1("Thread wait failed %lu\n", (unsigned long)GetLastError());
1515 # endif /* NTEST > 0 */
1517 PostMessage(win_handle, WM_CLOSE, 0, 0);
1518 if (WaitForSingleObject(win_thr_h, INFINITE) != WAIT_OBJECT_0)
1525 #endif /* GC_WIN32_THREADS */
1536 /* GC_enable_incremental(); */
1537 (void) GC_set_warn_proc(warn_proc);
1538 th1 = PCR_Th_Fork(run_one_test, 0);
1539 th2 = PCR_Th_Fork(run_one_test, 0);
1541 if (PCR_Th_T_Join(th1, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
1542 != PCR_ERes_okay || code != 0) {
1543 (void)GC_printf0("Thread 1 failed\n");
1545 if (PCR_Th_T_Join(th2, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
1546 != PCR_ERes_okay || code != 0) {
1547 (void)GC_printf0("Thread 2 failed\n");
1554 #if defined(GC_SOLARIS_THREADS) || defined(GC_PTHREADS)
1555 void * thr_run_one_test(void * arg)
1562 # define GC_free GC_debug_free
1565 #if defined(GC_SOLARIS_THREADS) && !defined(GC_SOLARIS_PTHREADS)
1573 GC_INIT(); /* Only needed if gc is dynamic library. */
1574 GC_enable_incremental();
1575 (void) GC_set_warn_proc(warn_proc);
1576 if (thr_keycreate(&fl_key, GC_free) != 0) {
1577 (void)GC_printf1("Key creation failed %lu\n", (unsigned long)code);
1580 if ((code = thr_create(0, 1024*1024, thr_run_one_test, 0, 0, &th1)) != 0) {
1581 (void)GC_printf1("Thread 1 creation failed %lu\n", (unsigned long)code);
1584 if ((code = thr_create(0, 1024*1024, thr_run_one_test, 0, THR_NEW_LWP, &th2)) != 0) {
1585 (void)GC_printf1("Thread 2 creation failed %lu\n", (unsigned long)code);
1589 if ((code = thr_join(th1, 0, 0)) != 0) {
1590 (void)GC_printf1("Thread 1 failed %lu\n", (unsigned long)code);
1593 if (thr_join(th2, 0, 0) != 0) {
1594 (void)GC_printf1("Thread 2 failed %lu\n", (unsigned long)code);
1598 (void)fflush(stdout);
1601 #else /* pthreads */
1611 pthread_attr_t attr;
1614 # ifdef GC_IRIX_THREADS
1615 /* Force a larger stack to be preallocated, */
1616 /* since the initial one can't always grow later. */
1617 *((volatile char *)&code - 1024*1024) = 0; /* Require 1 MB */
1618 # endif /* GC_IRIX_THREADS */
1619 # if defined(GC_HPUX_THREADS)
1620 /* Default stack size is too small, especially with the 64-bit ABI. */
1622 if (pthread_default_stacksize_np(1024*1024, 0) != 0) {
1623 (void)GC_printf0("pthread_default_stacksize_np failed.\n");
1625 # endif /* GC_HPUX_THREADS */
1626 pthread_attr_init(&attr);
1627 # if defined(GC_IRIX_THREADS) || defined(GC_FREEBSD_THREADS)
1628 pthread_attr_setstacksize(&attr, 1000000);
1631 # if defined(MPROTECT_VDB) && !defined(PARALLEL_MARK) &&!defined(REDIRECT_MALLOC)
1632 GC_enable_incremental();
1633 (void) GC_printf0("Switched to incremental mode\n");
1634 (void) GC_printf0("Emulating dirty bits with mprotect/signals\n");
1636 (void) GC_set_warn_proc(warn_proc);
1637 if ((code = pthread_key_create(&fl_key, 0)) != 0) {
1638 (void)GC_printf1("Key creation failed %lu\n", (unsigned long)code);
1641 if ((code = pthread_create(&th1, &attr, thr_run_one_test, 0)) != 0) {
1642 (void)GC_printf1("Thread 1 creation failed %lu\n", (unsigned long)code);
1645 if ((code = pthread_create(&th2, &attr, thr_run_one_test, 0)) != 0) {
1646 (void)GC_printf1("Thread 2 creation failed %lu\n", (unsigned long)code);
1650 if ((code = pthread_join(th1, 0)) != 0) {
1651 (void)GC_printf1("Thread 1 failed %lu\n", (unsigned long)code);
1654 if (pthread_join(th2, 0) != 0) {
1655 (void)GC_printf1("Thread 2 failed %lu\n", (unsigned long)code);
1659 (void)fflush(stdout);
1660 pthread_attr_destroy(&attr);
1661 GC_printf1("Completed %d collections\n", GC_gc_no);
1664 #endif /* GC_PTHREADS */
1665 #endif /* GC_SOLARIS_THREADS || GC_PTHREADS */