 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.

/* We put this here to minimize the risk of inlining. */
void GC_noop(void *p, ...) {}

/* Single argument version, robust against whole program analysis. */
static VOLATILE word sink;

/* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */
word GC_n_mark_procs = GC_RESERVED_MARK_PROCS;
/* Initialize GC_obj_kinds and the standard free lists properly. */
/* This must be done statically since they may be accessed before */
/* GC_init is called. */
/* It's done here, since we need to deal with mark descriptors. */
struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
                0 | DS_LENGTH, FALSE, FALSE },
/* NORMAL */  { &GC_objfreelist[0], 0,
#               if defined(ADD_BYTE_AT_END) && ALIGNMENT > DS_TAGS
                (word)(-ALIGNMENT) | DS_LENGTH,
                TRUE /* add length to descr */, TRUE },
              { &GC_uobjfreelist[0], 0,
                0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },
# ifdef ATOMIC_UNCOLLECTABLE
              { &GC_auobjfreelist[0], 0,
                0 | DS_LENGTH, FALSE /* add length to descr */, FALSE },
# ifdef STUBBORN_ALLOC
/* STUBBORN */{ &GC_sobjfreelist[0], 0,
                0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },

# ifdef ATOMIC_UNCOLLECTABLE
#   ifdef STUBBORN_ALLOC
#   ifdef STUBBORN_ALLOC

# ifndef INITIAL_MARK_STACK_SIZE
#   define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
                /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a */
                /* multiple of HBLKSIZE. */
                /* The incremental collector actually likes a larger */
                /* size, since it wants to push all marked dirty objects */
                /* before marking anything new.  Currently we let it */
                /* grow dynamically. */
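                /* Worked example, assuming a hypothetical 4096-byte */
                /* HBLKSIZE and two-word (8-byte) mse entries: the */
                /* default of 1*HBLKSIZE gives 4096 entries, i.e. */
                /* 32768 bytes, which is indeed a multiple of HBLKSIZE. */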
 * Limits of stack for GC_mark routine.
 * All ranges between GC_mark_stack (incl.) and GC_mark_stack_top (incl.) still
 * need to be marked from.

word GC_n_rescuing_pages;       /* Number of dirty pages we marked from; */
                                /* excludes ptrfree pages, etc. */

word GC_mark_stack_size = 0;

mse * GC_mark_stack_top;

static struct hblk * scan_ptr;

mark_state_t GC_mark_state = MS_NONE;

GC_bool GC_mark_stack_too_small = FALSE;

GC_bool GC_objects_are_marked = FALSE;  /* Are there collectable marked */
                                        /* objects in the heap? */

/* Is a collection in progress?  Note that this can return true in the */
/* nonincremental case, if a collection has been abandoned and the */
/* mark state is now MS_INVALID. */
GC_bool GC_collection_in_progress()
    return(GC_mark_state != MS_NONE);

/* clear all mark bits in the header */
void GC_clear_hdr_marks(hhdr)
    BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));

/* Set all mark bits in the header.  Used for uncollectable blocks. */
void GC_set_hdr_marks(hhdr)
    for (i = 0; i < MARK_BITS_SZ; ++i) {
        hhdr -> hb_marks[i] = ONES;

 * Clear all mark bits associated with block h.
static void clear_marks_for_block(h, dummy)
    register hdr * hhdr = HDR(h);

    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
        /* Mark bit for these is cleared only once the object is */
        /* explicitly deallocated.  This either frees the block, or */
        /* the bit is cleared once the object is on the free list. */
    GC_clear_hdr_marks(hhdr);

/* Slow but general routines for setting/clearing/asking about mark bits */
void GC_set_mark_bit(p)
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    set_mark_bit_from_hdr(hhdr, word_no);

void GC_clear_mark_bit(p)
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    clear_mark_bit_from_hdr(hhdr, word_no);

GC_bool GC_is_marked(p)
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    return(mark_bit_from_hdr(hhdr, word_no));

 * Clear mark bits in all allocated heap blocks.  This invalidates
 * the marker invariant, and sets GC_mark_state to reflect this.
 * (This implicitly starts marking to reestablish the invariant.)
void GC_clear_marks()
    GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
    GC_objects_are_marked = FALSE;
    GC_mark_state = MS_INVALID;

    /* Counters reflect currently marked objects: reset here */
        GC_composite_in_use = 0;
        GC_atomic_in_use = 0;

/* Initiate a garbage collection.  Initiates a full collection if the */
/* mark state is invalid. */
void GC_initiate_gc()
    if (GC_dirty_maintained) GC_read_dirty();
#   ifdef STUBBORN_ALLOC
        extern void GC_check_dirty();

        if (GC_dirty_maintained) GC_check_dirty();

    GC_n_rescuing_pages = 0;
    if (GC_mark_state == MS_NONE) {
        GC_mark_state = MS_PUSH_RESCUERS;
    } else if (GC_mark_state != MS_INVALID) {
        ABORT("unexpected state");
    } /* else this is really a full collection, and mark */
      /* bits are invalid. */

static void alloc_mark_stack();
/* Perform a small amount of marking. */
/* We try to touch roughly a page of memory. */
/* Return TRUE if we just finished a mark phase. */
/* Cold_gc_frame is an address inside a GC frame that */
/* remains valid until all marking is complete. */
/* A zero value indicates that it's OK to miss some */
/* register values. */
GC_bool GC_mark_some(cold_gc_frame)
    switch(GC_mark_state) {
        case MS_PUSH_RESCUERS:
            if (GC_mark_stack_top
                >= GC_mark_stack + GC_mark_stack_size
                   - INITIAL_MARK_STACK_SIZE/2) {
                /* Go ahead and mark, even though that might cause us to */
                /* see more marked dirty objects later on.  Avoid this */
                GC_mark_stack_too_small = TRUE;
                GC_mark_from_mark_stack();
                scan_ptr = GC_push_next_marked_dirty(scan_ptr);
                    GC_printf1("Marked from %lu dirty pages\n",
                               (unsigned long)GC_n_rescuing_pages);
                    GC_push_roots(FALSE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;

        case MS_PUSH_UNCOLLECTABLE:
            if (GC_mark_stack_top
                >= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) {
                GC_mark_from_mark_stack();
                scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
                    GC_push_roots(TRUE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;

        case MS_ROOTS_PUSHED:
            if (GC_mark_stack_top >= GC_mark_stack) {
                GC_mark_from_mark_stack();
                GC_mark_state = MS_NONE;
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);

        case MS_PARTIALLY_INVALID:
            if (!GC_objects_are_marked) {
                GC_mark_state = MS_PUSH_UNCOLLECTABLE;
            if (GC_mark_stack_top >= GC_mark_stack) {
                GC_mark_from_mark_stack();
            if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
                /* About to start a heap scan for marked objects. */
                /* Mark stack is empty.  OK to reallocate. */
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                GC_mark_state = MS_PARTIALLY_INVALID;
            scan_ptr = GC_push_next_marked(scan_ptr);
            if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
                GC_push_roots(TRUE, cold_gc_frame);
                GC_objects_are_marked = TRUE;
                if (GC_mark_state != MS_INVALID) {
                    GC_mark_state = MS_ROOTS_PUSHED;

            ABORT("GC_mark_some: bad state");
GC_bool GC_mark_stack_empty()
    return(GC_mark_stack_top < GC_mark_stack);

word GC_prof_array[10];
#   define PROF(n) GC_prof_array[n]++

/* Given a pointer to someplace other than a small object page or the */
/* first page of a large object, return a pointer either to the */
/* start of the large object or NIL. */
/* In the latter case, black list the address current. */
/* Returns NIL without black listing if current points to a block */
/* with IGNORE_OFF_PAGE set. */
# ifdef PRINT_BLACK_LIST
  ptr_t GC_find_start(current, hhdr, source)
  ptr_t GC_find_start(current, hhdr)
    register ptr_t current;
#   ifdef ALL_INTERIOR_POINTERS
        register ptr_t orig = current;

        current = (ptr_t)HBLKPTR(current) + HDR_BYTES;
            current = current - HBLKSIZE*(word)hhdr;
        } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
        /* current points to the start of the large object */
        if (hhdr -> hb_flags & IGNORE_OFF_PAGE) return(0);
        if ((word *)orig - (word *)current
             >= (ptrdiff_t)(hhdr->hb_sz)) {
            /* Pointer past the end of the block */
            GC_ADD_TO_BLACK_LIST_NORMAL(orig, source);
        GC_ADD_TO_BLACK_LIST_NORMAL(current, source);
        GC_ADD_TO_BLACK_LIST_NORMAL(current, source);

void GC_invalidate_mark_state()
    GC_mark_state = MS_INVALID;
    GC_mark_stack_top = GC_mark_stack-1;

mse * GC_signal_mark_stack_overflow(msp)
    GC_mark_state = MS_INVALID;
    GC_mark_stack_too_small = TRUE;
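    /* Since the mark state is now MS_INVALID, any entries discarded */
    /* below will be rediscovered when marking is restarted, so it is */
    /* safe to drop the newest INITIAL_MARK_STACK_SIZE/8 entries to */
    /* give the caller room to continue. */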
418 GC_printf1("Mark stack overflow; current size = %lu entries\n",
421 return(msp-INITIAL_MARK_STACK_SIZE/8);
 * Mark objects pointed to by the regions described by
 * mark stack entries between GC_mark_stack and GC_mark_stack_top,
 * inclusive.  Assumes the upper limit of a mark stack entry
 * is never 0.  A mark stack entry never has size 0.
 * We try to traverse on the order of a hblk of memory before we return.
 * Caller is responsible for calling this until the mark stack is empty.
 * Note that this is the most performance critical routine in the
 * collector.  Hence it contains all sorts of ugly hacks to speed
 * things up.  In particular, we avoid procedure calls on the common
 * path, we take advantage of peculiarities of the mark descriptor
 * encoding, we optionally maintain a cache for the block address to
 * header mapping, we prefetch when an object is "grayed", etc.
void GC_mark_from_mark_stack()
    mse * GC_mark_stack_reg = GC_mark_stack;
    mse * GC_mark_stack_top_reg = GC_mark_stack_top;
    mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);
    int credit = HBLKSIZE;      /* Remaining credit for marking work */
    register word * current_p;  /* Pointer to current candidate ptr. */
    register word current;      /* Candidate pointer. */
    register word * limit;      /* (Incl) limit of current candidate */
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define SPLIT_RANGE_WORDS 128  /* Must be power of 2. */

    GC_objects_are_marked = TRUE;
#   ifdef OS2 /* Use untweaked version to circumvent compiler problem */
    while (GC_mark_stack_top_reg >= GC_mark_stack_reg && credit >= 0) {
    while ((((ptr_t)GC_mark_stack_top_reg - (ptr_t)GC_mark_stack_reg) | credit)
        current_p = GC_mark_stack_top_reg -> mse_start;
        descr = GC_mark_stack_top_reg -> mse_descr;
        /* current_p and descr describe the current object. */
        /* *GC_mark_stack_top_reg is vacant. */
        /* The following is 0 only for small objects described by a simple */
        /* length descriptor.  For many applications this is the common */
        /* case, so we try to detect it quickly. */
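        /* Illustration, assuming hypothetical 4-byte words, two low-order */
        /* descriptor tag bits, and SPLIT_RANGE_WORDS = 128: the test below */
        /* masks descr with ~(512-1) | DS_TAGS = 0xfffffe03, so it is zero */
        /* exactly for simple DS_LENGTH descriptors shorter than 512 bytes, */
        /* which then take the fast path in the final else branch. */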
472 if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | DS_TAGS)) {
473 word tag = descr & DS_TAGS;
478 /* Process part of the range to avoid pushing too much on the */
480 GC_mark_stack_top_reg -> mse_start =
481 limit = current_p + SPLIT_RANGE_WORDS-1;
482 GC_mark_stack_top_reg -> mse_descr =
483 descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
484 /* Make sure that pointers overlapping the two ranges are */
486 limit = (word *)((char *)limit + sizeof(word) - ALIGNMENT);
489 GC_mark_stack_top_reg--;
491 credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
493 if ((signed_word)descr < 0) {
494 current = *current_p;
495 if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
497 HC_PUSH_CONTENTS((ptr_t)current, GC_mark_stack_top_reg,
498 mark_stack_limit, current_p, exit1);
506 GC_mark_stack_top_reg--;
507 credit -= PROC_BYTES;
509 current_p = GC_debug_object_start(current_p);
511 GC_mark_stack_top_reg =
513 (current_p, GC_mark_stack_top_reg,
514 mark_stack_limit, ENV(descr));
517 if ((signed_word)descr >= 0) {
518 /* Descriptor is in the object. */
519 descr = *(word *)((ptr_t)current_p + descr - DS_PER_OBJECT);
521 /* Descriptor is in type descriptor pointed to by first */
522 /* word in object. */
523 ptr_t type_descr = *(ptr_t *)current_p;
                /* type_descr is either a valid pointer to the descriptor */
                /* structure, or this object was on a free list.  If it */
                /* was anything but the last object on the free list, */
                /* we will misinterpret the next object on the free list as */
                /* the type descriptor, and get a 0 GC descriptor, which */
                /* is ideal.  Unfortunately, we need to check for the last */
                /* object case explicitly. */
                if (0 == type_descr) {
                    /* Rarely executed. */
                    GC_mark_stack_top_reg--;
                descr = *(word *)(type_descr
                          - (descr - (DS_PER_OBJECT - INDIR_PER_OBJ_BIAS)));
        } else /* Small object with length descriptor */ {
            GC_mark_stack_top_reg--;
            limit = (word *)(((ptr_t)current_p) + (word)descr);
        /* The simple case in which we're scanning a range. */
        credit -= (ptr_t)limit - (ptr_t)current_p;
#       ifndef SMALL_CONFIG
          /* Try to prefetch the next pointer to be examined asap. */
          /* Empirically, this also seems to help slightly without */
          /* prefetches, at least on linux/X86.  Presumably this loop */
          /* ends up with less register pressure, and gcc thus ends up */
          /* generating slightly better code.  Overall gcc code quality */
          /* for this loop is still not great. */
            PREFETCH((ptr_t)limit - PREF_DIST*CACHE_LINE_SIZE);
            limit = (word *)((char *)limit - ALIGNMENT);
            if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
            if (current_p > limit) goto next_object;
            /* Unroll once, so we don't do too many of the prefetches */
            /* based on limit. */
            limit = (word *)((char *)limit - ALIGNMENT);
            if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
            if (current_p > limit) goto next_object;
        while (current_p <= limit) {
          /* Empirically, unrolling this loop doesn't help a lot. */
          /* Since HC_PUSH_CONTENTS expands to a lot of code, */
          current = *current_p;
          PREFETCH((ptr_t)current_p + PREF_DIST*CACHE_LINE_SIZE);
          if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
            /* Prefetch the contents of the object we just pushed.  It's */
            /* likely we will need them soon. */
            HC_PUSH_CONTENTS((ptr_t)current, GC_mark_stack_top_reg,
                             mark_stack_limit, current_p, exit2);
          current_p = (word *)((char *)current_p + ALIGNMENT);
#       ifndef SMALL_CONFIG
          /* We still need to mark the entry we previously prefetched. */
          /* We already know that it passes the preliminary pointer */
            HC_PUSH_CONTENTS((ptr_t)deferred, GC_mark_stack_top_reg,
                             mark_stack_limit, current_p, exit4);
    GC_mark_stack_top = GC_mark_stack_top_reg;
/* Allocate or reallocate space for mark stack of size n entries. */
/* May silently fail. */
static void alloc_mark_stack(n)
    mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct ms_entry));

    GC_mark_stack_too_small = FALSE;
    if (GC_mark_stack_size != 0) {
        if (new_stack != 0) {
          word displ = (word)GC_mark_stack & (GC_page_size - 1);
          signed_word size = GC_mark_stack_size * sizeof(struct ms_entry);

          /* Recycle old space */
              if (0 != displ) displ = GC_page_size - displ;
              size = (size - displ) & ~(GC_page_size - 1);
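              /* Example with a hypothetical 4096-byte page size: if the */
              /* old stack starts 0x300 bytes into a page and occupies */
              /* 0x5000 bytes, displ becomes 0xd00 and size becomes */
              /* (0x5000 - 0xd00) & ~0xfff = 0x4000, i.e. the four whole */
              /* pages inside the old allocation are handed back below. */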
              GC_add_to_heap((struct hblk *)
                                ((word)GC_mark_stack + displ), (word)size);
          GC_mark_stack = new_stack;
          GC_mark_stack_size = n;
                GC_printf1("Grew mark stack to %lu frames\n",
                           (unsigned long) GC_mark_stack_size);
                GC_printf1("Failed to grow mark stack to %lu frames\n",
        if (new_stack == 0) {
            GC_err_printf0("No space for mark stack\n");
        GC_mark_stack = new_stack;
        GC_mark_stack_size = n;
    GC_mark_stack_top = GC_mark_stack-1;

    alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
 * Push all locations between b and t onto the mark stack.
 * b is the first location to be checked.  t is one past the last
 * location to be checked.
 * Should only be used if there is no possibility of mark stack overflow.
void GC_push_all(bottom, top)
    register word length;

    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;
    if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
        ABORT("unexpected mark stack overflow");
    length = top - bottom;
#   if DS_TAGS > ALIGNMENT - 1
    GC_mark_stack_top -> mse_start = (word *)bottom;
    GC_mark_stack_top -> mse_descr = length;
 * Analogous to the above, but push only those pages that may have been
 * dirtied.  A block h is assumed dirty if dirty_fn(h) != 0.
 * We use push_fn to actually push the block.
 * Will not overflow mark stack if push_fn pushes a small fixed number
 * of entries.  (This is invoked only if push_fn pushes a single entry,
 * or if it marks each object before pushing it, thus ensuring progress
 * in the event of a stack overflow.)
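 * For instance, GC_push_conditional below supplies GC_page_was_dirty (or
 * GC_page_was_ever_dirty) as dirty_fn and GC_push_all as push_fn.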
void GC_push_dirty(bottom, top, dirty_fn, push_fn)
    int (*dirty_fn)(/* struct hblk * h */);
    void (*push_fn)(/* ptr_t bottom, ptr_t top */);
    register struct hblk * h;

    bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;
    h = HBLKPTR(bottom + HBLKSIZE);
    if (top <= (ptr_t) h) {
        if ((*dirty_fn)(h-1)) {
            (*push_fn)(bottom, top);
    if ((*dirty_fn)(h-1)) {
        (*push_fn)(bottom, (ptr_t)h);
    while ((ptr_t)(h+1) <= top) {
        if ((*dirty_fn)(h)) {
            if ((word)(GC_mark_stack_top - GC_mark_stack)
                > 3 * GC_mark_stack_size / 4) {
                /* Danger of mark stack overflow */
                (*push_fn)((ptr_t)h, top);
                (*push_fn)((ptr_t)h, (ptr_t)(h+1));
    if ((ptr_t)h != top) {
        if ((*dirty_fn)(h)) {
            (*push_fn)((ptr_t)h, top);
    if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
        ABORT("unexpected mark stack overflow");
# ifndef SMALL_CONFIG
void GC_push_conditional(bottom, top, all)
    if (GC_dirty_maintained) {
            /* Pages that were never dirtied cannot contain pointers */
            GC_push_dirty(bottom, top, GC_page_was_ever_dirty, GC_push_all);
            GC_push_all(bottom, top);
            GC_push_all(bottom, top);
            GC_push_dirty(bottom, top, GC_page_was_dirty, GC_push_all);

void __cdecl GC_push_one(p)
    if (0 != GC_push_proc) {
    GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER);

#   define BASE(p) (word)GC_base((void *)(p))
#   define BASE(p) (word)GC_base((char *)(p))

/* As above, but argument passed preliminary test. */
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
  void GC_push_one_checked(p, interior_ptrs, source)
  void GC_push_one_checked(p, interior_ptrs)
    register GC_bool interior_ptrs;

    if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
      if (hhdr != 0 && interior_ptrs) {
          displ = BYTES_TO_WORDS(HBLKDISPL(r));
        register map_entry_type map_entry;

        displ = HBLKDISPL(p);
        map_entry = MAP_ENTRY((hhdr -> hb_map), displ);
        if (map_entry == OBJ_INVALID) {
#           ifndef ALL_INTERIOR_POINTERS
              displ = BYTES_TO_WORDS(HBLKDISPL(r));
              if (r == 0) hhdr = 0;
              /* map already reflects interior pointers */
            displ = BYTES_TO_WORDS(displ);
            r = (word)((word *)(HBLKPTR(p)) + displ);
    /* If hhdr != 0 then r == GC_base(p), only we did it faster. */
    /* displ is the word index within the block. */
#           ifdef PRINT_BLACK_LIST
              GC_add_to_black_list_stack(p, source);
              GC_add_to_black_list_stack(p);
            GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
#           undef source  /* In case we had to define it. */
      if (!mark_bit_from_hdr(hhdr, displ)) {
        set_mark_bit_from_hdr(hhdr, displ);
        GC_STORE_BACK_PTR(source, (ptr_t)r);
        PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
                 &(GC_mark_stack[GC_mark_stack_size]));
# define TRACE_ENTRIES 1000
} GC_trace_buf[TRACE_ENTRIES];

int GC_trace_buf_ptr = 0;

void GC_add_trace_entry(char *kind, word arg1, word arg2)
    GC_trace_buf[GC_trace_buf_ptr].kind = kind;
    GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
    GC_trace_buf[GC_trace_buf_ptr].words_allocd = GC_words_allocd;
    GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
    GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
    if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;

void GC_print_trace(word gc_no, GC_bool lock)
    struct trace_entry *p;

    for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
        if (i < 0) i = TRACE_ENTRIES-1;
        p = GC_trace_buf + i;
        if (p -> gc_no < gc_no || p -> kind == 0) return;
        printf("Trace:%s (gc:%d,words:%d) 0x%X, 0x%X\n",
                p -> kind, p -> gc_no, p -> words_allocd,
                (p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
    printf("Trace incomplete\n");

# endif /* TRACE_BUF */
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans the entire region immediately, in case the contents change.
void GC_push_all_eager(bottom, top)
    word * b = (word *)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    word * t = (word *)(((long) top) & ~(ALIGNMENT-1));
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    if (top == 0) return;
    /* Check all pointers in range and push those that appear to be valid. */
    lim = t - 1 /* longword */;
    for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
        GC_PUSH_ONE_STACK(q, p);
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr

 * A version of GC_push_all that treats all interior pointers as valid
 * and scans part of the area immediately, to make sure that saved
 * register values are not lost.
 * Cold_gc_frame delimits the stack section that must be scanned
 * eagerly.  A zero value indicates that no eager scanning is needed.
void GC_push_all_stack_partially_eager(bottom, top, cold_gc_frame)
# ifdef ALL_INTERIOR_POINTERS
#   define EAGER_BYTES 1024
    /* Push the hot end of the stack eagerly, so that register values */
    /* saved inside GC frames are marked before they disappear. */
    /* The rest of the marking can be deferred until later. */
    if (0 == cold_gc_frame) {
        GC_push_all_stack(bottom, top);
#   ifdef STACK_GROWS_DOWN
      GC_push_all_eager(bottom, cold_gc_frame);
      GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
#   else /* STACK_GROWS_UP */
      GC_push_all_eager(cold_gc_frame, top);
      GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
#   endif /* STACK_GROWS_UP */
    GC_push_all_eager(bottom, top);
        GC_add_trace_entry("GC_push_all_stack", bottom, top);
#endif /* !THREADS */

void GC_push_all_stack(bottom, top)
# ifdef ALL_INTERIOR_POINTERS
    GC_push_all(bottom, top);
    GC_push_all_eager(bottom, top);
/* Push all objects reachable from marked objects in the given block */
/* of size 1 objects. */
void GC_push_marked1(h, hhdr)
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
            mark_word = *mark_word_addr++;
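            /* Each bit of mark_word corresponds to one word of the block; */
            /* for this size-1 case a set bit therefore identifies a single */
            /* marked one-word object, whose contents are pushed below. */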
            while(mark_word != 0) {
              if (mark_word & 1) {
                  GC_PUSH_ONE_HEAP(q, p + i);
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr

/* Push all objects reachable from marked objects in the given block */
/* of size 2 objects. */
void GC_push_marked2(h, hhdr)
register hdr * hhdr;
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
            mark_word = *mark_word_addr++;
            while(mark_word != 0) {
              if (mark_word & 1) {
                  GC_PUSH_ONE_HEAP(q, p + i);
                  GC_PUSH_ONE_HEAP(q, p + i);
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr

/* Push all objects reachable from marked objects in the given block */
/* of size 4 objects. */
/* There is a risk of mark stack overflow here.  But we handle that. */
/* And only unmarked objects get pushed, so it's not very likely.    */
void GC_push_marked4(h, hhdr)
register hdr * hhdr;
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
            mark_word = *mark_word_addr++;
            while(mark_word != 0) {
              if (mark_word & 1) {
                  GC_PUSH_ONE_HEAP(q, p + i);
                  GC_PUSH_ONE_HEAP(q, p + i + 1);
                  GC_PUSH_ONE_HEAP(q, p + i + 2);
                  GC_PUSH_ONE_HEAP(q, p + i + 3);
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr

#endif /* UNALIGNED */

#endif /* SMALL_CONFIG */
/* Push all objects reachable from marked objects in the given block */
void GC_push_marked(h, hhdr)
register hdr * hhdr;
    register int sz = hhdr -> hb_sz;
    register int descr = hhdr -> hb_descr;
    register int word_no;
    register word * lim;
    register mse * GC_mark_stack_top_reg;
    register mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);

    /* Some quick shortcuts: */
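        /* A descriptor of (0 | DS_LENGTH), as used for the PTRFREE kind */
        /* above, means the objects contain nothing to mark from, so the */
        /* whole block can be skipped. */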
        if ((0 | DS_LENGTH) == descr) return;
        if (GC_block_empty(hhdr) /* nothing marked */) return;
        GC_n_rescuing_pages++;
    GC_objects_are_marked = TRUE;
    if (sz > MAXOBJSZ) {
        lim = (word *)h + HDR_WORDS;
        lim = (word *)(h + 1) - sz;

#     if !defined(SMALL_CONFIG)
        GC_push_marked1(h, hhdr);
#     if !defined(SMALL_CONFIG) && !defined(UNALIGNED)
        GC_push_marked2(h, hhdr);
        GC_push_marked4(h, hhdr);
      GC_mark_stack_top_reg = GC_mark_stack_top;
      for (p = (word *)h + HDR_WORDS, word_no = HDR_WORDS; p <= lim;
           p += sz, word_no += sz) {
         if (mark_bit_from_hdr(hhdr, word_no)) {
           /* Mark from fields inside the object */
           PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
               /* Subtract this object from total, since it was */
               /* added in twice. */
               GC_composite_in_use -= sz;
      GC_mark_stack_top = GC_mark_stack_top_reg;
#ifndef SMALL_CONFIG
/* Test whether any page in the given block is dirty */
GC_bool GC_block_was_dirty(h, hhdr)
register hdr * hhdr;
    register int sz = hhdr -> hb_sz;

    if (sz < MAXOBJSZ) {
         return(GC_page_was_dirty(h));
         register ptr_t p = (ptr_t)h;

         sz = WORDS_TO_BYTES(sz);
         while (p < (ptr_t)h + sz) {
             if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
#endif /* SMALL_CONFIG */
/* Similar to GC_push_marked, but return the address of the next block */
struct hblk * GC_push_next_marked(h)
    register hdr * hhdr;

    h = GC_next_used_block(h);
    if (h == 0) return(0);
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));

#ifndef SMALL_CONFIG
/* Identical to above, but mark only from dirty pages */
struct hblk * GC_push_next_marked_dirty(h)
    register hdr * hhdr;

    if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
        h = GC_next_used_block(h);
        if (h == 0) return(0);
#       ifdef STUBBORN_ALLOC
          if (hhdr -> hb_obj_kind == STUBBORN) {
            if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
            if (GC_block_was_dirty(h, hhdr)) break;
          if (GC_block_was_dirty(h, hhdr)) break;
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));

/* Similar to above, but for uncollectable pages.  Needed since we */
/* do not clear marks for such pages, even for full collections. */
struct hblk * GC_push_next_marked_uncollectable(h)
    register hdr * hhdr = HDR(h);

        h = GC_next_used_block(h);
        if (h == 0) return(0);
        if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));