1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */

#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
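
/* A worked example (assuming a 4096-byte system page): a request for
   20 bytes is rounded up to 32 bytes, i.e. order 5, and satisfied from
   a page holding 4096/32 = 128 such objects.  Marking or freeing an
   object merely flips its bit in that page's 128-bit in-use bitmap;
   the bookkeeping never writes to the object's storage itself.  */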
/* Define GGC_POISON to poison memory marked unused by the collector.  */
#undef GGC_POISON

/* Define GGC_ALWAYS_COLLECT to perform collection every time
   ggc_collect is invoked.  Otherwise, collection is performed only
   when a significant amount of memory has been allocated since the
   last collection.  */
#undef GGC_ALWAYS_COLLECT

#ifdef ENABLE_GC_CHECKING
#define GGC_POISON
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
#define GGC_ALWAYS_COLLECT
#endif
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
#endif
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                HOST_PAGE_SIZE_BITS
                        32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |
                         PAGE_L1_BITS
                                 |
                         PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
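
/* For example, on a 32-bit host with 4096-byte pages (lg_pagesize == 12,
   hence PAGE_L2_BITS == 12), the pointer 0x0804a123 splits into
   LOOKUP_L1 (p) == 0x08 and LOOKUP_L2 (p) == 0x04a, with the low
   twelve bits (0x123) being the object's offset within its page.  */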
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS \
  (sizeof (extra_order_size_table) / sizeof (extra_order_size_table[0]))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct tree_decl),
  sizeof (struct tree_list)
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
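
/* The offsetof trick above works because the compiler must pad `c'
   until `u' falls on the alignment boundary of the union's most
   demanding member.  On a (hypothetical) host where that member needs
   8-byte alignment, `u' starts at offset 8, so MAX_ALIGNMENT is 8.  */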
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Saved in-use bit vector for pages that aren't in the topmost
     context during collection.  */
  unsigned long *save_in_use_p;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
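
/* Object N on a page corresponds to bit N of in_use_p: word
   N / HOST_BITS_PER_LONG, mask 1UL << (N % HOST_BITS_PER_LONG).
   The same arithmetic appears below in ggc_set_mark.  */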
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two-level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  int lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* The stream for debugging output.  */
  FILE *debug_file;
} G;
/* The size in bytes required to maintain a bitmap for the objects
   on a page of order ORDER.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
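
/* For example, with 32-bit longs, a page of 128 objects needs
   BITMAP_SIZE (128 + 1) == CEIL (129, 32) * 4 == 20 bytes; the "+ 1"
   accounts for the one-past-the-end sentinel bit that alloc_page
   sets below.  */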
/* Skip garbage collection if the current allocation is not at least
   this factor times the allocation at the end of the last collection.
   In other words, total allocation must expand by (this factor minus
   one) before collection is performed.  */
#define GGC_MIN_EXPAND_FOR_GC (1.3)

/* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
   test from triggering too often when the heap is small.  */
#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
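
/* Taken together, these two thresholds mean the first collection
   cannot trigger until G.allocated reaches 1.3 * 4MB (about 5.2MB),
   and later collections trigger only once allocation has grown 30%
   beyond the point the previous collection left it.  */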
/* Allocate pages in chunks of this size, to throttle calls to mmap.
   The first page is used, the rest go onto the free list.  */
#define GGC_QUIRE_SIZE 16
static int ggc_allocated_p PARAMS ((const void *));
static page_entry *lookup_page_table_entry PARAMS ((const void *));
static void set_page_table_entry PARAMS ((void *, page_entry *));
static char *alloc_anon PARAMS ((char *, size_t));
static struct page_entry * alloc_page PARAMS ((unsigned));
static void free_page PARAMS ((struct page_entry *));
static void release_pages PARAMS ((void));
static void clear_marks PARAMS ((void));
static void sweep_pages PARAMS ((void));
static void ggc_recalculate_in_use_p PARAMS ((page_entry *));

#ifdef GGC_POISON
static void poison_pages PARAMS ((void));
#endif

void debug_print_page_list PARAMS ((int));
/* Returns non-zero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (order)
     int order;
{
  page_entry *p;

  printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
	  (PTR) G.page_tails[order]);

  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (pref, size)
     char *pref ATTRIBUTE_UNUSED;
     size_t size;
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
#ifdef HAVE_VALLOC
  char *page = (char *) valloc (size);
#endif

  if (page == (char *) MAP_FAILED)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  return page;
}
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p ; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;
      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = (struct page_entry *) xcalloc (1, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
#endif
  else
    page = alloc_anon (NULL, entry_size);

  if (entry == NULL)
    entry = (struct page_entry *) xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%d, data %p-%p\n",
	     (PTR) entry, (int) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}
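
/* A sentinel example: for a page of 128 objects on a host with 32-bit
   longs, the assignment above sets bit 128 % 32 == 0 of word
   128 / 32 == 4, i.e. the bit just past the last real object, so
   ggc_alloc's hint test fails harmlessly when the hint walks off the
   end of the page.  */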
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (entry)
     page_entry *entry;
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (PTR) entry,
	     entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages ()
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  next = p->next;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }
#else
  page_entry *p, *next;

  for (p = G.free_pages; p; p = next)
    {
      next = p->next;
      free (p->page);
      G.bytes_mapped -= p->bytes;
      free (p);
    }
#endif /* USING_MMAP */

  G.free_pages = NULL;
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is four bytes.  */

static unsigned char size_lookup[257] =
{
  2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
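
/* For example, size_lookup[20] == 5, so a 20-byte request is served
   from order 5 (32-byte objects).  Exact powers of two map to their
   own order: size_lookup[32] == 5, while size_lookup[33] == 6.  */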
/* Allocate a chunk of memory of SIZE bytes.  Its contents are
   initially undefined.  */

void *
ggc_alloc (size)
     size_t size;
{
  unsigned order, word, bit, object_offset;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;

      /* Put new pages at the head of the page list.  */
      new_entry->next = entry;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * OBJECT_SIZE (order);
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef GGC_POISON
  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, OBJECT_SIZE (order));
#endif
  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += OBJECT_SIZE (order);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%d, actual=%d at %p on %p\n",
	     (int) size, (int) OBJECT_SIZE (order), result, (PTR) entry);

  return result;
}
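
/* A typical call, sketched here for illustration rather than taken
   from this file: a caller might write

     struct tree_list *t
       = (struct tree_list *) ggc_alloc (sizeof (struct tree_list));

   which, after init_ggc remaps size_lookup, is served from the
   special tree_list order rather than the next power of two.  */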
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = (((const char *) p) - entry->page) / OBJECT_SIZE (entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Mark P, but check first that it was allocated by the collector.  */

void
ggc_mark_if_gcable (p)
     const void *p;
{
  if (p && ggc_allocated_p (p))
    ggc_set_mark (p);
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (p)
     const void *p;
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}
/* Initialize the ggc-mmap allocator.  */

void
init_ggc ()
{
  unsigned order;

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

  G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize);
	if ((size_t)p & (G.pagesize - 1))
	  abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif
  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
	 so that we're sure of getting aligned memory.  */
      s = CEIL (s, MAX_ALIGNMENT) * MAX_ALIGNMENT;
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page table.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
	objects_per_page_table[order] = 1;
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      o = size_lookup[OBJECT_SIZE (order)];
      for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
	size_lookup[i] = order;
    }
}
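
/* To illustrate with made-up numbers: if sizeof (struct tree_list)
   were 20, the loop above would remap sizes 17-20 from order 5
   (32-byte objects) to the tree_list order, cutting the per-object
   waste for those sizes from 12 bytes to zero.  The real size depends
   on the host and target configuration.  */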
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth == 0)
    abort ();
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (p)
     page_entry *p;
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_PER_PAGE (p->order) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= p->save_in_use_p[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }

  if (p->num_free_objects >= num_objects)
    abort ();
}
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

void
ggc_pop_context ()
{
  unsigned order, depth;

  depth = --G.context_depth;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  if (p->context_depth > depth)
	    p->context_depth = depth;

	  /* If this page is now in the topmost context, and we'd
	     saved its allocation state, restore it.  */
	  else if (p->context_depth == depth && p->save_in_use_p)
	    {
	      ggc_recalculate_in_use_p (p);
	      free (p->save_in_use_p);
	      p->save_in_use_p = 0;
	    }
	}
    }
}
/* Unmark all objects.  */

static inline void
clear_marks ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
#ifdef ENABLE_CHECKING
	  /* The data should be page-aligned.  */
	  if ((size_t) p->page & (G.pagesize - 1))
	    abort ();
#endif

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! p->save_in_use_p)
		p->save_in_use_p = (unsigned long *) xmalloc (bitmap_size);
	      memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
	    }

	  /* Reset the number of free objects and clear the
	     in-use bits.  These will be adjusted by mark_obj.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static inline void
sweep_pages ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  /* Add all live objects on this page to the count of
	     allocated memory.  */
	  live_objects = num_objects - p->num_free_objects;

	  G.allocated += OBJECT_SIZE (order) * live_objects;

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (live_objects == 0)
	    {
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;
		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;
	      p->next = G.pages[order];
	      G.pages[order] = p;

	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
		G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
	 other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}
#ifdef GGC_POISON
/* Clobber all free objects.  */

static inline void
poison_pages ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		memset (p->page + i * size, 0xa5, size);
	    }
	}
    }
}
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect ()
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
#ifndef GGC_ALWAYS_COLLECT
  if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
    return;
#endif

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  clear_marks ();
  ggc_mark_roots ();

#ifdef GGC_POISON
  poison_pages ();
#endif

  sweep_pages ();

  G.allocated_last_gc = G.allocated;
  if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
    G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))

void
ggc_print_statistics ()
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr, "\n%-5s %10s %10s %10s\n",
	   "Log", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_PER_PAGE (i) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
	}
      fprintf (stderr, "%-5d %10ld%c %10ld%c %10ld%c\n", i,
	       SCALE (allocated), LABEL (allocated),
	       SCALE (in_use), LABEL (in_use),
	       SCALE (overhead), LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10ld%c %10ld%c %10ld%c\n", "Total",
	   SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
	   SCALE (G.allocated), LABEL(G.allocated),
	   SCALE (total_overhead), LABEL (total_overhead));
}