1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
#ifdef HAVE_MMAP_ANYWHERE
#include <sys/mman.h>
#endif

#if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
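
/* Illustration only (not part of the collector): how a request size
   maps to an order under the power-of-two scheme described above.
   The helper below is a hypothetical stand-alone sketch; the real
   allocator uses the size_lookup table defined further down instead
   of a loop.  */
#if 0
#include <stddef.h>
static unsigned
example_order_for_size (size_t size)
{
  unsigned order = 2;		/* The minimum object size is 4 bytes.  */
  while (((size_t) 1 << order) < size)
    ++order;
  return order;		/* A 10-byte request yields order 4: 16 bytes.  */
}
#endif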
/* Define GGC_POISON to poison memory marked unused by the collector.  */
#undef GGC_POISON

/* Define GGC_ALWAYS_COLLECT to perform collection every time
   ggc_collect is invoked.  Otherwise, collection is performed only
   when a significant amount of memory has been allocated since the
   last collection.  */
#undef GGC_ALWAYS_COLLECT

#ifdef ENABLE_GC_CHECKING
#define GGC_POISON
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
#define GGC_ALWAYS_COLLECT
#endif
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

       msb +----------------+----+------+------+ lsb
             high bits        L1    L2    page
             (wide pointers  bits  bits  offset
              only)

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
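
/* Illustration only: with the settings above and a hypothetical
   4096-byte system page (G.lg_pagesize == 12, so PAGE_L2_BITS == 12),
   a 32-bit address decomposes as in this stand-alone sketch.  */
#if 0
#include <stdio.h>
int
main (void)
{
  unsigned long p = 0x12345678UL;
  unsigned long l1 = (p >> (32 - 8)) & ((1 << 8) - 1);	/* 0x12 */
  unsigned long l2 = (p >> 12) & ((1 << 12) - 1);	/* 0x345 */
  /* The low 12 bits (0x678) are the offset within the page and are
     never used by the lookup.  */
  printf ("L1=%#lx L2=%#lx\n", l1, l2);
  return 0;
}
#endif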
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]
#ifdef NO_ALIGNMENT_PROBLEM

/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS \
  (sizeof (extra_order_size_table) / sizeof (extra_order_size_table[0]))

#else /* !defined(NO_ALIGNMENT_PROBLEM) */

/* For now, we can't use this code because we don't ensure that the
   objects returned are appropriately aligned.  The problem is that
   some tree_list sized things, for example, may require stricter
   alignment than their rounded size class guarantees.  */
#define NUM_EXTRA_ORDERS 0

#endif /* !defined(NO_ALIGNMENT_PROBLEM) */
/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct tree_decl),
  sizeof (struct tree_list)
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Saved in-use bit vector for pages that aren't in the topmost
     context during collection.  */
  unsigned long *save_in_use_p;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of the size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
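
/* Illustration only: assuming 32-bit longs and 1024 objects per page
   (order 2 on a hypothetical 4096-byte page), the bitmap must cover
   1024 + 1 bits -- the extra bit is the one-past-the-end sentinel set
   up in alloc_page below -- so BITMAP_SIZE (1025) is
   CEIL (1025, 32) * sizeof (long) == 33 * 4 == 132 bytes.  */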
/* Skip garbage collection if the current allocation is not at least
   this factor times the allocation at the end of the last collection.
   In other words, total allocation must expand by (this factor minus
   one) before collection is performed.  */
#define GGC_MIN_EXPAND_FOR_GC (1.3)

/* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
   test from triggering too often when the heap is small.  */
#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
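
/* Illustration only: with the defaults above, `allocated_last_gc' is
   never below 4MB, so the first collection cannot happen before total
   allocation reaches 1.3 * 4MB, roughly 5.2MB; after a collection that
   leaves 10MB live, the next one waits until allocation hits 13MB.  */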
/* Allocate pages in chunks of this size, to throttle calls to mmap.
   The first page is used, the rest go onto the free list.  */
#define GGC_QUIRE_SIZE 16

static int ggc_allocated_p PARAMS ((const void *));
static page_entry *lookup_page_table_entry PARAMS ((const void *));
static void set_page_table_entry PARAMS ((void *, page_entry *));
static char *alloc_anon PARAMS ((char *, size_t));
static struct page_entry * alloc_page PARAMS ((unsigned));
static void free_page PARAMS ((struct page_entry *));
static void release_pages PARAMS ((void));
static void clear_marks PARAMS ((void));
static void sweep_pages PARAMS ((void));
static void ggc_recalculate_in_use_p PARAMS ((page_entry *));

#ifdef GGC_POISON
static void poison_pages PARAMS ((void));
#endif

void debug_print_page_list PARAMS ((int));
/* Returns non-zero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (order)
     int order;
{
  page_entry *p;

  printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
	  (PTR) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   if PREF is non-null.  Exit if the memory cannot be obtained.  */

static char *
alloc_anon (pref, size)
     char *pref ATTRIBUTE_UNUSED;
     size_t size;
{
  char *page;

#ifdef HAVE_MMAP_ANYWHERE
#ifdef MAP_ANONYMOUS
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#else
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  if (page == (char *) MAP_FAILED)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (FATAL_EXIT_CODE);
    }
#else
#ifdef HAVE_VALLOC
  page = (char *) valloc (size);
  if (!page)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (FATAL_EXIT_CODE);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP_ANYWHERE */

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  return page;
}
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p ; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;
      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef HAVE_MMAP_ANYWHERE
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, entry_size * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
	  e->bytes = entry_size;
	  e->page = page + i*entry_size;
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
#endif
  else
    page = alloc_anon (NULL, entry_size);

  if (entry == NULL)
    entry = (struct page_entry *) xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  /* Set the one-past-the-end in-use bit.  This acts as a sentinel as
     we increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%d, data %p-%p\n",
	     (PTR) entry, (int) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}
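
/* Illustration only: the one-past-the-end sentinel set above.  With a
   hypothetical 1024-object page and 32-bit longs, object bits occupy
   words 0-31 of in_use_p, and bit 1024 -- word 32, bit 0 -- is set
   permanently.  The hint scan in ggc_alloc below can therefore bump
   next_bit_hint past the last real object and still land on a set
   bit, instead of running off the end of the bitmap.  */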
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (entry)
     page_entry *entry;
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (PTR) entry,
	     entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages ()
{
  page_entry *p, *next;

#ifdef HAVE_MMAP_ANYWHERE
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  next = p->next;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }
#else
#ifdef HAVE_VALLOC

  for (p = G.free_pages; p; p = next)
    {
      next = p->next;
      free (p->page);
      G.bytes_mapped -= p->bytes;
      free (p);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP_ANYWHERE */

  G.free_pages = NULL;
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is four bytes.  */

static unsigned char size_lookup[257] =
{
  2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
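
/* Illustration only: size_lookup[10] is 4, so a 10-byte request is
   served as a 16-byte (order 4) object; size_lookup[256] is 8.
   Requests larger than 256 bytes bypass the table and fall through
   to the search loop at the top of ggc_alloc below.  (init_ggc may
   later overwrite entries so they point at the extra orders.)  */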
/* Allocate a chunk of memory of SIZE bytes.  Its contents are
   undefined.  */

void *
ggc_alloc (size)
     size_t size;
{
  unsigned order, word, bit, object_offset;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;

      /* Put new pages at the head of the page list.  */
      new_entry->next = entry;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * OBJECT_SIZE (order);
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef GGC_POISON
  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, OBJECT_SIZE (order));
#endif

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += OBJECT_SIZE (order);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%d, actual=%d at %p on %p\n",
	     (int) size, (int) OBJECT_SIZE (order), result, (PTR) entry);

  return result;
}
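
/* Illustration only: the hint scan above.  Suppose an order-5 page
   (32-byte objects) whose first in_use_p word is 0xffffffff and whose
   second is 0x00000007, with 32-bit longs.  A stale hint pointing at
   an allocated object fails the initial test; the scan then skips
   word 0 entirely (its complement is zero), walks past bits 0-2 of
   word 1, and settles on hint == 35, i.e. byte offset
   35 * 32 == 1120 into the page.  */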
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = (((const char *) p) - entry->page) / OBJECT_SIZE (entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Mark P, but check first that it was allocated by the collector.  */

void
ggc_mark_if_gcable (p)
     const void *p;
{
  if (p && ggc_allocated_p (p))
    ggc_set_mark (p);
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (p)
     const void *p;
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}
/* Initialize the ggc-mmap allocator.  */

void
init_ggc ()
{
  unsigned order;

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);

#if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

  G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

#ifdef HAVE_MMAP_ANYWHERE
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */
	p = alloc_anon (NULL, G.pagesize);
	if ((size_t)p & (G.pagesize - 1))
	  abort ();
      }
    munmap (p, G.pagesize);
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    object_size_table[order] =
      extra_order_size_table[order - HOST_BITS_PER_PTR];

  /* Initialize the objects-per-page table.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
	objects_per_page_table[order] = 1;
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      o = size_lookup[OBJECT_SIZE (order)];
      for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
	size_lookup[i] = order;
    }
}
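
/* Illustration only: the size_lookup rewrite above.  Suppose, purely
   hypothetically, that sizeof (struct tree_list) is 20 and becomes
   extra order 32 on a 32-bit host.  size_lookup[20] was 5 (a 32-byte
   object); after the inner loop, entries 17 through 20 all map to
   order 32, so those requests consume 20-byte objects instead of
   32-byte ones.  */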
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth == 0)
    abort ();
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (p)
     page_entry *p;
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_PER_PAGE (p->order) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= p->save_in_use_p[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }

  if (p->num_free_objects >= num_objects)
    abort ();
}
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

void
ggc_pop_context ()
{
  unsigned order, depth;

  depth = --G.context_depth;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  if (p->context_depth > depth)
	    p->context_depth = depth;

	  /* If this page is now in the topmost context, and we'd
	     saved its allocation state, restore it.  */
	  else if (p->context_depth == depth && p->save_in_use_p)
	    {
	      ggc_recalculate_in_use_p (p);
	      free (p->save_in_use_p);
	      p->save_in_use_p = 0;
	    }
	}
    }
}
/* Unmark all objects.  */

static inline void
clear_marks ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
#ifdef ENABLE_CHECKING
	  /* The data should be page-aligned.  */
	  if ((size_t) p->page & (G.pagesize - 1))
	    abort ();
#endif

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! p->save_in_use_p)
		p->save_in_use_p = xmalloc (bitmap_size);
	      memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
	    }

	  /* Reset the number of free objects and clear the
	     in-use bits.  These will be adjusted by mark_obj.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static inline void
sweep_pages ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  /* Add all live objects on this page to the count of
	     allocated memory.  */
	  live_objects = num_objects - p->num_free_objects;

	  G.allocated += OBJECT_SIZE (order) * live_objects;

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (live_objects == 0)
	    {
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;
		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;
	      p->next = G.pages[order];
	      G.pages[order] = p;

	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
		G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
	 other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}
#ifdef GGC_POISON
/* Clobber all free objects.  */

static inline void
poison_pages ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		memset (p->page + i * size, 0xa5, size);
	    }
	}
    }
}
#endif /* GGC_POISON */
/* Top level mark-and-sweep routine.  */

void
ggc_collect ()
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
#ifndef GGC_ALWAYS_COLLECT
  if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
    return;
#endif

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  clear_marks ();
  ggc_mark_roots ();

#ifdef GGC_POISON
  poison_pages ();
#endif

  sweep_pages ();

  G.allocated_last_gc = G.allocated;
  if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
    G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
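
/* Illustration only: values below 10K print as-is with a blank label,
   values below 10M are scaled to kilobytes, and anything larger to
   megabytes.  E.g. 5000 prints as "5000 ", 2000000 as "1953k"
   (2000000 / 1024), and 50000000 as "47M".  */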
void
ggc_print_statistics ()
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr, "\n%-5s %10s %10s %10s\n",
	   "Log", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_PER_PAGE (i) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
	}
      fprintf (stderr, "%-5d %10ld%c %10ld%c %10ld%c\n", i,
	       SCALE (allocated), LABEL (allocated),
	       SCALE (in_use), LABEL (in_use),
	       SCALE (overhead), LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10ld%c %10ld%c %10ld%c\n", "Total",
	   SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
	   SCALE (G.allocated), LABEL (G.allocated),
	   SCALE (total_overhead), LABEL (total_overhead));
}