1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
33 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
34 file open. Prefer either to valloc. */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
38 # include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
42 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP

#endif
49 #ifdef HAVE_MMAP_DEV_ZERO
51 # include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP

#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

/* Strategy:
65 This garbage-collecting allocator allocates objects on one of a set
66 of pages. Each page can allocate objects of a single size only;
67 available sizes are powers of two starting at four bytes. The size
68 of an allocation request is rounded up to the next power of two
69 (`order'), and satisfied from the appropriate page.
71 Each page is recorded in a page-entry, which also maintains an
72 in-use bitmap of object positions on the page. This allows the
73 allocation state of a particular object to be flipped without
74 touching the page itself.
76 Each page-entry also has a context depth, which is used to track
77 pushing and popping of allocation contexts. Only objects allocated
78 in the current (highest-numbered) context may be collected.
   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation order of the pages on it, so all
   pages on a given list allocate objects of the same size.
83 Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.
87 Empty pages (of all orders) are kept on a single page cache list,
88 and are considered first when new pages are required; they are
89 deallocated at the start of the next collection if they haven't
90 been recycled by then. */
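/* A worked example (illustrative numbers only): with 4096-byte pages,
   a 20-byte allocation request is rounded up to 32 bytes and served
   from an order-5 page; such a page holds 4096/32 == 128 objects, so
   its page-entry carries a 128-bit in-use bitmap plus a sentinel
   bit.  */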
92 /* Define GGC_DEBUG_LEVEL to print debugging information.
93 0: No debugging output.
94 1: GC statistics only.
95 2: Page-entry allocations/deallocations as well.
96 3: Object allocations as well.
97 4: Object marks as well. */
98 #define GGC_DEBUG_LEVEL (0)
100 #ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
#endif
105 /* A two-level tree is used to look up the page-entry for a given
106 pointer. Two chunks of the pointer's bits are extracted to index
107 the first and second levels of the tree, as follows:
   msb +--------------+--------------+---------------------+ lsb
       | PAGE_L1_BITS | PAGE_L2_BITS | HOST_PAGE_SIZE_BITS |
       +--------------+--------------+---------------------+
117 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
118 pages are aligned on system page boundaries. The next most
119 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
120 index values in the lookup table, respectively.
122 For 32-bit architectures and the settings below, there are no
123 leftover bits. For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
127 #define PAGE_L1_BITS (8)
128 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
129 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
130 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
132 #define LOOKUP_L1(p) \
133 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
135 #define LOOKUP_L2(p) \
136 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
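/* A worked example of the macros above (assuming 4096-byte pages, so
   G.lg_pagesize == 12 and PAGE_L2_BITS == 12): for p == 0xA73F2C48,
     LOOKUP_L1 (p) == (0xA73F2C48 >> 24) & 0xFF  == 0xA7,
     LOOKUP_L2 (p) == (0xA73F2C48 >> 12) & 0xFFF == 0x3F2,
   and the bottom 12 bits (0xC48) are the offset within the page.  */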
138 /* The number of objects per allocation page, for objects on a page of
139 the indicated ORDER. */
140 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
142 /* The size of an object on a page of the indicated ORDER. */
143 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
145 /* For speed, we avoid doing a general integer divide to locate the
146 offset in the allocation bitmap, by precalculating numbers M, S
147 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
148 within the page which is evenly divisible by the object size Z. */
149 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
150 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
151 #define OFFSET_TO_BIT(OFFSET, ORDER) \
152 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
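/* A worked example (the size 24 is hypothetical, chosen only for
   illustration): for an object size Z == 24 == 3 * 2^3, compute_inverse
   below yields DIV_SHIFT == 3 and DIV_MULT == 0xAAAAAAAB, the inverse
   of 3 mod 2^32 (3 * 0xAAAAAAAB == 0x200000001).  Then for offset 48,
   (48 * 0xAAAAAAAB) mod 2^32 == 16, and 16 >> 3 == 2 == 48 / 24.  */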
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */
157 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
159 #define RTL_SIZE(NSLOTS) \
160 (sizeof (struct rtx_def) + ((NSLOTS) - 1) * sizeof (rtunion))
162 /* The Ith entry is the maximum size of an object to be stored in the
163 Ith extra order. Adding a new entry to this array is the *only*
164 thing you need to do to add a new special allocation size. */
166 static const size_t extra_order_size_table[] = {
167 sizeof (struct tree_decl),
168 sizeof (struct tree_list),
169 RTL_SIZE (2), /* REG, MEM, PLUS, etc. */
  RTL_SIZE (10),		/* INSN, CALL_INSN, JUMP_INSN */
};
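/* For instance (purely hypothetical, not a change this file makes):
   appending
     sizeof (struct tree_exp),
   to the initializer above would carve out one more specially sized
   bucket; NUM_EXTRA_ORDERS and NUM_ORDERS adjust automatically via
   ARRAY_SIZE.  */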
173 /* The total number of orders. */
175 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
177 /* We use this structure to determine the alignment required for
178 allocations. For power-of-two sized allocations, that's not a
179 problem, but it does matter for odd-sized allocations. */
struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};
193 /* The biggest alignment required. */
195 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
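/* For example, on a host whose most-aligned union member requires
   8-byte alignment, the leading char forces U to offset 8, so
   MAX_ALIGNMENT evaluates to 8.  */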
/* The Ith entry is the number of objects on a page of order I.  */
199 static unsigned objects_per_page_table[NUM_ORDERS];
201 /* The Ith entry is the size of an object on a page of order I. */
203 static size_t object_size_table[NUM_ORDERS];
205 /* The Ith entry is a pair of numbers (mult, shift) such that
206 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
207 for all k evenly divisible by OBJECT_SIZE(I). */
static struct
{
  unsigned int mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];
216 /* A page_entry records the status of an allocation page. This
217 structure is dynamically sized to fit the bitmap in_use_p. */
typedef struct page_entry
{
220 /* The next page-entry with objects of the same size, or NULL if
221 this is the last page-entry. */
222 struct page_entry *next;
224 /* The number of bytes allocated. (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;
  /* The address at which the memory is allocated.  */
  char *page;
231 #ifdef USING_MALLOC_PAGE_GROUPS
232 /* Back pointer to the page group this page came from. */
  struct page_group *group;
#endif
236 /* Saved in-use bit vector for pages that aren't in the topmost
237 context during collection. */
238 unsigned long *save_in_use_p;
240 /* Context depth of this page. */
241 unsigned short context_depth;
243 /* The number of free objects remaining on this page. */
244 unsigned short num_free_objects;
246 /* A likely candidate for the bit position of a free object for the
247 next allocation from this page. */
248 unsigned short next_bit_hint;
  /* The lg of the size of objects allocated from this page.  */
  unsigned char order;
253 /* A bit vector indicating whether or not objects are in use. The
254 Nth bit is one if the Nth object on this page is allocated. This
255 array is dynamically sized. */
  unsigned long in_use_p[1];
} page_entry;
259 #ifdef USING_MALLOC_PAGE_GROUPS
260 /* A page_group describes a large allocation from malloc, from which
261 we parcel out aligned pages. */
typedef struct page_group
{
264 /* A linked list of all extant page groups. */
265 struct page_group *next;
  /* The address we received from malloc.  */
  char *allocation;
  /* The size of the block.  */
  size_t alloc_size;
  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif
278 #if HOST_BITS_PER_PTR <= 32
280 /* On 32-bit hosts, we use a two level page table, as pictured above. */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else
285 /* On 64-bit hosts, we use the same two level page tables plus a linked
286 list that disambiguates the top 32-bits. There will almost always be
287 exactly one entry in the list. */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
297 /* The rest of the global variables. */
static struct globals
{
300 /* The Nth element in this array is a page with objects of size 2^N.
301 If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
304 page_entry *pages[NUM_ORDERS];
306 /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
309 page_entry *page_tails[NUM_ORDERS];
  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;
  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;
  /* Bytes currently allocated.  */
  size_t allocated;
321 /* Bytes currently allocated at the end of the last collection. */
322 size_t allocated_last_gc;
  /* Total amount of memory mapped.  */
  size_t bytes_mapped;
327 /* The current depth in the context stack. */
328 unsigned short context_depth;
330 /* A file descriptor open to /dev/zero for reading. */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif
335 /* A cache of free system pages. */
336 page_entry *free_pages;
338 #ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif
  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
348 #define BITMAP_SIZE(Num_objects) \
349 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
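/* E.g. with 32-bit longs, a page of 128 objects needs
   BITMAP_SIZE (128 + 1) == CEIL (129, 32) * sizeof (long)
   == 5 * 4 == 20 bytes; the extra object accounts for the
   one-past-the-end sentinel bit set by alloc_page.  */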
351 /* Allocate pages in chunks of this size, to throttle calls to memory
352 allocation routines. The first page is used, the rest go onto the
353 free list. This cannot be larger than HOST_BITS_PER_INT for the
354 in_use bitmask for page_group. */
355 #define GGC_QUIRE_SIZE 16
357 static int ggc_allocated_p PARAMS ((const void *));
358 static page_entry *lookup_page_table_entry PARAMS ((const void *));
359 static void set_page_table_entry PARAMS ((void *, page_entry *));
#ifdef USING_MMAP
static char *alloc_anon PARAMS ((char *, size_t));
#endif
363 #ifdef USING_MALLOC_PAGE_GROUPS
364 static size_t page_group_index PARAMS ((char *, char *));
365 static void set_page_group_in_use PARAMS ((page_group *, char *));
static void clear_page_group_in_use PARAMS ((page_group *, char *));
#endif
368 static struct page_entry * alloc_page PARAMS ((unsigned));
369 static void free_page PARAMS ((struct page_entry *));
370 static void release_pages PARAMS ((void));
371 static void clear_marks PARAMS ((void));
372 static void sweep_pages PARAMS ((void));
373 static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
374 static void compute_inverse PARAMS ((unsigned));
376 #ifdef ENABLE_GC_CHECKING
static void poison_pages PARAMS ((void));
#endif
380 void debug_print_page_list PARAMS ((int));
382 /* Returns nonzero if P was allocated in GC'able memory. */
static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif
  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
414 /* Traverse the page table and find the entry for a page.
415 Die (probably) if the object wasn't allocated via GC. */
417 static inline page_entry *
lookup_page_table_entry (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;
#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
427 page_table table = G.lookup;
428 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif
  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
441 /* Set the page table entry for a page. */
static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;
#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
455 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
456 for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;
460 /* Not found -- allocate a new table. */
461 table = (page_table) xcalloc (1, sizeof(*table));
462 table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif
  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);
473 if (base[L1] == NULL)
474 base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
  base[L1][L2] = entry;
}
479 /* Prints the page-entry for object size ORDER, for debugging. */
void
debug_print_page_list (order)
     int order;
{
  page_entry *p = G.pages[order];
486 printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
487 (PTR) G.page_tails[order]);
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
502 compile error unless exactly one of the HAVE_* is defined. */
static char *
alloc_anon (pref, size)
     char *pref ATTRIBUTE_UNUSED;
     size_t size;
{
509 #ifdef HAVE_MMAP_ANON
510 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
513 #ifdef HAVE_MMAP_DEV_ZERO
514 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }
524 /* Remember that we allocated this memory. */
  G.bytes_mapped += size;

  return page;
}
#endif
530 #ifdef USING_MALLOC_PAGE_GROUPS
531 /* Compute the index for this page into the page group. */
static size_t
page_group_index (allocation, page)
     char *allocation, *page;
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}
540 /* Set and clear the in_use bit for this page in the page group. */
static void
set_page_group_in_use (group, page)
     page_group *group;
     char *page;
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static void
clear_page_group_in_use (group, page)
     page_group *group;
     char *page;
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
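/* E.g. with 4096-byte pages, the page at ALLOCATION + 12288 has index
   12288 >> 12 == 3 and is therefore tracked by bit 3 of IN_USE; a
   quire of GGC_QUIRE_SIZE pages thus needs only GGC_QUIRE_SIZE bits
   of the mask.  */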
559 /* Allocate a new page for allocating objects of size 2^ORDER,
560 and return an entry for it. The entry is not added to the
561 appropriate page_table list. */
static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif
577 num_objects = OBJECTS_PER_PAGE (order);
578 bitmap_size = BITMAP_SIZE (num_objects + 1);
579 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
580 entry_size = num_objects * OBJECT_SIZE (order);
581 if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;
587 /* Check the list of free pages for one we can use. */
588 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;
#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif
602 /* ... and, if possible, the page entry itself. */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
614 /* We want just one page. Allocate a bunch of them and put the
615 extras on the freelist. (Can only do this optimization with
616 mmap for backing store.) */
      struct page_entry *e, *f = G.free_pages;
      int i;
620 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = (struct page_entry *) xcalloc (1, page_entry_size);
	  e->order = order;
628 e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
642 /* Allocate a large block of memory and serve out the aligned
643 pages therein. This results in much less memory wastage
644 than the traditional implementation of valloc. */
646 char *allocation, *a, *enda;
647 size_t alloc_size, head_slop, tail_slop;
648 int multiple_pages = (entry_size == G.pagesize);
      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
654 allocation = xmalloc (alloc_size);
656 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
657 head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
662 enda = allocation + alloc_size - tail_slop;
664 /* We allocated N pages, which are likely not aligned, leaving
665 us with N-1 usable pages. We plan to place the page_group
666 structure somewhere in the slop. */
667 if (head_slop >= sizeof (page_group))
668 group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      page += G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  if (tail_slop < sizeof (page_group))
	    abort ();
680 group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}
684 /* Remember that we allocated this memory. */
685 group->next = G.page_groups;
686 group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
689 G.page_groups = group;
690 G.bytes_mapped += alloc_size;
      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
698 e = (struct page_entry *) xcalloc (1, page_entry_size);
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
712 entry = (struct page_entry *) xcalloc (1, page_entry_size);
  entry->bytes = entry_size;
  entry->page = page;
716 entry->context_depth = G.context_depth;
717 entry->order = order;
718 entry->num_free_objects = num_objects;
719 entry->next_bit_hint = 1;
721 #ifdef USING_MALLOC_PAGE_GROUPS
722 entry->group = group;
  set_page_group_in_use (group, page);
#endif
726 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
727 increment the hint. */
728 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
729 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
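  /* E.g. for 128 objects and 32-bit longs, the statement above sets
     bit 0 of in_use_p[4] -- the 129th bit, which no real object ever
     occupies.  */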
731 set_page_table_entry (page, entry);
733 if (GGC_DEBUG_LEVEL >= 2)
734 fprintf (G.debug_file,
735 "Allocating page at %p, object size=%lu, data %p-%p\n",
736 (PTR) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (entry)
     page_entry *entry;
{
748 if (GGC_DEBUG_LEVEL >= 2)
749 fprintf (G.debug_file,
750 "Deallocating page at %p, data %p-%p\n", (PTR) entry,
751 entry->page, entry->page + entry->bytes - 1);
753 set_page_table_entry (entry->page, NULL);
755 #ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif
759 entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages ()
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;
  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;
      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;
  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
804 while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;
  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
819 G.bytes_mapped -= g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}
827 /* This table provides a fast way to determine ceil(log_2(size)) for
828 allocation requests. The minimum allocation size is eight bytes. */
static unsigned char size_lookup[257] =
{
832 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
833 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
834 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
835 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
836 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
837 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
838 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
839 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
840 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
841 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
842 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
843 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
844 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
845 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
846 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
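/* E.g. size_lookup[24] == 5 and size_lookup[33] == 6: a 24-byte
   request is served from the 32-byte bucket, a 33-byte request from
   the 64-byte bucket (before init_ggc patches the special orders
   into this table).  */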
851 /* Allocate a chunk of memory of SIZE bytes. If ZERO is nonzero, the
852 memory is zeroed; otherwise, its contents are undefined. */
void *
ggc_alloc (size)
     size_t size;
{
  unsigned order, word, bit, object_offset;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }
871 /* If there are non-full pages for this size allocation, they are at
872 the head of the list. */
873 entry = G.pages[order];
875 /* If there is no page for this object size, or all pages in this
876 context are full, allocate a new page. */
877 if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
880 new_entry = alloc_page (order);
      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
884 G.page_tails[order] = new_entry;
886 /* Put new pages at the head of the page list. */
887 new_entry->next = entry;
889 G.pages[order] = new_entry;
891 /* For a new page, we know the word and bit positions (in the
892 in_use bitmap) of the first available object -- they're zero. */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
900 /* First try to use the hint left from the previous allocation
901 to locate a clear bit in the in-use bitmap. We've made sure
902 that the one-past-the-end bit is always set, so if the hint
903 has run over, this test will fail. */
904 unsigned hint = entry->next_bit_hint;
905 word = hint / HOST_BITS_PER_LONG;
906 bit = hint % HOST_BITS_PER_LONG;
908 /* If the hint didn't work, scan the bitmap from the beginning. */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
	  hint = word * HOST_BITS_PER_LONG + bit;
	}
919 /* Next time, try the next bit. */
920 entry->next_bit_hint = hint + 1;
      object_offset = hint * OBJECT_SIZE (order);
    }
925 /* Set the in-use bit. */
926 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
928 /* Keep a running total of the number of free objects. If this page
929 fills up, we may have to move it to the end of the list if the
930 next page isn't full. If the next page is full, all subsequent
931 pages are full, so there's no need to move it. */
932 if (--entry->num_free_objects == 0
933 && entry->next != NULL
934 && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }
942 /* Calculate the object's address. */
943 result = entry->page + object_offset;
945 #ifdef ENABLE_GC_CHECKING
  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, OBJECT_SIZE (order));
#endif
951 /* Keep track of how many bytes are being allocated. This
952 information is used in deciding when to collect. */
953 G.allocated += OBJECT_SIZE (order);
955 if (GGC_DEBUG_LEVEL >= 3)
956 fprintf (G.debug_file,
957 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
	     (PTR) entry);

  return result;
}
/* If P is not marked, mark it and return false.  Otherwise return true.
965 P must have been allocated by the GC allocator; it mustn't point to
966 static objects, stack variables, or memory allocated with malloc. */
int
ggc_set_mark (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
978 entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif
984 /* Calculate the index of the object on the page; this is its bit
985 position in the in_use_p bitmap. */
986 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
987 word = bit / HOST_BITS_PER_LONG;
988 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
990 /* If the bit was previously set, skip it. */
  if (entry->in_use_p[word] & mask)
    return 1;
994 /* Otherwise set it, and decrement the free object count. */
995 entry->in_use_p[word] |= mask;
996 entry->num_free_objects -= 1;
998 if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
1004 /* Return 1 if P has been marked, zero otherwise.
1005 P must have been allocated by the GC allocator; it mustn't point to
1006 static objects, stack variables, or memory allocated with malloc. */
int
ggc_marked_p (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
1018 entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif
1024 /* Calculate the index of the object on the page; this is its bit
1025 position in the in_use_p bitmap. */
1026 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1027 word = bit / HOST_BITS_PER_LONG;
1028 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
  return (entry->in_use_p[word] & mask) != 0;
}
1033 /* Return the size of the gc-able object P. */
size_t
ggc_get_size (p)
     const void *p;
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}
1043 /* Subroutine of init_ggc which computes the pair of numbers used to
1044 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1046 This algorithm is taken from Granlund and Montgomery's paper
1047 "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */
static void
compute_inverse (order)
     unsigned order;
{
1055 unsigned size, inv, e;
1057 /* There can be only one object per "page" in a bucket for sizes
1058 larger than half a machine page; it will always have offset zero. */
  if (OBJECT_SIZE (order) > G.pagesize/2)
    {
      if (OBJECTS_PER_PAGE (order) != 1)
	abort ();

      DIV_MULT (order) = 1;
      DIV_SHIFT (order) = 0;
      return;
    }
  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
1078 while (inv * size != 1)
1079 inv = inv * (2 - inv*size);
1081 DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
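/* E.g. for the hypothetical object size 24 == 3 * 2^3, the loop above
   finds e == 3 with odd part 3; the Newton iteration roughly doubles
   the number of correct low-order bits of INV each pass, so it reaches
   the exact inverse 0xAAAAAAAB of 3 mod 2^32 within a few passes.  */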
/* Initialize the ggc-mmap allocator.  */

void
init_ggc ()
{
  unsigned order;
1091 G.pagesize = getpagesize();
1092 G.lg_pagesize = exact_log2 (G.pagesize);
1094 #ifdef HAVE_MMAP_DEV_ZERO
1095 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
1107 /* StunOS has an amazing off-by-one error for the first mmap allocation
1108 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1109 believe, is an unaligned page allocation, which would cause us to
1110 hork badly if we tried to use it. */
  {
    char *p = alloc_anon (NULL, G.pagesize);
1113 struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
1116 /* How losing. Discard this one and try another. If we still
1117 can't get something useful, give up. */
1119 p = alloc_anon (NULL, G.pagesize);
	if ((size_t)p & (G.pagesize - 1))
	  abort ();
      }
1124 /* We have a good page, might as well hold onto it... */
1125 e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif
1133 /* Initialize the object size table. */
1134 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1135 object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
1138 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1140 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1141 so that we're sure of getting aligned memory. */
1142 s = CEIL (s, MAX_ALIGNMENT) * MAX_ALIGNMENT;
      object_size_table[order] = s;
    }
1146 /* Initialize the objects-per-page and inverse tables. */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
1149 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1150 if (objects_per_page_table[order] == 0)
1151 objects_per_page_table[order] = 1;
      compute_inverse (order);
    }
1155 /* Reset the size_lookup array to put appropriately sized objects in
1156 the special orders. All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;
1164 o = size_lookup[OBJECT_SIZE (order)];
1165 for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
	size_lookup[i] = order;
    }
}
1170 /* Increment the `GC context'. Objects allocated in an outer context
1171 are never freed, eliminating the need to register their roots. */
void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth == 0)
    abort ();
}
1183 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1184 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
static void
ggc_recalculate_in_use_p (p)
     page_entry *p;
{
  unsigned int i;
  size_t num_objects;
1193 /* Because the past-the-end bit in in_use_p is always set, we
1194 pretend there is one additional object. */
1195 num_objects = OBJECTS_PER_PAGE (p->order) + 1;
1197 /* Reset the free object count. */
1198 p->num_free_objects = num_objects;
1200 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;
1208 /* Something is in use if it is marked, or if it was in use in a
1209 context further down the context stack. */
1210 p->in_use_p[i] |= p->save_in_use_p[i];
1212 /* Decrement the free object count for every object allocated. */
1213 for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }
  if (p->num_free_objects >= num_objects)
    abort ();
}
1221 /* Decrement the `GC context'. All objects allocated since the
1222 previous ggc_push_context are migrated to the outer context. */
void
ggc_pop_context ()
{
  unsigned order, depth;
1229 depth = --G.context_depth;
1231 /* Any remaining pages in the popped context are lowered to the new
1232 current context; i.e. objects allocated in the popped context and
1233 left over are imported into the previous context. */
  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;
      for (p = G.pages[order]; p != NULL; p = p->next)
	{
1240 if (p->context_depth > depth)
1241 p->context_depth = depth;
1243 /* If this page is now in the topmost context, and we'd
1244 saved its allocation state, restore it. */
	  else if (p->context_depth == depth && p->save_in_use_p)
	    {
1247 ggc_recalculate_in_use_p (p);
1248 free (p->save_in_use_p);
	      p->save_in_use_p = 0;
	    }
	}
    }
}
/* Unmark all objects.  */

static void
clear_marks ()
{
  unsigned order;
  for (order = 2; order < NUM_ORDERS; order++)
    {
1264 size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
      page_entry *p;
      for (p = G.pages[order]; p != NULL; p = p->next)
	{
1270 #ifdef ENABLE_CHECKING
1271 /* The data should be page-aligned. */
	  if ((size_t) p->page & (G.pagesize - 1))
	    abort ();
#endif
1276 /* Pages that aren't in the topmost context are not collected;
1277 nevertheless, we need their in-use bit vectors to store GC
1278 marks. So, back them up first. */
	  if (p->context_depth < G.context_depth)
	    {
1281 if (! p->save_in_use_p)
1282 p->save_in_use_p = xmalloc (bitmap_size);
	      memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
	    }
	  /* Reset the number of free objects and clear the
1287 in-use bits. These will be adjusted by mark_obj. */
1288 p->num_free_objects = num_objects;
1289 memset (p->in_use_p, 0, bitmap_size);
1291 /* Make sure the one-past-the-end bit is always set. */
1292 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}
1298 /* Free all empty pages. Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages ()
{
  unsigned order;
  for (order = 2; order < NUM_ORDERS; order++)
    {
1308 /* The last page-entry to consider, regardless of entries
1309 placed at the end of the list. */
1310 page_entry * const last = G.page_tails[order];
1312 size_t num_objects = OBJECTS_PER_PAGE (order);
1313 size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
1324 page_entry *next = p->next;
	  /* Loop until all entries have been examined.  */
	  done = (p == last);
1329 /* Add all live objects on this page to the count of
1330 allocated memory. */
1331 live_objects = num_objects - p->num_free_objects;
1333 G.allocated += OBJECT_SIZE (order) * live_objects;
	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;
1340 /* Remove the page if it's empty. */
	  else if (live_objects == 0)
	    {
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;
1348 /* Are we removing the last element? */
1349 if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	    }
1355 /* If the page is full, move it to the end. */
	  else if (p->num_free_objects == 0)
	    {
1358 /* Don't move it if it's already at the end. */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  G.page_tails[order]->next = p;
1365 /* Update the tail pointer... */
1366 G.page_tails[order] = p;
1368 /* ... and the head pointer, if necessary. */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;
		  p = previous;
		}
	    }
1377 /* If we've fallen through to here, it's a page in the
1378 topmost context that is neither full nor empty. Such a
1379 page must precede pages at lesser context depth in the
1380 list, so move it to the head. */
1381 else if (p != G.pages[order])
1383 previous->next = p->next;
	      p->next = G.pages[order];
	      G.pages[order] = p;
1386 /* Are we moving the last element? */
1387 if (G.page_tails[order] == p)
		G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);
1397 /* Now, restore the in_use_p vectors for any pages from contexts
1398 other than the current one. */
1399 for (p = G.pages[order]; p; p = p->next)
1400 if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}
1405 #ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages ()
{
  unsigned order;
  for (order = 2; order < NUM_ORDERS; order++)
    {
1415 size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t size = OBJECT_SIZE (order);
      page_entry *p;
      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t i;
1423 if (p->context_depth != G.context_depth)
1424 /* Since we don't do any collection for pages in pushed
1425 contexts, there's no need to do any poisoning. And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;
	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
1434 bit = i % HOST_BITS_PER_LONG;
1435 if (((p->in_use_p[word] >> bit) & 1) == 0)
		memset (p->page + i * size, 0xa5, size);
	    }
	}
    }
}
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect ()
{
1448 /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
1451 size_t allocated_last_gc =
1452 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1454 size_t min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
  if (G.allocated < allocated_last_gc + min_expand)
    return;
1459 timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;
1467 /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  clear_marks ();
  ggc_mark_roots ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif

  sweep_pages ();
1480 G.allocated_last_gc = G.allocated;
1482 timevar_pop (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
}
1488 /* Print allocation statistics. */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
1494 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
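/* E.g. x == 5000000 is at least 10k but under 10M, so it prints as
   SCALE (x) == 5000000 / 1024 == 4882 with LABEL (x) == 'k'.  */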
void
ggc_print_statistics ()
{
1499 struct ggc_statistics stats;
  size_t i;
  size_t total_overhead = 0;
1503 /* Clear the statistics. */
1504 memset (&stats, 0, sizeof (stats));
1506 /* Make sure collection will really occur. */
1507 G.allocated_last_gc = 0;
1509 /* Collect and print the statistics common across collectors. */
1510 ggc_print_common_statistics (stderr, &stats);
1512 /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();
  /* Collect some information about the various sizes of
     allocation.  */
1518 fprintf (stderr, "\n%-5s %10s %10s %10s\n",
1519 "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;
      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;
1531 overhead = allocated = in_use = 0;
1533 /* Figure out the total number of bytes allocated for objects of
1534 this size, and how many of them are actually in use. Also figure
1535 out how much memory the page table is using. */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
1540 (OBJECTS_PER_PAGE (i) - p->num_free_objects) * OBJECT_SIZE (i);
1542 overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
	}
1545 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
1546 (unsigned long) OBJECT_SIZE (i),
1547 SCALE (allocated), LABEL (allocated),
1548 SCALE (in_use), LABEL (in_use),
1549 SCALE (overhead), LABEL (overhead));
      total_overhead += overhead;
    }
1552 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
1553 SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
1554 SCALE (G.allocated), LABEL(G.allocated),
	   SCALE (total_overhead), LABEL (total_overhead));
}