1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
3 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin (dberlin@dberlin.org)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
38 #ifdef ENABLE_VALGRIND_CHECKING
39 #include <valgrind/memcheck.h>
41 /* Avoid #ifdef:s when we can help it. */
42 #define VALGRIND_DISCARD(x)
43 #define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
44 #define VALGRIND_FREELIKE_BLOCK(x,y)
46 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
47 file open. Prefer either to valloc. */
49 # undef HAVE_MMAP_DEV_ZERO
51 # include <sys/mman.h>
53 # define MAP_FAILED -1
55 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
56 # define MAP_ANONYMOUS MAP_ANON
62 #ifdef HAVE_MMAP_DEV_ZERO
64 # include <sys/mman.h>
66 # define MAP_FAILED -1
73 #define USING_MALLOC_PAGE_GROUPS
76 #if (GCC_VERSION < 3001)
77 #define prefetch(X) ((void) X)
79 #define prefetch(X) __builtin_prefetch (X)
83 If we track inter-zone pointers, we can mark single zones at a
85 If we have a zone where we guarantee no inter-zone pointers, we
86 could mark that zone separately.
87 The garbage zone should not be marked, and we should return 1 in
88 ggc_set_mark for any object in the garbage zone, which cuts off
92 This garbage-collecting allocator segregates objects into zones.
93 It also segregates objects into "large" and "small" bins. Large
94 objects are those greater than or equal to the page size.
96 Pages for small objects are broken up into chunks, each of which
97 are described by a struct alloc_chunk. One can walk over all
98 chunks on the page by adding the chunk size to the chunk's data
99 address. The free space for a page exists in the free chunk bins.
101 Each page-entry also has a context depth, which is used to track
102 pushing and popping of allocation contexts. Only objects allocated
103 in the current (highest-numbered) context may be collected.
105 Empty pages (of all sizes) are kept on a single page cache list,
106 and are considered first when new pages are required; they are
107 deallocated at the start of the next collection if they haven't
108 been recycled by then. */
110 /* Define GGC_DEBUG_LEVEL to print debugging information.
111 0: No debugging output.
112 1: GC statistics only.
113 2: Page-entry allocations/deallocations as well.
114 3: Object allocations as well.
115 4: Object marks as well. */
116 #define GGC_DEBUG_LEVEL (0)
118 #ifndef HOST_BITS_PER_PTR
119 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
121 #ifdef COOKIE_CHECKING
122 #define CHUNK_MAGIC 0x95321123
123 #define DEADCHUNK_MAGIC 0x12817317
126 /* This structure manages small chunks. When the chunk is free, it's
127 linked with other chunks via free_next. When the chunk is allocated,
128 the data starts at u. Large chunks are allocated one at a time to
129 their own page, and so don't come in here.
131 The "type" field is a placeholder for a future change to do
132 generational collection. At present it is 0 when free and
133 1 when allocated. */
136 #ifdef COOKIE_CHECKING
/* 15-bit size field: its maximum value (0x7fff) is reserved as the
   LARGE_OBJECT_SIZE marker defined further below.  */
140 unsigned int typecode:15;
141 unsigned int size:15;
/* Used only while the chunk is on a free list; overlays the data
   area (NOTE(review): the enclosing union declaration is not visible
   in this excerpt -- confirm against the full file).  */
144 struct alloc_chunk *next_free;
147 /* Make sure the data is sufficiently aligned. */
148 HOST_WIDEST_INT align_i;
149 #ifdef HAVE_LONG_DOUBLE
/* Packed so the header adds no padding before the data union;
   CHUNK_OVERHEAD below depends on this exact layout.  */
155 } __attribute__ ((packed));
/* Number of header bytes that precede an object's data.  */
157 #define CHUNK_OVERHEAD (offsetof (struct alloc_chunk, u))
159 /* We maintain several bins of free lists for chunks for very small
160 objects. We never exhaustively search other bins -- if we don't
161 find one of the proper size, we allocate from the "larger" bin. */
163 /* Decreasing the number of free bins increases the time it takes to allocate.
164 Similar with increasing max_free_bin_size without increasing num_free_bins.
166 After much histogramming of allocation sizes and time spent on gc,
167 on a PowerPC G4 7450 - 667 MHz, and a Pentium 4 - 2.8 GHz,
168 these were determined to be the optimal values. */
169 #define NUM_FREE_BINS 64
170 #define MAX_FREE_BIN_SIZE 256
171 #define FREE_BIN_DELTA (MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
172 #define SIZE_BIN_UP(SIZE) (((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
173 #define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
175 /* Marker used as chunk->size for a large object. Should correspond
176 to the size of the bitfield above. */
177 #define LARGE_OBJECT_SIZE 0x7fff
179 /* We use this structure to determine the alignment required for
180 allocations. For power-of-two sized allocations, that's not a
181 problem, but it does matter for odd-sized allocations. */
183 struct max_alignment {
187 #ifdef HAVE_LONG_DOUBLE
195 /* The biggest alignment required. */
197 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
199 /* Compute the smallest nonnegative number which when added to X gives
202 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
204 /* Compute the smallest multiple of F that is >= X. */
206 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
208 /* A two-level tree is used to look up the page-entry for a given
209 pointer. Two chunks of the pointer's bits are extracted to index
210 the first and second levels of the tree, as follows:
214 msb +----------------+----+------+------+ lsb
220 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
221 pages are aligned on system page boundaries. The next most
222 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
223 index values in the lookup table, respectively.
225 For 32-bit architectures and the settings below, there are no
226 leftover bits. For architectures with wider pointers, the lookup
227 tree points to a list of pages, which must be scanned to find the
230 #define PAGE_L1_BITS (8)
231 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
232 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
233 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
235 #define LOOKUP_L1(p) \
236 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
238 #define LOOKUP_L2(p) \
239 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
242 /* A page_entry records the status of an allocation page. */
243 typedef struct page_entry
245 /* The next page-entry with objects of the same size, or NULL if
246 this is the last page-entry. */
247 struct page_entry *next;
249 /* The number of bytes allocated. (This will always be a multiple
250 of the host system page size.) */
253 /* How many collections we've survived. */
256 /* The address at which the memory is allocated. */
259 #ifdef USING_MALLOC_PAGE_GROUPS
260 /* Back pointer to the page group this page came from. */
261 struct page_group *group;
264 /* Number of bytes on the page unallocated. Only used during
265 collection, and even then large pages merely set this non-zero. */
268 /* Context depth of this page. */
269 unsigned short context_depth;
271 /* Does this page contain small objects, or one large object? */
274 struct alloc_zone *zone;
277 #ifdef USING_MALLOC_PAGE_GROUPS
278 /* A page_group describes a large allocation from malloc, from which
279 we parcel out aligned pages. */
280 typedef struct page_group
282 /* A linked list of all extant page groups. */
283 struct page_group *next;
285 /* The address we received from malloc. */
288 /* The size of the block. */
291 /* A bitmask of pages in use. */
296 #if HOST_BITS_PER_PTR <= 32
298 /* On 32-bit hosts, we use a two level page table, as pictured above. */
299 typedef page_entry **page_table[PAGE_L1_SIZE];
303 /* On 64-bit hosts, we use the same two level page tables plus a linked
304 list that disambiguates the top 32-bits. There will almost always be
305 exactly one entry in the list. */
306 typedef struct page_table_chain
308 struct page_table_chain *next;
310 page_entry **table[PAGE_L1_SIZE];
315 /* The global variables. */
316 static struct globals
318 /* The page lookup table. A single page can only belong to one
319 zone. This means free pages are zone-specific ATM. */
321 /* The linked list of zones. */
322 struct alloc_zone *zones;
324 /* The system's page size. */
328 /* A file descriptor open to /dev/zero for reading. */
329 #if defined (HAVE_MMAP_DEV_ZERO)
333 /* The file descriptor for debugging output. */
337 /* The zone allocation structure. */
340 /* Name of the zone. */
343 /* Linked list of pages in a zone. */
346 /* Linked lists of free storage. Slots 1 ... NUM_FREE_BINS have chunks of size
347 FREE_BIN_DELTA. All other chunks are in slot 0. */
348 struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];
350 /* Bytes currently allocated. */
353 /* Bytes currently allocated at the end of the last collection. */
354 size_t allocated_last_gc;
356 /* Total amount of memory mapped. */
359 /* Bit N set if any allocations have been done at context depth N. */
360 unsigned long context_depth_allocations;
362 /* Bit N set if any collections have been done at context depth N. */
363 unsigned long context_depth_collections;
365 /* The current depth in the context stack. */
366 unsigned short context_depth;
368 /* A cache of free system pages. */
369 page_entry *free_pages;
371 #ifdef USING_MALLOC_PAGE_GROUPS
372 page_group *page_groups;
375 /* Next zone in the linked list of zones. */
376 struct alloc_zone *next_zone;
378 /* Return true if this zone was collected during this collection. */
382 struct alloc_zone *rtl_zone;
383 struct alloc_zone *garbage_zone;
384 struct alloc_zone *tree_zone;
386 /* Allocate pages in chunks of this size, to throttle calls to memory
387 allocation routines. The first page is used, the rest go onto the
388 free list. This cannot be larger than HOST_BITS_PER_INT for the
389 in_use bitmask for page_group. */
390 #define GGC_QUIRE_SIZE 16
392 static int ggc_allocated_p (const void *);
393 static page_entry *lookup_page_table_entry (const void *);
394 static void set_page_table_entry (void *, page_entry *);
396 static char *alloc_anon (char *, size_t, struct alloc_zone *);
398 #ifdef USING_MALLOC_PAGE_GROUPS
399 static size_t page_group_index (char *, char *);
400 static void set_page_group_in_use (page_group *, char *);
401 static void clear_page_group_in_use (page_group *, char *);
403 static struct page_entry * alloc_small_page ( struct alloc_zone *);
404 static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
405 static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
406 static void free_page (struct page_entry *);
407 static void release_pages (struct alloc_zone *);
408 static void sweep_pages (struct alloc_zone *);
409 static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short);
410 static bool ggc_collect_1 (struct alloc_zone *, bool);
411 static void check_cookies (void);
414 /* Returns nonzero if P was allocated in GC'able memory. */
417 ggc_allocated_p (const void *p)
422 #if HOST_BITS_PER_PTR <= 32
/* On wider hosts, search the chain of page tables for the one whose
   high 32 bits match P (NOTE(review): the chain-walk lines are not
   visible in this excerpt).  */
425 page_table table = G.lookup;
426 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
431 if (table->high_bits == high_bits)
435 base = &table->table[0];
438 /* Extract the level 1 and 2 indices. */
/* True only when both levels of the two-level lookup table are
   populated for P's page.  */
442 return base[L1] && base[L1][L2];
445 /* Traverse the page table and find the entry for a page.
446 Die (probably) if the object wasn't allocated via GC. */
448 static inline page_entry *
449 lookup_page_table_entry(const void *p)
454 #if HOST_BITS_PER_PTR <= 32
457 page_table table = G.lookup;
458 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
/* Unlike ggc_allocated_p, this loop performs no NULL check: if no
   table matches, the walk runs off the end of the chain -- that is
   the "die (probably)" in the header comment.  */
459 while (table->high_bits != high_bits)
461 base = &table->table[0];
464 /* Extract the level 1 and 2 indices. */
472 /* Set the page table entry for a page.  P is the page address,
   ENTRY its page_entry (or NULL to clear the slot).  */
475 set_page_table_entry(void *p, page_entry *entry)
480 #if HOST_BITS_PER_PTR <= 32
484 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
/* Look for an existing chained table covering P's upper 32 bits.  */
485 for (table = G.lookup; table; table = table->next)
486 if (table->high_bits == high_bits)
489 /* Not found -- allocate a new table. */
/* xcalloc zeroes the new table, so all L1 slots start out NULL;
   the new link is pushed on the front of G.lookup.  */
490 table = (page_table) xcalloc (1, sizeof(*table));
491 table->next = G.lookup;
492 table->high_bits = high_bits;
495 base = &table->table[0];
498 /* Extract the level 1 and 2 indices. */
/* Lazily allocate the second-level table (zero-filled) on first use
   of this L1 slot.  */
502 if (base[L1] == NULL)
503 base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
505 base[L1][L2] = entry;
509 /* Allocate SIZE bytes of anonymous memory, preferably near PREF,
510 (if non-null). The ifdef structure here is intended to cause a
511 compile error unless exactly one of the HAVE_* is defined. */
514 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
516 #ifdef HAVE_MMAP_ANON
517 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
518 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
520 #ifdef HAVE_MMAP_DEV_ZERO
/* Fallback for systems without MAP_ANONYMOUS: map /dev/zero, whose
   descriptor was opened once at initialization (G.dev_zero_fd).  */
521 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
522 MAP_PRIVATE, G.dev_zero_fd, 0);
524 VALGRIND_MALLOCLIKE_BLOCK(page, size, 0, 0);
/* Allocation failure here is fatal -- the function never returns
   NULL to its callers.  */
526 if (page == (char *) MAP_FAILED)
528 perror ("virtual memory exhausted");
529 exit (FATAL_EXIT_CODE);
532 /* Remember that we allocated this memory. */
533 zone->bytes_mapped += size;
534 /* Pretend we don't have access to the allocated pages. We'll enable
535 access to smaller pieces of the area in ggc_alloc. Discard the
536 handle to avoid handle leak. */
537 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
541 #ifdef USING_MALLOC_PAGE_GROUPS
542 /* Compute the index for this page into the page group.  PAGE must
   lie within the block that starts at ALLOCATION; the result is the
   page-sized offset, used as a bit number in group->in_use.  */
545 page_group_index (char *allocation, char *page)
547 return (size_t) (page - allocation) >> G.lg_pagesize;
550 /* Set and clear the in_use bit for this page in the page group. */
553 set_page_group_in_use (page_group *group, char *page)
555 group->in_use |= 1 << page_group_index (group->allocation, page);
/* Clear PAGE's bit in GROUP's in_use mask.  When the mask reaches
   zero, release_pages frees the whole group's storage.  */
559 clear_page_group_in_use (page_group *group, char *page)
561 group->in_use &= ~(1 << page_group_index (group->allocation, page));
565 /* Allocate a new small-object page for ZONE and return an entry for
566 it.  The entry is not added to the
567 appropriate page_table list. */
569 static inline struct page_entry *
570 alloc_small_page (struct alloc_zone *zone)
572 struct page_entry *entry;
574 #ifdef USING_MALLOC_PAGE_GROUPS
580 /* Check the list of free pages for one we can use. */
581 entry = zone->free_pages;
584 /* Recycle the allocated memory from this page ... */
585 zone->free_pages = entry->next;
588 #ifdef USING_MALLOC_PAGE_GROUPS
589 group = entry->group;
595 /* We want just one page. Allocate a bunch of them and put the
596 extras on the freelist. (Can only do this optimization with
597 mmap for backing store.) */
598 struct page_entry *e, *f = zone->free_pages;
/* Batch-map GGC_QUIRE_SIZE pages in a single mmap to amortize the
   system-call cost; the extras are chained onto the free list.  */
601 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);
603 /* This loop counts down so that the chain will be in ascending
605 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
607 e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
608 e->bytes = G.pagesize;
609 e->page = page + (i << G.lg_pagesize);
614 zone->free_pages = f;
617 #ifdef USING_MALLOC_PAGE_GROUPS
620 /* Allocate a large block of memory and serve out the aligned
621 pages therein. This results in much less memory wastage
622 than the traditional implementation of valloc. */
624 char *allocation, *a, *enda;
625 size_t alloc_size, head_slop, tail_slop;
626 int multiple_pages = (entry_size == G.pagesize);
629 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
631 alloc_size = entry_size + G.pagesize - 1;
632 allocation = xmalloc (alloc_size);
633 VALGRIND_MALLOCLIKE_BLOCK(addr, alloc_size, 0, 0);
/* Round the malloc'd block up to the first page boundary; head_slop
   and tail_slop are the unusable fragments at either end.  */
635 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
636 head_slop = page - allocation;
638 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
640 tail_slop = alloc_size - entry_size - head_slop;
641 enda = allocation + alloc_size - tail_slop;
643 /* We allocated N pages, which are likely not aligned, leaving
644 us with N-1 usable pages. We plan to place the page_group
645 structure somewhere in the slop. */
646 if (head_slop >= sizeof (page_group))
647 group = (page_group *)page - 1;
650 /* We magically got an aligned allocation. Too bad, we have
651 to waste a page anyway. */
655 tail_slop += G.pagesize;
/* NOTE(review): if the tail slop is still too small for the
   page_group, more slop must be carved out -- the lines handling
   that case are not visible in this excerpt.  */
657 if (tail_slop < sizeof (page_group))
659 group = (page_group *)enda;
660 tail_slop -= sizeof (page_group);
663 /* Remember that we allocated this memory. */
664 group->next = G.page_groups;
665 group->allocation = allocation;
666 group->alloc_size = alloc_size;
668 zone->page_groups = group;
669 G.bytes_mapped += alloc_size;
671 /* If we allocated multiple pages, put the rest on the free list. */
674 struct page_entry *e, *f = G.free_pages;
675 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
677 e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
678 e->bytes = G.pagesize;
684 zone->free_pages = f;
/* Build the page_entry describing the one page we hand back.  */
690 entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));
693 entry->bytes = G.pagesize;
694 entry->bytes_free = G.pagesize;
696 entry->context_depth = zone->context_depth;
697 entry->large_p = false;
/* Record that an allocation happened at this context depth (used by
   ggc_pop_context_1's bookkeeping).  */
699 zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
701 #ifdef USING_MALLOC_PAGE_GROUPS
702 entry->group = group;
703 set_page_group_in_use (group, page);
706 set_page_table_entry (page, entry);
708 if (GGC_DEBUG_LEVEL >= 2)
709 fprintf (G.debug_file,
710 "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
711 (PTR) entry, page, page + G.pagesize - 1);
716 /* Allocate a large page of size SIZE in ZONE.  The page_entry is
   stored inline, immediately after the chunk header + data, so one
   xmalloc covers both.  */
718 static inline struct page_entry *
719 alloc_large_page (size_t size, struct alloc_zone *zone)
721 struct page_entry *entry;
724 page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
725 entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);
/* bytes_free is a sentinel for large pages: collection code only
   tests it for zero/non-zero (see ggc_set_mark / sweep_pages).  */
729 entry->bytes_free = LARGE_OBJECT_SIZE + CHUNK_OVERHEAD;
731 entry->context_depth = zone->context_depth;
732 entry->large_p = true;
734 zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
736 #ifdef USING_MALLOC_PAGE_GROUPS
/* Register only the starting page address in the lookup table;
   interior addresses of a large object are not entered here.  */
739 set_page_table_entry (page, entry);
741 if (GGC_DEBUG_LEVEL >= 2)
742 fprintf (G.debug_file,
743 "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
744 (PTR) entry, page, page + size - 1);
750 /* For a page that is no longer needed, put it on the free page list. */
753 free_page (page_entry *entry)
755 if (GGC_DEBUG_LEVEL >= 2)
756 fprintf (G.debug_file,
757 "Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
758 entry->page, entry->page + entry->bytes - 1);
/* Remove the page from the global lookup table.  */
760 set_page_table_entry (entry->page, NULL);
765 VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
769 /* Mark the page as inaccessible. Discard the handle to
770 avoid handle leak. */
771 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
773 #ifdef USING_MALLOC_PAGE_GROUPS
774 clear_page_group_in_use (entry->group, entry->page);
/* Cache the page on its zone's free list; actual unmapping happens
   later in release_pages.  */
777 entry->next = entry->zone->free_pages;
778 entry->zone->free_pages = entry;
782 /* Release the free page cache to the system. */
785 release_pages (struct alloc_zone *zone)
788 page_entry *p, *next;
792 /* Gather up adjacent pages so they are unmapped together. */
793 p = zone->free_pages;
/* Coalesce a run of contiguous pages into one munmap-sized span
   (NOTE(review): the munmap call itself is not visible in this
   excerpt).  */
803 while (p && p->page == start + len)
812 zone->bytes_mapped -= len;
815 zone->free_pages = NULL;
817 #ifdef USING_MALLOC_PAGE_GROUPS
821 /* Remove all pages from free page groups from the list. */
822 pp = &(zone->free_pages);
823 while ((p = *pp) != NULL)
824 if (p->group->in_use == 0)
832 /* Remove all free page groups, and release the storage. */
833 gp = &(zone->page_groups);
834 while ((g = *gp) != NULL)
/* A group whose in_use mask is empty has no live pages left; give
   its whole malloc'd block back.  */
838 zone->bytes_mapped -= g->alloc_size;
839 free (g->allocation);
840 VALGRIND_FREELIKE_BLOCK(g->allocation, 0);
847 /* Place CHUNK of size SIZE on the free list for ZONE. */
850 free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
/* Bins 1..NUM_FREE_BINS hold exact small sizes; anything larger is
   kept in the catch-all bin 0 (the oversize case's reassignment line
   is not visible in this excerpt -- presumed bin = 0).  */
854 bin = SIZE_BIN_DOWN (size);
857 if (bin > NUM_FREE_BINS)
859 #ifdef COOKIE_CHECKING
/* A chunk being freed must carry either cookie; it is then stamped
   dead so a double-free is detectable.  */
860 if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
862 chunk->magic = DEADCHUNK_MAGIC;
/* Push onto the front of the bin's singly linked free list.  */
864 chunk->u.next_free = zone->free_chunks[bin];
865 zone->free_chunks[bin] = chunk;
866 if (GGC_DEBUG_LEVEL >= 3)
867 fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
868 VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
871 /* Allocate a chunk of memory of SIZE bytes in ZONE, tagging it with
   TYPE (a gt_types_enum value, or -1 for untyped).  Returns a
   pointer to the object's data area.  */
874 ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
878 struct page_entry *entry;
879 struct alloc_chunk *chunk, *lchunk, **pp;
882 /* Align size, so that we're assured of aligned allocations. */
883 if (size < FREE_BIN_DELTA)
884 size = FREE_BIN_DELTA;
885 size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
887 /* Large objects are handled specially. */
/* Anything that would not fit on a small page together with two
   chunk headers goes onto its own dedicated page.  */
888 if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
890 entry = alloc_large_page (size, zone);
892 entry->next = entry->zone->pages;
893 entry->zone->pages = entry;
896 chunk = (struct alloc_chunk *) entry->page;
897 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
/* The size bitfield cannot hold a large size; the sentinel tells
   ggc_get_size to consult the page_entry instead.  */
898 chunk->size = LARGE_OBJECT_SIZE;
903 /* First look for a tiny object already segregated into its own
905 bin = SIZE_BIN_UP (size);
906 if (bin <= NUM_FREE_BINS)
908 chunk = zone->free_chunks[bin];
911 zone->free_chunks[bin] = chunk->u.next_free;
912 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
917 /* Failing that, look through the "other" bucket for a chunk
918 that is large enough. */
/* First-fit scan of bin 0; pp tracks the link to patch on removal.  */
919 pp = &(zone->free_chunks[0]);
921 while (chunk && chunk->size < size)
923 pp = &chunk->u.next_free;
927 /* Failing that, allocate new storage. */
930 entry = alloc_small_page (zone);
931 entry->next = entry->zone->pages;
932 entry->zone->pages = entry;
934 chunk = (struct alloc_chunk *) entry->page;
935 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
/* A fresh page is one big chunk covering everything after the
   header.  */
936 chunk->size = G.pagesize - CHUNK_OVERHEAD;
940 *pp = chunk->u.next_free;
941 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
943 /* Release extra memory from a chunk that's too big. */
/* Split only when the remainder can hold a header plus the minimum
   object size; otherwise the caller keeps the slack.  */
944 lsize = chunk->size - size;
945 if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
947 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
950 lsize -= CHUNK_OVERHEAD;
951 lchunk = (struct alloc_chunk *)(chunk->u.data + size);
952 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
953 #ifdef COOKIE_CHECKING
954 lchunk->magic = CHUNK_MAGIC;
958 lchunk->size = lsize;
959 free_chunk (lchunk, lsize, zone);
961 /* Calculate the object's address. */
963 #ifdef COOKIE_CHECKING
964 chunk->magic = CHUNK_MAGIC;
968 chunk->typecode = type;
969 result = chunk->u.data;
971 #ifdef ENABLE_GC_CHECKING
972 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
973 exact same semantics in presence of memory bugs, regardless of
974 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
975 handle to avoid handle leak. */
976 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
978 /* `Poison' the entire allocated object. */
979 memset (result, 0xaf, size);
982 /* Tell Valgrind that the memory is there, but its content isn't
983 defined. The bytes at the end of the object are still marked
985 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
987 /* Keep track of how many bytes are being allocated. This
988 information is used in deciding when to collect. */
989 zone->allocated += size + CHUNK_OVERHEAD;
991 if (GGC_DEBUG_LEVEL >= 3)
992 fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
993 (void *)chunk, (unsigned long) size, result);
998 /* Allocate a SIZE of chunk memory of GTE type, into an appropriate zone:
   trees go to the tree zone, RTL (rtx and rtvec) to the RTL zone,
   everything else to the main zone.  */
1002 ggc_alloc_typed (enum gt_types_enum gte, size_t size)
1004 if (gte == gt_ggc_e_14lang_tree_node)
1005 return ggc_alloc_zone_1 (size, tree_zone, gte);
1006 else if (gte == gt_ggc_e_7rtx_def)
1007 return ggc_alloc_zone_1 (size, rtl_zone, gte);
1008 else if (gte == gt_ggc_e_9rtvec_def)
1009 return ggc_alloc_zone_1 (size, rtl_zone, gte);
1011 return ggc_alloc_zone_1 (size, &main_zone, gte);
1014 /* Normal ggc_alloc simply allocates into the main zone.  -1 means
   "no type code".  */
1017 ggc_alloc (size_t size)
1019 return ggc_alloc_zone_1 (size, &main_zone, -1);
1022 /* Zone allocation allocates into the specified zone, untyped (-1). */
1025 ggc_alloc_zone (size_t size, struct alloc_zone *zone)
1027 return ggc_alloc_zone_1 (size, zone, -1);
1030 /* If P is not marked, mark it and return false. Otherwise return true.
1031 P must have been allocated by the GC allocator; it mustn't point to
1032 static objects, stack variables, or memory allocated with malloc. */
1035 ggc_set_mark (const void *p)
1038 struct alloc_chunk *chunk;
1040 #ifdef ENABLE_CHECKING
1041 /* Look up the page on which the object is alloced. If the object
1042 wasn't allocated by the collector, we'll probably die. */
1043 entry = lookup_page_table_entry (p);
/* The chunk header sits immediately before the object's data.  */
1047 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
1048 #ifdef COOKIE_CHECKING
1049 if (chunk->magic != CHUNK_MAGIC)
/* Without ENABLE_CHECKING the page lookup was skipped above and is
   done lazily here, only when needed.  */
1056 #ifndef ENABLE_CHECKING
1057 entry = lookup_page_table_entry (p);
1060 /* Large pages are either completely full or completely empty. So if
1061 they are marked, they are completely full. */
1063 entry->bytes_free = 0;
/* Small pages track marked bytes so sweep_pages can detect fullness.  */
1065 entry->bytes_free -= chunk->size + CHUNK_OVERHEAD;
1067 if (GGC_DEBUG_LEVEL >= 4)
1068 fprintf (G.debug_file, "Marking %p\n", p);
1073 /* Return 1 if P has been marked, zero otherwise.
1074 P must have been allocated by the GC allocator; it mustn't point to
1075 static objects, stack variables, or memory allocated with malloc. */
1078 ggc_marked_p (const void *p)
1080 struct alloc_chunk *chunk;
1082 #ifdef ENABLE_CHECKING
/* Sanity check: the lookup dies (probably) for non-GC pointers.  */
1084 page_entry *entry = lookup_page_table_entry (p);
/* Recover the chunk header preceding the data and read its mark
   (NOTE(review): the return statement is not visible in this
   excerpt).  */
1090 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
1091 #ifdef COOKIE_CHECKING
1092 if (chunk->magic != CHUNK_MAGIC)
1098 /* Return the size of the gc-able object P. */
1101 ggc_get_size (const void *p)
1103 struct alloc_chunk *chunk;
1104 struct page_entry *entry;
1106 #ifdef ENABLE_CHECKING
1107 entry = lookup_page_table_entry (p);
1112 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
1113 #ifdef COOKIE_CHECKING
1114 if (chunk->magic != CHUNK_MAGIC)
/* The 15-bit size field cannot represent a large object; the
   sentinel value redirects to the page_entry's byte count.  */
1117 if (chunk->size == LARGE_OBJECT_SIZE)
1119 #ifndef ENABLE_CHECKING
1120 entry = lookup_page_table_entry (p);
1122 return entry->bytes;
1128 /* Initialize the ggc-zone-mmap allocator.  Sets up the four built-in
   zones (main -> RTL -> garbage -> tree), the page-size constants,
   the optional /dev/zero descriptor and debug stream, and works
   around an mmap alignment bug.  (NOTE(review): the function's
   signature line is not visible in this excerpt.)  */
1132 /* Create the zones. */
/* main_zone is statically allocated and heads the zone list; the
   other three are xcalloc'd (so all their fields start zeroed).  */
1133 main_zone.name = "Main zone";
1134 G.zones = &main_zone;
1136 rtl_zone = xcalloc (1, sizeof (struct alloc_zone));
1137 rtl_zone->name = "RTL zone";
1138 /* The main zone's connected to the ... rtl_zone */
1139 G.zones->next_zone = rtl_zone;
1141 garbage_zone = xcalloc (1, sizeof (struct alloc_zone));
1142 garbage_zone->name = "Garbage zone";
1143 /* The rtl zone's connected to the ... garbage zone */
1144 rtl_zone->next_zone = garbage_zone;
1146 tree_zone = xcalloc (1, sizeof (struct alloc_zone));
1147 tree_zone->name = "Tree zone";
1148 /* The garbage zone's connected to ... the tree zone */
1149 garbage_zone->next_zone = tree_zone;
1151 G.pagesize = getpagesize();
1152 G.lg_pagesize = exact_log2 (G.pagesize);
1153 #ifdef HAVE_MMAP_DEV_ZERO
1154 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1155 if (G.dev_zero_fd == -1)
/* Debug output goes to a line-buffered file when GGC_DEBUG_LEVEL is
   active, stdout otherwise.  */
1160 G.debug_file = fopen ("ggc-mmap.debug", "w");
1161 setlinebuf (G.debug_file);
1163 G.debug_file = stdout;
1167 /* StunOS has an amazing off-by-one error for the first mmap allocation
1168 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1169 believe, is an unaligned page allocation, which would cause us to
1170 hork badly if we tried to use it. */
1172 char *p = alloc_anon (NULL, G.pagesize, &main_zone);
1173 struct page_entry *e;
1174 if ((size_t)p & (G.pagesize - 1))
1176 /* How losing. Discard this one and try another. If we still
1177 can't get something useful, give up. */
1179 p = alloc_anon (NULL, G.pagesize, &main_zone);
1180 if ((size_t)p & (G.pagesize - 1))
1184 /* We have a good page, might as well hold onto it... */
1185 e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
1186 e->bytes = G.pagesize;
1188 e->next = main_zone.free_pages;
1189 main_zone.free_pages = e;
1194 /* Increment the `GC context'. Objects allocated in an outer context
1195 are never freed, eliminating the need to register their roots. */
1198 ggc_push_context (void)
1200 struct alloc_zone *zone;
/* All zones share one logical context depth; bump each in step.  */
1201 for (zone = G.zones; zone; zone = zone->next_zone)
1202 ++(zone->context_depth);
/* Depth is capped by the width of the per-zone context_depth_*
   bitmasks (unsigned long).  */
1204 if (main_zone.context_depth >= HOST_BITS_PER_LONG)
1208 /* Decrement the `GC context'. All objects allocated since the
1209 previous ggc_push_context are migrated to the outer context. */
1212 ggc_pop_context_1 (struct alloc_zone *zone)
1214 unsigned long omask;
1218 depth = --(zone->context_depth);
/* omask selects the bit for the context we just popped.  */
1219 omask = (unsigned long)1 << (depth + 1);
/* If nothing was allocated or collected in the popped context,
   there is no state to fold down or pages to migrate.  */
1221 if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
/* Fold the popped context's allocation bit into the new current
   depth, then clear all bits at or above the popped level.  */
1224 zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
1225 zone->context_depth_allocations &= omask - 1;
1226 zone->context_depth_collections &= omask - 1;
1228 /* Any remaining pages in the popped context are lowered to the new
1229 current context; i.e. objects allocated in the popped context and
1230 left over are imported into the previous context. */
1231 for (p = zone->pages; p != NULL; p = p->next)
1232 if (p->context_depth > depth)
1233 p->context_depth = depth;
1236 /* Pop all the zone contexts.  Applies ggc_pop_context_1 to every
   zone so their context depths stay in lockstep.  */
1239 ggc_pop_context (void)
1241 struct alloc_zone *zone;
1242 for (zone = G.zones; zone; zone = zone->next_zone)
1243 ggc_pop_context_1 (zone);
1247 /* Poison the chunk.  With GC checking enabled, fill a freed chunk's
   data with 0xa5 so stale reads are recognizable; otherwise expand
   to nothing.  */
1248 #ifdef ENABLE_GC_CHECKING
1249 #define poison_chunk(CHUNK, SIZE) \
1250 memset ((CHUNK)->u.data, 0xa5, (SIZE))
1252 #define poison_chunk(CHUNK, SIZE)
1255 /* Free all empty pages and objects within a page for a given zone. */
1258 sweep_pages (struct alloc_zone *zone)
1260 page_entry **pp, *p, *next;
1261 struct alloc_chunk *chunk, *last_free, *end;
1262 size_t last_free_size, allocated = 0;
1264 /* First, reset the free_chunks lists, since we are going to
1265 re-free free chunks in hopes of coalescing them into large chunks. */
1266 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
1268 for (p = zone->pages; p ; p = next)
1272 /* For empty pages, just free the page. */
/* Only pages belonging to the current context may be reclaimed.  */
1273 if (p->bytes_free == G.pagesize && p->context_depth == zone->context_depth)
1276 #ifdef ENABLE_GC_CHECKING
1277 /* Poison the page. */
1278 memset (p->page, 0xb5, p->bytes);
1284 /* Large pages are all or none affairs. Either they are
1285 completely empty, or they are completely full.
1286 Thus, if the above didn't catch it, we need not do anything
1287 except remove the mark and reset the bytes_free.
1289 XXX: Should we bother to increment allocated. */
1290 else if (p->large_p)
1292 p->bytes_free = p->bytes;
1293 ((struct alloc_chunk *)p->page)->mark = 0;
1298 /* This page has now survived another collection. */
1301 /* Which leaves full and partial pages. Step through all chunks,
1302 consolidate those that are free and insert them into the free
1303 lists. Note that consolidation slows down collection
1306 chunk = (struct alloc_chunk *)p->page;
1307 end = (struct alloc_chunk *)(p->page + G.pagesize);
/* Prefetch the next chunk header while processing this one.  */
1313 prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
/* A marked chunk -- or any chunk on a page from an outer context --
   survives; first flush the pending run of free chunks, if any.  */
1314 if (chunk->mark || p->context_depth < zone->context_depth)
1318 last_free->type = 0;
1319 last_free->size = last_free_size;
1320 last_free->mark = 0;
1321 poison_chunk (last_free, last_free_size);
1322 free_chunk (last_free, last_free_size, zone);
1327 allocated += chunk->size + CHUNK_OVERHEAD;
1328 p->bytes_free += chunk->size + CHUNK_OVERHEAD;
1331 #ifdef ENABLE_CHECKING
1332 if (p->bytes_free > p->bytes)
/* Unmarked chunk: extend the current free run (coalescing absorbs
   the dead chunk's header too), or start a new run.  */
1340 last_free_size += CHUNK_OVERHEAD + chunk->size;
1345 last_free_size = chunk->size;
/* Advance to the next chunk by walking over this one's data.  */
1349 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1351 while (chunk < end);
/* Flush any free run left pending at the end of the page.  */
1355 last_free->type = 0;
1356 last_free->size = last_free_size;
1357 last_free->mark = 0;
1358 poison_chunk (last_free, last_free_size);
1359 free_chunk (last_free, last_free_size, zone);
/* Record the surviving byte total; used by ggc_collect_1's
   expansion heuristic.  */
1363 zone->allocated = allocated;
1366 /* Mark-and-sweep routine for collecting a single zone. NEED_MARKING
1367 is true if we need to mark before sweeping, false if some other
1368 zone collection has already performed marking for us. Returns true
1369 if we collected, false otherwise. */
1372 ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1374 /* Avoid frequent unnecessary work by skipping collection if the
1375 total allocations haven't expanded much since the last
/* The heap must have grown by GGC_MIN_EXPAND percent over the larger
   of the last-GC survivors and GGC_MIN_HEAPSIZE before we collect.  */
1377 float allocated_last_gc =
1378 MAX (zone->allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1380 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
/* Below threshold: skip this collection (early return in elided
   line -- TODO confirm).  */
1382 if (zone->allocated < allocated_last_gc + min_expand)
1386 fprintf (stderr, " {%s GC %luk -> ", zone->name, (unsigned long) zone->allocated / 1024);
1388 /* Zero the total allocated bytes. This will be recalculated in the
1390 zone->allocated = 0;
1392 /* Release the pages we freed the last time we collected, but didn't
1393 reuse in the interim. */
1394 release_pages (zone);
1396 /* Indicate that we've seen collections at this context depth. */
/* Mask with bits 0..context_depth set.  */
1397 zone->context_depth_collections
1398 = ((unsigned long)1 << (zone->context_depth + 1)) - 1;
1402 zone->was_collected = true;
/* Survivors of this GC become the baseline for the next trigger
   check above.  */
1403 zone->allocated_last_gc = zone->allocated;
1407 fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1411 /* Calculate the average page survival rate in terms of number of
/* Returns the total of per-page `survived' counters divided by the
   number of pages.  NOTE(review): the declarations of P and COUNT,
   and COUNT's accumulation, are in lines elided from this view --
   confirm against the full file.  */
1415 calculate_average_page_survival (struct alloc_zone *zone)
1418 float survival = 0.0;
1420 for (p = zone->pages; p; p = p->next)
1423 survival += p->survived;
1425 return survival/count;
1428 /* Check the magic cookies all of the chunks contain, to make sure we
1429 aren't doing anything stupid, like stomping on alloc_chunk
/* Debug-only consistency walk: compiled away entirely unless
   COOKIE_CHECKING is defined.  */
1433 check_cookies (void)
1435 #ifdef COOKIE_CHECKING
1437 struct alloc_zone *zone;
1439 for (zone = G.zones; zone; zone = zone->next_zone)
1441 for (p = zone->pages; p; p = p->next)
1445 struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
1446 struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
/* Every chunk header must carry either the live or the dead magic;
   anything else means the header was overwritten (the failure action
   is in an elided line -- presumably abort; TODO confirm).  */
1449 if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
1451 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1453 while (chunk < end);
1461 /* Top level collection routine. */
1466 struct alloc_zone *zone;
/* True once any ggc_collect_1 call has performed marking; later
   zones can then sweep without re-marking.  */
1467 bool marked = false;
1470 timevar_push (TV_GC);
1472 /* Start by possibly collecting the main zone. */
1473 main_zone.was_collected = false;
1474 marked |= ggc_collect_1 (&main_zone, true);
1475 /* In order to keep the number of collections down, we don't
1476 collect other zones unless we are collecting the main zone. This
1477 gives us roughly the same number of collections as we used to
1478 have with the old gc. The number of collection is important
1479 because our main slowdown (according to profiling) is now in
1480 marking. So if we mark twice as often as we used to, we'll be
1481 twice as slow. Hopefully we'll avoid this cost when we mark
1484 if (main_zone.was_collected)
/* Each secondary zone only re-marks if no earlier zone already did
   (hence the !marked argument).  */
1487 rtl_zone->was_collected = false;
1488 marked |= ggc_collect_1 (rtl_zone, !marked);
1490 tree_zone->was_collected = false;
1491 marked |= ggc_collect_1 (tree_zone, !marked);
1493 garbage_zone->was_collected = false;
1494 marked |= ggc_collect_1 (garbage_zone, !marked);
1497 /* Print page survival stats, if someone wants them. */
1498 if (GGC_DEBUG_LEVEL >= 2)
1500 if (rtl_zone->was_collected)
1502 f = calculate_average_page_survival (rtl_zone);
1503 printf ("Average RTL page survival is %f\n", f);
1505 if (main_zone.was_collected)
1507 f = calculate_average_page_survival (&main_zone);
1508 printf ("Average main page survival is %f\n", f);
1510 if (tree_zone->was_collected)
1512 f = calculate_average_page_survival (tree_zone);
1513 printf ("Average tree page survival is %f\n", f);
1516 /* Since we don't mark zone at a time right now, marking in any
1517 zone means marking in every zone. So we have to clear all the
1518 marks in all the zones that weren't collected already. */
1522 for (zone = G.zones; zone; zone = zone->next_zone)
/* Collected zones had their marks consumed by sweep_pages; skip
   them (the `continue' is in an elided line -- TODO confirm).  */
1524 if (zone->was_collected)
1526 for (p = zone->pages; p; p = p->next)
1530 struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
1531 struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
1534 prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
/* Walk the chunks on a normal page, unmarking live ones; the mark
   clear itself is in elided lines -- confirm.  */
1535 if (chunk->mark || p->context_depth < zone->context_depth)
1538 p->bytes_free += chunk->size + CHUNK_OVERHEAD;
1539 #ifdef ENABLE_CHECKING
1540 if (p->bytes_free > p->bytes)
1545 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1547 while (chunk < end);
/* Presumably the large-page case: the whole page is one chunk, so
   reset it to fully free and clear its single mark -- confirm
   against the elided branch structure.  */
1551 p->bytes_free = p->bytes;
1552 ((struct alloc_chunk *)p->page)->mark = 0;
1557 timevar_pop (TV_GC);
1560 /* Print allocation statistics. */
1563 ggc_print_statistics (void)
1569 struct ggc_pch_ondisk
1578 /* Initialize the PCH datastructure. */
1580 struct ggc_pch_data *
/* (Function name line elided from this view.)  Returns a
   zero-initialized ggc_pch_data; ownership passes to the caller.
   NOTE(review): the xcalloc result is not checked here -- xcalloc
   presumably aborts on failure; confirm.  */
1583 return xcalloc (sizeof (struct ggc_pch_data), 1);
1586 /* Add the size of object X to the size of the PCH data. */
1589 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
1590 size_t size, bool is_string)
/* Non-string objects are stored with their alloc_chunk header, so
   CHUNK_OVERHEAD is counted too; the is_string branch is in elided
   lines -- TODO confirm it adds SIZE alone.  */
1594 d->d.total += size + CHUNK_OVERHEAD;
1600 /* Return the total size of the PCH data. */
1603 ggc_pch_total_size (struct ggc_pch_data *d)
1608 /* Set the base address for the objects in the PCH file. */
/* BASE is kept as an integer so ggc_pch_alloc_object can advance it
   by per-object byte counts.  */
1611 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
1613 d->base = (size_t) base;
1616 /* Allocate a place for object X of size SIZE in the PCH file. */
1619 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
1620 size_t size, bool is_string)
/* Reserve space at the current base; RESULT points at the chunk
   header slot, not the object data.  */
1623 result = (char *)d->base;
/* Non-string case (string branch elided from this view): X was
   allocated with an alloc_chunk header immediately before it.  */
1626 struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
/* A size field equal to LARGE_OBJECT_SIZE is a sentinel meaning the
   real size must come from ggc_get_size -- TODO confirm against the
   alloc_chunk declaration.  */
1627 if (chunk->size == LARGE_OBJECT_SIZE)
1628 d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
1630 d->base += chunk->size + CHUNK_OVERHEAD;
/* The object's new address is just past its header slot.  */
1631 return result + CHUNK_OVERHEAD;
1641 /* Prepare to write out the PCH data to file F. */
/* This allocator needs no setup before writing; the hook exists to
   satisfy the common PCH interface.  */
1644 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1645 FILE *f ATTRIBUTE_UNUSED)
1647 /* Nothing to do. */
1650 /* Write out object X of SIZE to file F. */
1653 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1654 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
1655 size_t size, bool is_string)
/* Non-string case (string branch below): emit the chunk header along
   with the payload so the reader sees a valid alloc_chunk.  */
1659 struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
1661 if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
1662 fatal_error ("can't write PCH file: %m");
/* Track bytes written so we can detect the final object below.  */
1663 d->written += size + CHUNK_OVERHEAD;
/* String case: raw payload only, no chunk header.  */
1667 if (fwrite (x, size, 1, f) != 1)
1668 fatal_error ("can't write PCH file: %m");
/* After the last object, pad the file out to a page boundary so the
   mapped image is page-aligned.  */
1671 if (d->written == d->d.total
1672 && fseek (f, ROUND_UP_VALUE (d->d.total, G.pagesize), SEEK_CUR) != 0)
1673 fatal_error ("can't write PCH file: %m");
/* Finish writing the PCH data: append the on-disk summary record
   (D->d, which holds the totals) after all objects.  */
1677 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
1679 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
1680 fatal_error ("can't write PCH file: %m");
1686 ggc_pch_read (FILE *f, void *addr)
1688 struct ggc_pch_ondisk d;
1689 struct page_entry *entry;
1691 if (fread (&d, sizeof (d), 1, f) != 1)
1692 fatal_error ("can't read PCH file: %m");
1693 entry = xcalloc (1, sizeof (struct page_entry));
1694 entry->bytes = d.total;
1696 entry->context_depth = 0;
1697 entry->zone = &main_zone;
1698 for (pte = entry->page;
1699 pte < entry->page + entry->bytes;
1701 set_page_table_entry (pte, entry);