1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
3 2010 Free Software Foundation, Inc.
5 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
6 (dberlin@dberlin.org). Rewritten by Daniel Jacobowitz
7 <dan@codesourcery.com>.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "ggc-internal.h"

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */

# undef HAVE_MMAP_DEV_ZERO

#ifdef HAVE_MMAP_DEV_ZERO

#error Zone collector requires mmap

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#define prefetchw(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#define prefetchw(X) __builtin_prefetch (X, 1, 3)
#endif
/* If we track inter-zone pointers, we can mark single zones at a
   time.

   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.

   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */

/* This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than page size.

   Pages for small objects are broken up into chunks.  The page has
   a bitmap which marks the start position of each chunk (whether
   allocated or free).  Free chunks are on one of the zone's free
   lists and contain a pointer to the next free chunk.  Chunks in
   most of the free lists have a fixed size determined by the
   free list.  Chunks in the "other" sized free list have their size
   stored right after their chain pointer.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  The free page list is currently per-zone.  */
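
/* For illustration (not part of the allocator): on a target where
   MAX_ALIGNMENT is 8, a 4096-byte small page is covered by
   4096 / 8 == 512 allocation bits, i.e. sixteen 32-bit alloc_type
   words.  A 24-byte object spans three allocation units, but only the
   bit for its first unit is set; its size is recovered by scanning
   for the next set bit.  */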
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
#endif

/* This structure manages small free chunks.  The SIZE field is only
   initialized if the chunk is in the "other" sized free list.  Large
   chunks are allocated one at a time to their own page, and so don't
   need a chunk structure.  */
struct alloc_chunk
{
  struct alloc_chunk *next_free;
  unsigned int size;
};

/* The size of the fixed-size portion of a small page descriptor.  */
#define PAGE_OVERHEAD (offsetof (struct small_page_entry, alloc_bits))
/* The collector's idea of the page size.  This must be a power of two
   no larger than the system page size, because pages must be aligned
   to this amount and are tracked at this granularity in the page
   table.  We choose a size at compile time for efficiency.

   We could make a better guess at compile time if PAGE_SIZE is a
   constant in system headers, and PAGE_SHIFT is defined...  */
#define GGC_PAGE_SIZE 4096
#define GGC_PAGE_MASK (GGC_PAGE_SIZE - 1)
#define GGC_PAGE_SHIFT 12

/* Alternative definitions which use the runtime page size.  */
#define GGC_PAGE_SIZE G.pagesize
#define GGC_PAGE_MASK G.page_mask
#define GGC_PAGE_SHIFT G.lg_pagesize

/* The size of a small page managed by the garbage collector.  This
   must currently be GGC_PAGE_SIZE, but with a few changes could
   be any multiple of it to reduce certain kinds of overhead.  */
#define SMALL_PAGE_SIZE GGC_PAGE_SIZE

/* Free bin information.  These numbers may be in need of re-tuning.
   In general, decreasing the number of free bins would seem to
   increase the time it takes to allocate...  */

/* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size

#define NUM_FREE_BINS 64
#define FREE_BIN_DELTA MAX_ALIGNMENT
#define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
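
/* For example (illustrative only, assuming FREE_BIN_DELTA == 8): a
   request rounded to 24 bytes maps to SIZE_BIN_DOWN (24) == 3, so
   free_chunks[3] holds 24-byte chunks; a chunk whose bin index
   exceeds NUM_FREE_BINS (here, anything over 512 bytes) is kept in
   the "other" list at slot 0 instead.  */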
/* Allocation and marking parameters.  */

/* The smallest allocatable unit to keep track of.  */
#define BYTES_PER_ALLOC_BIT MAX_ALIGNMENT

/* The smallest markable unit.  If we require each allocated object
   to contain at least two allocatable units, we can use half as many
   bits for the mark bitmap.  But this adds considerable complexity
   to sweeping.  */
#define BYTES_PER_MARK_BIT BYTES_PER_ALLOC_BIT

#define BYTES_PER_MARK_WORD (8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
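
/* Illustrative arithmetic (assuming 8-byte MAX_ALIGNMENT and a 32-bit
   mark_type): each mark word then covers 8 * 8 * 4 == 256 bytes, so a
   4096-byte page needs sixteen mark words.  */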
/* We use this structure to determine the alignment required for
   allocations.

   There are several things wrong with this estimation of alignment.

   The maximum alignment for a structure is often less than the
   maximum alignment for a basic data type; for instance, on some
   targets long long must be aligned to sizeof (int) in a structure
   and sizeof (long long) in a variable.  i386-linux is one example;
   Darwin is another (sometimes, depending on the compiler in use).

   Also, long double is not included.  Nothing in GCC uses long
   double, so we assume that this is OK.  On powerpc-darwin, adding
   long double would bring the maximum alignment up to 16 bytes,
   and until we need long double (or to vectorize compiler operations)
   that's painfully wasteful.  This will need to change, some day.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
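
/* The offsetof trick works because the compiler must pad the leading
   char up to the strictest alignment among the union's members; e.g.
   with an 8-byte double, offsetof (struct max_alignment, u) == 8.  */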
/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
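
/* E.g. ROUND_UP (10, 8) == CEIL (10, 8) * 8 == 2 * 8 == 16.  */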
/* Types to use for the allocation and mark bitmaps.  It might be
   a good idea to add ffsl to libiberty and use unsigned long
   instead; that could speed us up where long is wider than int.  */

typedef unsigned int alloc_type;
typedef unsigned int mark_type;
#define alloc_ffs(x) ffs(x)
/* A page_entry records the status of an allocation page.  This is the
   common data between all three kinds of pages - small, large, and
   PCH.  */
typedef struct page_entry
{
  /* The address at which the memory is allocated.  */
  char *page;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;

#ifdef GATHER_STATISTICS
  /* How many collections we've survived.  */
  size_t survived;
#endif

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* Is this page part of the loaded PCH?  */
  bool pch_p;
} page_entry;

/* Additional data needed for small pages.  */
struct small_page_entry
{
  struct page_entry common;

  /* The next small page entry, or NULL if this is the last.  */
  struct small_page_entry *next;

  /* If currently marking this zone, a pointer to the mark bits
     for this page.  If we aren't currently marking this zone,
     this pointer may be stale (pointing to freed memory).  */
  mark_type *mark_bits;

  /* The allocation bitmap.  This array extends far enough to have
     one bit for every BYTES_PER_ALLOC_BIT bytes in the page.  */
  alloc_type alloc_bits[1];
};

/* Additional data needed for large pages.  */
struct large_page_entry
{
  struct page_entry common;

  /* The next large page entry, or NULL if this is the last.  */
  struct large_page_entry *next;

  /* The number of bytes allocated, not including the page entry.  */
  size_t bytes;

  /* The previous page in the list, so that we can unlink this one.  */
  struct large_page_entry *prev;

  /* During marking, is this object marked?  */
  bool mark_p;
};
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

   msb +----------------+----+------+------+ lsb

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS (8)
#define PAGE_L2_BITS (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
#define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
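
/* Worked example (assuming 32-bit pointers and GGC_PAGE_SHIFT == 12,
   so PAGE_L2_BITS == 12): for p == 0xdeadb000, LOOKUP_L1 (p) yields
   bits 31-24 (0xde) and LOOKUP_L2 (p) yields bits 23-12 (0xadb), so
   the page entry lives at base[0xde][0xadb].  */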
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size, and related constants.  */
  size_t pagesize;
  size_t lg_pagesize;
  size_t page_mask;

  /* The size to allocate for a small page entry.  This includes
     the size of the structure and the size of the allocation
     bitmap.  */
  size_t small_page_overhead;

#if defined (HAVE_MMAP_DEV_ZERO)
  /* A file descriptor open to /dev/zero for reading.  */
  int dev_zero_fd;
#endif

  /* Allocate pages in chunks of this size, to throttle calls to memory
     allocation routines.  The first page is used, the rest go onto the
     free list.  */
  size_t quire_size;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* A zone allocation structure.  There is one of these for every
   distinct allocation zone.  */
struct alloc_zone
{
  /* The most recent free chunk is saved here, instead of in the linked
     free list, to decrease list manipulation.  It is most likely that we
     will want this one.  */
  char *cached_free;
  size_t cached_free_size;

  /* Linked lists of free storage.  Slot I (1 <= I <= NUM_FREE_BINS) holds
     chunks of size I * FREE_BIN_DELTA.  All other chunks are in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* The highest bin index which might be non-empty.  It may turn out
     to be empty, in which case we have to search downwards.  */
  size_t high_free_bin;

  /* Bytes currently allocated in this zone.  */
  size_t allocated;

  /* Linked list of the small pages in this zone.  */
  struct small_page_entry *pages;

  /* Doubly linked list of large pages in this zone.  */
  struct large_page_entry *large_pages;

  /* If we are currently marking this zone, a pointer to the mark bits.  */
  mark_type *mark_bits;

  /* Name of the zone.  */
  const char *name;

  /* The number of small pages currently allocated in this zone.  */
  size_t n_small_pages;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* A cache of free system pages.  */
  struct small_page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
  } stats;
#endif
} main_zone;
/* Some default zones.  */
struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;

/* The PCH zone does not need a normal zone structure, and it does
   not live on the linked list of zones.  */
struct pch_zone
{
  /* The start of the PCH zone.  NULL if there is none.  */
  char *page;

  /* The end of the PCH zone.  NULL if there is none.  */
  char *end;

  /* The size of the PCH zone.  0 if there is none.  */
  size_t bytes;

  /* The allocation bitmap for the PCH zone.  */
  alloc_type *alloc_bits;

  /* If we are currently marking, the mark bitmap for the PCH zone.
     When it is first read in, we could avoid marking the PCH,
     because it will not contain any pointers to GC memory outside
     of the PCH; however, the PCH is currently mapped as writable,
     so we must mark it in case new pointers are added.  */
  mark_type *mark_bits;
} pch_zone;
static char *alloc_anon (char *, size_t, struct alloc_zone *);
static struct small_page_entry * alloc_small_page (struct alloc_zone *);
static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (char *, size_t, struct alloc_zone *);
static void free_small_page (struct small_page_entry *);
static void free_large_page (struct large_page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void new_ggc_zone_1 (struct alloc_zone *, const char *);
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Return NULL if the object wasn't allocated via the GC.  */

static inline page_entry *
lookup_page_table_if_allocated (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return NULL;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  if (base[L1] == NULL)
    return NULL;

  L2 = LOOKUP_L2 (p);
  if (L2 >= PAGE_L2_SIZE)
    return NULL;
  /* We might have a page entry which does not correspond exactly to a
     system page.  */
  if (base[L1][L2] && (const char *) p < base[L1][L2]->page)
    return NULL;

  return base[L1][L2];
}
/* Set the page table entry for the page that starts at P.  If ENTRY
   is NULL, clear the entry.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}
/* Find the page table entry associated with OBJECT.  */

static inline struct page_entry *
zone_get_object_page (const void *object)
{
  return lookup_page_table_entry (object);
}

/* Find which element of the alloc_bits array OBJECT should be
   recorded in.  */
static inline unsigned int
zone_get_object_alloc_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
          / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
}

/* Find which bit of the appropriate word in the alloc_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_alloc_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_ALLOC_BIT)
          % (8 * sizeof (alloc_type)));
}
/* Find which element of the mark_bits array OBJECT should be recorded
   in.  */
static inline unsigned int
zone_get_object_mark_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
          / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
}

/* Find which bit of the appropriate word in the mark_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_mark_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_MARK_BIT)
          % (8 * sizeof (mark_type)));
}
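
/* Worked example (assuming BYTES_PER_ALLOC_BIT == 8 and a 32-bit
   alloc_type): an object at page offset 608 is allocation unit
   608 / 8 == 76, so its bit lives in word 76 / 32 == 2, at bit
   position 76 % 32 == 12.  */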
/* Set the allocation bit corresponding to OBJECT in its page's
   bitmap.  Used to split this object from the preceding one.  */
static inline void
zone_set_object_alloc_bit (const void *object)
{
  struct small_page_entry *page
    = (struct small_page_entry *) zone_get_object_page (object);
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  page->alloc_bits[start_word] |= 1L << start_bit;
}

/* Clear the allocation bit corresponding to OBJECT in PAGE's
   bitmap.  Used to coalesce this object with the preceding
   one.  */
static inline void
zone_clear_object_alloc_bit (struct small_page_entry *page,
                             const void *object)
{
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  /* Would xor be quicker?  */
  page->alloc_bits[start_word] &= ~(1L << start_bit);
}
/* Find the size of the object which starts at START_WORD and
   START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
   Helper function for ggc_get_size and zone_find_object_size.  */

static inline size_t
zone_object_size_1 (alloc_type *alloc_bits,
                    size_t start_word, size_t start_bit,
                    size_t max_size)
{
  size_t size;
  alloc_type alloc_word;
  int indx;

  /* Load the first word.  */
  alloc_word = alloc_bits[start_word++];

  /* If that was the last bit in this word, we'll want to continue
     with the next word.  Otherwise, handle the rest of this word.  */
  if (start_bit)
    {
      indx = alloc_ffs (alloc_word >> start_bit);
      if (indx)
        /* indx is 1-based.  We started at the bit after the object's
           start, but we also ended at the bit after the object's end.
           It cancels out.  */
        return indx * BYTES_PER_ALLOC_BIT;

      /* The extra 1 accounts for the starting unit, before start_bit.  */
      size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;

      if (size >= max_size)
        return max_size;

      alloc_word = alloc_bits[start_word++];
    }
  else
    size = BYTES_PER_ALLOC_BIT;

  while (alloc_word == 0)
    {
      size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
      if (size >= max_size)
        return max_size;
      alloc_word = alloc_bits[start_word++];
    }

  indx = alloc_ffs (alloc_word);
  return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
}
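
/* Worked example (assuming BYTES_PER_ALLOC_BIT == 8): for a 24-byte
   object starting at allocation unit 4, the caller passes the bit
   after the object's start (bit 5); the next object's bit is at unit
   7, so alloc_ffs (alloc_word >> 5) returns 3 and the size is
   3 * 8 == 24 bytes.  */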
/* Find the size of OBJECT on small page PAGE.  */

static inline size_t
zone_find_object_size (struct small_page_entry *page,
                       const void *object)
{
  const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
  unsigned int start_word = zone_get_object_alloc_word (object_midptr);
  unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
  size_t max_size = (page->common.page + SMALL_PAGE_SIZE
                     - (const char *) object);

  return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
                             max_size);
}
/* highest_bit assumes that alloc_type is 32 bits.  */
extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1];

/* Find the highest set bit in VALUE.  Returns the bit number of that
   bit, using the same values as ffs.  */
static inline alloc_type
highest_bit (alloc_type value)
{
  /* This also assumes that alloc_type is unsigned.  */
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  value = value ^ (value >> 1);
  return alloc_ffs (value);
}
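
/* E.g. for value == 0x28 (binary 101000): the shifts smear it to
   0x3f, the xor with its half leaves 0x20, and alloc_ffs (0x20) == 6,
   the ffs-style index of the highest set bit.  */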
/* Find the offset from the start of an object to P, which may point
   into the interior of the object.  */

static unsigned long
zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
                         size_t start_bit)
{
  unsigned int offset_in_bits;
  alloc_type alloc_word = alloc_bits[start_word];

  /* Mask off any bits after the initial bit, but make sure to include
     the initial bit in the result.  Note that START_BIT is
     0-based.  */
  if (start_bit < 8 * sizeof (alloc_type) - 1)
    alloc_word &= (1 << (start_bit + 1)) - 1;
  offset_in_bits = start_bit;

  /* Search for the start of the object.  */
  while (alloc_word == 0 && start_word > 0)
    {
      alloc_word = alloc_bits[--start_word];
      offset_in_bits += 8 * sizeof (alloc_type);
    }
  /* We must always find a set bit.  */
  gcc_assert (alloc_word != 0);
  /* Note that the result of highest_bit is 1-based.  */
  offset_in_bits -= highest_bit (alloc_word) - 1;

  return BYTES_PER_ALLOC_BIT * offset_in_bits;
}
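
/* Worked example: if P falls in allocation unit 76 but the owning
   object starts at unit 72 (bit 8 of word 2), the backward scan stops
   in word 2, highest_bit returns 9, and the result is
   (76 - 72) * BYTES_PER_ALLOC_BIT bytes into the object.  */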
/* Allocate the mark bits for every zone, and set the pointers on each
   page.  */
static void
zone_allocate_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page;
      mark_type *cur_marks;
      size_t mark_words, mark_words_per_page;
#ifdef ENABLE_CHECKING
      size_t n = 0;
#endif

      mark_words_per_page
        = (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
      mark_words = zone->n_small_pages * mark_words_per_page;
      zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
                                               mark_words);
      cur_marks = zone->mark_bits;
      for (page = zone->pages; page; page = page->next)
        {
          page->mark_bits = cur_marks;
          cur_marks += mark_words_per_page;
#ifdef ENABLE_CHECKING
          n++;
#endif
        }
      gcc_checking_assert (n == zone->n_small_pages);
    }

  /* We don't collect the PCH zone, but we do have to mark it
     as well.  */
  if (pch_zone.bytes)
    pch_zone.mark_bits
      = (mark_type *) xcalloc (sizeof (mark_type),
                               CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
}

/* After marking and sweeping, release the memory used for mark bits.  */
static void
zone_free_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    if (zone->mark_bits)
      {
        free (zone->mark_bits);
        zone->mark_bits = NULL;
      }

  if (pch_zone.bytes)
    {
      free (pch_zone.mark_bits);
      pch_zone.mark_bits = NULL;
    }
}
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  */

static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct small_page_entry *entry;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct small_page_entry *e, *f = zone->free_pages;
      int i;
      char *page;

      page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = G.quire_size - 1; i >= 1; i--)
        {
          e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
          e->common.page = page + (i << GGC_PAGE_SHIFT);
          e->common.zone = zone;
          e->next = f;
          f = e;
          set_page_table_entry (e->common.page, &e->common);
        }

      zone->free_pages = f;

      entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
      entry->common.page = page;
      entry->common.zone = zone;
      set_page_table_entry (page, &entry->common);
    }

  zone->n_small_pages++;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry, entry->common.page,
             entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}
/* Allocate a large page of size SIZE in ZONE.  */

static struct large_page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct large_page_entry *entry;
  char *page;
  size_t needed_size;

  needed_size = size + sizeof (struct large_page_entry);
  page = XNEWVAR (char, needed_size);

  entry = (struct large_page_entry *) page;

  entry->common.page = page + sizeof (struct large_page_entry);
  entry->common.large_p = true;
  entry->common.pch_p = false;
  entry->common.zone = zone;
#ifdef GATHER_STATISTICS
  entry->common.survived = 0;
#endif
  entry->mark_p = false;
  entry->bytes = size;

  set_page_table_entry (entry->common.page, &entry->common);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s large page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry, entry->common.page,
             entry->common.page + size - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static void
free_small_page (struct small_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry,
             entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (!entry->common.large_p);

  /* Mark the page as inaccessible.  Discard the handle to
     avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
                                                SMALL_PAGE_SIZE));

  entry->next = entry->common.zone->free_pages;
  entry->common.zone->free_pages = entry;
  entry->common.zone->n_small_pages--;
}
/* Release a large page that is no longer needed.  */

static void
free_large_page (struct large_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s large page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry,
             entry->common.page, entry->common.page + entry->bytes - 1);

  gcc_assert (entry->common.large_p);

  set_page_table_entry (entry->common.page, NULL);
  free (entry);
}
/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  struct small_page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->common.page;
      next = p->next;
      len = SMALL_PAGE_SIZE;
      set_page_table_entry (p->common.page, NULL);
      p = next;

      while (p && p->common.page == start + len)
        {
          next = p->next;
          len += SMALL_PAGE_SIZE;
          set_page_table_entry (p->common.page, NULL);
          p = next;
        }

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}
/* Place the block at PTR of size SIZE on the free list for ZONE.  */

static void
free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
{
  struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
  size_t bin;

  bin = SIZE_BIN_DOWN (size);
  gcc_assert (bin != 0);
  if (bin > NUM_FREE_BINS)
    {
      bin = 0;
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
                                                     sizeof (struct alloc_chunk)));
      chunk->size = size;
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
                                                    + sizeof (struct alloc_chunk),
                                                    size
                                                    - sizeof (struct alloc_chunk)));
    }
  else
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
                                                     sizeof (struct alloc_chunk *)));
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
                                                    + sizeof (struct alloc_chunk *),
                                                    size
                                                    - sizeof (struct alloc_chunk *)));
    }

  zone->free_chunks[bin] = chunk;
  if (bin > zone->high_free_bin)
    zone->high_free_bin = bin;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
}
/* Allocate a chunk of memory of at least ORIG_SIZE bytes in ZONE.  */

void *
ggc_internal_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
                              MEM_STAT_DECL)
{
  size_t bin;
  size_t csize;
  struct small_page_entry *entry;
  struct alloc_chunk *chunk, **pp;
  void *result;
  size_t size = orig_size;

  /* Make sure that zero-sized allocations get a unique and freeable
     pointer.  */
  if (size == 0)
    size = MAX_ALIGNMENT;
  else
    size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  /* Try to allocate the object from several different sources.  Each
     of these cases is responsible for setting RESULT and SIZE to
     describe the allocated block, before jumping to FOUND.  If a
     chunk is split, the allocate bit for the new chunk should also be
     set.

     Large objects are handled specially.  However, they'll just fail
     the next couple of conditions, so we can wait to check for them
     below.  The large object case is relatively rare (< 1%), so this
     is a win.  */

  /* First try to split the last chunk we allocated.  For best
     fragmentation behavior it would be better to look for a
     free bin of the appropriate size for a small object.  However,
     we're unlikely (1% - 7%) to find one, and this gives better
     locality behavior anyway.  This case handles the lion's share
     of all calls to this function.  */
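
  /* Illustration (assuming FREE_BIN_DELTA == 8): if the cached free
     chunk holds 40 bytes and we need 24, we return its start, advance
     CACHED_FREE by 24, shrink CACHED_FREE_SIZE to 16, and set the
     allocation bit of the 16-byte remainder so its size can still be
     found.  */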
  if (size <= zone->cached_free_size)
    {
      result = zone->cached_free;

      zone->cached_free_size -= size;
      if (zone->cached_free_size)
        {
          zone->cached_free += size;
          zone_set_object_alloc_bit (zone->cached_free);
        }

      goto found;
    }

  /* Next, try to find a free bin of exactly the correct size.  */

  /* We want to round SIZE up, rather than down, but we know it's
     already aligned to at least FREE_BIN_DELTA, so we can just
     round down.  */
  bin = SIZE_BIN_DOWN (size);

  if (bin <= NUM_FREE_BINS
      && (chunk = zone->free_chunks[bin]) != NULL)
    {
      /* We have a chunk of the right size.  Pull it off the free list
         and use it.  */

      zone->free_chunks[bin] = chunk->next_free;

      /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
         == FREE_BIN_DELTA.  */

      result = (char *) chunk;

      /* The allocation bits are already set correctly.  HIGH_FREE_BIN
         may now be wrong, if this was the last chunk in the high bin.
         Rather than fixing it up now, wait until we need to search
         the free bins.  */

      goto found;
    }

  /* Next, if there wasn't a chunk of the ideal size, look for a chunk
     to split.  We can find one in the too-big bin, or in the largest
     sized bin with a chunk in it.  Try the largest normal-sized bin
     first.  */

  if (zone->high_free_bin > bin)
    {
      /* Find the highest numbered free bin.  It will be at or below
         the old HIGH_FREE_BIN.  */
      while (zone->high_free_bin > bin
             && zone->free_chunks[zone->high_free_bin] == NULL)
        zone->high_free_bin--;

      if (zone->high_free_bin > bin)
        {
          size_t tbin = zone->high_free_bin;
          chunk = zone->free_chunks[tbin];

          /* Remove the chunk from its previous bin.  */
          zone->free_chunks[tbin] = chunk->next_free;

          result = (char *) chunk;

          /* Save the rest of the chunk for future allocation.  */
          if (zone->cached_free_size)
            free_chunk (zone->cached_free, zone->cached_free_size, zone);

          chunk = (struct alloc_chunk *) ((char *) result + size);
          zone->cached_free = (char *) chunk;
          zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;

          /* Mark the new free chunk as an object, so that we can
             find the size of the newly allocated object.  */
          zone_set_object_alloc_bit (chunk);

          /* HIGH_FREE_BIN may now be wrong, if this was the last
             chunk in the high bin.  Rather than fixing it up now,
             wait until we need to search the free bins.  */

          goto found;
        }
    }
  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->next_free;
      chunk = *pp;
    }

  if (chunk)
    {
      /* Remove the chunk from its previous bin.  */
      *pp = chunk->next_free;

      result = (char *) chunk;

      /* Save the rest of the chunk for future allocation, if there's any
         left over.  */
      csize = chunk->size;
      if (csize > size)
        {
          if (zone->cached_free_size)
            free_chunk (zone->cached_free, zone->cached_free_size, zone);

          chunk = (struct alloc_chunk *) ((char *) result + size);
          zone->cached_free = (char *) chunk;
          zone->cached_free_size = csize - size;

          /* Mark the new free chunk as an object.  */
          zone_set_object_alloc_bit (chunk);
        }

      goto found;
    }

  /* Handle large allocations.  We could choose any threshold between
     GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
     GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
     be guaranteed to have a unique entry in the lookup table.  Large
     allocations will always fall through to here.  */
  if (size > GGC_PAGE_SIZE)
    {
      struct large_page_entry *entry = alloc_large_page (size, zone);

#ifdef GATHER_STATISTICS
      entry->common.survived = 0;
#endif

      entry->next = zone->large_pages;
      if (zone->large_pages)
        zone->large_pages->prev = entry;
      zone->large_pages = entry;

      result = entry->common.page;

      goto found;
    }

  /* Failing everything above, allocate a new small page.  */

  entry = alloc_small_page (zone);
  entry->next = zone->pages;
  zone->pages = entry;

  /* Mark the first chunk in the new page.  */
  entry->alloc_bits[0] = 1;

  result = entry->common.page;
  if (size < SMALL_PAGE_SIZE)
    {
      if (zone->cached_free_size)
        free_chunk (zone->cached_free, zone->cached_free_size, zone);

      zone->cached_free = (char *) result + size;
      zone->cached_free_size = SMALL_PAGE_SIZE - size;

      /* Mark the new free chunk as an object.  */
      zone_set_object_alloc_bit (zone->cached_free);
    }

 found:
  /* We could save TYPE in the chunk, but we don't use that for
     anything yet.  If we wanted to, we could do it by adding it
     either before the beginning of the chunk or after its end,
     and adjusting the size and pointer appropriately.  */

  /* We'll probably write to this after we return.  */
  prefetchw (result);

#ifdef ENABLE_GC_CHECKING
  /* `Poison' the entire allocated object.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
  memset (result, 0xaf, size);
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
                                                size - orig_size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     unaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

  timevar_ggc_mem_total += size;

#ifdef GATHER_STATISTICS
  ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);

  {
    size_t object_size = size;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
        zone->stats.total_overhead_under32 += overhead;
        zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
        zone->stats.total_overhead_under64 += overhead;
        zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
        zone->stats.total_overhead_under128 += overhead;
        zone->stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
             (unsigned long) size, result);

  return result;
}
#define ggc_internal_alloc_zone_pass_stat(s,z) \
    ggc_internal_alloc_zone_stat (s,z PASS_MEM_STAT)

void *
ggc_internal_cleared_alloc_zone_stat (size_t orig_size,
                                      struct alloc_zone *zone MEM_STAT_DECL)
{
  void * result = ggc_internal_alloc_zone_pass_stat (orig_size, zone);
  memset (result, 0, orig_size);
  return result;
}

/* Allocate SIZE bytes of GTE type memory, placed in an appropriate zone
   for that type.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
                      MEM_STAT_DECL)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_internal_alloc_zone_pass_stat (size, &tree_zone);

    case gt_ggc_e_7rtx_def:
      return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);

    case gt_ggc_e_9rtvec_def:
      return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);

    default:
      return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
    }
}

/* Normal GC allocation simply allocates into the main zone.  */

void *
ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
  return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
}

/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_region(PTR, SIZE)                                      \
  do {                                                                \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED ((PTR), (SIZE)));   \
    memset ((PTR), 0xa5, (SIZE));                                     \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((PTR), (SIZE)));    \
  } while (0)
#else
#define poison_region(PTR, SIZE)
#endif
/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct page_entry *page;

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  poison_region (p, ggc_get_size (p));

  page = zone_get_object_page (p);

  if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      /* Remove the page from the linked list.  */
      if (large_page->prev)
        large_page->prev->next = large_page->next;
      else
        {
          gcc_assert (large_page->common.zone->large_pages == large_page);
          large_page->common.zone->large_pages = large_page->next;
        }
      if (large_page->next)
        large_page->next->prev = large_page->prev;

      large_page->common.zone->allocated -= large_page->bytes;

      /* Release the memory associated with this object.  */
      free_large_page (large_page);
    }
  else if (page->pch_p)
    /* Don't do anything.  We won't allocate a new object from the
       PCH zone so there's no point in releasing anything.  */
    ;
  else
    {
      size_t size = ggc_get_size (p);

      page->zone->allocated -= size;

      /* Add the chunk to the free list.  We don't bother with coalescing,
         since we are likely to want a chunk of this size again.  */
      free_chunk ((char *) p, size, page->zone);
    }
}
/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned long offset;

  if (!p)
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_if_allocated (p);
  if (! entry)
    return;

  if (entry->pch_p)
    {
      size_t alloc_word, alloc_bit, t;
      t = ((const char *) p - pch_zone.page) / BYTES_PER_ALLOC_BIT;
      alloc_word = t / (8 * sizeof (alloc_type));
      alloc_bit = t % (8 * sizeof (alloc_type));
      offset = zone_find_object_offset (pch_zone.alloc_bits, alloc_word,
                                        alloc_bit);
    }
  else if (entry->large_p)
    {
      struct large_page_entry *le = (struct large_page_entry *) entry;
      offset = ((const char *) p) - entry->page;
      gcc_assert (offset < le->bytes);
    }
  else
    {
      struct small_page_entry *se = (struct small_page_entry *) entry;
      unsigned int start_word = zone_get_object_alloc_word (p);
      unsigned int start_bit = zone_get_object_alloc_bit (p);
      offset = zone_find_object_offset (se->alloc_bits, start_word, start_bit);

      /* On some platforms a char* will not necessarily line up on an
         allocation boundary, so we have to update the offset to
         account for the leftover bytes.  */
      offset += (size_t) p % BYTES_PER_ALLOC_BIT;
    }

  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
         of an allocated object.  We assume it points to the middle of
         a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  /* Inefficient, but also unlikely to matter.  */
  ggc_set_mark (p);
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
        return 1;
      pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
    }
  else if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      if (large_page->mark_p)
        return 1;
      large_page->mark_p = true;
    }
  else
    {
      struct small_page_entry *small_page
        = (struct small_page_entry *) page;

      if (small_page->mark_bits[zone_get_object_mark_word (p)]
          & (1 << zone_get_object_mark_bit (p)))
        return 1;
      small_page->mark_bits[zone_get_object_mark_word (p)]
        |= (1 << zone_get_object_mark_bit (p));
    }

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
    }

  if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      return large_page->mark_p;
    }
  else
    {
      struct small_page_entry *small_page
        = (struct small_page_entry *) page;

      return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
                   & (1 << zone_get_object_mark_bit (p)));
    }
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t alloc_word, alloc_bit, offset, max_size;
      offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
      alloc_word = offset / (8 * sizeof (alloc_type));
      alloc_bit = offset % (8 * sizeof (alloc_type));
      max_size = pch_zone.bytes - (ptr - pch_zone.page);
      return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
                                 max_size);
    }

  if (page->large_p)
    return ((struct large_page_entry *) page)->bytes;

  return zone_find_object_size ((struct small_page_entry *) page, p);
}
/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
     a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
     the current assumptions to hold.  */

  gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);

  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  new_ggc_zone_1 (&rtl_zone, "RTL zone");
  new_ggc_zone_1 (&tree_zone, "Tree zone");
  new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);
  G.page_mask = ~(G.pagesize - 1);

  /* Require the system page size to be a multiple of GGC_PAGE_SIZE.  */
  gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);

  /* Allocate 16 system pages at a time.  */
  G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;

  /* Calculate the size of the allocation bitmap and other overhead.  */
  /* Right now we allocate bits for the page header and bitmap.  These
     are wasted, but a little tricky to eliminate.  */
  G.small_page_overhead
    = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
  /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */
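
  /* Illustrative arithmetic (assuming 8-byte MAX_ALIGNMENT): the
     bitmap needs GGC_PAGE_SIZE / 8 == 512 bits == 64 bytes, so each
     small page descriptor is the fixed header plus 64 bytes.  */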
#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  gcc_assert (G.dev_zero_fd != -1);
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct small_page_entry *e;
    if ((size_t) p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize, &main_zone);
        gcc_assert (!((size_t) p & (G.pagesize - 1)));
      }

    if (GGC_PAGE_SIZE == G.pagesize)
      {
        /* We have a good page, might as well hold onto it...  */
        e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
        e->common.page = p;
        e->common.zone = &main_zone;
        e->next = main_zone.free_pages;
        set_page_table_entry (e->common.page, &e->common);
        main_zone.free_pages = e;
      }
    else
      munmap (p, G.pagesize);
  }
}
/* Start a new GGC zone.  */

static void
new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
{
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
}
/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  struct large_page_entry **lpp, *lp, *lnext;
  struct small_page_entry **spp, *sp, *snext;
  char *last_free;
  size_t allocated = 0;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  zone->high_free_bin = 0;
  zone->cached_free = NULL;
  zone->cached_free_size = 0;

  /* Large pages are all-or-none affairs.  Either they are completely
     empty, or they are completely full.  */
  lpp = &zone->large_pages;
  for (lp = zone->large_pages; lp != NULL; lp = lnext)
    {
      gcc_assert (lp->common.large_p);

      lnext = lp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      lp->common.survived++;
#endif

      if (lp->mark_p)
        {
          lp->mark_p = false;
          allocated += lp->bytes;
          lpp = &lp->next;
        }
      else
        {
          *lpp = lnext;
#ifdef ENABLE_GC_CHECKING
          /* Poison the page.  */
          memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
          if (lp->prev)
            lp->prev->next = lp->next;
          if (lp->next)
            lp->next->prev = lp->prev;
          free_large_page (lp);
        }
    }

  spp = &zone->pages;
  for (sp = zone->pages; sp != NULL; sp = snext)
    {
      char *object, *last_object;
      char *end;
      alloc_type *alloc_word_p;
      mark_type *mark_word_p;
      bool nomarksinpage;

      gcc_assert (!sp->common.large_p);

      snext = sp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      sp->common.survived++;
#endif

      /* Step through all chunks, consolidate those that are free and
         insert them into the free lists.  Note that consolidation
         slows down collection slightly.  */

      last_object = object = sp->common.page;
      end = sp->common.page + SMALL_PAGE_SIZE;
      last_free = NULL;

      nomarksinpage = true;
      mark_word_p = sp->mark_bits;
      alloc_word_p = sp->alloc_bits;

      gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);

      object = sp->common.page;
      do
        {
          unsigned int i, n;
          alloc_type alloc_word;
          mark_type mark_word;

          alloc_word = *alloc_word_p++;
          mark_word = *mark_word_p++;

          if (mark_word)
            nomarksinpage = false;

          /* There ought to be some way to do this without looping...  */
          i = 0;
          while ((n = alloc_ffs (alloc_word)) != 0)
            {
              /* Extend the current state for n - 1 bits.  We can't
                 shift alloc_word by n, even though it isn't used in the
                 loop, in case only the highest bit was set.  */
              alloc_word >>= n - 1;
              mark_word >>= n - 1;
              object += BYTES_PER_MARK_BIT * (n - 1);

              if (mark_word & 1)
                {
                  if (last_free)
                    {
                      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
                                                                     object
                                                                     - last_free));
                      poison_region (last_free, object - last_free);
                      free_chunk (last_free, object - last_free, zone);
                      last_free = NULL;
                    }
                  else
                    allocated += object - last_object;
                  last_object = object;
                }
              else
                {
                  if (last_free == NULL)
                    {
                      last_free = object;
                      allocated += object - last_object;
                    }
                  else
                    zone_clear_object_alloc_bit (sp, object);
                }

              /* Shift to just after the alloc bit we handled.  */
              alloc_word >>= 1;
              mark_word >>= 1;
              object += BYTES_PER_MARK_BIT;

              i += n;
            }

          object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
        }
      while (object < end);

      if (nomarksinpage)
        {
          *spp = snext;
#ifdef ENABLE_GC_CHECKING
          VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
                                                         SMALL_PAGE_SIZE));
          /* Poison the page.  */
          memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
          free_small_page (sp);
          continue;
        }
      else if (last_free)
        {
          VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
                                                         object - last_free));
          poison_region (last_free, object - last_free);
          free_chunk (last_free, object - last_free, zone);
        }
      else
        allocated += object - last_object;

      spp = &sp->next;
    }
  zone->allocated = allocated;
}

/* mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */

static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
  {
    int i;
    for (i = 0; i < NUM_FREE_BINS + 1; i++)
      {
        struct alloc_chunk *chunk;
        int n, tot;

        n = 0;
        tot = 0;
        chunk = zone->free_chunks[i];
        while (chunk)
          {
            n++;
            tot += chunk->size;
            chunk = chunk->next_free;
          }
        fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
                 i, n, tot);
      }
  }

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
             zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  if (need_marking)
    {
      zone_allocate_marks ();
      ggc_mark_roots ();
#ifdef GATHER_STATISTICS
      ggc_prune_overhead_list ();
#endif
    }

  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}
#ifdef GATHER_STATISTICS
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  struct small_page_entry *p;
  struct large_page_entry *lp;
  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->common.survived;
    }
  for (lp = zone->large_pages; lp; lp = lp->next)
    {
      count += 1.0;
      survival += lp->common.survived;
    }
  return survival / count;
}
#endif
/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;

  timevar_push (TV_GC);

  if (!ggc_force_collect)
    {
      float allocated_last_gc = 0, allocated = 0, min_expand;

      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          allocated_last_gc += zone->allocated_last_gc;
          allocated += zone->allocated;
        }

      allocated_last_gc =
        MAX (allocated_last_gc,
             (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
      min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

      if (allocated < allocated_last_gc + min_expand)
        {
          timevar_pop (TV_GC);
          return;
        }
    }

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collections is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     less often.  */

  /* NOTE drow/2004-07-28: We now always collect the main zone, but
     keep this code in case the heuristics are further refined.  */
  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
        {
          zone->was_collected = false;
          marked |= ggc_collect_1 (zone, !marked);
        }
    }

#ifdef GATHER_STATISTICS
  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          if (zone->was_collected)
            {
              float f = calculate_average_page_survival (zone);
              printf ("Average page survival in zone `%s' is %f\n",
                      zone->name, f);
            }
        }
    }
#endif

  if (marked)
    zone_free_marks ();

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
        {
          struct alloc_zone *dead_zone = zone->next_zone;

          printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

          /* The zone must be empty.  */
          gcc_assert (!dead_zone->allocated);

          /* Unchain the dead zone, release all its pages and free it.  */
          zone->next_zone = zone->next_zone->next_zone;
          release_pages (dead_zone);
          free (dead_zone);
        }
    }

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
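
/* E.g. SCALE (5000000) == 4882 and LABEL (5000000) == 'k': values
   under 10k print in bytes, under 10M in kibibytes, and larger ones
   in mebibytes.  */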
void
ggc_print_statistics (void)
{
  struct alloc_zone *zone;
  struct ggc_statistics stats;
  size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
  size_t pte_overhead, i;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  ggc_force_collect = true;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  ggc_force_collect = false;

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  for (zone = G.zones; zone; zone = zone->next_zone)
    release_pages (zone);

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");

  fprintf (stderr, "%20s %10s %10s %10s\n",
           "Zone", "Allocated", "Used", "Overhead");
  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct large_page_entry *large_page;
      size_t overhead, allocated, in_use;

      /* Skip empty zones.  */
      if (!zone->pages && !zone->large_pages)
        continue;

      allocated = in_use = 0;

      overhead = sizeof (struct alloc_zone);

      for (large_page = zone->large_pages; large_page != NULL;
           large_page = large_page->next)
        {
          allocated += large_page->bytes;
          in_use += large_page->bytes;
          overhead += sizeof (struct large_page_entry);
        }

      /* There's no easy way to walk through the small pages finding
         used and unused objects.  Instead, add all the pages, and
         subtract out the free list.  */

      allocated += GGC_PAGE_SIZE * zone->n_small_pages;
      in_use += GGC_PAGE_SIZE * zone->n_small_pages;
      overhead += G.small_page_overhead * zone->n_small_pages;

      for (i = 0; i <= NUM_FREE_BINS; i++)
        {
          struct alloc_chunk *chunk = zone->free_chunks[i];
          while (chunk)
            {
              in_use -= ggc_get_size (chunk);
              chunk = chunk->next_free;
            }
        }

      fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
               zone->name,
               SCALE (allocated), LABEL (allocated),
               SCALE (in_use), LABEL (in_use),
               SCALE (overhead), LABEL (overhead));

      gcc_assert (in_use == zone->allocated);

      total_overhead += overhead;
      total_allocated += zone->allocated;
      total_bytes_mapped += zone->bytes_mapped;
    }

  /* Count the size of the page table as best we can.  */
#if HOST_BITS_PER_PTR <= 32
  pte_overhead = sizeof (G.lookup);
  for (i = 0; i < PAGE_L1_SIZE; i++)
    if (G.lookup[i])
      pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
#else
  {
    page_table table = G.lookup;
    pte_overhead = 0;
    while (table)
      {
        pte_overhead += sizeof (*table);
        for (i = 0; i < PAGE_L1_SIZE; i++)
          if (table->table[i])
            pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
        table = table->next;
      }
  }
#endif
  fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
           "", "", SCALE (pte_overhead), LABEL (pte_overhead));
  total_overhead += pte_overhead;

  fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
           SCALE (total_allocated), LABEL (total_allocated),
           SCALE (total_overhead), LABEL (total_overhead));
#ifdef GATHER_STATISTICS
  {
    unsigned long long all_overhead = 0, all_allocated = 0;
    unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
    unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
    unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;

    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    for (zone = G.zones; zone; zone = zone->next_zone)
      {
        all_overhead += zone->stats.total_overhead;
        all_allocated += zone->stats.total_allocated;

        all_allocated_under32 += zone->stats.total_allocated_under32;
        all_overhead_under32 += zone->stats.total_overhead_under32;

        all_allocated_under64 += zone->stats.total_allocated_under64;
        all_overhead_under64 += zone->stats.total_overhead_under64;

        all_allocated_under128 += zone->stats.total_allocated_under128;
        all_overhead_under128 += zone->stats.total_overhead_under128;

        fprintf (stderr, "%20s: %10lld\n",
                 zone->name, zone->stats.total_allocated);
      }

    fprintf (stderr, "\n");

    fprintf (stderr, "Total Overhead: %10lld\n",
             all_overhead);
    fprintf (stderr, "Total Allocated: %10lld\n",
             all_allocated);

    fprintf (stderr, "Total Overhead under 32B: %10lld\n",
             all_overhead_under32);
    fprintf (stderr, "Total Allocated under 32B: %10lld\n",
             all_allocated_under32);
    fprintf (stderr, "Total Overhead under 64B: %10lld\n",
             all_overhead_under64);
    fprintf (stderr, "Total Allocated under 64B: %10lld\n",
             all_allocated_under64);
    fprintf (stderr, "Total Overhead under 128B: %10lld\n",
             all_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10lld\n",
             all_allocated_under128);
  }
#endif
}
2253 /* Precompiled header support. */
2255 /* For precompiled headers, we sort objects based on their type. We
2256 also sort various objects into their own buckets; currently this
2257 covers strings and IDENTIFIER_NODE trees. The choices of how
2258 to sort buckets have not yet been tuned. */
2260 #define NUM_PCH_BUCKETS (gt_types_enum_last + 3)
2262 #define OTHER_BUCKET (gt_types_enum_last + 0)
2263 #define IDENTIFIER_BUCKET (gt_types_enum_last + 1)
2264 #define STRING_BUCKET (gt_types_enum_last + 2)
2266 struct ggc_pch_ondisk
2269 size_t type_totals[NUM_PCH_BUCKETS];
2274 struct ggc_pch_ondisk d;
2278 alloc_type *alloc_bits;
2279 size_t type_bases[NUM_PCH_BUCKETS];
2280 size_t start_offset;
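/* An illustrative sketch of the on-disk layout the routines below
   produce (offsets relative to the start of the GC data; the bucket
   contents are examples only):

     [objects, one page-aligned region per bucket, in bucket order:
      the per-type buckets first, then OTHER_BUCKET,
      IDENTIFIER_BUCKET and STRING_BUCKET]
     [allocation bitmap: one bit per BYTES_PER_ALLOC_BIT bytes,
      written by ggc_pch_finish at offset d.total]
     [struct ggc_pch_ondisk footer, written last]  */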
/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

/* Return which of the page-aligned buckets the object at X, with type
   TYPE, should be sorted into in the PCH.  Strings will have
   IS_STRING set and TYPE will be gt_types_enum_last.  Other objects
   of unknown type will also have TYPE equal to gt_types_enum_last.  */

static int
pch_bucket (void *x, enum gt_types_enum type,
	    bool is_string)
{
  /* Sort identifiers into their own bucket, to improve locality
     when searching the identifier hash table.  */
  if (type == gt_ggc_e_14lang_tree_node
      && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
    return IDENTIFIER_BUCKET;
  else if (type == gt_types_enum_last)
    {
      if (is_string)
	return STRING_BUCKET;
      return OTHER_BUCKET;
    }

  /* Everything else is sorted by its GTY type enumerator.  */
  return type;
}
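/* A sketch of the mapping above: an IDENTIFIER_NODE tree (type
   gt_ggc_e_14lang_tree_node) lands in IDENTIFIER_BUCKET; a string
   (TYPE == gt_types_enum_last with IS_STRING set) lands in
   STRING_BUCKET; any other object of unknown type lands in
   OTHER_BUCKET; and an object of a known GTY type is bucketed by its
   type enumerator.  */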
/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x,
		      size_t size, bool is_string, enum gt_types_enum type)
{
  /* NOTE: Right now we don't need to align up the size of any objects.
     Strings can be unaligned, and everything else is allocated to a
     MAX_ALIGNMENT boundary already.  */

  d->d.type_totals[pch_bucket (x, type, is_string)] += size;
}

/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t alloc_size, total_size;
  int i;

  /* Round each bucket up to a page boundary, so the buckets stay
     page-aligned when laid out back to back.  */
  total_size = 0;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
      total_size += d->d.type_totals[i];
    }
  d->d.total = total_size;

  /* Include the size of the allocation bitmap.  */
  alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
  d->alloc_size = alloc_size;

  return d->d.total + alloc_size;
}
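/* Worked example with made-up sizes: assuming GGC_PAGE_SIZE == 4096
   and BYTES_PER_ALLOC_BIT == 8, a bucket holding 5000 bytes of
   objects rounds up to 8192.  If the rounded buckets sum to 65536
   bytes, the bitmap needs CEIL (65536, 8 * 8) = 1024 bytes, already
   aligned for typical MAX_ALIGNMENT values, so the function returns
   65536 + 1024 = 66560.  */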
/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
{
  int i;
  size_t base = (size_t) base_;

  d->base = d->orig_base = base;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->type_bases[i] = base;
      base += d->d.type_totals[i];
    }

  if (d->alloc_bits == NULL)
    d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
}
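/* For instance, with BASE_ == (void *) 0x10000000 and rounded bucket
   totals of { 8192, 4096, 4096, ... }, the first three buckets start
   at 0x10000000, 0x10002000 and 0x10003000 (example addresses only).  */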
/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
		      size_t size, bool is_string,
		      enum gt_types_enum type)
{
  size_t alloc_word, alloc_bit;
  char *result;
  int bucket = pch_bucket (x, type, is_string);

  /* Record the start of the object in the allocation bitmap.  We
     can't assert that the allocation bit is previously clear, because
     strings may violate the invariant that they are at least
     BYTES_PER_ALLOC_BIT long.  This is harmless - ggc_get_size
     should not be called for strings.  */
  alloc_word = ((d->type_bases[bucket] - d->orig_base)
		/ (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
  alloc_bit = ((d->type_bases[bucket] - d->orig_base)
	       / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
  d->alloc_bits[alloc_word] |= 1L << alloc_bit;
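  /* For illustration (example parameters, not the real configuration):
     if the object starts 100 bytes past ORIG_BASE, BYTES_PER_ALLOC_BIT
     is 4, and alloc_type is 8 bits wide, the object's bit index is
     100 / 4 = 25, so ALLOC_WORD is 25 / 8 = 3 and ALLOC_BIT is
     25 % 8 = 1.  */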
  /* Place the object at the current pointer for this bucket.  */
  result = (char *) d->type_bases[bucket];
  d->type_bases[bucket] += size;
  return result;
}

/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d,
		       FILE *f)
{
  /* We seek around a lot while writing.  Record where the end
     of the padding in the PCH file is, so that we can
     locate each object's offset.  */
  d->start_offset = ftell (f);
}

/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d,
		      FILE *f, void *x, void *newx,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
    fatal_error ("can%'t seek PCH file: %m");

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");
}

/* Write out the remaining PCH data (the allocation bitmap and the
   footer) and release the PCH bookkeeping.  */

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  /* Write out the allocation bitmap.  */
  if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
    fatal_error ("can%'t seek PCH file: %m");

  if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  /* Done with the PCH, so write out our footer.  */
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  free (d->alloc_bits);
  free (d);
}

/* The PCH file from F has been mapped at ADDR.  Read in any
   additional data from the file and set up the GC state.  */

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  size_t alloc_size;
  struct alloc_zone *zone;
  struct page_entry *pch_page;
  char *p;

  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);

  pch_zone.bytes = d.total;
  pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
  pch_zone.page = (char *) addr;
  pch_zone.end = (char *) pch_zone.alloc_bits;
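  /* The mapped region now looks like this, addresses increasing:

	pch_zone.page ... pch_zone.end	  d.total bytes of objects
	pch_zone.alloc_bits ...		  the allocation bitmap

     pch_zone.end and pch_zone.alloc_bits coincide because
     ggc_pch_finish wrote the bitmap immediately after the objects.  */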
  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
#if 0 && defined GATHER_STATISTICS
  zone_allocate_marks ();
  ggc_prune_overhead_list ();
#endif

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page, *next_page;
      struct large_page_entry *large_page, *next_large_page;

      zone->allocated = 0;

      /* Clear the zone's free chunk list.  */
      memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
      zone->high_free_bin = 0;
      zone->cached_free = NULL;
      zone->cached_free_size = 0;

      /* Move all the small pages onto the free list.  */
      for (page = zone->pages; page != NULL; page = next_page)
	{
	  next_page = page->next;
	  memset (page->alloc_bits, 0,
		  G.small_page_overhead - PAGE_OVERHEAD);
	  free_small_page (page);
	}

      /* Discard all the large pages.  */
      for (large_page = zone->large_pages; large_page != NULL;
	   large_page = next_large_page)
	{
	  next_large_page = large_page->next;
	  free_large_page (large_page);
	}

      zone->pages = NULL;
      zone->large_pages = NULL;
    }

  /* Allocate the dummy page entry for the PCH, and set all pages
     mapped into the PCH to reference it.  */
  pch_page = XCNEW (struct page_entry);
  pch_page->page = pch_zone.page;
  pch_page->pch_p = true;

  for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
    set_page_table_entry (p, pch_page);
}
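/* An illustrative sketch, disabled and not part of the collector, of
   how a caller such as the PCH machinery in ggc-common.c might drive
   the write-side hooks above.  The object array, sizes, and base
   address are hypothetical; the real caller enumerates GC roots and
   chooses the map base itself.  */
#if 0
static void
example_pch_write (FILE *f, void **objs, size_t *sizes, size_t n)
{
  struct ggc_pch_data *d = init_ggc_pch ();
  size_t i;

  /* Pass 1: size each object so the buckets can be laid out.  */
  for (i = 0; i < n; i++)
    ggc_pch_count_object (d, objs[i], sizes[i], false, gt_types_enum_last);

  /* Reserve ggc_pch_total_size (d) bytes in the file and pick the
     address the data will be mapped at on read-in.  */
  ggc_pch_this_base (d, (void *) 0x10000000);

  /* Pass 2: assign each object its new address and write it out.  */
  ggc_pch_prepare_write (d, f);
  for (i = 0; i < n; i++)
    {
      char *newx = ggc_pch_alloc_object (d, objs[i], sizes[i], false,
					 gt_types_enum_last);
      ggc_pch_write_object (d, f, objs[i], newx, sizes[i], false);
    }

  /* Append the allocation bitmap and the footer.  */
  ggc_pch_finish (d, f);
}
#endif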