1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009,
3 2010, 2011 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "timevar.h"
#include "params.h"
#include "tree-flow.h"
#include "cfgloop.h"
#include "plugin.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */

#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
    && defined(USING_MMAP)
# define USING_MADVISE
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
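
/* Editorial illustration (not part of the allocator): how a request
   size maps to an order under the power-of-two scheme described
   above, ignoring the extra non-power-of-two orders defined further
   down.  The helper name is hypothetical, and the block is compiled
   out.  */
#if 0
static unsigned
example_order_for_size (size_t n)
{
  unsigned order = 2;             /* 2^2 == 4, the smallest object size.  */
  while (((size_t) 1 << order) < n)
    ++order;
  return order;                   /* e.g. n == 20 yields order 5 (32 bytes).  */
}
#endif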
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

				   HOST_PAGE_SIZE_BITS
			   32		|      |
       msb +----------------+----+------+------+ lsb
			    |	 |
			 PAGE_L1_BITS	|
				 |	|
			       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
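
/* Worked example for the lookup macros above: with 4K pages
   (G.lg_pagesize == 12) and PAGE_L1_BITS == 8, PAGE_L2_BITS is 12, so
   for p == 0xAABBCCDD we get L1 == 0xAA and L2 == 0xBBC.  A
   compiled-out sketch with those parameters hard-coded:  */
#if 0
static void
example_lookup_split (void)
{
  size_t p = 0xAABBCCDD;
  size_t L1 = (p >> 24) & 0xFF;   /* LOOKUP_L1: top 8 bits -> 0xAA.  */
  size_t L2 = (p >> 12) & 0xFFF;  /* LOOKUP_L2: next 12 bits -> 0xBBC.  */
  /* The low 12 bits (0xCDD) are the offset within the page.  */
  (void) L1; (void) L2;
}
#endif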
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
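
/* The identity behind OFFSET_TO_BIT: if Z == OBJECT_SIZE (ORDER) and
   OFFSET is a multiple of Z, then (OFFSET * M) >> S equals OFFSET / Z
   in 32-bit modular arithmetic.  A compiled-out sanity check for a
   hypothetical 24-byte order (M == 0xAAAAAAAB, S == 3; see
   compute_inverse below):  */
#if 0
static void
example_offset_to_bit (void)
{
  unsigned long offset = 48;            /* Third object on a 24-byte page.  */
  unsigned long mult = 0xAAAAAAABUL;    /* Inverse of 3 mod 2^32.  */
  unsigned long bit = ((offset * mult) & 0xffffffffUL) >> 3;
  /* bit == 2 == 48 / 24, with no division instruction.  */
  (void) bit;
}
#endif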
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */
static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};
/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
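
/* Concrete values for the rounding macros above, with F == 8:
   ROUND_UP_VALUE (20, 8) == 4 (since 20 + 4 == 24) and
   ROUND_UP (20, 8) == 24.  A compiled-out check:  */
#if 0
static void
example_rounding (void)
{
  gcc_assert (ROUND_UP_VALUE (20, 8) == 4);   /* 7 - (27 % 8) == 4.  */
  gcc_assert (ROUND_UP (20, 8) == 24);        /* CEIL (20, 8) * 8 == 24.  */
}
#endif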
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page? */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
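
/* Illustration of the in_use_p encoding just described: object N on a
   page lives at bit N % HOST_BITS_PER_LONG of word
   N / HOST_BITS_PER_LONG.  A compiled-out sketch of testing whether
   object N is allocated (the helper name is hypothetical):  */
#if 0
static int
example_object_allocated_p (page_entry *e, size_t n)
{
  size_t word = n / HOST_BITS_PER_LONG;
  size_t bit = n % HOST_BITS_PER_LONG;
  return (e->in_use_p[word] >> bit) & 1;
}
#endif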
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif
/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This array is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
#endif
} G;
/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128
static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
				  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))
/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
	return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;
  entry_size = ROUND_UP (entry_size, G.pagesize);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      if (p->discarded)
	G.bytes_mapped += p->bytes;
      p->discarded = false;

      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i, entries = GGC_QUIRE_SIZE;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
      if (page == NULL)
	{
	  page = alloc_anon(NULL, G.pagesize, true);
	  entries = 1;
	}

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = entries - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct page_entry, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  gcc_assert (tail_slop >= sizeof (page_group));
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = XCNEWVAR (struct page_entry, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}
/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}
/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
	 one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MADVISE
  page_entry *p, *start_p;
  char *start;
  size_t len;
  size_t mapped_len;
  page_entry *next, *prev, *newprev;
  size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;

  /* First free larger continuous areas to the OS.
     This allows other allocators to grab these areas if needed.
     This is only done on larger chunks to avoid fragmentation.
     This does not always work because the free_pages list is only
     approximately sorted. */

  p = G.free_pages;
  prev = NULL;
  while (p)
    {
      start = p->page;
      start_p = p;
      len = 0;
      mapped_len = 0;
      newprev = prev;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  if (!p->discarded)
	    mapped_len += p->bytes;
	  newprev = p;
	  p = p->next;
	}
      if (len >= free_unit)
	{
	  while (start_p != p)
	    {
	      next = start_p->next;
	      free (start_p);
	      start_p = next;
	    }
	  munmap (start, len);
	  if (prev)
	    prev->next = p;
	  else
	    G.free_pages = p;
	  G.bytes_mapped -= mapped_len;
	  continue;
	}
      prev = newprev;
    }

  /* Now give back the fragmented pages to the OS, but keep the address
     space to reuse it next time. */

  for (p = G.free_pages; p; )
    {
      if (p->discarded)
	{
	  p = p->next;
	  continue;
	}
      start = p->page;
      len = p->bytes;
      start_p = p;
      p = p->next;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  p = p->next;
	}
      /* Give the page back to the kernel, but don't free the mapping.
	 This avoids fragmentation in the virtual memory map of the
	 process.  Next time we can reuse it by just touching it. */
      madvise (start, len, MADV_DONTNEED);
      /* Don't count those pages as mapped to not touch the garbage collector
	 unnecessarily. */
      G.bytes_mapped -= len;
      while (start_p != p)
	{
	  start_p->discarded = true;
	  start_p = start_p->next;
	}
    }
#endif
#if defined(USING_MMAP) && !defined(USING_MADVISE)
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  next = p->next;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated, as well as the size
   order.  */

static void
ggc_round_alloc_size_1 (size_t requested_size,
			size_t *size_order,
			size_t *alloced_size)
{
  size_t order, object_size;

  if (requested_size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[requested_size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (requested_size > (object_size = OBJECT_SIZE (order)))
	order++;
    }

  if (size_order)
    *size_order = order;
  if (alloced_size)
    *alloced_size = object_size;
}

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size = 0;

  ggc_round_alloc_size_1 (requested_size, NULL, &size);
  return size;
}
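
/* Example of the two lookup paths above: before init_ggc rebuilds
   size_lookup, a 20-byte request maps to order 5 (32 bytes); once the
   extra orders are folded in, it may instead map to a closer-fitting
   extra order such as 24 bytes (when MAX_ALIGNMENT == 8).  A
   compiled-out usage sketch:  */
#if 0
static void
example_round (void)
{
  size_t got = ggc_round_alloc_size (20);
  /* got is OBJECT_SIZE (size_lookup[20]); 24 or 32 depending on the
     extra orders in effect.  */
  (void) got;
}
#endif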
/* Typed allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
		      MEM_STAT_DECL)
{
  return ggc_internal_alloc_stat (size PASS_MEM_STAT);
}
/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  ggc_round_alloc_size_1 (size, &order, &object_size);

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
	 way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
	push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
	 the only entry, then we must update the PREV pointer of the
	 ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;
      else
	entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
	 entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;

#if GCC_VERSION >= 3004
	  bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
#endif

	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
	 The new page at the head of the list will have NULL in
	 its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
#ifdef GATHER_STATISTICS
  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
		       result PASS_MEM_STAT);
#endif

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object unaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
						object_size - size));
#else
  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     unaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
#endif

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

#ifdef GATHER_STATISTICS
  {
    size_t overhead = object_size - size;

    G.stats.total_overhead += overhead;
    G.stats.total_allocated += object_size;
    G.stats.total_overhead_per_order[order] += overhead;
    G.stats.total_allocated_per_order[order] += object_size;

    if (size <= 32)
      {
	G.stats.total_overhead_under32 += overhead;
	G.stats.total_allocated_under32 += object_size;
      }
    if (size <= 64)
      {
	G.stats.total_overhead_under64 += overhead;
	G.stats.total_allocated_under64 += object_size;
      }
    if (size <= 128)
      {
	G.stats.total_overhead_under128 += overhead;
	G.stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) object_size, result,
	     (void *) entry);

  return result;
}
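
/* Compiled-out sketch of the free-bit scan performed above: the
   one-past-the-end sentinel bit set by alloc_page guarantees the word
   scan terminates, so no bounds check is needed in the inner loops.
   (A result equal to the object count means the page is actually
   full; the allocator's hint logic avoids that case.)  */
#if 0
static unsigned
example_find_free_bit (page_entry *entry)
{
  unsigned word = 0, bit = 0;
  while (~entry->in_use_p[word] == 0)		/* Skip fully-used words.  */
    ++word;
  while ((entry->in_use_p[word] >> bit) & 1)	/* Find first clear bit.  */
    ++bit;
  return word * HOST_BITS_PER_LONG + bit;
}
#endif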
/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;
  unsigned long offset;

  if (!p || !ggc_allocated_p (p))
    return;

  /* Look up the page on which the object is alloced.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  Note that because a char* might
     point to the middle of an object, we need special code here to
     make sure P points to the start of an object.  */
  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
	 of an allocated object.  We assume it points to the middle of
	 a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}
/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Freeing object, actual size=%lu, at %p on %p\n",
	     (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
	page_entry *p, *q;

	/* If the page is completely full, then it's supposed to
	   be after all pages that aren't.  Since we've freed one
	   object from a page that was full, we need to move the
	   page to the head of the list.

	   PE is the node we want to move.  Q is the previous node
	   and P is the next node in the list.  */
	q = pe->prev;
	if (q && q->num_free_objects == 0)
	  {
	    p = pe->next;

	    q->next = p;

	    /* If PE was at the end of the list, then Q becomes the
	       new end of the list.  If PE was not the end of the
	       list, then we need to update the PREV field for P.  */
	    if (!p)
	      G.page_tails[order] = q;
	    else
	      p->prev = q;

	    /* Move PE to the head of the list.  */
	    pe->next = G.pages[order];
	    pe->prev = NULL;
	    G.pages[order]->prev = pe;
	    G.pages[order] = pe;
	  }

	/* Reset the hint bit to point to the only free object.  */
	pe->next_bit_hint = bit_offset;
      }
  }
#endif
}
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
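
/* Worked example: for OBJECT_SIZE (order) == 24 == 3 * 2^3, the loop
   above strips three factors of two (e == 3, size == 3), and the
   Newton iteration converges on inv == 0xAAAAAAAB, the inverse of 3
   modulo 2^32 (3 * 0xAAAAAAAB == 0x200000001).  OFFSET_TO_BIT (48, order)
   then computes ((48 * 0xAAAAAAAB) mod 2^32) >> 3 == 16 >> 3 == 2,
   which is indeed 48 / 24.  A compiled-out check of the iteration in
   32-bit arithmetic:  */
#if 0
static void
example_inverse_for_24 (void)
{
  unsigned long inv = 3;
  while ((unsigned int) (inv * 3) != 1)
    inv = (unsigned int) (inv * (2 - inv * 3));
  /* inv == 0xAAAAAAAB here.  */
  (void) inv;
}
#endif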
/* Initialize the ggc-mmap allocator.  */

void
init_ggc (void)
{
  unsigned order;

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, true);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, true);
	gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    /* We have a good page, might as well hold onto it...  */
    e = XCNEW (struct page_entry);
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
	 so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
	objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      i = OBJECT_SIZE (order);
      if (i >= NUM_SIZE_LOOKUP)
	continue;

      for (o = size_lookup[i]; o == size_lookup[i]; --i)
	size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = XNEWVEC (unsigned int, G.depth_max);

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}
/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects = OBJECTS_IN_PAGE (p);
	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

	  /* The data should be page-aligned.  */
	  gcc_assert (!((size_t) p->page & (G.pagesize - 1)));

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! save_in_use_p (p))
		save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
	    }

	  /* Reset the number of free objects and clear the
	     in-use bits.  These will be adjusted by mark_obj.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  num_objects = OBJECTS_IN_PAGE (p);

	  /* Add all live objects on this page to the count of
	     allocated memory.  */
	  live_objects = num_objects - p->num_free_objects;

	  G.allocated += OBJECT_SIZE (order) * live_objects;

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (live_objects == 0)
	    {
	      /* If P was the first page in the list, then NEXT
		 becomes the new first page in the list, otherwise
		 splice P out of the forward pointers.  */
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Splice P out of the back pointers too.  */
	      if (next)
		next->prev = previous;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  p->prev = G.page_tails[order];
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;

		  /* And update the backpointer in NEXT if necessary.  */
		  if (next)
		    next->prev = previous;

		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;

	      /* Update the backchain in the next node if it exists.  */
	      if (p->next)
		p->next->prev = previous;

	      /* Move P to the head of the list.  */
	      p->next = G.pages[order];
	      p->prev = NULL;
	      G.pages[order]->prev = p;

	      /* Update the head pointer.  */
	      G.pages[order] = p;

	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
		G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
	 other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}
#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects;
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  num_objects = OBJECTS_IN_PAGE (p);
	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		{
		  char *object = p->page + i * size;

		  /* Keep poison-by-write when we expect to use Valgrind,
		     so the exact same memory semantics is kept, in case
		     there are memory errors.  We override this request
		     below.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
								 size));
		  memset (object, 0xa5, size);

		  /* Drop the handle to avoid handle leak.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
		}
	    }
	}
    }
}
#else
#define poison_pages()
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
	 do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
	 free_object entry, so that we can verify that the address
	 isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
	{
	  f->next = still_free;
	  still_free = f;
	}
      else
	free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  clear_marks ();
  ggc_mark_roots ();
#ifdef GATHER_STATISTICS
  ggc_prune_overhead_list ();
#endif
  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
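
/* SCALE/STAT_LABEL examples: 5000 prints as "5000" with no suffix,
   2097152 as "2048k", and 209715200 as "200M".  A compiled-out
   check:  */
#if 0
static void
example_scale (void)
{
  gcc_assert (SCALE (5000) == 5000 && STAT_LABEL (5000) == ' ');
  gcc_assert (SCALE (2097152) == 2048 && STAT_LABEL (2097152) == 'k');
  gcc_assert (SCALE (209715200) == 200 && STAT_LABEL (209715200) == 'M');
}
#endif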
void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
	   "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s  %10s  %10s\n",
	   "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), STAT_LABEL (allocated),
	       SCALE (in_use), STAT_LABEL (in_use),
	       SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
	   SCALE (G.allocated), STAT_LABEL(G.allocated),
	   SCALE (total_overhead), STAT_LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead:                 %10lld\n",
	     G.stats.total_overhead);
    fprintf (stderr, "Total Allocated:                %10lld\n",
	     G.stats.total_allocated);

    fprintf (stderr, "Total Overhead  under  32B:     %10lld\n",
	     G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under  32B:     %10lld\n",
	     G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead  under  64B:     %10lld\n",
	     G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under  64B:     %10lld\n",
	     G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead  under 128B:     %10lld\n",
	     G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B:     %10lld\n",
	     G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
	{
	  fprintf (stderr, "Total Overhead  page size %7lu:     %10lld\n",
		   (unsigned long) OBJECT_SIZE (i),
		   G.stats.total_overhead_per_order[i]);
	  fprintf (stderr, "Total Allocated page size %7lu:     %10lld\n",
		   (unsigned long) OBJECT_SIZE (i),
		   G.stats.total_allocated_per_order[i]);
	}
  }
#endif
}
struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};
struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}
size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}
void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}
void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256] = { 0 };

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE(order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
	 than most padding requests as the source for our null bytes.  This
	 permits us to do the padding with fwrite() rather than fseek(), and
	 limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof(emptyBytes))
	{
	  if (fwrite (emptyBytes, 1, padding, f) != padding)
	    fatal_error ("can%'t write PCH file");
	}
      else
	{
	  /* Larger than our buffer?  Just default to fseek.  */
	  if (fseek (f, padding, SEEK_CUR) != 0)
	    fatal_error ("can%'t write PCH file");
	}
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error ("can%'t write PCH file: %m");
}
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");
  free (d);
}
/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
					    - sizeof (long)
					    + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays, later, we will move the new information
	 to the front of the varrays, as the PCH page tables are at
	 context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}
/* Dummy zones, for compatibility with the zone collector.  */

struct alloc_zone
{
  int dummy;
};

struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;