1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009,
3 2010 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
28 #include "diagnostic-core.h"
31 #include "ggc-internal.h"
34 #include "tree-flow.h"
38 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
39 file open. Prefer either to valloc. */
41 # undef HAVE_MMAP_DEV_ZERO
45 #ifdef HAVE_MMAP_DEV_ZERO
50 #define USING_MALLOC_PAGE_GROUPS
55 This garbage-collecting allocator allocates objects on one of a set
56 of pages. Each page can allocate objects of a single size only;
57 available sizes are powers of two starting at four bytes. The size
58 of an allocation request is rounded up to the next power of two
59 (`order'), and satisfied from the appropriate page.
61 Each page is recorded in a page-entry, which also maintains an
62 in-use bitmap of object positions on the page. This allows the
63 allocation state of a particular object to be flipped without
64 touching the page itself.
66 Each page-entry also has a context depth, which is used to track
67 pushing and popping of allocation contexts. Only objects allocated
68 in the current (highest-numbered) context may be collected.
70 Page entries are arranged in an array of singly-linked lists. The
71 array is indexed by the allocation size, in bits, of the pages on
72 it; i.e. all pages on a list allocate objects of the same size.
73 Pages are ordered on the list such that all non-full pages precede
74 all full pages, with non-full pages arranged in order of decreasing context depth.
77 Empty pages (of all orders) are kept on a single page cache list,
78 and are considered first when new pages are required; they are
79 deallocated at the start of the next collection if they haven't
80 been recycled by then. */
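/* Illustrative sketch (not part of the original sources): the effect of
   the size rounding described above.  It ignores the extra,
   non-power-of-two orders introduced further down in this file, which
   may provide a tighter fit for some sizes.  */
#if 0
static void
example_request_rounding (void)
{
  /* The smallest order is 3, i.e. 8-byte objects, so a 3-byte request
     is served from an order-3 page.  */
  gcc_assert (ggc_round_alloc_size (3) == 8);

  /* Absent an extra order in between, a 20-byte request rounds up to
     the next power of two, 32 bytes (order 5); on a 4K system page an
     order-5 page holds 4096 / 32 == 128 objects.  */
}
#endif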
82 /* Define GGC_DEBUG_LEVEL to print debugging information.
83 0: No debugging output.
84 1: GC statistics only.
85 2: Page-entry allocations/deallocations as well.
86 3: Object allocations as well.
87 4: Object marks as well. */
88 #define GGC_DEBUG_LEVEL (0)
90 #ifndef HOST_BITS_PER_PTR
91 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
95 /* A two-level tree is used to look up the page-entry for a given
96 pointer. Two chunks of the pointer's bits are extracted to index
97 the first and second levels of the tree, as follows:
101 msb +----------------+----+------+------+ lsb
107 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
108 pages are aligned on system page boundaries. The next most
109 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
110 index values in the lookup table, respectively.
112 For 32-bit architectures and the settings below, there are no
113 leftover bits. For architectures with wider pointers, the lookup
114 tree points to a list of pages, which must be scanned to find the correct one.
117 #define PAGE_L1_BITS (8)
118 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
119 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
120 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
122 #define LOOKUP_L1(p) \
123 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
125 #define LOOKUP_L2(p) \
126 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
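/* Worked example (illustrative; assumes a 4K system page, so that
   G.lg_pagesize == 12 and PAGE_L2_BITS == 12): the 32-bit address
   0xABCD1234 decomposes as

     LOOKUP_L1 (0xABCD1234) == 0xAB    -- the top PAGE_L1_BITS bits
     LOOKUP_L2 (0xABCD1234) == 0xCD1   -- the next PAGE_L2_BITS bits

   while the low 12 bits (0x234) are the offset within the page and are
   ignored by the lookup.  */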
128 /* The number of objects per allocation page, for objects on a page of
129 the indicated ORDER. */
130 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
132 /* The number of objects in P. */
133 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
135 /* The size of an object on a page of the indicated ORDER. */
136 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
138 /* For speed, we avoid doing a general integer divide to locate the
139 offset in the allocation bitmap, by precalculating numbers M, S
140 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
141 within the page which is evenly divisible by the object size Z. */
142 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
143 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
144 #define OFFSET_TO_BIT(OFFSET, ORDER) \
145 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
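/* Worked example (illustrative; assumes a 32-bit size_t and an extra
   order whose object size is 24 bytes): compute_inverse below strips
   the factor of 8, so DIV_SHIFT == 3, and DIV_MULT == 0xAAAAAAAB, the
   multiplicative inverse of the odd part 3 modulo 2^32.  For the
   offset of the third object on such a page:

     OFFSET_TO_BIT (48, order)
       == ((48 * 0xAAAAAAAB) mod 2^32) >> 3
       == 16 >> 3
       == 2
       == 48 / 24.  */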
147 /* We use this structure to determine the alignment required for
148 allocations. For power-of-two sized allocations, that's not a
149 problem, but it does matter for odd-sized allocations.
150 We do not care about alignment for floating-point types. */
152 struct max_alignment {
160 /* The biggest alignment required. */
162 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
165 /* The number of extra orders, not corresponding to power-of-two sized objects. */
168 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
170 #define RTL_SIZE(NSLOTS) \
171 (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
173 #define TREE_EXP_SIZE(OPS) \
174 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
176 /* The Ith entry is the maximum size of an object to be stored in the
177 Ith extra order. Adding a new entry to this array is the *only*
178 thing you need to do to add a new special allocation size. */
180 static const size_t extra_order_size_table[] = {
181 /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
182 There are a lot of structures with these sizes and explicitly
183 listing them risks orders being dropped because they changed size. */
195 sizeof (struct tree_decl_non_common),
196 sizeof (struct tree_field_decl),
197 sizeof (struct tree_parm_decl),
198 sizeof (struct tree_var_decl),
199 sizeof (struct tree_type_non_common),
200 sizeof (struct function),
201 sizeof (struct basic_block_def),
202 sizeof (struct cgraph_node),
203 sizeof (struct loop),
206 /* The total number of orders. */
208 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
210 /* Compute the smallest nonnegative number which when added to X gives a multiple of F. */
213 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
215 /* Compute the smallest multiple of F that is >= X. */
217 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
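/* Worked example (illustrative): rounding 13 up to a multiple of 8.

     ROUND_UP_VALUE (13, 8) == 8 - 1 - ((8 - 1 + 13) % 8) == 7 - 4 == 3
     ROUND_UP (13, 8)       == CEIL (13, 8) * 8           == 2 * 8 == 16

   and indeed 13 + 3 == 16.  */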
219 /* The Ith entry is the number of objects on a page of order I. */
221 static unsigned objects_per_page_table[NUM_ORDERS];
223 /* The Ith entry is the size of an object on a page of order I. */
225 static size_t object_size_table[NUM_ORDERS];
227 /* The Ith entry is a pair of numbers (mult, shift) such that
228 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
229 for all k evenly divisible by OBJECT_SIZE(I). */
236 inverse_table[NUM_ORDERS];
238 /* A page_entry records the status of an allocation page. This
239 structure is dynamically sized to fit the bitmap in_use_p. */
240 typedef struct page_entry
242 /* The next page-entry with objects of the same size, or NULL if
243 this is the last page-entry. */
244 struct page_entry *next;
246 /* The previous page-entry with objects of the same size, or NULL if
247 this is the first page-entry. The PREV pointer exists solely to
248 keep the cost of ggc_free manageable. */
249 struct page_entry *prev;
251 /* The number of bytes allocated. (This will always be a multiple
252 of the host system page size.) */
255 /* The address at which the memory is allocated. */
258 #ifdef USING_MALLOC_PAGE_GROUPS
259 /* Back pointer to the page group this page came from. */
260 struct page_group *group;
263 /* This is the index in the by_depth varray where this page table can be found. */
265 unsigned long index_by_depth;
267 /* Context depth of this page. */
268 unsigned short context_depth;
270 /* The number of free objects remaining on this page. */
271 unsigned short num_free_objects;
273 /* A likely candidate for the bit position of a free object for the
274 next allocation from this page. */
275 unsigned short next_bit_hint;
277 /* The lg of the size of objects allocated from this page. */
280 /* A bit vector indicating whether or not objects are in use. The
281 Nth bit is one if the Nth object on this page is allocated. This
282 array is dynamically sized. */
283 unsigned long in_use_p[1];
286 #ifdef USING_MALLOC_PAGE_GROUPS
287 /* A page_group describes a large allocation from malloc, from which
288 we parcel out aligned pages. */
289 typedef struct page_group
291 /* A linked list of all extant page groups. */
292 struct page_group *next;
294 /* The address we received from malloc. */
297 /* The size of the block. */
300 /* A bitmask of pages in use. */
305 #if HOST_BITS_PER_PTR <= 32
307 /* On 32-bit hosts, we use a two level page table, as pictured above. */
308 typedef page_entry **page_table[PAGE_L1_SIZE];
312 /* On 64-bit hosts, we use the same two level page tables plus a linked
313 list that disambiguates the top 32-bits. There will almost always be
314 exactly one entry in the list. */
315 typedef struct page_table_chain
317 struct page_table_chain *next;
319 page_entry **table[PAGE_L1_SIZE];
324 #ifdef ENABLE_GC_ALWAYS_COLLECT
325 /* List of free objects to be verified as actually free on the
330 struct free_object *next;
334 /* The rest of the global variables. */
335 static struct globals
337 /* The Nth element in this array is a page with objects of size 2^N.
338 If there are any pages with free objects, they will be at the
339 head of the list. NULL if there are no page-entries for this object size.
341 page_entry *pages[NUM_ORDERS];
343 /* The Nth element in this array is the last page with objects of
344 size 2^N. NULL if there are no page-entries for this object size.
346 page_entry *page_tails[NUM_ORDERS];
348 /* Lookup table for associating allocation pages with object addresses. */
351 /* The system's page size. */
355 /* Bytes currently allocated. */
358 /* Bytes currently allocated at the end of the last collection. */
359 size_t allocated_last_gc;
361 /* Total amount of memory mapped. */
364 /* Bit N set if any allocations have been done at context depth N. */
365 unsigned long context_depth_allocations;
367 /* Bit N set if any collections have been done at context depth N. */
368 unsigned long context_depth_collections;
370 /* The current depth in the context stack. */
371 unsigned short context_depth;
373 /* A file descriptor open to /dev/zero for reading. */
374 #if defined (HAVE_MMAP_DEV_ZERO)
378 /* A cache of free system pages. */
379 page_entry *free_pages;
381 #ifdef USING_MALLOC_PAGE_GROUPS
382 page_group *page_groups;
385 /* The file descriptor for debugging output. */
388 /* Current number of elements in use in depth below. */
389 unsigned int depth_in_use;
391 /* Maximum number of elements that can be used before resizing. */
392 unsigned int depth_max;
394 /* Each element of this array is an index in by_depth where the given
395 depth starts. This array is indexed by the depth we
396 are interested in. */
399 /* Current number of elements in use in by_depth below. */
400 unsigned int by_depth_in_use;
402 /* Maximum number of elements that can be used before resizing. */
403 unsigned int by_depth_max;
405 /* Each element of this array is a pointer to a page_entry, all
406 page_entries can be found in here by increasing depth.
407 index_by_depth in the page_entry is the index into this data
408 structure where that page_entry can be found. This is used to
409 speed up finding all page_entries at a particular depth. */
410 page_entry **by_depth;
412 /* Each element is a pointer to the saved in_use_p bits, if any,
413 zero otherwise. We allocate them all together, to enable a
414 better runtime data access pattern. */
415 unsigned long **save_in_use;
417 #ifdef ENABLE_GC_ALWAYS_COLLECT
418 /* List of free objects to be verified as actually free on the next collection. */
420 struct free_object *free_object_list;
423 #ifdef GATHER_STATISTICS
426 /* Total GC-allocated memory. */
427 unsigned long long total_allocated;
428 /* Total overhead for GC-allocated memory. */
429 unsigned long long total_overhead;
431 /* Total allocations and overhead for sizes less than 32, 64 and 128.
432 These sizes are interesting because they are typical cache line sizes.
435 unsigned long long total_allocated_under32;
436 unsigned long long total_overhead_under32;
438 unsigned long long total_allocated_under64;
439 unsigned long long total_overhead_under64;
441 unsigned long long total_allocated_under128;
442 unsigned long long total_overhead_under128;
444 /* The allocations for each of the allocation orders. */
445 unsigned long long total_allocated_per_order[NUM_ORDERS];
447 /* The overhead for each of the allocation orders. */
448 unsigned long long total_overhead_per_order[NUM_ORDERS];
453 /* The size in bytes required to maintain a bitmap for the objects on a page-entry. */
455 #define BITMAP_SIZE(Num_objects) \
456 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
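/* Worked example (illustrative; assumes a 64-bit long, so
   HOST_BITS_PER_LONG == 64): an order-3 page on a 4K system holds 512
   objects, and the bitmap also needs the one-past-the-end sentinel
   bit, so

     BITMAP_SIZE (512 + 1) == CEIL (513, 64) * sizeof (long)
                           == 9 * 8 == 72 bytes.  */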
458 /* Allocate pages in chunks of this size, to throttle calls to memory
459 allocation routines. The first page is used, the rest go onto the
460 free list. This cannot be larger than HOST_BITS_PER_INT for the
461 in_use bitmask for page_group. Hosts that need a different value
462 can override this by defining GGC_QUIRE_SIZE explicitly. */
463 #ifndef GGC_QUIRE_SIZE
465 # define GGC_QUIRE_SIZE 512 /* 2MB for 4K pages */
467 # define GGC_QUIRE_SIZE 16
471 /* Initial guess as to how many page table entries we might need. */
472 #define INITIAL_PTE_COUNT 128
474 static int ggc_allocated_p (const void *);
475 static page_entry *lookup_page_table_entry (const void *);
476 static void set_page_table_entry (void *, page_entry *);
478 static char *alloc_anon (char *, size_t);
480 #ifdef USING_MALLOC_PAGE_GROUPS
481 static size_t page_group_index (char *, char *);
482 static void set_page_group_in_use (page_group *, char *);
483 static void clear_page_group_in_use (page_group *, char *);
485 static struct page_entry * alloc_page (unsigned);
486 static void free_page (struct page_entry *);
487 static void release_pages (void);
488 static void clear_marks (void);
489 static void sweep_pages (void);
490 static void ggc_recalculate_in_use_p (page_entry *);
491 static void compute_inverse (unsigned);
492 static inline void adjust_depth (void);
493 static void move_ptes_to_front (int, int);
495 void debug_print_page_list (int);
496 static void push_depth (unsigned int);
497 static void push_by_depth (page_entry *, unsigned long *);
499 /* Push an entry onto G.depth. */
502 push_depth (unsigned int i)
504 if (G.depth_in_use >= G.depth_max)
507 G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
509 G.depth[G.depth_in_use++] = i;
512 /* Push an entry onto G.by_depth and G.save_in_use. */
515 push_by_depth (page_entry *p, unsigned long *s)
517 if (G.by_depth_in_use >= G.by_depth_max)
520 G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
521 G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
524 G.by_depth[G.by_depth_in_use] = p;
525 G.save_in_use[G.by_depth_in_use++] = s;
528 #if (GCC_VERSION < 3001)
529 #define prefetch(X) ((void) X)
531 #define prefetch(X) __builtin_prefetch (X)
534 #define save_in_use_p_i(__i) \
536 #define save_in_use_p(__p) \
537 (save_in_use_p_i (__p->index_by_depth))
539 /* Returns nonzero if P was allocated in GC'able memory. */
542 ggc_allocated_p (const void *p)
547 #if HOST_BITS_PER_PTR <= 32
550 page_table table = G.lookup;
551 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
556 if (table->high_bits == high_bits)
560 base = &table->table[0];
563 /* Extract the level 1 and 2 indices. */
567 return base[L1] && base[L1][L2];
570 /* Traverse the page table and find the entry for a page.
571 Die (probably) if the object wasn't allocated via GC. */
573 static inline page_entry *
574 lookup_page_table_entry (const void *p)
579 #if HOST_BITS_PER_PTR <= 32
582 page_table table = G.lookup;
583 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
584 while (table->high_bits != high_bits)
586 base = &table->table[0];
589 /* Extract the level 1 and 2 indices. */
596 /* Set the page table entry for a page. */
599 set_page_table_entry (void *p, page_entry *entry)
604 #if HOST_BITS_PER_PTR <= 32
608 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
609 for (table = G.lookup; table; table = table->next)
610 if (table->high_bits == high_bits)
613 /* Not found -- allocate a new table. */
614 table = XCNEW (struct page_table_chain);
615 table->next = G.lookup;
616 table->high_bits = high_bits;
619 base = &table->table[0];
622 /* Extract the level 1 and 2 indices. */
626 if (base[L1] == NULL)
627 base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
629 base[L1][L2] = entry;
632 /* Prints the page-entry for object size ORDER, for debugging. */
635 debug_print_page_list (int order)
638 printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
639 (void *) G.page_tails[order]);
643 printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
644 p->num_free_objects);
652 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
653 (if non-null). The ifdef structure here is intended to cause a
654 compile error unless exactly one of the HAVE_* is defined. */
657 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
659 #ifdef HAVE_MMAP_ANON
660 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
661 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
663 #ifdef HAVE_MMAP_DEV_ZERO
664 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
665 MAP_PRIVATE, G.dev_zero_fd, 0);
668 if (page == (char *) MAP_FAILED)
670 perror ("virtual memory exhausted");
671 exit (FATAL_EXIT_CODE);
674 /* Remember that we allocated this memory. */
675 G.bytes_mapped += size;
677 /* Pretend we don't have access to the allocated pages. We'll enable
678 access to smaller pieces of the area in ggc_internal_alloc. Discard the
679 handle to avoid handle leak. */
680 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
685 #ifdef USING_MALLOC_PAGE_GROUPS
686 /* Compute the index for this page into the page group. */
689 page_group_index (char *allocation, char *page)
691 return (size_t) (page - allocation) >> G.lg_pagesize;
694 /* Set and clear the in_use bit for this page in the page group. */
697 set_page_group_in_use (page_group *group, char *page)
699 group->in_use |= 1 << page_group_index (group->allocation, page);
703 clear_page_group_in_use (page_group *group, char *page)
705 group->in_use &= ~(1 << page_group_index (group->allocation, page));
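/* Worked example (illustrative; assumes 4K pages): a page that starts
   0x5000 bytes into its group's allocation has

     page_group_index (allocation, allocation + 0x5000) == 0x5000 >> 12 == 5

   so set_page_group_in_use sets bit 5 of group->in_use and
   clear_page_group_in_use clears it again.  */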
709 /* Allocate a new page for allocating objects of size 2^ORDER,
710 and return an entry for it. The entry is not added to the
711 appropriate page_table list. */
713 static inline struct page_entry *
714 alloc_page (unsigned order)
716 struct page_entry *entry, *p, **pp;
720 size_t page_entry_size;
722 #ifdef USING_MALLOC_PAGE_GROUPS
726 num_objects = OBJECTS_PER_PAGE (order);
727 bitmap_size = BITMAP_SIZE (num_objects + 1);
728 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
729 entry_size = num_objects * OBJECT_SIZE (order);
730 if (entry_size < G.pagesize)
731 entry_size = G.pagesize;
736 /* Check the list of free pages for one we can use. */
737 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
738 if (p->bytes == entry_size)
743 /* Recycle the allocated memory from this page ... */
747 #ifdef USING_MALLOC_PAGE_GROUPS
751 /* ... and, if possible, the page entry itself. */
752 if (p->order == order)
755 memset (entry, 0, page_entry_size);
761 else if (entry_size == G.pagesize)
763 /* We want just one page. Allocate a bunch of them and put the
764 extras on the freelist. (Can only do this optimization with
765 mmap for backing store.) */
766 struct page_entry *e, *f = G.free_pages;
769 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
771 /* This loop counts down so that the chain will be in ascending memory order.
773 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
775 e = XCNEWVAR (struct page_entry, page_entry_size);
777 e->bytes = G.pagesize;
778 e->page = page + (i << G.lg_pagesize);
786 page = alloc_anon (NULL, entry_size);
788 #ifdef USING_MALLOC_PAGE_GROUPS
791 /* Allocate a large block of memory and serve out the aligned
792 pages therein. This results in much less memory wastage
793 than the traditional implementation of valloc. */
795 char *allocation, *a, *enda;
796 size_t alloc_size, head_slop, tail_slop;
797 int multiple_pages = (entry_size == G.pagesize);
800 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
802 alloc_size = entry_size + G.pagesize - 1;
803 allocation = XNEWVEC (char, alloc_size);
805 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
806 head_slop = page - allocation;
808 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
810 tail_slop = alloc_size - entry_size - head_slop;
811 enda = allocation + alloc_size - tail_slop;
813 /* We allocated N pages, which are likely not aligned, leaving
814 us with N-1 usable pages. We plan to place the page_group
815 structure somewhere in the slop. */
816 if (head_slop >= sizeof (page_group))
817 group = (page_group *)page - 1;
820 /* We magically got an aligned allocation. Too bad, we have
821 to waste a page anyway. */
825 tail_slop += G.pagesize;
827 gcc_assert (tail_slop >= sizeof (page_group));
828 group = (page_group *)enda;
829 tail_slop -= sizeof (page_group);
832 /* Remember that we allocated this memory. */
833 group->next = G.page_groups;
834 group->allocation = allocation;
835 group->alloc_size = alloc_size;
837 G.page_groups = group;
838 G.bytes_mapped += alloc_size;
840 /* If we allocated multiple pages, put the rest on the free list. */
843 struct page_entry *e, *f = G.free_pages;
844 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
846 e = XCNEWVAR (struct page_entry, page_entry_size);
848 e->bytes = G.pagesize;
860 entry = XCNEWVAR (struct page_entry, page_entry_size);
862 entry->bytes = entry_size;
864 entry->context_depth = G.context_depth;
865 entry->order = order;
866 entry->num_free_objects = num_objects;
867 entry->next_bit_hint = 1;
869 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
871 #ifdef USING_MALLOC_PAGE_GROUPS
872 entry->group = group;
873 set_page_group_in_use (group, page);
876 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
877 increment the hint. */
878 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
879 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
881 set_page_table_entry (page, entry);
883 if (GGC_DEBUG_LEVEL >= 2)
884 fprintf (G.debug_file,
885 "Allocating page at %p, object size=%lu, data %p-%p\n",
886 (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
887 page + entry_size - 1);
892 /* Adjust the size of G.depth so that no index greater than the one
893 used by the top of G.by_depth is used. */
900 if (G.by_depth_in_use)
902 top = G.by_depth[G.by_depth_in_use-1];
904 /* Peel back indices in depth that index into by_depth, so that
905 as new elements are added to by_depth, we note the indices
906 of those elements, if they are for new context depths. */
907 while (G.depth_in_use > (size_t)top->context_depth+1)
912 /* For a page that is no longer needed, put it on the free page list. */
915 free_page (page_entry *entry)
917 if (GGC_DEBUG_LEVEL >= 2)
918 fprintf (G.debug_file,
919 "Deallocating page at %p, data %p-%p\n", (void *) entry,
920 entry->page, entry->page + entry->bytes - 1);
922 /* Mark the page as inaccessible. Discard the handle to avoid handle leak. */
924 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
926 set_page_table_entry (entry->page, NULL);
928 #ifdef USING_MALLOC_PAGE_GROUPS
929 clear_page_group_in_use (entry->group, entry->page);
932 if (G.by_depth_in_use > 1)
934 page_entry *top = G.by_depth[G.by_depth_in_use-1];
935 int i = entry->index_by_depth;
937 /* We cannot free a page from a context deeper than the current one. */
939 gcc_assert (entry->context_depth == top->context_depth);
941 /* Put top element into freed slot. */
943 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
944 top->index_by_depth = i;
950 entry->next = G.free_pages;
951 G.free_pages = entry;
954 /* Release the free page cache to the system. */
960 page_entry *p, *next;
964 /* Gather up adjacent pages so they are unmapped together. */
975 while (p && p->page == start + len)
984 G.bytes_mapped -= len;
989 #ifdef USING_MALLOC_PAGE_GROUPS
993 /* Remove all pages from free page groups from the list. */
995 while ((p = *pp) != NULL)
996 if (p->group->in_use == 0)
1004 /* Remove all free page groups, and release the storage. */
1005 gp = &G.page_groups;
1006 while ((g = *gp) != NULL)
1010 G.bytes_mapped -= g->alloc_size;
1011 free (g->allocation);
1018 /* This table provides a fast way to determine ceil(log_2(size)) for
1019 allocation requests. The minimum allocation size is eight bytes. */
1020 #define NUM_SIZE_LOOKUP 512
1021 static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1023 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1024 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1025 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1026 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1027 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1028 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1029 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1030 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1031 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1032 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1033 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1034 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1035 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1036 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1037 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1038 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1039 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1040 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1041 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1042 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1043 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1044 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1045 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1046 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1047 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1048 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1049 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1050 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1051 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1052 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1053 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1054 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
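  /* Illustrative reading of the table above (before init_ggc adjusts it
     to route suitable sizes into the extra orders): size_lookup[10] == 4,
     so a 10-byte request is served from order 4 (16-byte objects), and
     size_lookup[100] == 7, so a 100-byte request is served from order 7
     (128-byte objects).  */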
1057 /* For a given size of memory requested for allocation, return the
1058 actual size that is going to be allocated, as well as the size order.
1062 ggc_round_alloc_size_1 (size_t requested_size,
1064 size_t *alloced_size)
1066 size_t order, object_size;
1068 if (requested_size < NUM_SIZE_LOOKUP)
1070 order = size_lookup[requested_size];
1071 object_size = OBJECT_SIZE (order);
1076 while (requested_size > (object_size = OBJECT_SIZE (order)))
1081 *size_order = order;
1083 *alloced_size = object_size;
1086 /* For a given size of memory requested for allocation, return the
1087 actual size that is going to be allocated. */
1090 ggc_round_alloc_size (size_t requested_size)
1094 ggc_round_alloc_size_1 (requested_size, NULL, &size);
1098 /* Typed allocation function. Does nothing special in this collector. */
1101 ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
1104 return ggc_internal_alloc_stat (size PASS_MEM_STAT);
1107 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1110 ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
1112 size_t order, word, bit, object_offset, object_size;
1113 struct page_entry *entry;
1116 ggc_round_alloc_size_1 (size, &order, &object_size);
1118 /* If there are non-full pages for this size allocation, they are at
1119 the head of the list. */
1120 entry = G.pages[order];
1122 /* If there is no page for this object size, or all pages in this
1123 context are full, allocate a new page. */
1124 if (entry == NULL || entry->num_free_objects == 0)
1126 struct page_entry *new_entry;
1127 new_entry = alloc_page (order);
1129 new_entry->index_by_depth = G.by_depth_in_use;
1130 push_by_depth (new_entry, 0);
1132 /* We can skip context depths, if we do, make sure we go all the
1133 way to the new depth. */
1134 while (new_entry->context_depth >= G.depth_in_use)
1135 push_depth (G.by_depth_in_use-1);
1137 /* If this is the only entry, it's also the tail. If it is not
1138 the only entry, then we must update the PREV pointer of the
1139 ENTRY (G.pages[order]) to point to our new page entry. */
1141 G.page_tails[order] = new_entry;
1143 entry->prev = new_entry;
1145 /* Put new pages at the head of the page list. By definition the
1146 entry at the head of the list always has a NULL PREV pointer. */
1147 new_entry->next = entry;
1148 new_entry->prev = NULL;
1150 G.pages[order] = new_entry;
1152 /* For a new page, we know the word and bit positions (in the
1153 in_use bitmap) of the first available object -- they're zero. */
1154 new_entry->next_bit_hint = 1;
1161 /* First try to use the hint left from the previous allocation
1162 to locate a clear bit in the in-use bitmap. We've made sure
1163 that the one-past-the-end bit is always set, so if the hint
1164 has run over, this test will fail. */
1165 unsigned hint = entry->next_bit_hint;
1166 word = hint / HOST_BITS_PER_LONG;
1167 bit = hint % HOST_BITS_PER_LONG;
1169 /* If the hint didn't work, scan the bitmap from the beginning. */
1170 if ((entry->in_use_p[word] >> bit) & 1)
1173 while (~entry->in_use_p[word] == 0)
1176 #if GCC_VERSION >= 3004
1177 bit = __builtin_ctzl (~entry->in_use_p[word]);
1179 while ((entry->in_use_p[word] >> bit) & 1)
1183 hint = word * HOST_BITS_PER_LONG + bit;
1186 /* Next time, try the next bit. */
1187 entry->next_bit_hint = hint + 1;
1189 object_offset = hint * object_size;
1192 /* Set the in-use bit. */
1193 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1195 /* Keep a running total of the number of free objects. If this page
1196 fills up, we may have to move it to the end of the list if the
1197 next page isn't full. If the next page is full, all subsequent
1198 pages are full, so there's no need to move it. */
1199 if (--entry->num_free_objects == 0
1200 && entry->next != NULL
1201 && entry->next->num_free_objects > 0)
1203 /* We have a new head for the list. */
1204 G.pages[order] = entry->next;
1206 /* We are moving ENTRY to the end of the page table list.
1207 The new page at the head of the list will have NULL in
1208 its PREV field and ENTRY will have NULL in its NEXT field. */
1209 entry->next->prev = NULL;
1212 /* Append ENTRY to the tail of the list. */
1213 entry->prev = G.page_tails[order];
1214 G.page_tails[order]->next = entry;
1215 G.page_tails[order] = entry;
1218 /* Calculate the object's address. */
1219 result = entry->page + object_offset;
1220 #ifdef GATHER_STATISTICS
1221 ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1222 result PASS_MEM_STAT);
1225 #ifdef ENABLE_GC_CHECKING
1226 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1227 exact same semantics in the presence of memory bugs, regardless of
1228 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1229 handle to avoid handle leak. */
1230 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1232 /* `Poison' the entire allocated object, including any padding at the end.
1234 memset (result, 0xaf, object_size);
1236 /* Make the bytes after the end of the object inaccessible. Discard the
1237 handle to avoid handle leak. */
1238 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1239 object_size - size));
1242 /* Tell Valgrind that the memory is there, but its content isn't
1243 defined. The bytes at the end of the object are still marked inaccessible.
1245 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1247 /* Keep track of how many bytes are being allocated. This
1248 information is used in deciding when to collect. */
1249 G.allocated += object_size;
1251 /* For timevar statistics. */
1252 timevar_ggc_mem_total += object_size;
1254 #ifdef GATHER_STATISTICS
1256 size_t overhead = object_size - size;
1258 G.stats.total_overhead += overhead;
1259 G.stats.total_allocated += object_size;
1260 G.stats.total_overhead_per_order[order] += overhead;
1261 G.stats.total_allocated_per_order[order] += object_size;
1265 G.stats.total_overhead_under32 += overhead;
1266 G.stats.total_allocated_under32 += object_size;
1270 G.stats.total_overhead_under64 += overhead;
1271 G.stats.total_allocated_under64 += object_size;
1275 G.stats.total_overhead_under128 += overhead;
1276 G.stats.total_allocated_under128 += object_size;
1281 if (GGC_DEBUG_LEVEL >= 3)
1282 fprintf (G.debug_file,
1283 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1284 (unsigned long) size, (unsigned long) object_size, result,
1290 /* Mark function for strings. */
1293 gt_ggc_m_S (const void *p)
1298 unsigned long offset;
1300 if (!p || !ggc_allocated_p (p))
1303 /* Look up the page on which the object is alloced. */
1304 entry = lookup_page_table_entry (p);
1307 /* Calculate the index of the object on the page; this is its bit
1308 position in the in_use_p bitmap. Note that because a char* might
1309 point to the middle of an object, we need special code here to
1310 make sure P points to the start of an object. */
1311 offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1314 /* Here we've seen a char* which does not point to the beginning
1315 of an allocated object. We assume it points to the middle of a STRING_CST.
1317 gcc_assert (offset == offsetof (struct tree_string, str));
1318 p = ((const char *) p) - offset;
1319 gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1323 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1324 word = bit / HOST_BITS_PER_LONG;
1325 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1327 /* If the bit was previously set, skip it. */
1328 if (entry->in_use_p[word] & mask)
1331 /* Otherwise set it, and decrement the free object count. */
1332 entry->in_use_p[word] |= mask;
1333 entry->num_free_objects -= 1;
1335 if (GGC_DEBUG_LEVEL >= 4)
1336 fprintf (G.debug_file, "Marking %p\n", p);
1341 /* If P is not marked, mark it and return false. Otherwise return true.
1342 P must have been allocated by the GC allocator; it mustn't point to
1343 static objects, stack variables, or memory allocated with malloc. */
1346 ggc_set_mark (const void *p)
1352 /* Look up the page on which the object is alloced. If the object
1353 wasn't allocated by the collector, we'll probably die. */
1354 entry = lookup_page_table_entry (p);
1357 /* Calculate the index of the object on the page; this is its bit
1358 position in the in_use_p bitmap. */
1359 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1360 word = bit / HOST_BITS_PER_LONG;
1361 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1363 /* If the bit was previously set, skip it. */
1364 if (entry->in_use_p[word] & mask)
1367 /* Otherwise set it, and decrement the free object count. */
1368 entry->in_use_p[word] |= mask;
1369 entry->num_free_objects -= 1;
1371 if (GGC_DEBUG_LEVEL >= 4)
1372 fprintf (G.debug_file, "Marking %p\n", p);
1377 /* Return 1 if P has been marked, zero otherwise.
1378 P must have been allocated by the GC allocator; it mustn't point to
1379 static objects, stack variables, or memory allocated with malloc. */
1382 ggc_marked_p (const void *p)
1388 /* Look up the page on which the object is alloced. If the object
1389 wasn't allocated by the collector, we'll probably die. */
1390 entry = lookup_page_table_entry (p);
1393 /* Calculate the index of the object on the page; this is its bit
1394 position in the in_use_p bitmap. */
1395 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1396 word = bit / HOST_BITS_PER_LONG;
1397 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1399 return (entry->in_use_p[word] & mask) != 0;
1402 /* Return the size of the gc-able object P. */
1405 ggc_get_size (const void *p)
1407 page_entry *pe = lookup_page_table_entry (p);
1408 return OBJECT_SIZE (pe->order);
1411 /* Release the memory for object P. */
1416 page_entry *pe = lookup_page_table_entry (p);
1417 size_t order = pe->order;
1418 size_t size = OBJECT_SIZE (order);
1420 #ifdef GATHER_STATISTICS
1421 ggc_free_overhead (p);
1424 if (GGC_DEBUG_LEVEL >= 3)
1425 fprintf (G.debug_file,
1426 "Freeing object, actual size=%lu, at %p on %p\n",
1427 (unsigned long) size, p, (void *) pe);
1429 #ifdef ENABLE_GC_CHECKING
1430 /* Poison the data, to indicate the data is garbage. */
1431 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1432 memset (p, 0xa5, size);
1434 /* Let valgrind know the object is free. */
1435 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1437 #ifdef ENABLE_GC_ALWAYS_COLLECT
1438 /* In the completely-anal-checking mode, we do *not* immediately free
1439 the data, but instead verify that the data is *actually* not
1440 reachable the next time we collect. */
1442 struct free_object *fo = XNEW (struct free_object);
1444 fo->next = G.free_object_list;
1445 G.free_object_list = fo;
1449 unsigned int bit_offset, word, bit;
1451 G.allocated -= size;
1453 /* Mark the object not-in-use. */
1454 bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1455 word = bit_offset / HOST_BITS_PER_LONG;
1456 bit = bit_offset % HOST_BITS_PER_LONG;
1457 pe->in_use_p[word] &= ~(1UL << bit);
1459 if (pe->num_free_objects++ == 0)
1463 /* If the page is completely full, then it's supposed to
1464 be after all pages that aren't. Since we've freed one
1465 object from a page that was full, we need to move the
1466 page to the head of the list.
1468 PE is the node we want to move. Q is the previous node
1469 and P is the next node in the list. */
1471 if (q && q->num_free_objects == 0)
1477 /* If PE was at the end of the list, then Q becomes the
1478 new end of the list. If PE was not the end of the
1479 list, then we need to update the PREV field for P. */
1481 G.page_tails[order] = q;
1485 /* Move PE to the head of the list. */
1486 pe->next = G.pages[order];
1488 G.pages[order]->prev = pe;
1489 G.pages[order] = pe;
1492 /* Reset the hint bit to point to the only free object. */
1493 pe->next_bit_hint = bit_offset;
1499 /* Subroutine of init_ggc which computes the pair of numbers used to
1500 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1502 This algorithm is taken from Granlund and Montgomery's paper
1503 "Division by Invariant Integers using Multiplication"
1504 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1508 compute_inverse (unsigned order)
1513 size = OBJECT_SIZE (order);
1515 while (size % 2 == 0)
1522 while (inv * size != 1)
1523 inv = inv * (2 - inv*size);
1525 DIV_MULT (order) = inv;
1526 DIV_SHIFT (order) = e;
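/* Worked example (illustrative; assumes a 32-bit size_t and an object
   size of 24): the loop above strips three factors of two, leaving the
   odd part 3 and e == 3.  Seeding the iteration with the odd part,
   each pass of inv = inv * (2 - inv*size) doubles the number of
   correct low bits (3, 0xFFFFFFEB, 0xFFFFFAAB, 0xFFAAAAAB, ...),
   converging after four iterations to 0xAAAAAAAB, the inverse of 3
   modulo 2^32.  Hence DIV_MULT == 0xAAAAAAAB and DIV_SHIFT == 3 for
   that order.  */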
1529 /* Initialize the ggc-mmap allocator. */
1535 G.pagesize = getpagesize();
1536 G.lg_pagesize = exact_log2 (G.pagesize);
1538 #ifdef HAVE_MMAP_DEV_ZERO
1539 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1540 if (G.dev_zero_fd == -1)
1541 internal_error ("open /dev/zero: %m");
1545 G.debug_file = fopen ("ggc-mmap.debug", "w");
1547 G.debug_file = stdout;
1551 /* StunOS has an amazing off-by-one error for the first mmap allocation
1552 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1553 believe, is an unaligned page allocation, which would cause us to
1554 hork badly if we tried to use it. */
1556 char *p = alloc_anon (NULL, G.pagesize);
1557 struct page_entry *e;
1558 if ((size_t)p & (G.pagesize - 1))
1560 /* How losing. Discard this one and try another. If we still
1561 can't get something useful, give up. */
1563 p = alloc_anon (NULL, G.pagesize);
1564 gcc_assert (!((size_t)p & (G.pagesize - 1)));
1567 /* We have a good page, might as well hold onto it... */
1568 e = XCNEW (struct page_entry);
1569 e->bytes = G.pagesize;
1571 e->next = G.free_pages;
1576 /* Initialize the object size table. */
1577 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1578 object_size_table[order] = (size_t) 1 << order;
1579 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1581 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1583 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1584 so that we're sure of getting aligned memory. */
1585 s = ROUND_UP (s, MAX_ALIGNMENT);
1586 object_size_table[order] = s;
1589 /* Initialize the objects-per-page and inverse tables. */
1590 for (order = 0; order < NUM_ORDERS; ++order)
1592 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1593 if (objects_per_page_table[order] == 0)
1594 objects_per_page_table[order] = 1;
1595 compute_inverse (order);
1598 /* Reset the size_lookup array to put appropriately sized objects in
1599 the special orders. All objects bigger than the previous power
1600 of two, but no greater than the special size, should go in the new order.
1602 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1607 i = OBJECT_SIZE (order);
1608 if (i >= NUM_SIZE_LOOKUP)
1611 for (o = size_lookup[i]; o == size_lookup [i]; --i)
1612 size_lookup[i] = order;
1617 G.depth = XNEWVEC (unsigned int, G.depth_max);
1619 G.by_depth_in_use = 0;
1620 G.by_depth_max = INITIAL_PTE_COUNT;
1621 G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1622 G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1625 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1626 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1629 ggc_recalculate_in_use_p (page_entry *p)
1634 /* Because the past-the-end bit in in_use_p is always set, we
1635 pretend there is one additional object. */
1636 num_objects = OBJECTS_IN_PAGE (p) + 1;
1638 /* Reset the free object count. */
1639 p->num_free_objects = num_objects;
1641 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1643 i < CEIL (BITMAP_SIZE (num_objects),
1644 sizeof (*p->in_use_p));
1649 /* Something is in use if it is marked, or if it was in use in a
1650 context further down the context stack. */
1651 p->in_use_p[i] |= save_in_use_p (p)[i];
1653 /* Decrement the free object count for every object allocated. */
1654 for (j = p->in_use_p[i]; j; j >>= 1)
1655 p->num_free_objects -= (j & 1);
1658 gcc_assert (p->num_free_objects < num_objects);
1661 /* Unmark all objects. */
1668 for (order = 2; order < NUM_ORDERS; order++)
1672 for (p = G.pages[order]; p != NULL; p = p->next)
1674 size_t num_objects = OBJECTS_IN_PAGE (p);
1675 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1677 /* The data should be page-aligned. */
1678 gcc_assert (!((size_t) p->page & (G.pagesize - 1)));
1680 /* Pages that aren't in the topmost context are not collected;
1681 nevertheless, we need their in-use bit vectors to store GC
1682 marks. So, back them up first. */
1683 if (p->context_depth < G.context_depth)
1685 if (! save_in_use_p (p))
1686 save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1687 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1690 /* Reset the number of free objects and clear the
1691 in-use bits. These will be adjusted by mark_obj. */
1692 p->num_free_objects = num_objects;
1693 memset (p->in_use_p, 0, bitmap_size);
1695 /* Make sure the one-past-the-end bit is always set. */
1696 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1697 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1702 /* Free all empty pages. Partially empty pages need no attention
1703 because the `mark' bit doubles as an `unused' bit. */
1710 for (order = 2; order < NUM_ORDERS; order++)
1712 /* The last page-entry to consider, regardless of entries
1713 placed at the end of the list. */
1714 page_entry * const last = G.page_tails[order];
1717 size_t live_objects;
1718 page_entry *p, *previous;
1728 page_entry *next = p->next;
1730 /* Loop until all entries have been examined. */
1733 num_objects = OBJECTS_IN_PAGE (p);
1735 /* Add all live objects on this page to the count of
1736 allocated memory. */
1737 live_objects = num_objects - p->num_free_objects;
1739 G.allocated += OBJECT_SIZE (order) * live_objects;
1741 /* Only objects on pages in the topmost context should get collected.
1743 if (p->context_depth < G.context_depth)
1746 /* Remove the page if it's empty. */
1747 else if (live_objects == 0)
1749 /* If P was the first page in the list, then NEXT
1750 becomes the new first page in the list, otherwise
1751 splice P out of the forward pointers. */
1753 G.pages[order] = next;
1755 previous->next = next;
1757 /* Splice P out of the back pointers too. */
1759 next->prev = previous;
1761 /* Are we removing the last element? */
1762 if (p == G.page_tails[order])
1763 G.page_tails[order] = previous;
1768 /* If the page is full, move it to the end. */
1769 else if (p->num_free_objects == 0)
1771 /* Don't move it if it's already at the end. */
1772 if (p != G.page_tails[order])
1774 /* Move p to the end of the list. */
1776 p->prev = G.page_tails[order];
1777 G.page_tails[order]->next = p;
1779 /* Update the tail pointer... */
1780 G.page_tails[order] = p;
1782 /* ... and the head pointer, if necessary. */
1784 G.pages[order] = next;
1786 previous->next = next;
1788 /* And update the backpointer in NEXT if necessary. */
1790 next->prev = previous;
1796 /* If we've fallen through to here, it's a page in the
1797 topmost context that is neither full nor empty. Such a
1798 page must precede pages at lesser context depth in the
1799 list, so move it to the head. */
1800 else if (p != G.pages[order])
1802 previous->next = p->next;
1804 /* Update the backchain in the next node if it exists. */
1806 p->next->prev = previous;
1808 /* Move P to the head of the list. */
1809 p->next = G.pages[order];
1811 G.pages[order]->prev = p;
1813 /* Update the head pointer. */
1816 /* Are we moving the last element? */
1817 if (G.page_tails[order] == p)
1818 G.page_tails[order] = previous;
1827 /* Now, restore the in_use_p vectors for any pages from contexts
1828 other than the current one. */
1829 for (p = G.pages[order]; p; p = p->next)
1830 if (p->context_depth != G.context_depth)
1831 ggc_recalculate_in_use_p (p);
1835 #ifdef ENABLE_GC_CHECKING
1836 /* Clobber all free objects. */
1843 for (order = 2; order < NUM_ORDERS; order++)
1845 size_t size = OBJECT_SIZE (order);
1848 for (p = G.pages[order]; p != NULL; p = p->next)
1853 if (p->context_depth != G.context_depth)
1854 /* Since we don't do any collection for pages in pushed
1855 contexts, there's no need to do any poisoning. And
1856 besides, the IN_USE_P array isn't valid until we pop contexts.
1860 num_objects = OBJECTS_IN_PAGE (p);
1861 for (i = 0; i < num_objects; i++)
1864 word = i / HOST_BITS_PER_LONG;
1865 bit = i % HOST_BITS_PER_LONG;
1866 if (((p->in_use_p[word] >> bit) & 1) == 0)
1868 char *object = p->page + i * size;
1870 /* Keep poison-by-write when we expect to use Valgrind,
1871 so the exact same memory semantics are kept, in case
1872 there are memory errors. We override this request below.
1874 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
1876 memset (object, 0xa5, size);
1878 /* Drop the handle to avoid handle leak. */
1879 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
1886 #define poison_pages()
1889 #ifdef ENABLE_GC_ALWAYS_COLLECT
1890 /* Validate that the reportedly free objects actually are. */
1893 validate_free_objects (void)
1895 struct free_object *f, *next, *still_free = NULL;
1897 for (f = G.free_object_list; f ; f = next)
1899 page_entry *pe = lookup_page_table_entry (f->object);
1902 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
1903 word = bit / HOST_BITS_PER_LONG;
1904 bit = bit % HOST_BITS_PER_LONG;
1907 /* Make certain it isn't visible from any root. Notice that we
1908 do this check before sweep_pages merges save_in_use_p. */
1909 gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
1911 /* If the object comes from an outer context, then retain the
1912 free_object entry, so that we can verify that the address
1913 isn't live on the stack in some outer context. */
1914 if (pe->context_depth != G.context_depth)
1916 f->next = still_free;
1923 G.free_object_list = still_free;
1926 #define validate_free_objects()
1929 /* Top level mark-and-sweep routine. */
1934 /* Avoid frequent unnecessary work by skipping collection if the
1935 total allocations haven't expanded much since the last collection.
1937 float allocated_last_gc =
1938 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1940 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1942 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
1945 timevar_push (TV_GC);
1947 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1948 if (GGC_DEBUG_LEVEL >= 2)
1949 fprintf (G.debug_file, "BEGIN COLLECTING\n");
1951 /* Zero the total allocated bytes. This will be recalculated in the sweep phase. */
1955 /* Release the pages we freed the last time we collected, but didn't
1956 reuse in the interim. */
1959 /* Indicate that we've seen collections at this context depth. */
1960 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
1962 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
1966 #ifdef GATHER_STATISTICS
1967 ggc_prune_overhead_list ();
1970 validate_free_objects ();
1973 G.allocated_last_gc = G.allocated;
1975 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
1977 timevar_pop (TV_GC);
1980 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1981 if (GGC_DEBUG_LEVEL >= 2)
1982 fprintf (G.debug_file, "END COLLECTING\n");
1985 /* Print allocation statistics. */
1986 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1987 ? (x) \
1988 : ((x) < 1024*1024*10 \
1989 ? (x) / 1024 \
1990 : (x) / (1024*1024))))
1991 #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
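/* Worked example (illustrative) of the scaling macros above:
   SCALE (5000) == 5000 with STAT_LABEL ' ' (printed as bytes),
   SCALE (2621440) == 2560 with STAT_LABEL 'k' (2.5MB printed in KB),
   and SCALE (50000000) == 47 with STAT_LABEL 'M' (printed in MB).  */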
1994 ggc_print_statistics (void)
1996 struct ggc_statistics stats;
1998 size_t total_overhead = 0;
2000 /* Clear the statistics. */
2001 memset (&stats, 0, sizeof (stats));
2003 /* Make sure collection will really occur. */
2004 G.allocated_last_gc = 0;
2006 /* Collect and print the statistics common across collectors. */
2007 ggc_print_common_statistics (stderr, &stats);
2009 /* Release free pages so that we will not count the bytes allocated
2010 there as part of the total allocated memory. */
2013 /* Collect some information about the various sizes of
2016 "Memory still allocated at the end of the compilation process\n");
2017 fprintf (stderr, "%-5s %10s %10s %10s\n",
2018 "Size", "Allocated", "Used", "Overhead");
2019 for (i = 0; i < NUM_ORDERS; ++i)
2026 /* Skip empty entries. */
2030 overhead = allocated = in_use = 0;
2032 /* Figure out the total number of bytes allocated for objects of
2033 this size, and how many of them are actually in use. Also figure
2034 out how much memory the page table is using. */
2035 for (p = G.pages[i]; p; p = p->next)
2037 allocated += p->bytes;
2039 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2041 overhead += (sizeof (page_entry) - sizeof (long)
2042 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2044 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
2045 (unsigned long) OBJECT_SIZE (i),
2046 SCALE (allocated), STAT_LABEL (allocated),
2047 SCALE (in_use), STAT_LABEL (in_use),
2048 SCALE (overhead), STAT_LABEL (overhead));
2049 total_overhead += overhead;
2051 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
2052 SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
2053 SCALE (G.allocated), STAT_LABEL(G.allocated),
2054 SCALE (total_overhead), STAT_LABEL (total_overhead));
2056 #ifdef GATHER_STATISTICS
2058 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2060 fprintf (stderr, "Total Overhead: %10lld\n",
2061 G.stats.total_overhead);
2062 fprintf (stderr, "Total Allocated: %10lld\n",
2063 G.stats.total_allocated);
2065 fprintf (stderr, "Total Overhead under 32B: %10lld\n",
2066 G.stats.total_overhead_under32);
2067 fprintf (stderr, "Total Allocated under 32B: %10lld\n",
2068 G.stats.total_allocated_under32);
2069 fprintf (stderr, "Total Overhead under 64B: %10lld\n",
2070 G.stats.total_overhead_under64);
2071 fprintf (stderr, "Total Allocated under 64B: %10lld\n",
2072 G.stats.total_allocated_under64);
2073 fprintf (stderr, "Total Overhead under 128B: %10lld\n",
2074 G.stats.total_overhead_under128);
2075 fprintf (stderr, "Total Allocated under 128B: %10lld\n",
2076 G.stats.total_allocated_under128);
2078 for (i = 0; i < NUM_ORDERS; i++)
2079 if (G.stats.total_allocated_per_order[i])
2081 fprintf (stderr, "Total Overhead page size %7lu: %10lld\n",
2082 (unsigned long) OBJECT_SIZE (i),
2083 G.stats.total_overhead_per_order[i]);
2084 fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
2085 (unsigned long) OBJECT_SIZE (i),
2086 G.stats.total_allocated_per_order[i]);
2092 struct ggc_pch_ondisk
2094 unsigned totals[NUM_ORDERS];
2099 struct ggc_pch_ondisk d;
2100 size_t base[NUM_ORDERS];
2101 size_t written[NUM_ORDERS];
2104 struct ggc_pch_data *
2107 return XCNEW (struct ggc_pch_data);
2111 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2112 size_t size, bool is_string ATTRIBUTE_UNUSED,
2113 enum gt_types_enum type ATTRIBUTE_UNUSED)
2117 if (size < NUM_SIZE_LOOKUP)
2118 order = size_lookup[size];
2122 while (size > OBJECT_SIZE (order))
2126 d->d.totals[order]++;
2130 ggc_pch_total_size (struct ggc_pch_data *d)
2135 for (i = 0; i < NUM_ORDERS; i++)
2136 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2141 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2143 size_t a = (size_t) base;
2146 for (i = 0; i < NUM_ORDERS; i++)
2149 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2155 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2156 size_t size, bool is_string ATTRIBUTE_UNUSED,
2157 enum gt_types_enum type ATTRIBUTE_UNUSED)
2162 if (size < NUM_SIZE_LOOKUP)
2163 order = size_lookup[size];
2167 while (size > OBJECT_SIZE (order))
2171 result = (char *) d->base[order];
2172 d->base[order] += OBJECT_SIZE (order);
2177 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2178 FILE *f ATTRIBUTE_UNUSED)
2180 /* Nothing to do. */
2184 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2185 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2186 size_t size, bool is_string ATTRIBUTE_UNUSED)
2189 static const char emptyBytes[256] = { 0 };
2191 if (size < NUM_SIZE_LOOKUP)
2192 order = size_lookup[size];
2196 while (size > OBJECT_SIZE (order))
2200 if (fwrite (x, size, 1, f) != 1)
2201 fatal_error ("can%'t write PCH file: %m");
2203 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2204 object out to OBJECT_SIZE(order). This happens for strings. */
2206 if (size != OBJECT_SIZE (order))
2208 unsigned padding = OBJECT_SIZE(order) - size;
2210 /* To speed small writes, we use a nulled-out array that's larger
2211 than most padding requests as the source for our null bytes. This
2212 permits us to do the padding with fwrite() rather than fseek(), and
2213 limits the chance the OS may try to flush any outstanding writes. */
2214 if (padding <= sizeof(emptyBytes))
2216 if (fwrite (emptyBytes, 1, padding, f) != padding)
2217 fatal_error ("can%'t write PCH file");
2221 /* Larger than our buffer? Just default to fseek. */
2222 if (fseek (f, padding, SEEK_CUR) != 0)
2223 fatal_error ("can%'t write PCH file");
2227 d->written[order]++;
2228 if (d->written[order] == d->d.totals[order]
2229 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2232 fatal_error ("can%'t write PCH file: %m");
2236 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2238 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2239 fatal_error ("can%'t write PCH file: %m");
2243 /* Move the PCH PTE entries just added to the end of by_depth, to the front. */
2247 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2251 /* First, we swap the new entries to the front of the varrays. */
2252 page_entry **new_by_depth;
2253 unsigned long **new_save_in_use;
2255 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2256 new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2258 memcpy (&new_by_depth[0],
2259 &G.by_depth[count_old_page_tables],
2260 count_new_page_tables * sizeof (void *));
2261 memcpy (&new_by_depth[count_new_page_tables],
2263 count_old_page_tables * sizeof (void *));
2264 memcpy (&new_save_in_use[0],
2265 &G.save_in_use[count_old_page_tables],
2266 count_new_page_tables * sizeof (void *));
2267 memcpy (&new_save_in_use[count_new_page_tables],
2269 count_old_page_tables * sizeof (void *));
2272 free (G.save_in_use);
2274 G.by_depth = new_by_depth;
2275 G.save_in_use = new_save_in_use;
2277 /* Now update all the index_by_depth fields. */
2278 for (i = G.by_depth_in_use; i > 0; --i)
2280 page_entry *p = G.by_depth[i-1];
2281 p->index_by_depth = i-1;
2284 /* And last, we update the depth pointers in G.depth. The first
2285 entry is already 0, and context 0 entries always start at index
2286 0, so there is nothing to update in the first slot. We need a
2287 second slot only if we have old ptes, and if we do, they start
2288 at index count_new_page_tables. */
2289 if (count_old_page_tables)
2290 push_depth (count_new_page_tables);
2294 ggc_pch_read (FILE *f, void *addr)
2296 struct ggc_pch_ondisk d;
2298 char *offs = (char *) addr;
2299 unsigned long count_old_page_tables;
2300 unsigned long count_new_page_tables;
2302 count_old_page_tables = G.by_depth_in_use;
2304 /* We've just read in a PCH file. So, every object that used to be
2305 allocated is now free. */
2307 #ifdef ENABLE_GC_CHECKING
2310 /* Since we free all the allocated objects, the free list becomes
2311 useless. Validate it now, which will also clear it. */
2312 validate_free_objects();
2314 /* No object read from a PCH file should ever be freed. So, set the
2315 context depth to 1, and set the depth of all the currently-allocated
2316 pages to be 1 too. PCH pages will have depth 0. */
2317 gcc_assert (!G.context_depth);
2318 G.context_depth = 1;
2319 for (i = 0; i < NUM_ORDERS; i++)
2322 for (p = G.pages[i]; p != NULL; p = p->next)
2323 p->context_depth = G.context_depth;
2326 /* Allocate the appropriate page-table entries for the pages read from the PCH file.
2328 if (fread (&d, sizeof (d), 1, f) != 1)
2329 fatal_error ("can%'t read PCH file: %m");
2331 for (i = 0; i < NUM_ORDERS; i++)
2333 struct page_entry *entry;
2339 if (d.totals[i] == 0)
2342 bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2343 num_objs = bytes / OBJECT_SIZE (i);
2344 entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2346 + BITMAP_SIZE (num_objs + 1)));
2347 entry->bytes = bytes;
2349 entry->context_depth = 0;
2351 entry->num_free_objects = 0;
2355 j + HOST_BITS_PER_LONG <= num_objs + 1;
2356 j += HOST_BITS_PER_LONG)
2357 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2358 for (; j < num_objs + 1; j++)
2359 entry->in_use_p[j / HOST_BITS_PER_LONG]
2360 |= 1L << (j % HOST_BITS_PER_LONG);
2362 for (pte = entry->page;
2363 pte < entry->page + entry->bytes;
2365 set_page_table_entry (pte, entry);
2367 if (G.page_tails[i] != NULL)
2368 G.page_tails[i]->next = entry;
2371 G.page_tails[i] = entry;
2373 /* We start off by just adding all the new information to the
2374 end of the varrays; later, we will move the new information
2375 to the front of the varrays, as the PCH page tables are at context 0. */
2377 push_by_depth (entry, 0);
2380 /* Now, we update the various data structures that speed page table handling. */
2382 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2384 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2386 /* Update the statistics. */
2387 G.allocated = G.allocated_last_gc = offs - (char *)addr;
2395 struct alloc_zone rtl_zone;
2396 struct alloc_zone tree_zone;
2397 struct alloc_zone tree_id_zone;