1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009,
3 2010 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
28 #include "toplev.h" /* exact_log2 */
29 #include "diagnostic-core.h"
32 #include "ggc-internal.h"
35 #include "tree-flow.h"
39 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
40 file open. Prefer either to valloc. */
42 # undef HAVE_MMAP_DEV_ZERO
46 #ifdef HAVE_MMAP_DEV_ZERO
51 #define USING_MALLOC_PAGE_GROUPS
56 This garbage-collecting allocator allocates objects on one of a set
57 of pages. Each page can allocate objects of a single size only;
58 available sizes are powers of two starting at four bytes. The size
59 of an allocation request is rounded up to the next power of two
60 (`order'), and satisfied from the appropriate page.
62 Each page is recorded in a page-entry, which also maintains an
63 in-use bitmap of object positions on the page. This allows the
64 allocation state of a particular object to be flipped without
65 touching the page itself.
67 Each page-entry also has a context depth, which is used to track
68 pushing and popping of allocation contexts. Only objects allocated
69 in the current (highest-numbered) context may be collected.
71 Page entries are arranged in an array of singly-linked lists. The
72 array is indexed by the allocation size, in bits, of the pages on
73 it; i.e. all pages on a list allocate objects of the same size.
74 Pages are ordered on the list such that all non-full pages precede
75 all full pages, with non-full pages arranged in order of decreasing context depth.
78 Empty pages (of all orders) are kept on a single page cache list,
79 and are considered first when new pages are required; they are
80 deallocated at the start of the next collection if they haven't
81 been recycled by then. */
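/* Illustrative sketch (not part of the original file): how a request size
   maps to an allocation `order' under the scheme described above.  The
   real allocator uses the size_lookup table further down for small
   requests and also has special non-power-of-two orders; this loop only
   demonstrates the power-of-two rounding rule.  Kept under #if 0 so it
   does not affect the build.  */
#if 0
static unsigned
example_size_to_order (size_t size)
{
  unsigned order = 0;
  while (((size_t) 1 << order) < size)
    order++;			/* Round up to the next power of two.  */
  return order;			/* e.g. 24 bytes -> order 5 (32 bytes).  */
}
#endif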
83 /* Define GGC_DEBUG_LEVEL to print debugging information.
84 0: No debugging output.
85 1: GC statistics only.
86 2: Page-entry allocations/deallocations as well.
87 3: Object allocations as well.
88 4: Object marks as well. */
89 #define GGC_DEBUG_LEVEL (0)
91 #ifndef HOST_BITS_PER_PTR
92 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
96 /* A two-level tree is used to look up the page-entry for a given
97 pointer. Two chunks of the pointer's bits are extracted to index
98 the first and second levels of the tree, as follows:
102 msb | PAGE_L1_BITS | PAGE_L2_BITS | HOST_PAGE_SIZE_BITS | lsb
108 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
109 pages are aligned on system page boundaries. The next most
110 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
111 index values in the lookup table, respectively.
113 For 32-bit architectures and the settings below, there are no
114 leftover bits. For architectures with wider pointers, the lookup
115 tree points to a list of pages, which must be scanned to find the correct one. */
118 #define PAGE_L1_BITS (8)
119 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
120 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
121 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
123 #define LOOKUP_L1(p) \
124 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
126 #define LOOKUP_L2(p) \
127 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
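/* Illustrative sketch (hypothetical, not part of the original file): how
   LOOKUP_L1 and LOOKUP_L2 carve a pointer into the two table indices on a
   32-bit host with 4K pages (G.lg_pagesize == 12, PAGE_L2_BITS == 12).
   For p == 0x1234A678:  L1 == (p >> 24) & 0xff == 0x12,
   L2 == (p >> 12) & 0xfff == 0x34A, and the low 12 bits (0x678) are the
   offset within the page.  Kept under #if 0 so it does not affect the
   build.  */
#if 0
static void
example_split_pointer (const void *p, size_t *l1, size_t *l2)
{
  *l1 = LOOKUP_L1 (p);		/* First-level index: top PAGE_L1_BITS of the low 32.  */
  *l2 = LOOKUP_L2 (p);		/* Second-level index: the next PAGE_L2_BITS.  */
}
#endif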
129 /* The number of objects per allocation page, for objects on a page of
130 the indicated ORDER. */
131 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
133 /* The number of objects in P. */
134 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
136 /* The size of an object on a page of the indicated ORDER. */
137 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
139 /* For speed, we avoid doing a general integer divide to locate the
140 offset in the allocation bitmap, by precalculating numbers M, S
141 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
142 within the page which is evenly divisible by the object size Z. */
143 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
144 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
145 #define OFFSET_TO_BIT(OFFSET, ORDER) \
146 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
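/* Worked example (not part of the original file) of the multiply/shift
   trick above.  For an object size Z == 24 == 3 * 2^3, the inverse of 3
   modulo 2^32 is 0xAAAAAAAB, so DIV_MULT would be 0xAAAAAAAB and
   DIV_SHIFT would be 3.  For the in-page offset O == 48:
     (48 * 0xAAAAAAAB) mod 2^32 == 16, and 16 >> 3 == 2 == 48 / 24.
   The helper below assumes 32-bit unsigned arithmetic and is kept under
   #if 0 so it does not affect the build.  */
#if 0
static unsigned int
example_offset_to_bit_24 (unsigned int offset)	/* OFFSET must be a multiple of 24.  */
{
  return (offset * 0xAAAAAAABu) >> 3;		/* == offset / 24 for such offsets.  */
}
#endif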
148 /* We use this structure to determine the alignment required for
149 allocations. For power-of-two sized allocations, that's not a
150 problem, but it does matter for odd-sized allocations.
151 We do not care about alignment for floating-point types. */
153 struct max_alignment {
161 /* The biggest alignment required. */
163 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
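/* Illustrative sketch (hypothetical, not part of the original file) of the
   offsetof trick above.  Because the union member `u' must be placed at
   the smallest offset that satisfies the strictest alignment among its
   members, offsetof (struct max_alignment, u) is exactly that alignment.
   Kept under #if 0 so it does not affect the build.  */
#if 0
struct example_max_alignment
{
  char c;				/* Forces `u' away from offset 0.  */
  union { long long i; long double d; } u;
};
/* On a typical LP64 host this evaluates to 16, the alignment of long double.  */
#define EXAMPLE_MAX_ALIGNMENT offsetof (struct example_max_alignment, u)
#endif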
166 /* The number of extra orders, not corresponding to power-of-two sized objects. */
169 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
171 #define RTL_SIZE(NSLOTS) \
172 (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
174 #define TREE_EXP_SIZE(OPS) \
175 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
177 /* The Ith entry is the maximum size of an object to be stored in the
178 Ith extra order. Adding a new entry to this array is the *only*
179 thing you need to do to add a new special allocation size. */
181 static const size_t extra_order_size_table[] = {
182 /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
183 There are a lot of structures with these sizes and explicitly
184 listing them risks orders being dropped because they changed size. */
196 sizeof (struct tree_decl_non_common),
197 sizeof (struct tree_field_decl),
198 sizeof (struct tree_parm_decl),
199 sizeof (struct tree_var_decl),
200 sizeof (struct tree_type),
201 sizeof (struct function),
202 sizeof (struct basic_block_def),
203 sizeof (struct cgraph_node),
204 sizeof (struct loop),
207 /* The total number of orders. */
209 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
211 /* Compute the smallest nonnegative number which when added to X gives a multiple of F. */
214 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
216 /* Compute the smallest multiple of F that is >= X. */
218 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
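/* Worked example (not part of the original file): with x == 37 and f == 8,
   ROUND_UP_VALUE (37, 8) == 3 and ROUND_UP (37, 8) == 40, since 37 + 3 == 40
   is the smallest multiple of 8 that is >= 37.  The compile-time checks
   below are kept under #if 0 so they do not affect the build.  */
#if 0
extern char example_round_up_value_check[ROUND_UP_VALUE (37, 8) == 3 ? 1 : -1];
extern char example_round_up_check[ROUND_UP (37, 8) == 40 ? 1 : -1];
#endif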
220 /* The Ith entry is the number of objects on a page of order I. */
222 static unsigned objects_per_page_table[NUM_ORDERS];
224 /* The Ith entry is the size of an object on a page of order I. */
226 static size_t object_size_table[NUM_ORDERS];
228 /* The Ith entry is a pair of numbers (mult, shift) such that
229 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
230 for all k evenly divisible by OBJECT_SIZE(I). */
237 static struct { size_t mult; unsigned int shift; } inverse_table[NUM_ORDERS];
239 /* A page_entry records the status of an allocation page. This
240 structure is dynamically sized to fit the bitmap in_use_p. */
241 typedef struct page_entry
243 /* The next page-entry with objects of the same size, or NULL if
244 this is the last page-entry. */
245 struct page_entry *next;
247 /* The previous page-entry with objects of the same size, or NULL if
248 this is the first page-entry. The PREV pointer exists solely to
249 keep the cost of ggc_free manageable. */
250 struct page_entry *prev;
252 /* The number of bytes allocated. (This will always be a multiple
253 of the host system page size.) */
256 /* The address at which the memory is allocated. */
259 #ifdef USING_MALLOC_PAGE_GROUPS
260 /* Back pointer to the page group this page came from. */
261 struct page_group *group;
264 /* This is the index in the by_depth varray where this page table
266 unsigned long index_by_depth;
268 /* Context depth of this page. */
269 unsigned short context_depth;
271 /* The number of free objects remaining on this page. */
272 unsigned short num_free_objects;
274 /* A likely candidate for the bit position of a free object for the
275 next allocation from this page. */
276 unsigned short next_bit_hint;
278 /* The lg of size of objects allocated from this page. */
281 /* A bit vector indicating whether or not objects are in use. The
282 Nth bit is one if the Nth object on this page is allocated. This
283 array is dynamically sized. */
284 unsigned long in_use_p[1];
287 #ifdef USING_MALLOC_PAGE_GROUPS
288 /* A page_group describes a large allocation from malloc, from which
289 we parcel out aligned pages. */
290 typedef struct page_group
292 /* A linked list of all extant page groups. */
293 struct page_group *next;
295 /* The address we received from malloc. */
298 /* The size of the block. */
301 /* A bitmask of pages in use. */
306 #if HOST_BITS_PER_PTR <= 32
308 /* On 32-bit hosts, we use a two level page table, as pictured above. */
309 typedef page_entry **page_table[PAGE_L1_SIZE];
313 /* On 64-bit hosts, we use the same two level page tables plus a linked
314 list that disambiguates the top 32-bits. There will almost always be
315 exactly one entry in the list. */
316 typedef struct page_table_chain
318 struct page_table_chain *next;
320 page_entry **table[PAGE_L1_SIZE];
325 #ifdef ENABLE_GC_ALWAYS_COLLECT
326 /* List of free objects to be verified as actually free on the
331 struct free_object *next;
335 /* The rest of the global variables. */
336 static struct globals
338 /* The Nth element in this array is a page with objects of size 2^N.
339 If there are any pages with free objects, they will be at the
340 head of the list. NULL if there are no page-entries for this
342 page_entry *pages[NUM_ORDERS];
344 /* The Nth element in this array is the last page with objects of
345 size 2^N. NULL if there are no page-entries for this object
347 page_entry *page_tails[NUM_ORDERS];
349 /* Lookup table for associating allocation pages with object addresses. */
352 /* The system's page size. */
356 /* Bytes currently allocated. */
359 /* Bytes currently allocated at the end of the last collection. */
360 size_t allocated_last_gc;
362 /* Total amount of memory mapped. */
365 /* Bit N set if any allocations have been done at context depth N. */
366 unsigned long context_depth_allocations;
368 /* Bit N set if any collections have been done at context depth N. */
369 unsigned long context_depth_collections;
371 /* The current depth in the context stack. */
372 unsigned short context_depth;
374 /* A file descriptor open to /dev/zero for reading. */
375 #if defined (HAVE_MMAP_DEV_ZERO)
379 /* A cache of free system pages. */
380 page_entry *free_pages;
382 #ifdef USING_MALLOC_PAGE_GROUPS
383 page_group *page_groups;
386 /* The file descriptor for debugging output. */
389 /* Current number of elements in use in depth below. */
390 unsigned int depth_in_use;
392 /* Maximum number of elements that can be used before resizing. */
393 unsigned int depth_max;
395 /* Each element of this array is an index in by_depth where the given
396 depth starts. This array is indexed by the depth we
397 are interested in. */
400 /* Current number of elements in use in by_depth below. */
401 unsigned int by_depth_in_use;
403 /* Maximum number of elements that can be used before resizing. */
404 unsigned int by_depth_max;
406 /* Each element of this array is a pointer to a page_entry, all
407 page_entries can be found in here by increasing depth.
408 index_by_depth in the page_entry is the index into this data
409 structure where that page_entry can be found. This is used to
410 speed up finding all page_entries at a particular depth. */
411 page_entry **by_depth;
413 /* Each element is a pointer to the saved in_use_p bits, if any,
414 zero otherwise. We allocate them all together, to enable a
415 better runtime data access pattern. */
416 unsigned long **save_in_use;
418 #ifdef ENABLE_GC_ALWAYS_COLLECT
419 /* List of free objects to be verified as actually free on the
421 struct free_object *free_object_list;
424 #ifdef GATHER_STATISTICS
427 /* Total GC-allocated memory. */
428 unsigned long long total_allocated;
429 /* Total overhead for GC-allocated memory. */
430 unsigned long long total_overhead;
432 /* Total allocations and overhead for sizes less than 32, 64 and 128.
433 These sizes are interesting because they are typical cache line sizes. */
436 unsigned long long total_allocated_under32;
437 unsigned long long total_overhead_under32;
439 unsigned long long total_allocated_under64;
440 unsigned long long total_overhead_under64;
442 unsigned long long total_allocated_under128;
443 unsigned long long total_overhead_under128;
445 /* The allocations for each of the allocation orders. */
446 unsigned long long total_allocated_per_order[NUM_ORDERS];
448 /* The overhead for each of the allocation orders. */
449 unsigned long long total_overhead_per_order[NUM_ORDERS];
454 /* The size in bytes required to maintain a bitmap for the objects on a page-entry. */
456 #define BITMAP_SIZE(Num_objects) \
457 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
459 /* Allocate pages in chunks of this size, to throttle calls to memory
460 allocation routines. The first page is used, the rest go onto the
461 free list. This cannot be larger than HOST_BITS_PER_INT for the
462 in_use bitmask for page_group. Hosts that need a different value
463 can override this by defining GGC_QUIRE_SIZE explicitly. */
464 #ifndef GGC_QUIRE_SIZE
466 # define GGC_QUIRE_SIZE 256
468 # define GGC_QUIRE_SIZE 16
472 /* Initial guess as to how many page table entries we might need. */
473 #define INITIAL_PTE_COUNT 128
475 static int ggc_allocated_p (const void *);
476 static page_entry *lookup_page_table_entry (const void *);
477 static void set_page_table_entry (void *, page_entry *);
479 static char *alloc_anon (char *, size_t);
481 #ifdef USING_MALLOC_PAGE_GROUPS
482 static size_t page_group_index (char *, char *);
483 static void set_page_group_in_use (page_group *, char *);
484 static void clear_page_group_in_use (page_group *, char *);
486 static struct page_entry * alloc_page (unsigned);
487 static void free_page (struct page_entry *);
488 static void release_pages (void);
489 static void clear_marks (void);
490 static void sweep_pages (void);
491 static void ggc_recalculate_in_use_p (page_entry *);
492 static void compute_inverse (unsigned);
493 static inline void adjust_depth (void);
494 static void move_ptes_to_front (int, int);
496 void debug_print_page_list (int);
497 static void push_depth (unsigned int);
498 static void push_by_depth (page_entry *, unsigned long *);
500 /* Push an entry onto G.depth. */
503 push_depth (unsigned int i)
505 if (G.depth_in_use >= G.depth_max)
508 G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
510 G.depth[G.depth_in_use++] = i;
513 /* Push an entry onto G.by_depth and G.save_in_use. */
516 push_by_depth (page_entry *p, unsigned long *s)
518 if (G.by_depth_in_use >= G.by_depth_max)
521 G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
522 G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
525 G.by_depth[G.by_depth_in_use] = p;
526 G.save_in_use[G.by_depth_in_use++] = s;
529 #if (GCC_VERSION < 3001)
530 #define prefetch(X) ((void) X)
531 #else
532 #define prefetch(X) __builtin_prefetch (X)
533 #endif
535 #define save_in_use_p_i(__i) \
537 #define save_in_use_p(__p) \
538 (save_in_use_p_i (__p->index_by_depth))
540 /* Returns nonzero if P was allocated in GC'able memory. */
543 ggc_allocated_p (const void *p)
548 #if HOST_BITS_PER_PTR <= 32
551 page_table table = G.lookup;
552 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
557 if (table->high_bits == high_bits)
561 base = &table->table[0];
564 /* Extract the level 1 and 2 indices. */
568 return base[L1] && base[L1][L2];
571 /* Traverse the page table and find the entry for a page.
572 Die (probably) if the object wasn't allocated via GC. */
574 static inline page_entry *
575 lookup_page_table_entry (const void *p)
580 #if HOST_BITS_PER_PTR <= 32
583 page_table table = G.lookup;
584 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
585 while (table->high_bits != high_bits)
587 base = &table->table[0];
590 /* Extract the level 1 and 2 indices. */
597 /* Set the page table entry for a page. */
600 set_page_table_entry (void *p, page_entry *entry)
605 #if HOST_BITS_PER_PTR <= 32
609 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
610 for (table = G.lookup; table; table = table->next)
611 if (table->high_bits == high_bits)
614 /* Not found -- allocate a new table. */
615 table = XCNEW (struct page_table_chain);
616 table->next = G.lookup;
617 table->high_bits = high_bits;
620 base = &table->table[0];
623 /* Extract the level 1 and 2 indices. */
627 if (base[L1] == NULL)
628 base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
630 base[L1][L2] = entry;
633 /* Prints the page-entry for object size ORDER, for debugging. */
636 debug_print_page_list (int order)
639 printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
640 (void *) G.page_tails[order]);
644 printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
645 p->num_free_objects);
653 /* Allocate SIZE bytes of anonymous memory, preferably near PREF,
654 (if non-null). The ifdef structure here is intended to cause a
655 compile error unless exactly one of the HAVE_* is defined. */
658 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
660 #ifdef HAVE_MMAP_ANON
661 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
662 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
664 #ifdef HAVE_MMAP_DEV_ZERO
665 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
666 MAP_PRIVATE, G.dev_zero_fd, 0);
669 if (page == (char *) MAP_FAILED)
671 perror ("virtual memory exhausted");
672 exit (FATAL_EXIT_CODE);
675 /* Remember that we allocated this memory. */
676 G.bytes_mapped += size;
678 /* Pretend we don't have access to the allocated pages. We'll enable
679 access to smaller pieces of the area in ggc_internal_alloc. Discard the
680 handle to avoid handle leak. */
681 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
686 #ifdef USING_MALLOC_PAGE_GROUPS
687 /* Compute the index for this page into the page group. */
690 page_group_index (char *allocation, char *page)
692 return (size_t) (page - allocation) >> G.lg_pagesize;
695 /* Set and clear the in_use bit for this page in the page group. */
698 set_page_group_in_use (page_group *group, char *page)
700 group->in_use |= 1 << page_group_index (group->allocation, page);
704 clear_page_group_in_use (page_group *group, char *page)
706 group->in_use &= ~(1 << page_group_index (group->allocation, page));
710 /* Allocate a new page for allocating objects of size 2^ORDER,
711 and return an entry for it. The entry is not added to the
712 appropriate page_table list. */
714 static inline struct page_entry *
715 alloc_page (unsigned order)
717 struct page_entry *entry, *p, **pp;
721 size_t page_entry_size;
723 #ifdef USING_MALLOC_PAGE_GROUPS
727 num_objects = OBJECTS_PER_PAGE (order);
728 bitmap_size = BITMAP_SIZE (num_objects + 1);
729 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
730 entry_size = num_objects * OBJECT_SIZE (order);
731 if (entry_size < G.pagesize)
732 entry_size = G.pagesize;
737 /* Check the list of free pages for one we can use. */
738 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
739 if (p->bytes == entry_size)
744 /* Recycle the allocated memory from this page ... */
748 #ifdef USING_MALLOC_PAGE_GROUPS
752 /* ... and, if possible, the page entry itself. */
753 if (p->order == order)
756 memset (entry, 0, page_entry_size);
762 else if (entry_size == G.pagesize)
764 /* We want just one page. Allocate a bunch of them and put the
765 extras on the freelist. (Can only do this optimization with
766 mmap for backing store.) */
767 struct page_entry *e, *f = G.free_pages;
770 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
772 /* This loop counts down so that the chain will be in ascending memory order. */
774 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
776 e = XCNEWVAR (struct page_entry, page_entry_size);
778 e->bytes = G.pagesize;
779 e->page = page + (i << G.lg_pagesize);
787 page = alloc_anon (NULL, entry_size);
789 #ifdef USING_MALLOC_PAGE_GROUPS
792 /* Allocate a large block of memory and serve out the aligned
793 pages therein. This results in much less memory wastage
794 than the traditional implementation of valloc. */
796 char *allocation, *a, *enda;
797 size_t alloc_size, head_slop, tail_slop;
798 int multiple_pages = (entry_size == G.pagesize);
801 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
803 alloc_size = entry_size + G.pagesize - 1;
804 allocation = XNEWVEC (char, alloc_size);
806 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
807 head_slop = page - allocation;
809 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
811 tail_slop = alloc_size - entry_size - head_slop;
812 enda = allocation + alloc_size - tail_slop;
814 /* We allocated N pages, which are likely not aligned, leaving
815 us with N-1 usable pages. We plan to place the page_group
816 structure somewhere in the slop. */
817 if (head_slop >= sizeof (page_group))
818 group = (page_group *)page - 1;
821 /* We magically got an aligned allocation. Too bad, we have
822 to waste a page anyway. */
826 tail_slop += G.pagesize;
828 gcc_assert (tail_slop >= sizeof (page_group));
829 group = (page_group *)enda;
830 tail_slop -= sizeof (page_group);
833 /* Remember that we allocated this memory. */
834 group->next = G.page_groups;
835 group->allocation = allocation;
836 group->alloc_size = alloc_size;
838 G.page_groups = group;
839 G.bytes_mapped += alloc_size;
841 /* If we allocated multiple pages, put the rest on the free list. */
844 struct page_entry *e, *f = G.free_pages;
845 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
847 e = XCNEWVAR (struct page_entry, page_entry_size);
849 e->bytes = G.pagesize;
861 entry = XCNEWVAR (struct page_entry, page_entry_size);
863 entry->bytes = entry_size;
865 entry->context_depth = G.context_depth;
866 entry->order = order;
867 entry->num_free_objects = num_objects;
868 entry->next_bit_hint = 1;
870 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
872 #ifdef USING_MALLOC_PAGE_GROUPS
873 entry->group = group;
874 set_page_group_in_use (group, page);
877 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
878 increment the hint. */
879 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
880 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
882 set_page_table_entry (page, entry);
884 if (GGC_DEBUG_LEVEL >= 2)
885 fprintf (G.debug_file,
886 "Allocating page at %p, object size=%lu, data %p-%p\n",
887 (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
888 page + entry_size - 1);
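/* Illustrative sketch (hypothetical, not part of the original file) of the
   alignment arithmetic used for malloc-backed page groups above: the raw
   malloc block is rounded up to the next page boundary, the bytes skipped
   at the front are the head slop, and the page_group header is later
   tucked into whichever slop region is large enough.  Assumes a
   power-of-two page size.  Kept under #if 0 so it does not affect the
   build.  */
#if 0
static char *
example_align_to_page (char *allocation, size_t pagesize, size_t *head_slop)
{
  char *page = (char *) (((size_t) allocation + pagesize - 1) & -pagesize);
  *head_slop = page - allocation;	/* Zero if malloc was already aligned.  */
  return page;
}
#endif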
893 /* Adjust the size of G.depth so that no index greater than the one
894 used by the top of the G.by_depth is used. */
901 if (G.by_depth_in_use)
903 top = G.by_depth[G.by_depth_in_use-1];
905 /* Peel back indices in depth that index into by_depth, so that
906 as new elements are added to by_depth, we note the indices
907 of those elements, if they are for new context depths. */
908 while (G.depth_in_use > (size_t)top->context_depth+1)
913 /* For a page that is no longer needed, put it on the free page list. */
916 free_page (page_entry *entry)
918 if (GGC_DEBUG_LEVEL >= 2)
919 fprintf (G.debug_file,
920 "Deallocating page at %p, data %p-%p\n", (void *) entry,
921 entry->page, entry->page + entry->bytes - 1);
923 /* Mark the page as inaccessible. Discard the handle to avoid handle
925 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
927 set_page_table_entry (entry->page, NULL);
929 #ifdef USING_MALLOC_PAGE_GROUPS
930 clear_page_group_in_use (entry->group, entry->page);
933 if (G.by_depth_in_use > 1)
935 page_entry *top = G.by_depth[G.by_depth_in_use-1];
936 int i = entry->index_by_depth;
938 /* We cannot free a page from a context deeper than the current
940 gcc_assert (entry->context_depth == top->context_depth);
942 /* Put top element into freed slot. */
944 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
945 top->index_by_depth = i;
951 entry->next = G.free_pages;
952 G.free_pages = entry;
955 /* Release the free page cache to the system. */
961 page_entry *p, *next;
965 /* Gather up adjacent pages so they are unmapped together. */
976 while (p && p->page == start + len)
985 G.bytes_mapped -= len;
990 #ifdef USING_MALLOC_PAGE_GROUPS
994 /* Remove all pages from free page groups from the list. */
996 while ((p = *pp) != NULL)
997 if (p->group->in_use == 0)
1005 /* Remove all free page groups, and release the storage. */
1006 gp = &G.page_groups;
1007 while ((g = *gp) != NULL)
1011 G.bytes_mapped -= g->alloc_size;
1012 free (g->allocation);
1019 /* This table provides a fast way to determine ceil(log_2(size)) for
1020 allocation requests. The minimum allocation size is eight bytes. */
1021 #define NUM_SIZE_LOOKUP 512
1022 static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1024 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1025 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1026 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1027 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1028 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1029 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1030 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1031 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1032 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1033 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1034 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1035 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1036 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1037 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1038 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1039 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1040 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1041 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1042 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1043 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1044 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1045 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1046 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1047 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1048 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1049 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1050 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1051 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1052 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1053 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1054 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1055 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1056 };
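/* Illustrative sketch (not part of the original file): how the table above
   is used.  Small requests become a single table load; anything at or
   above NUM_SIZE_LOOKUP falls back to scanning object_size_table, which is
   the same pattern ggc_internal_alloc_stat uses below.  Kept under #if 0
   so it does not affect the build.  */
#if 0
static unsigned
example_order_for_size (size_t size)
{
  unsigned order;
  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];		/* e.g. size 24 -> order 5 (32 bytes).  */
  else
    {
      order = 10;			/* First order past the table's 512-byte reach.  */
      while (size > OBJECT_SIZE (order))
	order++;
    }
  return order;
}
#endif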
1058 /* Typed allocation function. Does nothing special in this collector. */
1061 ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
1064 return ggc_internal_alloc_stat (size PASS_MEM_STAT);
1067 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1070 ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
1072 size_t order, word, bit, object_offset, object_size;
1073 struct page_entry *entry;
1076 if (size < NUM_SIZE_LOOKUP)
1078 order = size_lookup[size];
1079 object_size = OBJECT_SIZE (order);
1084 while (size > (object_size = OBJECT_SIZE (order)))
1088 /* If there are non-full pages for this size allocation, they are at
1089 the head of the list. */
1090 entry = G.pages[order];
1092 /* If there is no page for this object size, or all pages in this
1093 context are full, allocate a new page. */
1094 if (entry == NULL || entry->num_free_objects == 0)
1096 struct page_entry *new_entry;
1097 new_entry = alloc_page (order);
1099 new_entry->index_by_depth = G.by_depth_in_use;
1100 push_by_depth (new_entry, 0);
1102 /* We can skip context depths, if we do, make sure we go all the
1103 way to the new depth. */
1104 while (new_entry->context_depth >= G.depth_in_use)
1105 push_depth (G.by_depth_in_use-1);
1107 /* If this is the only entry, it's also the tail. If it is not
1108 the only entry, then we must update the PREV pointer of the
1109 ENTRY (G.pages[order]) to point to our new page entry. */
1111 G.page_tails[order] = new_entry;
1113 entry->prev = new_entry;
1115 /* Put new pages at the head of the page list. By definition the
1116 entry at the head of the list always has a NULL PREV pointer. */
1117 new_entry->next = entry;
1118 new_entry->prev = NULL;
1120 G.pages[order] = new_entry;
1122 /* For a new page, we know the word and bit positions (in the
1123 in_use bitmap) of the first available object -- they're zero. */
1124 new_entry->next_bit_hint = 1;
1131 /* First try to use the hint left from the previous allocation
1132 to locate a clear bit in the in-use bitmap. We've made sure
1133 that the one-past-the-end bit is always set, so if the hint
1134 has run over, this test will fail. */
1135 unsigned hint = entry->next_bit_hint;
1136 word = hint / HOST_BITS_PER_LONG;
1137 bit = hint % HOST_BITS_PER_LONG;
1139 /* If the hint didn't work, scan the bitmap from the beginning. */
1140 if ((entry->in_use_p[word] >> bit) & 1)
1143 while (~entry->in_use_p[word] == 0)
1146 #if GCC_VERSION >= 3004
1147 bit = __builtin_ctzl (~entry->in_use_p[word]);
1149 while ((entry->in_use_p[word] >> bit) & 1)
1153 hint = word * HOST_BITS_PER_LONG + bit;
1156 /* Next time, try the next bit. */
1157 entry->next_bit_hint = hint + 1;
1159 object_offset = hint * object_size;
1162 /* Set the in-use bit. */
1163 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1165 /* Keep a running total of the number of free objects. If this page
1166 fills up, we may have to move it to the end of the list if the
1167 next page isn't full. If the next page is full, all subsequent
1168 pages are full, so there's no need to move it. */
1169 if (--entry->num_free_objects == 0
1170 && entry->next != NULL
1171 && entry->next->num_free_objects > 0)
1173 /* We have a new head for the list. */
1174 G.pages[order] = entry->next;
1176 /* We are moving ENTRY to the end of the page table list.
1177 The new page at the head of the list will have NULL in
1178 its PREV field and ENTRY will have NULL in its NEXT field. */
1179 entry->next->prev = NULL;
1182 /* Append ENTRY to the tail of the list. */
1183 entry->prev = G.page_tails[order];
1184 G.page_tails[order]->next = entry;
1185 G.page_tails[order] = entry;
1188 /* Calculate the object's address. */
1189 result = entry->page + object_offset;
1190 #ifdef GATHER_STATISTICS
1191 ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1192 result PASS_MEM_STAT);
1195 #ifdef ENABLE_GC_CHECKING
1196 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1197 exact same semantics in the presence of memory bugs, regardless of
1198 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1199 handle to avoid handle leak. */
1200 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1202 /* `Poison' the entire allocated object, including any padding at
1204 memset (result, 0xaf, object_size);
1206 /* Make the bytes after the end of the object inaccessible. Discard the
1207 handle to avoid handle leak. */
1208 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1209 object_size - size));
1212 /* Tell Valgrind that the memory is there, but its content isn't
1213 defined. The bytes at the end of the object are still marked
1215 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1217 /* Keep track of how many bytes are being allocated. This
1218 information is used in deciding when to collect. */
1219 G.allocated += object_size;
1221 /* For timevar statistics. */
1222 timevar_ggc_mem_total += object_size;
1224 #ifdef GATHER_STATISTICS
1226 size_t overhead = object_size - size;
1228 G.stats.total_overhead += overhead;
1229 G.stats.total_allocated += object_size;
1230 G.stats.total_overhead_per_order[order] += overhead;
1231 G.stats.total_allocated_per_order[order] += object_size;
1235 G.stats.total_overhead_under32 += overhead;
1236 G.stats.total_allocated_under32 += object_size;
1240 G.stats.total_overhead_under64 += overhead;
1241 G.stats.total_allocated_under64 += object_size;
1245 G.stats.total_overhead_under128 += overhead;
1246 G.stats.total_allocated_under128 += object_size;
1251 if (GGC_DEBUG_LEVEL >= 3)
1252 fprintf (G.debug_file,
1253 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1254 (unsigned long) size, (unsigned long) object_size, result,
1255 (void *) entry);
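/* Illustrative sketch (not part of the original file): the bitmap scan
   used above to find a free object.  A word containing a zero bit is
   located first, then the lowest zero bit is found by counting trailing
   zeros of the complement.  The caller must guarantee a clear bit exists
   (num_free_objects > 0, plus the one-past-the-end sentinel bit).
   Assumes GCC's __builtin_ctzl, as the code above does for GCC >= 3.4.
   Kept under #if 0 so it does not affect the build.  */
#if 0
static unsigned
example_find_free_bit (const unsigned long *in_use_p)
{
  unsigned word = 0, bit;
  while (~in_use_p[word] == 0)			/* Skip fully allocated words.  */
    word++;
  bit = __builtin_ctzl (~in_use_p[word]);	/* Lowest clear bit of this word.  */
  return word * HOST_BITS_PER_LONG + bit;	/* Object index within the page.  */
}
#endif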
1260 /* Mark function for strings. */
1263 gt_ggc_m_S (const void *p)
1268 unsigned long offset;
1270 if (!p || !ggc_allocated_p (p))
1273 /* Look up the page on which the object is allocated. */
1274 entry = lookup_page_table_entry (p);
1277 /* Calculate the index of the object on the page; this is its bit
1278 position in the in_use_p bitmap. Note that because a char* might
1279 point to the middle of an object, we need special code here to
1280 make sure P points to the start of an object. */
1281 offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1284 /* Here we've seen a char* which does not point to the beginning
1285 of an allocated object. We assume it points to the middle of a STRING_CST. */
1287 gcc_assert (offset == offsetof (struct tree_string, str));
1288 p = ((const char *) p) - offset;
1289 gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1293 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1294 word = bit / HOST_BITS_PER_LONG;
1295 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1297 /* If the bit was previously set, skip it. */
1298 if (entry->in_use_p[word] & mask)
1301 /* Otherwise set it, and decrement the free object count. */
1302 entry->in_use_p[word] |= mask;
1303 entry->num_free_objects -= 1;
1305 if (GGC_DEBUG_LEVEL >= 4)
1306 fprintf (G.debug_file, "Marking %p\n", p);
1311 /* If P is not marked, mark it and return false. Otherwise return true.
1312 P must have been allocated by the GC allocator; it mustn't point to
1313 static objects, stack variables, or memory allocated with malloc. */
1316 ggc_set_mark (const void *p)
1322 /* Look up the page on which the object is alloced. If the object
1323 wasn't allocated by the collector, we'll probably die. */
1324 entry = lookup_page_table_entry (p);
1327 /* Calculate the index of the object on the page; this is its bit
1328 position in the in_use_p bitmap. */
1329 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1330 word = bit / HOST_BITS_PER_LONG;
1331 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1333 /* If the bit was previously set, skip it. */
1334 if (entry->in_use_p[word] & mask)
1337 /* Otherwise set it, and decrement the free object count. */
1338 entry->in_use_p[word] |= mask;
1339 entry->num_free_objects -= 1;
1341 if (GGC_DEBUG_LEVEL >= 4)
1342 fprintf (G.debug_file, "Marking %p\n", p);
1347 /* Return 1 if P has been marked, zero otherwise.
1348 P must have been allocated by the GC allocator; it mustn't point to
1349 static objects, stack variables, or memory allocated with malloc. */
1352 ggc_marked_p (const void *p)
1358 /* Look up the page on which the object is alloced. If the object
1359 wasn't allocated by the collector, we'll probably die. */
1360 entry = lookup_page_table_entry (p);
1363 /* Calculate the index of the object on the page; this is its bit
1364 position in the in_use_p bitmap. */
1365 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1366 word = bit / HOST_BITS_PER_LONG;
1367 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1369 return (entry->in_use_p[word] & mask) != 0;
1372 /* Return the size of the gc-able object P. */
1375 ggc_get_size (const void *p)
1377 page_entry *pe = lookup_page_table_entry (p);
1378 return OBJECT_SIZE (pe->order);
1381 /* Release the memory for object P. */
1386 page_entry *pe = lookup_page_table_entry (p);
1387 size_t order = pe->order;
1388 size_t size = OBJECT_SIZE (order);
1390 #ifdef GATHER_STATISTICS
1391 ggc_free_overhead (p);
1394 if (GGC_DEBUG_LEVEL >= 3)
1395 fprintf (G.debug_file,
1396 "Freeing object, actual size=%lu, at %p on %p\n",
1397 (unsigned long) size, p, (void *) pe);
1399 #ifdef ENABLE_GC_CHECKING
1400 /* Poison the data, to indicate the data is garbage. */
1401 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1402 memset (p, 0xa5, size);
1404 /* Let valgrind know the object is free. */
1405 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1407 #ifdef ENABLE_GC_ALWAYS_COLLECT
1408 /* In the completely-anal-checking mode, we do *not* immediately free
1409 the data, but instead verify that the data is *actually* not
1410 reachable the next time we collect. */
1412 struct free_object *fo = XNEW (struct free_object);
1414 fo->next = G.free_object_list;
1415 G.free_object_list = fo;
1419 unsigned int bit_offset, word, bit;
1421 G.allocated -= size;
1423 /* Mark the object not-in-use. */
1424 bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1425 word = bit_offset / HOST_BITS_PER_LONG;
1426 bit = bit_offset % HOST_BITS_PER_LONG;
1427 pe->in_use_p[word] &= ~(1UL << bit);
1429 if (pe->num_free_objects++ == 0)
1433 /* If the page is completely full, then it's supposed to
1434 be after all pages that aren't. Since we've freed one
1435 object from a page that was full, we need to move the
1436 page to the head of the list.
1438 PE is the node we want to move. Q is the previous node
1439 and P is the next node in the list. */
1441 if (q && q->num_free_objects == 0)
1447 /* If PE was at the end of the list, then Q becomes the
1448 new end of the list. If PE was not the end of the
1449 list, then we need to update the PREV field for P. */
1451 G.page_tails[order] = q;
1455 /* Move PE to the head of the list. */
1456 pe->next = G.pages[order];
1458 G.pages[order]->prev = pe;
1459 G.pages[order] = pe;
1462 /* Reset the hint bit to point to the only free object. */
1463 pe->next_bit_hint = bit_offset;
1469 /* Subroutine of init_ggc which computes the pair of numbers used to
1470 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1472 This algorithm is taken from Granlund and Montgomery's paper
1473 "Division by Invariant Integers using Multiplication"
1474 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by multiplication). */
1477 static void
1478 compute_inverse (unsigned order)
1483 size = OBJECT_SIZE (order);
1485 while (size % 2 == 0)
1492 while (inv * size != 1)
1493 inv = inv * (2 - inv*size);
1495 DIV_MULT (order) = inv;
1496 DIV_SHIFT (order) = e;
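/* Illustrative sketch (not part of the original file): the Newton-Raphson
   step used above.  For an odd z, each iteration inv = inv * (2 - inv * z)
   doubles the number of correct low-order bits, so the loop converges to
   the exact inverse of z modulo 2^32 in a handful of steps.  For z == 3
   it reaches 0xAAAAAAAB, and 3 * 0xAAAAAAAB == 0x200000001 == 1 (mod 2^32).
   Assumes 32-bit unsigned int; kept under #if 0 so it does not affect the
   build.  */
#if 0
static unsigned int
example_modular_inverse (unsigned int z)	/* Z must be odd.  */
{
  unsigned int inv = z;				/* Correct to at least 3 low bits.  */
  while (inv * z != 1)				/* All arithmetic is mod 2^32.  */
    inv = inv * (2 - inv * z);
  return inv;
}
#endif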
1499 /* Initialize the ggc-mmap allocator. */
1505 G.pagesize = getpagesize();
1506 G.lg_pagesize = exact_log2 (G.pagesize);
1508 #ifdef HAVE_MMAP_DEV_ZERO
1509 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1510 if (G.dev_zero_fd == -1)
1511 internal_error ("open /dev/zero: %m");
1515 G.debug_file = fopen ("ggc-mmap.debug", "w");
1517 G.debug_file = stdout;
1521 /* StunOS has an amazing off-by-one error for the first mmap allocation
1522 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1523 believe, is an unaligned page allocation, which would cause us to
1524 hork badly if we tried to use it. */
1526 char *p = alloc_anon (NULL, G.pagesize);
1527 struct page_entry *e;
1528 if ((size_t)p & (G.pagesize - 1))
1530 /* How losing. Discard this one and try another. If we still
1531 can't get something useful, give up. */
1533 p = alloc_anon (NULL, G.pagesize);
1534 gcc_assert (!((size_t)p & (G.pagesize - 1)));
1537 /* We have a good page, might as well hold onto it... */
1538 e = XCNEW (struct page_entry);
1539 e->bytes = G.pagesize;
1541 e->next = G.free_pages;
1546 /* Initialize the object size table. */
1547 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1548 object_size_table[order] = (size_t) 1 << order;
1549 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1551 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1553 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1554 so that we're sure of getting aligned memory. */
1555 s = ROUND_UP (s, MAX_ALIGNMENT);
1556 object_size_table[order] = s;
1559 /* Initialize the objects-per-page and inverse tables. */
1560 for (order = 0; order < NUM_ORDERS; ++order)
1562 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1563 if (objects_per_page_table[order] == 0)
1564 objects_per_page_table[order] = 1;
1565 compute_inverse (order);
1568 /* Reset the size_lookup array to put appropriately sized objects in
1569 the special orders. All objects bigger than the previous power
1570 of two, but no greater than the special size, should go in the new order. */
1572 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1577 i = OBJECT_SIZE (order);
1578 if (i >= NUM_SIZE_LOOKUP)
1581 for (o = size_lookup[i]; o == size_lookup [i]; --i)
1582 size_lookup[i] = order;
1587 G.depth = XNEWVEC (unsigned int, G.depth_max);
1589 G.by_depth_in_use = 0;
1590 G.by_depth_max = INITIAL_PTE_COUNT;
1591 G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1592 G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1595 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1596 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1599 ggc_recalculate_in_use_p (page_entry *p)
1604 /* Because the past-the-end bit in in_use_p is always set, we
1605 pretend there is one additional object. */
1606 num_objects = OBJECTS_IN_PAGE (p) + 1;
1608 /* Reset the free object count. */
1609 p->num_free_objects = num_objects;
1611 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1613 i < CEIL (BITMAP_SIZE (num_objects),
1614 sizeof (*p->in_use_p));
1619 /* Something is in use if it is marked, or if it was in use in a
1620 context further down the context stack. */
1621 p->in_use_p[i] |= save_in_use_p (p)[i];
1623 /* Decrement the free object count for every object allocated. */
1624 for (j = p->in_use_p[i]; j; j >>= 1)
1625 p->num_free_objects -= (j & 1);
1628 gcc_assert (p->num_free_objects < num_objects);
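/* Illustrative sketch (not part of the original file): the bit-counting
   loop above.  Each set bit of an in_use_p word is one live object, so
   shifting the word down and summing its low bit counts them; it is the
   portable equivalent of __builtin_popcountl.  Kept under #if 0 so it
   does not affect the build.  */
#if 0
static unsigned
example_count_live (unsigned long word)
{
  unsigned live = 0;
  unsigned long j;
  for (j = word; j; j >>= 1)
    live += (unsigned) (j & 1);
  return live;
}
#endif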
1631 /* Unmark all objects. */
1638 for (order = 2; order < NUM_ORDERS; order++)
1642 for (p = G.pages[order]; p != NULL; p = p->next)
1644 size_t num_objects = OBJECTS_IN_PAGE (p);
1645 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1647 /* The data should be page-aligned. */
1648 gcc_assert (!((size_t) p->page & (G.pagesize - 1)));
1650 /* Pages that aren't in the topmost context are not collected;
1651 nevertheless, we need their in-use bit vectors to store GC
1652 marks. So, back them up first. */
1653 if (p->context_depth < G.context_depth)
1655 if (! save_in_use_p (p))
1656 save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1657 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1660 /* Reset the number of free objects and clear the
1661 in-use bits. These will be adjusted by mark_obj. */
1662 p->num_free_objects = num_objects;
1663 memset (p->in_use_p, 0, bitmap_size);
1665 /* Make sure the one-past-the-end bit is always set. */
1666 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1667 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1672 /* Free all empty pages. Partially empty pages need no attention
1673 because the `mark' bit doubles as an `unused' bit. */
1680 for (order = 2; order < NUM_ORDERS; order++)
1682 /* The last page-entry to consider, regardless of entries
1683 placed at the end of the list. */
1684 page_entry * const last = G.page_tails[order];
1687 size_t live_objects;
1688 page_entry *p, *previous;
1698 page_entry *next = p->next;
1700 /* Loop until all entries have been examined. */
1703 num_objects = OBJECTS_IN_PAGE (p);
1705 /* Add all live objects on this page to the count of
1706 allocated memory. */
1707 live_objects = num_objects - p->num_free_objects;
1709 G.allocated += OBJECT_SIZE (order) * live_objects;
1711 /* Only objects on pages in the topmost context should get collected. */
1713 if (p->context_depth < G.context_depth)
1716 /* Remove the page if it's empty. */
1717 else if (live_objects == 0)
1719 /* If P was the first page in the list, then NEXT
1720 becomes the new first page in the list, otherwise
1721 splice P out of the forward pointers. */
1723 G.pages[order] = next;
1725 previous->next = next;
1727 /* Splice P out of the back pointers too. */
1729 next->prev = previous;
1731 /* Are we removing the last element? */
1732 if (p == G.page_tails[order])
1733 G.page_tails[order] = previous;
1738 /* If the page is full, move it to the end. */
1739 else if (p->num_free_objects == 0)
1741 /* Don't move it if it's already at the end. */
1742 if (p != G.page_tails[order])
1744 /* Move p to the end of the list. */
1746 p->prev = G.page_tails[order];
1747 G.page_tails[order]->next = p;
1749 /* Update the tail pointer... */
1750 G.page_tails[order] = p;
1752 /* ... and the head pointer, if necessary. */
1754 G.pages[order] = next;
1756 previous->next = next;
1758 /* And update the backpointer in NEXT if necessary. */
1760 next->prev = previous;
1766 /* If we've fallen through to here, it's a page in the
1767 topmost context that is neither full nor empty. Such a
1768 page must precede pages at lesser context depth in the
1769 list, so move it to the head. */
1770 else if (p != G.pages[order])
1772 previous->next = p->next;
1774 /* Update the backchain in the next node if it exists. */
1776 p->next->prev = previous;
1778 /* Move P to the head of the list. */
1779 p->next = G.pages[order];
1781 G.pages[order]->prev = p;
1783 /* Update the head pointer. */
1786 /* Are we moving the last element? */
1787 if (G.page_tails[order] == p)
1788 G.page_tails[order] = previous;
1797 /* Now, restore the in_use_p vectors for any pages from contexts
1798 other than the current one. */
1799 for (p = G.pages[order]; p; p = p->next)
1800 if (p->context_depth != G.context_depth)
1801 ggc_recalculate_in_use_p (p);
1805 #ifdef ENABLE_GC_CHECKING
1806 /* Clobber all free objects. */
1813 for (order = 2; order < NUM_ORDERS; order++)
1815 size_t size = OBJECT_SIZE (order);
1818 for (p = G.pages[order]; p != NULL; p = p->next)
1823 if (p->context_depth != G.context_depth)
1824 /* Since we don't do any collection for pages in pushed
1825 contexts, there's no need to do any poisoning. And
1826 besides, the IN_USE_P array isn't valid until we pop
1830 num_objects = OBJECTS_IN_PAGE (p);
1831 for (i = 0; i < num_objects; i++)
1834 word = i / HOST_BITS_PER_LONG;
1835 bit = i % HOST_BITS_PER_LONG;
1836 if (((p->in_use_p[word] >> bit) & 1) == 0)
1838 char *object = p->page + i * size;
1840 /* Keep poison-by-write when we expect to use Valgrind,
1841 so the exact same memory semantics is kept, in case
1842 there are memory errors. We override this request
1844 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
1846 memset (object, 0xa5, size);
1848 /* Drop the handle to avoid handle leak. */
1849 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
1856 #define poison_pages()
1859 #ifdef ENABLE_GC_ALWAYS_COLLECT
1860 /* Validate that the reportedly free objects actually are. */
1863 validate_free_objects (void)
1865 struct free_object *f, *next, *still_free = NULL;
1867 for (f = G.free_object_list; f ; f = next)
1869 page_entry *pe = lookup_page_table_entry (f->object);
1872 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
1873 word = bit / HOST_BITS_PER_LONG;
1874 bit = bit % HOST_BITS_PER_LONG;
1877 /* Make certain it isn't visible from any root. Notice that we
1878 do this check before sweep_pages merges save_in_use_p. */
1879 gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
1881 /* If the object comes from an outer context, then retain the
1882 free_object entry, so that we can verify that the address
1883 isn't live on the stack in some outer context. */
1884 if (pe->context_depth != G.context_depth)
1886 f->next = still_free;
1893 G.free_object_list = still_free;
1896 #define validate_free_objects()
1899 /* Top level mark-and-sweep routine. */
1904 /* Avoid frequent unnecessary work by skipping collection if the
1905 total allocations haven't expanded much since the last collection. */
1907 float allocated_last_gc =
1908 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1910 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1912 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
1915 timevar_push (TV_GC);
1917 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1918 if (GGC_DEBUG_LEVEL >= 2)
1919 fprintf (G.debug_file, "BEGIN COLLECTING\n");
1921 /* Zero the total allocated bytes. This will be recalculated in the sweep phase. */
1925 /* Release the pages we freed the last time we collected, but didn't
1926 reuse in the interim. */
1929 /* Indicate that we've seen collections at this context depth. */
1930 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
1932 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
1936 #ifdef GATHER_STATISTICS
1937 ggc_prune_overhead_list ();
1940 validate_free_objects ();
1943 G.allocated_last_gc = G.allocated;
1945 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
1947 timevar_pop (TV_GC);
1950 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1951 if (GGC_DEBUG_LEVEL >= 2)
1952 fprintf (G.debug_file, "END COLLECTING\n");
1955 /* Print allocation statistics. */
1956 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1957 ? (x) \
1958 : ((x) < 1024*1024*10 \
1959 ? (x) / 1024 \
1960 : (x) / (1024*1024))))
1961 #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
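/* Worked example (not part of the original file): SCALE and STAT_LABEL
   pick a unit so the statistics columns stay readable.  For x == 5000
   (below 10k) the value prints as-is with a blank label; for x == 2097152
   (2 MB, below 10M) it prints as 2048 with label 'k'.  Compile-time checks
   are kept under #if 0 so they do not affect the build.  */
#if 0
extern char example_scale_check_1[SCALE (5000) == 5000 ? 1 : -1];
extern char example_scale_check_2[SCALE (2097152) == 2048 ? 1 : -1];
#endif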
1964 ggc_print_statistics (void)
1966 struct ggc_statistics stats;
1968 size_t total_overhead = 0;
1970 /* Clear the statistics. */
1971 memset (&stats, 0, sizeof (stats));
1973 /* Make sure collection will really occur. */
1974 G.allocated_last_gc = 0;
1976 /* Collect and print the statistics common across collectors. */
1977 ggc_print_common_statistics (stderr, &stats);
1979 /* Release free pages so that we will not count the bytes allocated
1980 there as part of the total allocated memory. */
1983 /* Collect some information about the various sizes of allocation. */
1986 "Memory still allocated at the end of the compilation process\n");
1987 fprintf (stderr, "%-5s %10s %10s %10s\n",
1988 "Size", "Allocated", "Used", "Overhead");
1989 for (i = 0; i < NUM_ORDERS; ++i)
1996 /* Skip empty entries. */
2000 overhead = allocated = in_use = 0;
2002 /* Figure out the total number of bytes allocated for objects of
2003 this size, and how many of them are actually in use. Also figure
2004 out how much memory the page table is using. */
2005 for (p = G.pages[i]; p; p = p->next)
2007 allocated += p->bytes;
2009 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2011 overhead += (sizeof (page_entry) - sizeof (long)
2012 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2014 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
2015 (unsigned long) OBJECT_SIZE (i),
2016 SCALE (allocated), STAT_LABEL (allocated),
2017 SCALE (in_use), STAT_LABEL (in_use),
2018 SCALE (overhead), STAT_LABEL (overhead));
2019 total_overhead += overhead;
2021 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
2022 SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
2023 SCALE (G.allocated), STAT_LABEL(G.allocated),
2024 SCALE (total_overhead), STAT_LABEL (total_overhead));
2026 #ifdef GATHER_STATISTICS
2028 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2030 fprintf (stderr, "Total Overhead: %10lld\n",
2031 G.stats.total_overhead);
2032 fprintf (stderr, "Total Allocated: %10lld\n",
2033 G.stats.total_allocated);
2035 fprintf (stderr, "Total Overhead under 32B: %10lld\n",
2036 G.stats.total_overhead_under32);
2037 fprintf (stderr, "Total Allocated under 32B: %10lld\n",
2038 G.stats.total_allocated_under32);
2039 fprintf (stderr, "Total Overhead under 64B: %10lld\n",
2040 G.stats.total_overhead_under64);
2041 fprintf (stderr, "Total Allocated under 64B: %10lld\n",
2042 G.stats.total_allocated_under64);
2043 fprintf (stderr, "Total Overhead under 128B: %10lld\n",
2044 G.stats.total_overhead_under128);
2045 fprintf (stderr, "Total Allocated under 128B: %10lld\n",
2046 G.stats.total_allocated_under128);
2048 for (i = 0; i < NUM_ORDERS; i++)
2049 if (G.stats.total_allocated_per_order[i])
2051 fprintf (stderr, "Total Overhead page size %7lu: %10lld\n",
2052 (unsigned long) OBJECT_SIZE (i),
2053 G.stats.total_overhead_per_order[i]);
2054 fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
2055 (unsigned long) OBJECT_SIZE (i),
2056 G.stats.total_allocated_per_order[i]);
2062 struct ggc_pch_ondisk
2064 unsigned totals[NUM_ORDERS];
2069 struct ggc_pch_ondisk d;
2070 size_t base[NUM_ORDERS];
2071 size_t written[NUM_ORDERS];
2074 struct ggc_pch_data *
2077 return XCNEW (struct ggc_pch_data);
2081 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2082 size_t size, bool is_string ATTRIBUTE_UNUSED,
2083 enum gt_types_enum type ATTRIBUTE_UNUSED)
2087 if (size < NUM_SIZE_LOOKUP)
2088 order = size_lookup[size];
2092 while (size > OBJECT_SIZE (order))
2096 d->d.totals[order]++;
2100 ggc_pch_total_size (struct ggc_pch_data *d)
2105 for (i = 0; i < NUM_ORDERS; i++)
2106 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2111 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2113 size_t a = (size_t) base;
2116 for (i = 0; i < NUM_ORDERS; i++)
2119 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2125 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2126 size_t size, bool is_string ATTRIBUTE_UNUSED,
2127 enum gt_types_enum type ATTRIBUTE_UNUSED)
2132 if (size < NUM_SIZE_LOOKUP)
2133 order = size_lookup[size];
2137 while (size > OBJECT_SIZE (order))
2141 result = (char *) d->base[order];
2142 d->base[order] += OBJECT_SIZE (order);
2147 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2148 FILE *f ATTRIBUTE_UNUSED)
2150 /* Nothing to do. */
2154 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2155 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2156 size_t size, bool is_string ATTRIBUTE_UNUSED)
2159 static const char emptyBytes[256] = { 0 };
2161 if (size < NUM_SIZE_LOOKUP)
2162 order = size_lookup[size];
2166 while (size > OBJECT_SIZE (order))
2170 if (fwrite (x, size, 1, f) != 1)
2171 fatal_error ("can%'t write PCH file: %m");
2173 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2174 object out to OBJECT_SIZE(order). This happens for strings. */
2176 if (size != OBJECT_SIZE (order))
2178 unsigned padding = OBJECT_SIZE(order) - size;
2180 /* To speed small writes, we use a nulled-out array that's larger
2181 than most padding requests as the source for our null bytes. This
2182 permits us to do the padding with fwrite() rather than fseek(), and
2183 limits the chance the OS may try to flush any outstanding writes. */
2184 if (padding <= sizeof(emptyBytes))
2186 if (fwrite (emptyBytes, 1, padding, f) != padding)
2187 fatal_error ("can%'t write PCH file");
2191 /* Larger than our buffer? Just default to fseek. */
2192 if (fseek (f, padding, SEEK_CUR) != 0)
2193 fatal_error ("can%'t write PCH file");
2197 d->written[order]++;
2198 if (d->written[order] == d->d.totals[order]
2199 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2202 fatal_error ("can%'t write PCH file: %m");
2206 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2208 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2209 fatal_error ("can%'t write PCH file: %m");
2213 /* Move the PCH PTE entries just added to the end of by_depth, to the front. */
2217 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2221 /* First, we swap the new entries to the front of the varrays. */
2222 page_entry **new_by_depth;
2223 unsigned long **new_save_in_use;
2225 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2226 new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2228 memcpy (&new_by_depth[0],
2229 &G.by_depth[count_old_page_tables],
2230 count_new_page_tables * sizeof (void *));
2231 memcpy (&new_by_depth[count_new_page_tables],
2233 count_old_page_tables * sizeof (void *));
2234 memcpy (&new_save_in_use[0],
2235 &G.save_in_use[count_old_page_tables],
2236 count_new_page_tables * sizeof (void *));
2237 memcpy (&new_save_in_use[count_new_page_tables],
2239 count_old_page_tables * sizeof (void *));
2242 free (G.save_in_use);
2244 G.by_depth = new_by_depth;
2245 G.save_in_use = new_save_in_use;
2247 /* Now update all the index_by_depth fields. */
2248 for (i = G.by_depth_in_use; i > 0; --i)
2250 page_entry *p = G.by_depth[i-1];
2251 p->index_by_depth = i-1;
2254 /* And last, we update the depth pointers in G.depth. The first
2255 entry is already 0, and context 0 entries always start at index
2256 0, so there is nothing to update in the first slot. We need a
2257 second slot, only if we have old ptes, and if we do, they start
2258 at index count_new_page_tables. */
2259 if (count_old_page_tables)
2260 push_depth (count_new_page_tables);
2264 ggc_pch_read (FILE *f, void *addr)
2266 struct ggc_pch_ondisk d;
2268 char *offs = (char *) addr;
2269 unsigned long count_old_page_tables;
2270 unsigned long count_new_page_tables;
2272 count_old_page_tables = G.by_depth_in_use;
2274 /* We've just read in a PCH file. So, every object that used to be
2275 allocated is now free. */
2277 #ifdef ENABLE_GC_CHECKING
2280 /* Since we free all the allocated objects, the free list becomes
2281 useless. Validate it now, which will also clear it. */
2282 validate_free_objects();
2284 /* No object read from a PCH file should ever be freed. So, set the
2285 context depth to 1, and set the depth of all the currently-allocated
2286 pages to be 1 too. PCH pages will have depth 0. */
2287 gcc_assert (!G.context_depth);
2288 G.context_depth = 1;
2289 for (i = 0; i < NUM_ORDERS; i++)
2292 for (p = G.pages[i]; p != NULL; p = p->next)
2293 p->context_depth = G.context_depth;
2296 /* Allocate the appropriate page-table entries for the pages read from
2298 if (fread (&d, sizeof (d), 1, f) != 1)
2299 fatal_error ("can%'t read PCH file: %m");
2301 for (i = 0; i < NUM_ORDERS; i++)
2303 struct page_entry *entry;
2309 if (d.totals[i] == 0)
2312 bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2313 num_objs = bytes / OBJECT_SIZE (i);
2314 entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2316 + BITMAP_SIZE (num_objs + 1)));
2317 entry->bytes = bytes;
2319 entry->context_depth = 0;
2321 entry->num_free_objects = 0;
2325 j + HOST_BITS_PER_LONG <= num_objs + 1;
2326 j += HOST_BITS_PER_LONG)
2327 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2328 for (; j < num_objs + 1; j++)
2329 entry->in_use_p[j / HOST_BITS_PER_LONG]
2330 |= 1L << (j % HOST_BITS_PER_LONG);
2332 for (pte = entry->page;
2333 pte < entry->page + entry->bytes;
2335 set_page_table_entry (pte, entry);
2337 if (G.page_tails[i] != NULL)
2338 G.page_tails[i]->next = entry;
2341 G.page_tails[i] = entry;
2343 /* We start off by just adding all the new information to the
2344 end of the varrays; later, we will move the new information
2345 to the front of the varrays, as the PCH page tables are at context 0. */
2347 push_by_depth (entry, 0);
2350 /* Now, we update the various data structures that speed page table handling. */
2352 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2354 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2356 /* Update the statistics. */
2357 G.allocated = G.allocated_last_gc = offs - (char *)addr;
2365 struct alloc_zone rtl_zone;
2366 struct alloc_zone tree_zone;
2367 struct alloc_zone tree_id_zone;