1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 #include "coretypes.h"
#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#endif

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */

#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
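/* As a minimal illustrative sketch (not part of the allocator), the
   power-of-two rounding described above maps a request size to its
   `order' like so; the real code uses the size_lookup table defined
   further below instead of a loop.  */
#if 0
static unsigned
example_size_to_order (size_t size)
{
  unsigned order = 3;                   /* Minimum allocation is 8 bytes.  */
  while (((size_t) 1 << order) < size)
    order++;
  return order;                         /* e.g. 24 bytes -> order 5 (32).  */
}
#endif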
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
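/* A worked sketch of the lookup macros above, assuming a 32-bit host,
   a 4K page (G.lg_pagesize == 12) and PAGE_L1_BITS == 8: the address
   0xaabbccdd splits into L1 == 0xaa (top 8 bits) and L2 == 0xbbc (next
   12 bits), with the low 12 bits being the offset within the page.  */
#if 0
#include <assert.h>
static void
example_lookup_split (void)
{
  size_t p = 0xaabbccdd;
  assert (((p >> 24) & 0xff) == 0xaa);          /* LOOKUP_L1 */
  assert (((p >> 12) & 0xfff) == 0xbbc);        /* LOOKUP_L2 */
}
#endif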
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
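/* A standalone sketch of the precomputed-inverse division used by
   OFFSET_TO_BIT, under the assumption of 32-bit wrapping arithmetic
   (uint32_t).  For Z == 24 == 3 << 3, the table would hold shift == 3
   and mult == 0xaaaaaaab (the inverse of 3 modulo 2^32), so that
   (O * mult) >> shift == O / 24 for every O divisible by 24.  */
#if 0
#include <stdint.h>
#include <assert.h>
static void
example_offset_to_bit (void)
{
  const uint32_t mult = 0xaaaaaaabu;    /* 3 * mult == 1 (mod 2^32).  */
  const unsigned shift = 3;             /* 24 == 3 << 3.  */
  uint32_t offset;
  for (offset = 0; offset <= 24 * 100; offset += 24)
    assert (((offset * mult) >> shift) == offset / 24);
}
#endif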
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (sizeof (struct rtx_def) + ((NSLOTS) - 1) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct tree_decl),
  sizeof (struct tree_list),
  TREE_EXP_SIZE (2),
  RTL_SIZE (2),			/* REG, MEM, PLUS, etc.  */
  RTL_SIZE (10),		/* INSN, CALL_INSN, JUMP_INSN */
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
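/* Quick sanity sketch for the two rounding macros: with x == 37 and
   f == 8, ROUND_UP_VALUE (37, 8) == 3 and ROUND_UP (37, 8) == 40,
   i.e. 37 + 3 == 40 is the smallest multiple of 8 that is >= 37.  */
#if 0
#include <assert.h>
static void
example_round_up (void)
{
  assert (ROUND_UP_VALUE (37, 8) == 3);
  assert (ROUND_UP (37, 8) == 40);
  assert (ROUND_UP_VALUE (40, 8) == 0);
}
#endif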
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This structure is indexed by the given depth we
     are interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;
} G;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  */
#define GGC_QUIRE_SIZE 16

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128
static int ggc_allocated_p PARAMS ((const void *));
static page_entry *lookup_page_table_entry PARAMS ((const void *));
static void set_page_table_entry PARAMS ((void *, page_entry *));
#ifdef USING_MMAP
static char *alloc_anon PARAMS ((char *, size_t));
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index PARAMS ((char *, char *));
static void set_page_group_in_use PARAMS ((page_group *, char *));
static void clear_page_group_in_use PARAMS ((page_group *, char *));
#endif
static struct page_entry * alloc_page PARAMS ((unsigned));
static void free_page PARAMS ((struct page_entry *));
static void release_pages PARAMS ((void));
static void clear_marks PARAMS ((void));
static void sweep_pages PARAMS ((void));
static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
static void compute_inverse PARAMS ((unsigned));
static inline void adjust_depth PARAMS ((void));
static void move_ptes_to_front PARAMS ((int, int));

#ifdef ENABLE_GC_CHECKING
static void poison_pages PARAMS ((void));
#endif

void debug_print_page_list PARAMS ((int));
static void push_depth PARAMS ((unsigned int));
static void push_by_depth PARAMS ((page_entry *, unsigned long *));
/* Push an entry onto G.depth.  */

inline static void
push_depth (i)
     unsigned int i;
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = (unsigned int *) xrealloc ((char *) G.depth,
					   G.depth_max * sizeof (unsigned int));
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (p, s)
     page_entry *p;
     unsigned long *s;
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = (page_entry **) xrealloc ((char *) G.by_depth,
					     G.by_depth_max * sizeof (page_entry *));
      G.save_in_use = (unsigned long **) xrealloc ((char *) G.save_in_use,
						   G.by_depth_max * sizeof (unsigned long *));
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))
/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (order)
     int order;
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
	  (PTR) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (pref, size)
     char *pref ATTRIBUTE_UNUSED;
     size_t size;
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (allocation, page)
     char *allocation, *page;
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (group, page)
     page_group *group;
     char *page;
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (group, page)
     page_group *group;
     char *page;
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = (struct page_entry *) xcalloc (1, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = xmalloc (alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  if (tail_slop < sizeof (page_group))
	    abort ();
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = (struct page_entry *) xcalloc (1, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = (struct page_entry *) xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (PTR) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}
/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth ()
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices of
	 those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (entry)
     page_entry *entry;
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (PTR) entry,
	     entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];

      /* If they are at the same depth, put top element into freed
	 slot.  */
      if (entry->context_depth == top->context_depth)
	{
	  int i = entry->index_by_depth;
	  G.by_depth[i] = top;
	  G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
	  top->index_by_depth = i;
	}
      else
	{
	  /* We cannot free a page from a context deeper than the
	     current one.  */
	  abort ();
	}
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages ()
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */

static unsigned char size_lookup[257] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
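/* A few spot checks of the static table above (a sketch; note that
   init_ggc later retargets some entries at the extra orders): requests
   of 1..8 bytes land in order 3 (8 bytes), 9..16 in order 4, and e.g.
   a 24-byte request in order 5 (32 bytes).  */
#if 0
#include <assert.h>
static void
example_size_lookup (void)
{
  assert (size_lookup[1] == 3 && size_lookup[8] == 3);
  assert (size_lookup[9] == 4 && size_lookup[16] == 4);
  assert (size_lookup[24] == 5);
}
#endif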
/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_alloc (size)
     size_t size;
{
  unsigned order, word, bit, object_offset;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
	 way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
	push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;

      /* Put new pages at the head of the page list.  */
      new_entry->next = entry;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * OBJECT_SIZE (order);
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, OBJECT_SIZE (order)));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, OBJECT_SIZE (order));

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
					    OBJECT_SIZE (order) - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += OBJECT_SIZE (order);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
	     (PTR) entry);

  return result;
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (p)
     const void *p;
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (order)
     unsigned order;
{
  unsigned size, inv, e;

  /* There can be only one object per "page" in a bucket for sizes
     larger than half a machine page; it will always have offset zero.  */
  if (OBJECT_SIZE (order) > G.pagesize/2)
    {
      if (OBJECTS_PER_PAGE (order) != 1)
	abort ();

      DIV_MULT (order) = 1;
      DIV_SHIFT (order) = 0;
      return;
    }

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
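/* Standalone sketch of the Newton iteration used above: for odd s,
   each step of inv = inv * (2 - inv * s) doubles the number of correct
   low-order bits, so at most five steps suffice for 32 bits.  For
   s == 3 it converges to 0xaaaaaaab, since 3 * 0xaaaaaaab == 1 modulo
   2^32.  Assumes 32-bit wrapping arithmetic (uint32_t).  */
#if 0
#include <stdint.h>
static uint32_t
example_exact_inverse (uint32_t s)      /* S must be odd.  */
{
  uint32_t inv = s;
  while (inv * s != 1)
    inv = inv * (2 - inv * s);
  return inv;                           /* example_exact_inverse (3)
					   == 0xaaaaaaab.  */
}
#endif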
/* Initialize the ggc-mmap allocator.  */

void
init_ggc ()
{
  unsigned order;

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize);
	if ((size_t)p & (G.pagesize - 1))
	  abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
	 so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
	objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      o = size_lookup[OBJECT_SIZE (order)];
      for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
	size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = (unsigned int *) xmalloc (G.depth_max * sizeof (unsigned int));

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = (page_entry **) xmalloc (G.by_depth_max * sizeof (page_entry *));
  G.save_in_use = (unsigned long **) xmalloc (G.by_depth_max * sizeof (unsigned long *));
}
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth >= HOST_BITS_PER_LONG)
    abort ();
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (p)
     page_entry *p;
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }

  if (p->num_free_objects >= num_objects)
    abort ();
}
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

void
ggc_pop_context ()
{
  unsigned long omask;
  unsigned int depth, i, e;
#ifdef ENABLE_CHECKING
  unsigned int order;
#endif

  depth = --G.context_depth;
  omask = (unsigned long)1 << (depth + 1);

  if (!((G.context_depth_allocations | G.context_depth_collections) & omask))
    return;

  G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1;
  G.context_depth_allocations &= omask - 1;
  G.context_depth_collections &= omask - 1;

  /* The G.depth array is shortened so that the last index is the
     context_depth of the top element of by_depth.  */
  if (depth+1 < G.depth_in_use)
    e = G.depth[depth+1];
  else
    e = G.by_depth_in_use;

  /* We might not have any PTEs of depth depth.  */
  if (depth < G.depth_in_use)
    {
      /* First we go through all the pages at depth depth to
	 recalculate the in use bits.  */
      for (i = G.depth[depth]; i < e; ++i)
	{
	  page_entry *p;

#ifdef ENABLE_CHECKING
	  p = G.by_depth[i];

	  /* Check that all of the pages really are at the depth that
	     we expect.  */
	  if (p->context_depth != depth)
	    abort ();
	  if (p->index_by_depth != i)
	    abort ();
#endif

	  prefetch (&save_in_use_p_i (i+8));
	  prefetch (&save_in_use_p_i (i+16));
	  if (save_in_use_p_i (i))
	    {
	      p = G.by_depth[i];
	      ggc_recalculate_in_use_p (p);
	      free (save_in_use_p_i (i));
	      save_in_use_p_i (i) = 0;
	    }
	}
    }

  /* Then, we reset all page_entries with a depth greater than depth
     to be at depth.  */
  for (i = e; i < G.by_depth_in_use; ++i)
    {
      page_entry *p = G.by_depth[i];

      /* Check that all of the pages really are at the depth we
	 expect.  */
#ifdef ENABLE_CHECKING
      if (p->context_depth <= depth)
	abort ();
      if (p->index_by_depth != i)
	abort ();
#endif
      p->context_depth = depth;
    }

  adjust_depth ();

#ifdef ENABLE_CHECKING
  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  if (p->context_depth > depth)
	    abort ();
	  else if (p->context_depth == depth && save_in_use_p (p))
	    abort ();
	}
    }
#endif
}
/* Unmark all objects.  */

static inline void
clear_marks ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects = OBJECTS_IN_PAGE (p);
	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

#ifdef ENABLE_CHECKING
	  /* The data should be page-aligned.  */
	  if ((size_t) p->page & (G.pagesize - 1))
	    abort ();
#endif

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! save_in_use_p (p))
		save_in_use_p (p) = xmalloc (bitmap_size);
	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
	    }

	  /* Reset the number of free objects and clear the
	     in-use bits.  These will be adjusted by mark_obj.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static inline void
sweep_pages ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  num_objects = OBJECTS_IN_PAGE (p);

	  /* Add all live objects on this page to the count of
	     allocated memory.  */
	  live_objects = num_objects - p->num_free_objects;

	  G.allocated += OBJECT_SIZE (order) * live_objects;

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (live_objects == 0)
	    {
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;
		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;
	      p->next = G.pages[order];
	      G.pages[order] = p;

	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
		G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
	 other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}
#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static inline void
poison_pages ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects;
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  num_objects = OBJECTS_IN_PAGE (p);
	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		{
		  char *object = p->page + i * size;

		  /* Keep poison-by-write when we expect to use Valgrind,
		     so the exact same memory semantics is kept, in case
		     there are memory errors.  We override this request
		     below.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
		  memset (object, 0xa5, size);

		  /* Drop the handle to avoid handle leak.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
		}
	    }
	}
    }
}
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect ()
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (G.allocated < allocated_last_gc + min_expand)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  clear_marks ();
  ggc_mark_roots ();

#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif

  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))

void
ggc_print_statistics ()
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr, "\n%-5s %10s  %10s  %10s\n",
	   "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), LABEL (allocated),
	       SCALE (in_use), LABEL (in_use),
	       SCALE (overhead), LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
	   SCALE (G.allocated), LABEL(G.allocated),
	   SCALE (total_overhead), LABEL (total_overhead));
}
struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

struct ggc_pch_data *
init_ggc_pch ()
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}

void
ggc_pch_count_object (d, x, size)
     struct ggc_pch_data *d;
     void *x ATTRIBUTE_UNUSED;
     size_t size;
{
  unsigned order;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}

size_t
ggc_pch_total_size (d)
     struct ggc_pch_data *d;
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}

void
ggc_pch_this_base (d, base)
     struct ggc_pch_data *d;
     void *base;
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}

char *
ggc_pch_alloc_object (d, x, size)
     struct ggc_pch_data *d;
     void *x ATTRIBUTE_UNUSED;
     size_t size;
{
  unsigned order;
  char *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}

void
ggc_pch_prepare_write (d, f)
     struct ggc_pch_data * d ATTRIBUTE_UNUSED;
     FILE * f ATTRIBUTE_UNUSED;
{
  /* Nothing to do.  */
}

void
ggc_pch_write_object (d, f, x, newx, size)
     struct ggc_pch_data * d ATTRIBUTE_UNUSED;
     FILE *f;
     void *x;
     void *newx ATTRIBUTE_UNUSED;
     size_t size;
{
  unsigned order;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* In the current implementation, SIZE is always equal to
     OBJECT_SIZE (order) and so the fseek is never executed.  */
  if (size != OBJECT_SIZE (order)
      && fseek (f, OBJECT_SIZE (order) - size, SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}

void
ggc_pch_finish (d, f)
     struct ggc_pch_data * d;
     FILE *f;
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}
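/* The PCH hooks above are driven by the PCH writer in roughly this
   order (a sketch of the calling convention only; the real driver
   lives elsewhere in the compiler, and the object loops are elided):  */
#if 0
static void
example_pch_write_sequence (FILE *f, void *base)
{
  struct ggc_pch_data *d = init_ggc_pch ();

  /* Pass 1: measure.
     for each object X of SIZE: ggc_pch_count_object (d, X, SIZE);
     then ggc_pch_total_size (d) gives the mapped space needed.  */
  ggc_pch_this_base (d, base);	/* Where the data will be mapped.  */

  /* Pass 2: assign addresses and write.
     for each object: newx = ggc_pch_alloc_object (d, X, SIZE);  */
  ggc_pch_prepare_write (d, f);
  /* for each object: ggc_pch_write_object (d, f, X, newx, SIZE);  */
  ggc_pch_finish (d, f);
}
#endif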
/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (count_old_page_tables, count_new_page_tables)
     int count_old_page_tables;
     int count_new_page_tables;
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = (page_entry **) xmalloc (G.by_depth_max * sizeof (page_entry *));
  new_save_in_use = (unsigned long **) xmalloc (G.by_depth_max * sizeof (unsigned long *));

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
void
ggc_pch_read (f, addr)
     FILE *f;
     void *addr;
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  if (G.context_depth != 0)
    abort ();
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = xcalloc (1, (sizeof (struct page_entry)
			   - sizeof (long)
			   + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays, later, we will move the new information
	 to the front of the varrays, as the PCH page tables are at
	 context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}