1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
23 #include "coretypes.h"
34 #ifdef ENABLE_VALGRIND_CHECKING
35 # ifdef HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
41 /* Avoid #ifdef:s when we can help it. */
#define VALGRIND_DISCARD(x)
#endif
45 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
46 file open. Prefer either to valloc. */
48 # undef HAVE_MMAP_DEV_ZERO
50 # include <sys/mman.h>
52 # define MAP_FAILED -1
54 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
55 # define MAP_ANONYMOUS MAP_ANON
61 #ifdef HAVE_MMAP_DEV_ZERO
63 # include <sys/mman.h>
65 # define MAP_FAILED -1
72 #define USING_MALLOC_PAGE_GROUPS
77 This garbage-collecting allocator allocates objects on one of a set
78 of pages. Each page can allocate objects of a single size only;
79 available sizes are powers of two starting at four bytes. The size
80 of an allocation request is rounded up to the next power of two
81 (`order'), and satisfied from the appropriate page.
83 Each page is recorded in a page-entry, which also maintains an
84 in-use bitmap of object positions on the page. This allows the
85 allocation state of a particular object to be flipped without
86 touching the page itself.
88 Each page-entry also has a context depth, which is used to track
89 pushing and popping of allocation contexts. Only objects allocated
90 in the current (highest-numbered) context may be collected.
92 Page entries are arranged in an array of singly-linked lists. The
   array is indexed by the allocation order of the pages on
94 it; i.e. all pages on a list allocate objects of the same size.
95 Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.
99 Empty pages (of all orders) are kept on a single page cache list,
100 and are considered first when new pages are required; they are
101 deallocated at the start of the next collection if they haven't
102 been recycled by then. */
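/* An illustrative walk-through (assuming a 4096-byte system page and
   ignoring the extra, non-power-of-two sizes defined further below):

     ggc_alloc (10)  ->  rounded up to 16 bytes (order 4)
                     ->  carved out of a page that holds only 16-byte
                         objects (256 of them on a 4K page), with the
                         object's in-use bit set in that page-entry.  */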
104 /* Define GGC_DEBUG_LEVEL to print debugging information.
105 0: No debugging output.
106 1: GC statistics only.
107 2: Page-entry allocations/deallocations as well.
108 3: Object allocations as well.
109 4: Object marks as well. */
110 #define GGC_DEBUG_LEVEL (0)
112 #ifndef HOST_BITS_PER_PTR
113 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
117 /* A two-level tree is used to look up the page-entry for a given
118 pointer. Two chunks of the pointer's bits are extracted to index
119 the first and second levels of the tree, as follows:
       msb +-------------------+--------------+--------------+----------------------+ lsb
           | high bits, if any | PAGE_L1_BITS | PAGE_L2_BITS | HOST_PAGE_SIZE_BITS  |
           +-------------------+--------------+--------------+----------------------+
129 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
130 pages are aligned on system page boundaries. The next most
131 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
132 index values in the lookup table, respectively.
134 For 32-bit architectures and the settings below, there are no
135 leftover bits. For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
139 #define PAGE_L1_BITS (8)
140 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
141 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
142 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
144 #define LOOKUP_L1(p) \
145 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
147 #define LOOKUP_L2(p) \
148 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
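/* For example, with 32-bit pointers and a 4096-byte page size
   (G.lg_pagesize == 12, so PAGE_L2_BITS == 12), the pointer 0x12345678
   is looked up as

     LOOKUP_L1 (p) == 0x12    (bits 31..24 -> first-level slot)
     LOOKUP_L2 (p) == 0x345   (bits 23..12 -> second-level slot)

   while the low 12 bits (0x678) are simply the offset within the page.  */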
150 /* The number of objects per allocation page, for objects on a page of
151 the indicated ORDER. */
152 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
154 /* The number of objects in P. */
155 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
157 /* The size of an object on a page of the indicated ORDER. */
158 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
160 /* For speed, we avoid doing a general integer divide to locate the
161 offset in the allocation bitmap, by precalculating numbers M, S
162 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
163 within the page which is evenly divisible by the object size Z. */
164 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
165 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
166 #define OFFSET_TO_BIT(OFFSET, ORDER) \
167 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
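/* A worked example, using a hypothetical 24-byte object size: the pair
   computed by compute_inverse below is M == 0xAAAAAAAB (the inverse of 3
   modulo 2^32) and S == 3, so for the offset of the third object on a page

     OFFSET_TO_BIT (48, ORDER) == ((48 * 0xAAAAAAAB) >> 3) mod 2^32
                               == 2 == 48 / 24

   with no division instruction executed.  */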
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */
172 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
174 #define RTL_SIZE(NSLOTS) \
175 (sizeof (struct rtx_def) + ((NSLOTS) - 1) * sizeof (rtunion))
177 /* The Ith entry is the maximum size of an object to be stored in the
178 Ith extra order. Adding a new entry to this array is the *only*
179 thing you need to do to add a new special allocation size. */
181 static const size_t extra_order_size_table[] = {
182 sizeof (struct tree_decl),
183 sizeof (struct tree_list),
184 RTL_SIZE (2), /* REG, MEM, PLUS, etc. */
185 RTL_SIZE (10), /* INSN, CALL_INSN, JUMP_INSN */
188 /* The total number of orders. */
190 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
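/* For example, on a host with 32-bit pointers and the four extra sizes
   listed above, NUM_ORDERS is 32 + 4 == 36: orders 0..31 are the
   power-of-two sizes and orders 32..35 are the extra sizes.  */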
192 /* We use this structure to determine the alignment required for
193 allocations. For power-of-two sized allocations, that's not a
194 problem, but it does matter for odd-sized allocations. */
struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};
208 /* The biggest alignment required. */
210 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */
215 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
217 /* Compute the smallest multiple of F that is >= X. */
219 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
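/* For example, ROUND_UP_VALUE (20, 8) == 4 and ROUND_UP (20, 8) == 24,
   so X + ROUND_UP_VALUE (X, F) == ROUND_UP (X, F) for any nonnegative X.  */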
/* The Ith entry is the number of objects on a page of order I.  */
223 static unsigned objects_per_page_table[NUM_ORDERS];
225 /* The Ith entry is the size of an object on a page of order I. */
227 static size_t object_size_table[NUM_ORDERS];
229 /* The Ith entry is a pair of numbers (mult, shift) such that
230 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
231 for all k evenly divisible by OBJECT_SIZE(I). */
static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];
240 /* A page_entry records the status of an allocation page. This
241 structure is dynamically sized to fit the bitmap in_use_p. */
typedef struct page_entry
{
244 /* The next page-entry with objects of the same size, or NULL if
245 this is the last page-entry. */
246 struct page_entry *next;
248 /* The number of bytes allocated. (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;
  /* The address at which the memory is allocated.  */
  char *page;
255 #ifdef USING_MALLOC_PAGE_GROUPS
256 /* Back pointer to the page group this page came from. */
257 struct page_group *group;
260 /* Saved in-use bit vector for pages that aren't in the topmost
261 context during collection. */
262 unsigned long *save_in_use_p;
264 /* Context depth of this page. */
265 unsigned short context_depth;
267 /* The number of free objects remaining on this page. */
268 unsigned short num_free_objects;
270 /* A likely candidate for the bit position of a free object for the
271 next allocation from this page. */
272 unsigned short next_bit_hint;
  /* The lg of the size of objects allocated from this page.  */
  unsigned char order;
277 /* A bit vector indicating whether or not objects are in use. The
278 Nth bit is one if the Nth object on this page is allocated. This
279 array is dynamically sized. */
  unsigned long in_use_p[1];
} page_entry;
283 #ifdef USING_MALLOC_PAGE_GROUPS
284 /* A page_group describes a large allocation from malloc, from which
285 we parcel out aligned pages. */
typedef struct page_group
{
288 /* A linked list of all extant page groups. */
289 struct page_group *next;
  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  int in_use;
} page_group;
302 #if HOST_BITS_PER_PTR <= 32
304 /* On 32-bit hosts, we use a two level page table, as pictured above. */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else
309 /* On 64-bit hosts, we use the same two level page tables plus a linked
310 list that disambiguates the top 32-bits. There will almost always be
311 exactly one entry in the list. */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
321 /* The rest of the global variables. */
static struct globals
{
324 /* The Nth element in this array is a page with objects of size 2^N.
325 If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
328 page_entry *pages[NUM_ORDERS];
330 /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
333 page_entry *page_tails[NUM_ORDERS];
  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;
  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;
  /* Bytes currently allocated.  */
  size_t allocated;
345 /* Bytes currently allocated at the end of the last collection. */
346 size_t allocated_last_gc;
  /* Total amount of memory mapped.  */
  size_t bytes_mapped;
351 /* Bit N set if any allocations have been done at context depth N. */
352 unsigned long context_depth_allocations;
354 /* Bit N set if any collections have been done at context depth N. */
355 unsigned long context_depth_collections;
357 /* The current depth in the context stack. */
358 unsigned short context_depth;
360 /* A file descriptor open to /dev/zero for reading. */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif
365 /* A cache of free system pages. */
366 page_entry *free_pages;
368 #ifdef USING_MALLOC_PAGE_GROUPS
369 page_group *page_groups;
  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
378 #define BITMAP_SIZE(Num_objects) \
379 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
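/* For example, on a host where HOST_BITS_PER_LONG is 32, a page holding
   128 objects needs BITMAP_SIZE (128 + 1) == CEIL (129, 32) * 4 == 20
   bytes of bitmap -- one bit per object plus the one-past-the-end bit.  */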
381 /* Allocate pages in chunks of this size, to throttle calls to memory
382 allocation routines. The first page is used, the rest go onto the
383 free list. This cannot be larger than HOST_BITS_PER_INT for the
384 in_use bitmask for page_group. */
385 #define GGC_QUIRE_SIZE 16
387 static int ggc_allocated_p PARAMS ((const void *));
388 static page_entry *lookup_page_table_entry PARAMS ((const void *));
389 static void set_page_table_entry PARAMS ((void *, page_entry *));
391 static char *alloc_anon PARAMS ((char *, size_t));
393 #ifdef USING_MALLOC_PAGE_GROUPS
394 static size_t page_group_index PARAMS ((char *, char *));
395 static void set_page_group_in_use PARAMS ((page_group *, char *));
396 static void clear_page_group_in_use PARAMS ((page_group *, char *));
398 static struct page_entry * alloc_page PARAMS ((unsigned));
399 static void free_page PARAMS ((struct page_entry *));
400 static void release_pages PARAMS ((void));
401 static void clear_marks PARAMS ((void));
402 static void sweep_pages PARAMS ((void));
403 static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
404 static void compute_inverse PARAMS ((unsigned));
406 #ifdef ENABLE_GC_CHECKING
407 static void poison_pages PARAMS ((void));
410 void debug_print_page_list PARAMS ((int));
412 /* Returns nonzero if P was allocated in GC'able memory. */
#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
424 page_table table = G.lookup;
425 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
430 if (table->high_bits == high_bits)
434 base = &table->table[0];
437 /* Extract the level 1 and 2 indices. */
441 return base[L1] && base[L1][L2];
444 /* Traverse the page table and find the entry for a page.
445 Die (probably) if the object wasn't allocated via GC. */
447 static inline page_entry *
lookup_page_table_entry (p)
#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
457 page_table table = G.lookup;
458 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
459 while (table->high_bits != high_bits)
461 base = &table->table[0];
464 /* Extract the level 1 and 2 indices. */
471 /* Set the page table entry for a page. */
set_page_table_entry (p, entry)
#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
485 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
486 for (table = G.lookup; table; table = table->next)
487 if (table->high_bits == high_bits)
490 /* Not found -- allocate a new table. */
491 table = (page_table) xcalloc (1, sizeof(*table));
492 table->next = G.lookup;
493 table->high_bits = high_bits;
496 base = &table->table[0];
499 /* Extract the level 1 and 2 indices. */
503 if (base[L1] == NULL)
504 base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
506 base[L1][L2] = entry;
509 /* Prints the page-entry for object size ORDER, for debugging. */
512 debug_print_page_list (order)
516 printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
517 (PTR) G.page_tails[order]);
521 printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
522 p->num_free_objects);
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
532 compile error unless exactly one of the HAVE_* is defined. */
535 alloc_anon (pref, size)
536 char *pref ATTRIBUTE_UNUSED;
539 #ifdef HAVE_MMAP_ANON
540 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
541 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
543 #ifdef HAVE_MMAP_DEV_ZERO
544 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
545 MAP_PRIVATE, G.dev_zero_fd, 0);
548 if (page == (char *) MAP_FAILED)
550 perror ("virtual memory exhausted");
551 exit (FATAL_EXIT_CODE);
554 /* Remember that we allocated this memory. */
555 G.bytes_mapped += size;
557 /* Pretend we don't have access to the allocated pages. We'll enable
558 access to smaller pieces of the area in ggc_alloc. Discard the
559 handle to avoid handle leak. */
560 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
565 #ifdef USING_MALLOC_PAGE_GROUPS
566 /* Compute the index for this page into the page group. */
569 page_group_index (allocation, page)
570 char *allocation, *page;
572 return (size_t) (page - allocation) >> G.lg_pagesize;
575 /* Set and clear the in_use bit for this page in the page group. */
578 set_page_group_in_use (group, page)
582 group->in_use |= 1 << page_group_index (group->allocation, page);
586 clear_page_group_in_use (group, page)
590 group->in_use &= ~(1 << page_group_index (group->allocation, page));
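/* For example, with 4096-byte pages the page starting at
   GROUP->allocation + 0x3000 has index 3, so set_page_group_in_use sets
   bit 3 (mask 0x8) in GROUP->in_use, and clear_page_group_in_use clears
   the same bit when that page is returned to the group.  */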
594 /* Allocate a new page for allocating objects of size 2^ORDER,
595 and return an entry for it. The entry is not added to the
596 appropriate page_table list. */
598 static inline struct page_entry *
602 struct page_entry *entry, *p, **pp;
606 size_t page_entry_size;
608 #ifdef USING_MALLOC_PAGE_GROUPS
612 num_objects = OBJECTS_PER_PAGE (order);
613 bitmap_size = BITMAP_SIZE (num_objects + 1);
614 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
615 entry_size = num_objects * OBJECT_SIZE (order);
616 if (entry_size < G.pagesize)
617 entry_size = G.pagesize;
622 /* Check the list of free pages for one we can use. */
623 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
624 if (p->bytes == entry_size)
629 /* Recycle the allocated memory from this page ... */
633 #ifdef USING_MALLOC_PAGE_GROUPS
637 /* ... and, if possible, the page entry itself. */
638 if (p->order == order)
641 memset (entry, 0, page_entry_size);
647 else if (entry_size == G.pagesize)
649 /* We want just one page. Allocate a bunch of them and put the
650 extras on the freelist. (Can only do this optimization with
651 mmap for backing store.) */
652 struct page_entry *e, *f = G.free_pages;
655 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
659 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
661 e = (struct page_entry *) xcalloc (1, page_entry_size);
663 e->bytes = G.pagesize;
664 e->page = page + (i << G.lg_pagesize);
672 page = alloc_anon (NULL, entry_size);
674 #ifdef USING_MALLOC_PAGE_GROUPS
677 /* Allocate a large block of memory and serve out the aligned
678 pages therein. This results in much less memory wastage
679 than the traditional implementation of valloc. */
681 char *allocation, *a, *enda;
682 size_t alloc_size, head_slop, tail_slop;
683 int multiple_pages = (entry_size == G.pagesize);
686 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
688 alloc_size = entry_size + G.pagesize - 1;
689 allocation = xmalloc (alloc_size);
691 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
692 head_slop = page - allocation;
694 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
696 tail_slop = alloc_size - entry_size - head_slop;
697 enda = allocation + alloc_size - tail_slop;
699 /* We allocated N pages, which are likely not aligned, leaving
700 us with N-1 usable pages. We plan to place the page_group
701 structure somewhere in the slop. */
702 if (head_slop >= sizeof (page_group))
703 group = (page_group *)page - 1;
706 /* We magically got an aligned allocation. Too bad, we have
707 to waste a page anyway. */
711 tail_slop += G.pagesize;
713 if (tail_slop < sizeof (page_group))
715 group = (page_group *)enda;
716 tail_slop -= sizeof (page_group);
719 /* Remember that we allocated this memory. */
720 group->next = G.page_groups;
721 group->allocation = allocation;
722 group->alloc_size = alloc_size;
724 G.page_groups = group;
725 G.bytes_mapped += alloc_size;
727 /* If we allocated multiple pages, put the rest on the free list. */
730 struct page_entry *e, *f = G.free_pages;
731 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
733 e = (struct page_entry *) xcalloc (1, page_entry_size);
735 e->bytes = G.pagesize;
747 entry = (struct page_entry *) xcalloc (1, page_entry_size);
749 entry->bytes = entry_size;
751 entry->context_depth = G.context_depth;
752 entry->order = order;
753 entry->num_free_objects = num_objects;
754 entry->next_bit_hint = 1;
756 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
758 #ifdef USING_MALLOC_PAGE_GROUPS
759 entry->group = group;
760 set_page_group_in_use (group, page);
763 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
764 increment the hint. */
765 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
766 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
768 set_page_table_entry (page, entry);
770 if (GGC_DEBUG_LEVEL >= 2)
771 fprintf (G.debug_file,
772 "Allocating page at %p, object size=%lu, data %p-%p\n",
773 (PTR) entry, (unsigned long) OBJECT_SIZE (order), page,
774 page + entry_size - 1);
779 /* For a page that is no longer needed, put it on the free page list. */
785 if (GGC_DEBUG_LEVEL >= 2)
786 fprintf (G.debug_file,
787 "Deallocating page at %p, data %p-%p\n", (PTR) entry,
788 entry->page, entry->page + entry->bytes - 1);
  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
792 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
794 set_page_table_entry (entry->page, NULL);
796 #ifdef USING_MALLOC_PAGE_GROUPS
797 clear_page_group_in_use (entry->group, entry->page);
800 entry->next = G.free_pages;
801 G.free_pages = entry;
804 /* Release the free page cache to the system. */
810 page_entry *p, *next;
814 /* Gather up adjacent pages so they are unmapped together. */
825 while (p && p->page == start + len)
834 G.bytes_mapped -= len;
839 #ifdef USING_MALLOC_PAGE_GROUPS
  /* Remove all pages belonging to free page groups from the list.  */
845 while ((p = *pp) != NULL)
846 if (p->group->in_use == 0)
854 /* Remove all free page groups, and release the storage. */
856 while ((g = *gp) != NULL)
860 G.bytes_mapped -= g->alloc_size;
861 free (g->allocation);
868 /* This table provides a fast way to determine ceil(log_2(size)) for
869 allocation requests. The minimum allocation size is eight bytes. */
static unsigned char size_lookup[257] =
{
873 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
874 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
875 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
876 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
877 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
878 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
879 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
880 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
881 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
882 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
883 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
884 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
885 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
886 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
887 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
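/* For example, a 24-byte request finds size_lookup[24] == 5 and is served
   from a 32-byte (order 5) page, and a 100-byte request maps to order 7
   and occupies 128 bytes.  init_ggc later rewrites part of this table so
   that the extra, non-power-of-two orders absorb some of that padding.  */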
892 /* Allocate a chunk of memory of SIZE bytes. If ZERO is nonzero, the
893 memory is zeroed; otherwise, its contents are undefined. */
899 unsigned order, word, bit, object_offset;
900 struct page_entry *entry;
904 order = size_lookup[size];
908 while (size > OBJECT_SIZE (order))
912 /* If there are non-full pages for this size allocation, they are at
913 the head of the list. */
914 entry = G.pages[order];
916 /* If there is no page for this object size, or all pages in this
917 context are full, allocate a new page. */
918 if (entry == NULL || entry->num_free_objects == 0)
920 struct page_entry *new_entry;
921 new_entry = alloc_page (order);
923 /* If this is the only entry, it's also the tail. */
925 G.page_tails[order] = new_entry;
927 /* Put new pages at the head of the page list. */
928 new_entry->next = entry;
930 G.pages[order] = new_entry;
932 /* For a new page, we know the word and bit positions (in the
933 in_use bitmap) of the first available object -- they're zero. */
934 new_entry->next_bit_hint = 1;
941 /* First try to use the hint left from the previous allocation
942 to locate a clear bit in the in-use bitmap. We've made sure
943 that the one-past-the-end bit is always set, so if the hint
944 has run over, this test will fail. */
945 unsigned hint = entry->next_bit_hint;
946 word = hint / HOST_BITS_PER_LONG;
947 bit = hint % HOST_BITS_PER_LONG;
949 /* If the hint didn't work, scan the bitmap from the beginning. */
950 if ((entry->in_use_p[word] >> bit) & 1)
953 while (~entry->in_use_p[word] == 0)
955 while ((entry->in_use_p[word] >> bit) & 1)
957 hint = word * HOST_BITS_PER_LONG + bit;
960 /* Next time, try the next bit. */
961 entry->next_bit_hint = hint + 1;
963 object_offset = hint * OBJECT_SIZE (order);
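  /* For example, if the first three objects on this page were handed out
     by earlier calls, next_bit_hint was left at 3; word == 0 and bit == 3
     then name a clear bit immediately, and object_offset becomes
     3 * OBJECT_SIZE (order) without scanning the bitmap.  */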
966 /* Set the in-use bit. */
967 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
969 /* Keep a running total of the number of free objects. If this page
970 fills up, we may have to move it to the end of the list if the
971 next page isn't full. If the next page is full, all subsequent
972 pages are full, so there's no need to move it. */
973 if (--entry->num_free_objects == 0
974 && entry->next != NULL
975 && entry->next->num_free_objects > 0)
977 G.pages[order] = entry->next;
979 G.page_tails[order]->next = entry;
980 G.page_tails[order] = entry;
983 /* Calculate the object's address. */
984 result = entry->page + object_offset;
986 #ifdef ENABLE_GC_CHECKING
987 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in the presence of memory bugs, regardless of
989 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
990 handle to avoid handle leak. */
991 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, OBJECT_SIZE (order)));
  /* `Poison' the entire allocated object, including any padding at
     the end.  */
995 memset (result, 0xaf, OBJECT_SIZE (order));
  /* Make the bytes after the end of the object inaccessible.  Discard the
998 handle to avoid handle leak. */
999 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
1000 OBJECT_SIZE (order) - size));
1003 /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
1006 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1008 /* Keep track of how many bytes are being allocated. This
1009 information is used in deciding when to collect. */
1010 G.allocated += OBJECT_SIZE (order);
1012 if (GGC_DEBUG_LEVEL >= 3)
1013 fprintf (G.debug_file,
1014 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1015 (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
/* If P is not marked, mark it and return false.  Otherwise return true.
1022 P must have been allocated by the GC allocator; it mustn't point to
1023 static objects, stack variables, or memory allocated with malloc. */
  /* Look up the page on which the object is allocated.  If the object
1034 wasn't allocated by the collector, we'll probably die. */
1035 entry = lookup_page_table_entry (p);
1036 #ifdef ENABLE_CHECKING
1041 /* Calculate the index of the object on the page; this is its bit
1042 position in the in_use_p bitmap. */
1043 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1044 word = bit / HOST_BITS_PER_LONG;
1045 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1047 /* If the bit was previously set, skip it. */
1048 if (entry->in_use_p[word] & mask)
1051 /* Otherwise set it, and decrement the free object count. */
1052 entry->in_use_p[word] |= mask;
1053 entry->num_free_objects -= 1;
1055 if (GGC_DEBUG_LEVEL >= 4)
1056 fprintf (G.debug_file, "Marking %p\n", p);
1061 /* Return 1 if P has been marked, zero otherwise.
1062 P must have been allocated by the GC allocator; it mustn't point to
1063 static objects, stack variables, or memory allocated with malloc. */
  /* Look up the page on which the object is allocated.  If the object
1074 wasn't allocated by the collector, we'll probably die. */
1075 entry = lookup_page_table_entry (p);
1076 #ifdef ENABLE_CHECKING
1081 /* Calculate the index of the object on the page; this is its bit
1082 position in the in_use_p bitmap. */
1083 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1084 word = bit / HOST_BITS_PER_LONG;
1085 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1087 return (entry->in_use_p[word] & mask) != 0;
1090 /* Return the size of the gc-able object P. */
1096 page_entry *pe = lookup_page_table_entry (p);
1097 return OBJECT_SIZE (pe->order);
1100 /* Subroutine of init_ggc which computes the pair of numbers used to
1101 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1103 This algorithm is taken from Granlund and Montgomery's paper
1104 "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */
1109 compute_inverse (order)
1112 unsigned size, inv, e;
1114 /* There can be only one object per "page" in a bucket for sizes
1115 larger than half a machine page; it will always have offset zero. */
1116 if (OBJECT_SIZE (order) > G.pagesize/2)
1118 if (OBJECTS_PER_PAGE (order) != 1)
1121 DIV_MULT (order) = 1;
1122 DIV_SHIFT (order) = 0;
1126 size = OBJECT_SIZE (order);
1128 while (size % 2 == 0)
1135 while (inv * size != 1)
1136 inv = inv * (2 - inv*size);
1138 DIV_MULT (order) = inv;
1139 DIV_SHIFT (order) = e;
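  /* For example, for a hypothetical 24-byte order the loop above strips
     the factor of 8 (e == 3, size == 3), and the iteration converges on
     inv == 0xAAAAAAAB, the multiplicative inverse of 3 modulo 2^32; each
     pass of inv = inv * (2 - inv*size) doubles the number of correct
     low-order bits.  */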
1142 /* Initialize the ggc-mmap allocator. */
1148 G.pagesize = getpagesize();
1149 G.lg_pagesize = exact_log2 (G.pagesize);
1151 #ifdef HAVE_MMAP_DEV_ZERO
1152 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1153 if (G.dev_zero_fd == -1)
1158 G.debug_file = fopen ("ggc-mmap.debug", "w");
1160 G.debug_file = stdout;
1164 /* StunOS has an amazing off-by-one error for the first mmap allocation
1165 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1166 believe, is an unaligned page allocation, which would cause us to
1167 hork badly if we tried to use it. */
1169 char *p = alloc_anon (NULL, G.pagesize);
1170 struct page_entry *e;
1171 if ((size_t)p & (G.pagesize - 1))
1173 /* How losing. Discard this one and try another. If we still
1174 can't get something useful, give up. */
1176 p = alloc_anon (NULL, G.pagesize);
1177 if ((size_t)p & (G.pagesize - 1))
1181 /* We have a good page, might as well hold onto it... */
1182 e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
1183 e->bytes = G.pagesize;
1185 e->next = G.free_pages;
1190 /* Initialize the object size table. */
1191 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1192 object_size_table[order] = (size_t) 1 << order;
1193 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1195 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
      /* If S is not a multiple of MAX_ALIGNMENT, round it up
1198 so that we're sure of getting aligned memory. */
1199 s = ROUND_UP (s, MAX_ALIGNMENT);
1200 object_size_table[order] = s;
1203 /* Initialize the objects-per-page and inverse tables. */
1204 for (order = 0; order < NUM_ORDERS; ++order)
1206 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1207 if (objects_per_page_table[order] == 0)
1208 objects_per_page_table[order] = 1;
1209 compute_inverse (order);
1212 /* Reset the size_lookup array to put appropriately sized objects in
1213 the special orders. All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
1216 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1221 o = size_lookup[OBJECT_SIZE (order)];
1222 for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
1223 size_lookup[i] = order;
1227 /* Increment the `GC context'. Objects allocated in an outer context
1228 are never freed, eliminating the need to register their roots. */
1236 if (G.context_depth >= HOST_BITS_PER_LONG)
1240 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1241 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1244 ggc_recalculate_in_use_p (p)
1250 /* Because the past-the-end bit in in_use_p is always set, we
1251 pretend there is one additional object. */
1252 num_objects = OBJECTS_IN_PAGE (p) + 1;
1254 /* Reset the free object count. */
1255 p->num_free_objects = num_objects;
1257 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1259 i < CEIL (BITMAP_SIZE (num_objects),
1260 sizeof (*p->in_use_p));
1265 /* Something is in use if it is marked, or if it was in use in a
1266 context further down the context stack. */
1267 p->in_use_p[i] |= p->save_in_use_p[i];
1269 /* Decrement the free object count for every object allocated. */
1270 for (j = p->in_use_p[i]; j; j >>= 1)
1271 p->num_free_objects -= (j & 1);
1274 if (p->num_free_objects >= num_objects)
1278 /* Decrement the `GC context'. All objects allocated since the
1279 previous ggc_push_context are migrated to the outer context. */
1284 unsigned long omask;
1285 unsigned order, depth;
1287 depth = --G.context_depth;
1288 omask = (unsigned long)1 << (depth + 1);
1290 if (!((G.context_depth_allocations | G.context_depth_collections) & omask))
1293 G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1;
1294 G.context_depth_allocations &= omask - 1;
1295 G.context_depth_collections &= omask - 1;
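  /* For example, when popping from depth 3 to depth 2, omask == 1 << 3:
     the shift above folds the depth-3 "allocations seen" bit down into
     the depth-2 bit, and the two masks with (omask - 1) clear the bits
     for depth 3 and above.  */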
1297 /* Any remaining pages in the popped context are lowered to the new
1298 current context; i.e. objects allocated in the popped context and
1299 left over are imported into the previous context. */
1300 for (order = 2; order < NUM_ORDERS; order++)
1304 for (p = G.pages[order]; p != NULL; p = p->next)
1306 if (p->context_depth > depth)
1307 p->context_depth = depth;
1309 /* If this page is now in the topmost context, and we'd
1310 saved its allocation state, restore it. */
1311 else if (p->context_depth == depth && p->save_in_use_p)
1313 ggc_recalculate_in_use_p (p);
1314 free (p->save_in_use_p);
1315 p->save_in_use_p = 0;
1321 /* Unmark all objects. */
1328 for (order = 2; order < NUM_ORDERS; order++)
1332 for (p = G.pages[order]; p != NULL; p = p->next)
1334 size_t num_objects = OBJECTS_IN_PAGE (p);
1335 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1337 #ifdef ENABLE_CHECKING
1338 /* The data should be page-aligned. */
1339 if ((size_t) p->page & (G.pagesize - 1))
1343 /* Pages that aren't in the topmost context are not collected;
1344 nevertheless, we need their in-use bit vectors to store GC
1345 marks. So, back them up first. */
1346 if (p->context_depth < G.context_depth)
1348 if (! p->save_in_use_p)
1349 p->save_in_use_p = xmalloc (bitmap_size);
1350 memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
	  /* Reset the number of free objects and clear the
1354 in-use bits. These will be adjusted by mark_obj. */
1355 p->num_free_objects = num_objects;
1356 memset (p->in_use_p, 0, bitmap_size);
1358 /* Make sure the one-past-the-end bit is always set. */
1359 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1360 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1365 /* Free all empty pages. Partially empty pages need no attention
1366 because the `mark' bit doubles as an `unused' bit. */
1373 for (order = 2; order < NUM_ORDERS; order++)
1375 /* The last page-entry to consider, regardless of entries
1376 placed at the end of the list. */
1377 page_entry * const last = G.page_tails[order];
1380 size_t live_objects;
1381 page_entry *p, *previous;
1391 page_entry *next = p->next;
1393 /* Loop until all entries have been examined. */
1396 num_objects = OBJECTS_IN_PAGE (p);
1398 /* Add all live objects on this page to the count of
1399 allocated memory. */
1400 live_objects = num_objects - p->num_free_objects;
1402 G.allocated += OBJECT_SIZE (order) * live_objects;
	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;
1409 /* Remove the page if it's empty. */
1410 else if (live_objects == 0)
1413 G.pages[order] = next;
1415 previous->next = next;
1417 /* Are we removing the last element? */
1418 if (p == G.page_tails[order])
1419 G.page_tails[order] = previous;
1424 /* If the page is full, move it to the end. */
1425 else if (p->num_free_objects == 0)
1427 /* Don't move it if it's already at the end. */
1428 if (p != G.page_tails[order])
1430 /* Move p to the end of the list. */
1432 G.page_tails[order]->next = p;
1434 /* Update the tail pointer... */
1435 G.page_tails[order] = p;
1437 /* ... and the head pointer, if necessary. */
1439 G.pages[order] = next;
1441 previous->next = next;
1446 /* If we've fallen through to here, it's a page in the
1447 topmost context that is neither full nor empty. Such a
1448 page must precede pages at lesser context depth in the
1449 list, so move it to the head. */
1450 else if (p != G.pages[order])
1452 previous->next = p->next;
1453 p->next = G.pages[order];
1455 /* Are we moving the last element? */
1456 if (G.page_tails[order] == p)
1457 G.page_tails[order] = previous;
1466 /* Now, restore the in_use_p vectors for any pages from contexts
1467 other than the current one. */
1468 for (p = G.pages[order]; p; p = p->next)
1469 if (p->context_depth != G.context_depth)
1470 ggc_recalculate_in_use_p (p);
1474 #ifdef ENABLE_GC_CHECKING
1475 /* Clobber all free objects. */
1482 for (order = 2; order < NUM_ORDERS; order++)
1484 size_t size = OBJECT_SIZE (order);
1487 for (p = G.pages[order]; p != NULL; p = p->next)
1492 if (p->context_depth != G.context_depth)
1493 /* Since we don't do any collection for pages in pushed
1494 contexts, there's no need to do any poisoning. And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;
1499 num_objects = OBJECTS_IN_PAGE (p);
1500 for (i = 0; i < num_objects; i++)
1503 word = i / HOST_BITS_PER_LONG;
1504 bit = i % HOST_BITS_PER_LONG;
1505 if (((p->in_use_p[word] >> bit) & 1) == 0)
1507 char *object = p->page + i * size;
1509 /* Keep poison-by-write when we expect to use Valgrind,
1510 so the exact same memory semantics is kept, in case
		 there are memory errors.  We override this request
		 below.  */
1513 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
1514 memset (object, 0xa5, size);
1516 /* Drop the handle to avoid handle leak. */
1517 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
1525 /* Top level mark-and-sweep routine. */
1530 /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
1533 float allocated_last_gc =
1534 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1536 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1538 if (G.allocated < allocated_last_gc + min_expand)
1541 timevar_push (TV_GC);
1543 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
1549 /* Release the pages we freed the last time we collected, but didn't
1550 reuse in the interim. */
1553 /* Indicate that we've seen collections at this context depth. */
1554 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
1559 #ifdef ENABLE_GC_CHECKING
1565 G.allocated_last_gc = G.allocated;
1567 timevar_pop (TV_GC);
1570 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1573 /* Print allocation statistics. */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
1579 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
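/* For example, 5000 is printed unscaled with label ' ', 2500000 is
   printed as 2441 with label 'k', and anything of ten megabytes or more
   is reduced to whole megabytes with label 'M'.  */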
1582 ggc_print_statistics ()
1584 struct ggc_statistics stats;
1586 size_t total_overhead = 0;
1588 /* Clear the statistics. */
1589 memset (&stats, 0, sizeof (stats));
1591 /* Make sure collection will really occur. */
1592 G.allocated_last_gc = 0;
1594 /* Collect and print the statistics common across collectors. */
1595 ggc_print_common_statistics (stderr, &stats);
1597 /* Release free pages so that we will not count the bytes allocated
1598 there as part of the total allocated memory. */
  /* Collect some information about the various sizes of
     allocation.  */
1603 fprintf (stderr, "\n%-5s %10s %10s %10s\n",
1604 "Size", "Allocated", "Used", "Overhead");
1605 for (i = 0; i < NUM_ORDERS; ++i)
1612 /* Skip empty entries. */
1616 overhead = allocated = in_use = 0;
1618 /* Figure out the total number of bytes allocated for objects of
1619 this size, and how many of them are actually in use. Also figure
1620 out how much memory the page table is using. */
1621 for (p = G.pages[i]; p; p = p->next)
1623 allocated += p->bytes;
1625 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
1627 overhead += (sizeof (page_entry) - sizeof (long)
1628 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
1630 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
1631 (unsigned long) OBJECT_SIZE (i),
1632 SCALE (allocated), LABEL (allocated),
1633 SCALE (in_use), LABEL (in_use),
1634 SCALE (overhead), LABEL (overhead));
1635 total_overhead += overhead;
1637 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
1638 SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
1639 SCALE (G.allocated), LABEL(G.allocated),
1640 SCALE (total_overhead), LABEL (total_overhead));
1645 struct ggc_pch_ondisk
  unsigned totals[NUM_ORDERS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
1649 size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};
1653 struct ggc_pch_data *
1656 return xcalloc (sizeof (struct ggc_pch_data), 1);
1660 ggc_pch_count_object (d, x, size)
1661 struct ggc_pch_data *d;
1662 void *x ATTRIBUTE_UNUSED;
1668 order = size_lookup[size];
1672 while (size > OBJECT_SIZE (order))
1676 d->d.totals[order]++;
1680 ggc_pch_total_size (d)
1681 struct ggc_pch_data *d;
1686 for (i = 0; i < NUM_ORDERS; i++)
1687 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
1692 ggc_pch_this_base (d, base)
1693 struct ggc_pch_data *d;
1696 size_t a = (size_t) base;
1699 for (i = 0; i < NUM_ORDERS; i++)
1702 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
1708 ggc_pch_alloc_object (d, x, size)
1709 struct ggc_pch_data *d;
1710 void *x ATTRIBUTE_UNUSED;
1717 order = size_lookup[size];
1721 while (size > OBJECT_SIZE (order))
1725 result = (char *) d->base[order];
1726 d->base[order] += OBJECT_SIZE (order);
1731 ggc_pch_prepare_write (d, f)
1732 struct ggc_pch_data * d ATTRIBUTE_UNUSED;
1733 FILE * f ATTRIBUTE_UNUSED;
1735 /* Nothing to do. */
1739 ggc_pch_write_object (d, f, x, newx, size)
1740 struct ggc_pch_data * d ATTRIBUTE_UNUSED;
1743 void *newx ATTRIBUTE_UNUSED;
1749 order = size_lookup[size];
1753 while (size > OBJECT_SIZE (order))
1757 if (fwrite (x, size, 1, f) != 1)
1758 fatal_io_error ("can't write PCH file");
1760 /* In the current implementation, SIZE is always equal to
1761 OBJECT_SIZE (order) and so the fseek is never executed. */
1762 if (size != OBJECT_SIZE (order)
1763 && fseek (f, OBJECT_SIZE (order) - size, SEEK_CUR) != 0)
1764 fatal_io_error ("can't write PCH file");
1766 d->written[order]++;
1767 if (d->written[order] == d->d.totals[order]
1768 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
1771 fatal_io_error ("can't write PCH file");
1775 ggc_pch_finish (d, f)
1776 struct ggc_pch_data * d;
1779 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
1780 fatal_io_error ("can't write PCH file");
1785 ggc_pch_read (f, addr)
1789 struct ggc_pch_ondisk d;
  /* We've just read in a PCH file.  So, every object that used to be allocated
     is now free.  */
1800 /* No object read from a PCH file should ever be freed. So, set the
1801 context depth to 1, and set the depth of all the currently-allocated
1802 pages to be 1 too. PCH pages will have depth 0. */
1803 if (G.context_depth != 0)
1805 G.context_depth = 1;
1806 for (i = 0; i < NUM_ORDERS; i++)
1809 for (p = G.pages[i]; p != NULL; p = p->next)
1810 p->context_depth = G.context_depth;
  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
1815 if (fread (&d, sizeof (d), 1, f) != 1)
1816 fatal_io_error ("can't read PCH file");
1818 for (i = 0; i < NUM_ORDERS; i++)
1820 struct page_entry *entry;
1826 if (d.totals[i] == 0)
1829 bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
1830 num_objs = bytes / OBJECT_SIZE (i);
1831 entry = xcalloc (1, (sizeof (struct page_entry)
1833 + BITMAP_SIZE (num_objs + 1)));
1834 entry->bytes = bytes;
1836 entry->context_depth = 0;
1838 entry->num_free_objects = 0;
1842 j + HOST_BITS_PER_LONG <= num_objs + 1;
1843 j += HOST_BITS_PER_LONG)
1844 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
1845 for (; j < num_objs + 1; j++)
1846 entry->in_use_p[j / HOST_BITS_PER_LONG]
1847 |= 1L << (j % HOST_BITS_PER_LONG);
1849 for (pte = entry->page;
1850 pte < entry->page + entry->bytes;
1852 set_page_table_entry (pte, entry);
1854 if (G.page_tails[i] != NULL)
1855 G.page_tails[i]->next = entry;
1858 G.page_tails[i] = entry;
1861 /* Update the statistics. */
1862 G.allocated = G.allocated_last_gc = offs - (char *)addr;