/* "Bag-of-pages" garbage collector for the GNU compiler.
- Copyright (C) 1999 Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
- This file is part of GNU CC.
+This file is part of GCC.
- GNU CC is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
- GNU CC is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
- You should have received a copy of the GNU General Public License
- along with GNU CC; see the file COPYING. If not, write to
- the Free Software Foundation, 59 Temple Place - Suite 330,
- Boston, MA 02111-1307, USA. */
-
-#include <unistd.h>
-#include <stdio.h>
-#include <sys/mman.h>
-#include <fcntl.h>
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
#include "config.h"
-#include "ggc.h"
-#include "rtl.h"
#include "system.h"
#include "tree.h"
+#include "rtl.h"
+#include "tm_p.h"
+#include "toplev.h"
#include "varray.h"
#include "flags.h"
+#include "ggc.h"
+#include "timevar.h"
+
+/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
+ file open. Prefer either to valloc. */
+#ifdef HAVE_MMAP_ANON
+# undef HAVE_MMAP_DEV_ZERO
+
+# include <sys/mman.h>
+# ifndef MAP_FAILED
+# define MAP_FAILED -1
+# endif
+# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
+# define MAP_ANONYMOUS MAP_ANON
+# endif
+# define USING_MMAP
+#endif
+
+#ifdef HAVE_MMAP_DEV_ZERO
+
+# include <sys/mman.h>
+# ifndef MAP_FAILED
+# define MAP_FAILED -1
+# endif
+# define USING_MMAP
+
+#endif
+
+#ifndef USING_MMAP
+#define USING_MALLOC_PAGE_GROUPS
+#endif
/* Strategy:
ggc_collect is invoked. Otherwise, collection is performed only
when a significant amount of memory has been allocated since the
last collection. */
-#undef GGC_ALWAYS_COLLECT.
+#undef GGC_ALWAYS_COLLECT
-/* If ENABLE_CHECKING is defined, enable GGC_POISON and
- GGC_ALWAYS_COLLECT automatically. */
-#ifdef ENABLE_CHECKING
+#ifdef ENABLE_GC_CHECKING
#define GGC_POISON
+#endif
+#ifdef ENABLE_GC_ALWAYS_COLLECT
#define GGC_ALWAYS_COLLECT
#endif
1: GC statistics only.
2: Page-entry allocations/deallocations as well.
3: Object allocations as well.
- 4: Object marks as well. */
+ 4: Object marks as well. */
#define GGC_DEBUG_LEVEL (0)
\f
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
#endif
-/* Timing information for collect execution goes into here. */
-extern int gc_time;
-
-/* The "" allocated string. */
-char *empty_string;
\f
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the two levels of the tree: above the low G.lg_pagesize offset
   bits, the next PAGE_L2_BITS select the second-level index, and
   the PAGE_L1_BITS above those select the first-level index.
- The topmost leftover bits, if any, are ignored. For 32-bit
- architectures and the settings below, there are no leftover bits.
- For architectures with wider pointers, the lookup tree points to a
- list of pages, which must be scanned to find the correct one. */
+ For 32-bit architectures and the settings below, there are no
+ leftover bits. For architectures with wider pointers, the lookup
+ tree points to a list of pages, which must be scanned to find the
+ correct one. */
#define PAGE_L1_BITS (8)
#define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
#define LOOKUP_L2(p) \
(((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
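/* Illustrative sketch (not part of the collector): how a 32-bit
   address splits under the lookup macros above, assuming 4K pages so
   that G.lg_pagesize == 12 and PAGE_L2_BITS == 12.  The address is
   hypothetical.  */
#if 0
static void
example_lookup_split ()
{
  size_t p = 0xbfffe123;			/* hypothetical address */
  size_t l1 = (p >> (32 - 8)) & ((1 << 8) - 1);	/* LOOKUP_L1: 0xbf */
  size_t l2 = (p >> 12) & ((1 << 12) - 1);	/* LOOKUP_L2: 0xffe */
  /* The page entry lives at G.lookup[l1][l2]; the low 12 bits
     (0x123) are the object's offset within its page.  */
}
#endif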
+/* The number of objects per allocation page, for objects on a page of
+ the indicated ORDER. */
+#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
+
+/* The size of an object on a page of the indicated ORDER. */
+#define OBJECT_SIZE(ORDER) object_size_table[ORDER]
+
+/* The number of extra orders, not corresponding to power-of-two sized
+ objects. */
+
+#define NUM_EXTRA_ORDERS \
+ (sizeof (extra_order_size_table) / sizeof (extra_order_size_table[0]))
+
+/* The Ith entry is the maximum size of an object to be stored in the
+ Ith extra order. Adding a new entry to this array is the *only*
+ thing you need to do to add a new special allocation size. */
+
+static const size_t extra_order_size_table[] = {
+ sizeof (struct tree_decl),
+ sizeof (struct tree_list)
+};
+
+/* The total number of orders. */
+
+#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
+
+/* We use this structure to determine the alignment required for
+ allocations. For power-of-two sized allocations, that's not a
+ problem, but it does matter for odd-sized allocations. */
+
+struct max_alignment {
+ char c;
+ union {
+ HOST_WIDEST_INT i;
+#ifdef HAVE_LONG_DOUBLE
+ long double d;
+#else
+ double d;
+#endif
+ } u;
+};
+
+/* The biggest alignment required. */
+
+#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
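+
+/* Illustrative sketch (not part of the collector): rounding an odd
+   object size up to a multiple of MAX_ALIGNMENT, as init_ggc does
+   below for the extra orders.  The 20-byte size and 8-byte alignment
+   are hypothetical.  */
+#if 0
+static size_t
+example_round_size ()
+{
+  size_t s = 20;		/* hypothetical odd object size */
+  /* CEIL (20, 8) * 8 == 24 when MAX_ALIGNMENT is 8.  */
+  return CEIL (s, MAX_ALIGNMENT) * MAX_ALIGNMENT;
+}
+#endif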
+
+/* The Ith entry is the number of objects on a page of order I. */
+
+static unsigned objects_per_page_table[NUM_ORDERS];
+
+/* The Ith entry is the size of an object on a page of order I. */
+
+static size_t object_size_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page. This
structure is dynamically sized to fit the bitmap in_use_p. */
/* The address at which the memory is allocated. */
char *page;
+#ifdef USING_MALLOC_PAGE_GROUPS
+ /* Back pointer to the page group this page came from. */
+ struct page_group *group;
+#endif
+
/* Saved in-use bit vector for pages that aren't in the topmost
context during collection. */
unsigned long *save_in_use_p;
/* Context depth of this page. */
- unsigned char context_depth;
-
- /* The lg of size of objects allocated from this page. */
- unsigned char order;
+ unsigned short context_depth;
/* The number of free objects remaining on this page. */
unsigned short num_free_objects;
next allocation from this page. */
unsigned short next_bit_hint;
- /* Saved number of free objects for pages that aren't in the topmost
- context during colleciton. */
- unsigned short save_num_free_objects;
+ /* The lg of size of objects allocated from this page. */
+ unsigned char order;
/* A bit vector indicating whether or not objects are in use. The
Nth bit is one if the Nth object on this page is allocated. This
unsigned long in_use_p[1];
} page_entry;
+#ifdef USING_MALLOC_PAGE_GROUPS
+/* A page_group describes a large allocation from malloc, from which
+ we parcel out aligned pages. */
+typedef struct page_group
+{
+ /* A linked list of all extant page groups. */
+ struct page_group *next;
+
+ /* The address we received from malloc. */
+ char *allocation;
+
+ /* The size of the block. */
+ size_t alloc_size;
+
+ /* A bitmask of pages in use. */
+ unsigned int in_use;
+} page_group;
+#endif
#if HOST_BITS_PER_PTR <= 32
#else
-/* On 64-bit hosts, we use two level page tables plus a linked list
- that disambiguates the top 32-bits. There will almost always be
+/* On 64-bit hosts, we use the same two level page tables plus a linked
+ list that disambiguates the top 32-bits. There will almost always be
exactly one entry in the list. */
typedef struct page_table_chain
{
If there are any pages with free objects, they will be at the
head of the list. NULL if there are no page-entries for this
object size. */
- page_entry *pages[HOST_BITS_PER_PTR];
+ page_entry *pages[NUM_ORDERS];
/* The Nth element in this array is the last page with objects of
size 2^N. NULL if there are no page-entries for this object
size. */
- page_entry *page_tails[HOST_BITS_PER_PTR];
+ page_entry *page_tails[NUM_ORDERS];
/* Lookup table for associating allocation pages with object addresses. */
page_table lookup;
/* Bytes currently allocated at the end of the last collection. */
size_t allocated_last_gc;
+ /* Total amount of memory mapped. */
+ size_t bytes_mapped;
+
/* The current depth in the context stack. */
- unsigned char context_depth;
+ unsigned short context_depth;
/* A file descriptor open to /dev/zero for reading. */
-#ifndef MAP_ANONYMOUS
+#if defined (HAVE_MMAP_DEV_ZERO)
int dev_zero_fd;
#endif
/* A cache of free system pages. */
page_entry *free_pages;
+#ifdef USING_MALLOC_PAGE_GROUPS
+ page_group *page_groups;
+#endif
+
/* The file descriptor for debugging output. */
FILE *debug_file;
} G;
-
-/* Compute DIVIDEND / DIVISOR, rounded up. */
-#define DIV_ROUND_UP(Dividend, Divisor) \
- ((Dividend + Divisor - 1) / Divisor)
-
-/* The number of objects per allocation page, for objects of size
- 2^ORDER. */
-#define OBJECTS_PER_PAGE(Order) \
- ((Order) >= G.lg_pagesize ? 1 : G.pagesize / ((size_t)1 << (Order)))
-
/* The size in bytes required to maintain a bitmap for the objects
on a page-entry. */
#define BITMAP_SIZE(Num_objects) \
- (DIV_ROUND_UP ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
+ (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
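/* Worked example (assumes 32-bit longs): a page of 512 objects, plus
   the one-past-the-end sentry bit set in alloc_page, needs
   BITMAP_SIZE (513) == CEIL (513, 32) * sizeof (long) == 17 * 4
   == 68 bytes.  */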
/* Skip garbage collection if the current allocation is not at least
this factor times the allocation at the end of the last collection.
test from triggering too often when the heap is small. */
#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
+/* Allocate pages in chunks of this size, to throttle calls to memory
+ allocation routines. The first page is used, the rest go onto the
+ free list. This cannot be larger than HOST_BITS_PER_INT for the
+ in_use bitmask for page_group. */
+#define GGC_QUIRE_SIZE 16
\f
-static page_entry *lookup_page_table_entry PROTO ((void *));
-static void set_page_table_entry PROTO ((void *, page_entry *));
-static char *alloc_anon PROTO ((char *, size_t));
-static struct page_entry * alloc_page PROTO ((unsigned));
-static void free_page PROTO ((struct page_entry *));
-static void release_pages PROTO ((void));
-static void *alloc_obj PROTO ((size_t, int));
-static int mark_obj PROTO ((void *));
-static void clear_marks PROTO ((void));
-static void sweep_pages PROTO ((void));
+static int ggc_allocated_p PARAMS ((const void *));
+static page_entry *lookup_page_table_entry PARAMS ((const void *));
+static void set_page_table_entry PARAMS ((void *, page_entry *));
+#ifdef USING_MMAP
+static char *alloc_anon PARAMS ((char *, size_t));
+#endif
+#ifdef USING_MALLOC_PAGE_GROUPS
+static size_t page_group_index PARAMS ((char *, char *));
+static void set_page_group_in_use PARAMS ((page_group *, char *));
+static void clear_page_group_in_use PARAMS ((page_group *, char *));
+#endif
+static struct page_entry * alloc_page PARAMS ((unsigned));
+static void free_page PARAMS ((struct page_entry *));
+static void release_pages PARAMS ((void));
+static void clear_marks PARAMS ((void));
+static void sweep_pages PARAMS ((void));
+static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
#ifdef GGC_POISON
-static void poison PROTO ((void *, size_t));
-static void poison_pages PROTO ((void));
+static void poison_pages PARAMS ((void));
#endif
-void debug_print_page_list PROTO ((int));
+void debug_print_page_list PARAMS ((int));
\f
+/* Returns non-zero if P was allocated in GC'able memory. */
+
+static inline int
+ggc_allocated_p (p)
+ const void *p;
+{
+ page_entry ***base;
+ size_t L1, L2;
+
+#if HOST_BITS_PER_PTR <= 32
+ base = &G.lookup[0];
+#else
+ page_table table = G.lookup;
+ size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
+ while (1)
+ {
+ if (table == NULL)
+ return 0;
+ if (table->high_bits == high_bits)
+ break;
+ table = table->next;
+ }
+ base = &table->table[0];
+#endif
+
+ /* Extract the level 1 and 2 indices. */
+ L1 = LOOKUP_L1 (p);
+ L2 = LOOKUP_L2 (p);
+
+ return base[L1] && base[L1][L2];
+}
+
/* Traverse the page table and find the entry for a page.
Die (probably) if the object wasn't allocated via GC. */
static inline page_entry *
lookup_page_table_entry (p)
- void *p;
+ const void *p;
{
page_entry ***base;
size_t L1, L2;
base = &table->table[0];
#endif
- /* Extract the level 1 and 2 indicies. */
+ /* Extract the level 1 and 2 indices. */
L1 = LOOKUP_L1 (p);
L2 = LOOKUP_L2 (p);
return base[L1][L2];
}
-
/* Set the page table entry for a page. */
+
static void
set_page_table_entry (p, entry)
void *p;
base = &table->table[0];
#endif
- /* Extract the level 1 and 2 indicies. */
+ /* Extract the level 1 and 2 indices. */
L1 = LOOKUP_L1 (p);
L2 = LOOKUP_L2 (p);
base[L1][L2] = entry;
}
-
/* Prints the page-entry for object size ORDER, for debugging. */
+
void
debug_print_page_list (order)
int order;
{
page_entry *p;
- printf ("Head=%p, Tail=%p:\n", G.pages[order], G.page_tails[order]);
+ printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
+ (PTR) G.page_tails[order]);
p = G.pages[order];
while (p != NULL)
{
- printf ("%p(%1d|%3d) -> ", p, p->context_depth, p->num_free_objects);
+ printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
+ p->num_free_objects);
p = p->next;
}
printf ("NULL\n");
fflush (stdout);
}
-#ifdef GGC_POISON
-/* `Poisons' the region of memory starting at START and extending for
- LEN bytes. */
-static inline void
-poison (start, len)
- void *start;
- size_t len;
-{
- memset (start, 0xa5, len);
-}
-#endif
-
+#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
- (if non-null). */
+ (if non-null). The ifdef structure here is intended to cause a
+ compile error unless exactly one of the HAVE_* is defined. */
+
static inline char *
alloc_anon (pref, size)
- char *pref;
+ char *pref ATTRIBUTE_UNUSED;
size_t size;
{
- char *page;
-
-#ifdef MAP_ANONYMOUS
- page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-#else
- page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE, G.dev_zero_fd, 0);
+#ifdef HAVE_MMAP_ANON
+ char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+#endif
+#ifdef HAVE_MMAP_DEV_ZERO
+ char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
+
if (page == (char *) MAP_FAILED)
{
- fputs ("Virtual memory exhausted!\n", stderr);
- exit(1);
+ perror ("virtual memory exhausted");
+ exit (FATAL_EXIT_CODE);
}
+ /* Remember that we allocated this memory. */
+ G.bytes_mapped += size;
+
return page;
}
+#endif
+#ifdef USING_MALLOC_PAGE_GROUPS
+/* Compute the index for this page into the page group. */
+
+static inline size_t
+page_group_index (allocation, page)
+ char *allocation, *page;
+{
+ return (size_t) (page - allocation) >> G.lg_pagesize;
+}
+
+/* Set and clear the in_use bit for this page in the page group. */
+
+static inline void
+set_page_group_in_use (group, page)
+ page_group *group;
+ char *page;
+{
+ group->in_use |= 1 << page_group_index (group->allocation, page);
+}
+
+static inline void
+clear_page_group_in_use (group, page)
+ page_group *group;
+ char *page;
+{
+ group->in_use &= ~(1 << page_group_index (group->allocation, page));
+}
+#endif
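+
+/* Illustrative sketch (hypothetical values, assumes 4K pages): the
+   fourth page carved from a group starts 3 << G.lg_pagesize bytes
+   into the allocation, so page_group_index returns 3 and the helpers
+   above toggle bit 3 (mask 0x8) of group->in_use.  */
+#if 0
+static void
+example_group_bits (group)
+     page_group *group;
+{
+  char *page = group->allocation + (3 << G.lg_pagesize);
+  set_page_group_in_use (group, page);	  /* in_use |= 0x8 */
+  clear_page_group_in_use (group, page);  /* in_use &= ~0x8 */
+}
+#endif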
/* Allocate a new page for allocating objects of size 2^ORDER,
and return an entry for it. The entry is not added to the
appropriate page_table list. */
+
static inline struct page_entry *
alloc_page (order)
unsigned order;
size_t bitmap_size;
size_t page_entry_size;
size_t entry_size;
+#ifdef USING_MALLOC_PAGE_GROUPS
+ page_group *group;
+#endif
num_objects = OBJECTS_PER_PAGE (order);
bitmap_size = BITMAP_SIZE (num_objects + 1);
page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
- entry_size = num_objects * (1 << order);
+ entry_size = num_objects * OBJECT_SIZE (order);
+ if (entry_size < G.pagesize)
+ entry_size = G.pagesize;
entry = NULL;
page = NULL;
/* Check the list of free pages for one we can use. */
- for (pp = &G.free_pages, p = *pp; p ; pp = &p->next, p = *pp)
+ for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
if (p->bytes == entry_size)
break;
if (p != NULL)
{
- /* Recycle the allocated memory from this page ... */
+ /* Recycle the allocated memory from this page ... */
*pp = p->next;
page = p->page;
+
+#ifdef USING_MALLOC_PAGE_GROUPS
+ group = p->group;
+#endif
+
/* ... and, if possible, the page entry itself. */
if (p->order == order)
{
else
free (p);
}
+#ifdef USING_MMAP
+ else if (entry_size == G.pagesize)
+ {
+ /* We want just one page. Allocate a bunch of them and put the
+ extras on the freelist. (Can only do this optimization with
+ mmap for backing store.) */
+ struct page_entry *e, *f = G.free_pages;
+ int i;
+
+ page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
+
+ /* This loop counts down so that the chain will be in ascending
+ memory order. */
+ for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
+ {
+ e = (struct page_entry *) xcalloc (1, page_entry_size);
+ e->order = order;
+ e->bytes = G.pagesize;
+ e->page = page + (i << G.lg_pagesize);
+ e->next = f;
+ f = e;
+ }
+
+ G.free_pages = f;
+ }
+ else
+ page = alloc_anon (NULL, entry_size);
+#endif
+#ifdef USING_MALLOC_PAGE_GROUPS
else
{
- /* Actually allocate the memory, using mmap. */
- page = alloc_anon (NULL, entry_size);
+ /* Allocate a large block of memory and serve out the aligned
+ pages therein. This results in much less memory wastage
+ than the traditional implementation of valloc. */
+
+ char *allocation, *a, *enda;
+ size_t alloc_size, head_slop, tail_slop;
+ int multiple_pages = (entry_size == G.pagesize);
+
+ if (multiple_pages)
+ alloc_size = GGC_QUIRE_SIZE * G.pagesize;
+ else
+ alloc_size = entry_size + G.pagesize - 1;
+ allocation = xmalloc (alloc_size);
+
+ page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
+ head_slop = page - allocation;
+ if (multiple_pages)
+ tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
+ else
+ tail_slop = alloc_size - entry_size - head_slop;
+ enda = allocation + alloc_size - tail_slop;
+
+ /* We allocated N pages, which are likely not aligned, leaving
+ us with N-1 usable pages. We plan to place the page_group
+ structure somewhere in the slop. */
+ if (head_slop >= sizeof (page_group))
+ group = (page_group *)page - 1;
+ else
+ {
+ /* We magically got an aligned allocation. Too bad, we have
+ to waste a page anyway. */
+ if (tail_slop == 0)
+ {
+ enda -= G.pagesize;
+ tail_slop += G.pagesize;
+ }
+ if (tail_slop < sizeof (page_group))
+ abort ();
+ group = (page_group *)enda;
+ tail_slop -= sizeof (page_group);
+ }
+
+ /* Remember that we allocated this memory. */
+ group->next = G.page_groups;
+ group->allocation = allocation;
+ group->alloc_size = alloc_size;
+ group->in_use = 0;
+ G.page_groups = group;
+ G.bytes_mapped += alloc_size;
+
+ /* If we allocated multiple pages, put the rest on the free list. */
+ if (multiple_pages)
+ {
+ struct page_entry *e, *f = G.free_pages;
+ for (a = enda - G.pagesize; a != page; a -= G.pagesize)
+ {
+ e = (struct page_entry *) xcalloc (1, page_entry_size);
+ e->order = order;
+ e->bytes = G.pagesize;
+ e->page = a;
+ e->group = group;
+ e->next = f;
+ f = e;
+ }
+ G.free_pages = f;
+ }
}
+#endif
if (entry == NULL)
entry = (struct page_entry *) xcalloc (1, page_entry_size);
entry->num_free_objects = num_objects;
entry->next_bit_hint = 1;
+#ifdef USING_MALLOC_PAGE_GROUPS
+ entry->group = group;
+ set_page_group_in_use (group, page);
+#endif
+
/* Set the one-past-the-end in-use bit. This acts as a sentry as we
increment the hint. */
entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
if (GGC_DEBUG_LEVEL >= 2)
fprintf (G.debug_file,
- "Allocating page at %p, object size=%d, data %p-%p\n", entry,
- 1 << order, page, page + entry_size - 1);
+ "Allocating page at %p, object size=%ld, data %p-%p\n",
+ (PTR) entry, (long) OBJECT_SIZE (order), page,
+ page + entry_size - 1);
return entry;
}
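/* Worked example for the malloc page-group path above (hypothetical
   addresses, 4K pages, single-page request): if xmalloc returns
   0x1000804, alloc_size == 4096 + 4095 and the aligned page is
   (0x1000804 + 0xfff) & -0x1000 == 0x1001000, leaving head_slop ==
   0x7fc.  That exceeds sizeof (page_group), so the group header is
   placed in the head slop immediately below the page.  */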
+/* For a page that is no longer needed, put it on the free page list. */
-/* Free a page when it's no longer needed. */
static inline void
free_page (entry)
page_entry *entry;
{
if (GGC_DEBUG_LEVEL >= 2)
fprintf (G.debug_file,
- "Deallocating page at %p, data %p-%p\n", entry,
+ "Deallocating page at %p, data %p-%p\n", (PTR) entry,
entry->page, entry->page + entry->bytes - 1);
set_page_table_entry (entry->page, NULL);
+#ifdef USING_MALLOC_PAGE_GROUPS
+ clear_page_group_in_use (entry->group, entry->page);
+#endif
+
entry->next = G.free_pages;
G.free_pages = entry;
}
+/* Release the free page cache to the system. */
-/* Release the page cache to the system. */
-static inline void
+static void
release_pages ()
{
+#ifdef USING_MMAP
page_entry *p, *next;
char *start;
size_t len;
+ /* Gather up adjacent pages so they are unmapped together. */
p = G.free_pages;
- if (p == NULL)
- return;
-
- next = p->next;
- start = p->page;
- len = p->bytes;
- free (p);
- p = next;
while (p)
{
+ start = p->page;
next = p->next;
- /* Gather up adjacent pages so they are unmapped together. */
- if (p->page == start + len)
- len += p->bytes;
- else
- {
- munmap (start, len);
- start = p->page;
- len = p->bytes;
- }
+ len = p->bytes;
free (p);
p = next;
+
+ while (p && p->page == start + len)
+ {
+ next = p->next;
+ len += p->bytes;
+ free (p);
+ p = next;
+ }
+
+ munmap (start, len);
+ G.bytes_mapped -= len;
}
- munmap (start, len);
G.free_pages = NULL;
+#endif
+#ifdef USING_MALLOC_PAGE_GROUPS
+ page_entry **pp, *p;
+ page_group **gp, *g;
+
+ /* Remove all pages from free page groups from the list. */
+ pp = &G.free_pages;
+ while ((p = *pp) != NULL)
+ if (p->group->in_use == 0)
+ {
+ *pp = p->next;
+ free (p);
+ }
+ else
+ pp = &p->next;
+
+ /* Remove all free page groups, and release the storage. */
+ gp = &G.page_groups;
+ while ((g = *gp) != NULL)
+ if (g->in_use == 0)
+ {
+ *gp = g->next;
+ G.bytes_mapped -= g->alloc_size;
+ free (g->allocation);
+ }
+ else
+ gp = &g->next;
+#endif
}
-
/* This table provides a fast way to determine ceil(log_2(size)) for
- allocation requests. The minimum allocation size is four bytes. */
-static unsigned char const size_lookup[257] =
-{
- 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
+ allocation requests. The minimum allocation size is eight bytes. */
+
+static unsigned char size_lookup[257] =
+{
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
/* Allocate a chunk of memory of SIZE bytes. If ZERO is non-zero, the
memory is zeroed; otherwise, its contents are undefined. */
-static void *
-alloc_obj (size, zero)
+
+void *
+ggc_alloc (size)
size_t size;
- int zero;
{
unsigned order, word, bit, object_offset;
struct page_entry *entry;
else
{
order = 9;
- while (size > ((size_t) 1 << order))
+ while (size > OBJECT_SIZE (order))
order++;
}
/* If there is no page for this object size, or all pages in this
context are full, allocate a new page. */
- if (entry == NULL
- || entry->num_free_objects == 0
- || entry->context_depth != G.context_depth)
+ if (entry == NULL || entry->num_free_objects == 0)
{
struct page_entry *new_entry;
new_entry = alloc_page (order);
/* Next time, try the next bit. */
entry->next_bit_hint = hint + 1;
- object_offset = hint << order;
+ object_offset = hint * OBJECT_SIZE (order);
}
/* Set the in-use bit. */
result = entry->page + object_offset;
#ifdef GGC_POISON
- /* `Poison' the entire allocated object before zeroing the requested area,
- so that bytes beyond the end, if any, will not necessarily be zero. */
- poison (result, 1 << order);
+ /* `Poison' the entire allocated object, including any padding at
+ the end. */
+ memset (result, 0xaf, OBJECT_SIZE (order));
#endif
- if (zero)
- memset (result, 0, size);
/* Keep track of how many bytes are being allocated. This
information is used in deciding when to collect. */
- G.allocated += (size_t) 1 << order;
+ G.allocated += OBJECT_SIZE (order);
if (GGC_DEBUG_LEVEL >= 3)
fprintf (G.debug_file,
- "Allocating object, requested size=%d, actual=%d at %p on %p\n",
- (int) size, 1 << order, result, entry);
+ "Allocating object, requested size=%ld, actual=%ld at %p on %p\n",
+ (long) size, (long) OBJECT_SIZE (order), result, (PTR) entry);
return result;
}
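/* Usage sketch (not part of the collector): a 20-byte request
   indexes size_lookup[20], which the static table maps to order 5,
   i.e. a 32-byte object; init_ggc may instead remap that range onto
   an extra order sized for, e.g., struct tree_list, trimming the
   padding.  */
#if 0
static void *
example_alloc ()
{
  return ggc_alloc (20);	/* a 32-byte (or extra-order) object */
}
#endif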
-
-/* If P is not marked, marks it and returns 0. Otherwise returns 1.
+/* If P is not marked, mark it and return false.  Otherwise return true.
P must have been allocated by the GC allocator; it mustn't point to
static objects, stack variables, or memory allocated with malloc. */
-static int
-mark_obj (p)
- void *p;
+
+int
+ggc_set_mark (p)
+ const void *p;
{
page_entry *entry;
unsigned bit, word;
/* Look up the page on which the object is alloced. If the object
wasn't allocated by the collector, we'll probably die. */
- entry = lookup_page_table_entry(p);
+ entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
if (entry == NULL)
abort ();
/* Calculate the index of the object on the page; this is its bit
position in the in_use_p bitmap. */
- bit = (((char *) p) - entry->page) >> entry->order;
+ bit = (((const char *) p) - entry->page) / OBJECT_SIZE (entry->order);
word = bit / HOST_BITS_PER_LONG;
mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
- /* If the bit was previously set, skip it. */
+ /* If the bit was previously set, skip it. */
if (entry->in_use_p[word] & mask)
return 1;
entry->in_use_p[word] |= mask;
entry->num_free_objects -= 1;
- G.allocated += (size_t) 1 << entry->order;
-
if (GGC_DEBUG_LEVEL >= 4)
fprintf (G.debug_file, "Marking %p\n", p);
return 0;
}
+/* Return 1 if P has been marked, zero otherwise.
+ P must have been allocated by the GC allocator; it mustn't point to
+ static objects, stack variables, or memory allocated with malloc. */
+
+int
+ggc_marked_p (p)
+ const void *p;
+{
+ page_entry *entry;
+ unsigned bit, word;
+ unsigned long mask;
+
+ /* Look up the page on which the object is alloced. If the object
+ wasn't allocated by the collector, we'll probably die. */
+ entry = lookup_page_table_entry (p);
+#ifdef ENABLE_CHECKING
+ if (entry == NULL)
+ abort ();
+#endif
+
+ /* Calculate the index of the object on the page; this is its bit
+ position in the in_use_p bitmap. */
+ bit = (((const char *) p) - entry->page) / OBJECT_SIZE (entry->order);
+ word = bit / HOST_BITS_PER_LONG;
+ mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
+
+ return (entry->in_use_p[word] & mask) != 0;
+}
+
+/* Return the size of the gc-able object P. */
+
+size_t
+ggc_get_size (p)
+ const void *p;
+{
+ page_entry *pe = lookup_page_table_entry (p);
+ return OBJECT_SIZE (pe->order);
+}
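+
+/* Usage sketch (P is a hypothetical pointer previously returned by
+   ggc_alloc): the first ggc_set_mark returns zero and marks P; later
+   calls return nonzero, which mark routines use to avoid
+   re-traversing shared structure.  */
+#if 0
+static void
+example_marking (p)
+     const void *p;
+{
+  if (! ggc_set_mark (p))
+    {
+      /* First visit: P is now live; recurse into its children here.
+	 Its rounded-up size is available for accounting.  */
+      size_t size = ggc_get_size (p);
+    }
+  /* ggc_marked_p (p) now holds until the next clear_marks.  */
+}
+#endif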
\f
/* Initialize the ggc-mmap allocator. */
+
void
init_ggc ()
{
+ unsigned order;
+
G.pagesize = getpagesize();
G.lg_pagesize = exact_log2 (G.pagesize);
-#ifndef MAP_ANONYMOUS
+#ifdef HAVE_MMAP_DEV_ZERO
G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
if (G.dev_zero_fd == -1)
abort ();
G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
- empty_string = ggc_alloc_string ("", 0);
- ggc_add_string_root (&empty_string, 1);
+#ifdef USING_MMAP
+ /* StunOS has an amazing off-by-one error for the first mmap allocation
+ after fiddling with RLIMIT_STACK. The result, as hard as it is to
+ believe, is an unaligned page allocation, which would cause us to
+ hork badly if we tried to use it. */
+ {
+ char *p = alloc_anon (NULL, G.pagesize);
+ struct page_entry *e;
+ if ((size_t)p & (G.pagesize - 1))
+ {
+ /* How losing. Discard this one and try another. If we still
+ can't get something useful, give up. */
+
+ p = alloc_anon (NULL, G.pagesize);
+ if ((size_t)p & (G.pagesize - 1))
+ abort ();
+ }
+
+ /* We have a good page, might as well hold onto it... */
+ e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
+ e->bytes = G.pagesize;
+ e->page = p;
+ e->next = G.free_pages;
+ G.free_pages = e;
+ }
+#endif
+
+ /* Initialize the object size table. */
+ for (order = 0; order < HOST_BITS_PER_PTR; ++order)
+ object_size_table[order] = (size_t) 1 << order;
+ for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
+ {
+ size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
+
+ /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
+ so that we're sure of getting aligned memory. */
+ s = CEIL (s, MAX_ALIGNMENT) * MAX_ALIGNMENT;
+ object_size_table[order] = s;
+ }
+
+ /* Initialize the objects-per-page table. */
+ for (order = 0; order < NUM_ORDERS; ++order)
+ {
+ objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
+ if (objects_per_page_table[order] == 0)
+ objects_per_page_table[order] = 1;
+ }
+
+ /* Reset the size_lookup array to put appropriately sized objects in
+ the special orders. All objects bigger than the previous power
+ of two, but no greater than the special size, should go in the
+ new order. */
+ for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
+ {
+ int o;
+ int i;
+
+ o = size_lookup[OBJECT_SIZE (order)];
+ for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
+ size_lookup[i] = order;
+ }
}
+/* Increment the `GC context'. Objects allocated in an outer context
+ are never freed, eliminating the need to register their roots. */
void
ggc_push_context ()
abort ();
}
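/* Usage sketch (not part of the collector): an object allocated
   before the push needs no registered root to survive collections
   made while the inner context is active.  */
#if 0
static void
example_context ()
{
  void *persistent = ggc_alloc (64);	/* outer-context object */
  ggc_push_context ();
  ggc_collect ();	/* will not sweep PERSISTENT's page */
  ggc_pop_context ();	/* page state is merged back via
			   ggc_recalculate_in_use_p */
}
#endif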
+/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
+ reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
+
+static void
+ggc_recalculate_in_use_p (p)
+ page_entry *p;
+{
+ unsigned int i;
+ size_t num_objects;
+
+ /* Because the past-the-end bit in in_use_p is always set, we
+ pretend there is one additional object. */
+ num_objects = OBJECTS_PER_PAGE (p->order) + 1;
+
+ /* Reset the free object count. */
+ p->num_free_objects = num_objects;
+
+ /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
+ for (i = 0;
+ i < CEIL (BITMAP_SIZE (num_objects),
+ sizeof (*p->in_use_p));
+ ++i)
+ {
+ unsigned long j;
+
+ /* Something is in use if it is marked, or if it was in use in a
+ context further down the context stack. */
+ p->in_use_p[i] |= p->save_in_use_p[i];
+
+ /* Decrement the free object count for every object allocated. */
+ for (j = p->in_use_p[i]; j; j >>= 1)
+ p->num_free_objects -= (j & 1);
+ }
+
+ if (p->num_free_objects >= num_objects)
+ abort ();
+}
+
+/* Decrement the `GC context'. All objects allocated since the
+ previous ggc_push_context are migrated to the outer context. */
void
ggc_pop_context ()
/* Any remaining pages in the popped context are lowered to the new
current context; i.e. objects allocated in the popped context and
left over are imported into the previous context. */
- for (order = 2; order < HOST_BITS_PER_PTR; order++)
+ for (order = 2; order < NUM_ORDERS; order++)
{
- size_t num_objects = OBJECTS_PER_PAGE (order);
- size_t bitmap_size = BITMAP_SIZE (num_objects);
-
page_entry *p;
for (p = G.pages[order]; p != NULL; p = p->next)
{
if (p->context_depth > depth)
- {
- p->context_depth = depth;
- }
+ p->context_depth = depth;
/* If this page is now in the topmost context, and we'd
saved its allocation state, restore it. */
else if (p->context_depth == depth && p->save_in_use_p)
{
- memcpy (p->in_use_p, p->save_in_use_p, bitmap_size);
+ ggc_recalculate_in_use_p (p);
free (p->save_in_use_p);
p->save_in_use_p = 0;
- p->num_free_objects = p->save_num_free_objects;
}
}
}
}
-
-
-struct rtx_def *
-ggc_alloc_rtx (nslots)
- int nslots;
-{
- return (struct rtx_def *)
- alloc_obj (sizeof (struct rtx_def) + (nslots - 1) * sizeof (rtunion), 1);
-}
-
-
-struct rtvec_def *
-ggc_alloc_rtvec (nelt)
- int nelt;
-{
- return (struct rtvec_def *)
- alloc_obj (sizeof (struct rtvec_def) + (nelt - 1) * sizeof (rtunion), 1);
-}
-
-
-union tree_node *
-ggc_alloc_tree (length)
- int length;
-{
- return (union tree_node *) alloc_obj (length, 1);
-}
-
-
-char *
-ggc_alloc_string (contents, length)
- const char *contents;
- int length;
-{
- char *string;
-
- if (length < 0)
- {
- if (contents == NULL)
- return NULL;
- length = strlen (contents);
- }
-
- string = (char *) alloc_obj (length + 1, 0);
- if (contents != NULL)
- memcpy (string, contents, length);
- string[length] = 0;
-
- return string;
-}
-
-
-void *
-ggc_alloc (size)
- size_t size;
-{
- return alloc_obj (size, 0);
-}
-
\f
+/* Unmark all objects. */
+
static inline void
clear_marks ()
{
unsigned order;
- for (order = 2; order < HOST_BITS_PER_PTR; order++)
+ for (order = 2; order < NUM_ORDERS; order++)
{
size_t num_objects = OBJECTS_PER_PAGE (order);
- size_t bitmap_size = BITMAP_SIZE (num_objects);
+ size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
page_entry *p;
for (p = G.pages[order]; p != NULL; p = p->next)
/* Pages that aren't in the topmost context are not collected;
nevertheless, we need their in-use bit vectors to store GC
marks. So, back them up first. */
- if (p->context_depth < G.context_depth
- && ! p->save_in_use_p)
+ if (p->context_depth < G.context_depth)
{
- p->save_in_use_p = xmalloc (bitmap_size);
+ if (! p->save_in_use_p)
+ p->save_in_use_p = xmalloc (bitmap_size);
memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
- p->save_num_free_objects = p->num_free_objects;
}
	  /* Reset the number of free objects and clear the
}
}
+/* Free all empty pages. Partially empty pages need no attention
+ because the `mark' bit doubles as an `unused' bit. */
+
static inline void
sweep_pages ()
{
unsigned order;
- for (order = 2; order < HOST_BITS_PER_PTR; order++)
+ for (order = 2; order < NUM_ORDERS; order++)
{
/* The last page-entry to consider, regardless of entries
placed at the end of the list. */
page_entry * const last = G.page_tails[order];
size_t num_objects = OBJECTS_PER_PAGE (order);
+ size_t live_objects;
page_entry *p, *previous;
int done;
/* Loop until all entries have been examined. */
done = (p == last);
+ /* Add all live objects on this page to the count of
+ allocated memory. */
+ live_objects = num_objects - p->num_free_objects;
+
+ G.allocated += OBJECT_SIZE (order) * live_objects;
+
/* Only objects on pages in the topmost context should get
collected. */
if (p->context_depth < G.context_depth)
;
/* Remove the page if it's empty. */
- else if (p->num_free_objects == num_objects)
+ else if (live_objects == 0)
{
if (! previous)
G.pages[order] = next;
p = next;
}
while (! done);
+
+ /* Now, restore the in_use_p vectors for any pages from contexts
+ other than the current one. */
+ for (p = G.pages[order]; p; p = p->next)
+ if (p->context_depth != G.context_depth)
+ ggc_recalculate_in_use_p (p);
}
}
#ifdef GGC_POISON
+/* Clobber all free objects. */
+
static inline void
poison_pages ()
{
unsigned order;
- for (order = 2; order < HOST_BITS_PER_PTR; order++)
+ for (order = 2; order < NUM_ORDERS; order++)
{
size_t num_objects = OBJECTS_PER_PAGE (order);
- size_t size = (size_t) 1 << order;
+ size_t size = OBJECT_SIZE (order);
page_entry *p;
for (p = G.pages[order]; p != NULL; p = p->next)
{
size_t i;
+
+ if (p->context_depth != G.context_depth)
+ /* Since we don't do any collection for pages in pushed
+ contexts, there's no need to do any poisoning. And
+ besides, the IN_USE_P array isn't valid until we pop
+ contexts. */
+ continue;
+
for (i = 0; i < num_objects; i++)
{
size_t word, bit;
word = i / HOST_BITS_PER_LONG;
bit = i % HOST_BITS_PER_LONG;
if (((p->in_use_p[word] >> bit) & 1) == 0)
- poison (p->page + i * size, size);
+ memset (p->page + i * size, 0xa5, size);
}
}
}
}
#endif
+/* Top level mark-and-sweep routine. */
+
void
ggc_collect ()
{
- int time;
-
/* Avoid frequent unnecessary work by skipping collection if the
total allocations haven't expanded much since the last
collection. */
return;
#endif
- time = get_run_time ();
+ timevar_push (TV_GC);
if (!quiet_flag)
- fprintf (stderr, " {GC %luk -> ", (unsigned long)G.allocated / 1024);
+ fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
- /* Zero the total allocated bytes. We'll reaccumulate this while
- marking. */
+ /* Zero the total allocated bytes. This will be recalculated in the
+ sweep phase. */
G.allocated = 0;
/* Release the pages we freed the last time we collected, but didn't
clear_marks ();
ggc_mark_roots ();
- sweep_pages ();
#ifdef GGC_POISON
poison_pages ();
#endif
+ sweep_pages ();
+
G.allocated_last_gc = G.allocated;
if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
- time = get_run_time () - time;
- gc_time += time;
+ timevar_pop (TV_GC);
- time = (time + 500) / 1000;
if (!quiet_flag)
- fprintf (stderr, "%luk in %d.%03d}",
- (unsigned long) G.allocated / 1024, time / 1000, time % 1000);
+ fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
}
+/* Print allocation statistics. */
+#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
+ ? (x) \
+ : ((x) < 1024*1024*10 \
+ ? (x) / 1024 \
+ : (x) / (1024*1024))))
+#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
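+
+/* Worked example of the scaling above:
+     SCALE (4096)     == 4096, LABEL == ' '  (below 10K: bytes)
+     SCALE (151552)   == 148,  LABEL == 'k'  (below 10M: kilobytes)
+     SCALE (33554432) == 32,   LABEL == 'M'  (otherwise: megabytes)  */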
-int
-ggc_set_mark_rtx (r)
- rtx r;
+void
+ggc_print_statistics ()
{
- return mark_obj (r);
-}
+ struct ggc_statistics stats;
+ unsigned int i;
+ size_t total_overhead = 0;
-int
-ggc_set_mark_rtvec (v)
- rtvec v;
-{
- return mark_obj (v);
-}
+ /* Clear the statistics. */
+ memset (&stats, 0, sizeof (stats));
+
+ /* Make sure collection will really occur. */
+ G.allocated_last_gc = 0;
-int
-ggc_set_mark_tree (t)
- tree t;
-{
- return mark_obj (t);
-}
+ /* Collect and print the statistics common across collectors. */
+ ggc_print_common_statistics (stderr, &stats);
-void
-ggc_mark_string (s)
- char *s;
-{
- if (s)
- mark_obj (s);
-}
+ /* Release free pages so that we will not count the bytes allocated
+ there as part of the total allocated memory. */
+ release_pages ();
-void
-ggc_mark (p)
- void *p;
-{
- if (p)
- mark_obj (p);
+ /* Collect some information about the various sizes of
+ allocation. */
+ fprintf (stderr, "\n%-5s %10s %10s %10s\n",
+ "Size", "Allocated", "Used", "Overhead");
+ for (i = 0; i < NUM_ORDERS; ++i)
+ {
+ page_entry *p;
+ size_t allocated;
+ size_t in_use;
+ size_t overhead;
+
+ /* Skip empty entries. */
+ if (!G.pages[i])
+ continue;
+
+ overhead = allocated = in_use = 0;
+
+ /* Figure out the total number of bytes allocated for objects of
+ this size, and how many of them are actually in use. Also figure
+ out how much memory the page table is using. */
+ for (p = G.pages[i]; p; p = p->next)
+ {
+ allocated += p->bytes;
+ in_use +=
+ (OBJECTS_PER_PAGE (i) - p->num_free_objects) * OBJECT_SIZE (i);
+
+ overhead += (sizeof (page_entry) - sizeof (long)
+ + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
+ }
+      fprintf (stderr, "%-5ld %10ld%c %10ld%c %10ld%c\n",
+	       (long) OBJECT_SIZE (i),
+ SCALE (allocated), LABEL (allocated),
+ SCALE (in_use), LABEL (in_use),
+ SCALE (overhead), LABEL (overhead));
+ total_overhead += overhead;
+ }
+ fprintf (stderr, "%-5s %10ld%c %10ld%c %10ld%c\n", "Total",
+ SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
+ SCALE (G.allocated), LABEL(G.allocated),
+ SCALE (total_overhead), LABEL (total_overhead));
}