/* "Bag-of-pages" garbage collector for the GNU compiler.
- Copyright (C) 1999 Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000 Free Software Foundation, Inc.
This file is part of GNU CC.
#include "flags.h"
#include "ggc.h"
-#ifdef HAVE_MMAP
+#ifdef HAVE_MMAP_ANYWHERE
#include <sys/mman.h>
#endif
last collection. */
#undef GGC_ALWAYS_COLLECT
-/* If ENABLE_CHECKING is defined, enable GGC_POISON and
- GGC_ALWAYS_COLLECT automatically. */
-#ifdef ENABLE_CHECKING
+#ifdef ENABLE_GC_CHECKING
#define GGC_POISON
+#endif
+#ifdef ENABLE_GC_ALWAYS_COLLECT
#define GGC_ALWAYS_COLLECT
#endif
unsigned long *save_in_use_p;
/* Context depth of this page. */
- unsigned char context_depth;
-
- /* The lg of size of objects allocated from this page. */
- unsigned char order;
+ unsigned short context_depth;
/* The number of free objects remaining on this page. */
unsigned short num_free_objects;
next allocation from this page. */
unsigned short next_bit_hint;
- /* Saved number of free objects for pages that aren't in the topmost
- context during colleciton. */
- unsigned short save_num_free_objects;
+ /* The lg of size of objects allocated from this page. */
+ unsigned char order;
/* A bit vector indicating whether or not objects are in use. The
Nth bit is one if the Nth object on this page is allocated. This
size_t bytes_mapped;
/* The current depth in the context stack. */
- unsigned char context_depth;
+ unsigned short context_depth;
/* A file descriptor open to /dev/zero for reading. */
-#if defined (HAVE_MMAP) && !defined(MAP_ANONYMOUS)
+#if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
int dev_zero_fd;
#endif
/* Compute DIVIDEND / DIVISOR, rounded up. */
#define DIV_ROUND_UP(Dividend, Divisor) \
- ((Dividend + Divisor - 1) / Divisor)
+ (((Dividend) + (Divisor) - 1) / (Divisor))
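+/* The added parentheses matter: with the old definition,
+   DIV_ROUND_UP (x, y + 1) would expand to
+   ((x + y + 1 - 1) / y + 1), dividing by y and then adding 1
+   instead of dividing by y + 1.  */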
/* The number of objects per allocation page, for objects of size
2^ORDER. */
#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
\f
-static int ggc_allocated_p PROTO ((const void *));
-static page_entry *lookup_page_table_entry PROTO ((const void *));
-static void set_page_table_entry PROTO ((void *, page_entry *));
-static char *alloc_anon PROTO ((char *, size_t));
-static struct page_entry * alloc_page PROTO ((unsigned));
-static void free_page PROTO ((struct page_entry *));
-static void release_pages PROTO ((void));
-static void clear_marks PROTO ((void));
-static void sweep_pages PROTO ((void));
+static int ggc_allocated_p PARAMS ((const void *));
+static page_entry *lookup_page_table_entry PARAMS ((const void *));
+static void set_page_table_entry PARAMS ((void *, page_entry *));
+static char *alloc_anon PARAMS ((char *, size_t));
+static struct page_entry * alloc_page PARAMS ((unsigned));
+static void free_page PARAMS ((struct page_entry *));
+static void release_pages PARAMS ((void));
+static void clear_marks PARAMS ((void));
+static void sweep_pages PARAMS ((void));
+static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
#ifdef GGC_POISON
-static void poison_pages PROTO ((void));
+static void poison_pages PARAMS ((void));
#endif
-void debug_print_page_list PROTO ((int));
+void debug_print_page_list PARAMS ((int));
\f
/* Returns non-zero if P was allocated in GC'able memory. */
{
char *page;
-#ifdef HAVE_MMAP
+#ifdef HAVE_MMAP_ANYWHERE
#ifdef MAP_ANONYMOUS
page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
exit(1);
}
#endif /* HAVE_VALLOC */
-#endif /* HAVE_MMAP */
+#endif /* HAVE_MMAP_ANYWHERE */
/* Remember that we allocated this memory. */
G.bytes_mapped += size;
/* Release the free page cache to the system. */
-static inline void
+static void
release_pages ()
{
-#ifdef HAVE_MMAP
+#ifdef HAVE_MMAP_ANYWHERE
page_entry *p, *next;
char *start;
size_t len;
free (p);
}
#endif /* HAVE_VALLOC */
-#endif /* HAVE_MMAP */
+#endif /* HAVE_MMAP_ANYWHERE */
G.free_pages = NULL;
}
/* If there is no page for this object size, or all pages in this
context are full, allocate a new page. */
- if (entry == NULL
- || entry->num_free_objects == 0
- || entry->context_depth != G.context_depth)
+ if (entry == NULL || entry->num_free_objects == 0)
{
struct page_entry *new_entry;
new_entry = alloc_page (order);
int
ggc_set_mark (p)
- void *p;
+ const void *p;
{
page_entry *entry;
unsigned bit, word;
/* Calculate the index of the object on the page; this is its bit
position in the in_use_p bitmap. */
- bit = (((char *) p) - entry->page) >> entry->order;
+ bit = (((const char *) p) - entry->page) >> entry->order;
word = bit / HOST_BITS_PER_LONG;
mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
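+/* For instance, with 32-byte objects (ENTRY->order == 5) and P at
+   byte offset 832 into the page, BIT is 832 >> 5 == 26; assuming
+   32-bit longs, WORD is 0 and MASK is 1UL << 26.  */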
void
ggc_mark_if_gcable (p)
- void *p;
+ const void *p;
{
if (p && ggc_allocated_p (p))
ggc_set_mark (p);
size_t
ggc_get_size (p)
- void *p;
+ const void *p;
{
page_entry *pe = lookup_page_table_entry (p);
return 1 << pe->order;
G.pagesize = getpagesize();
G.lg_pagesize = exact_log2 (G.pagesize);
-#if defined (HAVE_MMAP) && !defined(MAP_ANONYMOUS)
+#if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
if (G.dev_zero_fd == -1)
abort ();
G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
-#ifdef HAVE_MMAP
+#ifdef HAVE_MMAP_ANYWHERE
/* StunOS has an amazing off-by-one error for the first mmap allocation
after fiddling with RLIMIT_STACK. The result, as hard as it is to
believe, is an unaligned page allocation, which would cause us to
abort ();
}
+/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
+ reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
+
+static void
+ggc_recalculate_in_use_p (p)
+ page_entry *p;
+{
+ unsigned int i;
+ size_t num_objects;
+
+ /* Because the past-the-end bit in in_use_p is always set, we
+ pretend there is one additional object. */
+ num_objects = OBJECTS_PER_PAGE (p->order) + 1;
+
+ /* Reset the free object count. */
+ p->num_free_objects = num_objects;
+
+ /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
+ for (i = 0;
+ i < DIV_ROUND_UP (BITMAP_SIZE (num_objects),
+ sizeof (*p->in_use_p));
+ ++i)
+ {
+ unsigned long j;
+
+ /* Something is in use if it is marked, or if it was in use in a
+ context further down the context stack. */
+ p->in_use_p[i] |= p->save_in_use_p[i];
+
+ /* Decrement the free object count for every object allocated. */
+ for (j = p->in_use_p[i]; j; j >>= 1)
+ p->num_free_objects -= (j & 1);
+ }
+
+ if (p->num_free_objects >= num_objects)
+ abort ();
+}
+
/* Decrement the `GC context'. All objects allocated since the
previous ggc_push_context are migrated to the outer context. */
left over are imported into the previous context. */
for (order = 2; order < HOST_BITS_PER_PTR; order++)
{
- size_t num_objects = OBJECTS_PER_PAGE (order);
- size_t bitmap_size = BITMAP_SIZE (num_objects);
-
page_entry *p;
for (p = G.pages[order]; p != NULL; p = p->next)
{
if (p->context_depth > depth)
- {
- p->context_depth = depth;
- }
+ p->context_depth = depth;
/* If this page is now in the topmost context, and we'd
saved its allocation state, restore it. */
else if (p->context_depth == depth && p->save_in_use_p)
{
- memcpy (p->in_use_p, p->save_in_use_p, bitmap_size);
+ ggc_recalculate_in_use_p (p);
free (p->save_in_use_p);
p->save_in_use_p = 0;
- p->num_free_objects = p->save_num_free_objects;
}
}
}
for (order = 2; order < HOST_BITS_PER_PTR; order++)
{
size_t num_objects = OBJECTS_PER_PAGE (order);
- size_t bitmap_size = BITMAP_SIZE (num_objects);
+ size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
page_entry *p;
for (p = G.pages[order]; p != NULL; p = p->next)
/* Pages that aren't in the topmost context are not collected;
nevertheless, we need their in-use bit vectors to store GC
marks. So, back them up first. */
- if (p->context_depth < G.context_depth
- && ! p->save_in_use_p)
+ if (p->context_depth < G.context_depth)
{
- p->save_in_use_p = xmalloc (bitmap_size);
+ if (! p->save_in_use_p)
+ p->save_in_use_p = xmalloc (bitmap_size);
memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
- p->save_num_free_objects = p->num_free_objects;
}
/* Reset the number of free objects and clear the
p = next;
}
while (! done);
+
+ /* Now, restore the in_use_p vectors for any pages from contexts
+ other than the current one. */
+ for (p = G.pages[order]; p; p = p->next)
+ if (p->context_depth != G.context_depth)
+ ggc_recalculate_in_use_p (p);
}
}
ggc_page_print_statistics ()
{
struct ggc_statistics stats;
- int i;
+ unsigned int i;
/* Clear the statistics. */
- bzero (&stats, sizeof (stats));
+ memset (&stats, 0, sizeof (stats));
/* Make sure collection will really occur. */
G.allocated_last_gc = 0;
/* Collect and print the statistics common across collectors. */
ggc_print_statistics (stderr, &stats);
+ /* Release free pages so that we will not count the bytes allocated
+ there as part of the total allocated memory. */
+ release_pages ();
+
/* Collect some information about the various sizes of
allocation. */
fprintf (stderr, "\n%-4s%-16s%-16s\n", "Log", "Allocated", "Used");
in_use +=
(OBJECTS_PER_PAGE (i) - p->num_free_objects) * (1 << i);
}
- fprintf (stderr, "%-3d %-15lu %-15u\n", i,
- (unsigned long) allocated, in_use);
+ fprintf (stderr, "%-3d %-15lu %-15lu\n", i,
+ (unsigned long) allocated, (unsigned long) in_use);
}
/* Print out some global information. */