/* "Bag-of-pages" zone garbage collector for the GNU compiler.
- Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008
Free Software Foundation, Inc.
Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
-#include "varray.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#include "bitmap.h"
-
-#ifdef ENABLE_VALGRIND_CHECKING
-# ifdef HAVE_VALGRIND_MEMCHECK_H
-# include <valgrind/memcheck.h>
-# elif defined HAVE_MEMCHECK_H
-# include <memcheck.h>
-# else
-# include <valgrind.h>
-# endif
-#else
-/* Avoid #ifdef:s when we can help it. */
-#define VALGRIND_DISCARD(x)
-#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
-#define VALGRIND_FREELIKE_BLOCK(x,y)
-#endif
+#include "plugin.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
file open. Prefer either to valloc. */
/* Total allocations and overhead for sizes less than 32, 64 and 128.
These sizes are interesting because they are typical cache line
sizes. */
-
+
unsigned long long total_allocated_under32;
unsigned long long total_overhead_under32;
-
+
unsigned long long total_allocated_under64;
unsigned long long total_overhead_under64;
-
+
unsigned long long total_allocated_under128;
unsigned long long total_overhead_under128;
} stats;
return base[L1][L2];
}
+/* Traverse the page table and find the entry for a page.
+ Return NULL if the object wasn't allocated via the GC. */
+
+static inline page_entry *
+lookup_page_table_if_allocated (const void *p)
+{
+ page_entry ***base;
+ size_t L1, L2;
+
+#if HOST_BITS_PER_PTR <= 32
+ base = &G.lookup[0];
+#else
+ page_table table = G.lookup;
+ size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
+ while (1)
+ {
+ if (table == NULL)
+ return NULL;
+ if (table->high_bits == high_bits)
+ break;
+ table = table->next;
+ }
+ base = &table->table[0];
+#endif
+
+ /* Extract the level 1 and 2 indices. */
+ L1 = LOOKUP_L1 (p);
+ if (! base[L1])
+ return NULL;
+
+ L2 = LOOKUP_L2 (p);
+ if (L2 >= PAGE_L2_SIZE)
+ return NULL;
+ /* We might have a page entry which does not correspond exactly to a
+ system page. */
+ if (base[L1][L2] && (const char *) p < base[L1][L2]->page)
+ return NULL;
+
+ return base[L1][L2];
+}
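
lookup_page_table_if_allocated walks the same two-level table that set_page_table_entry fills in: the low 32 bits of the address are split into an L1 index and an L2 index, and on 64-bit hosts the remaining high bits first select a per-high-bits table. A toy, self-contained illustration of that split, using hypothetical constants (8 L1 bits, 4K pages) rather than the real PAGE_L1_BITS/GGC_PAGE_SHIFT values from ggc-zone.c:

#include <assert.h>
#include <stddef.h>

#define TOY_PAGE_SHIFT 12                       /* hypothetical 4K pages */
#define TOY_L1_BITS    8
#define TOY_L2_BITS    (32 - TOY_L1_BITS - TOY_PAGE_SHIFT)

#define TOY_LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - TOY_L1_BITS)) & (((size_t) 1 << TOY_L1_BITS) - 1))
#define TOY_LOOKUP_L2(p) \
  (((size_t) (p) >> TOY_PAGE_SHIFT) & (((size_t) 1 << TOY_L2_BITS) - 1))

int
main (void)
{
  size_t addr = 0x12345678;
  /* The top 8 of the low 32 address bits select the L1 slot; the next
     12 bits select the L2 slot within that table.  */
  assert (TOY_LOOKUP_L1 (addr) == 0x12);
  assert (TOY_LOOKUP_L2 (addr) == 0x345);
  return 0;
}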
+
/* Set the page table entry for the page that starts at P. If ENTRY
is NULL, clear the entry. */
goto found;
/* Not found -- allocate a new table. */
- table = xcalloc (1, sizeof(*table));
+ table = XCNEW (struct page_table_chain);
table->next = G.lookup;
table->high_bits = high_bits;
G.lookup = table;
L2 = LOOKUP_L2 (p);
if (base[L1] == NULL)
- base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
+ base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
base[L1][L2] = entry;
}
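
The bare xcalloc/xmalloc calls converted throughout this patch become libiberty's typed allocation macros (XCNEW, XCNEWVEC, XCNEWVAR, XNEWVAR), which perform the same allocation but return a pointer of the appropriate type. A minimal usage sketch, assuming it is built somewhere include/libiberty.h and libiberty itself are available; the example_entry type is made up:

#include "libiberty.h"

struct example_entry { int id; struct example_entry *next; };

int
main (void)
{
  /* Zero-initialised single object, equivalent to xcalloc (1, sizeof ...).  */
  struct example_entry *e = XCNEW (struct example_entry);
  /* Zero-initialised array of 16 pointers.  */
  struct example_entry **v = XCNEWVEC (struct example_entry *, 16);
  /* Zero-initialised block of an explicit byte size.  */
  char *buf = XCNEWVAR (char, 128);

  e->next = v[0];          /* both are zeroed, so this stores NULL */
  buf[0] = (char) e->id;   /* id is zero as well */
  return 0;
}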
unsigned int start_word = zone_get_object_alloc_word (object_midptr);
unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
size_t max_size = (page->common.page + SMALL_PAGE_SIZE
- - (char *) object);
+ - (const char *) object);
return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
max_size);
}
+/* highest_bit assumes that alloc_type is 32 bits. */
+extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1];
+
+/* Find the highest set bit in VALUE. Returns the bit number of that
+ bit, using the same values as ffs. */
+static inline alloc_type
+highest_bit (alloc_type value)
+{
+ /* This also assumes that alloc_type is unsigned. */
+ value |= value >> 1;
+ value |= value >> 2;
+ value |= value >> 4;
+ value |= value >> 8;
+ value |= value >> 16;
+ value = value ^ (value >> 1);
+ return alloc_ffs (value);
+}
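
The OR cascade in highest_bit smears the most significant set bit of VALUE into every lower position, and the final XOR with the once-shifted result leaves only that bit set, so a find-first-set then reports its 1-based index. A standalone sketch of the same trick, assuming a 32-bit unsigned word and using the POSIX ffs in place of alloc_ffs:

#include <assert.h>
#include <strings.h>

static unsigned int
highest_bit_example (unsigned int value)
{
  /* Propagate the most significant set bit into all lower positions.  */
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  /* Keep only the original most significant bit.  */
  value = value ^ (value >> 1);
  /* A single bit remains, so ffs returns its 1-based position.  */
  return ffs ((int) value);
}

int
main (void)
{
  assert (highest_bit_example (1) == 1);
  assert (highest_bit_example (0x2c) == 6);   /* 101100 -> bit 6 */
  assert (highest_bit_example (0xff) == 8);
  return 0;
}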
+
+/* Find the offset from the start of an object to P, which may point
+ into the interior of the object. */
+
+static unsigned long
+zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
+ size_t start_bit)
+{
+ unsigned int offset_in_bits;
+ alloc_type alloc_word = alloc_bits[start_word];
+
+ /* Mask off any bits after the initial bit, but make sure to include
+ the initial bit in the result. Note that START_BIT is
+ 0-based. */
+ if (start_bit < 8 * sizeof (alloc_type) - 1)
+ alloc_word &= (1 << (start_bit + 1)) - 1;
+ offset_in_bits = start_bit;
+
+ /* Search for the start of the object. */
+ while (alloc_word == 0 && start_word > 0)
+ {
+ alloc_word = alloc_bits[--start_word];
+ offset_in_bits += 8 * sizeof (alloc_type);
+ }
+ /* We must always find a set bit. */
+ gcc_assert (alloc_word != 0);
+ /* Note that the result of highest_bit is 1-based. */
+ offset_in_bits -= highest_bit (alloc_word) - 1;
+
+ return BYTES_PER_ALLOC_BIT * offset_in_bits;
+}
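
zone_find_object_offset walks the allocation bitmap backwards from the slot containing P: it masks off bits above P's slot, steps to lower words until it meets a set bit (the start of some object), and converts the bit distance into bytes. A standalone sketch of the same scan, using hypothetical parameters (32-bit alloc words, 8 bytes per alloc bit) and a plain loop in place of highest_bit:

#include <assert.h>

#define TOY_BYTES_PER_ALLOC_BIT 8

static unsigned long
toy_find_object_offset (const unsigned int *alloc_bits,
                        unsigned int start_word, unsigned int start_bit)
{
  unsigned int offset_in_bits = start_bit;
  unsigned int word = alloc_bits[start_word];
  unsigned int msb = 0;

  /* Keep only the bits at or below START_BIT.  */
  if (start_bit < 31)
    word &= (1u << (start_bit + 1)) - 1;

  /* Walk towards lower addresses until a set bit (an object start)
     is found.  */
  while (word == 0 && start_word > 0)
    {
      word = alloc_bits[--start_word];
      offset_in_bits += 32;
    }

  /* 1-based index of the highest set bit, computed with a loop here.  */
  while (word)
    {
      msb++;
      word >>= 1;
    }

  offset_in_bits -= msb - 1;
  return (unsigned long) offset_in_bits * TOY_BYTES_PER_ALLOC_BIT;
}

int
main (void)
{
  /* Bit 3 of word 0 marks an object start; a pointer mapping to bit 1
     of word 1 is 30 alloc slots, i.e. 240 bytes, into that object.  */
  unsigned int bits[2] = { 1u << 3, 0 };
  assert (toy_find_object_offset (bits, 1, 1) == 240);
  return 0;
}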
+
/* Allocate the mark bits for every zone, and set the pointers on each
page. */
static void
/* Pretend we don't have access to the allocated pages. We'll enable
access to smaller pieces of the area in ggc_alloc. Discard the
handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
return page;
}
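
The VALGRIND_MAKE_* renames throughout this patch follow the memcheck client-request names introduced in Valgrind 3.x: MAKE_NOACCESS became MAKE_MEM_NOACCESS and MAKE_WRITABLE became MAKE_MEM_UNDEFINED, with the same address/length arguments. A sketch of how the renamed requests are typically used around a pool allocator, mirroring the VALGRIND_DISCARD pattern used in this file and assuming Valgrind 3.x headers are installed; the pool itself is made up:

#include <stddef.h>
#include <valgrind/memcheck.h>

static char pool[4096];
static size_t pool_used;

static void *
pool_alloc (size_t size)
{
  char *p = pool + pool_used;
  pool_used += size;
  /* The new object may be written, but its contents are undefined.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  return p;
}

int
main (void)
{
  char *obj;

  /* Hide the whole pool until pieces of it are handed out.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (pool, sizeof pool));

  obj = (char *) pool_alloc (32);
  obj[0] = 1;   /* fine: written before being read */
  return 0;
}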
memory order. */
for (i = G.quire_size - 1; i >= 1; i--)
{
- e = xcalloc (1, G.small_page_overhead);
+ e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
e->common.page = page + (i << GGC_PAGE_SHIFT);
e->common.zone = zone;
e->next = f;
zone->free_pages = f;
- entry = xcalloc (1, G.small_page_overhead);
+ entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
entry->common.page = page;
entry->common.zone = zone;
set_page_table_entry (page, &entry->common);
size_t needed_size;
needed_size = size + sizeof (struct large_page_entry);
- page = xmalloc (needed_size);
+ page = XNEWVAR (char, needed_size);
entry = (struct large_page_entry *) page;
/* Mark the page as inaccessible. Discard the handle to
avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->common.page,
- SMALL_PAGE_SIZE));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
+ SMALL_PAGE_SIZE));
entry->next = entry->common.zone->free_pages;
entry->common.zone->free_pages = entry;
if (bin > NUM_FREE_BINS)
{
bin = 0;
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
+ sizeof (struct
+ alloc_chunk)));
chunk->size = size;
chunk->next_free = zone->free_chunks[bin];
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk),
- size - sizeof (struct alloc_chunk)));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
+ + sizeof (struct
+ alloc_chunk),
+ size
+ - sizeof (struct
+ alloc_chunk)));
}
else
{
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk *)));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
+ sizeof (struct
+ alloc_chunk *)));
chunk->next_free = zone->free_chunks[bin];
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk *),
- size - sizeof (struct alloc_chunk *)));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
+ + sizeof (struct
+ alloc_chunk *),
+ size
+ - sizeof (struct
+ alloc_chunk *)));
}
zone->free_chunks[bin] = chunk;
#ifdef ENABLE_GC_CHECKING
/* `Poison' the entire allocated object. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
memset (result, 0xaf, size);
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (result + orig_size,
- size - orig_size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
+ size - orig_size));
#endif
/* Tell Valgrind that the memory is there, but its content isn't
defined. The bytes at the end of the object are still marked
unaccessible. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, orig_size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));
/* Keep track of how many bytes are being allocated. This
information is used in deciding when to collect. */
zone->allocated += size;
-
+
timevar_ggc_mem_total += size;
#ifdef GATHER_STATISTICS
/* Add the chunk to the free list. We don't bother with coalescing,
since we are likely to want a chunk of this size again. */
- free_chunk (p, size, page->zone);
+      free_chunk ((char *) p, size, page->zone);
+ }
+}
+
+/* Mark function for strings. */
+
+void
+gt_ggc_m_S (const void *p)
+{
+ page_entry *entry;
+ unsigned long offset;
+
+ if (!p)
+ return;
+
+  /* Look up the page on which the object is allocated.  If it was not
+     allocated by the GC, bail out.  */
+ entry = lookup_page_table_if_allocated (p);
+ if (! entry)
+ return;
+
+ if (entry->pch_p)
+ {
+ size_t alloc_word, alloc_bit, t;
+ t = ((const char *) p - pch_zone.page) / BYTES_PER_ALLOC_BIT;
+ alloc_word = t / (8 * sizeof (alloc_type));
+ alloc_bit = t % (8 * sizeof (alloc_type));
+ offset = zone_find_object_offset (pch_zone.alloc_bits, alloc_word,
+ alloc_bit);
+ }
+ else if (entry->large_p)
+ {
+ struct large_page_entry *le = (struct large_page_entry *) entry;
+ offset = ((const char *) p) - entry->page;
+ gcc_assert (offset < le->bytes);
}
+ else
+ {
+ struct small_page_entry *se = (struct small_page_entry *) entry;
+ unsigned int start_word = zone_get_object_alloc_word (p);
+ unsigned int start_bit = zone_get_object_alloc_bit (p);
+ offset = zone_find_object_offset (se->alloc_bits, start_word, start_bit);
+
+ /* On some platforms a char* will not necessarily line up on an
+ allocation boundary, so we have to update the offset to
+ account for the leftover bytes. */
+ offset += (size_t) p % BYTES_PER_ALLOC_BIT;
+ }
+
+ if (offset)
+ {
+ /* Here we've seen a char* which does not point to the beginning
+ of an allocated object. We assume it points to the middle of
+ a STRING_CST. */
+ gcc_assert (offset == offsetof (struct tree_string, str));
+ p = ((const char *) p) - offset;
+      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
+ return;
+ }
+
+ /* Inefficient, but also unlikely to matter. */
+ ggc_set_mark (p);
}
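
gt_ggc_m_S accepts a char* that may point into the middle of a STRING_CST: when the computed offset equals offsetof (struct tree_string, str), it steps back by that offset to reach the containing tree node and marks that node instead. A standalone sketch of the offsetof back-pointer step, with a made-up toy_string type standing in for tree_string:

#include <assert.h>
#include <stddef.h>

struct toy_string
{
  int length;
  char str[8];                  /* embedded character data */
};

static struct toy_string *
containing_string (const char *p)
{
  /* P points at the str field; step back to the enclosing object.  */
  return (struct toy_string *) ((char *) p - offsetof (struct toy_string, str));
}

int
main (void)
{
  static struct toy_string s = { 2, "ab" };
  assert (containing_string (s.str) == &s);
  return 0;
}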
/* If P is not marked, mark it and return false. Otherwise return true.
offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
mark_word = offset / (8 * sizeof (mark_type));
mark_bit = offset % (8 * sizeof (mark_type));
-
+
if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
return 1;
pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
ggc_marked_p (const void *p)
{
struct page_entry *page;
- const char *ptr = p;
+ const char *ptr = (const char *) p;
page = zone_get_object_page (p);
offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
mark_word = offset / (8 * sizeof (mark_type));
mark_bit = offset % (8 * sizeof (mark_type));
-
+
return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
}
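
Both ggc_set_mark and ggc_marked_p address the PCH mark bitmap the same way: the byte offset of the pointer into the PCH zone is divided by BYTES_PER_MARK_BIT to get a bit index, which is then split into a word index and a bit-within-word index. A minimal worked example, assuming 8 bytes per mark bit and 32-bit mark words (the real constants come from ggc-zone.c):

#include <assert.h>

int
main (void)
{
  unsigned long byte_offset = 1000;           /* ptr - pch_zone.page */
  unsigned long bit_index = byte_offset / 8;  /* one mark bit per 8 bytes */
  unsigned long mark_word = bit_index / 32;   /* 32 mark bits per word */
  unsigned long mark_bit  = bit_index % 32;

  assert (bit_index == 125 && mark_word == 3 && mark_bit == 29);
  return 0;
}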
if (GGC_PAGE_SIZE == G.pagesize)
{
/* We have a good page, might as well hold onto it... */
- e = xcalloc (1, G.small_page_overhead);
+ e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
e->common.page = p;
e->common.zone = &main_zone;
e->next = main_zone.free_pages;
struct alloc_zone *
new_ggc_zone (const char * name)
{
- struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
+ struct alloc_zone *new_zone = XCNEW (struct alloc_zone);
new_ggc_zone_1 (new_zone, name);
return new_zone;
}
{
if (last_free)
{
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
- object
- - last_free));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
+ object
+ - last_free));
poison_region (last_free, object - last_free);
free_chunk (last_free, object - last_free, zone);
last_free = NULL;
{
*spp = snext;
#ifdef ENABLE_GC_CHECKING
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (sp->common.page, SMALL_PAGE_SIZE));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
+ SMALL_PAGE_SIZE));
/* Poison the page. */
memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
}
else if (last_free)
{
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
- object - last_free));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
+ object - last_free));
poison_region (last_free, object - last_free);
free_chunk (last_free, object - last_free, zone);
}
ggc_prune_overhead_list ();
#endif
}
-
+
sweep_pages (zone);
zone->was_collected = true;
zone->allocated_last_gc = zone->allocated;
}
}
+ invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
+
/* Start by possibly collecting the main zone. */
main_zone.was_collected = false;
marked |= ggc_collect_1 (&main_zone, true);
}
}
+ invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
+
timevar_pop (TV_GC);
}
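
The two invoke_plugin_callbacks calls expose the start and end of a collection to loaded plugins through the PLUGIN_GGC_START and PLUGIN_GGC_END events. A minimal sketch of a plugin hooking the start event; the exact include set and build recipe vary by GCC version, and the counter is only illustrative:

#include "gcc-plugin.h"

int plugin_is_GPL_compatible;

static int collections;

/* Called at the start of every ggc_collect run.  */
static void
note_ggc_start (void *gcc_data, void *user_data)
{
  (void) gcc_data;
  (void) user_data;
  collections++;
}

int
plugin_init (struct plugin_name_args *plugin_info,
             struct plugin_gcc_version *version)
{
  (void) version;
  register_callback (plugin_info->base_name, PLUGIN_GGC_START,
                     note_ggc_start, NULL);
  return 0;
}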
chunk = chunk->next_free;
}
}
-
+
fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
zone->name,
SCALE (allocated), LABEL (allocated),
SCALE (total_allocated), LABEL(total_allocated),
SCALE (total_overhead), LABEL (total_overhead));
-#ifdef GATHER_STATISTICS
+#ifdef GATHER_STATISTICS
{
unsigned long long all_overhead = 0, all_allocated = 0;
unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
all_allocated_under64 += zone->stats.total_allocated_under64;
all_overhead_under64 += zone->stats.total_overhead_under64;
-
+
all_allocated_under128 += zone->stats.total_allocated_under128;
all_overhead_under128 += zone->stats.total_overhead_under128;
struct ggc_pch_data *
init_ggc_pch (void)
{
- return xcalloc (sizeof (struct ggc_pch_data), 1);
+ return XCNEW (struct ggc_pch_data);
}
/* Return which of the page-aligned buckets the object at X, with type
}
if (d->alloc_bits == NULL)
- d->alloc_bits = xcalloc (1, d->alloc_size);
+ d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
}
/* Allocate a place for object X of size SIZE in the PCH file. */
fatal_error ("can't seek PCH file: %m");
if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
-    fatal_error ("can't write PCH fle: %m");
+    fatal_error ("can't write PCH file: %m");
/* Done with the PCH, so write out our footer. */
if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
/* Allocate the dummy page entry for the PCH, and set all pages
mapped into the PCH to reference it. */
- pch_page = xcalloc (1, sizeof (struct page_entry));
+ pch_page = XCNEW (struct page_entry);
pch_page->page = pch_zone.page;
pch_page->pch_p = true;