X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fggc-zone.c;h=cae23e128deb4d184a13d049a37a9021d138f87d;hb=6f654217e1b23001a1365aa0c63c24029729bb14;hp=40d751907f7828a895494308c69696aa64e03522;hpb=67ce556b47830dd825524e8370969b814c355216;p=pf3gnuchains%2Fgcc-fork.git
diff --git a/gcc/ggc-zone.c b/gcc/ggc-zone.c
index 40d751907f7..cae23e128de 100644
--- a/gcc/ggc-zone.c
+++ b/gcc/ggc-zone.c
@@ -1,5 +1,5 @@
/* "Bag-of-pages" zone garbage collector for the GNU compiler.
- Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008
Free Software Foundation, Inc.
Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
@@ -10,7 +10,7 @@ This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
@@ -19,9 +19,8 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
@@ -37,21 +36,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
#include "timevar.h"
#include "params.h"
#include "bitmap.h"
-
-#ifdef ENABLE_VALGRIND_CHECKING
-# ifdef HAVE_VALGRIND_MEMCHECK_H
-#  include <valgrind/memcheck.h>
-# elif defined HAVE_MEMCHECK_H
-#  include <memcheck.h>
-# else
-#  include <valgrind.h>
-# endif
-#else
-/* Avoid #ifdef:s when we can help it. */
-#define VALGRIND_DISCARD(x)
-#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
-#define VALGRIND_FREELIKE_BLOCK(x,y)
-#endif
+#include "plugin.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
file open. Prefer either to valloc. */
@@ -441,13 +426,13 @@ struct alloc_zone
/* Total allocations and overhead for sizes less than 32, 64 and 128.
These sizes are interesting because they are typical cache line
sizes. */
-
+
unsigned long long total_allocated_under32;
unsigned long long total_overhead_under32;
-
+
unsigned long long total_allocated_under64;
unsigned long long total_overhead_under64;
-
+
unsigned long long total_allocated_under128;
unsigned long long total_overhead_under128;
} stats;
@@ -522,6 +507,47 @@ lookup_page_table_entry (const void *p)
return base[L1][L2];
}
+/* Traverse the page table and find the entry for a page.
+ Return NULL if the object wasn't allocated via the GC. */
+
+static inline page_entry *
+lookup_page_table_if_allocated (const void *p)
+{
+ page_entry ***base;
+ size_t L1, L2;
+
+#if HOST_BITS_PER_PTR <= 32
+ base = &G.lookup[0];
+#else
+ page_table table = G.lookup;
+ size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
+ while (1)
+ {
+ if (table == NULL)
+ return NULL;
+ if (table->high_bits == high_bits)
+ break;
+ table = table->next;
+ }
+ base = &table->table[0];
+#endif
+
+ /* Extract the level 1 and 2 indices. */
+ L1 = LOOKUP_L1 (p);
+ if (! base[L1])
+ return NULL;
+
+ L2 = LOOKUP_L2 (p);
+ if (L2 >= PAGE_L2_SIZE)
+ return NULL;
+ /* We might have a page entry which does not correspond exactly to a
+ system page. */
+ if (base[L1][L2] && (const char *) p < base[L1][L2]->page)
+ return NULL;
+
+ return base[L1][L2];
+}
+
/* Set the page table entry for the page that starts at P. If ENTRY
is NULL, clear the entry. */
@@ -541,7 +567,7 @@ set_page_table_entry (void *p, page_entry *entry)
goto found;
/* Not found -- allocate a new table. */
- table = xcalloc (1, sizeof(*table));
+ table = XCNEW (struct page_table_chain);
table->next = G.lookup;
table->high_bits = high_bits;
G.lookup = table;
@@ -554,7 +580,7 @@ found:
L2 = LOOKUP_L2 (p);
if (base[L1] == NULL)
- base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
+ base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
base[L1][L2] = entry;
}
@@ -690,12 +716,61 @@ zone_find_object_size (struct small_page_entry *page,
unsigned int start_word = zone_get_object_alloc_word (object_midptr);
unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
size_t max_size = (page->common.page + SMALL_PAGE_SIZE
- - (char *) object);
+ - (const char *) object);
return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
max_size);
}
+/* highest_bit assumes that alloc_type is 32 bits. */
+extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1];
+
+/* Find the highest set bit in VALUE. Returns the bit number of that
+ bit, using the same values as ffs. */
+static inline alloc_type
+highest_bit (alloc_type value)
+{
+ /* This also assumes that alloc_type is unsigned. */
+ value |= value >> 1;
+ value |= value >> 2;
+ value |= value >> 4;
+ value |= value >> 8;
+ value |= value >> 16;
+ value = value ^ (value >> 1);
+ return alloc_ffs (value);
+}
+
+/* Find the offset from the start of an object to P, which may point
+ into the interior of the object. */
+
+static unsigned long
+zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
+ size_t start_bit)
+{
+ unsigned int offset_in_bits;
+ alloc_type alloc_word = alloc_bits[start_word];
+
+ /* Mask off any bits after the initial bit, but make sure to include
+ the initial bit in the result. Note that START_BIT is
+ 0-based. */
+ if (start_bit < 8 * sizeof (alloc_type) - 1)
+ alloc_word &= (1 << (start_bit + 1)) - 1;
+ offset_in_bits = start_bit;
+
+ /* Search for the start of the object. */
+ while (alloc_word == 0 && start_word > 0)
+ {
+ alloc_word = alloc_bits[--start_word];
+ offset_in_bits += 8 * sizeof (alloc_type);
+ }
+ /* We must always find a set bit. */
+ gcc_assert (alloc_word != 0);
+ /* Note that the result of highest_bit is 1-based. */
+ offset_in_bits -= highest_bit (alloc_word) - 1;
+
+ return BYTES_PER_ALLOC_BIT * offset_in_bits;
+}
+
/* Allocate the mark bits for every zone, and set the pointers on each
page. */
static void
@@ -788,7 +863,7 @@ alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
/* Pretend we don't have access to the allocated pages. We'll enable
access to smaller pieces of the area in ggc_alloc. Discard the
handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
return page;
}
@@ -824,7 +899,7 @@ alloc_small_page (struct alloc_zone *zone)
memory order. */
for (i = G.quire_size - 1; i >= 1; i--)
{
- e = xcalloc (1, G.small_page_overhead);
+ e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
e->common.page = page + (i << GGC_PAGE_SHIFT);
e->common.zone = zone;
e->next = f;
@@ -834,7 +909,7 @@ alloc_small_page (struct alloc_zone *zone)
zone->free_pages = f;
- entry = xcalloc (1, G.small_page_overhead);
+ entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
entry->common.page = page;
entry->common.zone = zone;
set_page_table_entry (page, &entry->common);
@@ -861,7 +936,7 @@ alloc_large_page (size_t size, struct alloc_zone *zone)
size_t needed_size;
needed_size = size + sizeof (struct large_page_entry);
- page = xmalloc (needed_size);
+ page = XNEWVAR (char, needed_size);
entry = (struct large_page_entry *) page;
@@ -904,8 +979,8 @@ free_small_page (struct small_page_entry *entry)
/* Mark the page as inaccessible. Discard the handle to
avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->common.page,
- SMALL_PAGE_SIZE));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
+ SMALL_PAGE_SIZE));
entry->next = entry->common.zone->free_pages;
entry->common.zone->free_pages = entry;
@@ -979,18 +1054,30 @@ free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
if (bin > NUM_FREE_BINS)
{
bin = 0;
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
+ sizeof (struct
+ alloc_chunk)));
chunk->size = size;
chunk->next_free = zone->free_chunks[bin];
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk),
- size - sizeof (struct alloc_chunk)));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
+ + sizeof (struct
+ alloc_chunk),
+ size
+ - sizeof (struct
+ alloc_chunk)));
}
else
{
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk *)));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
+ sizeof (struct
+ alloc_chunk *)));
chunk->next_free = zone->free_chunks[bin];
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk *),
- size - sizeof (struct alloc_chunk *)));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
+ + sizeof (struct
+ alloc_chunk *),
+ size
+ - sizeof (struct
+ alloc_chunk *)));
}
zone->free_chunks[bin] = chunk;
@@ -1214,22 +1301,22 @@ ggc_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
#ifdef ENABLE_GC_CHECKING
/* `Poison' the entire allocated object. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
memset (result, 0xaf, size);
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (result + orig_size,
- size - orig_size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
+ size - orig_size));
#endif
/* Tell Valgrind that the memory is there, but its content isn't
defined. The bytes at the end of the object are still marked
unaccessible. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, orig_size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));
/* Keep track of how many bytes are being allocated. This
information is used in deciding when to collect. */
zone->allocated += size;
-
- timevar_ggc_mem_total += (size + CHUNK_OVERHEAD);
+
+ timevar_ggc_mem_total += size;
#ifdef GATHER_STATISTICS
ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);
@@ -1353,8 +1440,67 @@ ggc_free (void *p)
/* Add the chunk to the free list. We don't bother with coalescing,
since we are likely to want a chunk of this size again. */
- free_chunk (p, size, page->zone);
+ free_chunk ((char *)p, size, page->zone);
+ }
+}
+
+/* Mark function for strings. */
+
+void
+gt_ggc_m_S (const void *p)
+{
+ page_entry *entry;
+ unsigned long offset;
+
+ if (!p)
+ return;
+
+ /* Look up the page on which the object is alloced. . */
+ entry = lookup_page_table_if_allocated (p);
+ if (! entry)
+ return;
+
+ if (entry->pch_p)
+ {
+ size_t alloc_word, alloc_bit, t;
+ t = ((const char *) p - pch_zone.page) / BYTES_PER_ALLOC_BIT;
+ alloc_word = t / (8 * sizeof (alloc_type));
+ alloc_bit = t % (8 * sizeof (alloc_type));
+ offset = zone_find_object_offset (pch_zone.alloc_bits, alloc_word,
+ alloc_bit);
+ }
+ else if (entry->large_p)
+ {
+ struct large_page_entry *le = (struct large_page_entry *) entry;
+ offset = ((const char *) p) - entry->page;
+ gcc_assert (offset < le->bytes);
}
+ else
+ {
+ struct small_page_entry *se = (struct small_page_entry *) entry;
+ unsigned int start_word = zone_get_object_alloc_word (p);
+ unsigned int start_bit = zone_get_object_alloc_bit (p);
+ offset = zone_find_object_offset (se->alloc_bits, start_word, start_bit);
+
+ /* On some platforms a char* will not necessarily line up on an
+ allocation boundary, so we have to update the offset to
+ account for the leftover bytes. */
+ offset += (size_t) p % BYTES_PER_ALLOC_BIT;
+ }
+
+ if (offset)
+ {
+ /* Here we've seen a char* which does not point to the beginning
+ of an allocated object. We assume it points to the middle of
+ a STRING_CST. */
+ gcc_assert (offset == offsetof (struct tree_string, str));
+ p = ((const char *) p) - offset;
+ gt_ggc_mx_lang_tree_node (CONST_CAST(void *, p));
+ return;
+ }
+
+ /* Inefficient, but also unlikely to matter. */
+ ggc_set_mark (p);
}
/* If P is not marked, mark it and return false. Otherwise return true.
@@ -1375,7 +1521,7 @@ ggc_set_mark (const void *p)
offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
mark_word = offset / (8 * sizeof (mark_type));
mark_bit = offset % (8 * sizeof (mark_type));
-
+
if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
return 1;
pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
@@ -1415,7 +1561,7 @@ int
ggc_marked_p (const void *p)
{
struct page_entry *page;
- const char *ptr = p;
+ const char *ptr = (const char *) p;
page = zone_get_object_page (p);
@@ -1425,7 +1571,7 @@ ggc_marked_p (const void *p)
offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
mark_word = offset / (8 * sizeof (mark_type));
mark_bit = offset % (8 * sizeof (mark_type));
-
+
return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
}
@@ -1541,7 +1687,7 @@ init_ggc (void)
if (GGC_PAGE_SIZE == G.pagesize)
{
/* We have a good page, might as well hold onto it... */
- e = xcalloc (1, G.small_page_overhead);
+ e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
e->common.page = p;
e->common.zone = &main_zone;
e->next = main_zone.free_pages;
@@ -1569,7 +1715,7 @@ new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
struct alloc_zone *
new_ggc_zone (const char * name)
{
- struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
+ struct alloc_zone *new_zone = XCNEW (struct alloc_zone);
new_ggc_zone_1 (new_zone, name);
return new_zone;
}
@@ -1702,9 +1848,9 @@ sweep_pages (struct alloc_zone *zone)
{
if (last_free)
{
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
- object
- - last_free));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
+ object
+ - last_free));
poison_region (last_free, object - last_free);
free_chunk (last_free, object - last_free, zone);
last_free = NULL;
@@ -1740,7 +1886,8 @@ sweep_pages (struct alloc_zone *zone)
{
*spp = snext;
#ifdef ENABLE_GC_CHECKING
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (sp->common.page, SMALL_PAGE_SIZE));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
+ SMALL_PAGE_SIZE));
/* Poison the page. */
memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
@@ -1749,8 +1896,8 @@ sweep_pages (struct alloc_zone *zone)
}
else if (last_free)
{
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
- object - last_free));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
+ object - last_free));
poison_region (last_free, object - last_free);
free_chunk (last_free, object - last_free, zone);
}
@@ -1816,7 +1963,7 @@ ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
ggc_prune_overhead_list ();
#endif
}
-
+
sweep_pages (zone);
zone->was_collected = true;
zone->allocated_last_gc = zone->allocated;
@@ -1883,6 +2030,8 @@ ggc_collect (void)
}
}
+ invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
+
/* Start by possibly collecting the main zone. */
main_zone.was_collected = false;
marked |= ggc_collect_1 (&main_zone, true);
@@ -1947,6 +2096,8 @@ ggc_collect (void)
}
}
+ invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
+
timevar_pop (TV_GC);
}
@@ -2027,7 +2178,7 @@ ggc_print_statistics (void)
chunk = chunk->next_free;
}
}
-
+
fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
zone->name,
SCALE (allocated), LABEL (allocated),
@@ -2049,7 +2200,7 @@ ggc_print_statistics (void)
pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
#else
{
- struct page_table_chain *table;
+ page_table table = G.lookup;
pte_overhead = 0;
while (table)
{
@@ -2070,7 +2221,7 @@ ggc_print_statistics (void)
SCALE (total_allocated), LABEL(total_allocated),
SCALE (total_overhead), LABEL (total_overhead));
-#ifdef GATHER_STATISTICS
+#ifdef GATHER_STATISTICS
{
unsigned long long all_overhead = 0, all_allocated = 0;
unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
@@ -2089,7 +2240,7 @@ ggc_print_statistics (void)
all_allocated_under64 += zone->stats.total_allocated_under64;
all_overhead_under64 += zone->stats.total_overhead_under64;
-
+
all_allocated_under128 += zone->stats.total_allocated_under128;
all_overhead_under128 += zone->stats.total_overhead_under128;
@@ -2155,7 +2306,7 @@ struct ggc_pch_data
struct ggc_pch_data *
init_ggc_pch (void)
{
- return xcalloc (sizeof (struct ggc_pch_data), 1);
+ return XCNEW (struct ggc_pch_data);
}
/* Return which of the page-aligned buckets the object at X, with type
@@ -2234,7 +2385,7 @@ ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
}
if (d->alloc_bits == NULL)
- d->alloc_bits = xcalloc (1, d->alloc_size);
+ d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
}
/* Allocate a place for object X of size SIZE in the PCH file. */
@@ -2299,7 +2450,7 @@ ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
fatal_error ("can't seek PCH file: %m");
if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
- fatal_error ("can't write PCH fle: %m");
+ fatal_error ("can't write PCH file: %m");
/* Done with the PCH, so write out our footer. */
if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
@@ -2370,7 +2521,7 @@ ggc_pch_read (FILE *f, void *addr)
/* Allocate the dummy page entry for the PCH, and set all pages
mapped into the PCH to reference it. */
- pch_page = xcalloc (1, sizeof (struct page_entry));
+ pch_page = XCNEW (struct page_entry);
pch_page->page = pch_zone.page;
pch_page->pch_p = true;