/* "Bag-of-pages" garbage collector for the GNU compiler.
- Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
- Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009,
+ 2010, 2011 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "flags.h"
#include "ggc.h"
+#include "ggc-internal.h"
#include "timevar.h"
#include "params.h"
#include "tree-flow.h"
-#ifdef ENABLE_VALGRIND_CHECKING
-# ifdef HAVE_VALGRIND_MEMCHECK_H
-# include <valgrind/memcheck.h>
-# elif defined HAVE_MEMCHECK_H
-# include <memcheck.h>
-# else
-# include <valgrind.h>
-# endif
-#else
-/* Avoid #ifdef:s when we can help it. */
-#define VALGRIND_DISCARD(x)
-#endif
+#include "cfgloop.h"
+#include "plugin.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
file open. Prefer either to valloc. */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
-
-# include <sys/mman.h>
-# ifndef MAP_FAILED
-# define MAP_FAILED -1
-# endif
-# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
-# define MAP_ANONYMOUS MAP_ANON
-# endif
# define USING_MMAP
-
#endif
#ifdef HAVE_MMAP_DEV_ZERO
-
-# include <sys/mman.h>
-# ifndef MAP_FAILED
-# define MAP_FAILED -1
-# endif
# define USING_MMAP
-
#endif
#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif
+#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
+ && defined(USING_MMAP)
+# define USING_MADVISE
+#endif
+
/* Strategy:
This garbage-collecting allocator allocates objects on one of a set
#define OFFSET_TO_BIT(OFFSET, ORDER) \
(((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
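+
+/* DIV_MULT and DIV_SHIFT hold a precomputed multiplicative inverse per
+   order (see compute_inverse below), so OFFSET_TO_BIT is a multiply and
+   shift that behaves like OFFSET / OBJECT_SIZE (ORDER) whenever OFFSET
+   is a multiple of the object size.  As an illustrative sketch only:
+   with 8-byte objects, an offset of 64 bytes maps to bit 8.  */
+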
+/* We use this structure to determine the alignment required for
+ allocations. For power-of-two sized allocations, that's not a
+ problem, but it does matter for odd-sized allocations.
+ We do not care about alignment for floating-point types. */
+
+struct max_alignment {
+ char c;
+ union {
+ HOST_WIDEST_INT i;
+ void *p;
+ } u;
+};
+
+/* The biggest alignment required. */
+
+#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
+
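+/* As an illustration only: on a typical 64-bit host where both
+   HOST_WIDEST_INT and void * are 8 bytes, MAX_ALIGNMENT works out to 8,
+   so the extra orders defined below are multiples of 8 (24, 40, 48, ...).
+   The actual value is whatever offsetof reports for the host ABI.  */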
+
/* The number of extra orders, not corresponding to power-of-two sized
objects. */
thing you need to do to add a new special allocation size. */
static const size_t extra_order_size_table[] = {
- sizeof (struct stmt_ann_d),
+ /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
+ There are a lot of structures with these sizes and explicitly
+ listing them risks orders being dropped because they changed size. */
+ MAX_ALIGNMENT * 3,
+ MAX_ALIGNMENT * 5,
+ MAX_ALIGNMENT * 6,
+ MAX_ALIGNMENT * 7,
+ MAX_ALIGNMENT * 9,
+ MAX_ALIGNMENT * 10,
+ MAX_ALIGNMENT * 11,
+ MAX_ALIGNMENT * 12,
+ MAX_ALIGNMENT * 13,
+ MAX_ALIGNMENT * 14,
+ MAX_ALIGNMENT * 15,
sizeof (struct tree_decl_non_common),
sizeof (struct tree_field_decl),
sizeof (struct tree_parm_decl),
sizeof (struct tree_var_decl),
- sizeof (struct tree_list),
- TREE_EXP_SIZE (2),
- RTL_SIZE (2), /* MEM, PLUS, etc. */
- RTL_SIZE (9), /* INSN */
+ sizeof (struct tree_type_non_common),
+ sizeof (struct function),
+ sizeof (struct basic_block_def),
+ sizeof (struct cgraph_node),
+ sizeof (struct loop),
};
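+
+/* A rough illustration (assuming MAX_ALIGNMENT is 8): without the extra
+   orders a 24-byte object would be placed in the 32-byte power-of-two
+   order and waste a quarter of each object, whereas the MAX_ALIGNMENT * 3
+   order fits it exactly.  The structure sizes listed above get the same
+   treatment for common tree, cgraph and CFG nodes.  */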
/* The total number of orders. */
#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
-/* We use this structure to determine the alignment required for
- allocations. For power-of-two sized allocations, that's not a
- problem, but it does matter for odd-sized allocations. */
-
-struct max_alignment {
- char c;
- union {
- HOST_WIDEST_INT i;
- long double d;
- } u;
-};
-
-/* The biggest alignment required. */
-
-#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
-
/* Compute the smallest nonnegative number which when added to X gives
a multiple of F. */
#define ROUND_UP(x, f) (CEIL (x, f) * (f))
+/* Round X up to the next multiple of the page size.  */
+
+#define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))
+
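+/* A quick sanity check (assuming a 4K page size): PAGE_ALIGN (4096)
+   stays 4096 and PAGE_ALIGN (4097) becomes 8192.  The bit trick relies
+   on G.pagesize being a power of two.  */
+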
/* The Ith entry is the number of objects on a page or order I. */
static unsigned objects_per_page_table[NUM_ORDERS];
/* The lg of size of objects allocated from this page. */
unsigned char order;
+ /* Has the page's memory been given back to the OS with madvise
+ (see release_pages)?  */
+ bool discarded;
+
/* A bit vector indicating whether or not objects are in use. The
Nth bit is one if the Nth object on this page is allocated. This
array is dynamically sized. */
#endif
+#ifdef ENABLE_GC_ALWAYS_COLLECT
+/* List of free objects to be verified as actually free on the
+ next collection. */
+struct free_object
+{
+ void *object;
+ struct free_object *next;
+};
+#endif
+
/* The rest of the global variables. */
static struct globals
{
/* Maximum number of elements that can be used before resizing. */
unsigned int depth_max;
- /* Each element of this arry is an index in by_depth where the given
+ /* Each element of this array is an index in by_depth where the given
 depth starts.  This structure is indexed by the depth we
are interested in. */
unsigned int *depth;
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
next collection. */
- struct free_object
- {
- void *object;
- struct free_object *next;
- } *free_object_list;
+ struct free_object *free_object_list;
#endif
#ifdef GATHER_STATISTICS
struct
{
- /* Total memory allocated with ggc_alloc. */
+ /* Total GC-allocated memory. */
unsigned long long total_allocated;
- /* Total overhead for memory to be allocated with ggc_alloc. */
+ /* Total overhead for GC-allocated memory. */
unsigned long long total_overhead;
/* Total allocations and overhead for sizes less than 32, 64 and 128.
These sizes are interesting because they are typical cache line
sizes. */
-
+
unsigned long long total_allocated_under32;
unsigned long long total_overhead_under32;
-
+
unsigned long long total_allocated_under64;
unsigned long long total_overhead_under64;
-
+
unsigned long long total_allocated_under128;
unsigned long long total_overhead_under128;
-
+
/* The allocations for each of the allocation orders. */
unsigned long long total_allocated_per_order[NUM_ORDERS];
can override this by defining GGC_QUIRE_SIZE explicitly. */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
-# define GGC_QUIRE_SIZE 256
+# define GGC_QUIRE_SIZE 512 /* 2MB for 4K pages */
# else
# define GGC_QUIRE_SIZE 16
# endif
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
-static char *alloc_anon (char *, size_t);
+static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
if (G.depth_in_use >= G.depth_max)
{
G.depth_max *= 2;
- G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
+ G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
}
G.depth[G.depth_in_use++] = i;
}
if (G.by_depth_in_use >= G.by_depth_max)
{
G.by_depth_max *= 2;
- G.by_depth = xrealloc (G.by_depth,
- G.by_depth_max * sizeof (page_entry *));
- G.save_in_use = xrealloc (G.save_in_use,
- G.by_depth_max * sizeof (unsigned long *));
+ G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
+ G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
+ G.by_depth_max);
}
G.by_depth[G.by_depth_in_use] = p;
G.save_in_use[G.by_depth_in_use++] = s;
goto found;
/* Not found -- allocate a new table. */
- table = xcalloc (1, sizeof(*table));
+ table = XCNEW (struct page_table_chain);
table->next = G.lookup;
table->high_bits = high_bits;
G.lookup = table;
L2 = LOOKUP_L2 (p);
if (base[L1] == NULL)
- base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
+ base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging. */
-void
+DEBUG_FUNCTION void
debug_print_page_list (int order)
{
page_entry *p;
compile error unless exactly one of the HAVE_* is defined. */
static inline char *
-alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
+alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
- char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
- char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE, G.dev_zero_fd, 0);
+ char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
if (page == (char *) MAP_FAILED)
{
+ if (!check)
+ return NULL;
perror ("virtual memory exhausted");
exit (FATAL_EXIT_CODE);
}
G.bytes_mapped += size;
/* Pretend we don't have access to the allocated pages. We'll enable
- access to smaller pieces of the area in ggc_alloc. Discard the
+ access to smaller pieces of the area in ggc_internal_alloc. Discard the
handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
return page;
}
entry_size = num_objects * OBJECT_SIZE (order);
if (entry_size < G.pagesize)
entry_size = G.pagesize;
+ entry_size = PAGE_ALIGN (entry_size);
entry = NULL;
page = NULL;
if (p != NULL)
{
+ if (p->discarded)
+ G.bytes_mapped += p->bytes;
+ p->discarded = false;
+
/* Recycle the allocated memory from this page ... */
*pp = p->next;
page = p->page;
extras on the freelist. (Can only do this optimization with
mmap for backing store.) */
struct page_entry *e, *f = G.free_pages;
- int i;
+ int i, entries = GGC_QUIRE_SIZE;
- page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
+ page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
+ if (page == NULL)
+ {
+ page = alloc_anon (NULL, G.pagesize, true);
+ entries = 1;
+ }
/* This loop counts down so that the chain will be in ascending
memory order. */
- for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
+ for (i = entries - 1; i >= 1; i--)
{
- e = xcalloc (1, page_entry_size);
+ e = XCNEWVAR (struct page_entry, page_entry_size);
e->order = order;
e->bytes = G.pagesize;
e->page = page + (i << G.lg_pagesize);
G.free_pages = f;
}
else
- page = alloc_anon (NULL, entry_size);
+ page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
else
alloc_size = GGC_QUIRE_SIZE * G.pagesize;
else
alloc_size = entry_size + G.pagesize - 1;
- allocation = xmalloc (alloc_size);
+ allocation = XNEWVEC (char, alloc_size);
page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
head_slop = page - allocation;
struct page_entry *e, *f = G.free_pages;
for (a = enda - G.pagesize; a != page; a -= G.pagesize)
{
- e = xcalloc (1, page_entry_size);
+ e = XCNEWVAR (struct page_entry, page_entry_size);
e->order = order;
e->bytes = G.pagesize;
e->page = a;
#endif
if (entry == NULL)
- entry = xcalloc (1, page_entry_size);
+ entry = XCNEWVAR (struct page_entry, page_entry_size);
entry->bytes = entry_size;
entry->page = page;
/* Mark the page as inaccessible. Discard the handle to avoid handle
leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
set_page_table_entry (entry->page, NULL);
/* We cannot free a page from a context deeper than the current
one. */
gcc_assert (entry->context_depth == top->context_depth);
-
+
/* Put top element into freed slot. */
G.by_depth[i] = top;
G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
static void
release_pages (void)
{
-#ifdef USING_MMAP
+#ifdef USING_MADVISE
+ page_entry *p, *start_p;
+ char *start;
+ size_t len;
+ size_t mapped_len;
+ page_entry *next, *prev, *newprev;
+ size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
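+ /* With the defaults above (GGC_QUIRE_SIZE of 512 and 4K pages) this
+ works out to 1MB; only runs of free pages at least that large are
+ given back with munmap below.  Illustrative figure only; it scales
+ with the real page size.  */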
+
+ /* First free larger contiguous areas to the OS.
+ This allows other allocators to grab these areas if needed.
+ This is only done on larger chunks to avoid fragmentation.
+ This does not always work because the free_pages list is only
+ approximately sorted. */
+
+ p = G.free_pages;
+ prev = NULL;
+ while (p)
+ {
+ start = p->page;
+ start_p = p;
+ len = 0;
+ mapped_len = 0;
+ newprev = prev;
+ while (p && p->page == start + len)
+ {
+ len += p->bytes;
+ if (!p->discarded)
+ mapped_len += p->bytes;
+ newprev = p;
+ p = p->next;
+ }
+ if (len >= free_unit)
+ {
+ while (start_p != p)
+ {
+ next = start_p->next;
+ free (start_p);
+ start_p = next;
+ }
+ munmap (start, len);
+ if (prev)
+ prev->next = p;
+ else
+ G.free_pages = p;
+ G.bytes_mapped -= mapped_len;
+ continue;
+ }
+ prev = newprev;
+ }
+
+ /* Now give back the fragmented pages to the OS, but keep the address
+ space to reuse it next time. */
+
+ for (p = G.free_pages; p; )
+ {
+ if (p->discarded)
+ {
+ p = p->next;
+ continue;
+ }
+ start = p->page;
+ len = p->bytes;
+ start_p = p;
+ p = p->next;
+ while (p && p->page == start + len)
+ {
+ len += p->bytes;
+ p = p->next;
+ }
+ /* Give the page back to the kernel, but don't free the mapping.
+ This avoids fragmentation in the virtual memory map of the
+ process. Next time we can reuse it by just touching it. */
+ madvise (start, len, MADV_DONTNEED);
+ /* Don't count those pages as mapped, so the garbage collector's
+ accounting is not perturbed unnecessarily. */
+ G.bytes_mapped -= len;
+ while (start_p != p)
+ {
+ start_p->discarded = true;
+ start_p = start_p->next;
+ }
+ }
+#endif
+#if defined(USING_MMAP) && !defined(USING_MADVISE)
page_entry *p, *next;
char *start;
size_t len;
/* This table provides a fast way to determine ceil(log_2(size)) for
allocation requests. The minimum allocation size is eight bytes. */
-
-static unsigned char size_lookup[257] =
+#define NUM_SIZE_LOOKUP 512
+static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 8
+ 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
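+
+/* For example, a 10-byte request statically maps to order 4 (a 16-byte
+   object) here.  Note that the initialization code further below rewrites
+   entries for the extra, non-power-of-two orders once their object sizes
+   are known, so the table is only ceil(log2) as written, not as finally
+   used.  */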
+/* For a given size of memory requested for allocation, return the
+ actual size that is going to be allocated, as well as the size
+ order. */
+
+static void
+ggc_round_alloc_size_1 (size_t requested_size,
+ size_t *size_order,
+ size_t *alloced_size)
+{
+ size_t order, object_size;
+
+ if (requested_size < NUM_SIZE_LOOKUP)
+ {
+ order = size_lookup[requested_size];
+ object_size = OBJECT_SIZE (order);
+ }
+ else
+ {
+ order = 10;
+ while (requested_size > (object_size = OBJECT_SIZE (order)))
+ order++;
+ }
+
+ if (size_order)
+ *size_order = order;
+ if (alloced_size)
+ *alloced_size = object_size;
+}
+
+/* For a given size of memory requested for allocation, return the
+ actual size that is going to be allocated. */
+
+size_t
+ggc_round_alloc_size (size_t requested_size)
+{
+ size_t size = 0;
+
+ ggc_round_alloc_size_1 (requested_size, NULL, &size);
+ return size;
+}
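+
+/* Usage note (illustrative; assumes the extra orders above are in effect
+   and MAX_ALIGNMENT is 8): ggc_round_alloc_size (20) would report 24,
+   letting a caller that grows a buffer know how much usable space the
+   allocation really provides, padding included.  */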
+
/* Typed allocation function. Does nothing special in this collector. */
void *
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
MEM_STAT_DECL)
{
- return ggc_alloc_stat (size PASS_MEM_STAT);
+ return ggc_internal_alloc_stat (size PASS_MEM_STAT);
}
/* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
void *
-ggc_alloc_stat (size_t size MEM_STAT_DECL)
+ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
size_t order, word, bit, object_offset, object_size;
struct page_entry *entry;
void *result;
- if (size <= 256)
- {
- order = size_lookup[size];
- object_size = OBJECT_SIZE (order);
- }
- else
- {
- order = 9;
- while (size > (object_size = OBJECT_SIZE (order)))
- order++;
- }
+ ggc_round_alloc_size_1 (size, &order, &object_size);
/* If there are non-full pages for this size allocation, they are at
the head of the list. */
exact same semantics in presence of memory bugs, regardless of
ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
/* `Poison' the entire allocated object, including any padding at
the end. */
 /* Make the bytes after the end of the object inaccessible.  Discard the
handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
- object_size - size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
+ object_size - size));
#endif
/* Tell Valgrind that the memory is there, but its content isn't
defined. The bytes at the end of the object are still marked
 inaccessible.  */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
/* Keep track of how many bytes are being allocated. This
information is used in deciding when to collect. */
return result;
}
+/* Mark function for strings. */
+
+void
+gt_ggc_m_S (const void *p)
+{
+ page_entry *entry;
+ unsigned bit, word;
+ unsigned long mask;
+ unsigned long offset;
+
+ if (!p || !ggc_allocated_p (p))
+ return;
+
+ /* Look up the page on which the object is allocated.  */
+ entry = lookup_page_table_entry (p);
+ gcc_assert (entry);
+
+ /* Calculate the index of the object on the page; this is its bit
+ position in the in_use_p bitmap. Note that because a char* might
+ point to the middle of an object, we need special code here to
+ make sure P points to the start of an object. */
+ offset = ((const char *) p - entry->page) % object_size_table[entry->order];
+ if (offset)
+ {
+ /* Here we've seen a char* which does not point to the beginning
+ of an allocated object. We assume it points to the middle of
+ a STRING_CST. */
+ gcc_assert (offset == offsetof (struct tree_string, str));
+ p = ((const char *) p) - offset;
+ gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
+ return;
+ }
+
+ bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
+ word = bit / HOST_BITS_PER_LONG;
+ mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
+
+ /* If the bit was previously set, skip it. */
+ if (entry->in_use_p[word] & mask)
+ return;
+
+ /* Otherwise set it, and decrement the free object count. */
+ entry->in_use_p[word] |= mask;
+ entry->num_free_objects -= 1;
+
+ if (GGC_DEBUG_LEVEL >= 4)
+ fprintf (G.debug_file, "Marking %p\n", p);
+
+ return;
+}
+
 /* If P is not marked, mark it and return false.  Otherwise return true.
P must have been allocated by the GC allocator; it mustn't point to
static objects, stack variables, or memory allocated with malloc. */
#ifdef ENABLE_GC_CHECKING
/* Poison the data, to indicate the data is garbage. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
memset (p, 0xa5, size);
#endif
/* Let valgrind know the object is free. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* In the completely-anal-checking mode, we do *not* immediately free
- the data, but instead verify that the data is *actually* not
+ the data, but instead verify that the data is *actually* not
reachable the next time we collect. */
{
- struct free_object *fo = xmalloc (sizeof (struct free_object));
+ struct free_object *fo = XNEW (struct free_object);
fo->object = p;
fo->next = G.free_object_list;
G.free_object_list = fo;
/* If the page is completely full, then it's supposed to
be after all pages that aren't. Since we've freed one
object from a page that was full, we need to move the
- page to the head of the list.
+ page to the head of the list.
PE is the node we want to move. Q is the previous node
and P is the next node in the list. */
static void
compute_inverse (unsigned order)
{
- size_t size, inv;
+ size_t size, inv;
unsigned int e;
size = OBJECT_SIZE (order);
believe, is an unaligned page allocation, which would cause us to
hork badly if we tried to use it. */
{
- char *p = alloc_anon (NULL, G.pagesize);
+ char *p = alloc_anon (NULL, G.pagesize, true);
struct page_entry *e;
if ((size_t)p & (G.pagesize - 1))
{
/* How losing. Discard this one and try another. If we still
can't get something useful, give up. */
- p = alloc_anon (NULL, G.pagesize);
+ p = alloc_anon (NULL, G.pagesize, true);
gcc_assert (!((size_t)p & (G.pagesize - 1)));
}
/* We have a good page, might as well hold onto it... */
- e = xcalloc (1, sizeof (struct page_entry));
+ e = XCNEW (struct page_entry);
e->bytes = G.pagesize;
e->page = p;
e->next = G.free_pages;
int o;
int i;
- o = size_lookup[OBJECT_SIZE (order)];
- for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
+ i = OBJECT_SIZE (order);
+ if (i >= NUM_SIZE_LOOKUP)
+ continue;
+
+ for (o = size_lookup[i]; o == size_lookup[i]; --i)
size_lookup[i] = order;
}
G.depth_in_use = 0;
G.depth_max = 10;
- G.depth = xmalloc (G.depth_max * sizeof (unsigned int));
+ G.depth = XNEWVEC (unsigned int, G.depth_max);
G.by_depth_in_use = 0;
G.by_depth_max = INITIAL_PTE_COUNT;
- G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
- G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
-}
-
-/* Start a new GGC zone. */
-
-struct alloc_zone *
-new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
-{
- return NULL;
-}
-
-/* Destroy a GGC zone. */
-void
-destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
-{
+ G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
+ G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
if (p->context_depth < G.context_depth)
{
if (! save_in_use_p (p))
- save_in_use_p (p) = xmalloc (bitmap_size);
+ save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
}
G.pages[order] = next;
else
previous->next = next;
-
+
/* Splice P out of the back pointers too. */
if (next)
next->prev = previous;
so the exact same memory semantics is kept, in case
there are memory errors. We override this request
below. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
+ size));
memset (object, 0xa5, size);
/* Drop the handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
}
}
}
/* Indicate that we've seen collections at this context depth. */
G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
+ invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
+
clear_marks ();
ggc_mark_roots ();
#ifdef GATHER_STATISTICS
G.allocated_last_gc = G.allocated;
+ invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
+
timevar_pop (TV_GC);
if (!quiet_flag)
SCALE (G.allocated), STAT_LABEL(G.allocated),
SCALE (total_overhead), STAT_LABEL (total_overhead));
-#ifdef GATHER_STATISTICS
+#ifdef GATHER_STATISTICS
{
fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
G.stats.total_overhead_under128);
fprintf (stderr, "Total Allocated under 128B: %10lld\n",
G.stats.total_allocated_under128);
-
+
for (i = 0; i < NUM_ORDERS; i++)
if (G.stats.total_allocated_per_order[i])
{
- fprintf (stderr, "Total Overhead page size %7d: %10lld\n",
- OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]);
- fprintf (stderr, "Total Allocated page size %7d: %10lld\n",
- OBJECT_SIZE (i), G.stats.total_allocated_per_order[i]);
+ fprintf (stderr, "Total Overhead page size %7lu: %10lld\n",
+ (unsigned long) OBJECT_SIZE (i),
+ G.stats.total_overhead_per_order[i]);
+ fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
+ (unsigned long) OBJECT_SIZE (i),
+ G.stats.total_allocated_per_order[i]);
}
}
#endif
}
\f
+struct ggc_pch_ondisk
+{
+ unsigned totals[NUM_ORDERS];
+};
+
struct ggc_pch_data
{
- struct ggc_pch_ondisk
- {
- unsigned totals[NUM_ORDERS];
- } d;
+ struct ggc_pch_ondisk d;
size_t base[NUM_ORDERS];
size_t written[NUM_ORDERS];
};
struct ggc_pch_data *
init_ggc_pch (void)
{
- return xcalloc (sizeof (struct ggc_pch_data), 1);
+ return XCNEW (struct ggc_pch_data);
}
void
{
unsigned order;
- if (size <= 256)
+ if (size < NUM_SIZE_LOOKUP)
order = size_lookup[size];
else
{
- order = 9;
+ order = 10;
while (size > OBJECT_SIZE (order))
order++;
}
unsigned i;
for (i = 0; i < NUM_ORDERS; i++)
- a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
+ a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
return a;
}
for (i = 0; i < NUM_ORDERS; i++)
{
d->base[i] = a;
- a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
+ a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
}
}
unsigned order;
char *result;
- if (size <= 256)
+ if (size < NUM_SIZE_LOOKUP)
order = size_lookup[size];
else
{
- order = 9;
+ order = 10;
while (size > OBJECT_SIZE (order))
order++;
}
size_t size, bool is_string ATTRIBUTE_UNUSED)
{
unsigned order;
- static const char emptyBytes[256];
+ static const char emptyBytes[256] = { 0 };
- if (size <= 256)
+ if (size < NUM_SIZE_LOOKUP)
order = size_lookup[size];
else
{
- order = 9;
+ order = 10;
while (size > OBJECT_SIZE (order))
order++;
}
if (fwrite (x, size, 1, f) != 1)
- fatal_error ("can't write PCH file: %m");
+ fatal_error ("can%'t write PCH file: %m");
/* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
object out to OBJECT_SIZE(order). This happens for strings. */
if (padding <= sizeof(emptyBytes))
{
if (fwrite (emptyBytes, 1, padding, f) != padding)
- fatal_error ("can't write PCH file");
+ fatal_error ("can%'t write PCH file");
}
else
{
/* Larger than our buffer? Just default to fseek. */
if (fseek (f, padding, SEEK_CUR) != 0)
- fatal_error ("can't write PCH file");
+ fatal_error ("can%'t write PCH file");
}
}
&& fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
G.pagesize),
SEEK_CUR) != 0)
- fatal_error ("can't write PCH file: %m");
+ fatal_error ("can%'t write PCH file: %m");
}
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
- fatal_error ("can't write PCH file: %m");
+ fatal_error ("can%'t write PCH file: %m");
free (d);
}
page_entry **new_by_depth;
unsigned long **new_save_in_use;
- new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
- new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
+ new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
+ new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
memcpy (&new_by_depth[0],
&G.by_depth[count_old_page_tables],
{
struct ggc_pch_ondisk d;
unsigned i;
- char *offs = addr;
+ char *offs = (char *) addr;
unsigned long count_old_page_tables;
unsigned long count_new_page_tables;
#ifdef ENABLE_GC_CHECKING
poison_pages ();
#endif
+ /* Since we free all the allocated objects, the free list becomes
+ useless. Validate it now, which will also clear it. */
+ validate_free_objects ();
/* No object read from a PCH file should ever be freed. So, set the
context depth to 1, and set the depth of all the currently-allocated
/* Allocate the appropriate page-table entries for the pages read from
the PCH file. */
if (fread (&d, sizeof (d), 1, f) != 1)
- fatal_error ("can't read PCH file: %m");
+ fatal_error ("can%'t read PCH file: %m");
for (i = 0; i < NUM_ORDERS; i++)
{
if (d.totals[i] == 0)
continue;
- bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
+ bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
num_objs = bytes / OBJECT_SIZE (i);
- entry = xcalloc (1, (sizeof (struct page_entry)
- - sizeof (long)
- + BITMAP_SIZE (num_objs + 1)));
+ entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
+ - sizeof (long)
+ + BITMAP_SIZE (num_objs + 1)));
entry->bytes = bytes;
entry->page = offs;
entry->context_depth = 0;
/* Update the statistics. */
G.allocated = G.allocated_last_gc = offs - (char *)addr;
}
+
+struct alloc_zone
+{
+ int dummy;
+};
+
+struct alloc_zone rtl_zone;
+struct alloc_zone tree_zone;
+struct alloc_zone tree_id_zone;
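+
+/* These dummy zone objects presumably exist so that code written against
+   the zone collector's interface still links when this page-based
+   collector is used; they carry no state here.  */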