/* "Bag-of-pages" garbage collector for the GNU compiler.
- Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007
- Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009,
+ 2010 Free Software Foundation, Inc.
This file is part of GCC.
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
-#include "toplev.h"
+#include "toplev.h" /* exact_log2 */
+#include "diagnostic-core.h"
#include "flags.h"
#include "ggc.h"
+#include "ggc-internal.h"
#include "timevar.h"
#include "params.h"
#include "tree-flow.h"
-#ifdef ENABLE_VALGRIND_CHECKING
-# ifdef HAVE_VALGRIND_MEMCHECK_H
-# include <valgrind/memcheck.h>
-# elif defined HAVE_MEMCHECK_H
-# include <memcheck.h>
-# else
-# include <valgrind.h>
-# endif
-#else
-/* Avoid #ifdef:s when we can help it. */
-#define VALGRIND_DISCARD(x)
-#endif
+#include "cfgloop.h"
+#include "plugin.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
file open. Prefer either to valloc. */
#define OFFSET_TO_BIT(OFFSET, ORDER) \
(((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
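(Illustrative note, not part of the patch: OFFSET_TO_BIT avoids an integer division per object by multiplying the byte offset with a precomputed reciprocal, DIV_MULT, and shifting by DIV_SHIFT. A minimal standalone sketch of the idea, assuming a 4K page and an arbitrary shift of 16 chosen for the demo:

#include <assert.h>
#include <stdio.h>

int
main (void)
{
  size_t object_size = 24;          /* an example non-power-of-two size */
  unsigned shift = 16;              /* assumed shift, enough for a 4K page */
  size_t mult = ((size_t) 1 << shift) / object_size + 1;  /* rounded-up inverse */
  size_t page_size = 4096;
  size_t offset;

  /* For every object start within the page, multiply-and-shift gives the
     same answer as dividing by the object size.  */
  for (offset = 0; offset < page_size; offset += object_size)
    assert ((offset * mult) >> shift == offset / object_size);

  puts ("inverse multiplication matches division for every object start");
  return 0;
}

The real collector precomputes these constants per order in compute_inverse; the formula above is only a hypothetical stand-in.)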
+/* We use this structure to determine the alignment required for
+ allocations. For power-of-two sized allocations, that's not a
+ problem, but it does matter for odd-sized allocations.
+ We do not care about alignment for floating-point types. */
+
+struct max_alignment {
+ char c;
+ union {
+ HOST_WIDEST_INT i;
+ void *p;
+ } u;
+};
+
+/* The biggest alignment required. */
+
+#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
+
+
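(Illustrative note, not part of the patch: the offsetof trick above reports the strictest alignment among the union's members, because the compiler must pad the lone char up to that boundary. A hypothetical standalone check:

#include <stdio.h>
#include <stddef.h>

struct max_alignment_demo {
  char c;
  union {
    long long i;   /* stand-in for HOST_WIDEST_INT */
    void *p;
  } u;
};

int
main (void)
{
  /* On a typical LP64 target both members need 8-byte alignment,
     so this prints 8.  */
  printf ("MAX_ALIGNMENT = %zu\n", offsetof (struct max_alignment_demo, u));
  return 0;
}
)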
/* The number of extra orders, not corresponding to power-of-two sized
objects. */
thing you need to do to add a new special allocation size. */
static const size_t extra_order_size_table[] = {
- sizeof (struct stmt_ann_d),
- sizeof (struct var_ann_d),
+ /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
+ There are a lot of structures with these sizes and explicitly
+ listing them risks orders being dropped because they changed size. */
+ MAX_ALIGNMENT * 3,
+ MAX_ALIGNMENT * 5,
+ MAX_ALIGNMENT * 6,
+ MAX_ALIGNMENT * 7,
+ MAX_ALIGNMENT * 9,
+ MAX_ALIGNMENT * 10,
+ MAX_ALIGNMENT * 11,
+ MAX_ALIGNMENT * 12,
+ MAX_ALIGNMENT * 13,
+ MAX_ALIGNMENT * 14,
+ MAX_ALIGNMENT * 15,
sizeof (struct tree_decl_non_common),
sizeof (struct tree_field_decl),
sizeof (struct tree_parm_decl),
sizeof (struct tree_var_decl),
- sizeof (struct tree_list),
- sizeof (struct tree_ssa_name),
+ sizeof (struct tree_type),
sizeof (struct function),
sizeof (struct basic_block_def),
- sizeof (bitmap_element),
- sizeof (bitmap_head),
- /* PHI nodes with one to three arguments are already covered by the
- above sizes. */
- sizeof (struct tree_phi_node) + sizeof (struct phi_arg_d) * 3,
- TREE_EXP_SIZE (2),
- RTL_SIZE (2), /* MEM, PLUS, etc. */
- RTL_SIZE (9), /* INSN */
+ sizeof (struct cgraph_node),
+ sizeof (struct loop),
};
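(For context, a hypothetical illustration rather than patch code: without the small-multiple entries above, an object of 3 * MAX_ALIGNMENT bytes would land in the next power-of-two order and waste the difference on every allocation. Assuming MAX_ALIGNMENT is 8:

#include <stdio.h>

static size_t
next_power_of_two (size_t n)
{
  size_t p = 1;
  while (p < n)
    p <<= 1;
  return p;
}

int
main (void)
{
  size_t max_alignment = 8;           /* assumed value for this illustration */
  size_t size = 3 * max_alignment;    /* 24-byte objects */
  size_t bucket = next_power_of_two (size);

  /* With only power-of-two orders, each 24-byte object burns 8 extra bytes.  */
  printf ("waste per object without an extra order: %zu bytes\n", bucket - size);
  return 0;
}
)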
/* The total number of orders. */
#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
-/* We use this structure to determine the alignment required for
- allocations. For power-of-two sized allocations, that's not a
- problem, but it does matter for odd-sized allocations. */
-
-struct max_alignment {
- char c;
- union {
- HOST_WIDEST_INT i;
- long double d;
- } u;
-};
-
-/* The biggest alignment required. */
-
-#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
-
/* Compute the smallest nonnegative number which when added to X gives
a multiple of F. */
#endif
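(The comment above describes a padding computation whose macro is elided from this hunk; one hypothetical way to express it, shown only for illustration:

#include <assert.h>

/* Smallest nonnegative amount to add to X so the sum is a multiple of F
   (demo macro, named to avoid clashing with the real one).  */
#define ROUND_UP_AMOUNT_DEMO(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

int
main (void)
{
  assert (ROUND_UP_AMOUNT_DEMO (13, 8) == 3);   /* 13 + 3 == 16 */
  assert (ROUND_UP_AMOUNT_DEMO (16, 8) == 0);   /* already a multiple */
  return 0;
}
)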
+#ifdef ENABLE_GC_ALWAYS_COLLECT
+/* List of free objects to be verified as actually free on the
+ next collection. */
+struct free_object
+{
+ void *object;
+ struct free_object *next;
+};
+#endif
+
/* The rest of the global variables. */
static struct globals
{
/* Maximum number of elements that can be used before resizing. */
unsigned int depth_max;
- /* Each element of this arry is an index in by_depth where the given
+ /* Each element of this array is an index in by_depth where the given
depth starts. This structure is indexed by that given depth we
are interested in. */
unsigned int *depth;
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
next collection. */
- struct free_object
- {
- void *object;
- struct free_object *next;
- } *free_object_list;
+ struct free_object *free_object_list;
#endif
#ifdef GATHER_STATISTICS
struct
{
- /* Total memory allocated with ggc_alloc. */
+ /* Total GC-allocated memory. */
unsigned long long total_allocated;
- /* Total overhead for memory to be allocated with ggc_alloc. */
+ /* Total overhead for GC-allocated memory. */
unsigned long long total_overhead;
/* Total allocations and overhead for sizes less than 32, 64 and 128.
These sizes are interesting because they are typical cache line
sizes. */
-
+
unsigned long long total_allocated_under32;
unsigned long long total_overhead_under32;
-
+
unsigned long long total_allocated_under64;
unsigned long long total_overhead_under64;
-
+
unsigned long long total_allocated_under128;
unsigned long long total_overhead_under128;
-
+
/* The allocations for each of the allocation orders. */
unsigned long long total_allocated_per_order[NUM_ORDERS];
if (G.depth_in_use >= G.depth_max)
{
G.depth_max *= 2;
- G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
+ G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
}
G.depth[G.depth_in_use++] = i;
}
if (G.by_depth_in_use >= G.by_depth_max)
{
G.by_depth_max *= 2;
- G.by_depth = xrealloc (G.by_depth,
- G.by_depth_max * sizeof (page_entry *));
- G.save_in_use = xrealloc (G.save_in_use,
- G.by_depth_max * sizeof (unsigned long *));
+ G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
+ G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
+ G.by_depth_max);
}
G.by_depth[G.by_depth_in_use] = p;
G.save_in_use[G.by_depth_in_use++] = s;
goto found;
/* Not found -- allocate a new table. */
- table = xcalloc (1, sizeof(*table));
+ table = XCNEW (struct page_table_chain);
table->next = G.lookup;
table->high_bits = high_bits;
G.lookup = table;
/* Prints the page-entry for object size ORDER, for debugging. */
-void
+DEBUG_FUNCTION void
debug_print_page_list (int order)
{
page_entry *p;
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
{
#ifdef HAVE_MMAP_ANON
- char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
- char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE, G.dev_zero_fd, 0);
+ char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
if (page == (char *) MAP_FAILED)
G.bytes_mapped += size;
/* Pretend we don't have access to the allocated pages. We'll enable
- access to smaller pieces of the area in ggc_alloc. Discard the
+ access to smaller pieces of the area in ggc_internal_alloc. Discard the
handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
return page;
}
memory order. */
for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
{
- e = xcalloc (1, page_entry_size);
+ e = XCNEWVAR (struct page_entry, page_entry_size);
e->order = order;
e->bytes = G.pagesize;
e->page = page + (i << G.lg_pagesize);
alloc_size = GGC_QUIRE_SIZE * G.pagesize;
else
alloc_size = entry_size + G.pagesize - 1;
- allocation = xmalloc (alloc_size);
+ allocation = XNEWVEC (char, alloc_size);
page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
head_slop = page - allocation;
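(Illustrative note, not part of the patch: adding pagesize - 1 and masking with the negated power-of-two page size, as in the line above, rounds the malloc result up to the next page boundary. A hypothetical check:

#include <stdio.h>

int
main (void)
{
  size_t pagesize = 4096;                 /* assumed power-of-two page size */
  size_t allocation = 0x12345;            /* an arbitrary unaligned address */
  size_t page = (allocation + pagesize - 1) & -pagesize;

  printf ("0x%zx rounds up to 0x%zx\n", allocation, page);  /* -> 0x13000 */
  return 0;
}
)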
struct page_entry *e, *f = G.free_pages;
for (a = enda - G.pagesize; a != page; a -= G.pagesize)
{
- e = xcalloc (1, page_entry_size);
+ e = XCNEWVAR (struct page_entry, page_entry_size);
e->order = order;
e->bytes = G.pagesize;
e->page = a;
#endif
if (entry == NULL)
- entry = xcalloc (1, page_entry_size);
+ entry = XCNEWVAR (struct page_entry, page_entry_size);
entry->bytes = entry_size;
entry->page = page;
/* Mark the page as inaccessible. Discard the handle to avoid handle
leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
set_page_table_entry (entry->page, NULL);
/* We cannot free a page from a context deeper than the current
one. */
gcc_assert (entry->context_depth == top->context_depth);
-
+
/* Put top element into freed slot. */
G.by_depth[i] = top;
G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
MEM_STAT_DECL)
{
- return ggc_alloc_stat (size PASS_MEM_STAT);
+ return ggc_internal_alloc_stat (size PASS_MEM_STAT);
}
/* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
void *
-ggc_alloc_stat (size_t size MEM_STAT_DECL)
+ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
size_t order, word, bit, object_offset, object_size;
struct page_entry *entry;
exact same semantics in presence of memory bugs, regardless of
ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
/* `Poison' the entire allocated object, including any padding at
the end. */
/* Make the bytes after the end of the object unaccessible. Discard the
handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
- object_size - size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
+ object_size - size));
#endif
/* Tell Valgrind that the memory is there, but its content isn't
defined. The bytes at the end of the object are still marked
unaccessible. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
/* Keep track of how many bytes are being allocated. This
information is used in deciding when to collect. */
return result;
}
+/* Mark function for strings. */
+
+void
+gt_ggc_m_S (const void *p)
+{
+ page_entry *entry;
+ unsigned bit, word;
+ unsigned long mask;
+ unsigned long offset;
+
+ if (!p || !ggc_allocated_p (p))
+ return;
+
+  /* Look up the page on which the object is allocated.  */
+ entry = lookup_page_table_entry (p);
+ gcc_assert (entry);
+
+ /* Calculate the index of the object on the page; this is its bit
+ position in the in_use_p bitmap. Note that because a char* might
+ point to the middle of an object, we need special code here to
+ make sure P points to the start of an object. */
+ offset = ((const char *) p - entry->page) % object_size_table[entry->order];
+ if (offset)
+ {
+ /* Here we've seen a char* which does not point to the beginning
+ of an allocated object. We assume it points to the middle of
+ a STRING_CST. */
+ gcc_assert (offset == offsetof (struct tree_string, str));
+ p = ((const char *) p) - offset;
+ gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
+ return;
+ }
+
+ bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
+ word = bit / HOST_BITS_PER_LONG;
+ mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
+
+ /* If the bit was previously set, skip it. */
+ if (entry->in_use_p[word] & mask)
+ return;
+
+ /* Otherwise set it, and decrement the free object count. */
+ entry->in_use_p[word] |= mask;
+ entry->num_free_objects -= 1;
+
+ if (GGC_DEBUG_LEVEL >= 4)
+ fprintf (G.debug_file, "Marking %p\n", p);
+
+ return;
+}
+
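(The word/mask arithmetic in gt_ggc_m_S is the same bitmap addressing used throughout the collector for the in_use_p array. A minimal sketch with a hypothetical helper, not part of the patch:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof (unsigned long))

/* Test and set the bit for object index BIT; return its previous state.  */
static int
test_and_set (unsigned long *bitmap, unsigned bit)
{
  unsigned word = bit / BITS_PER_LONG;
  unsigned long mask = (unsigned long) 1 << (bit % BITS_PER_LONG);
  int was_set = (bitmap[word] & mask) != 0;
  bitmap[word] |= mask;
  return was_set;
}

int
main (void)
{
  unsigned long in_use[4] = { 0 };
  printf ("first mark: %d\n", test_and_set (in_use, 69));   /* prints 0 */
  printf ("second mark: %d\n", test_and_set (in_use, 69));  /* prints 1 */
  return 0;
}
)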
/* If P is not marked, marks it and return false. Otherwise return true.
P must have been allocated by the GC allocator; it mustn't point to
static objects, stack variables, or memory allocated with malloc. */
#ifdef ENABLE_GC_CHECKING
/* Poison the data, to indicate the data is garbage. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
memset (p, 0xa5, size);
#endif
/* Let valgrind know the object is free. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* In the completely-anal-checking mode, we do *not* immediately free
- the data, but instead verify that the data is *actually* not
+ the data, but instead verify that the data is *actually* not
reachable the next time we collect. */
{
struct free_object *fo = XNEW (struct free_object);
/* If the page is completely full, then it's supposed to
be after all pages that aren't. Since we've freed one
object from a page that was full, we need to move the
- page to the head of the list.
+ page to the head of the list.
PE is the node we want to move. Q is the previous node
and P is the next node in the list. */
static void
compute_inverse (unsigned order)
{
- size_t size, inv;
+ size_t size, inv;
unsigned int e;
size = OBJECT_SIZE (order);
G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
}
-/* Start a new GGC zone. */
-
-struct alloc_zone *
-new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
-{
- return NULL;
-}
-
-/* Destroy a GGC zone. */
-void
-destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
-{
-}
-
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
if (p->context_depth < G.context_depth)
{
if (! save_in_use_p (p))
- save_in_use_p (p) = xmalloc (bitmap_size);
+ save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
}
G.pages[order] = next;
else
previous->next = next;
-
+
/* Splice P out of the back pointers too. */
if (next)
next->prev = previous;
so the exact same memory semantics is kept, in case
there are memory errors. We override this request
below. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
+ size));
memset (object, 0xa5, size);
/* Drop the handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
}
}
}
/* Indicate that we've seen collections at this context depth. */
G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
+ invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
+
clear_marks ();
ggc_mark_roots ();
#ifdef GATHER_STATISTICS
G.allocated_last_gc = G.allocated;
+ invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
+
timevar_pop (TV_GC);
if (!quiet_flag)
SCALE (G.allocated), STAT_LABEL(G.allocated),
SCALE (total_overhead), STAT_LABEL (total_overhead));
-#ifdef GATHER_STATISTICS
+#ifdef GATHER_STATISTICS
{
fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
G.stats.total_overhead_under128);
fprintf (stderr, "Total Allocated under 128B: %10lld\n",
G.stats.total_allocated_under128);
-
+
for (i = 0; i < NUM_ORDERS; i++)
if (G.stats.total_allocated_per_order[i])
{
#endif
}
\f
+struct ggc_pch_ondisk
+{
+ unsigned totals[NUM_ORDERS];
+};
+
struct ggc_pch_data
{
- struct ggc_pch_ondisk
- {
- unsigned totals[NUM_ORDERS];
- } d;
+ struct ggc_pch_ondisk d;
size_t base[NUM_ORDERS];
size_t written[NUM_ORDERS];
};
size_t size, bool is_string ATTRIBUTE_UNUSED)
{
unsigned order;
- static const char emptyBytes[256];
+ static const char emptyBytes[256] = { 0 };
if (size < NUM_SIZE_LOOKUP)
order = size_lookup[size];
{
struct ggc_pch_ondisk d;
unsigned i;
- char *offs = addr;
+ char *offs = (char *) addr;
unsigned long count_old_page_tables;
unsigned long count_new_page_tables;
bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
num_objs = bytes / OBJECT_SIZE (i);
- entry = xcalloc (1, (sizeof (struct page_entry)
- - sizeof (long)
- + BITMAP_SIZE (num_objs + 1)));
+ entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
+ - sizeof (long)
+ + BITMAP_SIZE (num_objs + 1)));
entry->bytes = bytes;
entry->page = offs;
entry->context_depth = 0;
/* Update the statistics. */
G.allocated = G.allocated_last_gc = offs - (char *)addr;
}
+
+struct alloc_zone
+{
+ int dummy;
+};
+
+struct alloc_zone rtl_zone;
+struct alloc_zone tree_zone;
+struct alloc_zone tree_id_zone;