#include "timevar.h"
#include "params.h"
#ifdef ENABLE_VALGRIND_CHECKING
-# ifdef HAVE_MEMCHECK_H
-# include <memcheck.h>
+# ifdef HAVE_VALGRIND_MEMCHECK_H
+#  include <valgrind/memcheck.h>
+# elif defined HAVE_MEMCHECK_H
+#  include <memcheck.h>
# else
-# include <valgrind.h>
+#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it. */
#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
#define RTL_SIZE(NSLOTS) \
- (sizeof (struct rtx_def) + ((NSLOTS) - 1) * sizeof (rtunion))
+ (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
#define TREE_EXP_SIZE(OPS) \
(sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
sizeof (struct tree_decl),
sizeof (struct tree_list),
TREE_EXP_SIZE (2),
- RTL_SIZE (2), /* REG, MEM, PLUS, etc. */
- RTL_SIZE (10), /* INSN, CALL_INSN, JUMP_INSN */
+ RTL_SIZE (2), /* MEM, PLUS, etc. */
+ RTL_SIZE (9), /* INSN */
};
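+
+ /* Worked check (illustrative, assuming the rtl.h layout in which
+ RTX_HDR_SIZE is sizeof (struct rtx_def) less its single built-in
+ rtunion slot): the old RTL_SIZE (10) counted that phantom first
+ slot, so the new RTL_SIZE (9) spells the same byte count as
+ RTX_HDR_SIZE + 9 * sizeof (rtunion), one slot per INSN operand. */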
/* The total number of orders. */
static struct
{
- unsigned int mult;
+ size_t mult;
unsigned int shift;
}
inverse_table[NUM_ORDERS];
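+
+ /* A minimal sketch of how this table is consumed (hypothetical helper;
+ the file's OFFSET_TO_BIT macro is the real user). Object offsets
+ within a page are exact multiples of OBJECT_SIZE (order), so division
+ by the object size reduces to multiplying by the inverse of its odd
+ part modulo 2^(bits of size_t) and shifting by its power of two.
+ E.g. assuming a 32-bit size_t, size 24 has odd part 3, shift 3 and
+ mult 0xAAAAAAAB: (48 * 0xAAAAAAAB) mod 2^32 == 16, and
+ 16 >> 3 == 2 == 48 / 24. */
+
+ static inline size_t
+ offset_to_index_sketch (size_t offset, unsigned int order)
+ {
+ return (offset * inverse_table[order].mult) >> inverse_table[order].shift;
+ }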
zero otherwise. We allocate them all together, to enable a
better runtime data access pattern. */
unsigned long **save_in_use;
-
+#ifdef GATHER_STATISTICS
+ struct
+ {
+ /* Total memory allocated with ggc_alloc. */
+ unsigned long long total_allocated;
+ /* Total overhead for memory to be allocated with ggc_alloc. */
+ unsigned long long total_overhead;
+
+ /* Total allocations and overhead for sizes less than 32, 64 and 128.
+ These sizes are interesting because they are typical cache line
+ sizes. */
+
+ unsigned long long total_allocated_under32;
+ unsigned long long total_overhead_under32;
+
+ unsigned long long total_allocated_under64;
+ unsigned long long total_overhead_under64;
+
+ unsigned long long total_allocated_under128;
+ unsigned long long total_overhead_under128;
+
+ /* The allocations for each of the allocation orders. */
+ unsigned long long total_allocated_per_order[NUM_ORDERS];
+
+ /* The overhead for each of the allocation orders. */
+ unsigned long long total_overhead_per_order[NUM_ORDERS];
+ } stats;
+#endif
} G;
/* The size in bytes required to maintain a bitmap for the objects
void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
-\f
+struct alloc_zone *rtl_zone = NULL;
+struct alloc_zone *tree_zone = NULL;
+struct alloc_zone *garbage_zone = NULL;
+
/* Push an entry onto G.depth. */
inline static void
if (G.depth_in_use >= G.depth_max)
{
G.depth_max *= 2;
- G.depth = (unsigned int *) xrealloc ((char *) G.depth,
- G.depth_max * sizeof (unsigned int));
+ G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
}
G.depth[G.depth_in_use++] = i;
}
if (G.by_depth_in_use >= G.by_depth_max)
{
G.by_depth_max *= 2;
- G.by_depth = (page_entry **) xrealloc ((char *) G.by_depth,
- G.by_depth_max * sizeof (page_entry *));
- G.save_in_use = (unsigned long **) xrealloc ((char *) G.save_in_use,
- G.by_depth_max * sizeof (unsigned long *));
+ G.by_depth = xrealloc (G.by_depth,
+ G.by_depth_max * sizeof (page_entry *));
+ G.save_in_use = xrealloc (G.save_in_use,
+ G.by_depth_max * sizeof (unsigned long *));
}
G.by_depth[G.by_depth_in_use] = p;
G.save_in_use[G.by_depth_in_use++] = s;
goto found;
/* Not found -- allocate a new table. */
- table = (page_table) xcalloc (1, sizeof(*table));
+ table = xcalloc (1, sizeof(*table));
table->next = G.lookup;
table->high_bits = high_bits;
G.lookup = table;
L2 = LOOKUP_L2 (p);
if (base[L1] == NULL)
- base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
+ base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
base[L1][L2] = entry;
}
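+
+ /* For symmetry, a sketch of the lookup path (mirroring the file's
+ lookup_page_table_entry, assuming the 64-bit linked-list page table
+ used above): the address's top 32 bits select a table segment, and
+ the L1/L2 indices select the entry within it. */
+
+ static inline page_entry *
+ lookup_sketch (const void *p)
+ {
+ page_table table = G.lookup;
+ size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
+
+ /* Find the segment whose high bits match P. */
+ while (table->high_bits != high_bits)
+ table = table->next;
+
+ return table->table[LOOKUP_L1 (p)][LOOKUP_L2 (p)];
+ }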
memory order. */
for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
{
- e = (struct page_entry *) xcalloc (1, page_entry_size);
+ e = xcalloc (1, page_entry_size);
e->order = order;
e->bytes = G.pagesize;
e->page = page + (i << G.lg_pagesize);
struct page_entry *e, *f = G.free_pages;
for (a = enda - G.pagesize; a != page; a -= G.pagesize)
{
- e = (struct page_entry *) xcalloc (1, page_entry_size);
+ e = xcalloc (1, page_entry_size);
e->order = order;
e->bytes = G.pagesize;
e->page = a;
#endif
if (entry == NULL)
- entry = (struct page_entry *) xcalloc (1, page_entry_size);
+ entry = xcalloc (1, page_entry_size);
entry->bytes = entry_size;
entry->page = page;
{
top = G.by_depth[G.by_depth_in_use-1];
- /* Peel back indicies in depth that index into by_depth, so that
- as new elements are added to by_depth, we note the indicies
+ /* Peel back indices in depth that index into by_depth, so that
+ as new elements are added to by_depth, we note the indices
of those elements, if they are for new context depths. */
while (G.depth_in_use > (size_t)top->context_depth+1)
--G.depth_in_use;
8
};
+/* Typed allocation function. Does nothing special in this collector. */
+
+void *
+ggc_alloc_typed (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size)
+{
+ return ggc_alloc (size);
+}
+
+/* Zone allocation function. Does nothing special in this collector. */
+
+void *
+ggc_alloc_zone (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED)
+{
+ return ggc_alloc (size);
+}
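+
+ /* A usage sketch (hypothetical caller): zone-aware code compiles and
+ behaves identically under this collector, because the zone argument
+ is simply ignored. */
+
+ static inline void *
+ alloc_in_rtl_zone_sketch (size_t size)
+ {
+ /* Equivalent to plain ggc_alloc (size) in this collector. */
+ return ggc_alloc_zone (size, rtl_zone);
+ }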
+
/* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
void *
information is used in deciding when to collect. */
G.allocated += OBJECT_SIZE (order);
+#ifdef GATHER_STATISTICS
+ {
+ G.stats.total_overhead += OBJECT_SIZE (order) - size;
+ G.stats.total_allocated += OBJECT_SIZE (order);
+ G.stats.total_overhead_per_order[order] += OBJECT_SIZE (order) - size;
+ G.stats.total_allocated_per_order[order] += OBJECT_SIZE (order);
+
+ if (size <= 32)
+ {
+ G.stats.total_overhead_under32 += OBJECT_SIZE (order) - size;
+ G.stats.total_allocated_under32 += OBJECT_SIZE (order);
+ }
+
+ if (size <= 64)
+ {
+ G.stats.total_overhead_under64 += OBJECT_SIZE (order) - size;
+ G.stats.total_allocated_under64 += OBJECT_SIZE (order);
+ }
+
+ if (size <= 128)
+ {
+ G.stats.total_overhead_under128 += OBJECT_SIZE (order) - size;
+ G.stats.total_allocated_under128 += OBJECT_SIZE (order);
+ }
+
+ }
+#endif
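+
+ /* Note that the "under" buckets above are cumulative: a 24-byte request
+ counts toward the under-32, under-64 and under-128 totals at once,
+ while the per-order arrays partition the same totals by order. */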
+
if (GGC_DEBUG_LEVEL >= 3)
fprintf (G.debug_file,
"Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
static void
compute_inverse (unsigned order)
{
- unsigned size, inv, e;
-
- /* There can be only one object per "page" in a bucket for sizes
- larger than half a machine page; it will always have offset zero. */
- if (OBJECT_SIZE (order) > G.pagesize/2)
- {
- if (OBJECTS_PER_PAGE (order) != 1)
- abort ();
-
- DIV_MULT (order) = 1;
- DIV_SHIFT (order) = 0;
- return;
- }
+ size_t size, inv;
+ unsigned int e;
size = OBJECT_SIZE (order);
e = 0;
}
/* We have a good page, might as well hold onto it... */
- e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
+ e = xcalloc (1, sizeof (struct page_entry));
e->bytes = G.pagesize;
e->page = p;
e->next = G.free_pages;
G.depth_in_use = 0;
G.depth_max = 10;
- G.depth = (unsigned int *) xmalloc (G.depth_max * sizeof (unsigned int));
+ G.depth = xmalloc (G.depth_max * sizeof (unsigned int));
G.by_depth_in_use = 0;
G.by_depth_max = INITIAL_PTE_COUNT;
- G.by_depth = (page_entry **) xmalloc (G.by_depth_max * sizeof (page_entry *));
- G.save_in_use = (unsigned long **) xmalloc (G.by_depth_max * sizeof (unsigned long *));
+ G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
+ G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
+}
+
+/* Start a new GGC zone. */
+
+struct alloc_zone *
+new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
+{
+ return NULL;
+}
+
+/* Destroy a GGC zone. */
+void
+destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
+{
}
/* Increment the `GC context'. Objects allocated in an outer context
G.context_depth_allocations &= omask - 1;
G.context_depth_collections &= omask - 1;
- /* The G.depth array is shortend so that the last index is the
+ /* The G.depth array is shortened so that the last index is the
context_depth of the top element of by_depth. */
if (depth+1 < G.depth_in_use)
e = G.depth[depth+1];
/* Collect some information about the various sizes of
allocation. */
- fprintf (stderr, "\n%-5s %10s %10s %10s\n",
+ fprintf (stderr,
+ "Memory still allocated at the end of the compilation process\n");
+ fprintf (stderr, "%-5s %10s %10s %10s\n",
"Size", "Allocated", "Used", "Overhead");
for (i = 0; i < NUM_ORDERS; ++i)
{
SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
SCALE (G.allocated), LABEL(G.allocated),
SCALE (total_overhead), LABEL (total_overhead));
+
+#ifdef GATHER_STATISTICS
+ {
+ fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
+
+ fprintf (stderr, "Total Overhead: %10lld\n",
+ G.stats.total_overhead);
+ fprintf (stderr, "Total Allocated: %10lld\n",
+ G.stats.total_allocated);
+
+ fprintf (stderr, "Total Overhead under 32B: %10lld\n",
+ G.stats.total_overhead_under32);
+ fprintf (stderr, "Total Allocated under 32B: %10lld\n",
+ G.stats.total_allocated_under32);
+ fprintf (stderr, "Total Overhead under 64B: %10lld\n",
+ G.stats.total_overhead_under64);
+ fprintf (stderr, "Total Allocated under 64B: %10lld\n",
+ G.stats.total_allocated_under64);
+ fprintf (stderr, "Total Overhead under 128B: %10lld\n",
+ G.stats.total_overhead_under128);
+ fprintf (stderr, "Total Allocated under 128B: %10lld\n",
+ G.stats.total_allocated_under128);
+
+ for (i = 0; i < NUM_ORDERS; i++)
+ if (G.stats.total_allocated_per_order[i])
+ {
+ fprintf (stderr, "Total Overhead page size %7d: %10lld\n",
+ OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]);
+ fprintf (stderr, "Total Allocated page size %7d: %10lld\n",
+ OBJECT_SIZE (i), G.stats.total_allocated_per_order[i]);
+ }
+ }
+#endif
}
\f
struct ggc_pch_data
void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
- size_t size)
+ size_t size, bool is_string ATTRIBUTE_UNUSED)
{
unsigned order;
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
- size_t size)
+ size_t size, bool is_string ATTRIBUTE_UNUSED)
{
unsigned order;
char *result;
void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
- size_t size)
+ size_t size, bool is_string ATTRIBUTE_UNUSED)
{
unsigned order;
+ static const char emptyBytes[256];
if (size <= 256)
order = size_lookup[size];
if (fwrite (x, size, 1, f) != 1)
fatal_error ("can't write PCH file: %m");
- /* In the current implementation, SIZE is always equal to
- OBJECT_SIZE (order) and so the fseek is never executed. */
- if (size != OBJECT_SIZE (order)
- && fseek (f, OBJECT_SIZE (order) - size, SEEK_CUR) != 0)
- fatal_error ("can't write PCH file: %m");
+ /* If SIZE is not the same as OBJECT_SIZE (order), then we need to pad the
+ object out to OBJECT_SIZE (order). This happens for strings. */
+
+ if (size != OBJECT_SIZE (order))
+ {
+ unsigned padding = OBJECT_SIZE (order) - size;
+
+ /* To speed small writes, we use a nulled-out array that's larger
+ than most padding requests as the source for our null bytes. This
+ permits us to do the padding with fwrite() rather than fseek(), and
+ limits the chance that the OS may try to flush any outstanding
+ writes. */
+ if (padding <= sizeof (emptyBytes))
+ {
+ if (fwrite (emptyBytes, 1, padding, f) != padding)
+ fatal_error ("can't write PCH file");
+ }
+ else
+ {
+ /* Larger than our buffer? Just default to fseek. */
+ if (fseek (f, padding, SEEK_CUR) != 0)
+ fatal_error ("can't write PCH file");
+ }
+ }
d->written[order]++;
if (d->written[order] == d->d.totals[order]
page_entry **new_by_depth;
unsigned long **new_save_in_use;
- new_by_depth = (page_entry **) xmalloc (G.by_depth_max * sizeof (page_entry *));
- new_save_in_use = (unsigned long **) xmalloc (G.by_depth_max * sizeof (unsigned long *));
+ new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
+ new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
memcpy (&new_by_depth[0],
&G.by_depth[count_old_page_tables],
/* We've just read in a PCH file. So, every object that used to be
allocated is now free. */
clear_marks ();
-#ifdef GGC_POISON
+#ifdef ENABLE_GC_CHECKING
poison_pages ();
#endif