#include "timevar.h"
#include "params.h"
#ifdef ENABLE_VALGRIND_CHECKING
-# ifdef HAVE_MEMCHECK_H
-# include <memcheck.h>
+# ifdef HAVE_VALGRIND_MEMCHECK_H
+#  include <valgrind/memcheck.h>
+# elif defined HAVE_MEMCHECK_H
+#  include <memcheck.h>
# else
-# include <valgrind.h>
+#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it. */
#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
#define RTL_SIZE(NSLOTS) \
- (sizeof (struct rtx_def) + ((NSLOTS) - 1) * sizeof (rtunion))
+ (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
+
+#define TREE_EXP_SIZE(OPS) \
+ (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
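+
+/* As a worked example of the two size macros above (an illustrative
+   note, not part of the functional change): TREE_EXP_SIZE (2) is
+   sizeof (struct tree_exp) plus room for one extra operand, since
+   struct tree_exp already embeds its first operand (hence the -1),
+   while RTL_SIZE (2) is the rtx header (RTX_HDR_SIZE) plus two full
+   rtunion slots, the header containing no operands at all.  */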
/* The Ith entry is the maximum size of an object to be stored in the
Ith extra order. Adding a new entry to this array is the *only*
static const size_t extra_order_size_table[] = {
sizeof (struct tree_decl),
sizeof (struct tree_list),
- RTL_SIZE (2), /* REG, MEM, PLUS, etc. */
- RTL_SIZE (10), /* INSN, CALL_INSN, JUMP_INSN */
+ TREE_EXP_SIZE (2),
+ RTL_SIZE (2), /* MEM, PLUS, etc. */
+ RTL_SIZE (9), /* INSN, CALL_INSN, JUMP_INSN */
};
/* The total number of orders. */
char c;
union {
HOST_WIDEST_INT i;
-#ifdef HAVE_LONG_DOUBLE
long double d;
-#else
- double d;
-#endif
} u;
};
static struct
{
- unsigned int mult;
+ size_t mult;
unsigned int shift;
}
inverse_table[NUM_ORDERS];
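+
+/* With these constants, dividing an object's page offset by
+   OBJECT_SIZE (order) reduces to
+     index = (offset * inverse_table[order].mult) >> inverse_table[order].shift;
+   which is exact because offsets are always multiples of the object
+   size; compute_inverse below derives MULT and SHIFT.  */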
better runtime data access pattern. */
unsigned long **save_in_use;
+#ifdef GATHER_STATISTICS
+ struct
+ {
+    /* Total memory allocated with ggc_alloc.  */
+    unsigned long long total_allocated;
+    /* Total overhead for memory to be allocated with ggc_alloc.  */
+    unsigned long long total_overhead;
+
+ /* Total allocations and overhead for sizes less than 32, 64 and 128.
+ These sizes are interesting because they are typical cache line
+ sizes. */
+
+ unsigned long long total_allocated_under32;
+ unsigned long long total_overhead_under32;
+
+ unsigned long long total_allocated_under64;
+ unsigned long long total_overhead_under64;
+
+ unsigned long long total_allocated_under128;
+ unsigned long long total_overhead_under128;
+
+ /* The overhead for each of the allocation orders. */
+ unsigned long long total_overhead_per_order[NUM_ORDERS];
+ } stats;
+#endif
} G;
/* The size in bytes required to maintain a bitmap for the objects
/* Initial guess as to how many page table entries we might need. */
#define INITIAL_PTE_COUNT 128
\f
-static int ggc_allocated_p PARAMS ((const void *));
-static page_entry *lookup_page_table_entry PARAMS ((const void *));
-static void set_page_table_entry PARAMS ((void *, page_entry *));
+static int ggc_allocated_p (const void *);
+static page_entry *lookup_page_table_entry (const void *);
+static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
-static char *alloc_anon PARAMS ((char *, size_t));
+static char *alloc_anon (char *, size_t);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
-static size_t page_group_index PARAMS ((char *, char *));
-static void set_page_group_in_use PARAMS ((page_group *, char *));
-static void clear_page_group_in_use PARAMS ((page_group *, char *));
+static size_t page_group_index (char *, char *);
+static void set_page_group_in_use (page_group *, char *);
+static void clear_page_group_in_use (page_group *, char *);
#endif
-static struct page_entry * alloc_page PARAMS ((unsigned));
-static void free_page PARAMS ((struct page_entry *));
-static void release_pages PARAMS ((void));
-static void clear_marks PARAMS ((void));
-static void sweep_pages PARAMS ((void));
-static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
-static void compute_inverse PARAMS ((unsigned));
-static inline void adjust_depth PARAMS ((void));
-static void move_ptes_to_front PARAMS ((int, int));
+static struct page_entry * alloc_page (unsigned);
+static void free_page (struct page_entry *);
+static void release_pages (void);
+static void clear_marks (void);
+static void sweep_pages (void);
+static void ggc_recalculate_in_use_p (page_entry *);
+static void compute_inverse (unsigned);
+static inline void adjust_depth (void);
+static void move_ptes_to_front (int, int);
#ifdef ENABLE_GC_CHECKING
-static void poison_pages PARAMS ((void));
+static void poison_pages (void);
#endif
-void debug_print_page_list PARAMS ((int));
-static void push_depth PARAMS ((unsigned int));
-static void push_by_depth PARAMS ((page_entry *, unsigned long *));
-\f
+void debug_print_page_list (int);
+static void push_depth (unsigned int);
+static void push_by_depth (page_entry *, unsigned long *);
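+
+/* Placeholder zones for the zone-collector interface.  This collector
+   ignores the zone argument (see ggc_alloc_zone below), so these exist
+   only to give zone-aware callers something to pass.  */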
+struct alloc_zone *rtl_zone = NULL;
+struct alloc_zone *tree_zone = NULL;
+struct alloc_zone *garbage_zone = NULL;
+
/* Push an entry onto G.depth. */
inline static void
-push_depth (i)
- unsigned int i;
+push_depth (unsigned int i)
{
if (G.depth_in_use >= G.depth_max)
{
G.depth_max *= 2;
- G.depth = (unsigned int *) xrealloc ((char *) G.depth,
- G.depth_max * sizeof (unsigned int));
+ G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
}
G.depth[G.depth_in_use++] = i;
}
/* Push an entry onto G.by_depth and G.save_in_use. */
inline static void
-push_by_depth (p, s)
- page_entry *p;
- unsigned long *s;
+push_by_depth (page_entry *p, unsigned long *s)
{
if (G.by_depth_in_use >= G.by_depth_max)
{
G.by_depth_max *= 2;
- G.by_depth = (page_entry **) xrealloc ((char *) G.by_depth,
- G.by_depth_max * sizeof (page_entry *));
- G.save_in_use = (unsigned long **) xrealloc ((char *) G.save_in_use,
- G.by_depth_max * sizeof (unsigned long *));
+ G.by_depth = xrealloc (G.by_depth,
+ G.by_depth_max * sizeof (page_entry *));
+ G.save_in_use = xrealloc (G.save_in_use,
+ G.by_depth_max * sizeof (unsigned long *));
}
G.by_depth[G.by_depth_in_use] = p;
G.save_in_use[G.by_depth_in_use++] = s;
/* Returns nonzero if P was allocated in GC'able memory. */
static inline int
-ggc_allocated_p (p)
- const void *p;
+ggc_allocated_p (const void *p)
{
page_entry ***base;
size_t L1, L2;
Die (probably) if the object wasn't allocated via GC. */
static inline page_entry *
-lookup_page_table_entry(p)
- const void *p;
+lookup_page_table_entry (const void *p)
{
page_entry ***base;
size_t L1, L2;
/* Set the page table entry for a page. */
static void
-set_page_table_entry(p, entry)
- void *p;
- page_entry *entry;
+set_page_table_entry (void *p, page_entry *entry)
{
page_entry ***base;
size_t L1, L2;
goto found;
/* Not found -- allocate a new table. */
- table = (page_table) xcalloc (1, sizeof(*table));
+ table = xcalloc (1, sizeof(*table));
table->next = G.lookup;
table->high_bits = high_bits;
G.lookup = table;
L2 = LOOKUP_L2 (p);
if (base[L1] == NULL)
- base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
+ base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
base[L1][L2] = entry;
}
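+
+/* To make the lookup structure concrete: on 64-bit hosts the walk above
+   first finds the page_table whose HIGH_BITS match the address, after
+   which LOOKUP_L1 and LOOKUP_L2 index the two-level array, so a page's
+   entry is always reachable as base[L1][L2].  */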
/* Prints the page-entry for object size ORDER, for debugging. */
void
-debug_print_page_list (order)
- int order;
+debug_print_page_list (int order)
{
page_entry *p;
- printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
- (PTR) G.page_tails[order]);
+ printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
+ (void *) G.page_tails[order]);
p = G.pages[order];
while (p != NULL)
{
- printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
+ printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
p->num_free_objects);
p = p->next;
}
compile error unless exactly one of the HAVE_* is defined. */
static inline char *
-alloc_anon (pref, size)
- char *pref ATTRIBUTE_UNUSED;
- size_t size;
+alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
{
#ifdef HAVE_MMAP_ANON
char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
/* Compute the index for this page into the page group. */
static inline size_t
-page_group_index (allocation, page)
- char *allocation, *page;
+page_group_index (char *allocation, char *page)
{
return (size_t) (page - allocation) >> G.lg_pagesize;
}
/* Set and clear the in_use bit for this page in the page group. */
static inline void
-set_page_group_in_use (group, page)
- page_group *group;
- char *page;
+set_page_group_in_use (page_group *group, char *page)
{
group->in_use |= 1 << page_group_index (group->allocation, page);
}
static inline void
-clear_page_group_in_use (group, page)
- page_group *group;
- char *page;
+clear_page_group_in_use (page_group *group, char *page)
{
group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
appropriate page_table list. */
static inline struct page_entry *
-alloc_page (order)
- unsigned order;
+alloc_page (unsigned order)
{
struct page_entry *entry, *p, **pp;
char *page;
memory order. */
for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
{
- e = (struct page_entry *) xcalloc (1, page_entry_size);
+ e = xcalloc (1, page_entry_size);
e->order = order;
e->bytes = G.pagesize;
e->page = page + (i << G.lg_pagesize);
struct page_entry *e, *f = G.free_pages;
for (a = enda - G.pagesize; a != page; a -= G.pagesize)
{
- e = (struct page_entry *) xcalloc (1, page_entry_size);
+ e = xcalloc (1, page_entry_size);
e->order = order;
e->bytes = G.pagesize;
e->page = a;
#endif
if (entry == NULL)
- entry = (struct page_entry *) xcalloc (1, page_entry_size);
+ entry = xcalloc (1, page_entry_size);
entry->bytes = entry_size;
entry->page = page;
if (GGC_DEBUG_LEVEL >= 2)
fprintf (G.debug_file,
"Allocating page at %p, object size=%lu, data %p-%p\n",
- (PTR) entry, (unsigned long) OBJECT_SIZE (order), page,
+ (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
page + entry_size - 1);
return entry;
used by the top of the G.by_depth is used. */
static inline void
-adjust_depth ()
+adjust_depth (void)
{
page_entry *top;
{
top = G.by_depth[G.by_depth_in_use-1];
- /* Peel back indicies in depth that index into by_depth, so that
- as new elements are added to by_depth, we note the indicies
+ /* Peel back indices in depth that index into by_depth, so that
+ as new elements are added to by_depth, we note the indices
of those elements, if they are for new context depths. */
while (G.depth_in_use > (size_t)top->context_depth+1)
--G.depth_in_use;
/* For a page that is no longer needed, put it on the free page list. */
static inline void
-free_page (entry)
- page_entry *entry;
+free_page (page_entry *entry)
{
if (GGC_DEBUG_LEVEL >= 2)
fprintf (G.debug_file,
- "Deallocating page at %p, data %p-%p\n", (PTR) entry,
+ "Deallocating page at %p, data %p-%p\n", (void *) entry,
entry->page, entry->page + entry->bytes - 1);
/* Mark the page as inaccessible. Discard the handle to avoid handle
/* Release the free page cache to the system. */
static void
-release_pages ()
+release_pages (void)
{
#ifdef USING_MMAP
page_entry *p, *next;
8
};
+/* Typed allocation function. Does nothing special in this collector. */
+
+void *
+ggc_alloc_typed (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size)
+{
+ return ggc_alloc (size);
+}
+
+/* Zone allocation function. Does nothing special in this collector. */
+
+void *
+ggc_alloc_zone (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED)
+{
+ return ggc_alloc (size);
+}
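+
+/* For example, a zone-aware caller can write
+     p = ggc_alloc_zone (size, rtl_zone);
+   and, with this collector, receive exactly the storage that a plain
+   ggc_alloc (size) would have returned.  */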
+
/* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
void *
-ggc_alloc (size)
- size_t size;
+ggc_alloc (size_t size)
{
unsigned order, word, bit, object_offset;
struct page_entry *entry;
information is used in deciding when to collect. */
G.allocated += OBJECT_SIZE (order);
+#ifdef GATHER_STATISTICS
+ {
+ G.stats.total_overhead += OBJECT_SIZE (order) - size;
+ G.stats.total_overhead_per_order[order] += OBJECT_SIZE (order) - size;
+    G.stats.total_allocated += OBJECT_SIZE (order);
+
+    if (size <= 32)
+      {
+        G.stats.total_overhead_under32 += OBJECT_SIZE (order) - size;
+        G.stats.total_allocated_under32 += OBJECT_SIZE (order);
+      }
+
+    if (size <= 64)
+      {
+        G.stats.total_overhead_under64 += OBJECT_SIZE (order) - size;
+        G.stats.total_allocated_under64 += OBJECT_SIZE (order);
+      }
+
+    if (size <= 128)
+      {
+        G.stats.total_overhead_under128 += OBJECT_SIZE (order) - size;
+        G.stats.total_allocated_under128 += OBJECT_SIZE (order);
+      }
+  }
+#endif
+
if (GGC_DEBUG_LEVEL >= 3)
fprintf (G.debug_file,
"Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
(unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
- (PTR) entry);
+ (void *) entry);
return result;
}
static objects, stack variables, or memory allocated with malloc. */
int
-ggc_set_mark (p)
- const void *p;
+ggc_set_mark (const void *p)
{
page_entry *entry;
unsigned bit, word;
static objects, stack variables, or memory allocated with malloc. */
int
-ggc_marked_p (p)
- const void *p;
+ggc_marked_p (const void *p)
{
page_entry *entry;
unsigned bit, word;
/* Return the size of the gc-able object P. */
size_t
-ggc_get_size (p)
- const void *p;
+ggc_get_size (const void *p)
{
page_entry *pe = lookup_page_table_entry (p);
return OBJECT_SIZE (pe->order);
constants). */
static void
-compute_inverse (order)
- unsigned order;
+compute_inverse (unsigned order)
{
- unsigned size, inv, e;
-
- /* There can be only one object per "page" in a bucket for sizes
- larger than half a machine page; it will always have offset zero. */
- if (OBJECT_SIZE (order) > G.pagesize/2)
- {
- if (OBJECTS_PER_PAGE (order) != 1)
- abort ();
-
- DIV_MULT (order) = 1;
- DIV_SHIFT (order) = 0;
- return;
- }
+ size_t size, inv;
+ unsigned int e;
size = OBJECT_SIZE (order);
e = 0;
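+
+  /* The code that follows factors SIZE as ODD * 2**E: E counts the
+     factors of two, and MULT becomes the multiplicative inverse of the
+     odd part modulo 2**N (N the width of MULT), found by the
+     Newton-style iteration inv = inv * (2 - inv * size), which doubles
+     the number of correct low-order bits at each step.  */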
/* Initialize the ggc-mmap allocator. */
void
-init_ggc ()
+init_ggc (void)
{
unsigned order;
#ifdef HAVE_MMAP_DEV_ZERO
G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
if (G.dev_zero_fd == -1)
- abort ();
+ internal_error ("open /dev/zero: %m");
#endif
#if 0
}
/* We have a good page, might as well hold onto it... */
- e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
+ e = xcalloc (1, sizeof (struct page_entry));
e->bytes = G.pagesize;
e->page = p;
e->next = G.free_pages;
G.depth_in_use = 0;
G.depth_max = 10;
- G.depth = (unsigned int *) xmalloc (G.depth_max * sizeof (unsigned int));
+ G.depth = xmalloc (G.depth_max * sizeof (unsigned int));
G.by_depth_in_use = 0;
G.by_depth_max = INITIAL_PTE_COUNT;
- G.by_depth = (page_entry **) xmalloc (G.by_depth_max * sizeof (page_entry *));
- G.save_in_use = (unsigned long **) xmalloc (G.by_depth_max * sizeof (unsigned long *));
+ G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
+ G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
}
/* Increment the `GC context'. Objects allocated in an outer context
are never freed, eliminating the need to register their roots. */
void
-ggc_push_context ()
+ggc_push_context (void)
{
++G.context_depth;
reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
static void
-ggc_recalculate_in_use_p (p)
- page_entry *p;
+ggc_recalculate_in_use_p (page_entry *p)
{
unsigned int i;
size_t num_objects;
previous ggc_push_context are migrated to the outer context. */
void
-ggc_pop_context ()
+ggc_pop_context (void)
{
unsigned long omask;
unsigned int depth, i, e;
G.context_depth_allocations &= omask - 1;
G.context_depth_collections &= omask - 1;
- /* The G.depth array is shortend so that the last index is the
+ /* The G.depth array is shortened so that the last index is the
context_depth of the top element of by_depth. */
if (depth+1 < G.depth_in_use)
e = G.depth[depth+1];
/* We might not have any PTEs of depth depth. */
if (depth < G.depth_in_use)
- {
+ {
/* First we go through all the pages at depth depth to
recalculate the in use bits. */
/* Unmark all objects. */
static inline void
-clear_marks ()
+clear_marks (void)
{
unsigned order;
because the `mark' bit doubles as an `unused' bit. */
static inline void
-sweep_pages ()
+sweep_pages (void)
{
unsigned order;
/* Loop until all entries have been examined. */
done = (p == last);
-
+
num_objects = OBJECTS_IN_PAGE (p);
/* Add all live objects on this page to the count of
/* Clobber all free objects. */
static inline void
-poison_pages ()
+poison_pages (void)
{
unsigned order;
/* Top level mark-and-sweep routine. */
void
-ggc_collect ()
+ggc_collect (void)
{
/* Avoid frequent unnecessary work by skipping collection if the
total allocations haven't expanded much since the last
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
void
-ggc_print_statistics ()
+ggc_print_statistics (void)
{
struct ggc_statistics stats;
unsigned int i;
/* Collect some information about the various sizes of
allocation. */
- fprintf (stderr, "\n%-5s %10s %10s %10s\n",
+ fprintf (stderr, "%-5s %10s %10s %10s\n",
"Size", "Allocated", "Used", "Overhead");
for (i = 0; i < NUM_ORDERS; ++i)
{
for (p = G.pages[i]; p; p = p->next)
{
allocated += p->bytes;
- in_use +=
+ in_use +=
(OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
overhead += (sizeof (page_entry) - sizeof (long)
SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
SCALE (G.allocated), LABEL(G.allocated),
SCALE (total_overhead), LABEL (total_overhead));
+
+#ifdef GATHER_STATISTICS
+ {
+ fprintf (stderr, "Total Overhead: %10lld\n",
+ G.stats.total_overhead);
+ fprintf (stderr, "Total Allocated: %10lld\n",
+ G.stats.total_allocated);
+
+ fprintf (stderr, "Total Overhead under 32B: %10lld\n",
+ G.stats.total_overhead_under32);
+ fprintf (stderr, "Total Allocated under 32B: %10lld\n",
+ G.stats.total_allocated_under32);
+ fprintf (stderr, "Total Overhead under 64B: %10lld\n",
+ G.stats.total_overhead_under64);
+ fprintf (stderr, "Total Allocated under 64B: %10lld\n",
+ G.stats.total_allocated_under64);
+ fprintf (stderr, "Total Overhead under 128B: %10lld\n",
+ G.stats.total_overhead_under128);
+ fprintf (stderr, "Total Allocated under 128B: %10lld\n",
+ G.stats.total_allocated_under128);
+
+ for (i = 0; i < NUM_ORDERS; i++)
+ if (G.stats.total_overhead_per_order[i])
+	fprintf (stderr, "Total Overhead page size %7d: %10lld\n",
+		 (int) OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]);
+ }
+#endif
}
\f
struct ggc_pch_data
{
- struct ggc_pch_ondisk
+ struct ggc_pch_ondisk
{
unsigned totals[NUM_ORDERS];
} d;
};
struct ggc_pch_data *
-init_ggc_pch ()
+init_ggc_pch (void)
{
return xcalloc (sizeof (struct ggc_pch_data), 1);
}
-void
-ggc_pch_count_object (d, x, size)
- struct ggc_pch_data *d;
- void *x ATTRIBUTE_UNUSED;
- size_t size;
+void
+ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
+ size_t size, bool is_string ATTRIBUTE_UNUSED)
{
unsigned order;
while (size > OBJECT_SIZE (order))
order++;
}
-
+
d->d.totals[order]++;
}
-
+
size_t
-ggc_pch_total_size (d)
- struct ggc_pch_data *d;
+ggc_pch_total_size (struct ggc_pch_data *d)
{
size_t a = 0;
unsigned i;
}
void
-ggc_pch_this_base (d, base)
- struct ggc_pch_data *d;
- void *base;
+ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
size_t a = (size_t) base;
unsigned i;
-
+
for (i = 0; i < NUM_ORDERS; i++)
{
d->base[i] = a;
char *
-ggc_pch_alloc_object (d, x, size)
- struct ggc_pch_data *d;
- void *x ATTRIBUTE_UNUSED;
- size_t size;
+ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
+ size_t size, bool is_string ATTRIBUTE_UNUSED)
{
unsigned order;
char *result;
-
+
if (size <= 256)
order = size_lookup[size];
else
return result;
}
-void
-ggc_pch_prepare_write (d, f)
- struct ggc_pch_data * d ATTRIBUTE_UNUSED;
- FILE * f ATTRIBUTE_UNUSED;
+void
+ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
+ FILE *f ATTRIBUTE_UNUSED)
{
/* Nothing to do. */
}
void
-ggc_pch_write_object (d, f, x, newx, size)
- struct ggc_pch_data * d ATTRIBUTE_UNUSED;
- FILE *f;
- void *x;
- void *newx ATTRIBUTE_UNUSED;
- size_t size;
+ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
+ FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
+ size_t size, bool is_string ATTRIBUTE_UNUSED)
{
unsigned order;
+ static const char emptyBytes[256];
if (size <= 256)
order = size_lookup[size];
while (size > OBJECT_SIZE (order))
order++;
}
-
+
if (fwrite (x, size, 1, f) != 1)
- fatal_io_error ("can't write PCH file");
+ fatal_error ("can't write PCH file: %m");
- /* In the current implementation, SIZE is always equal to
- OBJECT_SIZE (order) and so the fseek is never executed. */
- if (size != OBJECT_SIZE (order)
- && fseek (f, OBJECT_SIZE (order) - size, SEEK_CUR) != 0)
- fatal_io_error ("can't write PCH file");
+  /* If SIZE is not the same as OBJECT_SIZE (order), then we need to pad
+     the object out to OBJECT_SIZE (order).  This happens for strings.  */
+
+  if (size != OBJECT_SIZE (order))
+    {
+      unsigned padding = OBJECT_SIZE (order) - size;
+
+      /* To speed small writes, we use a nulled-out array that's larger
+         than most padding requests as the source for our null bytes.  This
+         permits us to do the padding with fwrite () rather than fseek (),
+         and limits the chance that the OS may try to flush any outstanding
+         writes.  */
+      if (padding <= sizeof (emptyBytes))
+        {
+          if (fwrite (emptyBytes, 1, padding, f) != padding)
+            fatal_error ("can't write PCH file: %m");
+        }
+      else
+        {
+          /* Larger than our buffer?  Just default to fseek.  */
+          if (fseek (f, padding, SEEK_CUR) != 0)
+            fatal_error ("can't write PCH file: %m");
+        }
+    }
d->written[order]++;
if (d->written[order] == d->d.totals[order]
&& fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
G.pagesize),
SEEK_CUR) != 0)
- fatal_io_error ("can't write PCH file");
+ fatal_error ("can't write PCH file: %m");
}
void
-ggc_pch_finish (d, f)
- struct ggc_pch_data * d;
- FILE *f;
+ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
- fatal_io_error ("can't write PCH file");
+ fatal_error ("can't write PCH file: %m");
free (d);
}
front. */
static void
-move_ptes_to_front (count_old_page_tables, count_new_page_tables)
- int count_old_page_tables;
- int count_new_page_tables;
+move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
unsigned i;
page_entry **new_by_depth;
unsigned long **new_save_in_use;
- new_by_depth = (page_entry **) xmalloc (G.by_depth_max * sizeof (page_entry *));
- new_save_in_use = (unsigned long **) xmalloc (G.by_depth_max * sizeof (unsigned long *));
+ new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
+ new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
memcpy (&new_by_depth[0],
&G.by_depth[count_old_page_tables],
free (G.by_depth);
free (G.save_in_use);
-
+
G.by_depth = new_by_depth;
G.save_in_use = new_save_in_use;
}
void
-ggc_pch_read (f, addr)
- FILE *f;
- void *addr;
+ggc_pch_read (FILE *f, void *addr)
{
struct ggc_pch_ondisk d;
unsigned i;
/* We've just read in a PCH file. So, every object that used to be
allocated is now free. */
clear_marks ();
-#ifdef GGC_POISON
+#ifdef ENABLE_GC_CHECKING
poison_pages ();
#endif
/* Allocate the appropriate page-table entries for the pages read from
the PCH file. */
if (fread (&d, sizeof (d), 1, f) != 1)
- fatal_io_error ("can't read PCH file");
-
+ fatal_error ("can't read PCH file: %m");
+
for (i = 0; i < NUM_ORDERS; i++)
{
struct page_entry *entry;
bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
num_objs = bytes / OBJECT_SIZE (i);
- entry = xcalloc (1, (sizeof (struct page_entry)
+ entry = xcalloc (1, (sizeof (struct page_entry)
- sizeof (long)
+ BITMAP_SIZE (num_objs + 1)));
entry->bytes = bytes;
entry->num_free_objects = 0;
entry->order = i;
- for (j = 0;
+ for (j = 0;
j + HOST_BITS_PER_LONG <= num_objs + 1;
j += HOST_BITS_PER_LONG)
entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
for (; j < num_objs + 1; j++)
- entry->in_use_p[j / HOST_BITS_PER_LONG]
+ entry->in_use_p[j / HOST_BITS_PER_LONG]
|= 1L << (j % HOST_BITS_PER_LONG);
- for (pte = entry->page;
- pte < entry->page + entry->bytes;
+ for (pte = entry->page;
+ pte < entry->page + entry->bytes;
pte += G.pagesize)
set_page_table_entry (pte, entry);