/* "Bag-of-pages" garbage collector for the GNU compiler.
- Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
+ Free Software Foundation, Inc.
This file is part of GCC.
#include "timevar.h"
#include "params.h"
#ifdef ENABLE_VALGRIND_CHECKING
-# ifdef HAVE_MEMCHECK_H
-# include <memcheck.h>
+# ifdef HAVE_VALGRIND_MEMCHECK_H
+# include <valgrind/memcheck.h>
+# elif defined HAVE_MEMCHECK_H
+# include <memcheck.h>
# else
-# include <valgrind.h>
+# include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it. */
#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
#define RTL_SIZE(NSLOTS) \
- (sizeof (struct rtx_def) + ((NSLOTS) - 1) * sizeof (rtunion))
+ (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
#define TREE_EXP_SIZE(OPS) \
(sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
sizeof (struct tree_list),
TREE_EXP_SIZE (2),
RTL_SIZE (2), /* MEM, PLUS, etc. */
- RTL_SIZE (9), /* INSN, CALL_INSN, JUMP_INSN */
+ RTL_SIZE (9), /* INSN */
};
/* The total number of orders. */
this is the last page-entry. */
struct page_entry *next;
+ /* The previous page-entry with objects of the same size, or NULL if
+ this is the first page-entry. The PREV pointer exists solely to
+ keep the cost of ggc_free manageable. */
+ struct page_entry *prev;
+
/* The number of bytes allocated. (This will always be a multiple
of the host system page size.) */
size_t bytes;
better runtime data access pattern. */
unsigned long **save_in_use;
+#ifdef ENABLE_GC_ALWAYS_COLLECT
+ /* List of free objects to be verified as actually free on the
+ next collection. */
+ struct free_object
+ {
+ void *object;
+ struct free_object *next;
+ } *free_object_list;
+#endif
+
#ifdef GATHER_STATISTICS
struct
{
- /* Total memory allocated with ggc_alloc */
+ /* Total memory allocated with ggc_alloc. */
unsigned long long total_allocated;
- /* Total overhead for memory to be allocated with ggc_alloc */
+ /* Total overhead for memory to be allocated with ggc_alloc. */
unsigned long long total_overhead;
/* Total allocations and overhead for sizes less than 32, 64 and 128.
These sizes are interesting because they are typical cache line
- sizes. */
+ sizes. */
unsigned long long total_allocated_under32;
unsigned long long total_overhead_under32;
unsigned long long total_allocated_under128;
unsigned long long total_overhead_under128;
- /* The overhead for each of the allocation orders. */
+ /* The allocations for each of the allocation orders. */
+ unsigned long long total_allocated_per_order[NUM_ORDERS];
+
+ /* The overhead for each of the allocation orders. */
unsigned long long total_overhead_per_order[NUM_ORDERS];
} stats;
#endif
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);
-#ifdef ENABLE_GC_CHECKING
-static void poison_pages (void);
-#endif
-
void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
-\f
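+/* Zone placeholders expected by the zone-allocator interface.  This
+ collector does not segregate allocations into zones, so they remain
+ NULL and the zone argument is ignored by ggc_alloc_zone_stat below.  */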
+struct alloc_zone *rtl_zone = NULL;
+struct alloc_zone *tree_zone = NULL;
+struct alloc_zone *garbage_zone = NULL;
+
/* Push an entry onto G.depth. */
inline static void
if (G.depth_in_use >= G.depth_max)
{
G.depth_max *= 2;
- G.depth = (unsigned int *) xrealloc ((char *) G.depth,
- G.depth_max * sizeof (unsigned int));
+ G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
}
G.depth[G.depth_in_use++] = i;
}
if (G.by_depth_in_use >= G.by_depth_max)
{
G.by_depth_max *= 2;
- G.by_depth = (page_entry **) xrealloc ((char *) G.by_depth,
- G.by_depth_max * sizeof (page_entry *));
- G.save_in_use = (unsigned long **) xrealloc ((char *) G.save_in_use,
- G.by_depth_max * sizeof (unsigned long *));
+ G.by_depth = xrealloc (G.by_depth,
+ G.by_depth_max * sizeof (page_entry *));
+ G.save_in_use = xrealloc (G.save_in_use,
+ G.by_depth_max * sizeof (unsigned long *));
}
G.by_depth[G.by_depth_in_use] = p;
G.save_in_use[G.by_depth_in_use++] = s;
goto found;
/* Not found -- allocate a new table. */
- table = (page_table) xcalloc (1, sizeof(*table));
+ table = xcalloc (1, sizeof (*table));
table->next = G.lookup;
table->high_bits = high_bits;
G.lookup = table;
L2 = LOOKUP_L2 (p);
if (base[L1] == NULL)
- base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
+ base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
base[L1][L2] = entry;
}
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
{
#ifdef HAVE_MMAP_ANON
- char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
- char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE, G.dev_zero_fd, 0);
+ char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
if (page == (char *) MAP_FAILED)
memory order. */
for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
{
- e = (struct page_entry *) xcalloc (1, page_entry_size);
+ e = xcalloc (1, page_entry_size);
e->order = order;
e->bytes = G.pagesize;
e->page = page + (i << G.lg_pagesize);
struct page_entry *e, *f = G.free_pages;
for (a = enda - G.pagesize; a != page; a -= G.pagesize)
{
- e = (struct page_entry *) xcalloc (1, page_entry_size);
+ e = xcalloc (1, page_entry_size);
e->order = order;
e->bytes = G.pagesize;
e->page = a;
#endif
if (entry == NULL)
- entry = (struct page_entry *) xcalloc (1, page_entry_size);
+ entry = xcalloc (1, page_entry_size);
entry->bytes = entry_size;
entry->page = page;
/* For a page that is no longer needed, put it on the free page list. */
-static inline void
+static void
free_page (page_entry *entry)
{
if (GGC_DEBUG_LEVEL >= 2)
8
};
+/* Typed allocation function. Does nothing special in this collector. */
+
+void *
+ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
+ MEM_STAT_DECL)
+{
+ return ggc_alloc_stat (size PASS_MEM_STAT);
+}
+
+/* Zone allocation function. Does nothing special in this collector. */
+
+void *
+ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED
+ MEM_STAT_DECL)
+{
+ return ggc_alloc_stat (size PASS_MEM_STAT);
+}
+
/* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
void *
-ggc_alloc (size_t size)
+ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
- unsigned order, word, bit, object_offset;
+ size_t order, word, bit, object_offset, object_size;
struct page_entry *entry;
void *result;
if (size <= 256)
- order = size_lookup[size];
+ {
+ order = size_lookup[size];
+ object_size = OBJECT_SIZE (order);
+ }
else
{
order = 9;
- while (size > OBJECT_SIZE (order))
+ while (size > (object_size = OBJECT_SIZE (order)))
order++;
}
while (new_entry->context_depth >= G.depth_in_use)
push_depth (G.by_depth_in_use-1);
- /* If this is the only entry, it's also the tail. */
+ /* If this is the only entry, it's also the tail. If it is not
+ the only entry, then we must update the PREV pointer of the
+ ENTRY (G.pages[order]) to point to our new page entry. */
if (entry == NULL)
G.page_tails[order] = new_entry;
+ else
+ entry->prev = new_entry;
- /* Put new pages at the head of the page list. */
+ /* Put new pages at the head of the page list. By definition the
+ entry at the head of the list always has a NULL PREV pointer. */
new_entry->next = entry;
+ new_entry->prev = NULL;
entry = new_entry;
G.pages[order] = new_entry;
/* Next time, try the next bit. */
entry->next_bit_hint = hint + 1;
- object_offset = hint * OBJECT_SIZE (order);
+ object_offset = hint * object_size;
}
/* Set the in-use bit. */
&& entry->next != NULL
&& entry->next->num_free_objects > 0)
{
+ /* We have a new head for the list. */
G.pages[order] = entry->next;
+
+ /* We are moving ENTRY to the end of the page table list.
+ The new page at the head of the list will have NULL in
+ its PREV field and ENTRY will have NULL in its NEXT field. */
+ entry->next->prev = NULL;
entry->next = NULL;
+
+ /* Append ENTRY to the tail of the list. */
+ entry->prev = G.page_tails[order];
G.page_tails[order]->next = entry;
G.page_tails[order] = entry;
}
+#ifdef GATHER_STATISTICS
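+ /* Record the size and overhead of this allocation for the
+ detailed memory statistics. */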
+ ggc_record_overhead (object_size, object_size - size PASS_MEM_STAT);
+#endif
/* Calculate the object's address. */
result = entry->page + object_offset;
exact same semantics in presence of memory bugs, regardless of
ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, OBJECT_SIZE (order)));
+ VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));
/* `Poison' the entire allocated object, including any padding at
the end. */
- memset (result, 0xaf, OBJECT_SIZE (order));
+ memset (result, 0xaf, object_size);
/* Make the bytes after the end of the object unaccessible. Discard the
handle to avoid handle leak. */
VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
- OBJECT_SIZE (order) - size));
+ object_size - size));
#endif
/* Tell Valgrind that the memory is there, but its content isn't
/* Keep track of how many bytes are being allocated. This
information is used in deciding when to collect. */
- G.allocated += OBJECT_SIZE (order);
+ G.allocated += object_size;
#ifdef GATHER_STATISTICS
{
- G.stats.total_overhead += OBJECT_SIZE (order) - size;
- G.stats.total_overhead_per_order[order] += OBJECT_SIZE (order) - size;
- G.stats.total_allocated += OBJECT_SIZE(order);
-
- if (size <= 32){
- G.stats.total_overhead_under32 += OBJECT_SIZE (order) - size;
- G.stats.total_allocated_under32 += OBJECT_SIZE(order);
- }
+ size_t overhead = object_size - size;
- if (size <= 64){
- G.stats.total_overhead_under64 += OBJECT_SIZE (order) - size;
- G.stats.total_allocated_under64 += OBJECT_SIZE(order);
- }
-
- if (size <= 128){
- G.stats.total_overhead_under128 += OBJECT_SIZE (order) - size;
- G.stats.total_allocated_under128 += OBJECT_SIZE(order);
- }
+ G.stats.total_overhead += overhead;
+ G.stats.total_allocated += object_size;
+ G.stats.total_overhead_per_order[order] += overhead;
+ G.stats.total_allocated_per_order[order] += object_size;
+ if (size <= 32)
+ {
+ G.stats.total_overhead_under32 += overhead;
+ G.stats.total_allocated_under32 += object_size;
+ }
+ if (size <= 64)
+ {
+ G.stats.total_overhead_under64 += overhead;
+ G.stats.total_allocated_under64 += object_size;
+ }
+ if (size <= 128)
+ {
+ G.stats.total_overhead_under128 += overhead;
+ G.stats.total_allocated_under128 += object_size;
+ }
}
#endif
-
+
if (GGC_DEBUG_LEVEL >= 3)
fprintf (G.debug_file,
"Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
- (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
+ (unsigned long) size, (unsigned long) object_size, result,
(void *) entry);
return result;
page_entry *pe = lookup_page_table_entry (p);
return OBJECT_SIZE (pe->order);
}
+
+/* Release the memory for object P. */
+
+void
+ggc_free (void *p)
+{
+ page_entry *pe = lookup_page_table_entry (p);
+ size_t order = pe->order;
+ size_t size = OBJECT_SIZE (order);
+
+ if (GGC_DEBUG_LEVEL >= 3)
+ fprintf (G.debug_file,
+ "Freeing object, actual size=%lu, at %p on %p\n",
+ (unsigned long) size, p, (void *) pe);
+
+#ifdef ENABLE_GC_CHECKING
+ /* Poison the data, to indicate the data is garbage. */
+ VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
+ memset (p, 0xa5, size);
+#endif
+ /* Let valgrind know the object is free. */
+ VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));
+
+#ifdef ENABLE_GC_ALWAYS_COLLECT
+ /* In the completely-anal-checking mode, we do *not* immediately free
+ the data, but instead verify that the data is *actually* not
+ reachable the next time we collect. */
+ {
+ struct free_object *fo = xmalloc (sizeof (struct free_object));
+ fo->object = p;
+ fo->next = G.free_object_list;
+ G.free_object_list = fo;
+ }
+#else
+ {
+ unsigned int bit_offset, word, bit;
+
+ G.allocated -= size;
+
+ /* Mark the object not-in-use. */
+ bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
+ word = bit_offset / HOST_BITS_PER_LONG;
+ bit = bit_offset % HOST_BITS_PER_LONG;
+ pe->in_use_p[word] &= ~(1UL << bit);
+
+ if (pe->num_free_objects++ == 0)
+ {
+ page_entry *p, *q;
+
+ /* If the page is completely full, then it's supposed to
+ be after all pages that aren't. Since we've freed one
+ object from a page that was full, we need to move the
+ page to the head of the list.
+
+ PE is the node we want to move. Q is the previous node
+ and P is the next node in the list. */
+ q = pe->prev;
+ if (q && q->num_free_objects == 0)
+ {
+ p = pe->next;
+
+ q->next = p;
+
+ /* If PE was at the end of the list, then Q becomes the
+ new end of the list. If PE was not the end of the
+ list, then we need to update the PREV field for P. */
+ if (!p)
+ G.page_tails[order] = q;
+ else
+ p->prev = q;
+
+ /* Move PE to the head of the list. */
+ pe->next = G.pages[order];
+ pe->prev = NULL;
+ G.pages[order]->prev = pe;
+ G.pages[order] = pe;
+ }
+
+ /* Reset the hint bit to point to the only free object. */
+ pe->next_bit_hint = bit_offset;
+ }
+ }
+#endif
+}
\f
/* Subroutine of init_ggc which computes the pair of numbers used to
perform division by OBJECT_SIZE (order) and fills in inverse_table[].
}
/* We have a good page, might as well hold onto it... */
- e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
+ e = xcalloc (1, sizeof (struct page_entry));
e->bytes = G.pagesize;
e->page = p;
e->next = G.free_pages;
G.depth_in_use = 0;
G.depth_max = 10;
- G.depth = (unsigned int *) xmalloc (G.depth_max * sizeof (unsigned int));
+ G.depth = xmalloc (G.depth_max * sizeof (unsigned int));
G.by_depth_in_use = 0;
G.by_depth_max = INITIAL_PTE_COUNT;
- G.by_depth = (page_entry **) xmalloc (G.by_depth_max * sizeof (page_entry *));
- G.save_in_use = (unsigned long **) xmalloc (G.by_depth_max * sizeof (unsigned long *));
+ G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
+ G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
+}
+
+/* Start a new GGC zone. */
+
+struct alloc_zone *
+new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
+{
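+ /* This collector does not use zones, so a NULL handle suffices. */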
+ return NULL;
+}
+
+/* Destroy a GGC zone. */
+void
+destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
+{
}
/* Increment the `GC context'. Objects allocated in an outer context
G.context_depth_allocations &= omask - 1;
G.context_depth_collections &= omask - 1;
- /* The G.depth array is shortend so that the last index is the
+ /* The G.depth array is shortened so that the last index is the
context_depth of the top element of by_depth. */
if (depth+1 < G.depth_in_use)
e = G.depth[depth+1];
\f
/* Unmark all objects. */
-static inline void
+static void
clear_marks (void)
{
unsigned order;
/* Free all empty pages. Partially empty pages need no attention
because the `mark' bit doubles as an `unused' bit. */
-static inline void
+static void
sweep_pages (void)
{
unsigned order;
/* Remove the page if it's empty. */
else if (live_objects == 0)
{
+ /* If P was the first page in the list, then NEXT
+ becomes the new first page in the list, otherwise
+ splice P out of the forward pointers. */
if (! previous)
G.pages[order] = next;
else
previous->next = next;
+
+ /* Splice P out of the back pointers too. */
+ if (next)
+ next->prev = previous;
/* Are we removing the last element? */
if (p == G.page_tails[order])
{
/* Move p to the end of the list. */
p->next = NULL;
+ p->prev = G.page_tails[order];
G.page_tails[order]->next = p;
/* Update the tail pointer... */
G.pages[order] = next;
else
previous->next = next;
+
+ /* And update the backpointer in NEXT if necessary. */
+ if (next)
+ next->prev = previous;
+
p = previous;
}
}
else if (p != G.pages[order])
{
previous->next = p->next;
+
+ /* Update the backchain in the next node if it exists. */
+ if (p->next)
+ p->next->prev = previous;
+
+ /* Move P to the head of the list. */
p->next = G.pages[order];
+ p->prev = NULL;
+ G.pages[order]->prev = p;
+
+ /* Update the head pointer. */
G.pages[order] = p;
+
/* Are we moving the last element? */
if (G.page_tails[order] == p)
G.page_tails[order] = previous;
#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects. */
-static inline void
+static void
poison_pages (void)
{
unsigned order;
}
}
}
+#else
+#define poison_pages()
+#endif
+
+#ifdef ENABLE_GC_ALWAYS_COLLECT
+/* Validate that the reportedly free objects actually are. */
+
+static void
+validate_free_objects (void)
+{
+ struct free_object *f, *next, *still_free = NULL;
+
+ for (f = G.free_object_list; f ; f = next)
+ {
+ page_entry *pe = lookup_page_table_entry (f->object);
+ size_t bit, word;
+
+ bit = OFFSET_TO_BIT ((char *) f->object - pe->page, pe->order);
+ word = bit / HOST_BITS_PER_LONG;
+ bit = bit % HOST_BITS_PER_LONG;
+ next = f->next;
+
+ /* Make certain it isn't visible from any root. Notice that we
+ do this check before sweep_pages merges save_in_use_p. */
+ if (pe->in_use_p[word] & (1UL << bit))
+ abort ();
+
+ /* If the object comes from an outer context, then retain the
+ free_object entry, so that we can verify that the address
+ isn't live on the stack in some outer context. */
+ if (pe->context_depth != G.context_depth)
+ {
+ f->next = still_free;
+ still_free = f;
+ }
+ else
+ free (f);
+ }
+
+ G.free_object_list = still_free;
+}
+#else
+#define validate_free_objects()
#endif
/* Top level mark-and-sweep routine. */
timevar_push (TV_GC);
if (!quiet_flag)
fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
+ if (GGC_DEBUG_LEVEL >= 2)
+ fprintf (G.debug_file, "BEGIN COLLECTING\n");
/* Zero the total allocated bytes. This will be recalculated in the
sweep phase. */
clear_marks ();
ggc_mark_roots ();
-
-#ifdef ENABLE_GC_CHECKING
poison_pages ();
-#endif
-
+ validate_free_objects ();
sweep_pages ();
G.allocated_last_gc = G.allocated;
if (!quiet_flag)
fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
+ if (GGC_DEBUG_LEVEL >= 2)
+ fprintf (G.debug_file, "END COLLECTING\n");
}
/* Print allocation statistics. */
/* Collect some information about the various sizes of
allocation. */
+ fprintf (stderr,
+ "Memory still allocated at the end of the compilation process\n");
fprintf (stderr, "%-5s %10s %10s %10s\n",
"Size", "Allocated", "Used", "Overhead");
for (i = 0; i < NUM_ORDERS; ++i)
#ifdef GATHER_STATISTICS
{
+ fprintf (stderr,
+ "\nTotal allocations and overheads during the compilation process\n");
+
fprintf (stderr, "Total Overhead: %10lld\n",
G.stats.total_overhead);
fprintf (stderr, "Total Allocated: %10lld\n",
G.stats.total_allocated_under128);
for (i = 0; i < NUM_ORDERS; i++)
- if (G.stats.total_overhead_per_order[i])
- fprintf (stderr, "Total Overhead page size %7d: %10lld\n",
- OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]);
+ if (G.stats.total_allocated_per_order[i])
+ {
+ fprintf (stderr, "Total Overhead page size %7d: %10lld\n",
+ OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]);
+ fprintf (stderr, "Total Allocated page size %7d: %10lld\n",
+ OBJECT_SIZE (i), G.stats.total_allocated_per_order[i]);
+ }
}
#endif
}
void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
- size_t size)
+ size_t size, bool is_string ATTRIBUTE_UNUSED)
{
unsigned order;
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
- size_t size)
+ size_t size, bool is_string ATTRIBUTE_UNUSED)
{
unsigned order;
char *result;
void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
- size_t size)
+ size_t size, bool is_string ATTRIBUTE_UNUSED)
{
unsigned order;
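+ /* A block of zero bytes used below to pad objects out to
+ OBJECT_SIZE (order) when writing them to the PCH file. */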
+ static const char emptyBytes[256];
if (size <= 256)
order = size_lookup[size];
if (fwrite (x, size, 1, f) != 1)
fatal_error ("can't write PCH file: %m");
- /* In the current implementation, SIZE is always equal to
- OBJECT_SIZE (order) and so the fseek is never executed. */
- if (size != OBJECT_SIZE (order)
- && fseek (f, OBJECT_SIZE (order) - size, SEEK_CUR) != 0)
- fatal_error ("can't write PCH file: %m");
+ /* If SIZE is not the same as OBJECT_SIZE (order), then we need to pad the
+ object out to OBJECT_SIZE (order). This happens for strings. */
+
+ if (size != OBJECT_SIZE (order))
+ {
+ unsigned padding = OBJECT_SIZE (order) - size;
+
+ /* To speed small writes, we use a nulled-out array that's larger
+ than most padding requests as the source for our null bytes. This
+ permits us to do the padding with fwrite() rather than fseek(), and
+ limits the chance that the OS may try to flush any outstanding
+ writes. */
+ if (padding <= sizeof (emptyBytes))
+ {
+ if (fwrite (emptyBytes, 1, padding, f) != padding)
+ fatal_error ("can't write PCH file");
+ }
+ else
+ {
+ /* Larger than our buffer? Just default to fseek. */
+ if (fseek (f, padding, SEEK_CUR) != 0)
+ fatal_error ("can't write PCH file");
+ }
+ }
d->written[order]++;
if (d->written[order] == d->d.totals[order]
page_entry **new_by_depth;
unsigned long **new_save_in_use;
- new_by_depth = (page_entry **) xmalloc (G.by_depth_max * sizeof (page_entry *));
- new_save_in_use = (unsigned long **) xmalloc (G.by_depth_max * sizeof (unsigned long *));
+ new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
+ new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
memcpy (&new_by_depth[0],
&G.by_depth[count_old_page_tables],
/* We've just read in a PCH file. So, every object that used to be
allocated is now free. */
clear_marks ();
-#ifdef GGC_POISON
+#ifdef ENABLE_GC_CHECKING
poison_pages ();
#endif