X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fggc-page.c;h=ee796cbb7e9fd76f87666651313a03dd88f9ee56;hb=b3f86a4f3fb7430f84442dcbb34db85625c7d44c;hp=ea637b1b63c2869f4eed2362cebe3d4d639e9249;hpb=a7779e7501b3b5de7cdc621f737f72098c2d620c;p=pf3gnuchains%2Fgcc-fork.git

diff --git a/gcc/ggc-page.c b/gcc/ggc-page.c
index ea637b1b63c..ee796cbb7e9 100644
--- a/gcc/ggc-page.c
+++ b/gcc/ggc-page.c
@@ -1,6 +1,6 @@
 /* "Bag-of-pages" garbage collector for the GNU compiler.
-   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007
-   Free Software Foundation, Inc.
+   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009,
+   2010, 2011 Free Software Foundation, Inc.

 This file is part of GCC.

@@ -25,43 +25,36 @@ along with GCC; see the file COPYING3.  If not see
 #include "tree.h"
 #include "rtl.h"
 #include "tm_p.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
 #include "flags.h"
 #include "ggc.h"
+#include "ggc-internal.h"
 #include "timevar.h"
 #include "params.h"
 #include "tree-flow.h"
+#include "cfgloop.h"
+#include "plugin.h"

 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
    file open.  Prefer either to valloc.  */
 #ifdef HAVE_MMAP_ANON
 # undef HAVE_MMAP_DEV_ZERO
-
-# include <sys/mman.h>
-# ifndef MAP_FAILED
-#  define MAP_FAILED -1
-# endif
-# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
-#  define MAP_ANONYMOUS MAP_ANON
-# endif
 # define USING_MMAP
-
 #endif

 #ifdef HAVE_MMAP_DEV_ZERO
-
-# include <sys/mman.h>
-# ifndef MAP_FAILED
-#  define MAP_FAILED -1
-# endif
 # define USING_MMAP
-
 #endif

 #ifndef USING_MMAP
 #define USING_MALLOC_PAGE_GROUPS
 #endif

+#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
+    && defined(USING_MMAP)
+# define USING_MADVISE
+#endif
+
 /* Strategy:

    This garbage-collecting allocator allocates objects on one of a set
@@ -156,6 +149,24 @@ along with GCC; see the file COPYING3.  If not see
 #define OFFSET_TO_BIT(OFFSET, ORDER) \
   (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))

+/* We use this structure to determine the alignment required for
+   allocations.  For power-of-two sized allocations, that's not a
+   problem, but it does matter for odd-sized allocations.
+   We do not care about alignment for floating-point types.  */
+
+struct max_alignment {
+  char c;
+  union {
+    HOST_WIDEST_INT i;
+    void *p;
+  } u;
+};
+
+/* The biggest alignment required.  */
+
+#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
+
+
 /* The number of extra orders, not corresponding to power-of-two
    sized objects.  */

@@ -172,46 +183,35 @@ along with GCC; see the file COPYING3.  If not see
    thing you need to do to add a new special allocation size.  */

 static const size_t extra_order_size_table[] = {
-  sizeof (struct stmt_ann_d),
-  sizeof (struct var_ann_d),
+  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
+     There are a lot of structures with these sizes and explicitly
+     listing them risks orders being dropped because they changed size.  */
+  MAX_ALIGNMENT * 3,
+  MAX_ALIGNMENT * 5,
+  MAX_ALIGNMENT * 6,
+  MAX_ALIGNMENT * 7,
+  MAX_ALIGNMENT * 9,
+  MAX_ALIGNMENT * 10,
+  MAX_ALIGNMENT * 11,
+  MAX_ALIGNMENT * 12,
+  MAX_ALIGNMENT * 13,
+  MAX_ALIGNMENT * 14,
+  MAX_ALIGNMENT * 15,
   sizeof (struct tree_decl_non_common),
   sizeof (struct tree_field_decl),
   sizeof (struct tree_parm_decl),
   sizeof (struct tree_var_decl),
-  sizeof (struct tree_list),
-  sizeof (struct tree_ssa_name),
+  sizeof (struct tree_type_non_common),
   sizeof (struct function),
   sizeof (struct basic_block_def),
-  sizeof (bitmap_element),
-  sizeof (bitmap_head),
-  /* PHI nodes with one to three arguments are already covered by the
-     above sizes.  */
-  sizeof (struct tree_phi_node) + sizeof (struct phi_arg_d) * 3,
-  TREE_EXP_SIZE (2),
-  RTL_SIZE (2),			/* MEM, PLUS, etc. */
-  RTL_SIZE (9),			/* INSN */
+  sizeof (struct cgraph_node),
+  sizeof (struct loop),
 };

 /* The total number of orders.  */

 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

-/* We use this structure to determine the alignment required for
-   allocations.  For power-of-two sized allocations, that's not a
-   problem, but it does matter for odd-sized allocations.  */
-
-struct max_alignment {
-  char c;
-  union {
-    HOST_WIDEST_INT i;
-    long double d;
-  } u;
-};
-
-/* The biggest alignment required.  */
-
-#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
-
 /* Compute the smallest nonnegative number which when added to X gives
    a multiple of F.  */

@@ -221,6 +221,10 @@ struct max_alignment {

 #define ROUND_UP(x, f) (CEIL (x, f) * (f))

+/* Round X to the next multiple of the page size.  */
+
+#define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))
+
 /* The Ith entry is the number of objects on a page of order I.  */

 static unsigned objects_per_page_table[NUM_ORDERS];

@@ -282,6 +286,9 @@ typedef struct page_entry
   /* The lg of size of objects allocated from this page.  */
   unsigned char order;

+  /* Discarded page?  */
+  bool discarded;
+
   /* A bit vector indicating whether or not objects are in use.  The
      Nth bit is one if the Nth object on this page is allocated.  This
      array is dynamically sized.  */
@@ -326,6 +333,16 @@ typedef struct page_table_chain

 #endif

+#ifdef ENABLE_GC_ALWAYS_COLLECT
+/* List of free objects to be verified as actually free on the
+   next collection.  */
+struct free_object
+{
+  void *object;
+  struct free_object *next;
+};
+#endif
+
 /* The rest of the global variables.  */
 static struct globals
 {
@@ -386,7 +403,7 @@ static struct globals
   /* Maximum number of elements that can be used before resizing.  */
   unsigned int depth_max;

-  /* Each element of this arry is an index in by_depth where the given
+  /* Each element of this array is an index in by_depth where the given
      depth starts.  This structure is indexed by that given depth we are
      interested in.  */
   unsigned int *depth;

@@ -412,34 +429,30 @@ static struct globals
 #ifdef ENABLE_GC_ALWAYS_COLLECT
   /* List of free objects to be verified as actually free on the
      next collection.  */
-  struct free_object
-  {
-    void *object;
-    struct free_object *next;
-  } *free_object_list;
+  struct free_object *free_object_list;
 #endif

 #ifdef GATHER_STATISTICS
   struct
   {
-    /* Total memory allocated with ggc_alloc.  */
+    /* Total GC-allocated memory.  */
     unsigned long long total_allocated;

-    /* Total overhead for memory to be allocated with ggc_alloc.  */
+    /* Total overhead for GC-allocated memory.  */
     unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
These sizes are interesting because they are typical cache line sizes. */ - + unsigned long long total_allocated_under32; unsigned long long total_overhead_under32; - + unsigned long long total_allocated_under64; unsigned long long total_overhead_under64; - + unsigned long long total_allocated_under128; unsigned long long total_overhead_under128; - + /* The allocations for each of the allocation orders. */ unsigned long long total_allocated_per_order[NUM_ORDERS]; @@ -461,7 +474,7 @@ static struct globals can override this by defining GGC_QUIRE_SIZE explicitly. */ #ifndef GGC_QUIRE_SIZE # ifdef USING_MMAP -# define GGC_QUIRE_SIZE 256 +# define GGC_QUIRE_SIZE 512 /* 2MB for 4K pages */ # else # define GGC_QUIRE_SIZE 16 # endif @@ -474,7 +487,7 @@ static int ggc_allocated_p (const void *); static page_entry *lookup_page_table_entry (const void *); static void set_page_table_entry (void *, page_entry *); #ifdef USING_MMAP -static char *alloc_anon (char *, size_t); +static char *alloc_anon (char *, size_t, bool check); #endif #ifdef USING_MALLOC_PAGE_GROUPS static size_t page_group_index (char *, char *); @@ -503,7 +516,7 @@ push_depth (unsigned int i) if (G.depth_in_use >= G.depth_max) { G.depth_max *= 2; - G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int)); + G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max); } G.depth[G.depth_in_use++] = i; } @@ -516,10 +529,9 @@ push_by_depth (page_entry *p, unsigned long *s) if (G.by_depth_in_use >= G.by_depth_max) { G.by_depth_max *= 2; - G.by_depth = xrealloc (G.by_depth, - G.by_depth_max * sizeof (page_entry *)); - G.save_in_use = xrealloc (G.save_in_use, - G.by_depth_max * sizeof (unsigned long *)); + G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max); + G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use, + G.by_depth_max); } G.by_depth[G.by_depth_in_use] = p; G.save_in_use[G.by_depth_in_use++] = s; @@ -611,7 +623,7 @@ set_page_table_entry (void *p, page_entry *entry) goto found; /* Not found -- allocate a new table. */ - table = xcalloc (1, sizeof(*table)); + table = XCNEW (struct page_table_chain); table->next = G.lookup; table->high_bits = high_bits; G.lookup = table; @@ -631,7 +643,7 @@ found: /* Prints the page-entry for object size ORDER, for debugging. */ -void +DEBUG_FUNCTION void debug_print_page_list (int order) { page_entry *p; @@ -654,19 +666,21 @@ debug_print_page_list (int order) compile error unless exactly one of the HAVE_* is defined. */ static inline char * -alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size) +alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check) { #ifdef HAVE_MMAP_ANON - char *page = mmap (pref, size, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); #endif #ifdef HAVE_MMAP_DEV_ZERO - char *page = mmap (pref, size, PROT_READ | PROT_WRITE, - MAP_PRIVATE, G.dev_zero_fd, 0); + char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE, + MAP_PRIVATE, G.dev_zero_fd, 0); #endif if (page == (char *) MAP_FAILED) { + if (!check) + return NULL; perror ("virtual memory exhausted"); exit (FATAL_EXIT_CODE); } @@ -675,7 +689,7 @@ alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size) G.bytes_mapped += size; /* Pretend we don't have access to the allocated pages. We'll enable - access to smaller pieces of the area in ggc_alloc. Discard the + access to smaller pieces of the area in ggc_internal_alloc. Discard the handle to avoid handle leak. 
*/ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size)); @@ -729,6 +743,7 @@ alloc_page (unsigned order) entry_size = num_objects * OBJECT_SIZE (order); if (entry_size < G.pagesize) entry_size = G.pagesize; + entry_size = PAGE_ALIGN (entry_size); entry = NULL; page = NULL; @@ -740,6 +755,10 @@ alloc_page (unsigned order) if (p != NULL) { + if (p->discarded) + G.bytes_mapped += p->bytes; + p->discarded = false; + /* Recycle the allocated memory from this page ... */ *pp = p->next; page = p->page; @@ -764,15 +783,20 @@ alloc_page (unsigned order) extras on the freelist. (Can only do this optimization with mmap for backing store.) */ struct page_entry *e, *f = G.free_pages; - int i; + int i, entries = GGC_QUIRE_SIZE; - page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE); + page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false); + if (page == NULL) + { + page = alloc_anon(NULL, G.pagesize, true); + entries = 1; + } /* This loop counts down so that the chain will be in ascending memory order. */ - for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--) + for (i = entries - 1; i >= 1; i--) { - e = xcalloc (1, page_entry_size); + e = XCNEWVAR (struct page_entry, page_entry_size); e->order = order; e->bytes = G.pagesize; e->page = page + (i << G.lg_pagesize); @@ -783,7 +807,7 @@ alloc_page (unsigned order) G.free_pages = f; } else - page = alloc_anon (NULL, entry_size); + page = alloc_anon (NULL, entry_size, true); #endif #ifdef USING_MALLOC_PAGE_GROUPS else @@ -800,7 +824,7 @@ alloc_page (unsigned order) alloc_size = GGC_QUIRE_SIZE * G.pagesize; else alloc_size = entry_size + G.pagesize - 1; - allocation = xmalloc (alloc_size); + allocation = XNEWVEC (char, alloc_size); page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize); head_slop = page - allocation; @@ -843,7 +867,7 @@ alloc_page (unsigned order) struct page_entry *e, *f = G.free_pages; for (a = enda - G.pagesize; a != page; a -= G.pagesize) { - e = xcalloc (1, page_entry_size); + e = XCNEWVAR (struct page_entry, page_entry_size); e->order = order; e->bytes = G.pagesize; e->page = a; @@ -857,7 +881,7 @@ alloc_page (unsigned order) #endif if (entry == NULL) - entry = xcalloc (1, page_entry_size); + entry = XCNEWVAR (struct page_entry, page_entry_size); entry->bytes = entry_size; entry->page = page; @@ -937,7 +961,7 @@ free_page (page_entry *entry) /* We cannot free a page from a context deeper than the current one. */ gcc_assert (entry->context_depth == top->context_depth); - + /* Put top element into freed slot. */ G.by_depth[i] = top; G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1]; @@ -956,7 +980,90 @@ free_page (page_entry *entry) static void release_pages (void) { -#ifdef USING_MMAP +#ifdef USING_MADVISE + page_entry *p, *start_p; + char *start; + size_t len; + size_t mapped_len; + page_entry *next, *prev, *newprev; + size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize; + + /* First free larger continuous areas to the OS. + This allows other allocators to grab these areas if needed. + This is only done on larger chunks to avoid fragmentation. + This does not always work because the free_pages list is only + approximately sorted. 
*/ + + p = G.free_pages; + prev = NULL; + while (p) + { + start = p->page; + start_p = p; + len = 0; + mapped_len = 0; + newprev = prev; + while (p && p->page == start + len) + { + len += p->bytes; + if (!p->discarded) + mapped_len += p->bytes; + newprev = p; + p = p->next; + } + if (len >= free_unit) + { + while (start_p != p) + { + next = start_p->next; + free (start_p); + start_p = next; + } + munmap (start, len); + if (prev) + prev->next = p; + else + G.free_pages = p; + G.bytes_mapped -= mapped_len; + continue; + } + prev = newprev; + } + + /* Now give back the fragmented pages to the OS, but keep the address + space to reuse it next time. */ + + for (p = G.free_pages; p; ) + { + if (p->discarded) + { + p = p->next; + continue; + } + start = p->page; + len = p->bytes; + start_p = p; + p = p->next; + while (p && p->page == start + len) + { + len += p->bytes; + p = p->next; + } + /* Give the page back to the kernel, but don't free the mapping. + This avoids fragmentation in the virtual memory map of the + process. Next time we can reuse it by just touching it. */ + madvise (start, len, MADV_DONTNEED); + /* Don't count those pages as mapped to not touch the garbage collector + unnecessarily. */ + G.bytes_mapped -= len; + while (start_p != p) + { + start_p->discarded = true; + start_p = start_p->next; + } + } +#endif +#if defined(USING_MMAP) && !defined(USING_MADVISE) page_entry *p, *next; char *start; size_t len; @@ -1054,35 +1161,66 @@ static unsigned char size_lookup[NUM_SIZE_LOOKUP] = 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; +/* For a given size of memory requested for allocation, return the + actual size that is going to be allocated, as well as the size + order. */ + +static void +ggc_round_alloc_size_1 (size_t requested_size, + size_t *size_order, + size_t *alloced_size) +{ + size_t order, object_size; + + if (requested_size < NUM_SIZE_LOOKUP) + { + order = size_lookup[requested_size]; + object_size = OBJECT_SIZE (order); + } + else + { + order = 10; + while (requested_size > (object_size = OBJECT_SIZE (order))) + order++; + } + + if (size_order) + *size_order = order; + if (alloced_size) + *alloced_size = object_size; +} + +/* For a given size of memory requested for allocation, return the + actual size that is going to be allocated. */ + +size_t +ggc_round_alloc_size (size_t requested_size) +{ + size_t size = 0; + + ggc_round_alloc_size_1 (requested_size, NULL, &size); + return size; +} + /* Typed allocation function. Does nothing special in this collector. */ void * ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size MEM_STAT_DECL) { - return ggc_alloc_stat (size PASS_MEM_STAT); + return ggc_internal_alloc_stat (size PASS_MEM_STAT); } /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */ void * -ggc_alloc_stat (size_t size MEM_STAT_DECL) +ggc_internal_alloc_stat (size_t size MEM_STAT_DECL) { size_t order, word, bit, object_offset, object_size; struct page_entry *entry; void *result; - if (size < NUM_SIZE_LOOKUP) - { - order = size_lookup[size]; - object_size = OBJECT_SIZE (order); - } - else - { - order = 10; - while (size > (object_size = OBJECT_SIZE (order))) - order++; - } + ggc_round_alloc_size_1 (size, &order, &object_size); /* If there are non-full pages for this size allocation, they are at the head of the list. */ @@ -1256,6 +1394,57 @@ ggc_alloc_stat (size_t size MEM_STAT_DECL) return result; } +/* Mark function for strings. 
*/
+
+void
+gt_ggc_m_S (const void *p)
+{
+  page_entry *entry;
+  unsigned bit, word;
+  unsigned long mask;
+  unsigned long offset;
+
+  if (!p || !ggc_allocated_p (p))
+    return;
+
+  /* Look up the page on which the object is alloced.  */
+  entry = lookup_page_table_entry (p);
+  gcc_assert (entry);
+
+  /* Calculate the index of the object on the page; this is its bit
+     position in the in_use_p bitmap.  Note that because a char* might
+     point to the middle of an object, we need special code here to
+     make sure P points to the start of an object.  */
+  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
+  if (offset)
+    {
+      /* Here we've seen a char* which does not point to the beginning
+         of an allocated object.  We assume it points to the middle of
+         a STRING_CST.  */
+      gcc_assert (offset == offsetof (struct tree_string, str));
+      p = ((const char *) p) - offset;
+      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
+      return;
+    }
+
+  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
+  word = bit / HOST_BITS_PER_LONG;
+  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
+
+  /* If the bit was previously set, skip it.  */
+  if (entry->in_use_p[word] & mask)
+    return;
+
+  /* Otherwise set it, and decrement the free object count.  */
+  entry->in_use_p[word] |= mask;
+  entry->num_free_objects -= 1;
+
+  if (GGC_DEBUG_LEVEL >= 4)
+    fprintf (G.debug_file, "Marking %p\n", p);
+
+  return;
+}
+
 /* If P is not marked, marks it and return false.  Otherwise return true.
    P must have been allocated by the GC allocator; it mustn't point to
    static objects, stack variables, or memory allocated with malloc.  */
@@ -1354,7 +1543,7 @@ ggc_free (void *p)

 #ifdef ENABLE_GC_ALWAYS_COLLECT
   /* In the completely-anal-checking mode, we do *not* immediately free
-     the data, but instead verify that the data is *actually* not 
+     the data, but instead verify that the data is *actually* not
      reachable the next time we collect.  */
   {
     struct free_object *fo = XNEW (struct free_object);
@@ -1381,7 +1570,7 @@ ggc_free (void *p)
      /* If the page is completely full, then it's supposed
         to be after all pages that aren't.  Since we've
         freed one object from a page that was full, we need to move the
-        page to the head of the list. 
+        page to the head of the list.

         PE is the node we want to move.  Q is the previous node
         and P is the next node in the list.  */
@@ -1425,7 +1614,7 @@ ggc_free (void *p)
 static void
 compute_inverse (unsigned order)
 {
-  size_t size, inv; 
+  size_t size, inv;
   unsigned int e;

   size = OBJECT_SIZE (order);
@@ -1471,14 +1660,14 @@ init_ggc (void)
      believe, is an unaligned page allocation, which would cause us to
      hork badly if we tried to use it.  */
   {
-    char *p = alloc_anon (NULL, G.pagesize);
+    char *p = alloc_anon (NULL, G.pagesize, true);
     struct page_entry *e;

     if ((size_t)p & (G.pagesize - 1))
       {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */
-        p = alloc_anon (NULL, G.pagesize);
+        p = alloc_anon (NULL, G.pagesize, true);
        gcc_assert (!((size_t)p & (G.pagesize - 1)));
       }

@@ -1540,20 +1729,6 @@ init_ggc (void)
   G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
 }

-/* Start a new GGC zone.  */
-
-struct alloc_zone *
-new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
-{
-  return NULL;
-}
-
-/* Destroy a GGC zone.  */
-void
-destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
-{
-}
-
 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
    reflects reality.  Recalculate NUM_FREE_OBJECTS as well.
*/ @@ -1615,7 +1790,7 @@ clear_marks (void) if (p->context_depth < G.context_depth) { if (! save_in_use_p (p)) - save_in_use_p (p) = xmalloc (bitmap_size); + save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size); memcpy (save_in_use_p (p), p->in_use_p, bitmap_size); } @@ -1685,7 +1860,7 @@ sweep_pages (void) G.pages[order] = next; else previous->next = next; - + /* Splice P out of the back pointers too. */ if (next) next->prev = previous; @@ -1891,6 +2066,8 @@ ggc_collect (void) /* Indicate that we've seen collections at this context depth. */ G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1; + invoke_plugin_callbacks (PLUGIN_GGC_START, NULL); + clear_marks (); ggc_mark_roots (); #ifdef GATHER_STATISTICS @@ -1902,6 +2079,8 @@ ggc_collect (void) G.allocated_last_gc = G.allocated; + invoke_plugin_callbacks (PLUGIN_GGC_END, NULL); + timevar_pop (TV_GC); if (!quiet_flag) @@ -1981,7 +2160,7 @@ ggc_print_statistics (void) SCALE (G.allocated), STAT_LABEL(G.allocated), SCALE (total_overhead), STAT_LABEL (total_overhead)); -#ifdef GATHER_STATISTICS +#ifdef GATHER_STATISTICS { fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n"); @@ -2002,7 +2181,7 @@ ggc_print_statistics (void) G.stats.total_overhead_under128); fprintf (stderr, "Total Allocated under 128B: %10lld\n", G.stats.total_allocated_under128); - + for (i = 0; i < NUM_ORDERS; i++) if (G.stats.total_allocated_per_order[i]) { @@ -2017,12 +2196,14 @@ ggc_print_statistics (void) #endif } +struct ggc_pch_ondisk +{ + unsigned totals[NUM_ORDERS]; +}; + struct ggc_pch_data { - struct ggc_pch_ondisk - { - unsigned totals[NUM_ORDERS]; - } d; + struct ggc_pch_ondisk d; size_t base[NUM_ORDERS]; size_t written[NUM_ORDERS]; }; @@ -2059,7 +2240,7 @@ ggc_pch_total_size (struct ggc_pch_data *d) unsigned i; for (i = 0; i < NUM_ORDERS; i++) - a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize); + a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i)); return a; } @@ -2072,7 +2253,7 @@ ggc_pch_this_base (struct ggc_pch_data *d, void *base) for (i = 0; i < NUM_ORDERS; i++) { d->base[i] = a; - a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize); + a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i)); } } @@ -2112,7 +2293,7 @@ ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED, size_t size, bool is_string ATTRIBUTE_UNUSED) { unsigned order; - static const char emptyBytes[256]; + static const char emptyBytes[256] = { 0 }; if (size < NUM_SIZE_LOOKUP) order = size_lookup[size]; @@ -2124,7 +2305,7 @@ ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED, } if (fwrite (x, size, 1, f) != 1) - fatal_error ("can't write PCH file: %m"); + fatal_error ("can%'t write PCH file: %m"); /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the object out to OBJECT_SIZE(order). This happens for strings. */ @@ -2140,13 +2321,13 @@ ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED, if (padding <= sizeof(emptyBytes)) { if (fwrite (emptyBytes, 1, padding, f) != padding) - fatal_error ("can't write PCH file"); + fatal_error ("can%'t write PCH file"); } else { /* Larger than our buffer? Just default to fseek. 
*/ if (fseek (f, padding, SEEK_CUR) != 0) - fatal_error ("can't write PCH file"); + fatal_error ("can%'t write PCH file"); } } @@ -2155,14 +2336,14 @@ ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED, && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order), G.pagesize), SEEK_CUR) != 0) - fatal_error ("can't write PCH file: %m"); + fatal_error ("can%'t write PCH file: %m"); } void ggc_pch_finish (struct ggc_pch_data *d, FILE *f) { if (fwrite (&d->d, sizeof (d->d), 1, f) != 1) - fatal_error ("can't write PCH file: %m"); + fatal_error ("can%'t write PCH file: %m"); free (d); } @@ -2221,7 +2402,7 @@ ggc_pch_read (FILE *f, void *addr) { struct ggc_pch_ondisk d; unsigned i; - char *offs = addr; + char *offs = (char *) addr; unsigned long count_old_page_tables; unsigned long count_new_page_tables; @@ -2252,7 +2433,7 @@ ggc_pch_read (FILE *f, void *addr) /* Allocate the appropriate page-table entries for the pages read from the PCH file. */ if (fread (&d, sizeof (d), 1, f) != 1) - fatal_error ("can't read PCH file: %m"); + fatal_error ("can%'t read PCH file: %m"); for (i = 0; i < NUM_ORDERS; i++) { @@ -2265,11 +2446,11 @@ ggc_pch_read (FILE *f, void *addr) if (d.totals[i] == 0) continue; - bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize); + bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i)); num_objs = bytes / OBJECT_SIZE (i); - entry = xcalloc (1, (sizeof (struct page_entry) - - sizeof (long) - + BITMAP_SIZE (num_objs + 1))); + entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry) + - sizeof (long) + + BITMAP_SIZE (num_objs + 1))); entry->bytes = bytes; entry->page = offs; entry->context_depth = 0; @@ -2312,3 +2493,12 @@ ggc_pch_read (FILE *f, void *addr) /* Update the statistics. */ G.allocated = G.allocated_last_gc = offs - (char *)addr; } + +struct alloc_zone +{ + int dummy; +}; + +struct alloc_zone rtl_zone; +struct alloc_zone tree_zone; +struct alloc_zone tree_id_zone;
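
Note on the alignment probe and rounding macros in this patch.  The struct max_alignment/offsetof idiom and the PAGE_ALIGN macro are compact enough to demonstrate in isolation.  Below is a minimal standalone sketch of both, with illustrative names rather than GCC's; like the patch, it assumes long long and void * dominate the required alignment (floating point deliberately ignored), and the mask form of page alignment assumes a power-of-two page size.

#include <stddef.h>
#include <stdio.h>

/* One char, then padding up to the union's required alignment: the
   union's offset inside the struct is the strictest alignment of its
   members.  */
struct align_probe {
  char c;
  union {
    long long i;   /* stands in for HOST_WIDEST_INT */
    void *p;
  } u;
};

#define PROBED_ALIGNMENT (offsetof (struct align_probe, u))

/* ROUND_UP analogue: valid for any factor F.  */
#define ROUND_UP_TO(x, f) ((((x) + (f) - 1) / (f)) * (f))

/* PAGE_ALIGN analogue: the mask form requires PAGESIZE to be a
   power of two.  */
#define PAGE_ALIGN_TO(x, pagesize) \
  (((x) + (pagesize) - 1) & ~((size_t) (pagesize) - 1))

int
main (void)
{
  printf ("alignment: %zu\n", PROBED_ALIGNMENT);  /* 8 on most LP64 hosts */
  printf ("13 rounded: %zu\n",
          ROUND_UP_TO ((size_t) 13, PROBED_ALIGNMENT));           /* 16 */
  printf ("5000 page-aligned: %zu\n",
          PAGE_ALIGN_TO ((size_t) 5000, (size_t) 4096));          /* 8192 */
  return 0;
}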
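The new ggc_round_alloc_size_1 maps an allocation request to the smallest order whose object size fits, which also yields the rounded allocation size.  A sketch of that loop under the simplifying assumption that every order holds power-of-two objects (the real collector starts the scan at order 10 and mixes in the extra_order_size_table sizes):

#include <stddef.h>
#include <stdio.h>

/* OBJECT_SIZE stand-in: order I holds objects of 2^I bytes.  */
static size_t
object_size (unsigned order)
{
  return (size_t) 1 << order;
}

/* Find the smallest order whose objects can hold REQUESTED bytes, and
   report the rounded-up allocation size.  */
static void
round_alloc_size (size_t requested, unsigned *order, size_t *alloced)
{
  unsigned o = 0;
  while (requested > object_size (o))
    o++;
  *order = o;
  *alloced = object_size (o);
}

int
main (void)
{
  unsigned order;
  size_t size;
  round_alloc_size (100, &order, &size);
  printf ("100 bytes -> order %u, %zu-byte objects\n", order, size);
  /* prints: order 7, 128-byte objects */
  return 0;
}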
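The new string-marking routine gt_ggc_m_S shares its core with ggc_set_mark: an object's byte offset within its page is converted to a bit index in the page's in_use_p bitmap, then tested and set.  That arithmetic, reduced to a standalone sketch (fixed object size and plain division in place of the DIV_MULT/DIV_SHIFT reciprocal trick; names are illustrative):

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

#define OBJ_SIZE 64                     /* illustrative object size */
#define BITS_PER_LONG (CHAR_BIT * sizeof (unsigned long))

/* Set the in-use bit for the object at byte OFFSET within its page;
   return 1 if it was already marked, so a caller can stop recursing.  */
static int
mark_object (unsigned long *in_use, size_t offset)
{
  size_t bit = offset / OBJ_SIZE;
  size_t word = bit / BITS_PER_LONG;
  unsigned long mask = 1UL << (bit % BITS_PER_LONG);

  if (in_use[word] & mask)
    return 1;
  in_use[word] |= mask;
  return 0;
}

int
main (void)
{
  unsigned long bitmap[4] = { 0 };
  int first = mark_object (bitmap, 128);   /* 0: newly marked */
  int second = mark_object (bitmap, 128);  /* 1: already marked */
  printf ("first=%d second=%d\n", first, second);
  return 0;
}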
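The USING_MADVISE path added to release_pages gives the memory behind fragmented free pages back to the kernel with madvise(MADV_DONTNEED) but keeps the mapping itself, so reusing the range later only takes a touch rather than a new mmap.  A standalone sketch of that pattern, assuming Linux semantics for private anonymous mappings (discarded pages refault as zero-filled; MAP_ANONYMOUS may require a feature-test macro on strict-conformance builds):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main (void)
{
  size_t pagesize = (size_t) sysconf (_SC_PAGESIZE);
  size_t len = 16 * pagesize;

  /* Anonymous private mapping, as in alloc_anon.  */
  char *p = mmap (NULL, len, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return 1;

  memset (p, 0xab, len);        /* dirty the pages */

  /* Release the physical memory but keep the address range valid.  */
  if (madvise (p, len, MADV_DONTNEED) != 0)
    return 1;

  /* Reuse: the pages refault on first touch; no new mmap needed.  */
  printf ("first byte after discard: %d\n", p[0]);   /* 0 */

  munmap (p, len);
  return 0;
}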