X-Git-Url: http://git.sourceforge.jp/view?p=pf3gnuchains%2Fgcc-fork.git;a=blobdiff_plain;f=gcc%2Fggc-page.c;h=ee796cbb7e9fd76f87666651313a03dd88f9ee56;hp=adb32880cfa609c94a218d21c8b9814685fbaee3;hb=927a01eba66c97c810b74e48669832863c8b846d;hpb=3089b75c807357d985c791f1f62f8e47fb87c8b4

diff --git a/gcc/ggc-page.c b/gcc/ggc-page.c
index adb32880cfa..ee796cbb7e9 100644
--- a/gcc/ggc-page.c
+++ b/gcc/ggc-page.c
@@ -1,6 +1,6 @@
 /* "Bag-of-pages" garbage collector for the GNU compiler.
-   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009
-   Free Software Foundation, Inc.
+   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009,
+   2010, 2011 Free Software Foundation, Inc.
 
 This file is part of GCC.
 
@@ -25,9 +25,10 @@ along with GCC; see the file COPYING3.  If not see
 #include "tree.h"
 #include "rtl.h"
 #include "tm_p.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
 #include "flags.h"
 #include "ggc.h"
+#include "ggc-internal.h"
 #include "timevar.h"
 #include "params.h"
 #include "tree-flow.h"
@@ -38,32 +39,22 @@ along with GCC; see the file COPYING3.  If not see
    file open.  Prefer either to valloc.  */
 #ifdef HAVE_MMAP_ANON
 # undef HAVE_MMAP_DEV_ZERO
-
-# include <sys/mman.h>
-# ifndef MAP_FAILED
-#  define MAP_FAILED -1
-# endif
-# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
-#  define MAP_ANONYMOUS MAP_ANON
-# endif
 # define USING_MMAP
-
 #endif
 
 #ifdef HAVE_MMAP_DEV_ZERO
-
-# include <sys/mman.h>
-# ifndef MAP_FAILED
-#  define MAP_FAILED -1
-# endif
 # define USING_MMAP
-
 #endif
 
 #ifndef USING_MMAP
 #define USING_MALLOC_PAGE_GROUPS
 #endif
 
+#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
+    && defined(USING_MMAP)
+# define USING_MADVISE
+#endif
+
 /* Strategy:
 
    This garbage-collecting allocator allocates objects on one of a set
@@ -210,7 +201,7 @@ static const size_t extra_order_size_table[] = {
   sizeof (struct tree_field_decl),
   sizeof (struct tree_parm_decl),
   sizeof (struct tree_var_decl),
-  sizeof (struct tree_type),
+  sizeof (struct tree_type_non_common),
   sizeof (struct function),
   sizeof (struct basic_block_def),
   sizeof (struct cgraph_node),
@@ -230,6 +221,10 @@ static const size_t extra_order_size_table[] = {
 
 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
 
+/* Round X to next multiple of the page size */
+
+#define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))
+
 /* The Ith entry is the number of objects on a page or order I.  */
 
 static unsigned objects_per_page_table[NUM_ORDERS];
@@ -291,6 +286,9 @@ typedef struct page_entry
   /* The lg of size of objects allocated from this page.  */
   unsigned char order;
 
+  /* Discarded page? */
+  bool discarded;
+
   /* A bit vector indicating whether or not objects are in use.  The
      Nth bit is one if the Nth object on this page is allocated.  This
      array is dynamically sized.  */
@@ -335,6 +333,16 @@ typedef struct page_table_chain
 
 #endif
 
+#ifdef ENABLE_GC_ALWAYS_COLLECT
+/* List of free objects to be verified as actually free on the
+   next collection.  */
+struct free_object
+{
+  void *object;
+  struct free_object *next;
+};
+#endif
+
 /* The rest of the global variables.  */
 static struct globals
 {
@@ -421,34 +429,30 @@ static struct globals
 #ifdef ENABLE_GC_ALWAYS_COLLECT
   /* List of free objects to be verified as actually free on the
      next collection.  */
-  struct free_object
-  {
-    void *object;
-    struct free_object *next;
-  } *free_object_list;
+  struct free_object *free_object_list;
 #endif
 
 #ifdef GATHER_STATISTICS
   struct
   {
-    /* Total memory allocated with ggc_alloc.  */
+    /* Total GC-allocated memory.  */
     unsigned long long total_allocated;
 
-    /* Total overhead for memory to be allocated with ggc_alloc.  */
+    /* Total overhead for GC-allocated memory.  */
     unsigned long long total_overhead;
 
     /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
      sizes.  */
-
+
     unsigned long long total_allocated_under32;
     unsigned long long total_overhead_under32;
-
+
     unsigned long long total_allocated_under64;
     unsigned long long total_overhead_under64;
-
+
    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
-
+
    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];
@@ -470,7 +474,7 @@ static struct globals
    can override this by defining GGC_QUIRE_SIZE explicitly.  */
 #ifndef GGC_QUIRE_SIZE
 # ifdef USING_MMAP
-#  define GGC_QUIRE_SIZE 256
+#  define GGC_QUIRE_SIZE 512    /* 2MB for 4K pages */
 # else
 #  define GGC_QUIRE_SIZE 16
 # endif
@@ -483,7 +487,7 @@ static int ggc_allocated_p (const void *);
 static page_entry *lookup_page_table_entry (const void *);
 static void set_page_table_entry (void *, page_entry *);
 #ifdef USING_MMAP
-static char *alloc_anon (char *, size_t);
+static char *alloc_anon (char *, size_t, bool check);
 #endif
 #ifdef USING_MALLOC_PAGE_GROUPS
 static size_t page_group_index (char *, char *);
@@ -639,7 +643,7 @@ found:
 
 /* Prints the page-entry for object size ORDER, for debugging.  */
 
-void
+DEBUG_FUNCTION void
 debug_print_page_list (int order)
 {
   page_entry *p;
@@ -662,7 +666,7 @@ debug_print_page_list (int order)
    compile error unless exactly one of the HAVE_* is defined.  */
 
 static inline char *
-alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
+alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
 {
 #ifdef HAVE_MMAP_ANON
   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
@@ -675,6 +679,8 @@ alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
 
   if (page == (char *) MAP_FAILED)
     {
+      if (!check)
+        return NULL;
       perror ("virtual memory exhausted");
       exit (FATAL_EXIT_CODE);
     }
@@ -683,7 +689,7 @@ alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
   G.bytes_mapped += size;
 
   /* Pretend we don't have access to the allocated pages.  We'll enable
-     access to smaller pieces of the area in ggc_alloc.  Discard the
+     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
      handle to avoid handle leak.  */
   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
 
@@ -737,6 +743,7 @@ alloc_page (unsigned order)
   entry_size = num_objects * OBJECT_SIZE (order);
   if (entry_size < G.pagesize)
     entry_size = G.pagesize;
+  entry_size = PAGE_ALIGN (entry_size);
 
   entry = NULL;
   page = NULL;
@@ -748,6 +755,10 @@ alloc_page (unsigned order)
 
   if (p != NULL)
     {
+      if (p->discarded)
+        G.bytes_mapped += p->bytes;
+      p->discarded = false;
+
       /* Recycle the allocated memory from this page ...  */
       *pp = p->next;
       page = p->page;
@@ -772,13 +783,18 @@ alloc_page (unsigned order)
 	 extras on the freelist.  (Can only do this optimization with
 	 mmap for backing store.)  */
       struct page_entry *e, *f = G.free_pages;
-      int i;
+      int i, entries = GGC_QUIRE_SIZE;
 
-      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
+      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
+      if (page == NULL)
+	{
+	  page = alloc_anon(NULL, G.pagesize, true);
+	  entries = 1;
+	}
 
       /* This loop counts down so that the chain will be in ascending
 	 memory order.  */
-      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
+      for (i = entries - 1; i >= 1; i--)
 	{
 	  e = XCNEWVAR (struct page_entry, page_entry_size);
 	  e->order = order;
@@ -791,7 +807,7 @@ alloc_page (unsigned order)
       G.free_pages = f;
     }
   else
-    page = alloc_anon (NULL, entry_size);
+    page = alloc_anon (NULL, entry_size, true);
 #endif
 #ifdef USING_MALLOC_PAGE_GROUPS
   else
@@ -945,7 +961,7 @@ free_page (page_entry *entry)
       /* We cannot free a page from a context deeper than the current
 	 one.  */
       gcc_assert (entry->context_depth == top->context_depth);
-      
+
       /* Put top element into freed slot.  */
       G.by_depth[i] = top;
       G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
@@ -964,7 +980,90 @@ free_page (page_entry *entry)
 static void
 release_pages (void)
 {
-#ifdef USING_MMAP
+#ifdef USING_MADVISE
+  page_entry *p, *start_p;
+  char *start;
+  size_t len;
+  size_t mapped_len;
+  page_entry *next, *prev, *newprev;
+  size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
+
+  /* First free larger continuous areas to the OS.
+     This allows other allocators to grab these areas if needed.
+     This is only done on larger chunks to avoid fragmentation.
+     This does not always work because the free_pages list is only
+     approximately sorted. */
+
+  p = G.free_pages;
+  prev = NULL;
+  while (p)
+    {
+      start = p->page;
+      start_p = p;
+      len = 0;
+      mapped_len = 0;
+      newprev = prev;
+      while (p && p->page == start + len)
+        {
+          len += p->bytes;
+          if (!p->discarded)
+              mapped_len += p->bytes;
+          newprev = p;
+          p = p->next;
+        }
+      if (len >= free_unit)
+        {
+          while (start_p != p)
+            {
+              next = start_p->next;
+              free (start_p);
+              start_p = next;
+            }
+          munmap (start, len);
+          if (prev)
+            prev->next = p;
+          else
+            G.free_pages = p;
+          G.bytes_mapped -= mapped_len;
+          continue;
+        }
+      prev = newprev;
+    }
+
+  /* Now give back the fragmented pages to the OS, but keep the address
+     space to reuse it next time. */
+
+  for (p = G.free_pages; p; )
+    {
+      if (p->discarded)
+        {
+          p = p->next;
+          continue;
+        }
+      start = p->page;
+      len = p->bytes;
+      start_p = p;
+      p = p->next;
+      while (p && p->page == start + len)
+        {
+          len += p->bytes;
+          p = p->next;
+        }
+      /* Give the page back to the kernel, but don't free the mapping.
+         This avoids fragmentation in the virtual memory map of the
+         process. Next time we can reuse it by just touching it. */
+      madvise (start, len, MADV_DONTNEED);
+      /* Don't count those pages as mapped to not touch the garbage collector
+         unnecessarily. */
+      G.bytes_mapped -= len;
+      while (start_p != p)
+        {
+          start_p->discarded = true;
+          start_p = start_p->next;
+        }
+    }
+#endif
+#if defined(USING_MMAP) && !defined(USING_MADVISE)
   page_entry *p, *next;
   char *start;
   size_t len;
@@ -1062,35 +1161,66 @@ static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
 };
 
+/* For a given size of memory requested for allocation, return the
+   actual size that is going to be allocated, as well as the size
+   order.  */
+
+static void
+ggc_round_alloc_size_1 (size_t requested_size,
+			size_t *size_order,
+			size_t *alloced_size)
+{
+  size_t order, object_size;
+
+  if (requested_size < NUM_SIZE_LOOKUP)
+    {
+      order = size_lookup[requested_size];
+      object_size = OBJECT_SIZE (order);
+    }
+  else
+    {
+      order = 10;
+      while (requested_size > (object_size = OBJECT_SIZE (order)))
+	order++;
+    }
+
+  if (size_order)
+    *size_order = order;
+  if (alloced_size)
+    *alloced_size = object_size;
+}
+
+/* For a given size of memory requested for allocation, return the
+   actual size that is going to be allocated.  */
+
+size_t
+ggc_round_alloc_size (size_t requested_size)
+{
+  size_t size = 0;
+
+  ggc_round_alloc_size_1 (requested_size, NULL, &size);
+  return size;
+}
+
 /* Typed allocation function.  Does nothing special in this collector.  */
 
 void *
 ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
 		      MEM_STAT_DECL)
 {
-  return ggc_alloc_stat (size PASS_MEM_STAT);
+  return ggc_internal_alloc_stat (size PASS_MEM_STAT);
 }
 
 /* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */
 
 void *
-ggc_alloc_stat (size_t size MEM_STAT_DECL)
+ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
 {
   size_t order, word, bit, object_offset, object_size;
   struct page_entry *entry;
   void *result;
 
-  if (size < NUM_SIZE_LOOKUP)
-    {
-      order = size_lookup[size];
-      object_size = OBJECT_SIZE (order);
-    }
-  else
-    {
-      order = 10;
-      while (size > (object_size = OBJECT_SIZE (order)))
-	order++;
-    }
+  ggc_round_alloc_size_1 (size, &order, &object_size);
 
   /* If there are non-full pages for this size allocation, they are at
      the head of the list.  */
@@ -1413,7 +1543,7 @@ ggc_free (void *p)
 
 #ifdef ENABLE_GC_ALWAYS_COLLECT
   /* In the completely-anal-checking mode, we do *not* immediately free
-     the data, but instead verify that the data is *actually* not 
+     the data, but instead verify that the data is *actually* not
      reachable the next time we collect.  */
   {
     struct free_object *fo = XNEW (struct free_object);
@@ -1440,7 +1570,7 @@ ggc_free (void *p)
 	  /* If the page is completely full, then it's supposed to
 	     be after all pages that aren't.  Since we've freed one
 	     object from a page that was full, we need to move the
-	     page to the head of the list. 
+	     page to the head of the list.
 
 	     PE is the node we want to move.  Q is the previous node
 	     and P is the next node in the list.  */
@@ -1484,7 +1614,7 @@ ggc_free (void *p)
 static void
 compute_inverse (unsigned order)
 {
-  size_t size, inv; 
+  size_t size, inv;
   unsigned int e;
 
   size = OBJECT_SIZE (order);
@@ -1530,14 +1660,14 @@ init_ggc (void)
        believe, is an unaligned page allocation, which would cause us to
        hork badly if we tried to use it.  */
     {
-      char *p = alloc_anon (NULL, G.pagesize);
+      char *p = alloc_anon (NULL, G.pagesize, true);
       struct page_entry *e;
 
       if ((size_t)p & (G.pagesize - 1))
 	{
 	  /* How losing.  Discard this one and try another.  If we still
 	     can't get something useful, give up.  */
-	  p = alloc_anon (NULL, G.pagesize);
+	  p = alloc_anon (NULL, G.pagesize, true);
 	  gcc_assert (!((size_t)p & (G.pagesize - 1)));
 	}
 
@@ -1599,20 +1729,6 @@ init_ggc (void)
   G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
 }
 
-/* Start a new GGC zone.  */
-
-struct alloc_zone *
-new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
-{
-  return NULL;
-}
-
-/* Destroy a GGC zone.  */
-void
-destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
-{
-}
-
 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
    reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */
 
@@ -1744,7 +1860,7 @@ sweep_pages (void)
 		G.pages[order] = next;
 	      else
 		previous->next = next;
-	    
+
 	      /* Splice P out of the back pointers too.  */
 	      if (next)
 		next->prev = previous;
@@ -2044,7 +2160,7 @@ ggc_print_statistics (void)
 	   SCALE (G.allocated), STAT_LABEL(G.allocated),
 	   SCALE (total_overhead), STAT_LABEL (total_overhead));
 
-#ifdef GATHER_STATISTICS 
+#ifdef GATHER_STATISTICS
   {
     fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
 
@@ -2065,7 +2181,7 @@ ggc_print_statistics (void)
 	     G.stats.total_overhead_under128);
     fprintf (stderr, "Total Allocated under 128B: %10lld\n",
 	     G.stats.total_allocated_under128);
-   
+
     for (i = 0; i < NUM_ORDERS; i++)
       if (G.stats.total_allocated_per_order[i])
 	{
@@ -2124,7 +2240,7 @@ ggc_pch_total_size (struct ggc_pch_data *d)
   unsigned i;
 
   for (i = 0; i < NUM_ORDERS; i++)
-    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
+    a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
   return a;
 }
 
@@ -2137,7 +2253,7 @@ ggc_pch_this_base (struct ggc_pch_data *d, void *base)
   for (i = 0; i < NUM_ORDERS; i++)
     {
       d->base[i] = a;
-      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
+      a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
     }
 }
 
@@ -2189,7 +2305,7 @@ ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
     }
 
   if (fwrite (x, size, 1, f) != 1)
-    fatal_error ("can't write PCH file: %m");
+    fatal_error ("can%'t write PCH file: %m");
 
   /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
      object out to OBJECT_SIZE(order).  This happens for strings.  */
@@ -2205,13 +2321,13 @@ ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
       if (padding <= sizeof(emptyBytes))
         {
           if (fwrite (emptyBytes, 1, padding, f) != padding)
-            fatal_error ("can't write PCH file");
+            fatal_error ("can%'t write PCH file");
         }
       else
         {
           /* Larger than our buffer?  Just default to fseek.  */
           if (fseek (f, padding, SEEK_CUR) != 0)
-            fatal_error ("can't write PCH file");
+            fatal_error ("can%'t write PCH file");
         }
     }
 
@@ -2220,14 +2336,14 @@ ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
       && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
 				   G.pagesize),
 		SEEK_CUR) != 0)
-    fatal_error ("can't write PCH file: %m");
+    fatal_error ("can%'t write PCH file: %m");
 }
 
 void
 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
 {
   if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
-    fatal_error ("can't write PCH file: %m");
+    fatal_error ("can%'t write PCH file: %m");
 
   free (d);
 }
@@ -2317,7 +2433,7 @@ ggc_pch_read (FILE *f, void *addr)
   /* Allocate the appropriate page-table entries for the pages read from
      the PCH file.  */
   if (fread (&d, sizeof (d), 1, f) != 1)
-    fatal_error ("can't read PCH file: %m");
+    fatal_error ("can%'t read PCH file: %m");
 
   for (i = 0; i < NUM_ORDERS; i++)
     {
@@ -2330,7 +2446,7 @@ ggc_pch_read (FILE *f, void *addr)
       if (d.totals[i] == 0)
 	continue;
 
-      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
+      bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
       num_objs = bytes / OBJECT_SIZE (i);
       entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
 					    - sizeof (long)
@@ -2377,3 +2493,12 @@ ggc_pch_read (FILE *f, void *addr)
   /* Update the statistics.  */
   G.allocated = G.allocated_last_gc = offs - (char *)addr;
 }
+
+struct alloc_zone
+{
+  int dummy;
+};
+
+struct alloc_zone rtl_zone;
+struct alloc_zone tree_zone;
+struct alloc_zone tree_id_zone;
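
Editorial note, appended after the patch and not part of it: the heart of this change is the new USING_MADVISE path in release_pages, which hands unused GC pages back to the kernel with madvise (start, len, MADV_DONTNEED) while keeping the mapping itself alive, so the same addresses can be reused later without another mmap. The standalone C sketch below illustrates that technique in isolation. It is not GCC code; it assumes a Linux-like system where MADV_DONTNEED on a private anonymous mapping discards the physical pages and refaults them zero-filled on the next touch, and every name in it (the 16-page region, the demo main) is purely illustrative.

/* Minimal sketch, assuming Linux-like madvise semantics for private
   anonymous mappings: reserve a region with mmap, dirty it, discard the
   physical pages with MADV_DONTNEED, then touch the region again.  */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main (void)
{
  size_t pagesize = (size_t) sysconf (_SC_PAGESIZE);
  size_t len = 16 * pagesize;

  /* Reserve address space, much as alloc_anon does for a page group.  */
  char *base = mmap (NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED)
    {
      perror ("mmap");
      return 1;
    }

  /* Touch the pages so physical memory is actually committed.  */
  memset (base, 0xab, len);

  /* "Free" the pages without unmapping, mirroring the USING_MADVISE path
     in release_pages: the kernel may reclaim the memory, but the address
     range stays valid for reuse.  */
  if (madvise (base, len, MADV_DONTNEED) != 0)
    perror ("madvise");

  /* Reuse is just another touch; the pages come back zero-filled.  */
  printf ("first byte after MADV_DONTNEED: %d\n", base[0]);

  munmap (base, len);
  return 0;
}

On such a system the final printf typically reports 0, which is the observable difference between discarding pages (contents lost, mapping kept) and munmap (mapping gone): the trade-off the patch exploits to return memory to the OS without fragmenting the compiler's address space.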