X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fggc-page.c;h=807bded61fcf0d97f1686dc4992b849075ea35ff;hb=c3c620eaed1c621ba5e6d2e29fa7c374a1d76850;hp=cba44dd0b25aff4f83b7f2658791d05db0aa8e11;hpb=28a61eb789df9aa49304d4f2d9229f3606de6e0d;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/ggc-page.c b/gcc/ggc-page.c index cba44dd0b25..807bded61fc 100644 --- a/gcc/ggc-page.c +++ b/gcc/ggc-page.c @@ -1,12 +1,12 @@ /* "Bag-of-pages" garbage collector for the GNU compiler. - Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004 + Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free -Software Foundation; either version 2, or (at your option) any later +Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY @@ -15,9 +15,8 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License -along with GCC; see the file COPYING. If not, write to the Free -Software Foundation, 59 Temple Place - Suite 330, Boston, MA -02111-1307, USA. */ +along with GCC; see the file COPYING3. If not see +. */ #include "config.h" #include "system.h" @@ -32,18 +31,8 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #include "timevar.h" #include "params.h" #include "tree-flow.h" -#ifdef ENABLE_VALGRIND_CHECKING -# ifdef HAVE_VALGRIND_MEMCHECK_H -# include -# elif defined HAVE_MEMCHECK_H -# include -# else -# include -# endif -#else -/* Avoid #ifdef:s when we can help it. */ -#define VALGRIND_DISCARD(x) -#endif +#include "cfgloop.h" +#include "plugin.h" /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a file open. Prefer either to valloc. */ @@ -75,7 +64,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #define USING_MALLOC_PAGE_GROUPS #endif -/* Stategy: +/* Strategy: This garbage-collecting allocator allocates objects on one of a set of pages. Each page can allocate objects of a single size only; @@ -169,6 +158,24 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #define OFFSET_TO_BIT(OFFSET, ORDER) \ (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER)) +/* We use this structure to determine the alignment required for + allocations. For power-of-two sized allocations, that's not a + problem, but it does matter for odd-sized allocations. + We do not care about alignment for floating-point types. */ + +struct max_alignment { + char c; + union { + HOST_WIDEST_INT i; + void *p; + } u; +}; + +/* The biggest alignment required. */ + +#define MAX_ALIGNMENT (offsetof (struct max_alignment, u)) + + /* The number of extra orders, not corresponding to power-of-two sized objects. */ @@ -185,34 +192,35 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA thing you need to do to add a new special allocation size. */ static const size_t extra_order_size_table[] = { - sizeof (struct stmt_ann_d), - sizeof (struct tree_decl), - sizeof (struct tree_list), - TREE_EXP_SIZE (2), - RTL_SIZE (2), /* MEM, PLUS, etc. */ - RTL_SIZE (9), /* INSN */ + /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT. + There are a lot of structures with these sizes and explicitly + listing them risks orders being dropped because they changed size. 
*/ + MAX_ALIGNMENT * 3, + MAX_ALIGNMENT * 5, + MAX_ALIGNMENT * 6, + MAX_ALIGNMENT * 7, + MAX_ALIGNMENT * 9, + MAX_ALIGNMENT * 10, + MAX_ALIGNMENT * 11, + MAX_ALIGNMENT * 12, + MAX_ALIGNMENT * 13, + MAX_ALIGNMENT * 14, + MAX_ALIGNMENT * 15, + sizeof (struct tree_decl_non_common), + sizeof (struct tree_field_decl), + sizeof (struct tree_parm_decl), + sizeof (struct tree_var_decl), + sizeof (struct tree_type), + sizeof (struct function), + sizeof (struct basic_block_def), + sizeof (struct cgraph_node), + sizeof (struct loop), }; /* The total number of orders. */ #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS) -/* We use this structure to determine the alignment required for - allocations. For power-of-two sized allocations, that's not a - problem, but it does matter for odd-sized allocations. */ - -struct max_alignment { - char c; - union { - HOST_WIDEST_INT i; - long double d; - } u; -}; - -/* The biggest alignment required. */ - -#define MAX_ALIGNMENT (offsetof (struct max_alignment, u)) - /* Compute the smallest nonnegative number which when added to X gives a multiple of F. */ @@ -327,6 +335,16 @@ typedef struct page_table_chain #endif +#ifdef ENABLE_GC_ALWAYS_COLLECT +/* List of free objects to be verified as actually free on the + next collection. */ +struct free_object +{ + void *object; + struct free_object *next; +}; +#endif + /* The rest of the global variables. */ static struct globals { @@ -387,7 +405,7 @@ static struct globals /* Maximum number of elements that can be used before resizing. */ unsigned int depth_max; - /* Each element of this arry is an index in by_depth where the given + /* Each element of this array is an index in by_depth where the given depth starts. This structure is indexed by that given depth we are interested in. */ unsigned int *depth; @@ -413,11 +431,7 @@ static struct globals #ifdef ENABLE_GC_ALWAYS_COLLECT /* List of free objects to be verified as actually free on the next collection. */ - struct free_object - { - void *object; - struct free_object *next; - } *free_object_list; + struct free_object *free_object_list; #endif #ifdef GATHER_STATISTICS @@ -431,16 +445,16 @@ static struct globals /* Total allocations and overhead for sizes less than 32, 64 and 128. These sizes are interesting because they are typical cache line sizes. */ - + unsigned long long total_allocated_under32; unsigned long long total_overhead_under32; - + unsigned long long total_allocated_under64; unsigned long long total_overhead_under64; - + unsigned long long total_allocated_under128; unsigned long long total_overhead_under128; - + /* The allocations for each of the allocation orders. */ unsigned long long total_allocated_per_order[NUM_ORDERS]; @@ -459,7 +473,7 @@ static struct globals allocation routines. The first page is used, the rest go onto the free list. This cannot be larger than HOST_BITS_PER_INT for the in_use bitmask for page_group. Hosts that need a different value - can override this by defining GGC_QUIRE_SIZE explicitly. */ + can override this by defining GGC_QUIRE_SIZE explicitly. */ #ifndef GGC_QUIRE_SIZE # ifdef USING_MMAP # define GGC_QUIRE_SIZE 256 @@ -495,9 +509,6 @@ static void move_ptes_to_front (int, int); void debug_print_page_list (int); static void push_depth (unsigned int); static void push_by_depth (page_entry *, unsigned long *); -struct alloc_zone *rtl_zone = NULL; -struct alloc_zone *tree_zone = NULL; -struct alloc_zone *garbage_zone = NULL; /* Push an entry onto G.depth. 
*/ @@ -507,7 +518,7 @@ push_depth (unsigned int i) if (G.depth_in_use >= G.depth_max) { G.depth_max *= 2; - G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int)); + G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max); } G.depth[G.depth_in_use++] = i; } @@ -520,10 +531,9 @@ push_by_depth (page_entry *p, unsigned long *s) if (G.by_depth_in_use >= G.by_depth_max) { G.by_depth_max *= 2; - G.by_depth = xrealloc (G.by_depth, - G.by_depth_max * sizeof (page_entry *)); - G.save_in_use = xrealloc (G.save_in_use, - G.by_depth_max * sizeof (unsigned long *)); + G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max); + G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use, + G.by_depth_max); } G.by_depth[G.by_depth_in_use] = p; G.save_in_use[G.by_depth_in_use++] = s; @@ -615,7 +625,7 @@ set_page_table_entry (void *p, page_entry *entry) goto found; /* Not found -- allocate a new table. */ - table = xcalloc (1, sizeof(*table)); + table = XCNEW (struct page_table_chain); table->next = G.lookup; table->high_bits = high_bits; G.lookup = table; @@ -628,7 +638,7 @@ found: L2 = LOOKUP_L2 (p); if (base[L1] == NULL) - base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *)); + base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE); base[L1][L2] = entry; } @@ -661,12 +671,12 @@ static inline char * alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size) { #ifdef HAVE_MMAP_ANON - char *page = mmap (pref, size, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); #endif #ifdef HAVE_MMAP_DEV_ZERO - char *page = mmap (pref, size, PROT_READ | PROT_WRITE, - MAP_PRIVATE, G.dev_zero_fd, 0); + char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE, + MAP_PRIVATE, G.dev_zero_fd, 0); #endif if (page == (char *) MAP_FAILED) @@ -681,7 +691,7 @@ alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size) /* Pretend we don't have access to the allocated pages. We'll enable access to smaller pieces of the area in ggc_alloc. Discard the handle to avoid handle leak. */ - VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size)); + VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size)); return page; } @@ -776,7 +786,7 @@ alloc_page (unsigned order) memory order. */ for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--) { - e = xcalloc (1, page_entry_size); + e = XCNEWVAR (struct page_entry, page_entry_size); e->order = order; e->bytes = G.pagesize; e->page = page + (i << G.lg_pagesize); @@ -804,7 +814,7 @@ alloc_page (unsigned order) alloc_size = GGC_QUIRE_SIZE * G.pagesize; else alloc_size = entry_size + G.pagesize - 1; - allocation = xmalloc (alloc_size); + allocation = XNEWVEC (char, alloc_size); page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize); head_slop = page - allocation; @@ -847,7 +857,7 @@ alloc_page (unsigned order) struct page_entry *e, *f = G.free_pages; for (a = enda - G.pagesize; a != page; a -= G.pagesize) { - e = xcalloc (1, page_entry_size); + e = XCNEWVAR (struct page_entry, page_entry_size); e->order = order; e->bytes = G.pagesize; e->page = a; @@ -861,7 +871,7 @@ alloc_page (unsigned order) #endif if (entry == NULL) - entry = xcalloc (1, page_entry_size); + entry = XCNEWVAR (struct page_entry, page_entry_size); entry->bytes = entry_size; entry->page = page; @@ -925,7 +935,7 @@ free_page (page_entry *entry) /* Mark the page as inaccessible. Discard the handle to avoid handle leak. 
*/ - VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes)); + VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes)); set_page_table_entry (entry->page, NULL); @@ -941,7 +951,7 @@ free_page (page_entry *entry) /* We cannot free a page from a context deeper than the current one. */ gcc_assert (entry->context_depth == top->context_depth); - + /* Put top element into freed slot. */ G.by_depth[i] = top; G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1]; @@ -1021,8 +1031,8 @@ release_pages (void) /* This table provides a fast way to determine ceil(log_2(size)) for allocation requests. The minimum allocation size is eight bytes. */ - -static unsigned char size_lookup[257] = +#define NUM_SIZE_LOOKUP 512 +static unsigned char size_lookup[NUM_SIZE_LOOKUP] = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, @@ -1040,7 +1050,22 @@ static unsigned char size_lookup[257] = 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8 + 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; /* Typed allocation function. Does nothing special in this collector. */ @@ -1052,15 +1077,6 @@ ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size return ggc_alloc_stat (size PASS_MEM_STAT); } -/* Zone allocation function. Does nothing special in this collector. */ - -void * -ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED - MEM_STAT_DECL) -{ - return ggc_alloc_stat (size PASS_MEM_STAT); -} - /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */ void * @@ -1070,14 +1086,14 @@ ggc_alloc_stat (size_t size MEM_STAT_DECL) struct page_entry *entry; void *result; - if (size <= 256) + if (size < NUM_SIZE_LOOKUP) { order = size_lookup[size]; object_size = OBJECT_SIZE (order); } else { - order = 9; + order = 10; while (size > (object_size = OBJECT_SIZE (order))) order++; } @@ -1139,8 +1155,14 @@ ggc_alloc_stat (size_t size MEM_STAT_DECL) word = bit = 0; while (~entry->in_use_p[word] == 0) ++word; + +#if GCC_VERSION >= 3004 + bit = __builtin_ctzl (~entry->in_use_p[word]); +#else while ((entry->in_use_p[word] >> bit) & 1) ++bit; +#endif + hint = word * HOST_BITS_PER_LONG + bit; } @@ -1188,7 +1210,7 @@ ggc_alloc_stat (size_t size MEM_STAT_DECL) exact same semantics in presence of memory bugs, regardless of ENABLE_VALGRIND_CHECKING. We override this request below. Drop the handle to avoid handle leak. */ - VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size)); + VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size)); /* `Poison' the entire allocated object, including any padding at the end. 
*/ @@ -1196,19 +1218,22 @@ ggc_alloc_stat (size_t size MEM_STAT_DECL) /* Make the bytes after the end of the object unaccessible. Discard the handle to avoid handle leak. */ - VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size, - object_size - size)); + VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size, + object_size - size)); #endif /* Tell Valgrind that the memory is there, but its content isn't defined. The bytes at the end of the object are still marked unaccessible. */ - VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size)); + VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size)); /* Keep track of how many bytes are being allocated. This information is used in deciding when to collect. */ G.allocated += object_size; + /* For timevar statistics. */ + timevar_ggc_mem_total += object_size; + #ifdef GATHER_STATISTICS { size_t overhead = object_size - size; @@ -1245,6 +1270,57 @@ ggc_alloc_stat (size_t size MEM_STAT_DECL) return result; } +/* Mark function for strings. */ + +void +gt_ggc_m_S (const void *p) +{ + page_entry *entry; + unsigned bit, word; + unsigned long mask; + unsigned long offset; + + if (!p || !ggc_allocated_p (p)) + return; + + /* Look up the page on which the object is alloced. . */ + entry = lookup_page_table_entry (p); + gcc_assert (entry); + + /* Calculate the index of the object on the page; this is its bit + position in the in_use_p bitmap. Note that because a char* might + point to the middle of an object, we need special code here to + make sure P points to the start of an object. */ + offset = ((const char *) p - entry->page) % object_size_table[entry->order]; + if (offset) + { + /* Here we've seen a char* which does not point to the beginning + of an allocated object. We assume it points to the middle of + a STRING_CST. */ + gcc_assert (offset == offsetof (struct tree_string, str)); + p = ((const char *) p) - offset; + gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p)); + return; + } + + bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order); + word = bit / HOST_BITS_PER_LONG; + mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG); + + /* If the bit was previously set, skip it. */ + if (entry->in_use_p[word] & mask) + return; + + /* Otherwise set it, and decrement the free object count. */ + entry->in_use_p[word] |= mask; + entry->num_free_objects -= 1; + + if (GGC_DEBUG_LEVEL >= 4) + fprintf (G.debug_file, "Marking %p\n", p); + + return; +} + /* If P is not marked, marks it and return false. Otherwise return true. P must have been allocated by the GC allocator; it mustn't point to static objects, stack variables, or memory allocated with malloc. */ @@ -1335,18 +1411,18 @@ ggc_free (void *p) #ifdef ENABLE_GC_CHECKING /* Poison the data, to indicate the data is garbage. */ - VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size)); + VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size)); memset (p, 0xa5, size); #endif /* Let valgrind know the object is free. */ - VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size)); + VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size)); #ifdef ENABLE_GC_ALWAYS_COLLECT /* In the completely-anal-checking mode, we do *not* immediately free - the data, but instead verify that the data is *actually* not + the data, but instead verify that the data is *actually* not reachable the next time we collect. 
*/ { - struct free_object *fo = xmalloc (sizeof (struct free_object)); + struct free_object *fo = XNEW (struct free_object); fo->object = p; fo->next = G.free_object_list; G.free_object_list = fo; @@ -1370,7 +1446,7 @@ ggc_free (void *p) /* If the page is completely full, then it's supposed to be after all pages that aren't. Since we've freed one object from a page that was full, we need to move the - page to the head of the list. + page to the head of the list. PE is the node we want to move. Q is the previous node and P is the next node in the list. */ @@ -1414,7 +1490,7 @@ ggc_free (void *p) static void compute_inverse (unsigned order) { - size_t size, inv; + size_t size, inv; unsigned int e; size = OBJECT_SIZE (order); @@ -1472,7 +1548,7 @@ init_ggc (void) } /* We have a good page, might as well hold onto it... */ - e = xcalloc (1, sizeof (struct page_entry)); + e = XCNEW (struct page_entry); e->bytes = G.pagesize; e->page = p; e->next = G.free_pages; @@ -1511,19 +1587,22 @@ init_ggc (void) int o; int i; - o = size_lookup[OBJECT_SIZE (order)]; - for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i) + i = OBJECT_SIZE (order); + if (i >= NUM_SIZE_LOOKUP) + continue; + + for (o = size_lookup[i]; o == size_lookup [i]; --i) size_lookup[i] = order; } G.depth_in_use = 0; G.depth_max = 10; - G.depth = xmalloc (G.depth_max * sizeof (unsigned int)); + G.depth = XNEWVEC (unsigned int, G.depth_max); G.by_depth_in_use = 0; G.by_depth_max = INITIAL_PTE_COUNT; - G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *)); - G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *)); + G.by_depth = XNEWVEC (page_entry *, G.by_depth_max); + G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max); } /* Start a new GGC zone. */ @@ -1540,18 +1619,6 @@ destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED) { } -/* Increment the `GC context'. Objects allocated in an outer context - are never freed, eliminating the need to register their roots. */ - -void -ggc_push_context (void) -{ - ++G.context_depth; - - /* Die on wrap. */ - gcc_assert (G.context_depth < HOST_BITS_PER_LONG); -} - /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P reflects reality. Recalculate NUM_FREE_OBJECTS as well. */ @@ -1587,89 +1654,6 @@ ggc_recalculate_in_use_p (page_entry *p) gcc_assert (p->num_free_objects < num_objects); } - -/* Decrement the `GC context'. All objects allocated since the - previous ggc_push_context are migrated to the outer context. */ - -void -ggc_pop_context (void) -{ - unsigned long omask; - unsigned int depth, i, e; -#ifdef ENABLE_CHECKING - unsigned int order; -#endif - - depth = --G.context_depth; - omask = (unsigned long)1 << (depth + 1); - - if (!((G.context_depth_allocations | G.context_depth_collections) & omask)) - return; - - G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1; - G.context_depth_allocations &= omask - 1; - G.context_depth_collections &= omask - 1; - - /* The G.depth array is shortened so that the last index is the - context_depth of the top element of by_depth. */ - if (depth+1 < G.depth_in_use) - e = G.depth[depth+1]; - else - e = G.by_depth_in_use; - - /* We might not have any PTEs of depth depth. */ - if (depth < G.depth_in_use) - { - - /* First we go through all the pages at depth depth to - recalculate the in use bits. */ - for (i = G.depth[depth]; i < e; ++i) - { - page_entry *p = G.by_depth[i]; - - /* Check that all of the pages really are at the depth that - we expect. 
*/ - gcc_assert (p->context_depth == depth); - gcc_assert (p->index_by_depth == i); - - prefetch (&save_in_use_p_i (i+8)); - prefetch (&save_in_use_p_i (i+16)); - if (save_in_use_p_i (i)) - { - p = G.by_depth[i]; - ggc_recalculate_in_use_p (p); - free (save_in_use_p_i (i)); - save_in_use_p_i (i) = 0; - } - } - } - - /* Then, we reset all page_entries with a depth greater than depth - to be at depth. */ - for (i = e; i < G.by_depth_in_use; ++i) - { - page_entry *p = G.by_depth[i]; - - /* Check that all of the pages really are at the depth we - expect. */ - gcc_assert (p->context_depth > depth); - gcc_assert (p->index_by_depth == i); - p->context_depth = depth; - } - - adjust_depth (); - -#ifdef ENABLE_CHECKING - for (order = 2; order < NUM_ORDERS; order++) - { - page_entry *p; - - for (p = G.pages[order]; p != NULL; p = p->next) - gcc_assert (p->context_depth < depth || - (p->context_depth == depth && !save_in_use_p (p))); - } -#endif -} /* Unmark all objects. */ @@ -1696,7 +1680,7 @@ clear_marks (void) if (p->context_depth < G.context_depth) { if (! save_in_use_p (p)) - save_in_use_p (p) = xmalloc (bitmap_size); + save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size); memcpy (save_in_use_p (p), p->in_use_p, bitmap_size); } @@ -1766,7 +1750,7 @@ sweep_pages (void) G.pages[order] = next; else previous->next = next; - + /* Splice P out of the back pointers too. */ if (next) next->prev = previous; @@ -1884,11 +1868,12 @@ poison_pages (void) so the exact same memory semantics is kept, in case there are memory errors. We override this request below. */ - VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size)); + VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object, + size)); memset (object, 0xa5, size); /* Drop the handle to avoid handle leak. */ - VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size)); + VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size)); } } } @@ -1971,6 +1956,8 @@ ggc_collect (void) /* Indicate that we've seen collections at this context depth. 
*/ G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1; + invoke_plugin_callbacks (PLUGIN_GGC_START, NULL); + clear_marks (); ggc_mark_roots (); #ifdef GATHER_STATISTICS @@ -1982,6 +1969,8 @@ ggc_collect (void) G.allocated_last_gc = G.allocated; + invoke_plugin_callbacks (PLUGIN_GGC_END, NULL); + timevar_pop (TV_GC); if (!quiet_flag) @@ -2061,7 +2050,7 @@ ggc_print_statistics (void) SCALE (G.allocated), STAT_LABEL(G.allocated), SCALE (total_overhead), STAT_LABEL (total_overhead)); -#ifdef GATHER_STATISTICS +#ifdef GATHER_STATISTICS { fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n"); @@ -2082,25 +2071,29 @@ ggc_print_statistics (void) G.stats.total_overhead_under128); fprintf (stderr, "Total Allocated under 128B: %10lld\n", G.stats.total_allocated_under128); - + for (i = 0; i < NUM_ORDERS; i++) if (G.stats.total_allocated_per_order[i]) { - fprintf (stderr, "Total Overhead page size %7d: %10lld\n", - OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]); - fprintf (stderr, "Total Allocated page size %7d: %10lld\n", - OBJECT_SIZE (i), G.stats.total_allocated_per_order[i]); + fprintf (stderr, "Total Overhead page size %7lu: %10lld\n", + (unsigned long) OBJECT_SIZE (i), + G.stats.total_overhead_per_order[i]); + fprintf (stderr, "Total Allocated page size %7lu: %10lld\n", + (unsigned long) OBJECT_SIZE (i), + G.stats.total_allocated_per_order[i]); } } #endif } +struct ggc_pch_ondisk +{ + unsigned totals[NUM_ORDERS]; +}; + struct ggc_pch_data { - struct ggc_pch_ondisk - { - unsigned totals[NUM_ORDERS]; - } d; + struct ggc_pch_ondisk d; size_t base[NUM_ORDERS]; size_t written[NUM_ORDERS]; }; @@ -2108,20 +2101,21 @@ struct ggc_pch_data struct ggc_pch_data * init_ggc_pch (void) { - return xcalloc (sizeof (struct ggc_pch_data), 1); + return XCNEW (struct ggc_pch_data); } void ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, - size_t size, bool is_string ATTRIBUTE_UNUSED) + size_t size, bool is_string ATTRIBUTE_UNUSED, + enum gt_types_enum type ATTRIBUTE_UNUSED) { unsigned order; - if (size <= 256) + if (size < NUM_SIZE_LOOKUP) order = size_lookup[size]; else { - order = 9; + order = 10; while (size > OBJECT_SIZE (order)) order++; } @@ -2156,16 +2150,17 @@ ggc_pch_this_base (struct ggc_pch_data *d, void *base) char * ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, - size_t size, bool is_string ATTRIBUTE_UNUSED) + size_t size, bool is_string ATTRIBUTE_UNUSED, + enum gt_types_enum type ATTRIBUTE_UNUSED) { unsigned order; char *result; - if (size <= 256) + if (size < NUM_SIZE_LOOKUP) order = size_lookup[size]; else { - order = 9; + order = 10; while (size > OBJECT_SIZE (order)) order++; } @@ -2188,13 +2183,13 @@ ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED, size_t size, bool is_string ATTRIBUTE_UNUSED) { unsigned order; - static const char emptyBytes[256]; + static const char emptyBytes[256] = { 0 }; - if (size <= 256) + if (size < NUM_SIZE_LOOKUP) order = size_lookup[size]; else { - order = 9; + order = 10; while (size > OBJECT_SIZE (order)) order++; } @@ -2212,8 +2207,7 @@ ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED, /* To speed small writes, we use a nulled-out array that's larger than most padding requests as the source for our null bytes. This permits us to do the padding with fwrite() rather than fseek(), and - limits the chance the the OS may try to flush any outstanding - writes. */ + limits the chance the OS may try to flush any outstanding writes. 
*/ if (padding <= sizeof(emptyBytes)) { if (fwrite (emptyBytes, 1, padding, f) != padding) @@ -2255,8 +2249,8 @@ move_ptes_to_front (int count_old_page_tables, int count_new_page_tables) page_entry **new_by_depth; unsigned long **new_save_in_use; - new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *)); - new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *)); + new_by_depth = XNEWVEC (page_entry *, G.by_depth_max); + new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max); memcpy (&new_by_depth[0], &G.by_depth[count_old_page_tables], @@ -2298,7 +2292,7 @@ ggc_pch_read (FILE *f, void *addr) { struct ggc_pch_ondisk d; unsigned i; - char *offs = addr; + char *offs = (char *) addr; unsigned long count_old_page_tables; unsigned long count_new_page_tables; @@ -2310,6 +2304,9 @@ ggc_pch_read (FILE *f, void *addr) #ifdef ENABLE_GC_CHECKING poison_pages (); #endif + /* Since we free all the allocated objects, the free list becomes + useless. Validate it now, which will also clear it. */ + validate_free_objects(); /* No object read from a PCH file should ever be freed. So, set the context depth to 1, and set the depth of all the currently-allocated @@ -2341,9 +2338,9 @@ ggc_pch_read (FILE *f, void *addr) bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize); num_objs = bytes / OBJECT_SIZE (i); - entry = xcalloc (1, (sizeof (struct page_entry) - - sizeof (long) - + BITMAP_SIZE (num_objs + 1))); + entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry) + - sizeof (long) + + BITMAP_SIZE (num_objs + 1))); entry->bytes = bytes; entry->page = offs; entry->context_depth = 0;
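
A minimal standalone sketch (not part of the patch) of the free-slot search that this diff speeds up in ggc_alloc_stat: when GCC_VERSION >= 3004, the old "shift and test" loop over in_use_p[word] is replaced by __builtin_ctzl on the complement of the word, because the first free object slot is exactly the first zero bit. The function names and sample values below are illustrative only and do not appear in the patch.

#include <stdio.h>
#include <stddef.h>

/* Old approach: scan upward until a clear (free) bit is found.  */
static unsigned
first_free_bit_loop (unsigned long in_use)
{
  unsigned bit = 0;
  while ((in_use >> bit) & 1)
    ++bit;
  return bit;
}

/* New approach: ~in_use has a 1 exactly where a slot is free, so the
   count of trailing zeros is the index of the first free slot.  Only
   valid when at least one bit is free, which ggc_alloc_stat guarantees
   by picking a word with ~entry->in_use_p[word] != 0.  */
static unsigned
first_free_bit_ctz (unsigned long in_use)
{
  return (unsigned) __builtin_ctzl (~in_use);
}

int
main (void)
{
  unsigned long samples[] = { 0x0UL, 0x1UL, 0xffUL, 0xffffff7fUL };
  size_t i;

  /* Both searches agree on every sample word.  */
  for (i = 0; i < sizeof samples / sizeof *samples; i++)
    printf ("in_use=%#lx -> loop=%u ctz=%u\n", samples[i],
            first_free_bit_loop (samples[i]),
            first_free_bit_ctz (samples[i]));
  return 0;
}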