X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fggc-page.c;h=578faf67ce3ed02d17f2325a7670052e1c5a5389;hb=d7997df08d801df20306d42f2c68cfe03d1acd26;hp=12b1f17b26583195521b9263ce04b07f4fb2c8cc;hpb=32bbbaacf25952ce6b923c5299335156e512030e;p=pf3gnuchains%2Fgcc-fork.git

diff --git a/gcc/ggc-page.c b/gcc/ggc-page.c
index 12b1f17b265..578faf67ce3 100644
--- a/gcc/ggc-page.c
+++ b/gcc/ggc-page.c
@@ -1,5 +1,6 @@
 /* "Bag-of-pages" garbage collector for the GNU compiler.
-   Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
+   Free Software Foundation, Inc.
 
 This file is part of GCC.
 
@@ -30,6 +31,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 #include "ggc.h"
 #include "timevar.h"
 #include "params.h"
+#include "tree-flow.h"
 #ifdef ENABLE_VALGRIND_CHECKING
 # ifdef HAVE_VALGRIND_MEMCHECK_H
 #  include <valgrind/memcheck.h>
@@ -183,11 +185,12 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
    thing you need to do to add a new special allocation size.  */
 
 static const size_t extra_order_size_table[] = {
+  sizeof (struct stmt_ann_d),
   sizeof (struct tree_decl),
   sizeof (struct tree_list),
   TREE_EXP_SIZE (2),
   RTL_SIZE (2),            /* MEM, PLUS, etc.  */
-  RTL_SIZE (9),            /* INSN, CALL_INSN, JUMP_INSN */
+  RTL_SIZE (9),            /* INSN */
 };
 
 /* The total number of orders.  */
@@ -246,6 +249,11 @@ typedef struct page_entry
      this is the last page-entry.  */
   struct page_entry *next;
 
+  /* The previous page-entry with objects of the same size, or NULL if
+     this is the first page-entry.  The PREV pointer exists solely to
+     keep the cost of ggc_free manageable.  */
+  struct page_entry *prev;
+
   /* The number of bytes allocated.  (This will always be a multiple
      of the host system page size.)  */
   size_t bytes;
@@ -402,12 +410,22 @@ static struct globals
      better runtime data access pattern.  */
   unsigned long **save_in_use;
 
+#ifdef ENABLE_GC_ALWAYS_COLLECT
+  /* List of free objects to be verified as actually free on the
+     next collection.  */
+  struct free_object
+  {
+    void *object;
+    struct free_object *next;
+  } *free_object_list;
+#endif
+
 #ifdef GATHER_STATISTICS
   struct
   {
-    /* Total memory allocated with ggc_alloc */
+    /* Total memory allocated with ggc_alloc.  */
     unsigned long long total_allocated;
-    /* Total overhead for memory to be allocated with ggc_alloc */
+    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;
 
     /* Total allocations and overhead for sizes less than 32, 64 and 128.
@@ -423,6 +441,9 @@ static struct globals
     unsigned long long total_allocated_under128;
     unsigned long long total_overhead_under128;
 
+    /* The allocations for each of the allocation orders.  */
+    unsigned long long total_allocated_per_order[NUM_ORDERS];
+
     /* The overhead for each of the allocation orders.  */
     unsigned long long total_overhead_per_order[NUM_ORDERS];
   } stats;
@@ -464,10 +485,6 @@ static void compute_inverse (unsigned);
 static inline void adjust_depth (void);
 static void move_ptes_to_front (int, int);
 
-#ifdef ENABLE_GC_CHECKING
-static void poison_pages (void);
-#endif
-
 void debug_print_page_list (int);
 static void push_depth (unsigned int);
 static void push_by_depth (page_entry *, unsigned long *);
@@ -637,12 +654,12 @@ static inline char *
 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
 {
 #ifdef HAVE_MMAP_ANON
-  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
-                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
+                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 #endif
 #ifdef HAVE_MMAP_DEV_ZERO
-  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
-                              MAP_PRIVATE, G.dev_zero_fd, 0);
+  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
+                     MAP_PRIVATE, G.dev_zero_fd, 0);
 #endif
 
   if (page == (char *) MAP_FAILED)
@@ -892,7 +909,7 @@ adjust_depth (void)
 
 /* For a page that is no longer needed, put it on the free page list.  */
 
-static inline void
+static void
 free_page (page_entry *entry)
 {
   if (GGC_DEBUG_LEVEL >= 2)
@@ -1029,34 +1046,39 @@ static unsigned char size_lookup[257] =
 
 /* Typed allocation function.  Does nothing special in this collector.  */
 
 void *
-ggc_alloc_typed (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size)
+ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
+                      MEM_STAT_DECL)
 {
-  return ggc_alloc (size);
+  return ggc_alloc_stat (size PASS_MEM_STAT);
 }
 
 /* Zone allocation function.  Does nothing special in this collector.  */
 
 void *
-ggc_alloc_zone (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED)
+ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED
+                     MEM_STAT_DECL)
 {
-  return ggc_alloc (size);
+  return ggc_alloc_stat (size PASS_MEM_STAT);
 }
 
 /* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */
 
 void *
-ggc_alloc (size_t size)
+ggc_alloc_stat (size_t size MEM_STAT_DECL)
 {
-  unsigned order, word, bit, object_offset;
+  size_t order, word, bit, object_offset, object_size;
   struct page_entry *entry;
   void *result;
 
   if (size <= 256)
-    order = size_lookup[size];
+    {
+      order = size_lookup[size];
+      object_size = OBJECT_SIZE (order);
+    }
   else
     {
       order = 9;
-      while (size > OBJECT_SIZE (order))
+      while (size > (object_size = OBJECT_SIZE (order)))
        order++;
     }
 
@@ -1079,12 +1101,18 @@ ggc_alloc (size_t size)
       while (new_entry->context_depth >= G.depth_in_use)
        push_depth (G.by_depth_in_use-1);
 
-      /* If this is the only entry, it's also the tail.  */
+      /* If this is the only entry, it's also the tail.  If it is not
+        the only entry, then we must update the PREV pointer of the
+        ENTRY (G.pages[order]) to point to our new page entry.  */
       if (entry == NULL)
        G.page_tails[order] = new_entry;
+      else
+       entry->prev = new_entry;
 
-      /* Put new pages at the head of the page list.  */
+      /* Put new pages at the head of the page list.  By definition the
+        entry at the head of the list always has a NULL pointer.  */
       new_entry->next = entry;
+      new_entry->prev = NULL;
       entry = new_entry;
       G.pages[order] = new_entry;
 
@@ -1119,7 +1147,7 @@ ggc_alloc (size_t size)
       /* Next time, try the next bit.  */
       entry->next_bit_hint = hint + 1;
 
-      object_offset = hint * OBJECT_SIZE (order);
+      object_offset = hint * object_size;
     }
 
   /* Set the in-use bit.  */
@@ -1133,30 +1161,43 @@ ggc_alloc (size_t size)
       && entry->next != NULL
       && entry->next->num_free_objects > 0)
     {
+      /* We have a new head for the list.  */
       G.pages[order] = entry->next;
+
+      /* We are moving ENTRY to the end of the page table list.
+        The new page at the head of the list will have NULL in
+        its PREV field and ENTRY will have NULL in its NEXT field.  */
+      entry->next->prev = NULL;
       entry->next = NULL;
+
+      /* Append ENTRY to the tail of the list.  */
+      entry->prev = G.page_tails[order];
       G.page_tails[order]->next = entry;
       G.page_tails[order] = entry;
     }
 
   /* Calculate the object's address.  */
   result = entry->page + object_offset;
+#ifdef GATHER_STATISTICS
+  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
+                       result PASS_MEM_STAT);
+#endif
 
 #ifdef ENABLE_GC_CHECKING
   /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
      exact same semantics in presence of memory bugs, regardless of
      ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
      handle to avoid handle leak.  */
-  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, OBJECT_SIZE (order)));
+  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));
 
   /* `Poison' the entire allocated object, including any padding at
      the end.  */
-  memset (result, 0xaf, OBJECT_SIZE (order));
+  memset (result, 0xaf, object_size);
 
   /* Make the bytes after the end of the object unaccessible.  Discard the
      handle to avoid handle leak.  */
   VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
-                                            OBJECT_SIZE (order) - size));
+                                            object_size - size));
 #endif
 
   /* Tell Valgrind that the memory is there, but its content isn't
@@ -1166,36 +1207,39 @@ ggc_alloc (size_t size)
 
   /* Keep track of how many bytes are being allocated.  This
      information is used in deciding when to collect.  */
-  G.allocated += OBJECT_SIZE (order);
+  G.allocated += object_size;
 
 #ifdef GATHER_STATISTICS
   {
-    G.stats.total_overhead += OBJECT_SIZE (order) - size;
-    G.stats.total_overhead_per_order[order] += OBJECT_SIZE (order) - size;
-    G.stats.total_allocated += OBJECT_SIZE(order);
-
-    if (size <= 32){
-      G.stats.total_overhead_under32 += OBJECT_SIZE (order) - size;
-      G.stats.total_allocated_under32 += OBJECT_SIZE(order);
-    }
+    size_t overhead = object_size - size;
 
-    if (size <= 64){
-      G.stats.total_overhead_under64 += OBJECT_SIZE (order) - size;
-      G.stats.total_allocated_under64 += OBJECT_SIZE(order);
-    }
-
-    if (size <= 128){
-      G.stats.total_overhead_under128 += OBJECT_SIZE (order) - size;
-      G.stats.total_allocated_under128 += OBJECT_SIZE(order);
-    }
+    G.stats.total_overhead += overhead;
+    G.stats.total_allocated += object_size;
+    G.stats.total_overhead_per_order[order] += overhead;
+    G.stats.total_allocated_per_order[order] += object_size;
 
+    if (size <= 32)
+      {
+       G.stats.total_overhead_under32 += overhead;
+       G.stats.total_allocated_under32 += object_size;
+      }
+    if (size <= 64)
+      {
+       G.stats.total_overhead_under64 += overhead;
+       G.stats.total_allocated_under64 += object_size;
+      }
+    if (size <= 128)
+      {
+       G.stats.total_overhead_under128 += overhead;
+       G.stats.total_allocated_under128 += object_size;
+      }
   }
 #endif
-  
+
   if (GGC_DEBUG_LEVEL >= 3)
     fprintf (G.debug_file,
             "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
-            (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
+            (unsigned long) size, (unsigned long) object_size, result,
             (void *) entry);
 
   return result;
@@ -1276,6 +1320,94 @@ ggc_get_size (const void *p)
   page_entry *pe = lookup_page_table_entry (p);
   return OBJECT_SIZE (pe->order);
 }
+
+/* Release the memory for object P.  */
+
+void
+ggc_free (void *p)
+{
+  page_entry *pe = lookup_page_table_entry (p);
+  size_t order = pe->order;
+  size_t size = OBJECT_SIZE (order);
+
+#ifdef GATHER_STATISTICS
+  ggc_free_overhead (p);
+#endif
+
+  if (GGC_DEBUG_LEVEL >= 3)
+    fprintf (G.debug_file,
+            "Freeing object, actual size=%lu, at %p on %p\n",
+            (unsigned long) size, p, (void *) pe);
+
+#ifdef ENABLE_GC_CHECKING
+  /* Poison the data, to indicate the data is garbage.  */
+  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
+  memset (p, 0xa5, size);
+#endif
+  /* Let valgrind know the object is free.  */
+  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));
+
+#ifdef ENABLE_GC_ALWAYS_COLLECT
+  /* In the completely-anal-checking mode, we do *not* immediately free
+     the data, but instead verify that the data is *actually* not
+     reachable the next time we collect.  */
+  {
+    struct free_object *fo = xmalloc (sizeof (struct free_object));
+    fo->object = p;
+    fo->next = G.free_object_list;
+    G.free_object_list = fo;
+  }
+#else
+  {
+    unsigned int bit_offset, word, bit;
+
+    G.allocated -= size;
+
+    /* Mark the object not-in-use.  */
+    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
+    word = bit_offset / HOST_BITS_PER_LONG;
+    bit = bit_offset % HOST_BITS_PER_LONG;
+    pe->in_use_p[word] &= ~(1UL << bit);
+
+    if (pe->num_free_objects++ == 0)
+      {
+       page_entry *p, *q;
+
+       /* If the page is completely full, then it's supposed to
+          be after all pages that aren't.  Since we've freed one
+          object from a page that was full, we need to move the
+          page to the head of the list.
+
+          PE is the node we want to move.  Q is the previous node
+          and P is the next node in the list.  */
+       q = pe->prev;
+       if (q && q->num_free_objects == 0)
+         {
+           p = pe->next;
+
+           q->next = p;
+
+           /* If PE was at the end of the list, then Q becomes the
+              new end of the list.  If PE was not the end of the
+              list, then we need to update the PREV field for P.  */
+           if (!p)
+             G.page_tails[order] = q;
+           else
+             p->prev = q;
+
+           /* Move PE to the head of the list.  */
+           pe->next = G.pages[order];
+           pe->prev = NULL;
+           G.pages[order]->prev = pe;
+           G.pages[order] = pe;
+         }
+
+       /* Reset the hint bit to point to the only free object.  */
+       pe->next_bit_hint = bit_offset;
+      }
+  }
+#endif
+}
 
 /* Subroutine of init_ggc which computes the pair of numbers used to
    perform division by OBJECT_SIZE (order) and fills in inverse_table[].
@@ -1401,6 +1533,20 @@ init_ggc (void)
   G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
 }
 
+/* Start a new GGC zone.  */
+
+struct alloc_zone *
+new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
+{
+  return NULL;
+}
+
+/* Destroy a GGC zone.  */
+void
+destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
+{
+}
+
 /* Increment the `GC context'.  Objects allocated in an outer context
    are never freed, eliminating the need to register their roots.  */
 
@@ -1550,7 +1696,7 @@ ggc_pop_context (void)
 
 /* Unmark all objects.  */
 
-static inline void
+static void
 clear_marks (void)
 {
   unsigned order;
@@ -1595,7 +1741,7 @@ clear_marks (void)
 /* Free all empty pages.  Partially empty pages need no attention
    because the `mark' bit doubles as an `unused' bit.  */
 
-static inline void
+static void
 sweep_pages (void)
 {
   unsigned order;
@@ -1639,10 +1785,17 @@ sweep_pages (void)
          /* Remove the page if it's empty.  */
          else if (live_objects == 0)
            {
+             /* If P was the first page in the list, then NEXT
+                becomes the new first page in the list, otherwise
+                splice P out of the forward pointers.  */
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;
+
+             /* Splice P out of the back pointers too.  */
+             if (next)
+               next->prev = previous;
 
              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
@@ -1659,6 +1812,7 @@ sweep_pages (void)
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
+                 p->prev = G.page_tails[order];
                  G.page_tails[order]->next = p;
 
                  /* Update the tail pointer...  */
@@ -1669,6 +1823,11 @@ sweep_pages (void)
                    G.pages[order] = next;
                  else
                    previous->next = next;
+
+                 /* And update the backpointer in NEXT if necessary.  */
+                 if (next)
+                   next->prev = previous;
+
                  p = previous;
                }
            }
@@ -1680,8 +1839,19 @@ sweep_pages (void)
          else if (p != G.pages[order])
            {
              previous->next = p->next;
+
+             /* Update the backchain in the next node if it exists.  */
+             if (p->next)
+               p->next->prev = previous;
+
+             /* Move P to the head of the list.  */
              p->next = G.pages[order];
+             p->prev = NULL;
+             G.pages[order]->prev = p;
+
+             /* Update the head pointer.  */
              G.pages[order] = p;
+
              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
@@ -1704,7 +1874,7 @@ sweep_pages (void)
 #ifdef ENABLE_GC_CHECKING
 /* Clobber all free objects.  */
 
-static inline void
+static void
 poison_pages (void)
 {
   unsigned order;
@@ -1750,6 +1920,49 @@ poison_pages (void)
        }
     }
 }
+#else
+#define poison_pages()
+#endif
+
+#ifdef ENABLE_GC_ALWAYS_COLLECT
+/* Validate that the reportedly free objects actually are.  */
+
+static void
+validate_free_objects (void)
+{
+  struct free_object *f, *next, *still_free = NULL;
+
+  for (f = G.free_object_list; f ; f = next)
+    {
+      page_entry *pe = lookup_page_table_entry (f->object);
+      size_t bit, word;
+
+      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
+      word = bit / HOST_BITS_PER_LONG;
+      bit = bit % HOST_BITS_PER_LONG;
+      next = f->next;
+
+      /* Make certain it isn't visible from any root.  Notice that we
+        do this check before sweep_pages merges save_in_use_p.  */
+      if (pe->in_use_p[word] & (1UL << bit))
+       abort ();
+
+      /* If the object comes from an outer context, then retain the
+        free_object entry, so that we can verify that the address
+        isn't live on the stack in some outer context.  */
+      if (pe->context_depth != G.context_depth)
+       {
+         f->next = still_free;
+         still_free = f;
+       }
+      else
+       free (f);
+    }
+
+  G.free_object_list = still_free;
+}
+#else
+#define validate_free_objects()
 #endif
 
 /* Top level mark-and-sweep routine.  */
@@ -1765,12 +1978,14 @@ ggc_collect (void)
   float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
 
-  if (G.allocated < allocated_last_gc + min_expand)
+  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
     return;
 
   timevar_push (TV_GC);
   if (!quiet_flag)
     fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
+  if (GGC_DEBUG_LEVEL >= 2)
+    fprintf (G.debug_file, "BEGIN COLLECTING\n");
 
   /* Zero the total allocated bytes.  This will be recalculated in the
      sweep phase.  */
@@ -1785,11 +2000,11 @@ ggc_collect (void)
 
   clear_marks ();
   ggc_mark_roots ();
-
-#ifdef ENABLE_GC_CHECKING
-  poison_pages ();
+#ifdef GATHER_STATISTICS
+  ggc_prune_overhead_list ();
 #endif
-
+  poison_pages ();
+  validate_free_objects ();
   sweep_pages ();
 
   G.allocated_last_gc = G.allocated;
@@ -1798,6 +2013,8 @@ ggc_collect (void)
 
   if (!quiet_flag)
     fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
+  if (GGC_DEBUG_LEVEL >= 2)
+    fprintf (G.debug_file, "END COLLECTING\n");
 }
 
 /* Print allocation statistics.  */
@@ -1806,7 +2023,7 @@ ggc_collect (void)
                 : ((x) < 1024*1024*10 \
                    ? (x) / 1024 \
                    : (x) / (1024*1024))))
-#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
+#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
 
 void
 ggc_print_statistics (void)
@@ -1830,6 +2047,8 @@ ggc_print_statistics (void)
 
   /* Collect some information about the various sizes of allocation.  */
+  fprintf (stderr,
+          "Memory still allocated at the end of the compilation process\n");
   fprintf (stderr, "%-5s %10s %10s %10s\n",
           "Size", "Allocated", "Used", "Overhead");
   for (i = 0; i < NUM_ORDERS; ++i)
@@ -1859,18 +2078,20 @@
        }
       fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
               (unsigned long) OBJECT_SIZE (i),
-              SCALE (allocated), LABEL (allocated),
-              SCALE (in_use), LABEL (in_use),
-              SCALE (overhead), LABEL (overhead));
+              SCALE (allocated), STAT_LABEL (allocated),
+              SCALE (in_use), STAT_LABEL (in_use),
+              SCALE (overhead), STAT_LABEL (overhead));
       total_overhead += overhead;
     }
   fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
-          SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
-          SCALE (G.allocated), LABEL(G.allocated),
-          SCALE (total_overhead), LABEL (total_overhead));
+          SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
+          SCALE (G.allocated), STAT_LABEL(G.allocated),
+          SCALE (total_overhead), STAT_LABEL (total_overhead));
 
 #ifdef GATHER_STATISTICS
   {
+    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
+
     fprintf (stderr, "Total Overhead: %10lld\n",
            G.stats.total_overhead);
     fprintf (stderr, "Total Allocated: %10lld\n",
@@ -1890,9 +2111,13 @@ ggc_print_statistics (void)
            G.stats.total_allocated_under128);
 
    for (i = 0; i < NUM_ORDERS; i++)
-      if (G.stats.total_overhead_per_order[i])
-       fprintf (stderr, "Total Overhead page size %7d: %10lld\n",
-                OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]);
+      if (G.stats.total_allocated_per_order[i])
+       {
+         fprintf (stderr, "Total Overhead page size %7d: %10lld\n",
+                  OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]);
+         fprintf (stderr, "Total Allocated page size %7d: %10lld\n",
+                  OBJECT_SIZE (i), G.stats.total_allocated_per_order[i]);
+       }
  }
 #endif
 }
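
The PREV field this patch adds to page_entry exists so that ggc_free and sweep_pages can splice a page out of its order's page list in constant time, rather than walking the singly-linked NEXT chain to find the predecessor. The following standalone sketch is illustrative only, not GCC code: the node type, the head/tail parameters (standing in for G.pages[order] and G.page_tails[order]), and the helper name are invented for the example. It shows the same unlink-and-move-to-head pattern ggc_free performs when a previously full page regains a free object:

    #include <stddef.h>

    /* Illustrative stand-in for page_entry: a node in a doubly-linked
       list with external head and tail pointers.  */
    struct node
    {
      struct node *next;
      struct node *prev;
    };

    /* Move N to the head of the list in O(1), the way ggc_free moves a
       page that was full and has just had one object freed.  As the
       patch's comments note, the head's PREV pointer is always NULL.  */
    static void
    move_to_head (struct node **head, struct node **tail, struct node *n)
    {
      if (*head == n)
        return;

      /* Unlink N; the PREV pointer makes this possible without a walk.
         N is not the head, so n->prev is non-NULL here.  */
      n->prev->next = n->next;
      if (n->next)
        n->next->prev = n->prev;
      else
        *tail = n->prev;   /* N was the tail; its predecessor is now.  */

      /* Relink N at the head.  */
      n->next = *head;
      n->prev = NULL;
      (*head)->prev = n;
      *head = n;
    }

Without PREV, the unlink step would require scanning from the head to find the predecessor, which is what made a cheap ggc_free impractical before this change.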
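Similarly, freeing an object in this collector does not return memory to the system; ggc_free only clears the object's bit in the page's in-use bitmap, and sweep_pages later releases pages whose bitmaps are empty. Below is a hedged sketch of the word/bit arithmetic: the function name is invented, BITS_PER_LONG stands in for GCC's HOST_BITS_PER_LONG, and the plain division stands in for OFFSET_TO_BIT, which in GCC avoids the divide by using a precomputed multiplicative inverse of the object size (see compute_inverse):

    #include <limits.h>
    #include <stddef.h>

    #define BITS_PER_LONG (sizeof (long) * CHAR_BIT)

    /* Clear the in-use bit for the object at byte offset OFFSET within
       a page whose objects are all OBJECT_SIZE bytes, mirroring the
       word/bit computation in ggc_free.  */
    static void
    mark_object_free (unsigned long *in_use_p, size_t offset,
                      size_t object_size)
    {
      size_t bit_offset = offset / object_size;  /* object's index in page */
      size_t word = bit_offset / BITS_PER_LONG;
      size_t bit = bit_offset % BITS_PER_LONG;

      in_use_p[word] &= ~(1UL << bit);
    }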