X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fggc-page.c;h=6b691eec759d8ed6fc3de0554eb9c0459f3c5a20;hb=fbbc6fdbe73dde1b85b992579ce30f6d90ea62e8;hp=362bfe19763ccf9f8716f643f8d0d9b87b8ec66d;hpb=bff490025616739605f60e4778b42d2a55afd858;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/ggc-page.c b/gcc/ggc-page.c index 362bfe19763..6b691eec759 100644 --- a/gcc/ggc-page.c +++ b/gcc/ggc-page.c @@ -1,12 +1,12 @@ /* "Bag-of-pages" garbage collector for the GNU compiler. - Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004 + Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free -Software Foundation; either version 2, or (at your option) any later +Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY @@ -15,9 +15,8 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License -along with GCC; see the file COPYING. If not, write to the Free -Software Foundation, 59 Temple Place - Suite 330, Boston, MA -02111-1307, USA. */ +along with GCC; see the file COPYING3. If not see +. */ #include "config.h" #include "system.h" @@ -75,7 +74,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #define USING_MALLOC_PAGE_GROUPS #endif -/* Stategy: +/* Strategy: This garbage-collecting allocator allocates objects on one of a set of pages. Each page can allocate objects of a single size only; @@ -186,8 +185,19 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA static const size_t extra_order_size_table[] = { sizeof (struct stmt_ann_d), - sizeof (struct tree_decl), + sizeof (struct var_ann_d), + sizeof (struct tree_decl_non_common), + sizeof (struct tree_field_decl), + sizeof (struct tree_parm_decl), + sizeof (struct tree_var_decl), sizeof (struct tree_list), + sizeof (struct tree_ssa_name), + sizeof (struct function), + sizeof (struct basic_block_def), + sizeof (bitmap_element), + /* PHI nodes with one to three arguments are already covered by the + above sizes. */ + sizeof (struct tree_phi_node) + sizeof (struct phi_arg_d) * 3, TREE_EXP_SIZE (2), RTL_SIZE (2), /* MEM, PLUS, etc. */ RTL_SIZE (9), /* INSN */ @@ -495,9 +505,6 @@ static void move_ptes_to_front (int, int); void debug_print_page_list (int); static void push_depth (unsigned int); static void push_by_depth (page_entry *, unsigned long *); -struct alloc_zone *rtl_zone = NULL; -struct alloc_zone *tree_zone = NULL; -struct alloc_zone *garbage_zone = NULL; /* Push an entry onto G.depth. */ @@ -628,7 +635,7 @@ found: L2 = LOOKUP_L2 (p); if (base[L1] == NULL) - base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *)); + base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE); base[L1][L2] = entry; } @@ -1021,8 +1028,8 @@ release_pages (void) /* This table provides a fast way to determine ceil(log_2(size)) for allocation requests. The minimum allocation size is eight bytes. 
*/ - -static unsigned char size_lookup[257] = +#define NUM_SIZE_LOOKUP 512 +static unsigned char size_lookup[NUM_SIZE_LOOKUP] = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, @@ -1040,7 +1047,22 @@ static unsigned char size_lookup[257] = 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8 + 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; /* Typed allocation function. Does nothing special in this collector. */ @@ -1052,15 +1074,6 @@ ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size return ggc_alloc_stat (size PASS_MEM_STAT); } -/* Zone allocation function. Does nothing special in this collector. */ - -void * -ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED - MEM_STAT_DECL) -{ - return ggc_alloc_stat (size PASS_MEM_STAT); -} - /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */ void * @@ -1070,14 +1083,14 @@ ggc_alloc_stat (size_t size MEM_STAT_DECL) struct page_entry *entry; void *result; - if (size <= 256) + if (size < NUM_SIZE_LOOKUP) { order = size_lookup[size]; object_size = OBJECT_SIZE (order); } else { - order = 9; + order = 10; while (size > (object_size = OBJECT_SIZE (order))) order++; } @@ -1215,6 +1228,9 @@ ggc_alloc_stat (size_t size MEM_STAT_DECL) information is used in deciding when to collect. */ G.allocated += object_size; + /* For timevar statistics. */ + timevar_ggc_mem_total += object_size; + #ifdef GATHER_STATISTICS { size_t overhead = object_size - size; @@ -1352,7 +1368,7 @@ ggc_free (void *p) the data, but instead verify that the data is *actually* not reachable the next time we collect. */ { - struct free_object *fo = xmalloc (sizeof (struct free_object)); + struct free_object *fo = XNEW (struct free_object); fo->object = p; fo->next = G.free_object_list; G.free_object_list = fo; @@ -1478,7 +1494,7 @@ init_ggc (void) } /* We have a good page, might as well hold onto it... 
*/ - e = xcalloc (1, sizeof (struct page_entry)); + e = XCNEW (struct page_entry); e->bytes = G.pagesize; e->page = p; e->next = G.free_pages; @@ -1517,19 +1533,22 @@ init_ggc (void) int o; int i; - o = size_lookup[OBJECT_SIZE (order)]; - for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i) + i = OBJECT_SIZE (order); + if (i >= NUM_SIZE_LOOKUP) + continue; + + for (o = size_lookup[i]; o == size_lookup [i]; --i) size_lookup[i] = order; } G.depth_in_use = 0; G.depth_max = 10; - G.depth = xmalloc (G.depth_max * sizeof (unsigned int)); + G.depth = XNEWVEC (unsigned int, G.depth_max); G.by_depth_in_use = 0; G.by_depth_max = INITIAL_PTE_COUNT; - G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *)); - G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *)); + G.by_depth = XNEWVEC (page_entry *, G.by_depth_max); + G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max); } /* Start a new GGC zone. */ @@ -1546,18 +1565,6 @@ destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED) { } -/* Increment the `GC context'. Objects allocated in an outer context - are never freed, eliminating the need to register their roots. */ - -void -ggc_push_context (void) -{ - ++G.context_depth; - - /* Die on wrap. */ - gcc_assert (G.context_depth < HOST_BITS_PER_LONG); -} - /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P reflects reality. Recalculate NUM_FREE_OBJECTS as well. */ @@ -1593,89 +1600,6 @@ ggc_recalculate_in_use_p (page_entry *p) gcc_assert (p->num_free_objects < num_objects); } - -/* Decrement the `GC context'. All objects allocated since the - previous ggc_push_context are migrated to the outer context. */ - -void -ggc_pop_context (void) -{ - unsigned long omask; - unsigned int depth, i, e; -#ifdef ENABLE_CHECKING - unsigned int order; -#endif - - depth = --G.context_depth; - omask = (unsigned long)1 << (depth + 1); - - if (!((G.context_depth_allocations | G.context_depth_collections) & omask)) - return; - - G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1; - G.context_depth_allocations &= omask - 1; - G.context_depth_collections &= omask - 1; - - /* The G.depth array is shortened so that the last index is the - context_depth of the top element of by_depth. */ - if (depth+1 < G.depth_in_use) - e = G.depth[depth+1]; - else - e = G.by_depth_in_use; - - /* We might not have any PTEs of depth depth. */ - if (depth < G.depth_in_use) - { - - /* First we go through all the pages at depth depth to - recalculate the in use bits. */ - for (i = G.depth[depth]; i < e; ++i) - { - page_entry *p = G.by_depth[i]; - - /* Check that all of the pages really are at the depth that - we expect. */ - gcc_assert (p->context_depth == depth); - gcc_assert (p->index_by_depth == i); - - prefetch (&save_in_use_p_i (i+8)); - prefetch (&save_in_use_p_i (i+16)); - if (save_in_use_p_i (i)) - { - p = G.by_depth[i]; - ggc_recalculate_in_use_p (p); - free (save_in_use_p_i (i)); - save_in_use_p_i (i) = 0; - } - } - } - - /* Then, we reset all page_entries with a depth greater than depth - to be at depth. */ - for (i = e; i < G.by_depth_in_use; ++i) - { - page_entry *p = G.by_depth[i]; - - /* Check that all of the pages really are at the depth we - expect. 
*/ - gcc_assert (p->context_depth > depth); - gcc_assert (p->index_by_depth == i); - p->context_depth = depth; - } - - adjust_depth (); - -#ifdef ENABLE_CHECKING - for (order = 2; order < NUM_ORDERS; order++) - { - page_entry *p; - - for (p = G.pages[order]; p != NULL; p = p->next) - gcc_assert (p->context_depth < depth || - (p->context_depth == depth && !save_in_use_p (p))); - } -#endif -} /* Unmark all objects. */ @@ -2092,10 +2016,12 @@ ggc_print_statistics (void) for (i = 0; i < NUM_ORDERS; i++) if (G.stats.total_allocated_per_order[i]) { - fprintf (stderr, "Total Overhead page size %7d: %10lld\n", - OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]); - fprintf (stderr, "Total Allocated page size %7d: %10lld\n", - OBJECT_SIZE (i), G.stats.total_allocated_per_order[i]); + fprintf (stderr, "Total Overhead page size %7lu: %10lld\n", + (unsigned long) OBJECT_SIZE (i), + G.stats.total_overhead_per_order[i]); + fprintf (stderr, "Total Allocated page size %7lu: %10lld\n", + (unsigned long) OBJECT_SIZE (i), + G.stats.total_allocated_per_order[i]); } } #endif @@ -2114,20 +2040,21 @@ struct ggc_pch_data struct ggc_pch_data * init_ggc_pch (void) { - return xcalloc (sizeof (struct ggc_pch_data), 1); + return XCNEW (struct ggc_pch_data); } void ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, - size_t size, bool is_string ATTRIBUTE_UNUSED) + size_t size, bool is_string ATTRIBUTE_UNUSED, + enum gt_types_enum type ATTRIBUTE_UNUSED) { unsigned order; - if (size <= 256) + if (size < NUM_SIZE_LOOKUP) order = size_lookup[size]; else { - order = 9; + order = 10; while (size > OBJECT_SIZE (order)) order++; } @@ -2162,16 +2089,17 @@ ggc_pch_this_base (struct ggc_pch_data *d, void *base) char * ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, - size_t size, bool is_string ATTRIBUTE_UNUSED) + size_t size, bool is_string ATTRIBUTE_UNUSED, + enum gt_types_enum type ATTRIBUTE_UNUSED) { unsigned order; char *result; - if (size <= 256) + if (size < NUM_SIZE_LOOKUP) order = size_lookup[size]; else { - order = 9; + order = 10; while (size > OBJECT_SIZE (order)) order++; } @@ -2196,11 +2124,11 @@ ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED, unsigned order; static const char emptyBytes[256]; - if (size <= 256) + if (size < NUM_SIZE_LOOKUP) order = size_lookup[size]; else { - order = 9; + order = 10; while (size > OBJECT_SIZE (order)) order++; } @@ -2218,8 +2146,7 @@ ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED, /* To speed small writes, we use a nulled-out array that's larger than most padding requests as the source for our null bytes. This permits us to do the padding with fwrite() rather than fseek(), and - limits the chance the the OS may try to flush any outstanding - writes. */ + limits the chance the OS may try to flush any outstanding writes. 
*/ if (padding <= sizeof(emptyBytes)) { if (fwrite (emptyBytes, 1, padding, f) != padding) @@ -2261,8 +2188,8 @@ move_ptes_to_front (int count_old_page_tables, int count_new_page_tables) page_entry **new_by_depth; unsigned long **new_save_in_use; - new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *)); - new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *)); + new_by_depth = XNEWVEC (page_entry *, G.by_depth_max); + new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max); memcpy (&new_by_depth[0], &G.by_depth[count_old_page_tables], @@ -2316,6 +2243,9 @@ ggc_pch_read (FILE *f, void *addr) #ifdef ENABLE_GC_CHECKING poison_pages (); #endif + /* Since we free all the allocated objects, the free list becomes + useless. Validate it now, which will also clear it. */ + validate_free_objects(); /* No object read from a PCH file should ever be freed. So, set the context depth to 1, and set the depth of all the currently-allocated
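
For readers following the size_lookup changes in the hunks above: the patch widens the table from 257 to 512 entries and moves the fallback search up to order 10, so any request below 512 bytes resolves with a single table probe. The stand-alone sketch below is not part of the patch; it only mirrors the lookup logic of the patched ggc_alloc_stat. The names build_size_lookup and order_for_size are illustrative, OBJECT_SIZE is approximated by plain powers of two, and the remapping that init_ggc later applies for extra_order_size_table entries is ignored here.

#include <stdio.h>
#include <stddef.h>

#define NUM_SIZE_LOOKUP 512

/* Rebuild the default contents of size_lookup: the smallest power-of-two
   order, but never below 3 (eight bytes), whose size covers the request.  */
static unsigned char size_lookup[NUM_SIZE_LOOKUP];

static void
build_size_lookup (void)
{
  size_t size;
  for (size = 0; size < NUM_SIZE_LOOKUP; size++)
    {
      unsigned order = 3;
      while (((size_t) 1 << order) < size)
        order++;
      size_lookup[size] = order;
    }
}

/* Mirror of the lookup in the patched ggc_alloc_stat: a table probe for
   small sizes, otherwise a linear search starting at order 10.  Object
   sizes are treated as plain powers of two in this sketch.  */
static unsigned
order_for_size (size_t size)
{
  unsigned order;
  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > ((size_t) 1 << order))
        order++;
    }
  return order;
}

int
main (void)
{
  build_size_lookup ();
  printf ("%u %u %u %u\n",
          order_for_size (1),     /* 3: minimum allocation is 8 bytes      */
          order_for_size (256),   /* 8                                     */
          order_for_size (300),   /* 9: now covered by the widened table   */
          order_for_size (4000)); /* 12: falls through to the loop         */
  return 0;
}

In the real allocator, init_ggc then rewrites the matching size_lookup entries for each extra order (see the loop over NUM_SIZE_LOOKUP in the init_ggc hunk), so sizes such as sizeof (struct function) map to a tighter non-power-of-two bucket; the sketch stops short of that step.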