X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fggc-page.c;h=807bded61fcf0d97f1686dc4992b849075ea35ff;hb=9c5b6e15021ce28bae0ea7a2afed31fdea871af0;hp=ea637b1b63c2869f4eed2362cebe3d4d639e9249;hpb=a7779e7501b3b5de7cdc621f737f72098c2d620c;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/ggc-page.c b/gcc/ggc-page.c index ea637b1b63c..807bded61fc 100644 --- a/gcc/ggc-page.c +++ b/gcc/ggc-page.c @@ -1,5 +1,5 @@ /* "Bag-of-pages" garbage collector for the GNU compiler. - Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007 + Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009 Free Software Foundation, Inc. This file is part of GCC. @@ -31,6 +31,8 @@ along with GCC; see the file COPYING3. If not see #include "timevar.h" #include "params.h" #include "tree-flow.h" +#include "cfgloop.h" +#include "plugin.h" /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a file open. Prefer either to valloc. */ @@ -156,6 +158,24 @@ along with GCC; see the file COPYING3. If not see #define OFFSET_TO_BIT(OFFSET, ORDER) \ (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER)) +/* We use this structure to determine the alignment required for + allocations. For power-of-two sized allocations, that's not a + problem, but it does matter for odd-sized allocations. + We do not care about alignment for floating-point types. */ + +struct max_alignment { + char c; + union { + HOST_WIDEST_INT i; + void *p; + } u; +}; + +/* The biggest alignment required. */ + +#define MAX_ALIGNMENT (offsetof (struct max_alignment, u)) + + /* The number of extra orders, not corresponding to power-of-two sized objects. */ @@ -172,46 +192,35 @@ along with GCC; see the file COPYING3. If not see thing you need to do to add a new special allocation size. */ static const size_t extra_order_size_table[] = { - sizeof (struct stmt_ann_d), - sizeof (struct var_ann_d), + /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT. + There are a lot of structures with these sizes and explicitly + listing them risks orders being dropped because they changed size. */ + MAX_ALIGNMENT * 3, + MAX_ALIGNMENT * 5, + MAX_ALIGNMENT * 6, + MAX_ALIGNMENT * 7, + MAX_ALIGNMENT * 9, + MAX_ALIGNMENT * 10, + MAX_ALIGNMENT * 11, + MAX_ALIGNMENT * 12, + MAX_ALIGNMENT * 13, + MAX_ALIGNMENT * 14, + MAX_ALIGNMENT * 15, sizeof (struct tree_decl_non_common), sizeof (struct tree_field_decl), sizeof (struct tree_parm_decl), sizeof (struct tree_var_decl), - sizeof (struct tree_list), - sizeof (struct tree_ssa_name), + sizeof (struct tree_type), sizeof (struct function), sizeof (struct basic_block_def), - sizeof (bitmap_element), - sizeof (bitmap_head), - /* PHI nodes with one to three arguments are already covered by the - above sizes. */ - sizeof (struct tree_phi_node) + sizeof (struct phi_arg_d) * 3, - TREE_EXP_SIZE (2), - RTL_SIZE (2), /* MEM, PLUS, etc. */ - RTL_SIZE (9), /* INSN */ + sizeof (struct cgraph_node), + sizeof (struct loop), }; /* The total number of orders. */ #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS) -/* We use this structure to determine the alignment required for - allocations. For power-of-two sized allocations, that's not a - problem, but it does matter for odd-sized allocations. */ - -struct max_alignment { - char c; - union { - HOST_WIDEST_INT i; - long double d; - } u; -}; - -/* The biggest alignment required. */ - -#define MAX_ALIGNMENT (offsetof (struct max_alignment, u)) - /* Compute the smallest nonnegative number which when added to X gives a multiple of F. 
*/ @@ -326,6 +335,16 @@ typedef struct page_table_chain #endif +#ifdef ENABLE_GC_ALWAYS_COLLECT +/* List of free objects to be verified as actually free on the + next collection. */ +struct free_object +{ + void *object; + struct free_object *next; +}; +#endif + /* The rest of the global variables. */ static struct globals { @@ -386,7 +405,7 @@ static struct globals /* Maximum number of elements that can be used before resizing. */ unsigned int depth_max; - /* Each element of this arry is an index in by_depth where the given + /* Each element of this array is an index in by_depth where the given depth starts. This structure is indexed by that given depth we are interested in. */ unsigned int *depth; @@ -412,11 +431,7 @@ static struct globals #ifdef ENABLE_GC_ALWAYS_COLLECT /* List of free objects to be verified as actually free on the next collection. */ - struct free_object - { - void *object; - struct free_object *next; - } *free_object_list; + struct free_object *free_object_list; #endif #ifdef GATHER_STATISTICS @@ -430,16 +445,16 @@ static struct globals /* Total allocations and overhead for sizes less than 32, 64 and 128. These sizes are interesting because they are typical cache line sizes. */ - + unsigned long long total_allocated_under32; unsigned long long total_overhead_under32; - + unsigned long long total_allocated_under64; unsigned long long total_overhead_under64; - + unsigned long long total_allocated_under128; unsigned long long total_overhead_under128; - + /* The allocations for each of the allocation orders. */ unsigned long long total_allocated_per_order[NUM_ORDERS]; @@ -503,7 +518,7 @@ push_depth (unsigned int i) if (G.depth_in_use >= G.depth_max) { G.depth_max *= 2; - G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int)); + G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max); } G.depth[G.depth_in_use++] = i; } @@ -516,10 +531,9 @@ push_by_depth (page_entry *p, unsigned long *s) if (G.by_depth_in_use >= G.by_depth_max) { G.by_depth_max *= 2; - G.by_depth = xrealloc (G.by_depth, - G.by_depth_max * sizeof (page_entry *)); - G.save_in_use = xrealloc (G.save_in_use, - G.by_depth_max * sizeof (unsigned long *)); + G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max); + G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use, + G.by_depth_max); } G.by_depth[G.by_depth_in_use] = p; G.save_in_use[G.by_depth_in_use++] = s; @@ -611,7 +625,7 @@ set_page_table_entry (void *p, page_entry *entry) goto found; /* Not found -- allocate a new table. */ - table = xcalloc (1, sizeof(*table)); + table = XCNEW (struct page_table_chain); table->next = G.lookup; table->high_bits = high_bits; G.lookup = table; @@ -657,12 +671,12 @@ static inline char * alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size) { #ifdef HAVE_MMAP_ANON - char *page = mmap (pref, size, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); #endif #ifdef HAVE_MMAP_DEV_ZERO - char *page = mmap (pref, size, PROT_READ | PROT_WRITE, - MAP_PRIVATE, G.dev_zero_fd, 0); + char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE, + MAP_PRIVATE, G.dev_zero_fd, 0); #endif if (page == (char *) MAP_FAILED) @@ -772,7 +786,7 @@ alloc_page (unsigned order) memory order. 
*/ for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--) { - e = xcalloc (1, page_entry_size); + e = XCNEWVAR (struct page_entry, page_entry_size); e->order = order; e->bytes = G.pagesize; e->page = page + (i << G.lg_pagesize); @@ -800,7 +814,7 @@ alloc_page (unsigned order) alloc_size = GGC_QUIRE_SIZE * G.pagesize; else alloc_size = entry_size + G.pagesize - 1; - allocation = xmalloc (alloc_size); + allocation = XNEWVEC (char, alloc_size); page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize); head_slop = page - allocation; @@ -843,7 +857,7 @@ alloc_page (unsigned order) struct page_entry *e, *f = G.free_pages; for (a = enda - G.pagesize; a != page; a -= G.pagesize) { - e = xcalloc (1, page_entry_size); + e = XCNEWVAR (struct page_entry, page_entry_size); e->order = order; e->bytes = G.pagesize; e->page = a; @@ -857,7 +871,7 @@ alloc_page (unsigned order) #endif if (entry == NULL) - entry = xcalloc (1, page_entry_size); + entry = XCNEWVAR (struct page_entry, page_entry_size); entry->bytes = entry_size; entry->page = page; @@ -937,7 +951,7 @@ free_page (page_entry *entry) /* We cannot free a page from a context deeper than the current one. */ gcc_assert (entry->context_depth == top->context_depth); - + /* Put top element into freed slot. */ G.by_depth[i] = top; G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1]; @@ -1256,6 +1270,57 @@ ggc_alloc_stat (size_t size MEM_STAT_DECL) return result; } +/* Mark function for strings. */ + +void +gt_ggc_m_S (const void *p) +{ + page_entry *entry; + unsigned bit, word; + unsigned long mask; + unsigned long offset; + + if (!p || !ggc_allocated_p (p)) + return; + + /* Look up the page on which the object is alloced. . */ + entry = lookup_page_table_entry (p); + gcc_assert (entry); + + /* Calculate the index of the object on the page; this is its bit + position in the in_use_p bitmap. Note that because a char* might + point to the middle of an object, we need special code here to + make sure P points to the start of an object. */ + offset = ((const char *) p - entry->page) % object_size_table[entry->order]; + if (offset) + { + /* Here we've seen a char* which does not point to the beginning + of an allocated object. We assume it points to the middle of + a STRING_CST. */ + gcc_assert (offset == offsetof (struct tree_string, str)); + p = ((const char *) p) - offset; + gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p)); + return; + } + + bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order); + word = bit / HOST_BITS_PER_LONG; + mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG); + + /* If the bit was previously set, skip it. */ + if (entry->in_use_p[word] & mask) + return; + + /* Otherwise set it, and decrement the free object count. */ + entry->in_use_p[word] |= mask; + entry->num_free_objects -= 1; + + if (GGC_DEBUG_LEVEL >= 4) + fprintf (G.debug_file, "Marking %p\n", p); + + return; +} + /* If P is not marked, marks it and return false. Otherwise return true. P must have been allocated by the GC allocator; it mustn't point to static objects, stack variables, or memory allocated with malloc. */ @@ -1354,7 +1419,7 @@ ggc_free (void *p) #ifdef ENABLE_GC_ALWAYS_COLLECT /* In the completely-anal-checking mode, we do *not* immediately free - the data, but instead verify that the data is *actually* not + the data, but instead verify that the data is *actually* not reachable the next time we collect. 
*/ { struct free_object *fo = XNEW (struct free_object); @@ -1381,7 +1446,7 @@ ggc_free (void *p) /* If the page is completely full, then it's supposed to be after all pages that aren't. Since we've freed one object from a page that was full, we need to move the - page to the head of the list. + page to the head of the list. PE is the node we want to move. Q is the previous node and P is the next node in the list. */ @@ -1425,7 +1490,7 @@ ggc_free (void *p) static void compute_inverse (unsigned order) { - size_t size, inv; + size_t size, inv; unsigned int e; size = OBJECT_SIZE (order); @@ -1615,7 +1680,7 @@ clear_marks (void) if (p->context_depth < G.context_depth) { if (! save_in_use_p (p)) - save_in_use_p (p) = xmalloc (bitmap_size); + save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size); memcpy (save_in_use_p (p), p->in_use_p, bitmap_size); } @@ -1685,7 +1750,7 @@ sweep_pages (void) G.pages[order] = next; else previous->next = next; - + /* Splice P out of the back pointers too. */ if (next) next->prev = previous; @@ -1891,6 +1956,8 @@ ggc_collect (void) /* Indicate that we've seen collections at this context depth. */ G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1; + invoke_plugin_callbacks (PLUGIN_GGC_START, NULL); + clear_marks (); ggc_mark_roots (); #ifdef GATHER_STATISTICS @@ -1902,6 +1969,8 @@ ggc_collect (void) G.allocated_last_gc = G.allocated; + invoke_plugin_callbacks (PLUGIN_GGC_END, NULL); + timevar_pop (TV_GC); if (!quiet_flag) @@ -1981,7 +2050,7 @@ ggc_print_statistics (void) SCALE (G.allocated), STAT_LABEL(G.allocated), SCALE (total_overhead), STAT_LABEL (total_overhead)); -#ifdef GATHER_STATISTICS +#ifdef GATHER_STATISTICS { fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n"); @@ -2002,7 +2071,7 @@ ggc_print_statistics (void) G.stats.total_overhead_under128); fprintf (stderr, "Total Allocated under 128B: %10lld\n", G.stats.total_allocated_under128); - + for (i = 0; i < NUM_ORDERS; i++) if (G.stats.total_allocated_per_order[i]) { @@ -2017,12 +2086,14 @@ ggc_print_statistics (void) #endif } +struct ggc_pch_ondisk +{ + unsigned totals[NUM_ORDERS]; +}; + struct ggc_pch_data { - struct ggc_pch_ondisk - { - unsigned totals[NUM_ORDERS]; - } d; + struct ggc_pch_ondisk d; size_t base[NUM_ORDERS]; size_t written[NUM_ORDERS]; }; @@ -2112,7 +2183,7 @@ ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED, size_t size, bool is_string ATTRIBUTE_UNUSED) { unsigned order; - static const char emptyBytes[256]; + static const char emptyBytes[256] = { 0 }; if (size < NUM_SIZE_LOOKUP) order = size_lookup[size]; @@ -2221,7 +2292,7 @@ ggc_pch_read (FILE *f, void *addr) { struct ggc_pch_ondisk d; unsigned i; - char *offs = addr; + char *offs = (char *) addr; unsigned long count_old_page_tables; unsigned long count_new_page_tables; @@ -2267,9 +2338,9 @@ ggc_pch_read (FILE *f, void *addr) bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize); num_objs = bytes / OBJECT_SIZE (i); - entry = xcalloc (1, (sizeof (struct page_entry) - - sizeof (long) - + BITMAP_SIZE (num_objs + 1))); + entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry) + - sizeof (long) + + BITMAP_SIZE (num_objs + 1))); entry->bytes = bytes; entry->page = offs; entry->context_depth = 0;
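
Aside (not part of the patch): the relocated struct max_alignment above relies on a common C idiom, an "alignment probe". Placing a union after a single char forces the compiler to pad the union out to the strictest alignment among its members, so offsetof of the union member reports that alignment; the new extra_order_size_table entries are then expressed as small multiples of it. The sketch below is a standalone, hypothetical illustration of that idiom, not GCC code: the names max_alignment_demo, MAX_ALIGNMENT_DEMO and round_to_alignment are invented for the example, and plain long long / void * stand in for HOST_WIDEST_INT and the pointer member used in the patch.

/* Standalone sketch of the alignment-probe idiom used by MAX_ALIGNMENT.  */

#include <stddef.h>
#include <stdio.h>

struct max_alignment_demo {
  char c;
  union {
    long long i;   /* stand-in for HOST_WIDEST_INT */
    void *p;
  } u;
};

/* The padding inserted after the lone char equals the union's alignment,
   so the offset of U is the strictest alignment we need to honor.  */
#define MAX_ALIGNMENT_DEMO (offsetof (struct max_alignment_demo, u))

/* Round SIZE up to a multiple of the probed alignment, mirroring how the
   extra allocation orders are defined as multiples of MAX_ALIGNMENT.  */
static size_t
round_to_alignment (size_t size)
{
  return (size + MAX_ALIGNMENT_DEMO - 1) / MAX_ALIGNMENT_DEMO * MAX_ALIGNMENT_DEMO;
}

int
main (void)
{
  printf ("probed alignment: %zu\n", (size_t) MAX_ALIGNMENT_DEMO);
  printf ("37 bytes rounds up to %zu\n", round_to_alignment (37));
  return 0;
}

On a typical LP64 host this prints an alignment of 8, so an odd request such as 37 bytes lands in the MAX_ALIGNMENT * 5 order introduced by the patch; sizing the extra orders this way avoids hard-coding structure sizes that may drift between releases.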