/* "Bag-of-pages" garbage collector for the GNU compiler.
- Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007
Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "timevar.h"
#include "params.h"
#include "tree-flow.h"
-#ifdef ENABLE_VALGRIND_CHECKING
-# ifdef HAVE_VALGRIND_MEMCHECK_H
-# include <valgrind/memcheck.h>
-# elif defined HAVE_MEMCHECK_H
-# include <memcheck.h>
-# else
-# include <valgrind.h>
-# endif
-#else
-/* Avoid #ifdef:s when we can help it. */
-#define VALGRIND_DISCARD(x)
-#endif
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
file open. Prefer either to valloc. */
sizeof (struct tree_var_decl),
sizeof (struct tree_list),
sizeof (struct tree_ssa_name),
- sizeof (struct tree_function_decl),
- sizeof (struct tree_binfo),
sizeof (struct function),
sizeof (struct basic_block_def),
sizeof (bitmap_element),
+ sizeof (bitmap_head),
/* PHI nodes with one to three arguments are already covered by the
above sizes. */
sizeof (struct tree_phi_node) + sizeof (struct phi_arg_d) * 3,
/* Pretend we don't have access to the allocated pages. We'll enable
access to smaller pieces of the area in ggc_alloc. Discard the
handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
return page;
}
/* Mark the page as inaccessible. Discard the handle to avoid handle
leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
set_page_table_entry (entry->page, NULL);
/* This table provides a fast way to determine ceil(log_2(size)) for
allocation requests. The minimum allocation size is eight bytes. */
-
-static unsigned char size_lookup[512] =
+#define NUM_SIZE_LOOKUP 512
+static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
struct page_entry *entry;
void *result;
- if (size < 512)
+ if (size < NUM_SIZE_LOOKUP)
{
order = size_lookup[size];
object_size = OBJECT_SIZE (order);
exact same semantics in presence of memory bugs, regardless of
ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
/* `Poison' the entire allocated object, including any padding at
the end. */
/* Make the bytes after the end of the object unaccessible. Discard the
handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
- object_size - size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
+ object_size - size));
#endif
/* Tell Valgrind that the memory is there, but its content isn't
defined. The bytes at the end of the object are still marked
unaccessible. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
/* Keep track of how many bytes are being allocated. This
information is used in deciding when to collect. */
#ifdef ENABLE_GC_CHECKING
/* Poison the data, to indicate the data is garbage. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
memset (p, 0xa5, size);
#endif
/* Let valgrind know the object is free. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* In the completely-anal-checking mode, we do *not* immediately free
for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
{
size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
+
+ /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
+ so that we're sure of getting aligned memory. */
+ s = ROUND_UP (s, MAX_ALIGNMENT);
object_size_table[order] = s;
}
/* Reset the size_lookup array to put appropriately sized objects in
the special orders. All objects bigger than the previous power
of two, but no greater than the special size, should go in the
- new order.
- Enforce alignment during lookup. The resulting bin size must
- have the same or bigger alignment than the apparent alignment
- requirement from the size request (but not bigger alignment
- than MAX_ALIGNMENT). Consider an extra bin of size 76 (in
- addition to the 64 and 128 byte sized bins). A request of
- allocation size of 72 bytes must be served from the 128 bytes
- bin, because 72 bytes looks like a request for 8 byte aligned
- memory, while the 76 byte bin can only serve chunks with a
- guaranteed alignment of 4 bytes. */
+ new order. */
for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
{
- int i, mask;
-
- /* Build an alignment mask that can be used for testing
- size % 2*align. If (size | MAX_ALIGNMENT) & mask is non-zero
- then the requested size apparent alignment requirement
- (which is at most MAX_ALIGNMENT) is less or equal than what
- the OBJECT_SIZE bin can guarantee. */
- mask = ~(((unsigned)-1) << ffs (OBJECT_SIZE (order)));
- mask &= 2 * MAX_ALIGNMENT - 1;
-
- /* All objects smaller than the OBJECT_SIZE for this ORDER could go
- into ORDER. Determine the cases for which that is profitable
- and fulfilling the alignment requirements. Stop searching
- once a smaller bin with same or better alignment guarantee is
- found. */
- for (i = OBJECT_SIZE (order); ; --i)
- {
- unsigned int old_sz = OBJECT_SIZE (size_lookup [i]);
- if (!(old_sz & (mask >> 1))
- && old_sz < OBJECT_SIZE (order))
- break;
-
- /* If object of size I are presently using a larger bin, we would
- like to move them to ORDER. However, we can only do that if we
- can be sure they will be properly aligned. They will be properly
- aligned if either the ORDER bin is maximally aligned, or if
- objects of size I cannot be more strictly aligned than the
- alignment of this order. */
- if ((i | MAX_ALIGNMENT) & mask
- && old_sz > OBJECT_SIZE (order))
- size_lookup[i] = order;
- }
- }
+ int o;
+ int i;
- /* Verify we got everything right with respect to alignment requests. */
- for (order = 1; order < 512; ++order)
- gcc_assert (ffs (OBJECT_SIZE (size_lookup [order]))
- >= ffs (order | MAX_ALIGNMENT));
+ i = OBJECT_SIZE (order);
+ if (i >= NUM_SIZE_LOOKUP)
+ continue;
+
+ for (o = size_lookup[i]; o == size_lookup [i]; --i)
+ size_lookup[i] = order;
+ }
G.depth_in_use = 0;
G.depth_max = 10;
so the exact same memory semantics is kept, in case
there are memory errors. We override this request
below. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
+ size));
memset (object, 0xa5, size);
/* Drop the handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
}
}
}
for (i = 0; i < NUM_ORDERS; i++)
if (G.stats.total_allocated_per_order[i])
{
- fprintf (stderr, "Total Overhead page size %7d: %10lld\n",
- OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]);
- fprintf (stderr, "Total Allocated page size %7d: %10lld\n",
- OBJECT_SIZE (i), G.stats.total_allocated_per_order[i]);
+ fprintf (stderr, "Total Overhead page size %7lu: %10lld\n",
+ (unsigned long) OBJECT_SIZE (i),
+ G.stats.total_overhead_per_order[i]);
+ fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
+ (unsigned long) OBJECT_SIZE (i),
+ G.stats.total_allocated_per_order[i]);
}
}
#endif
{
unsigned order;
- if (size < 512)
+ if (size < NUM_SIZE_LOOKUP)
order = size_lookup[size];
else
{
unsigned order;
char *result;
- if (size < 512)
+ if (size < NUM_SIZE_LOOKUP)
order = size_lookup[size];
else
{
unsigned order;
static const char emptyBytes[256];
- if (size < 512)
+ if (size < NUM_SIZE_LOOKUP)
order = size_lookup[size];
else
{
#ifdef ENABLE_GC_CHECKING
poison_pages ();
#endif
+ /* Since we free all the allocated objects, the free list becomes
+ useless. Validate it now, which will also clear it. */
+  validate_free_objects ();
/* No object read from a PCH file should ever be freed. So, set the
context depth to 1, and set the depth of all the currently-allocated