diff --git a/gcc/ggc-page.c b/gcc/ggc-page.c
index e0dfb1610d4..9e6800d4b2a 100644
--- a/gcc/ggc-page.c
+++ b/gcc/ggc-page.c
@@ -1,12 +1,12 @@
/* "Bag-of-pages" garbage collector for the GNU compiler.
- Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
- Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009,
+ 2010, 2011 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
@@ -15,9 +15,8 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
@@ -26,56 +25,37 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "flags.h"
#include "ggc.h"
+#include "ggc-internal.h"
#include "timevar.h"
#include "params.h"
#include "tree-flow.h"
-#ifdef ENABLE_VALGRIND_CHECKING
-# ifdef HAVE_VALGRIND_MEMCHECK_H
-#  include <valgrind/memcheck.h>
-# elif defined HAVE_MEMCHECK_H
-#  include <memcheck.h>
-# else
-#  include <valgrind.h>
-# endif
-#else
-/* Avoid #ifdef:s when we can help it. */
-#define VALGRIND_DISCARD(x)
-#endif
+#include "cfgloop.h"
+#include "plugin.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
file open. Prefer either to valloc. */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
-
-# include <sys/mman.h>
-# ifndef MAP_FAILED
-# define MAP_FAILED -1
-# endif
-# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
-# define MAP_ANONYMOUS MAP_ANON
-# endif
# define USING_MMAP
-
#endif
#ifdef HAVE_MMAP_DEV_ZERO
-
-# include <sys/mman.h>
-# ifndef MAP_FAILED
-# define MAP_FAILED -1
-# endif
# define USING_MMAP
-
#endif
#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif
-/* Stategy:
+#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
+ && defined(USING_MMAP)
+# define USING_MADVISE
+#endif
+
+/* Strategy:
This garbage-collecting allocator allocates objects on one of a set
of pages. Each page can allocate objects of a single size only;
@@ -169,6 +149,24 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#define OFFSET_TO_BIT(OFFSET, ORDER) \
(((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
+/* We use this structure to determine the alignment required for
+ allocations. For power-of-two sized allocations, that's not a
+ problem, but it does matter for odd-sized allocations.
+ We do not care about alignment for floating-point types. */
+
+struct max_alignment {
+ char c;
+ union {
+ HOST_WIDEST_INT i;
+ void *p;
+ } u;
+};
+
+/* The biggest alignment required. */
+
+#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
+
+
/* The number of extra orders, not corresponding to power-of-two sized
objects. */
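
The offsetof idiom above is a portable way to discover the strictest alignment the allocator must honor: the padding the compiler inserts between the char and the union is exactly that alignment. A minimal stand-alone sketch of the same trick, using a hypothetical align_probe struct in place of max_alignment and long long in place of HOST_WIDEST_INT, is:

#include <stddef.h>
#include <stdio.h>

/* Same idiom as max_alignment: the offset of the union inside the
   struct equals the alignment required by the union's members.  */
struct align_probe {
  char c;
  union {
    long long i;   /* stand-in for HOST_WIDEST_INT */
    void *p;
  } u;
};

#define PROBE_ALIGNMENT (offsetof (struct align_probe, u))

int
main (void)
{
  /* Typically prints 8 on an LP64 host.  */
  printf ("max alignment: %zu\n", (size_t) PROBE_ALIGNMENT);
  return 0;
}
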
@@ -185,34 +183,35 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
thing you need to do to add a new special allocation size. */
static const size_t extra_order_size_table[] = {
- sizeof (struct stmt_ann_d),
- sizeof (struct tree_decl),
- sizeof (struct tree_list),
- TREE_EXP_SIZE (2),
- RTL_SIZE (2), /* MEM, PLUS, etc. */
- RTL_SIZE (9), /* INSN */
+ /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
+ There are a lot of structures with these sizes and explicitly
+ listing them risks orders being dropped because they changed size. */
+ MAX_ALIGNMENT * 3,
+ MAX_ALIGNMENT * 5,
+ MAX_ALIGNMENT * 6,
+ MAX_ALIGNMENT * 7,
+ MAX_ALIGNMENT * 9,
+ MAX_ALIGNMENT * 10,
+ MAX_ALIGNMENT * 11,
+ MAX_ALIGNMENT * 12,
+ MAX_ALIGNMENT * 13,
+ MAX_ALIGNMENT * 14,
+ MAX_ALIGNMENT * 15,
+ sizeof (struct tree_decl_non_common),
+ sizeof (struct tree_field_decl),
+ sizeof (struct tree_parm_decl),
+ sizeof (struct tree_var_decl),
+ sizeof (struct tree_type_non_common),
+ sizeof (struct function),
+ sizeof (struct basic_block_def),
+ sizeof (struct cgraph_node),
+ sizeof (struct loop),
};
/* The total number of orders. */
#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
-/* We use this structure to determine the alignment required for
- allocations. For power-of-two sized allocations, that's not a
- problem, but it does matter for odd-sized allocations. */
-
-struct max_alignment {
- char c;
- union {
- HOST_WIDEST_INT i;
- long double d;
- } u;
-};
-
-/* The biggest alignment required. */
-
-#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
-
/* Compute the smallest nonnegative number which when added to X gives
a multiple of F. */
@@ -222,6 +221,10 @@ struct max_alignment {
#define ROUND_UP(x, f) (CEIL (x, f) * (f))
+/* Round X to next multiple of the page size */
+
+#define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))
+
/* The Ith entry is the number of objects on a page or order I. */
static unsigned objects_per_page_table[NUM_ORDERS];
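
PAGE_ALIGN assumes G.pagesize is a power of two, so the round-up can be done with a mask instead of a division. A self-contained sketch of the same computation, with the page size passed as a hypothetical parameter instead of read from the globals, is:

#include <assert.h>
#include <stddef.h>

/* Round X up to the next multiple of PAGESIZE (a power of two); this
   mirrors the PAGE_ALIGN macro without referencing G.  */
static size_t
page_align (size_t x, size_t pagesize)
{
  assert (pagesize != 0 && (pagesize & (pagesize - 1)) == 0);
  return (x + pagesize - 1) & ~(pagesize - 1);
}

/* page_align (5000, 4096) == 8192; page_align (4096, 4096) == 4096.  */
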
@@ -283,6 +286,9 @@ typedef struct page_entry
/* The lg of size of objects allocated from this page. */
unsigned char order;
+ /* Discarded page? */
+ bool discarded;
+
/* A bit vector indicating whether or not objects are in use. The
Nth bit is one if the Nth object on this page is allocated. This
array is dynamically sized. */
@@ -327,6 +333,16 @@ typedef struct page_table_chain
#endif
+#ifdef ENABLE_GC_ALWAYS_COLLECT
+/* List of free objects to be verified as actually free on the
+ next collection. */
+struct free_object
+{
+ void *object;
+ struct free_object *next;
+};
+#endif
+
/* The rest of the global variables. */
static struct globals
{
@@ -387,7 +403,7 @@ static struct globals
/* Maximum number of elements that can be used before resizing. */
unsigned int depth_max;
- /* Each element of this arry is an index in by_depth where the given
+ /* Each element of this array is an index in by_depth where the given
depth starts. This structure is indexed by that given depth we
are interested in. */
unsigned int *depth;
@@ -413,34 +429,30 @@ static struct globals
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
next collection. */
- struct free_object
- {
- void *object;
- struct free_object *next;
- } *free_object_list;
+ struct free_object *free_object_list;
#endif
#ifdef GATHER_STATISTICS
struct
{
- /* Total memory allocated with ggc_alloc. */
+ /* Total GC-allocated memory. */
unsigned long long total_allocated;
- /* Total overhead for memory to be allocated with ggc_alloc. */
+ /* Total overhead for GC-allocated memory. */
unsigned long long total_overhead;
/* Total allocations and overhead for sizes less than 32, 64 and 128.
These sizes are interesting because they are typical cache line
sizes. */
-
+
unsigned long long total_allocated_under32;
unsigned long long total_overhead_under32;
-
+
unsigned long long total_allocated_under64;
unsigned long long total_overhead_under64;
-
+
unsigned long long total_allocated_under128;
unsigned long long total_overhead_under128;
-
+
/* The allocations for each of the allocation orders. */
unsigned long long total_allocated_per_order[NUM_ORDERS];
@@ -462,7 +474,7 @@ static struct globals
can override this by defining GGC_QUIRE_SIZE explicitly. */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
-# define GGC_QUIRE_SIZE 256
+# define GGC_QUIRE_SIZE 512 /* 2MB for 4K pages */
# else
# define GGC_QUIRE_SIZE 16
# endif
@@ -475,7 +487,7 @@ static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
-static char *alloc_anon (char *, size_t);
+static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
@@ -495,9 +507,6 @@ static void move_ptes_to_front (int, int);
void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
-struct alloc_zone *rtl_zone = NULL;
-struct alloc_zone *tree_zone = NULL;
-struct alloc_zone *garbage_zone = NULL;
/* Push an entry onto G.depth. */
@@ -507,7 +516,7 @@ push_depth (unsigned int i)
if (G.depth_in_use >= G.depth_max)
{
G.depth_max *= 2;
- G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
+ G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
}
G.depth[G.depth_in_use++] = i;
}
@@ -520,10 +529,9 @@ push_by_depth (page_entry *p, unsigned long *s)
if (G.by_depth_in_use >= G.by_depth_max)
{
G.by_depth_max *= 2;
- G.by_depth = xrealloc (G.by_depth,
- G.by_depth_max * sizeof (page_entry *));
- G.save_in_use = xrealloc (G.save_in_use,
- G.by_depth_max * sizeof (unsigned long *));
+ G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
+ G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
+ G.by_depth_max);
}
G.by_depth[G.by_depth_in_use] = p;
G.save_in_use[G.by_depth_in_use++] = s;
@@ -615,7 +623,7 @@ set_page_table_entry (void *p, page_entry *entry)
goto found;
/* Not found -- allocate a new table. */
- table = xcalloc (1, sizeof(*table));
+ table = XCNEW (struct page_table_chain);
table->next = G.lookup;
table->high_bits = high_bits;
G.lookup = table;
@@ -628,14 +636,14 @@ found:
L2 = LOOKUP_L2 (p);
if (base[L1] == NULL)
- base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
+ base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging. */
-void
+DEBUG_FUNCTION void
debug_print_page_list (int order)
{
page_entry *p;
@@ -658,19 +666,21 @@ debug_print_page_list (int order)
compile error unless exactly one of the HAVE_* is defined. */
static inline char *
-alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
+alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
- char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
- char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE, G.dev_zero_fd, 0);
+ char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
if (page == (char *) MAP_FAILED)
{
+ if (!check)
+ return NULL;
perror ("virtual memory exhausted");
exit (FATAL_EXIT_CODE);
}
@@ -679,9 +689,9 @@ alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
G.bytes_mapped += size;
/* Pretend we don't have access to the allocated pages. We'll enable
- access to smaller pieces of the area in ggc_alloc. Discard the
+ access to smaller pieces of the area in ggc_internal_alloc. Discard the
handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
return page;
}
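
The VALGRIND_MAKE_MEM_NOACCESS / VALGRIND_MAKE_MEM_UNDEFINED requests used throughout this patch are the memcheck client requests that replaced the older VALGRIND_MAKE_NOACCESS / VALGRIND_MAKE_WRITABLE names. A small sketch of the pattern, assuming valgrind/memcheck.h is available (the requests are harmless when the program is not run under Valgrind), is:

#include <stddef.h>
#include <valgrind/memcheck.h>

/* Hide a freshly mapped region from the client, then expose a piece of
   it as allocated-but-uninitialized, the way alloc_anon and the
   allocation entry points above do.  The (void) casts play the role of
   VALGRIND_DISCARD in the real file: they drop the request handles.  */
static void
poison_then_hand_out (char *region, size_t region_size, size_t piece)
{
  (void) VALGRIND_MAKE_MEM_NOACCESS (region, region_size);
  (void) VALGRIND_MAKE_MEM_UNDEFINED (region, piece);
}
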
@@ -733,6 +743,7 @@ alloc_page (unsigned order)
entry_size = num_objects * OBJECT_SIZE (order);
if (entry_size < G.pagesize)
entry_size = G.pagesize;
+ entry_size = PAGE_ALIGN (entry_size);
entry = NULL;
page = NULL;
@@ -744,6 +755,10 @@ alloc_page (unsigned order)
if (p != NULL)
{
+ if (p->discarded)
+ G.bytes_mapped += p->bytes;
+ p->discarded = false;
+
/* Recycle the allocated memory from this page ... */
*pp = p->next;
page = p->page;
@@ -768,15 +783,20 @@ alloc_page (unsigned order)
extras on the freelist. (Can only do this optimization with
mmap for backing store.) */
struct page_entry *e, *f = G.free_pages;
- int i;
+ int i, entries = GGC_QUIRE_SIZE;
- page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
+ page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
+ if (page == NULL)
+ {
+ page = alloc_anon(NULL, G.pagesize, true);
+ entries = 1;
+ }
/* This loop counts down so that the chain will be in ascending
memory order. */
- for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
+ for (i = entries - 1; i >= 1; i--)
{
- e = xcalloc (1, page_entry_size);
+ e = XCNEWVAR (struct page_entry, page_entry_size);
e->order = order;
e->bytes = G.pagesize;
e->page = page + (i << G.lg_pagesize);
@@ -787,7 +807,7 @@ alloc_page (unsigned order)
G.free_pages = f;
}
else
- page = alloc_anon (NULL, entry_size);
+ page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
else
@@ -804,7 +824,7 @@ alloc_page (unsigned order)
alloc_size = GGC_QUIRE_SIZE * G.pagesize;
else
alloc_size = entry_size + G.pagesize - 1;
- allocation = xmalloc (alloc_size);
+ allocation = XNEWVEC (char, alloc_size);
page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
head_slop = page - allocation;
@@ -847,7 +867,7 @@ alloc_page (unsigned order)
struct page_entry *e, *f = G.free_pages;
for (a = enda - G.pagesize; a != page; a -= G.pagesize)
{
- e = xcalloc (1, page_entry_size);
+ e = XCNEWVAR (struct page_entry, page_entry_size);
e->order = order;
e->bytes = G.pagesize;
e->page = a;
@@ -861,7 +881,7 @@ alloc_page (unsigned order)
#endif
if (entry == NULL)
- entry = xcalloc (1, page_entry_size);
+ entry = XCNEWVAR (struct page_entry, page_entry_size);
entry->bytes = entry_size;
entry->page = page;
@@ -925,7 +945,7 @@ free_page (page_entry *entry)
/* Mark the page as inaccessible. Discard the handle to avoid handle
leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
set_page_table_entry (entry->page, NULL);
@@ -941,7 +961,7 @@ free_page (page_entry *entry)
/* We cannot free a page from a context deeper than the current
one. */
gcc_assert (entry->context_depth == top->context_depth);
-
+
/* Put top element into freed slot. */
G.by_depth[i] = top;
G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
@@ -960,7 +980,90 @@ free_page (page_entry *entry)
static void
release_pages (void)
{
-#ifdef USING_MMAP
+#ifdef USING_MADVISE
+ page_entry *p, *start_p;
+ char *start;
+ size_t len;
+ size_t mapped_len;
+ page_entry *next, *prev, *newprev;
+ size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
+
+ /* First free larger continuous areas to the OS.
+ This allows other allocators to grab these areas if needed.
+ This is only done on larger chunks to avoid fragmentation.
+ This does not always work because the free_pages list is only
+ approximately sorted. */
+
+ p = G.free_pages;
+ prev = NULL;
+ while (p)
+ {
+ start = p->page;
+ start_p = p;
+ len = 0;
+ mapped_len = 0;
+ newprev = prev;
+ while (p && p->page == start + len)
+ {
+ len += p->bytes;
+ if (!p->discarded)
+ mapped_len += p->bytes;
+ newprev = p;
+ p = p->next;
+ }
+ if (len >= free_unit)
+ {
+ while (start_p != p)
+ {
+ next = start_p->next;
+ free (start_p);
+ start_p = next;
+ }
+ munmap (start, len);
+ if (prev)
+ prev->next = p;
+ else
+ G.free_pages = p;
+ G.bytes_mapped -= mapped_len;
+ continue;
+ }
+ prev = newprev;
+ }
+
+ /* Now give back the fragmented pages to the OS, but keep the address
+ space to reuse it next time. */
+
+ for (p = G.free_pages; p; )
+ {
+ if (p->discarded)
+ {
+ p = p->next;
+ continue;
+ }
+ start = p->page;
+ len = p->bytes;
+ start_p = p;
+ p = p->next;
+ while (p && p->page == start + len)
+ {
+ len += p->bytes;
+ p = p->next;
+ }
+ /* Give the page back to the kernel, but don't free the mapping.
+ This avoids fragmentation in the virtual memory map of the
+ process. Next time we can reuse it by just touching it. */
+ madvise (start, len, MADV_DONTNEED);
+ /* Don't count those pages as mapped to not touch the garbage collector
+ unnecessarily. */
+ G.bytes_mapped -= len;
+ while (start_p != p)
+ {
+ start_p->discarded = true;
+ start_p = start_p->next;
+ }
+ }
+#endif
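
The MADV_DONTNEED path gives the physical pages back to the kernel while keeping the virtual mapping, so the address range can be reused later by simply touching it again. A minimal sketch of that pattern, assuming a system where mmap and madvise behave as on Linux, is:

#include <stdlib.h>
#include <sys/mman.h>

/* Map an anonymous region, then release its backing pages but keep the
   address space reserved.  Touching the region afterwards faults in
   fresh zero-filled pages.  Error handling is elided.  */
static void *
map_then_discard (size_t len)
{
  void *p = mmap (NULL, len, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return NULL;
  madvise (p, len, MADV_DONTNEED);
  return p;
}
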
+#if defined(USING_MMAP) && !defined(USING_MADVISE)
page_entry *p, *next;
char *start;
size_t len;
@@ -1021,8 +1124,8 @@ release_pages (void)
/* This table provides a fast way to determine ceil(log_2(size)) for
allocation requests. The minimum allocation size is eight bytes. */
-
-static unsigned char size_lookup[257] =
+#define NUM_SIZE_LOOKUP 512
+static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
@@ -1040,47 +1143,84 @@ static unsigned char size_lookup[257] =
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 8
+ 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
-/* Typed allocation function. Does nothing special in this collector. */
+/* For a given size of memory requested for allocation, return the
+ actual size that is going to be allocated, as well as the size
+ order. */
-void *
-ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
- MEM_STAT_DECL)
+static void
+ggc_round_alloc_size_1 (size_t requested_size,
+ size_t *size_order,
+ size_t *alloced_size)
+{
+ size_t order, object_size;
+
+ if (requested_size < NUM_SIZE_LOOKUP)
+ {
+ order = size_lookup[requested_size];
+ object_size = OBJECT_SIZE (order);
+ }
+ else
+ {
+ order = 10;
+ while (requested_size > (object_size = OBJECT_SIZE (order)))
+ order++;
+ }
+
+ if (size_order)
+ *size_order = order;
+ if (alloced_size)
+ *alloced_size = object_size;
+}
+
+/* For a given size of memory requested for allocation, return the
+ actual size that is going to be allocated. */
+
+size_t
+ggc_round_alloc_size (size_t requested_size)
{
- return ggc_alloc_stat (size PASS_MEM_STAT);
+ size_t size = 0;
+
+ ggc_round_alloc_size_1 (requested_size, NULL, &size);
+ return size;
}
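
ggc_round_alloc_size_1 maps a request to the smallest bucket (order) that can hold it: small sizes go through the size_lookup table, larger ones through a linear scan of OBJECT_SIZE. A toy version that only models power-of-two buckets, ignoring the odd-sized extra orders, is:

#include <stddef.h>

/* Hypothetical simplification: buckets are pure powers of two starting
   at 8 bytes (order 3).  The real allocator also has odd-sized orders
   taken from extra_order_size_table.  */
static void
toy_round_alloc_size (size_t requested, size_t *order, size_t *alloced)
{
  size_t o = 3, bucket = 8;
  while (bucket < requested)
    {
      bucket <<= 1;
      o++;
    }
  *order = o;
  *alloced = bucket;
}

/* toy_round_alloc_size (24, ...) yields order 5 and 32 bytes, so a
   24-byte request carries 8 bytes of padding overhead.  */
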
-/* Zone allocation function. Does nothing special in this collector. */
+/* Typed allocation function. Does nothing special in this collector. */
void *
-ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED
- MEM_STAT_DECL)
+ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
+ MEM_STAT_DECL)
{
- return ggc_alloc_stat (size PASS_MEM_STAT);
+ return ggc_internal_alloc_stat (size PASS_MEM_STAT);
}
/* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
void *
-ggc_alloc_stat (size_t size MEM_STAT_DECL)
+ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
size_t order, word, bit, object_offset, object_size;
struct page_entry *entry;
void *result;
- if (size <= 256)
- {
- order = size_lookup[size];
- object_size = OBJECT_SIZE (order);
- }
- else
- {
- order = 9;
- while (size > (object_size = OBJECT_SIZE (order)))
- order++;
- }
+ ggc_round_alloc_size_1 (size, &order, &object_size);
/* If there are non-full pages for this size allocation, they are at
the head of the list. */
@@ -1139,8 +1279,14 @@ ggc_alloc_stat (size_t size MEM_STAT_DECL)
word = bit = 0;
while (~entry->in_use_p[word] == 0)
++word;
+
+#if GCC_VERSION >= 3004
+ bit = __builtin_ctzl (~entry->in_use_p[word]);
+#else
while ((entry->in_use_p[word] >> bit) & 1)
++bit;
+#endif
+
hint = word * HOST_BITS_PER_LONG + bit;
}
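
Finding the first free object in a word of the in-use bitmap is "find the lowest clear bit", and __builtin_ctzl on the complement does it in one step where the builtin exists. A small sketch of the equivalence, assuming a GCC-compatible compiler for the builtin branch, is:

/* Index of the lowest zero bit in WORD; WORD must not be all ones.
   The builtin branch matches the GCC_VERSION >= 3004 path above, the
   loop is the portable fallback.  */
static unsigned
lowest_clear_bit (unsigned long word)
{
#ifdef __GNUC__
  return __builtin_ctzl (~word);
#else
  unsigned bit = 0;
  while ((word >> bit) & 1)
    ++bit;
  return bit;
#endif
}

/* lowest_clear_bit (0x7UL) == 3; lowest_clear_bit (0UL) == 0.  */
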
@@ -1188,7 +1334,7 @@ ggc_alloc_stat (size_t size MEM_STAT_DECL)
exact same semantics in presence of memory bugs, regardless of
ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
/* `Poison' the entire allocated object, including any padding at
the end. */
@@ -1196,19 +1342,22 @@ ggc_alloc_stat (size_t size MEM_STAT_DECL)
/* Make the bytes after the end of the object unaccessible. Discard the
handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
- object_size - size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
+ object_size - size));
#endif
/* Tell Valgrind that the memory is there, but its content isn't
defined. The bytes at the end of the object are still marked
unaccessible. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
/* Keep track of how many bytes are being allocated. This
information is used in deciding when to collect. */
G.allocated += object_size;
+ /* For timevar statistics. */
+ timevar_ggc_mem_total += object_size;
+
#ifdef GATHER_STATISTICS
{
size_t overhead = object_size - size;
@@ -1245,6 +1394,57 @@ ggc_alloc_stat (size_t size MEM_STAT_DECL)
return result;
}
+/* Mark function for strings. */
+
+void
+gt_ggc_m_S (const void *p)
+{
+ page_entry *entry;
+ unsigned bit, word;
+ unsigned long mask;
+ unsigned long offset;
+
+ if (!p || !ggc_allocated_p (p))
+ return;
+
+ /* Look up the page on which the object is alloced.  */
+ entry = lookup_page_table_entry (p);
+ gcc_assert (entry);
+
+ /* Calculate the index of the object on the page; this is its bit
+ position in the in_use_p bitmap. Note that because a char* might
+ point to the middle of an object, we need special code here to
+ make sure P points to the start of an object. */
+ offset = ((const char *) p - entry->page) % object_size_table[entry->order];
+ if (offset)
+ {
+ /* Here we've seen a char* which does not point to the beginning
+ of an allocated object. We assume it points to the middle of
+ a STRING_CST. */
+ gcc_assert (offset == offsetof (struct tree_string, str));
+ p = ((const char *) p) - offset;
+ gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
+ return;
+ }
+
+ bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
+ word = bit / HOST_BITS_PER_LONG;
+ mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
+
+ /* If the bit was previously set, skip it. */
+ if (entry->in_use_p[word] & mask)
+ return;
+
+ /* Otherwise set it, and decrement the free object count. */
+ entry->in_use_p[word] |= mask;
+ entry->num_free_objects -= 1;
+
+ if (GGC_DEBUG_LEVEL >= 4)
+ fprintf (G.debug_file, "Marking %p\n", p);
+
+ return;
+}
+
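
gt_ggc_m_S recovers the start of a STRING_CST from an interior char * by subtracting offsetof (struct tree_string, str); this is the usual container-of idiom. A stand-alone sketch with a hypothetical string node, not the real tree_string layout, is:

#include <stddef.h>

/* Toy string node: callers may hold &node->str rather than node, and
   the marker must step back to the enclosing object.  */
struct toy_string
{
  unsigned length;
  char str[1];
};

static struct toy_string *
enclosing_string (const char *p)
{
  return (struct toy_string *) (p - offsetof (struct toy_string, str));
}
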
/* If P is not marked, marks it and return false. Otherwise return true.
P must have been allocated by the GC allocator; it mustn't point to
static objects, stack variables, or memory allocated with malloc. */
@@ -1335,18 +1535,18 @@ ggc_free (void *p)
#ifdef ENABLE_GC_CHECKING
/* Poison the data, to indicate the data is garbage. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
memset (p, 0xa5, size);
#endif
/* Let valgrind know the object is free. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* In the completely-anal-checking mode, we do *not* immediately free
- the data, but instead verify that the data is *actually* not
+ the data, but instead verify that the data is *actually* not
reachable the next time we collect. */
{
- struct free_object *fo = xmalloc (sizeof (struct free_object));
+ struct free_object *fo = XNEW (struct free_object);
fo->object = p;
fo->next = G.free_object_list;
G.free_object_list = fo;
@@ -1370,7 +1570,7 @@ ggc_free (void *p)
/* If the page is completely full, then it's supposed to
be after all pages that aren't. Since we've freed one
object from a page that was full, we need to move the
- page to the head of the list.
+ page to the head of the list.
PE is the node we want to move. Q is the previous node
and P is the next node in the list. */
@@ -1414,7 +1614,7 @@ ggc_free (void *p)
static void
compute_inverse (unsigned order)
{
- size_t size, inv;
+ size_t size, inv;
unsigned int e;
size = OBJECT_SIZE (order);
@@ -1460,19 +1660,19 @@ init_ggc (void)
believe, is an unaligned page allocation, which would cause us to
hork badly if we tried to use it. */
{
- char *p = alloc_anon (NULL, G.pagesize);
+ char *p = alloc_anon (NULL, G.pagesize, true);
struct page_entry *e;
if ((size_t)p & (G.pagesize - 1))
{
/* How losing. Discard this one and try another. If we still
can't get something useful, give up. */
- p = alloc_anon (NULL, G.pagesize);
+ p = alloc_anon (NULL, G.pagesize, true);
gcc_assert (!((size_t)p & (G.pagesize - 1)));
}
/* We have a good page, might as well hold onto it... */
- e = xcalloc (1, sizeof (struct page_entry));
+ e = XCNEW (struct page_entry);
e->bytes = G.pagesize;
e->page = p;
e->next = G.free_pages;
@@ -1511,45 +1711,22 @@ init_ggc (void)
int o;
int i;
- o = size_lookup[OBJECT_SIZE (order)];
- for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
+ i = OBJECT_SIZE (order);
+ if (i >= NUM_SIZE_LOOKUP)
+ continue;
+
+ for (o = size_lookup[i]; o == size_lookup [i]; --i)
size_lookup[i] = order;
}
G.depth_in_use = 0;
G.depth_max = 10;
- G.depth = xmalloc (G.depth_max * sizeof (unsigned int));
+ G.depth = XNEWVEC (unsigned int, G.depth_max);
G.by_depth_in_use = 0;
G.by_depth_max = INITIAL_PTE_COUNT;
- G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
- G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
-}
-
-/* Start a new GGC zone. */
-
-struct alloc_zone *
-new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
-{
- return NULL;
-}
-
-/* Destroy a GGC zone. */
-void
-destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
-{
-}
-
-/* Increment the `GC context'. Objects allocated in an outer context
- are never freed, eliminating the need to register their roots. */
-
-void
-ggc_push_context (void)
-{
- ++G.context_depth;
-
- /* Die on wrap. */
- gcc_assert (G.context_depth < HOST_BITS_PER_LONG);
+ G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
+ G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
@@ -1587,89 +1764,6 @@ ggc_recalculate_in_use_p (page_entry *p)
gcc_assert (p->num_free_objects < num_objects);
}
-
-/* Decrement the `GC context'. All objects allocated since the
- previous ggc_push_context are migrated to the outer context. */
-
-void
-ggc_pop_context (void)
-{
- unsigned long omask;
- unsigned int depth, i, e;
-#ifdef ENABLE_CHECKING
- unsigned int order;
-#endif
-
- depth = --G.context_depth;
- omask = (unsigned long)1 << (depth + 1);
-
- if (!((G.context_depth_allocations | G.context_depth_collections) & omask))
- return;
-
- G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1;
- G.context_depth_allocations &= omask - 1;
- G.context_depth_collections &= omask - 1;
-
- /* The G.depth array is shortened so that the last index is the
- context_depth of the top element of by_depth. */
- if (depth+1 < G.depth_in_use)
- e = G.depth[depth+1];
- else
- e = G.by_depth_in_use;
-
- /* We might not have any PTEs of depth depth. */
- if (depth < G.depth_in_use)
- {
-
- /* First we go through all the pages at depth depth to
- recalculate the in use bits. */
- for (i = G.depth[depth]; i < e; ++i)
- {
- page_entry *p = G.by_depth[i];
-
- /* Check that all of the pages really are at the depth that
- we expect. */
- gcc_assert (p->context_depth == depth);
- gcc_assert (p->index_by_depth == i);
-
- prefetch (&save_in_use_p_i (i+8));
- prefetch (&save_in_use_p_i (i+16));
- if (save_in_use_p_i (i))
- {
- p = G.by_depth[i];
- ggc_recalculate_in_use_p (p);
- free (save_in_use_p_i (i));
- save_in_use_p_i (i) = 0;
- }
- }
- }
-
- /* Then, we reset all page_entries with a depth greater than depth
- to be at depth. */
- for (i = e; i < G.by_depth_in_use; ++i)
- {
- page_entry *p = G.by_depth[i];
-
- /* Check that all of the pages really are at the depth we
- expect. */
- gcc_assert (p->context_depth > depth);
- gcc_assert (p->index_by_depth == i);
- p->context_depth = depth;
- }
-
- adjust_depth ();
-
-#ifdef ENABLE_CHECKING
- for (order = 2; order < NUM_ORDERS; order++)
- {
- page_entry *p;
-
- for (p = G.pages[order]; p != NULL; p = p->next)
- gcc_assert (p->context_depth < depth ||
- (p->context_depth == depth && !save_in_use_p (p)));
- }
-#endif
-}
/* Unmark all objects. */
@@ -1696,7 +1790,7 @@ clear_marks (void)
if (p->context_depth < G.context_depth)
{
if (! save_in_use_p (p))
- save_in_use_p (p) = xmalloc (bitmap_size);
+ save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
}
@@ -1766,7 +1860,7 @@ sweep_pages (void)
G.pages[order] = next;
else
previous->next = next;
-
+
/* Splice P out of the back pointers too. */
if (next)
next->prev = previous;
@@ -1884,11 +1978,12 @@ poison_pages (void)
so the exact same memory semantics is kept, in case
there are memory errors. We override this request
below. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
+ size));
memset (object, 0xa5, size);
/* Drop the handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
}
}
}
@@ -1971,6 +2066,8 @@ ggc_collect (void)
/* Indicate that we've seen collections at this context depth. */
G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
+ invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
+
clear_marks ();
ggc_mark_roots ();
#ifdef GATHER_STATISTICS
@@ -1982,6 +2079,8 @@ ggc_collect (void)
G.allocated_last_gc = G.allocated;
+ invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
+
timevar_pop (TV_GC);
if (!quiet_flag)
@@ -2061,46 +2160,50 @@ ggc_print_statistics (void)
SCALE (G.allocated), STAT_LABEL(G.allocated),
SCALE (total_overhead), STAT_LABEL (total_overhead));
-#ifdef GATHER_STATISTICS
+#ifdef GATHER_STATISTICS
{
fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
- fprintf (stderr, "Total Overhead: %10lld\n",
+ fprintf (stderr, "Total Overhead: %10" HOST_LONG_LONG_FORMAT "d\n",
G.stats.total_overhead);
- fprintf (stderr, "Total Allocated: %10lld\n",
+ fprintf (stderr, "Total Allocated: %10" HOST_LONG_LONG_FORMAT "d\n",
G.stats.total_allocated);
- fprintf (stderr, "Total Overhead under 32B: %10lld\n",
+ fprintf (stderr, "Total Overhead under 32B: %10" HOST_LONG_LONG_FORMAT "d\n",
G.stats.total_overhead_under32);
- fprintf (stderr, "Total Allocated under 32B: %10lld\n",
+ fprintf (stderr, "Total Allocated under 32B: %10" HOST_LONG_LONG_FORMAT "d\n",
G.stats.total_allocated_under32);
- fprintf (stderr, "Total Overhead under 64B: %10lld\n",
+ fprintf (stderr, "Total Overhead under 64B: %10" HOST_LONG_LONG_FORMAT "d\n",
G.stats.total_overhead_under64);
- fprintf (stderr, "Total Allocated under 64B: %10lld\n",
+ fprintf (stderr, "Total Allocated under 64B: %10" HOST_LONG_LONG_FORMAT "d\n",
G.stats.total_allocated_under64);
- fprintf (stderr, "Total Overhead under 128B: %10lld\n",
+ fprintf (stderr, "Total Overhead under 128B: %10" HOST_LONG_LONG_FORMAT "d\n",
G.stats.total_overhead_under128);
- fprintf (stderr, "Total Allocated under 128B: %10lld\n",
+ fprintf (stderr, "Total Allocated under 128B: %10" HOST_LONG_LONG_FORMAT "d\n",
G.stats.total_allocated_under128);
-
+
for (i = 0; i < NUM_ORDERS; i++)
if (G.stats.total_allocated_per_order[i])
{
- fprintf (stderr, "Total Overhead page size %7d: %10lld\n",
- OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]);
- fprintf (stderr, "Total Allocated page size %7d: %10lld\n",
- OBJECT_SIZE (i), G.stats.total_allocated_per_order[i]);
+ fprintf (stderr, "Total Overhead page size %7lu: %10" HOST_LONG_LONG_FORMAT "d\n",
+ (unsigned long) OBJECT_SIZE (i),
+ G.stats.total_overhead_per_order[i]);
+ fprintf (stderr, "Total Allocated page size %7lu: %10" HOST_LONG_LONG_FORMAT "d\n",
+ (unsigned long) OBJECT_SIZE (i),
+ G.stats.total_allocated_per_order[i]);
}
}
#endif
}
+struct ggc_pch_ondisk
+{
+ unsigned totals[NUM_ORDERS];
+};
+
struct ggc_pch_data
{
- struct ggc_pch_ondisk
- {
- unsigned totals[NUM_ORDERS];
- } d;
+ struct ggc_pch_ondisk d;
size_t base[NUM_ORDERS];
size_t written[NUM_ORDERS];
};
@@ -2108,20 +2211,21 @@ struct ggc_pch_data
struct ggc_pch_data *
init_ggc_pch (void)
{
- return xcalloc (sizeof (struct ggc_pch_data), 1);
+ return XCNEW (struct ggc_pch_data);
}
void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
- size_t size, bool is_string ATTRIBUTE_UNUSED)
+ size_t size, bool is_string ATTRIBUTE_UNUSED,
+ enum gt_types_enum type ATTRIBUTE_UNUSED)
{
unsigned order;
- if (size <= 256)
+ if (size < NUM_SIZE_LOOKUP)
order = size_lookup[size];
else
{
- order = 9;
+ order = 10;
while (size > OBJECT_SIZE (order))
order++;
}
@@ -2136,7 +2240,7 @@ ggc_pch_total_size (struct ggc_pch_data *d)
unsigned i;
for (i = 0; i < NUM_ORDERS; i++)
- a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
+ a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
return a;
}
@@ -2149,23 +2253,24 @@ ggc_pch_this_base (struct ggc_pch_data *d, void *base)
for (i = 0; i < NUM_ORDERS; i++)
{
d->base[i] = a;
- a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
+ a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
}
}
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
- size_t size, bool is_string ATTRIBUTE_UNUSED)
+ size_t size, bool is_string ATTRIBUTE_UNUSED,
+ enum gt_types_enum type ATTRIBUTE_UNUSED)
{
unsigned order;
char *result;
- if (size <= 256)
+ if (size < NUM_SIZE_LOOKUP)
order = size_lookup[size];
else
{
- order = 9;
+ order = 10;
while (size > OBJECT_SIZE (order))
order++;
}
@@ -2188,19 +2293,19 @@ ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
size_t size, bool is_string ATTRIBUTE_UNUSED)
{
unsigned order;
- static const char emptyBytes[256];
+ static const char emptyBytes[256] = { 0 };
- if (size <= 256)
+ if (size < NUM_SIZE_LOOKUP)
order = size_lookup[size];
else
{
- order = 9;
+ order = 10;
while (size > OBJECT_SIZE (order))
order++;
}
if (fwrite (x, size, 1, f) != 1)
- fatal_error ("can't write PCH file: %m");
+ fatal_error ("can%'t write PCH file: %m");
/* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
object out to OBJECT_SIZE(order). This happens for strings. */
@@ -2212,18 +2317,17 @@ ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
/* To speed small writes, we use a nulled-out array that's larger
than most padding requests as the source for our null bytes. This
permits us to do the padding with fwrite() rather than fseek(), and
- limits the chance the the OS may try to flush any outstanding
- writes. */
+ limits the chance the OS may try to flush any outstanding writes. */
if (padding <= sizeof(emptyBytes))
{
if (fwrite (emptyBytes, 1, padding, f) != padding)
- fatal_error ("can't write PCH file");
+ fatal_error ("can%'t write PCH file");
}
else
{
/* Larger than our buffer? Just default to fseek. */
if (fseek (f, padding, SEEK_CUR) != 0)
- fatal_error ("can't write PCH file");
+ fatal_error ("can%'t write PCH file");
}
}
@@ -2232,14 +2336,14 @@ ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
&& fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
G.pagesize),
SEEK_CUR) != 0)
- fatal_error ("can't write PCH file: %m");
+ fatal_error ("can%'t write PCH file: %m");
}
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
- fatal_error ("can't write PCH file: %m");
+ fatal_error ("can%'t write PCH file: %m");
free (d);
}
@@ -2255,8 +2359,8 @@ move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
page_entry **new_by_depth;
unsigned long **new_save_in_use;
- new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
- new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
+ new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
+ new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
memcpy (&new_by_depth[0],
&G.by_depth[count_old_page_tables],
@@ -2298,7 +2402,7 @@ ggc_pch_read (FILE *f, void *addr)
{
struct ggc_pch_ondisk d;
unsigned i;
- char *offs = addr;
+ char *offs = (char *) addr;
unsigned long count_old_page_tables;
unsigned long count_new_page_tables;
@@ -2310,6 +2414,9 @@ ggc_pch_read (FILE *f, void *addr)
#ifdef ENABLE_GC_CHECKING
poison_pages ();
#endif
+ /* Since we free all the allocated objects, the free list becomes
+ useless. Validate it now, which will also clear it. */
+ validate_free_objects();
/* No object read from a PCH file should ever be freed. So, set the
context depth to 1, and set the depth of all the currently-allocated
@@ -2326,7 +2433,7 @@ ggc_pch_read (FILE *f, void *addr)
/* Allocate the appropriate page-table entries for the pages read from
the PCH file. */
if (fread (&d, sizeof (d), 1, f) != 1)
- fatal_error ("can't read PCH file: %m");
+ fatal_error ("can%'t read PCH file: %m");
for (i = 0; i < NUM_ORDERS; i++)
{
@@ -2339,11 +2446,11 @@ ggc_pch_read (FILE *f, void *addr)
if (d.totals[i] == 0)
continue;
- bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
+ bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
num_objs = bytes / OBJECT_SIZE (i);
- entry = xcalloc (1, (sizeof (struct page_entry)
- - sizeof (long)
- + BITMAP_SIZE (num_objs + 1)));
+ entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
+ - sizeof (long)
+ + BITMAP_SIZE (num_objs + 1)));
entry->bytes = bytes;
entry->page = offs;
entry->context_depth = 0;
@@ -2386,3 +2493,12 @@ ggc_pch_read (FILE *f, void *addr)
/* Update the statistics. */
G.allocated = G.allocated_last_gc = offs - (char *)addr;
}
+
+struct alloc_zone
+{
+ int dummy;
+};
+
+struct alloc_zone rtl_zone;
+struct alloc_zone tree_zone;
+struct alloc_zone tree_id_zone;