// Allocators -*- C++ -*-
-// Copyright (C) 2001, 2002 Free Software Foundation, Inc.
+// Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
* into a "standard" one.
* @endif
*
- * @note The @c reallocate member functions have been deprecated for 3.2
- * and will be removed in 3.4. You must define @c _GLIBCPP_DEPRECATED
- * to make this visible in 3.2; see c++config.h.
- *
* The canonical description of these classes is in docs/html/ext/howto.html
* or online at http://gcc.gnu.org/onlinedocs/libstdc++/ext/howto.html#3
*/
#include <cstddef>
#include <cstdlib>
#include <cstring>
-#include <cassert>
#include <bits/functexcept.h> // For __throw_bad_alloc
#include <bits/stl_threads.h>
-
#include <bits/atomicity.h>
namespace std
/**
* @if maint
* A malloc-based allocator. Typically slower than the
- * __default_alloc_template (below). Typically thread-safe and more
+ * __pool_alloc (below). Typically thread-safe and more
* storage efficient. The template argument is unused and is only present
- * to permit multiple instantiations (but see __default_alloc_template
+ * to permit multiple instantiations (but see __pool_alloc
* for caveats). "SGI" style, plus __set_malloc_handler for OOM conditions.
* @endif
* (See @link Allocators allocators info @endlink for more.)
*/
template<int __inst>
- class __malloc_alloc_template
+ class __malloc_alloc
{
private:
static void* _S_oom_malloc(size_t);
-#ifdef _GLIBCPP_DEPRECATED
- static void* _S_oom_realloc(void*, size_t);
-#endif
static void (* __malloc_alloc_oom_handler)();
public:
allocate(size_t __n)
{
void* __result = malloc(__n);
- if (0 == __result) __result = _S_oom_malloc(__n);
+ if (__builtin_expect(__result == 0, 0))
+ __result = _S_oom_malloc(__n);
return __result;
}
deallocate(void* __p, size_t /* __n */)
{ free(__p); }
-#ifdef _GLIBCPP_DEPRECATED
- static void*
- reallocate(void* __p, size_t /* old_sz */, size_t __new_sz)
- {
- void* __result = realloc(__p, __new_sz);
- if (0 == __result)
- __result = _S_oom_realloc(__p, __new_sz);
- return __result;
- }
-#endif
-
static void (* __set_malloc_handler(void (*__f)()))()
{
void (* __old)() = __malloc_alloc_oom_handler;
// malloc_alloc out-of-memory handling
template<int __inst>
- void (* __malloc_alloc_template<__inst>::__malloc_alloc_oom_handler)() = 0;
+ void (* __malloc_alloc<__inst>::__malloc_alloc_oom_handler)() = 0;
template<int __inst>
void*
- __malloc_alloc_template<__inst>::
+ __malloc_alloc<__inst>::
_S_oom_malloc(size_t __n)
{
void (* __my_malloc_handler)();
for (;;)
{
__my_malloc_handler = __malloc_alloc_oom_handler;
- if (0 == __my_malloc_handler)
- std::__throw_bad_alloc();
+ if (__builtin_expect(__my_malloc_handler == 0, 0))
+ __throw_bad_alloc();
(*__my_malloc_handler)();
__result = malloc(__n);
if (__result)
}
}
-#ifdef _GLIBCPP_DEPRECATED
- template<int __inst>
- void*
- __malloc_alloc_template<__inst>::
- _S_oom_realloc(void* __p, size_t __n)
- {
- void (* __my_malloc_handler)();
- void* __result;
-
- for (;;)
- {
- __my_malloc_handler = __malloc_alloc_oom_handler;
- if (0 == __my_malloc_handler)
- std::__throw_bad_alloc();
- (*__my_malloc_handler)();
- __result = realloc(__p, __n);
- if (__result)
- return __result;
- }
- }
-#endif
-
- // Should not be referenced within the library anymore.
- typedef __new_alloc __mem_interface;
/**
* @if maint
public:
static _Tp*
allocate(size_t __n)
- { return 0 == __n ? 0 : (_Tp*) _Alloc::allocate(__n * sizeof (_Tp)); }
+ {
+ _Tp* __ret = 0;
+ if (__n)
+ __ret = static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp)));
+ return __ret;
+ }
static _Tp*
allocate()
/**
* @if maint
* An adaptor for an underlying allocator (_Alloc) to check the size
- * arguments for debugging. Errors are reported using assert; these
- * checks can be disabled via NDEBUG, but the space penalty is still
- * paid, therefore it is far better to just use the underlying allocator
- * by itelf when no checking is desired.
+ * arguments for debugging.
*
* "There is some evidence that this can confuse Purify." - SGI comment
*
deallocate(void* __p, size_t __n)
{
char* __real_p = (char*)__p - (int) _S_extra;
- assert(*(size_t*)__real_p == __n);
+ if (*(size_t*)__real_p != __n)
+ abort();
_Alloc::deallocate(__real_p, __n + (int) _S_extra);
}
-
-#ifdef _GLIBCPP_DEPRECATED
- static void*
- reallocate(void* __p, size_t __old_sz, size_t __new_sz)
- {
- char* __real_p = (char*)__p - (int) _S_extra;
- assert(*(size_t*)__real_p == __old_sz);
- char* __result = (char*)
- _Alloc::reallocate(__real_p, __old_sz + (int) _S_extra,
- __new_sz + (int) _S_extra);
- *(size_t*)__result = __new_sz;
- return __result + (int) _S_extra;
- }
-#endif
};
*
* Important implementation properties:
* 0. If globally mandated, then allocate objects from __new_alloc
- * 1. If the clients request an object of size > _MAX_BYTES, the resulting
+ * 1. If the clients request an object of size > _S_max_bytes, the resulting
* object will be obtained directly from __new_alloc
* 2. In all other cases, we allocate an object of size exactly
* _S_round_up(requested_size). Thus the client has enough size
* (See @link Allocators allocators info @endlink for more.)
*/
template<bool __threads, int __inst>
- class __default_alloc_template
+ class __pool_alloc
{
private:
- enum {_ALIGN = 8};
- enum {_MAX_BYTES = 128};
- enum {_NFREELISTS = _MAX_BYTES / _ALIGN};
+ enum {_S_align = 8};
+ enum {_S_max_bytes = 128};
+ enum {_S_freelists = _S_max_bytes / _S_align};
union _Obj
{
char _M_client_data[1]; // The client sees this.
};
- static _Obj* volatile _S_free_list[_NFREELISTS];
+ static _Obj* volatile _S_free_list[_S_freelists];
// Chunk allocation state.
static char* _S_start_free;
static char* _S_end_free;
static size_t _S_heap_size;
- static _STL_mutex_lock _S_node_allocator_lock;
+ static _STL_mutex_lock _S_lock;
+ static _Atomic_word _S_force_new;
static size_t
_S_round_up(size_t __bytes)
- { return (((__bytes) + (size_t) _ALIGN-1) & ~((size_t) _ALIGN - 1)); }
+ { return (((__bytes) + (size_t) _S_align-1) & ~((size_t) _S_align - 1)); }
static size_t
_S_freelist_index(size_t __bytes)
- { return (((__bytes) + (size_t)_ALIGN-1)/(size_t)_ALIGN - 1); }
+ { return (((__bytes) + (size_t)_S_align - 1)/(size_t)_S_align - 1); }
// Returns an object of size __n, and optionally adds to size __n
// free list.
// test whether threads are in use.
struct _Lock
{
- _Lock() { if (__threads) _S_node_allocator_lock._M_acquire_lock(); }
- ~_Lock() { if (__threads) _S_node_allocator_lock._M_release_lock(); }
+ _Lock() { if (__threads) _S_lock._M_acquire_lock(); }
+ ~_Lock() { if (__threads) _S_lock._M_release_lock(); }
} __attribute__ ((__unused__));
friend struct _Lock;
- static _Atomic_word _S_force_new;
-
public:
// __n must be > 0
static void*
__atomic_add(&_S_force_new, 1);
else
__atomic_add(&_S_force_new, -1);
- // Trust but verify...
- assert (_S_force_new != 0);
}
- if ((__n > (size_t) _MAX_BYTES) || (_S_force_new > 0))
+ if ((__n > (size_t) _S_max_bytes) || (_S_force_new > 0))
__ret = __new_alloc::allocate(__n);
else
{
// unwinding.
_Lock __lock_instance;
_Obj* __restrict__ __result = *__my_free_list;
- if (__result == 0)
+ if (__builtin_expect(__result == 0, 0))
__ret = _S_refill(_S_round_up(__n));
else
{
*__my_free_list = __result -> _M_free_list_link;
__ret = __result;
- }
+ }
+ if (__builtin_expect(__ret == 0, 0))
+ __throw_bad_alloc();
}
return __ret;
}
static void
deallocate(void* __p, size_t __n)
{
- if ((__n > (size_t) _MAX_BYTES) || (_S_force_new > 0))
+ if ((__n > (size_t) _S_max_bytes) || (_S_force_new > 0))
__new_alloc::deallocate(__p, __n);
else
{
*__my_free_list = __q;
}
}
-
-#ifdef _GLIBCPP_DEPRECATED
- static void*
- reallocate(void* __p, size_t __old_sz, size_t __new_sz);
-#endif
};
template<bool __threads, int __inst> _Atomic_word
- __default_alloc_template<__threads, __inst>::_S_force_new = 0;
+ __pool_alloc<__threads, __inst>::_S_force_new = 0;
template<bool __threads, int __inst>
inline bool
- operator==(const __default_alloc_template<__threads,__inst>&,
- const __default_alloc_template<__threads,__inst>&)
+ operator==(const __pool_alloc<__threads,__inst>&,
+ const __pool_alloc<__threads,__inst>&)
{ return true; }
template<bool __threads, int __inst>
inline bool
- operator!=(const __default_alloc_template<__threads,__inst>&,
- const __default_alloc_template<__threads,__inst>&)
+ operator!=(const __pool_alloc<__threads,__inst>&,
+ const __pool_alloc<__threads,__inst>&)
{ return false; }
// the allocation lock.
template<bool __threads, int __inst>
char*
- __default_alloc_template<__threads, __inst>::
+ __pool_alloc<__threads, __inst>::
_S_chunk_alloc(size_t __size, int& __nobjs)
{
char* __result;
_Obj* volatile* __my_free_list =
_S_free_list + _S_freelist_index(__bytes_left);
- ((_Obj*)_S_start_free) -> _M_free_list_link = *__my_free_list;
- *__my_free_list = (_Obj*)_S_start_free;
+ ((_Obj*)(void*)_S_start_free) -> _M_free_list_link = *__my_free_list;
+ *__my_free_list = (_Obj*)(void*)_S_start_free;
}
_S_start_free = (char*) __new_alloc::allocate(__bytes_to_get);
- if (0 == _S_start_free)
+ if (_S_start_free == 0)
{
size_t __i;
_Obj* volatile* __my_free_list;
// do not try smaller requests, since that tends to result
// in disaster on multi-process machines.
__i = __size;
- for (; __i <= (size_t) _MAX_BYTES; __i += (size_t) _ALIGN)
+ for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
{
__my_free_list = _S_free_list + _S_freelist_index(__i);
__p = *__my_free_list;
- if (0 != __p)
+ if (__p != 0)
{
*__my_free_list = __p -> _M_free_list_link;
_S_start_free = (char*)__p;
// hold the allocation lock.
template<bool __threads, int __inst>
void*
- __default_alloc_template<__threads, __inst>::_S_refill(size_t __n)
+ __pool_alloc<__threads, __inst>::_S_refill(size_t __n)
{
int __nobjs = 20;
char* __chunk = _S_chunk_alloc(__n, __nobjs);
__my_free_list = _S_free_list + _S_freelist_index(__n);
// Build free list in chunk.
- __result = (_Obj*)__chunk;
- *__my_free_list = __next_obj = (_Obj*)(__chunk + __n);
+ __result = (_Obj*)(void*)__chunk;
+ *__my_free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
for (__i = 1; ; __i++)
{
- __current_obj = __next_obj;
- __next_obj = (_Obj*)((char*)__next_obj + __n);
- if (__nobjs - 1 == __i)
- {
- __current_obj -> _M_free_list_link = 0;
- break;
- }
- else
- __current_obj -> _M_free_list_link = __next_obj;
- }
- return(__result);
+ __current_obj = __next_obj;
+ __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
+ if (__nobjs - 1 == __i)
+ {
+ __current_obj -> _M_free_list_link = 0;
+ break;
+ }
+ else
+ __current_obj -> _M_free_list_link = __next_obj;
+ }
+ return __result;
}
-#ifdef _GLIBCPP_DEPRECATED
- template<bool threads, int inst>
- void*
- __default_alloc_template<threads, inst>::
- reallocate(void* __p, size_t __old_sz, size_t __new_sz)
- {
- void* __result;
- size_t __copy_sz;
-
- if (__old_sz > (size_t) _MAX_BYTES && __new_sz > (size_t) _MAX_BYTES)
- return(realloc(__p, __new_sz));
- if (_S_round_up(__old_sz) == _S_round_up(__new_sz))
- return(__p);
- __result = allocate(__new_sz);
- __copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
- memcpy(__result, __p, __copy_sz);
- deallocate(__p, __old_sz);
- return(__result);
- }
-#endif
-
template<bool __threads, int __inst>
_STL_mutex_lock
- __default_alloc_template<__threads,__inst>::_S_node_allocator_lock
- __STL_MUTEX_INITIALIZER;
+ __pool_alloc<__threads,__inst>::_S_lock __STL_MUTEX_INITIALIZER;
template<bool __threads, int __inst>
- char* __default_alloc_template<__threads,__inst>::_S_start_free = 0;
+ char* __pool_alloc<__threads,__inst>::_S_start_free = 0;
template<bool __threads, int __inst>
- char* __default_alloc_template<__threads,__inst>::_S_end_free = 0;
+ char* __pool_alloc<__threads,__inst>::_S_end_free = 0;
template<bool __threads, int __inst>
- size_t __default_alloc_template<__threads,__inst>::_S_heap_size = 0;
+ size_t __pool_alloc<__threads,__inst>::_S_heap_size = 0;
template<bool __threads, int __inst>
- typename __default_alloc_template<__threads,__inst>::_Obj* volatile
- __default_alloc_template<__threads,__inst>::_S_free_list[_NFREELISTS];
+ typename __pool_alloc<__threads,__inst>::_Obj* volatile
+ __pool_alloc<__threads,__inst>::_S_free_list[_S_freelists];
- typedef __default_alloc_template<true,0> __alloc;
- typedef __default_alloc_template<false,0> __single_client_alloc;
+ typedef __pool_alloc<true,0> __alloc;
+ typedef __pool_alloc<false,0> __single_client_alloc;
/**
* of stl_alloc.h.)
*
* The underlying allocator behaves as follows.
- * - __default_alloc_template is used via two typedefs
+ * - __pool_alloc is used via two typedefs
* - "__single_client_alloc" typedef does no locking for threads
* - "__alloc" typedef is threadsafe via the locks
* - __new_alloc is used for memory requests
const_pointer
address(const_reference __x) const { return &__x; }
- // __n is permitted to be 0. The C++ standard says nothing about what
- // the return value is when __n == 0.
+ // NB: __n is permitted to be 0. The C++ standard says nothing
+ // about what the return value is when __n == 0.
_Tp*
allocate(size_type __n, const void* = 0)
{
- return __n != 0
- ? static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp))) : 0;
+ _Tp* __ret = 0;
+ if (__n)
+ {
+ if (__n <= this->max_size())
+ __ret = static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp)));
+ else
+ __throw_bad_alloc();
+ }
+ return __ret;
}
// __p is not permitted to be a null pointer.
/**
* @if maint
- * Allocator adaptor to turn an "SGI" style allocator (e.g., __alloc,
- * __malloc_alloc_template) into a "standard" conforming allocator. Note
- * that this adaptor does *not* assume that all objects of the underlying
- * alloc class are identical, nor does it assume that all of the underlying
- * alloc's member functions are static member functions. Note, also, that
- * __allocator<_Tp, __alloc> is essentially the same thing as allocator<_Tp>.
+ * Allocator adaptor to turn an "SGI" style allocator (e.g.,
+ * __alloc, __malloc_alloc) into a "standard" conforming
+ * allocator. Note that this adaptor does *not* assume that all
+ * objects of the underlying alloc class are identical, nor does it
+ * assume that all of the underlying alloc's member functions are
+ * static member functions. Note, also, that __allocator<_Tp,
+ * __alloc> is essentially the same thing as allocator<_Tp>.
* @endif
* (See @link Allocators allocators info @endlink for more.)
*/
struct __allocator
{
_Alloc __underlying_alloc;
-
+
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef _Tp* pointer;
const_pointer
address(const_reference __x) const { return &__x; }
- // __n is permitted to be 0.
- _Tp*
- allocate(size_type __n, const void* = 0)
- {
- return __n != 0
- ? static_cast<_Tp*>(__underlying_alloc.allocate(__n * sizeof(_Tp)))
- : 0;
- }
-
- // __p is not permitted to be a null pointer.
- void
- deallocate(pointer __p, size_type __n)
- { __underlying_alloc.deallocate(__p, __n * sizeof(_Tp)); }
-
- size_type
- max_size() const throw() { return size_t(-1) / sizeof(_Tp); }
-
- void
- construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
+      // NB: __n is permitted to be 0.  The C++ standard says nothing
+      // about what the return value is when __n == 0.
+      _Tp*
+      allocate(size_type __n, const void* = 0)
+      {
+        _Tp* __ret = 0;
+        if (__n)
+          __ret = static_cast<_Tp*>(__underlying_alloc.allocate(__n * sizeof(_Tp)));
+        return __ret;
+      }
- void
- destroy(pointer __p) { __p->~_Tp(); }
- };
+ // __p is not permitted to be a null pointer.
+ void
+ deallocate(pointer __p, size_type __n)
+ { __underlying_alloc.deallocate(__p, __n * sizeof(_Tp)); }
+
+ size_type
+ max_size() const throw() { return size_t(-1) / sizeof(_Tp); }
+
+ void
+ construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
+
+ void
+ destroy(pointer __p) { __p->~_Tp(); }
+ };
template<typename _Alloc>
struct __allocator<void, _Alloc>
*/
template<int inst>
inline bool
- operator==(const __malloc_alloc_template<inst>&,
- const __malloc_alloc_template<inst>&)
+ operator==(const __malloc_alloc<inst>&,
+ const __malloc_alloc<inst>&)
{ return true; }
template<int __inst>
inline bool
- operator!=(const __malloc_alloc_template<__inst>&,
- const __malloc_alloc_template<__inst>&)
+ operator!=(const __malloc_alloc<__inst>&,
+ const __malloc_alloc<__inst>&)
{ return false; }
template<typename _Alloc>
//@{
/// Versions for the predefined "SGI" style allocators.
template<typename _Tp, int __inst>
- struct _Alloc_traits<_Tp, __malloc_alloc_template<__inst> >
+ struct _Alloc_traits<_Tp, __malloc_alloc<__inst> >
{
static const bool _S_instanceless = true;
- typedef __simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
- typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
+ typedef __simple_alloc<_Tp, __malloc_alloc<__inst> > _Alloc_type;
+ typedef __allocator<_Tp, __malloc_alloc<__inst> > allocator_type;
};
template<typename _Tp, bool __threads, int __inst>
- struct _Alloc_traits<_Tp, __default_alloc_template<__threads, __inst> >
+ struct _Alloc_traits<_Tp, __pool_alloc<__threads, __inst> >
{
static const bool _S_instanceless = true;
- typedef __simple_alloc<_Tp, __default_alloc_template<__threads, __inst> >
+ typedef __simple_alloc<_Tp, __pool_alloc<__threads, __inst> >
_Alloc_type;
- typedef __allocator<_Tp, __default_alloc_template<__threads, __inst> >
+ typedef __allocator<_Tp, __pool_alloc<__threads, __inst> >
allocator_type;
};
/// "SGI" style allocators.
template<typename _Tp, typename _Tp1, int __inst>
struct _Alloc_traits<_Tp,
- __allocator<_Tp1, __malloc_alloc_template<__inst> > >
+ __allocator<_Tp1, __malloc_alloc<__inst> > >
{
static const bool _S_instanceless = true;
- typedef __simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
- typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
+ typedef __simple_alloc<_Tp, __malloc_alloc<__inst> > _Alloc_type;
+ typedef __allocator<_Tp, __malloc_alloc<__inst> > allocator_type;
};
template<typename _Tp, typename _Tp1, bool __thr, int __inst>
- struct _Alloc_traits<_Tp, __allocator<_Tp1, __default_alloc_template<__thr, __inst> > >
+ struct _Alloc_traits<_Tp, __allocator<_Tp1, __pool_alloc<__thr, __inst> > >
{
static const bool _S_instanceless = true;
- typedef __simple_alloc<_Tp, __default_alloc_template<__thr,__inst> >
+ typedef __simple_alloc<_Tp, __pool_alloc<__thr,__inst> >
_Alloc_type;
- typedef __allocator<_Tp, __default_alloc_template<__thr,__inst> >
+ typedef __allocator<_Tp, __pool_alloc<__thr,__inst> >
allocator_type;
};
// Inhibit implicit instantiations for required instantiations,
// which are defined via explicit instantiations elsewhere.
// NB: This syntax is a GNU extension.
+#if _GLIBCPP_EXTERN_TEMPLATE
extern template class allocator<char>;
extern template class allocator<wchar_t>;
- extern template class __default_alloc_template<true,0>;
+ extern template class __pool_alloc<true,0>;
+#endif
} // namespace std
#endif