* into a "standard" one.
* @endif
*
+ * @note The @c reallocate member functions have been deprecated for 3.2
+ * and will be removed in 3.3. You must define @c _GLIBCPP_DEPRECATED
+ * to make them visible in 3.2; see c++config.h.
+ *
* The canonical description of these classes is in docs/html/ext/howto.html
* or online at http://gcc.gnu.org/onlinedocs/libstdc++/ext/howto.html#3
*/
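
A usage sketch of the note above (illustrative only, not part of this patch): assuming c++config.h does not already define the macro for this build, a translation unit can opt back into the deprecated interface before including the library headers. The class name and std namespace placement follow the 3.2 headers shown later in this file.

    #define _GLIBCPP_DEPRECATED 1     // assumption: not already set by c++config.h
    #include <cstddef>
    #include <memory>                 // brings in the SGI-style allocators

    void* grow(void* buf, std::size_t old_sz, std::size_t new_sz)
    {
      // reallocate is only declared when the macro above is defined
      return std::__malloc_alloc_template<0>::reallocate(buf, old_sz, new_sz);
    }
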
static void*
allocate(size_t __n)
{ return ::operator new(__n); }
-
+
static void
deallocate(void* __p, size_t)
{ ::operator delete(__p); }
{
private:
static void* _S_oom_malloc(size_t);
+#ifdef _GLIBCPP_DEPRECATED
static void* _S_oom_realloc(void*, size_t);
+#endif
static void (* __malloc_alloc_oom_handler)();
public:
static void*
allocate(size_t __n)
{
- void* __result = malloc(__n);
- if (0 == __result) __result = _S_oom_malloc(__n);
- return __result;
+ void* __result = malloc(__n);
+ if (0 == __result) __result = _S_oom_malloc(__n);
+ return __result;
}
-
+
static void
deallocate(void* __p, size_t /* __n */)
{ free(__p); }
-
+
+#ifdef _GLIBCPP_DEPRECATED
static void*
reallocate(void* __p, size_t /* old_sz */, size_t __new_sz)
{
- void* __result = realloc(__p, __new_sz);
- if (0 == __result)
- __result = _S_oom_realloc(__p, __new_sz);
- return __result;
+ void* __result = realloc(__p, __new_sz);
+ if (0 == __result)
+ __result = _S_oom_realloc(__p, __new_sz);
+ return __result;
}
+#endif
static void (* __set_malloc_handler(void (*__f)()))()
{
- void (* __old)() = __malloc_alloc_oom_handler;
- __malloc_alloc_oom_handler = __f;
- return(__old);
+ void (* __old)() = __malloc_alloc_oom_handler;
+ __malloc_alloc_oom_handler = __f;
+ return(__old);
}
};
template<int __inst>
void*
- __malloc_alloc_template<__inst>::_S_oom_malloc(size_t __n)
+ __malloc_alloc_template<__inst>::
+ _S_oom_malloc(size_t __n)
{
void (* __my_malloc_handler)();
void* __result;
-
+
for (;;)
- {
- __my_malloc_handler = __malloc_alloc_oom_handler;
- if (0 == __my_malloc_handler)
+ {
+ __my_malloc_handler = __malloc_alloc_oom_handler;
+ if (0 == __my_malloc_handler)
std::__throw_bad_alloc();
- (*__my_malloc_handler)();
- __result = malloc(__n);
- if (__result)
- return(__result);
- }
+ (*__my_malloc_handler)();
+ __result = malloc(__n);
+ if (__result)
+ return(__result);
+ }
}
-
+
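
The retry loop above keeps calling the installed handler until malloc succeeds, so a handler must release memory or throw; the protocol mirrors std::set_new_handler. A hypothetical sketch (the emergency pool and its names are made up for illustration):

    #include <memory>
    #include <new>

    static char* emergency_pool = new char[64 * 1024];

    void release_emergency_pool()        // hypothetical handler
    {
      if (emergency_pool == 0)
        throw std::bad_alloc();          // nothing left to release
      delete [] emergency_pool;
      emergency_pool = 0;
    }

    // Installing it returns the previously installed handler.
    void (*prev_handler)() =
      std::__malloc_alloc_template<0>::__set_malloc_handler(&release_emergency_pool);
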
+#ifdef _GLIBCPP_DEPRECATED
template<int __inst>
- void*
- __malloc_alloc_template<__inst>::
- _S_oom_realloc(void* __p, size_t __n)
- {
- void (* __my_malloc_handler)();
- void* __result;
+ void*
+ __malloc_alloc_template<__inst>::
+ _S_oom_realloc(void* __p, size_t __n)
+ {
+ void (* __my_malloc_handler)();
+ void* __result;
- for (;;)
- {
- __my_malloc_handler = __malloc_alloc_oom_handler;
- if (0 == __my_malloc_handler)
- std::__throw_bad_alloc();
- (*__my_malloc_handler)();
- __result = realloc(__p, __n);
- if (__result)
- return(__result);
- }
- }
+ for (;;)
+ {
+ __my_malloc_handler = __malloc_alloc_oom_handler;
+ if (0 == __my_malloc_handler)
+ std::__throw_bad_alloc();
+ (*__my_malloc_handler)();
+ __result = realloc(__p, __n);
+ if (__result)
+ return(__result);
+ }
+ }
+#endif
// Determines the underlying allocator choice for the node allocator.
private:
// Size of the header used to store the allocation size. Note that this
// must be large enough to preserve alignment.
- enum {_S_extra = 8};
+ enum {_S_extra = 8};
public:
static void*
allocate(size_t __n)
{
- char* __result = (char*)_Alloc::allocate(__n + (int) _S_extra);
- *(size_t*)__result = __n;
- return __result + (int) _S_extra;
+ char* __result = (char*)_Alloc::allocate(__n + (int) _S_extra);
+ *(size_t*)__result = __n;
+ return __result + (int) _S_extra;
}
-
+
static void
deallocate(void* __p, size_t __n)
{
- char* __real_p = (char*)__p - (int) _S_extra;
- assert(*(size_t*)__real_p == __n);
- _Alloc::deallocate(__real_p, __n + (int) _S_extra);
+ char* __real_p = (char*)__p - (int) _S_extra;
+ assert(*(size_t*)__real_p == __n);
+ _Alloc::deallocate(__real_p, __n + (int) _S_extra);
}
-
+
+#ifdef _GLIBCPP_DEPRECATED
static void*
reallocate(void* __p, size_t __old_sz, size_t __new_sz)
{
- char* __real_p = (char*)__p - (int) _S_extra;
- assert(*(size_t*)__real_p == __old_sz);
- char* __result = (char*)
- _Alloc::reallocate(__real_p, __old_sz + (int) _S_extra,
- __new_sz + (int) _S_extra);
- *(size_t*)__result = __new_sz;
- return __result + (int) _S_extra;
+ char* __real_p = (char*)__p - (int) _S_extra;
+ assert(*(size_t*)__real_p == __old_sz);
+ char* __result = (char*)
+ _Alloc::reallocate(__real_p, __old_sz + (int) _S_extra,
+ __new_sz + (int) _S_extra);
+ *(size_t*)__result = __new_sz;
+ return __result + (int) _S_extra;
}
+#endif
};
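
The bookkeeping above is easiest to see as a memory picture (illustrative only): the _S_extra-byte header records the requested size, so deallocate and reallocate can check the size the caller passes back.

    _Alloc::allocate(__n + _S_extra)
    |
    v
    +------------------------------+---------------------------------+
    | size_t __n, padded to 8 bytes|   __n bytes returned to caller  |
    +------------------------------+---------------------------------+
                                   ^
                                   pointer handed out by allocate()
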
-
-
+
+
#ifdef __USE_MALLOC
-
+
typedef __mem_interface __alloc;
typedef __mem_interface __single_client_alloc;
enum {_ALIGN = 8};
enum {_MAX_BYTES = 128};
enum {_NFREELISTS = _MAX_BYTES / _ALIGN};
-
+
union _Obj
{
- union _Obj* _M_free_list_link;
- char _M_client_data[1]; // The client sees this.
+ union _Obj* _M_free_list_link;
+ char _M_client_data[1]; // The client sees this.
};
-
+
static _Obj* volatile _S_free_list[_NFREELISTS];
-
+
// Chunk allocation state.
static char* _S_start_free;
static char* _S_end_free;
static size_t _S_heap_size;
-
+
static _STL_mutex_lock _S_node_allocator_lock;
-
+
static size_t
_S_round_up(size_t __bytes)
{ return (((__bytes) + (size_t) _ALIGN-1) & ~((size_t) _ALIGN - 1)); }
-
+
static size_t
_S_freelist_index(size_t __bytes)
{ return (((__bytes) + (size_t)_ALIGN-1)/(size_t)_ALIGN - 1); }
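
A worked example of the two helpers (arithmetic only, using the _ALIGN == 8 value above): a 20-byte request lands in the 24-byte bucket.

    _S_round_up(20)       == (20 + 7) & ~7     == 24
    _S_freelist_index(20) == (20 + 7)/8 - 1    == 2    // i.e. the 24-byte free list
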
-
+
// Returns an object of size __n, and optionally adds entries to the
// size-__n free list.
static void*
_S_refill(size_t __n);
-
+
// Allocates a chunk for __nobjs objects of size __size. __nobjs may be
// reduced if it is inconvenient to allocate the requested number.
static char*
_S_chunk_alloc(size_t __size, int& __nobjs);
-
+
// It would be nice to use _STL_auto_lock here. But we need to test
// whether threads are in use.
struct _Lock
{
- _Lock() { if (__threads) _S_node_allocator_lock._M_acquire_lock(); }
- ~_Lock() { if (__threads) _S_node_allocator_lock._M_release_lock(); }
+ _Lock() { if (__threads) _S_node_allocator_lock._M_acquire_lock(); }
+ ~_Lock() { if (__threads) _S_node_allocator_lock._M_release_lock(); }
} __attribute__ ((__unused__));
friend struct _Lock;
-
+
public:
// __n must be > 0
static void*
allocate(size_t __n)
{
- void* __ret = 0;
-
- if (__n > (size_t) _MAX_BYTES)
- __ret = __mem_interface::allocate(__n);
- else
- {
- _Obj* volatile* __my_free_list = _S_free_list
- + _S_freelist_index(__n);
- // Acquire the lock here with a constructor call. This
- // ensures that it is released in exit or during stack
- // unwinding.
- _Lock __lock_instance;
- _Obj* __restrict__ __result = *__my_free_list;
- if (__result == 0)
- __ret = _S_refill(_S_round_up(__n));
- else
- {
- *__my_free_list = __result -> _M_free_list_link;
- __ret = __result;
- }
- }
- return __ret;
+ void* __ret = 0;
+
+ if (__n > (size_t) _MAX_BYTES)
+ __ret = __mem_interface::allocate(__n);
+ else
+ {
+ _Obj* volatile* __my_free_list = _S_free_list
+ + _S_freelist_index(__n);
+ // Acquire the lock here with a constructor call. This
+ // ensures that it is released in exit or during stack
+ // unwinding.
+ _Lock __lock_instance;
+ _Obj* __restrict__ __result = *__my_free_list;
+ if (__result == 0)
+ __ret = _S_refill(_S_round_up(__n));
+ else
+ {
+ *__my_free_list = __result -> _M_free_list_link;
+ __ret = __result;
+ }
+ }
+ return __ret;
};
-
+
// __p may not be 0
static void
deallocate(void* __p, size_t __n)
{
- if (__n > (size_t) _MAX_BYTES)
- __mem_interface::deallocate(__p, __n);
- else
- {
- _Obj* volatile* __my_free_list = _S_free_list
- + _S_freelist_index(__n);
- _Obj* __q = (_Obj*)__p;
-
- // Acquire the lock here with a constructor call. This
- // ensures that it is released in exit or during stack
- // unwinding.
- _Lock __lock_instance;
- __q -> _M_free_list_link = *__my_free_list;
- *__my_free_list = __q;
- }
+ if (__n > (size_t) _MAX_BYTES)
+ __mem_interface::deallocate(__p, __n);
+ else
+ {
+ _Obj* volatile* __my_free_list = _S_free_list
+ + _S_freelist_index(__n);
+ _Obj* __q = (_Obj*)__p;
+
+ // Acquire the lock here with a constructor call. This
+ // ensures that it is released in exit or during stack
+ // unwinding.
+ _Lock __lock_instance;
+ __q -> _M_free_list_link = *__my_free_list;
+ *__my_free_list = __q;
+ }
}
-
+
+#ifdef _GLIBCPP_DEPRECATED
static void*
reallocate(void* __p, size_t __old_sz, size_t __new_sz);
+#endif
};
-
+
template<bool __threads, int __inst>
inline bool
operator==(const __default_alloc_template<__threads,__inst>&,
- const __default_alloc_template<__threads,__inst>&)
+ const __default_alloc_template<__threads,__inst>&)
{ return true; }
template<bool __threads, int __inst>
inline bool
operator!=(const __default_alloc_template<__threads,__inst>&,
- const __default_alloc_template<__threads,__inst>&)
+ const __default_alloc_template<__threads,__inst>&)
{ return false; }
char* __result;
size_t __total_bytes = __size * __nobjs;
size_t __bytes_left = _S_end_free - _S_start_free;
-
+
if (__bytes_left >= __total_bytes)
- {
- __result = _S_start_free;
- _S_start_free += __total_bytes;
- return(__result);
- }
+ {
+ __result = _S_start_free;
+ _S_start_free += __total_bytes;
+ return(__result);
+ }
else if (__bytes_left >= __size)
- {
- __nobjs = (int)(__bytes_left/__size);
- __total_bytes = __size * __nobjs;
- __result = _S_start_free;
- _S_start_free += __total_bytes;
- return(__result);
- }
+ {
+ __nobjs = (int)(__bytes_left/__size);
+ __total_bytes = __size * __nobjs;
+ __result = _S_start_free;
+ _S_start_free += __total_bytes;
+ return(__result);
+ }
else
- {
- size_t __bytes_to_get =
- 2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
- // Try to make use of the left-over piece.
- if (__bytes_left > 0)
- {
- _Obj* volatile* __my_free_list =
- _S_free_list + _S_freelist_index(__bytes_left);
-
- ((_Obj*)_S_start_free) -> _M_free_list_link = *__my_free_list;
- *__my_free_list = (_Obj*)_S_start_free;
- }
- _S_start_free = (char*) __mem_interface::allocate(__bytes_to_get);
- if (0 == _S_start_free)
- {
- size_t __i;
- _Obj* volatile* __my_free_list;
- _Obj* __p;
- // Try to make do with what we have. That can't hurt. We
- // do not try smaller requests, since that tends to result
- // in disaster on multi-process machines.
- __i = __size;
- for (; __i <= (size_t) _MAX_BYTES; __i += (size_t) _ALIGN)
- {
- __my_free_list = _S_free_list + _S_freelist_index(__i);
- __p = *__my_free_list;
- if (0 != __p)
- {
- *__my_free_list = __p -> _M_free_list_link;
- _S_start_free = (char*)__p;
- _S_end_free = _S_start_free + __i;
- return(_S_chunk_alloc(__size, __nobjs));
- // Any leftover piece will eventually make it to the
- // right free list.
- }
- }
- _S_end_free = 0; // In case of exception.
- _S_start_free = (char*)__mem_interface::allocate(__bytes_to_get);
- // This should either throw an exception or remedy the situation.
- // Thus we assume it succeeded.
- }
- _S_heap_size += __bytes_to_get;
- _S_end_free = _S_start_free + __bytes_to_get;
- return(_S_chunk_alloc(__size, __nobjs));
- }
+ {
+ size_t __bytes_to_get =
+ 2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
+ // Try to make use of the left-over piece.
+ if (__bytes_left > 0)
+ {
+ _Obj* volatile* __my_free_list =
+ _S_free_list + _S_freelist_index(__bytes_left);
+
+ ((_Obj*)_S_start_free) -> _M_free_list_link = *__my_free_list;
+ *__my_free_list = (_Obj*)_S_start_free;
+ }
+ _S_start_free = (char*) __mem_interface::allocate(__bytes_to_get);
+ if (0 == _S_start_free)
+ {
+ size_t __i;
+ _Obj* volatile* __my_free_list;
+ _Obj* __p;
+ // Try to make do with what we have. That can't hurt. We
+ // do not try smaller requests, since that tends to result
+ // in disaster on multi-process machines.
+ __i = __size;
+ for (; __i <= (size_t) _MAX_BYTES; __i += (size_t) _ALIGN)
+ {
+ __my_free_list = _S_free_list + _S_freelist_index(__i);
+ __p = *__my_free_list;
+ if (0 != __p)
+ {
+ *__my_free_list = __p -> _M_free_list_link;
+ _S_start_free = (char*)__p;
+ _S_end_free = _S_start_free + __i;
+ return(_S_chunk_alloc(__size, __nobjs));
+ // Any leftover piece will eventually make it to the
+ // right free list.
+ }
+ }
+ _S_end_free = 0; // In case of exception.
+ _S_start_free = (char*)__mem_interface::allocate(__bytes_to_get);
+ // This should either throw an exception or remedy the situation.
+ // Thus we assume it succeeded.
+ }
+ _S_heap_size += __bytes_to_get;
+ _S_end_free = _S_start_free + __bytes_to_get;
+ return(_S_chunk_alloc(__size, __nobjs));
+ }
}
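
To make the growth policy above concrete (numbers are illustrative): with __size == 16, __nobjs == 20 and _S_heap_size == 4096, the pool asks __mem_interface for

    2 * (16 * 20) + _S_round_up(4096 >> 4)  ==  640 + 256  ==  896 bytes

that is, twice the immediate need plus an amount proportional to the heap obtained so far.
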
-
+
// Returns an object of size __n, and optionally adds to "size
// __n"'s free list. We assume that __n is properly aligned. We
_Obj* __current_obj;
_Obj* __next_obj;
int __i;
-
+
if (1 == __nobjs)
- return(__chunk);
+ return(__chunk);
__my_free_list = _S_free_list + _S_freelist_index(__n);
-
+
// Build free list in chunk.
__result = (_Obj*)__chunk;
*__my_free_list = __next_obj = (_Obj*)(__chunk + __n);
for (__i = 1; ; __i++)
- {
- __current_obj = __next_obj;
- __next_obj = (_Obj*)((char*)__next_obj + __n);
- if (__nobjs - 1 == __i)
- {
- __current_obj -> _M_free_list_link = 0;
- break;
- }
- else
- __current_obj -> _M_free_list_link = __next_obj;
- }
+ {
+ __current_obj = __next_obj;
+ __next_obj = (_Obj*)((char*)__next_obj + __n);
+ if (__nobjs - 1 == __i)
+ {
+ __current_obj -> _M_free_list_link = 0;
+ break;
+ }
+ else
+ __current_obj -> _M_free_list_link = __next_obj;
+ }
return(__result);
}
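
Concretely (illustrative): if _S_chunk_alloc returns room for 20 objects, the split made by the loop above looks like this.

    __chunk -> [ obj 1 ][ obj 2 ][ obj 3 ] ... [ obj 20 ]
                 |        |________|_______________|
                 |        threaded onto the size-__n free list (last link = 0)
                 returned to the caller
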
-
+
+#ifdef _GLIBCPP_DEPRECATED
template<bool threads, int inst>
void*
__default_alloc_template<threads, inst>::
{
void* __result;
size_t __copy_sz;
-
+
if (__old_sz > (size_t) _MAX_BYTES && __new_sz > (size_t) _MAX_BYTES)
- return(realloc(__p, __new_sz));
+ return(realloc(__p, __new_sz));
if (_S_round_up(__old_sz) == _S_round_up(__new_sz))
- return(__p);
+ return(__p);
__result = allocate(__new_sz);
__copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
memcpy(__result, __p, __copy_sz);
deallocate(__p, __old_sz);
return(__result);
}
+#endif
template<bool __threads, int __inst>
_STL_mutex_lock
typedef _Tp& reference;
typedef const _Tp& const_reference;
typedef _Tp value_type;
-
- template<typename _Tp1>
- struct rebind
- { typedef allocator<_Tp1> other; };
+
+ template<typename _Tp1>
+ struct rebind
+ { typedef allocator<_Tp1> other; };
allocator() throw() {}
allocator(const allocator&) throw() {}
- template<typename _Tp1>
+ template<typename _Tp1>
allocator(const allocator<_Tp1>&) throw() {}
~allocator() throw() {}
-
- pointer
+
+ pointer
address(reference __x) const { return &__x; }
- const_pointer
+ const_pointer
address(const_reference __x) const { return &__x; }
-
+
// __n is permitted to be 0. The C++ standard says nothing about what
// the return value is when __n == 0.
_Tp*
allocate(size_type __n, const void* = 0)
{
- return __n != 0
- ? static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp))) : 0;
+ return __n != 0
+ ? static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp))) : 0;
}
// __p is not permitted to be a null pointer.
void
deallocate(pointer __p, size_type __n)
{ _Alloc::deallocate(__p, __n * sizeof(_Tp)); }
-
+
size_type
max_size() const throw() { return size_t(-1) / sizeof(_Tp); }
-
+
void construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
void destroy(pointer __p) { __p->~_Tp(); }
};
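
A small usage sketch of the rebind machinery above (illustrative; Node is a made-up element type): this is how a container holding int obtains raw storage for its internal node type from the same allocator family.

    #include <memory>

    struct Node { int data; Node* next; };

    typedef std::allocator<int>             int_alloc;
    typedef int_alloc::rebind<Node>::other  node_alloc;

    void node_demo(const int_alloc& a)
    {
      node_alloc na(a);              // converting constructor shown above
      Node* n = na.allocate(1);      // uninitialized storage for one Node
      na.deallocate(n, 1);
    }
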
typedef void* pointer;
typedef const void* const_pointer;
typedef void value_type;
-
- template<typename _Tp1>
- struct rebind
+
+ template<typename _Tp1>
+ struct rebind
{ typedef allocator<_Tp1> other; };
};
-
+
template<typename _T1, typename _T2>
inline bool
struct __allocator
{
_Alloc __underlying_alloc;
-
+
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef _Tp* pointer;
typedef _Tp& reference;
typedef const _Tp& const_reference;
typedef _Tp value_type;
-
- template<typename _Tp1>
- struct rebind
- { typedef __allocator<_Tp1, _Alloc> other; };
+
+ template<typename _Tp1>
+ struct rebind
+ { typedef __allocator<_Tp1, _Alloc> other; };
__allocator() throw() {}
__allocator(const __allocator& __a) throw()
~__allocator() throw() {}
- pointer
+ pointer
address(reference __x) const { return &__x; }
- const_pointer
+ const_pointer
address(const_reference __x) const { return &__x; }
// __n is permitted to be 0.
size_type
max_size() const throw() { return size_t(-1) / sizeof(_Tp); }
- void
+ void
construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
- void
+ void
destroy(pointer __p) { __p->~_Tp(); }
};
typedef void* pointer;
typedef const void* const_pointer;
typedef void value_type;
-
- template<typename _Tp1>
- struct rebind
- { typedef __allocator<_Tp1, _Alloc> other; };
+
+ template<typename _Tp1>
+ struct rebind
+ { typedef __allocator<_Tp1, _Alloc> other; };
};
template<typename _Tp, typename _Alloc>
inline bool
operator==(const __allocator<_Tp,_Alloc>& __a1,
- const __allocator<_Tp,_Alloc>& __a2)
+ const __allocator<_Tp,_Alloc>& __a2)
{ return __a1.__underlying_alloc == __a2.__underlying_alloc; }
template<typename _Tp, typename _Alloc>
inline bool
- operator!=(const __allocator<_Tp, _Alloc>& __a1,
- const __allocator<_Tp, _Alloc>& __a2)
+ operator!=(const __allocator<_Tp, _Alloc>& __a1,
+ const __allocator<_Tp, _Alloc>& __a2)
{ return __a1.__underlying_alloc != __a2.__underlying_alloc; }
*/
template<int inst>
inline bool
- operator==(const __malloc_alloc_template<inst>&,
- const __malloc_alloc_template<inst>&)
+ operator==(const __malloc_alloc_template<inst>&,
+ const __malloc_alloc_template<inst>&)
{ return true; }
template<int __inst>
inline bool
- operator!=(const __malloc_alloc_template<__inst>&,
- const __malloc_alloc_template<__inst>&)
+ operator!=(const __malloc_alloc_template<__inst>&,
+ const __malloc_alloc_template<__inst>&)
{ return false; }
template<typename _Alloc>
static const bool _S_instanceless = false;
typedef typename _Allocator::template rebind<_Tp>::other allocator_type;
};
-
+
template<typename _Tp, typename _Allocator>
const bool _Alloc_traits<_Tp, _Allocator>::_S_instanceless;
/// "SGI" style allocators.
template<typename _Tp, typename _Tp1, int __inst>
struct _Alloc_traits<_Tp,
- __allocator<_Tp1, __malloc_alloc_template<__inst> > >
+ __allocator<_Tp1, __malloc_alloc_template<__inst> > >
{
static const bool _S_instanceless = true;
typedef __simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
#endif
} // namespace std
-#endif
+#endif