// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.
// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <ext/atomicity.h>

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  using std::size_t;
  using std::ptrdiff_t;
  typedef void (*__destroy_handler)(void*);

  /// @brief  Base class for pool object.
  struct __pool_base
  {
    // Using short int as type for the binmap implies we are never
    // caching blocks larger than 65535 with this allocator.
    typedef unsigned short int _Binmap_type;
    // Variables used to configure the behavior of the allocator,
    // assigned and explained in detail below.
    struct _Tune
    {
      // Compile time constants for the default _Tune values.
      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_min_bin = 8 };
      enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
      enum { _S_max_threads = 4096 };
      enum { _S_freelist_headroom = 10 };
      // Alignment needed.
      // NB: In any case must be >= sizeof(_Block_record), that
      // is 4 on 32 bit machines and 8 on 64 bit machines.
      size_t    _M_align;

      // Allocation requests (after round-up to power of 2) below
      // this value will be handled by the allocator. A raw call to
      // operator new will be used for requests larger than this value.
      size_t    _M_max_bytes;

      // Size in bytes of the smallest bin.
      // NB: Must be a power of 2 and >= _M_align.
      size_t    _M_min_bin;

      // In order to avoid fragmenting and minimize the number of
      // new() calls we always request new memory using this
      // value. Based on previous discussions on the libstdc++
      // mailing list we have chosen the value below.
      // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
      size_t    _M_chunk_size;

      // The maximum number of supported threads. For
      // single-threaded operation, use one. Maximum values will
      // vary depending on details of the underlying system. (For
      // instance, Linux 2.4.18 reports 4070 in
      // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
      // 65534.)
      size_t    _M_max_threads;

      // Each time a deallocation occurs in a threaded application
      // we make sure that there are no more than
      // _M_freelist_headroom % of used memory on the freelist. If
      // the number of additional records is more than
      // _M_freelist_headroom % of the freelist, we move these
      // records back to the global pool.
      size_t    _M_freelist_headroom;
      // When set to true, all allocations bypass the pool and use
      // operator new directly.
      bool      _M_force_new;

      explicit
      _Tune()
      : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
      _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
      _M_freelist_headroom(_S_freelist_headroom),
      _M_force_new(std::getenv("GLIBCXX_FORCE_NEW") ? true : false)
      { }

      explicit
      _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
            size_t __maxthreads, size_t __headroom, bool __force)
      : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
      _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
      _M_freelist_headroom(__headroom), _M_force_new(__force)
      { }
    };
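    // Example: under the defaults above, on an LP64 target (where
    // sizeof(void*) == 8, an assumption for this arithmetic)
    // _S_chunk_size is 4096 - 32 == 4064 bytes, i.e. just under a
    // page. Note also that the default constructor reads the
    // GLIBCXX_FORCE_NEW environment variable: setting it makes every
    // request go straight to operator new.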
    struct _Block_address
    {
      void*             _M_initial;
      _Block_address*   _M_next;
    };

    const _Tune&
    _M_get_options() const
    { return _M_options; }
    void
    _M_set_options(_Tune __t)
    {
      if (!_M_init)
        _M_options = __t;
    }

    bool
    _M_check_threshold(size_t __bytes)
    { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }

    size_t
    _M_get_binmap(size_t __bytes)
    { return _M_binmap[__bytes]; }
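    // Example of the binmap under the default _Tune (illustrative):
    // with _M_min_bin == 8 and _M_max_bytes == 128 the bins hold 8,
    // 16, 32, 64 and 128 bytes, and _M_binmap[__bytes] returns the
    // index of the smallest bin that fits, so _M_binmap[24] == 2,
    // selecting the 32-byte bin.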
    const size_t
    _M_get_align()
    { return _M_options._M_align; }

    explicit
    __pool_base()
    : _M_options(_Tune()), _M_binmap(NULL), _M_init(false) { }

    explicit
    __pool_base(const _Tune& __options)
    : _M_options(__options), _M_binmap(NULL), _M_init(false) { }

  private:
    explicit
    __pool_base(const __pool_base&);

    __pool_base&
    operator=(const __pool_base&);

  protected:
    // Configuration options.
    _Tune               _M_options;

    _Binmap_type*       _M_binmap;

    // Configuration of the pool object via _M_options can happen
    // after construction but before initialization. After
    // initialization is complete, this variable is set to true.
    bool                _M_init;
  };
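  // Usage sketch (illustrative values), using __mt_alloc (defined
  // below) to reach _M_set_options: options must be applied before
  // the pool's first allocation, since _M_set_options is a no-op
  // once _M_init is true.
  //
  //   typedef __gnu_cxx::__mt_alloc<int> allocator_type;
  //   __gnu_cxx::__pool_base::_Tune
  //     __t(16,     // align
  //         5120,   // max_bytes: larger requests use operator new
  //         32,     // min_bin
  //         5120,   // chunk_size
  //         4096,   // max_threads
  //         10,     // freelist headroom, in percent
  //         false); // force_new
  //   allocator_type __a;
  //   __a._M_set_options(__t); // before the first allocation via __a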
  /**
   *  @brief  Data describing the underlying memory pool, parameterized on
   *  threading support.
   */
  template<bool _Thread>
    class __pool;

  /// Specialization for single thread.
  template<>
    class __pool<false> : public __pool_base
    {
    public:
      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record*          _M_next;
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block.
        _Block_record**         _M_first;

        // A list of the initial addresses of all allocated blocks.
        _Block_address*         _M_address;
      };
      void
      _M_initialize_once()
      {
        if (__builtin_expect(_M_init == false, false))
          _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes);

      size_t
      _M_get_thread_id() { return 0; }

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      { }

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1) { }
    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*              _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                    _M_bin_size;

      void
      _M_initialize();
    };

#ifdef __GTHREADS
  /// Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base
    {
    public:
      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // store its address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // is called (i.e. when the thread dies), we return the thread
      // id to the front of this list.
      struct _Thread_record
      {
        // Points to next free thread id record. NULL if last record in list.
        _Thread_record*         _M_next;

        // Thread id ranging from 1 to _S_max_threads.
        size_t                  _M_id;
      };
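      // Sketch of the id-recycling fast path described above
      // (simplified; the real routine also serializes access to the
      // freelist):
      //
      //   _Thread_record* __tr = _M_thread_freelist;  // pop a free id
      //   _M_thread_freelist = __tr->_M_next;
      //   __gthread_setspecific(__key, __tr);         // remember it
      //   // ...on thread exit the key's destructor pushes __tr back,
      //   // so ids are assigned and reclaimed in constant time.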
      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record*          _M_next;

        // The thread id of the thread which has requested this block.
        size_t                  _M_thread_id;
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block for each
        // thread id. Memory to this "array" is allocated in
        // _S_initialize() for _S_max_threads + global pool 0.
        _Block_record**         _M_first;

        // A list of the initial addresses of all allocated blocks.
        _Block_address*         _M_address;
        // An "array" of counters used to keep track of the amount of
        // blocks that are on the freelist/used for each thread id.
        // - Note that the second part of the allocated _M_used "array"
        //   actually hosts (atomic) counters of reclaimed blocks: in
        //   _M_reserve_block and in _M_reclaim_block those numbers are
        //   subtracted from the first ones to obtain the actual size
        //   of the "working set" of the given thread.
        // - Memory to these "arrays" is allocated in _S_initialize()
        //   for _S_max_threads + global pool 0.
        size_t*                 _M_free;
        size_t*                 _M_used;

        // Each bin has its own mutex which is used to ensure data
        // integrity while changing "ownership" on a block. The mutex
        // is initialized in _S_initialize().
        __gthread_mutex_t*      _M_mutex;
      };
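      // Example of the headroom policy (illustrative): with the
      // default _M_freelist_headroom of 10, a thread with 1000 blocks
      // of a given bin in use may keep roughly 100 unused blocks on
      // its freelist; _M_reclaim_block returns anything beyond that
      // to global pool 0, where other threads can reserve it.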
      // XXX GLIBCXX_ABI Deprecated
      void
      _M_initialize(__destroy_handler);

      void
      _M_initialize_once()
      {
        if (__builtin_expect(_M_init == false, false))
          _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes);

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
                         size_t __thread_id)
      {
        if (__gthread_active_p())
          {
            __block->_M_thread_id = __thread_id;
            --__bin._M_free[__thread_id];
            ++__bin._M_used[__thread_id];
          }
      }

      // XXX GLIBCXX_ABI Deprecated
      void
      _M_destroy_thread_key(void*);

      size_t
      _M_get_thread_id();

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1), _M_thread_freelist(NULL)
      { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1),
      _M_thread_freelist(NULL)
      { }
    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*              _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                    _M_bin_size;

      _Thread_record*           _M_thread_freelist;
      void*                     _M_thread_freelist_initial;

      void
      _M_initialize();
    };
#endif

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool
    {
      typedef _PoolTp<_Thread>          pool_type;

      static pool_type&
      _S_get_pool()
      {
        static pool_type _S_pool;
        return _S_pool;
      }
    };
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_base;

  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, false>
    : public __common_pool<_PoolTp, false>
    {
      using  __common_pool<_PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };
#ifdef __GTHREADS
  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, true>
    : public __common_pool<_PoolTp, true>
    {
      using  __common_pool<_PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            if (__gthread_active_p())
              {
                // On some platforms, __gthread_once_t is an aggregate.
                static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
                __gthread_once(&__once, _S_initialize);
              }

            // Double check initialization. May be necessary on some
            // systems for proper construction when not compiling with
            // thread flags.
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };
#endif
  /// @brief  Policy for shared __pool objects.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_policy
    : public __common_pool_base<_PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
               bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      using  __common_pool_base<_PoolTp, _Thread>::_S_get_pool;
      using  __common_pool_base<_PoolTp, _Thread>::_S_initialize_once;
    };
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool
    {
      typedef _Tp                       value_type;
      typedef _PoolTp<_Thread>          pool_type;

      static pool_type&
      _S_get_pool()
      {
        // Sane defaults for the _PoolTp.
        typedef typename pool_type::_Block_record _Block_record;
        const static size_t __a = (__alignof__(_Tp) >= sizeof(_Block_record)
                                   ? __alignof__(_Tp) : sizeof(_Block_record));

        typedef typename __pool_base::_Tune _Tune;
        static _Tune _S_tune(__a, sizeof(_Tp) * 64,
                             sizeof(_Tp) * 2 >= __a ? sizeof(_Tp) * 2 : __a,
                             sizeof(_Tp) * size_t(_Tune::_S_chunk_size),
                             _Tune::_S_max_threads,
                             _Tune::_S_freelist_headroom,
                             std::getenv("GLIBCXX_FORCE_NEW") ? true : false);
        static pool_type _S_pool(_S_tune);
        return _S_pool;
      }
    };
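  // Worked example of the per-type defaults (assuming an LP64
  // target): for a _Tp with sizeof(_Tp) == 8 and __alignof__(_Tp)
  // == 8, __a == 8, _M_max_bytes == 8 * 64 == 512, _M_min_bin == 16,
  // and the chunk size scales with the type: 8 * 4064 == 32512 bytes
  // per request to operator new.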
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_base;

  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, false>
    : public __per_type_pool<_Tp, _PoolTp, false>
    {
      using  __per_type_pool<_Tp, _PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };
#ifdef __GTHREADS
  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, true>
    : public __per_type_pool<_Tp, _PoolTp, true>
    {
      using  __per_type_pool<_Tp, _PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            if (__gthread_active_p())
              {
                // On some platforms, __gthread_once_t is an aggregate.
                static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
                __gthread_once(&__once, _S_initialize);
              }

            // Double check initialization. May be necessary on some
            // systems for proper construction when not compiling with
            // thread flags.
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };
#endif
  /// @brief  Policy for individual __pool objects.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_policy
    : public __per_type_pool_base<_Tp, _PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
               bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_get_pool;
      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_initialize_once;
    };
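  // Choosing between the two policies: __common_pool_policy gives all
  // __mt_alloc instantiations one shared pool, while
  // __per_type_pool_policy keeps a separate pool, tuned to
  // sizeof(_Tp), per value_type. Illustrative sketch:
  //
  //   typedef __gnu_cxx::__per_type_pool_policy<long, __gnu_cxx::__pool,
  //                                             true> __policy;
  //   __gnu_cxx::__mt_alloc<long, __policy> __a;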
  /// @brief  Base class for _Tp dependent member functions.
  template<typename _Tp>
    class __mt_alloc_base
    {
    public:
      typedef size_t                    size_type;
      typedef ptrdiff_t                 difference_type;
      typedef _Tp*                      pointer;
      typedef const _Tp*                const_pointer;
      typedef _Tp&                      reference;
      typedef const _Tp&                const_reference;
      typedef _Tp                       value_type;

      pointer
      address(reference __x) const
      { return &__x; }

      const_pointer
      address(const_reference __x) const
      { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new(__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
    };
#ifdef __GTHREADS
#define __thread_default true
#else
#define __thread_default false
#endif

  /**
   *  @brief  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to the
   *  "global" list).
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
   */
  template<typename _Tp,
           typename _Poolp = __common_pool_policy<__pool, __thread_default> >
    class __mt_alloc : public __mt_alloc_base<_Tp>
    {
    public:
      typedef size_t                    size_type;
      typedef ptrdiff_t                 difference_type;
      typedef _Tp*                      pointer;
      typedef const _Tp*                const_pointer;
      typedef _Tp&                      reference;
      typedef const _Tp&                const_reference;
      typedef _Tp                       value_type;
      typedef _Poolp                    __policy_type;
      typedef typename _Poolp::pool_type        __pool_type;

      template<typename _Tp1, typename _Poolp1 = _Poolp>
        struct rebind
        {
          typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
          typedef __mt_alloc<_Tp1, pol_type> other;
        };

      __mt_alloc() throw() { }

      __mt_alloc(const __mt_alloc&) throw() { }

      template<typename _Tp1, typename _Poolp1>
        __mt_alloc(const __mt_alloc<_Tp1, _Poolp1>&) throw() { }

      ~__mt_alloc() throw() { }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      const __pool_base::_Tune
      _M_get_options()
      {
        // Return a copy, not a reference, for external consumption.
        return __policy_type::_S_get_pool()._M_get_options();
      }

      void
      _M_set_options(__pool_base::_Tune __t)
      { __policy_type::_S_get_pool()._M_set_options(__t); }
    };
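  // Minimal usage sketch; any standard container can take __mt_alloc
  // as its allocator argument:
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   typedef __gnu_cxx::__mt_alloc<int> allocator_type;
  //   std::vector<int, allocator_type> __v;
  //   __v.push_back(42); // first use initializes the pool lazily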
  template<typename _Tp, typename _Poolp>
    typename __mt_alloc<_Tp, _Poolp>::pointer
    __mt_alloc<_Tp, _Poolp>::
    allocate(size_type __n, const void*)
    {
      if (__builtin_expect(__n > this->max_size(), false))
        std::__throw_bad_alloc();

      __policy_type::_S_initialize_once();

      // Requests larger than _M_max_bytes are handled by operator
      // new/delete directly.
      __pool_type& __pool = __policy_type::_S_get_pool();
      const size_t __bytes = __n * sizeof(_Tp);
      if (__pool._M_check_threshold(__bytes))
        {
          void* __ret = ::operator new(__bytes);
          return static_cast<_Tp*>(__ret);
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = __pool._M_get_binmap(__bytes);
      const size_t __thread_id = __pool._M_get_thread_id();

      // Find out if we have blocks on our freelist. If so, go ahead
      // and use them directly without having to lock anything.
      char* __c;
      typedef typename __pool_type::_Bin_record _Bin_record;
      const _Bin_record& __bin = __pool._M_get_bin(__which);
      if (__bin._M_first[__thread_id])
        {
          // Already reserved.
          typedef typename __pool_type::_Block_record _Block_record;
          _Block_record* __block = __bin._M_first[__thread_id];
          __bin._M_first[__thread_id] = __block->_M_next;

          __pool._M_adjust_freelist(__bin, __block, __thread_id);
          __c = reinterpret_cast<char*>(__block) + __pool._M_get_align();
        }
      else
        {
          // Null, reserve.
          __c = __pool._M_reserve_block(__bytes, __thread_id);
        }
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }
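  // Block layout sketch: each block starts with _M_align bytes of
  // bookkeeping (the _Block_record, and for the threaded pool the
  // owning thread id), and the pointer handed back to the user points
  // just past it:
  //
  //   |  _Block_record (align bytes)  |  user data  |
  //   ^ __block                       ^ __c (returned)
  //
  // _M_reclaim_block reverses this arithmetic to recover the
  // _Block_record from a user pointer.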
  template<typename _Tp, typename _Poolp>
    void
    __mt_alloc<_Tp, _Poolp>::
    deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__p != 0, true))
        {
          // Requests larger than _M_max_bytes are handled by
          // operators new/delete directly.
          __pool_type& __pool = __policy_type::_S_get_pool();
          const size_t __bytes = __n * sizeof(_Tp);
          if (__pool._M_check_threshold(__bytes))
            ::operator delete(__p);
          else
            __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
        }
    }
  template<typename _Tp, typename _Poolp>
    inline bool
    operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return true; }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return false; }

#undef __thread_default

_GLIBCXX_END_NAMESPACE

#endif