// Allocators -*- C++ -*-

// Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
/*
 * Copyright (c) 1996-1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */
/** @file ext/pool_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 *  You should only include this header if you are using GCC 3 or later.
 */
#ifndef _POOL_ALLOCATOR_H
#define _POOL_ALLOCATOR_H 1

#include <new>                  // for std::nothrow
#include <cstdlib>              // for getenv
#include <bits/functexcept.h>
#include <bits/stl_threads.h>
#include <bits/atomicity.h>

namespace __gnu_cxx
{
  using std::__throw_bad_alloc;
  /**
   *  Default node allocator.  "SGI" style.  Uses various allocators to
   *  fulfill underlying requests (and makes as few requests as possible
   *  when in default high-speed pool mode).
   *
   *  Important implementation properties:
   *  0. If globally mandated, then allocate objects from new
   *  1. If the clients request an object of size > _S_max_bytes, the resulting
   *     object will be obtained directly from new
   *  2. In all other cases, we allocate an object of size exactly
   *     _S_round_up(requested_size).  Thus the client has enough size
   *     information that we can return the object to the proper free list
   *     without permanently losing part of the object.
   *
   *  The first template parameter specifies whether more than one thread may
   *  use this allocator.  It is safe to allocate an object from one instance
   *  of a default_alloc and deallocate it with another one.  This effectively
   *  transfers its ownership to the second one.  This may have undesirable
   *  effects on reference locality.
   *
   *  The second parameter is unused and serves only to allow the
   *  creation of multiple default_alloc instances.  Note that
   *  containers built on different allocator instances have different
   *  types, limiting the utility of this approach.  If you do not
   *  wish to share the free lists with the main default_alloc
   *  instance, instantiate this with a non-zero __inst.
   *
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<bool __threads, int __inst>
    class __pool_alloc
    {
    private:
      enum {_S_align = 8};
      enum {_S_max_bytes = 128};
      enum {_S_freelists = _S_max_bytes / _S_align};

      union _Obj
      {
        union _Obj* _M_free_list_link;
        char        _M_client_data[1];    // The client sees this.
      };

      static _Obj* volatile         _S_free_list[_S_freelists];

      // Chunk allocation state.
      static char*                  _S_start_free;
      static char*                  _S_end_free;
      static size_t                 _S_heap_size;

      static _STL_mutex_lock        _S_lock;
      static _Atomic_word           _S_force_new;

      static size_t
      _S_round_up(size_t __bytes)
      { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }

      static size_t
      _S_freelist_index(size_t __bytes)
      { return ((__bytes + (size_t)_S_align - 1) / (size_t)_S_align - 1); }
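
      // Illustrative values only (not part of the original header): with
      // _S_align == 8, a 13-byte request is rounded and indexed as
      //
      //   _S_round_up(13)        == 16   // (13 + 7) & ~7
      //   _S_freelist_index(13)  == 1    // (13 + 7) / 8 - 1, the 16-byte list
      //   _S_round_up(128)       == 128  // _S_max_bytes, largest pooled size
      //   _S_freelist_index(128) == 15   // last of the _S_freelists slots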

      // Returns an object of size __n, and optionally adds to size __n's
      // free list.
      static void*
      _S_refill(size_t __n);

      // Allocates a chunk for __nobjs objects of size __n.  __nobjs may be
      // reduced if it is inconvenient to allocate the requested number.
      static char*
      _S_chunk_alloc(size_t __n, int& __nobjs);

      // It would be nice to use _STL_auto_lock here.  But we need a
      // test whether threads are in use.
      struct _Lock
      {
        _Lock() { if (__threads) _S_lock._M_acquire_lock(); }
        ~_Lock() { if (__threads) _S_lock._M_release_lock(); }
      } __attribute__ ((__unused__));

    public:
      static void*
      allocate(size_t __n);

      static void
      deallocate(void* __p, size_t __n);
    };
  template<bool __threads, int __inst>
    inline bool
    operator==(const __pool_alloc<__threads,__inst>&,
               const __pool_alloc<__threads,__inst>&)
    { return true; }

  template<bool __threads, int __inst>
    inline bool
    operator!=(const __pool_alloc<__threads,__inst>&,
               const __pool_alloc<__threads,__inst>&)
    { return false; }
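
  // A minimal usage sketch (illustrative only, not part of the original
  // header): __pool_alloc hands out raw, untyped storage through its static
  // members, so deallocate must be given the same size that was passed to
  // allocate.  Assuming a single-threaded program and the default instance:
  //
  //   typedef __gnu_cxx::__pool_alloc<false, 0> _Pool;
  //   void* __p = _Pool::allocate(13);   // served from the 16-byte free list
  //   _Pool::deallocate(__p, 13);        // returned to the same free list
  //
  // Requests larger than _S_max_bytes (128) bypass the pool and go straight
  // to ::operator new and ::operator delete.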

  // Allocate memory in large chunks in order to avoid fragmenting the
  // heap too much.  Assume that __n is properly aligned.  We hold
  // the allocation lock.
  template<bool __threads, int __inst>
    char*
    __pool_alloc<__threads, __inst>::_S_chunk_alloc(size_t __n, int& __nobjs)
    {
      char* __result;
      size_t __total_bytes = __n * __nobjs;
      size_t __bytes_left = _S_end_free - _S_start_free;

      if (__bytes_left >= __total_bytes)
        {
          __result = _S_start_free;
          _S_start_free += __total_bytes;
          return __result;
        }
      else if (__bytes_left >= __n)
        {
          __nobjs = (int)(__bytes_left / __n);
          __total_bytes = __n * __nobjs;
          __result = _S_start_free;
          _S_start_free += __total_bytes;
          return __result;
        }
      else
        {
          size_t __bytes_to_get =
            2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
          // Try to make use of the left-over piece.
          if (__bytes_left > 0)
            {
              _Obj* volatile* __free_list =
                _S_free_list + _S_freelist_index(__bytes_left);

              ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
              *__free_list = (_Obj*)(void*)_S_start_free;
            }
          // Use the nothrow form so that a failure here returns null and the
          // free-list scavenging fallback below can run.
          _S_start_free = static_cast<char*>(::operator new(__bytes_to_get,
                                                            std::nothrow));
          if (_S_start_free == 0)
            {
              size_t __i = __n;
              _Obj* volatile* __free_list;
              _Obj* __p;

              // Try to make do with what we have.  That can't hurt.  We
              // do not try smaller requests, since that tends to result
              // in disaster on multi-process machines.
              for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
                {
                  __free_list = _S_free_list + _S_freelist_index(__i);
                  __p = *__free_list;
                  if (__p != 0)
                    {
                      *__free_list = __p->_M_free_list_link;
                      _S_start_free = (char*)__p;
                      _S_end_free = _S_start_free + __i;
                      return _S_chunk_alloc(__n, __nobjs);
                      // Any leftover piece will eventually make it to the
                      // right free list.
                    }
                }
              _S_end_free = 0;        // In case of exception.
              _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
              // This should either throw an exception or remedy the situation.
              // Thus we assume it succeeded.
            }
          _S_heap_size += __bytes_to_get;
          _S_end_free = _S_start_free + __bytes_to_get;
          return _S_chunk_alloc(__n, __nobjs);
        }
    }
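
  // Worked example of the growth policy above (illustrative numbers only):
  // the very first refill of 16-byte objects asks for 20 of them, so
  // __total_bytes == 320 and, with _S_heap_size still 0,
  //
  //   __bytes_to_get == 2 * 320 + _S_round_up(0 >> 4) == 640
  //
  // Half of that (320 bytes) is carved into the twenty objects requested;
  // the other half stays in [_S_start_free, _S_end_free) and is handed out
  // by later calls before any new memory is requested.  As _S_heap_size
  // grows, the _S_round_up(_S_heap_size >> 4) term makes each new chunk
  // proportionally larger.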

  // Returns an object of size __n, and optionally adds to "size
  // __n"'s free list.  We assume that __n is properly aligned.  We
  // hold the allocation lock.
  template<bool __threads, int __inst>
    void*
    __pool_alloc<__threads, __inst>::_S_refill(size_t __n)
    {
      int __nobjs = 20;
      char* __chunk = _S_chunk_alloc(__n, __nobjs);
      _Obj* volatile* __free_list;
      _Obj* __result;
      _Obj* __current_obj;
      _Obj* __next_obj;
      int __i;

      if (1 == __nobjs)
        return __chunk;
      __free_list = _S_free_list + _S_freelist_index(__n);

      // Build free list in chunk.
      __result = (_Obj*)(void*)__chunk;
      *__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
      for (__i = 1; ; __i++)
        {
          __current_obj = __next_obj;
          __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
          if (__nobjs - 1 == __i)
            {
              __current_obj->_M_free_list_link = 0;
              break;
            }
          else
            __current_obj->_M_free_list_link = __next_obj;
        }
      return __result;
    }
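
  // Continuing the example above (illustrative only): a 320-byte chunk for
  // __n == 16 yields 20 objects.  The first one is returned to the caller;
  // the remaining 19 are threaded through their _M_free_list_link members
  // onto _S_free_list[_S_freelist_index(16)], so the next 19 allocations of
  // that size cost a single pointer update each.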

  template<bool __threads, int __inst>
    void*
    __pool_alloc<__threads, __inst>::allocate(size_t __n)
    {
      void* __ret = 0;

      // If there is a race through here, assume answer from getenv
      // will resolve in same direction.  Inspired by techniques
      // to efficiently support threading found in basic_string.h.
      if (_S_force_new == 0)
        {
          if (getenv("GLIBCXX_FORCE_NEW"))
            __atomic_add(&_S_force_new, 1);
          else
            __atomic_add(&_S_force_new, -1);
        }

      if ((__n > (size_t) _S_max_bytes) || (_S_force_new > 0))
        __ret = ::operator new(__n);
      else
        {
          _Obj* volatile* __free_list = _S_free_list + _S_freelist_index(__n);
          // Acquire the lock here with a constructor call.  This
          // ensures that it is released in exit or during stack
          // unwinding.
          _Lock __lock_instance;
          _Obj* __restrict__ __result = *__free_list;
          if (__builtin_expect(__result == 0, 0))
            __ret = _S_refill(_S_round_up(__n));
          else
            {
              *__free_list = __result->_M_free_list_link;
              __ret = __result;
            }
          if (__builtin_expect(__ret == 0, 0))
            __throw_bad_alloc();
        }
      return __ret;
    }
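
  // Note: setting the environment variable GLIBCXX_FORCE_NEW before the
  // first allocation makes every request above take the ::operator new
  // path, which helps when running memory debuggers that do not understand
  // the pool.  For example (shell usage, illustrative):
  //
  //   GLIBCXX_FORCE_NEW=1 ./a.out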

  template<bool __threads, int __inst>
    void
    __pool_alloc<__threads, __inst>::deallocate(void* __p, size_t __n)
    {
      if ((__n > (size_t) _S_max_bytes) || (_S_force_new > 0))
        ::operator delete(__p);
      else
        {
          _Obj* volatile* __free_list = _S_free_list + _S_freelist_index(__n);
          _Obj* __q = (_Obj*)__p;

          // Acquire the lock here with a constructor call.  This
          // ensures that it is released in exit or during stack
          // unwinding.
          _Lock __lock_instance;
          __q->_M_free_list_link = *__free_list;
          *__free_list = __q;
        }
    }
  template<bool __threads, int __inst>
    typename __pool_alloc<__threads, __inst>::_Obj* volatile
    __pool_alloc<__threads, __inst>::_S_free_list[_S_freelists];

  template<bool __threads, int __inst>
    char* __pool_alloc<__threads, __inst>::_S_start_free = 0;

  template<bool __threads, int __inst>
    char* __pool_alloc<__threads, __inst>::_S_end_free = 0;

  template<bool __threads, int __inst>
    size_t __pool_alloc<__threads, __inst>::_S_heap_size = 0;

  template<bool __threads, int __inst>
    _STL_mutex_lock
    __pool_alloc<__threads, __inst>::_S_lock __STL_MUTEX_INITIALIZER;

  template<bool __threads, int __inst> _Atomic_word
    __pool_alloc<__threads, __inst>::_S_force_new = 0;
  // Inhibit implicit instantiations for required instantiations,
  // which are defined via explicit instantiations elsewhere.
  // NB: This syntax is a GNU extension.
#if _GLIBCXX_EXTERN_TEMPLATE
  extern template class __pool_alloc<true, 0>;
#endif
} // namespace __gnu_cxx

#endif