3 // Copyright (C) 2004 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 2, or (at your option)
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // You should have received a copy of the GNU General Public License along
17 // with this library; see the file COPYING. If not, write to the Free
18 // Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
21 // As a special exception, you may use this file as part of a free software
22 // library without restriction. Specifically, if other files instantiate
23 // templates or use macros or inline functions from this file, or you compile
24 // this file and link it with other files to produce an executable, this
25 // file does not by itself cause the resulting executable to be covered by
26 // the GNU General Public License. This exception does not however
27 // invalidate any other reasons why the executable file might be covered by
28 // the GNU General Public License.
34 #include <bits/c++config.h>
35 #include <bits/concurrence.h>
36 #include <ext/mt_allocator.h>
// Internal helpers shared by the pool implementations below.
namespace __gnu_internal
// Guards _M_thread_freelist, the global list of reusable thread ids.
__glibcxx_mutex_define_initialized(freelist_mutex);
// Thread-specific-data key; each thread's value points at its
// _Thread_record inside _M_thread_freelist (set in _M_get_thread_id).
// NOTE(review): upstream wraps this in #ifdef __GTHREADS — confirm
// whether the guard was stripped from this copy.
__gthread_key_t freelist_key;
50 __pool<false>::_M_destroy() throw()
52 if (_M_init && !_M_options._M_force_new)
54 for (size_t __n = 0; __n < _M_bin_size; ++__n)
56 _Bin_record& __bin = _M_bin[__n];
57 while (__bin._M_address)
59 _Block_address* __tmp = __bin._M_address->_M_next;
60 ::operator delete(__bin._M_address->_M_initial);
61 delete __bin._M_address;
62 __bin._M_address = __tmp;
64 delete __bin._M_first;
72 __pool<false>::_M_reclaim_block(char* __p, size_t __bytes)
74 // Round up to power of 2 and figure out which bin to use.
75 const size_t __which = _M_binmap[__bytes];
76 _Bin_record& __bin = _M_bin[__which];
78 const _Tune& __options = _M_get_options();
79 char* __c = __p - __options._M_align;
80 _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
82 // Single threaded application - return to global pool.
83 __block->_M_next = __bin._M_first[0];
84 __bin._M_first[0] = __block;
88 __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
90 // Round up to power of 2 and figure out which bin to use.
91 const size_t __which = _M_binmap[__bytes];
92 const _Tune& __options = _M_get_options();
93 const size_t __bin_size = ((__options._M_min_bin << __which)
94 + __options._M_align);
95 size_t __block_count = __options._M_chunk_size / __bin_size;
97 // Get a new block dynamically, set it up for use.
98 void* __v = ::operator new(__options._M_chunk_size);
99 _Block_record* __block = static_cast<_Block_record*>(__v);
101 _Block_record* __tmp = __block;
102 while (__block_count-- > 0)
104 char* __c = reinterpret_cast<char*>(__tmp) + __bin_size;
105 __tmp->_M_next = reinterpret_cast<_Block_record*>(__c);
106 __tmp = __tmp->_M_next;
108 __tmp->_M_next = NULL;
110 // Update _Bin_record fields.
111 _Bin_record& __bin = _M_bin[__which];
112 __bin._M_first[__thread_id] = __block->_M_next;
113 _Block_address* __address = new _Block_address;
114 __address->_M_initial = __v;
115 __address->_M_next = __bin._M_address;
116 __bin._M_address = __address;
118 // NB: For alignment reasons, we can't use the first _M_align
119 // bytes, even when sizeof(_Block_record) < _M_align.
120 return reinterpret_cast<char*>(__block) + __options._M_align;
124 __pool<false>::_M_initialize()
126 // _M_force_new must not change after the first allocate(), which
127 // in turn calls this method, so if it's false, it's false forever
128 // and we don't need to return here ever again.
129 if (_M_options._M_force_new)
136 // Calculate the number of bins required based on _M_max_bytes.
137 // _M_bin_size is statically-initialized to one.
138 size_t __bin_size = _M_options._M_min_bin;
139 while (_M_options._M_max_bytes > __bin_size)
145 // Setup the bin map for quick lookup of the relevant bin.
146 const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
147 _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
148 _Binmap_type* __bp = _M_binmap;
149 _Binmap_type __bin_max = _M_options._M_min_bin;
150 _Binmap_type __bint = 0;
151 for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
153 if (__ct > __bin_max)
161 // Initialize _M_bin and its members.
162 void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
163 _M_bin = static_cast<_Bin_record*>(__v);
164 for (size_t __n = 0; __n < _M_bin_size; ++__n)
166 _Bin_record& __bin = _M_bin[__n];
167 __v = ::operator new(sizeof(_Block_record*));
168 __bin._M_first = static_cast<_Block_record**>(__v);
169 __bin._M_first[0] = NULL;
170 __bin._M_address = NULL;
177 __pool<true>::_M_destroy() throw()
179 if (_M_init && !_M_options._M_force_new)
181 if (__gthread_active_p())
183 for (size_t __n = 0; __n < _M_bin_size; ++__n)
185 _Bin_record& __bin = _M_bin[__n];
186 while (__bin._M_address)
188 _Block_address* __tmp = __bin._M_address->_M_next;
189 ::operator delete(__bin._M_address->_M_initial);
190 delete __bin._M_address;
191 __bin._M_address = __tmp;
193 delete __bin._M_first;
194 delete __bin._M_free;
195 delete __bin._M_used;
196 delete __bin._M_mutex;
198 ::operator delete(_M_thread_freelist_initial);
202 for (size_t __n = 0; __n < _M_bin_size; ++__n)
204 _Bin_record& __bin = _M_bin[__n];
205 while (__bin._M_address)
207 _Block_address* __tmp = __bin._M_address->_M_next;
208 ::operator delete(__bin._M_address->_M_initial);
209 delete __bin._M_address;
210 __bin._M_address = __tmp;
212 delete __bin._M_first;
221 __pool<true>::_M_reclaim_block(char* __p, size_t __bytes)
223 // Round up to power of 2 and figure out which bin to use.
224 const size_t __which = _M_binmap[__bytes];
225 const _Bin_record& __bin = _M_bin[__which];
227 const _Tune& __options = _M_get_options();
228 char* __c = __p - __options._M_align;
229 _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
230 if (__gthread_active_p())
232 // Calculate the number of records to remove from our freelist:
233 // in order to avoid too much contention we wait until the
234 // number of records is "high enough".
235 const size_t __thread_id = _M_get_thread_id();
237 long __remove = ((__bin._M_free[__thread_id]
238 * __options._M_freelist_headroom)
239 - __bin._M_used[__thread_id]);
240 if (__remove > static_cast<long>(100 * (_M_bin_size - __which)
241 * __options._M_freelist_headroom)
242 && __remove > static_cast<long>(__bin._M_free[__thread_id]))
244 _Block_record* __tmp = __bin._M_first[__thread_id];
245 _Block_record* __first = __tmp;
246 __remove /= __options._M_freelist_headroom;
247 const long __removed = __remove;
249 while (__remove-- > 0)
250 __tmp = __tmp->_M_next;
251 __bin._M_first[__thread_id] = __tmp->_M_next;
252 __bin._M_free[__thread_id] -= __removed;
254 __gthread_mutex_lock(__bin._M_mutex);
255 __tmp->_M_next = __bin._M_first[0];
256 __bin._M_first[0] = __first;
257 __bin._M_free[0] += __removed;
258 __gthread_mutex_unlock(__bin._M_mutex);
261 // Return this block to our list and update counters and
262 // owner id as needed.
263 --__bin._M_used[__block->_M_thread_id];
265 __block->_M_next = __bin._M_first[__thread_id];
266 __bin._M_first[__thread_id] = __block;
268 ++__bin._M_free[__thread_id];
272 // Not using threads, so single threaded application - return
274 __block->_M_next = __bin._M_first[0];
275 __bin._M_first[0] = __block;
280 __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
282 // Round up to power of 2 and figure out which bin to use.
283 const size_t __which = _M_binmap[__bytes];
284 const _Tune& __options = _M_get_options();
285 const size_t __bin_size = ((__options._M_min_bin << __which)
286 + __options._M_align);
287 size_t __block_count = __options._M_chunk_size / __bin_size;
289 // Are we using threads?
290 // - Yes, check if there are free blocks on the global
291 // list. If so, grab up to __block_count blocks in one
292 // lock and change ownership. If the global list is
293 // empty, we allocate a new chunk and add those blocks
294 // directly to our own freelist (with us as owner).
295 // - No, all operations are made directly to global pool 0
296 // no need to lock or change ownership but check for free
297 // blocks on global list (and if not add new ones) and
298 // get the first one.
299 _Bin_record& __bin = _M_bin[__which];
300 _Block_record* __block = NULL;
301 if (__gthread_active_p())
303 __gthread_mutex_lock(__bin._M_mutex);
304 if (__bin._M_first[0] == NULL)
306 // No need to hold the lock when we are adding a whole
307 // chunk to our own list.
308 __gthread_mutex_unlock(__bin._M_mutex);
310 void* __v = ::operator new(__options._M_chunk_size);
311 __bin._M_first[__thread_id] = static_cast<_Block_record*>(__v);
312 __bin._M_free[__thread_id] = __block_count;
314 __block = __bin._M_first[__thread_id];
315 while (__block_count-- > 0)
317 char* __c = reinterpret_cast<char*>(__block) + __bin_size;
318 __block->_M_next = reinterpret_cast<_Block_record*>(__c);
319 __block = __block->_M_next;
321 __block->_M_next = NULL;
323 __gthread_mutex_lock(__bin._M_mutex);
324 _Block_address* __address = new _Block_address;
325 __address->_M_initial = __v;
326 __address->_M_next = __bin._M_address;
327 __bin._M_address = __address;
328 __gthread_mutex_unlock(__bin._M_mutex);
332 // Is the number of required blocks greater than or equal
333 // to the number that can be provided by the global free
335 __bin._M_first[__thread_id] = __bin._M_first[0];
336 if (__block_count >= __bin._M_free[0])
338 __bin._M_free[__thread_id] = __bin._M_free[0];
339 __bin._M_free[0] = 0;
340 __bin._M_first[0] = NULL;
344 __bin._M_free[__thread_id] = __block_count;
345 __bin._M_free[0] -= __block_count;
347 __block = __bin._M_first[0];
348 while (__block_count-- > 0)
349 __block = __block->_M_next;
350 __bin._M_first[0] = __block->_M_next;
351 __block->_M_next = NULL;
353 __gthread_mutex_unlock(__bin._M_mutex);
358 void* __v = ::operator new(__options._M_chunk_size);
359 __block = static_cast<_Block_record*>(__v);
360 __bin._M_first[0] = __block;
362 while (__block_count-- > 0)
364 char* __c = reinterpret_cast<char*>(__block) + __bin_size;
365 __block->_M_next = reinterpret_cast<_Block_record*>(__c);
366 __block = __block->_M_next;
368 __block->_M_next = NULL;
370 _Block_address* __address = new _Block_address;
371 __address->_M_initial = __v;
372 __address->_M_next = __bin._M_address;
373 __bin._M_address = __address;
376 __block = __bin._M_first[__thread_id];
377 __bin._M_first[__thread_id] = __bin._M_first[__thread_id]->_M_next;
379 if (__gthread_active_p())
381 __block->_M_thread_id = __thread_id;
382 --__bin._M_free[__thread_id];
383 ++__bin._M_used[__thread_id];
386 // NB: For alignment reasons, we can't use the first _M_align
387 // bytes, even when sizeof(_Block_record) < _M_align.
388 return reinterpret_cast<char*>(__block) + __options._M_align;
392 __pool<true>::_M_initialize(__destroy_handler __d)
394 // _M_force_new must not change after the first allocate(),
395 // which in turn calls this method, so if it's false, it's false
396 // forever and we don't need to return here ever again.
397 if (_M_options._M_force_new)
404 // Calculate the number of bins required based on _M_max_bytes.
405 // _M_bin_size is statically-initialized to one.
406 size_t __bin_size = _M_options._M_min_bin;
407 while (_M_options._M_max_bytes > __bin_size)
413 // Setup the bin map for quick lookup of the relevant bin.
414 const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
415 _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
416 _Binmap_type* __bp = _M_binmap;
417 _Binmap_type __bin_max = _M_options._M_min_bin;
418 _Binmap_type __bint = 0;
419 for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
421 if (__ct > __bin_max)
429 // Initialize _M_bin and its members.
430 void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
431 _M_bin = static_cast<_Bin_record*>(__v);
433 // If __gthread_active_p() create and initialize the list of
434 // free thread ids. Single threaded applications use thread id 0
435 // directly and have no need for this.
436 if (__gthread_active_p())
438 const size_t __k = sizeof(_Thread_record) * _M_options._M_max_threads;
439 __v = ::operator new(__k);
440 _M_thread_freelist = static_cast<_Thread_record*>(__v);
441 _M_thread_freelist_initial = __v;
443 // NOTE! The first assignable thread id is 1 since the
444 // global pool uses id 0
446 for (__i = 1; __i < _M_options._M_max_threads; ++__i)
448 _Thread_record& __tr = _M_thread_freelist[__i - 1];
449 __tr._M_next = &_M_thread_freelist[__i];
454 _M_thread_freelist[__i - 1]._M_next = NULL;
455 _M_thread_freelist[__i - 1]._M_id = __i;
457 // Initialize per thread key to hold pointer to
458 // _M_thread_freelist.
459 __gthread_key_create(&__gnu_internal::freelist_key, __d);
461 const size_t __max_threads = _M_options._M_max_threads + 1;
462 for (size_t __n = 0; __n < _M_bin_size; ++__n)
464 _Bin_record& __bin = _M_bin[__n];
465 __v = ::operator new(sizeof(_Block_record*) * __max_threads);
466 __bin._M_first = static_cast<_Block_record**>(__v);
468 __bin._M_address = NULL;
470 __v = ::operator new(sizeof(size_t) * __max_threads);
471 __bin._M_free = static_cast<size_t*>(__v);
473 __v = ::operator new(sizeof(size_t) * __max_threads);
474 __bin._M_used = static_cast<size_t*>(__v);
476 __v = ::operator new(sizeof(__gthread_mutex_t));
477 __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);
479 #ifdef __GTHREAD_MUTEX_INIT
481 // Do not copy a POSIX/gthr mutex once in use.
482 __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
483 *__bin._M_mutex = __tmp;
486 { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
488 for (size_t __threadn = 0; __threadn < __max_threads; ++__threadn)
490 __bin._M_first[__threadn] = NULL;
491 __bin._M_free[__threadn] = 0;
492 __bin._M_used[__threadn] = 0;
498 for (size_t __n = 0; __n < _M_bin_size; ++__n)
500 _Bin_record& __bin = _M_bin[__n];
501 __v = ::operator new(sizeof(_Block_record*));
502 __bin._M_first = static_cast<_Block_record**>(__v);
503 __bin._M_first[0] = NULL;
504 __bin._M_address = NULL;
511 __pool<true>::_M_get_thread_id()
513 // If we have thread support and it's active we check the thread
514 // key value and return its id or if it's not set we take the
515 // first record from _M_thread_freelist and sets the key and
517 if (__gthread_active_p())
519 void* v = __gthread_getspecific(__gnu_internal::freelist_key);
520 _Thread_record* __freelist_pos = static_cast<_Thread_record*>(v);
521 if (__freelist_pos == NULL)
523 // Since _M_options._M_max_threads must be larger than
524 // the theoretical max number of threads of the OS the
525 // list can never be empty.
527 __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);
528 __freelist_pos = _M_thread_freelist;
529 _M_thread_freelist = _M_thread_freelist->_M_next;
532 __gthread_setspecific(__gnu_internal::freelist_key,
533 static_cast<void*>(__freelist_pos));
535 return __freelist_pos->_M_id;
538 // Otherwise (no thread support or inactive) all requests are
539 // served from the global pool 0.
544 __pool<true>::_M_destroy_thread_key(void* __freelist_pos)
546 // Return this thread id record to front of thread_freelist.
547 __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);
548 _Thread_record* __tr = static_cast<_Thread_record*>(__freelist_pos);
549 __tr->_M_next = _M_thread_freelist;
550 _M_thread_freelist = __tr;
// Explicit instantiations for the character types used by the
// library's basic_string specializations.
template class __mt_alloc<char>;
template class __mt_alloc<wchar_t>;
} // namespace __gnu_cxx