// Allocator details.

// Copyright (C) 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <bits/concurrence.h>
#include <ext/mt_allocator.h>

namespace __gnu_internal
{
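  // Protects the global list of free thread ids consumed by
  // _M_get_thread_id and replenished by _M_destroy_thread_key.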
  __glibcxx_mutex_define_initialized(freelist_mutex);

#ifdef __GTHREADS
  __gthread_key_t freelist_key;
#endif
}

namespace __gnu_cxx
{
  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                delete __bin._M_address;
                __bin._M_address = __tmp;
              }
            // _M_first, _M_bin and _M_binmap were obtained from
            // ::operator new, so free them with ::operator delete
            // rather than a delete-expression.
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    const _Tune& __options = _M_get_options();
    char* __c = __p - __options._M_align;
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size / __bin_size;
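    // Example arithmetic (assuming a 4096-byte chunk, _M_min_bin == 8
    // and _M_align == 8): bin 0 blocks are 16 bytes, so one chunk
    // yields 256 of them; bin 4 blocks are 136 bytes, yielding 30.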

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_record* __block = static_cast<_Block_record*>(__v);
    --__block_count;
    _Block_record* __tmp = __block;
    while (__block_count-- > 0)
      {
        char* __c = reinterpret_cast<char*>(__tmp) + __bin_size;
        __tmp->_M_next = reinterpret_cast<_Block_record*>(__c);
        __tmp = __tmp->_M_next;
      }
    __tmp->_M_next = NULL;
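    // The chunk is now carved into a NULL-terminated singly linked
    // list.  The first block is handed back to the caller below; the
    // remainder becomes the bin's freelist.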

    // Update _Bin_record fields.
    _Bin_record& __bin = _M_bin[__which];
    __bin._M_first[__thread_id] = __block->_M_next;
    _Block_address* __address = new _Block_address;
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method: if it is true here it stays true, so
    // the bins are never needed; if it is false, it stays false and
    // this early return is never taken again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }
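    // E.g., with _M_min_bin == 8 and _M_max_bytes == 128 this yields
    // five bins of 8, 16, 32, 64 and 128 bytes.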

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }
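    // Continuing the example above, _M_binmap[0..8] == 0,
    // _M_binmap[9..16] == 1, _M_binmap[17..32] == 2, and so on: a
    // request size indexes directly to its bin.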

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = NULL;
        __bin._M_address = NULL;
      }
    _M_init = true;
  }

#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    delete __bin._M_address;
                    __bin._M_address = __tmp;
                  }
                // These arrays and the mutex were obtained from
                // ::operator new, so free them with ::operator delete.
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
            ::operator delete(_M_thread_freelist_initial);
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    delete __bin._M_address;
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    const _Tune& __options = _M_get_options();
    char* __c = __p - __options._M_align;
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();

        long __remove = ((__bin._M_free[__thread_id]
                          * __options._M_freelist_headroom)
                         - __bin._M_used[__thread_id]);
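        // __remove goes positive once this thread's free count exceeds
        // used / headroom.  When the surplus is also large in absolute
        // terms (the threshold scales with the bin index, so small
        // bins tolerate more hoarding), __remove / headroom blocks are
        // returned, leaving roughly used / headroom blocks on the
        // local freelist.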
        if (__remove > static_cast<long>(100 * (_M_bin_size - __which)
                                         * __options._M_freelist_headroom)
            && __remove > static_cast<long>(__bin._M_free[__thread_id]))
          {
            _Block_record* __tmp = __bin._M_first[__thread_id];
            _Block_record* __first = __tmp;
            __remove /= __options._M_freelist_headroom;
            const long __removed = __remove;
            --__remove;
            while (__remove-- > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        --__bin._M_used[__block->_M_thread_id];

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size / __bin_size;

    // Are we using threads?
    // - Yes: check for free blocks on the global list.  If there are
    //   any, grab up to __block_count of them under a single lock and
    //   take ownership.  If the global list is empty, allocate a new
    //   chunk and add its blocks directly to our own freelist (with
    //   us as owner).
    // - No: all operations go directly to global pool 0, so there is
    //   no need to lock or change ownership.  Check for free blocks
    //   on the global list, add new ones if there are none, and take
    //   the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = NULL;
    if (__gthread_active_p())
      {
        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == NULL)
          {
            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            __gthread_mutex_unlock(__bin._M_mutex);

            void* __v = ::operator new(__options._M_chunk_size);
            __bin._M_first[__thread_id] = static_cast<_Block_record*>(__v);
            __bin._M_free[__thread_id] = __block_count;
            --__block_count;
            __block = __bin._M_first[__thread_id];
            while (__block_count-- > 0)
              {
                char* __c = reinterpret_cast<char*>(__block) + __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = NULL;

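            // The bin's address list is shared between threads, so
            // re-acquire the lock just to record the chunk for later
            // release in _M_destroy.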
            __gthread_mutex_lock(__bin._M_mutex);
            _Block_address* __address = new _Block_address;
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = NULL;
              }
            else
              {
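                // The global list has more blocks than we need:
                // advance __block_count - 1 links, detach that prefix
                // as this thread's freelist and leave the rest on the
                // global list.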
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                --__block_count;
                __block = __bin._M_first[0];
                while (__block_count-- > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = NULL;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        __block = static_cast<_Block_record*>(__v);
        __bin._M_first[0] = __block;
        --__block_count;
        while (__block_count-- > 0)
          {
            char* __c = reinterpret_cast<char*>(__block) + __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = NULL;

        _Block_address* __address = new _Block_address;
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __bin._M_first[__thread_id]->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize(__destroy_handler __d)
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method: if it is true here it stays true, so
    // the bins are never needed; if it is false, it stays false and
    // this early return is never taken again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p(), create and initialize the list of
    // free thread ids.  Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        const size_t __k = sizeof(_Thread_record) * _M_options._M_max_threads;
        __v = ::operator new(__k);
        _M_thread_freelist = static_cast<_Thread_record*>(__v);
        _M_thread_freelist_initial = __v;

        // NOTE: The first assignable thread id is 1 since the
        // global pool uses id 0.
        size_t __i;
        for (__i = 1; __i < _M_options._M_max_threads; ++__i)
          {
            _Thread_record& __tr = _M_thread_freelist[__i - 1];
            __tr._M_next = &_M_thread_freelist[__i];
            __tr._M_id = __i;
          }

        // Set last record.
        _M_thread_freelist[__i - 1]._M_next = NULL;
        _M_thread_freelist[__i - 1]._M_id = __i;

        // Initialize per thread key to hold pointer to
        // _M_thread_freelist.
        __gthread_key_create(&__gnu_internal::freelist_key, __d);

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
            for (size_t __threadn = 0; __threadn < __max_threads; ++__threadn)
              {
                __bin._M_first[__threadn] = NULL;
                __bin._M_free[__threadn] = 0;
                __bin._M_used[__threadn] = 0;
              }
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it is active, check the thread
    // key: if it is already set, return the id it holds; otherwise
    // take the first record from _M_thread_freelist, set the key to
    // point at it and return its id.
    if (__gthread_active_p())
      {
        void* v = __gthread_getspecific(__gnu_internal::freelist_key);
        _Thread_record* __freelist_pos = static_cast<_Thread_record*>(v);
        if (__freelist_pos == NULL)
          {
            // Since _M_options._M_max_threads must be larger than
            // the theoretical max number of threads of the OS, the
            // list can never be empty.
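            // Scoped block: the sentry releases freelist_mutex at the
            // closing brace, before __gthread_setspecific is called.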
            {
              __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);
              __freelist_pos = _M_thread_freelist;
              _M_thread_freelist = _M_thread_freelist->_M_next;
            }

            __gthread_setspecific(__gnu_internal::freelist_key,
                                  static_cast<void*>(__freelist_pos));
          }
        return __freelist_pos->_M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }

  void
  __pool<true>::_M_destroy_thread_key(void* __freelist_pos)
  {
    // Return this thread id record to the front of _M_thread_freelist.
    __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);
    _Thread_record* __tr = static_cast<_Thread_record*>(__freelist_pos);
    __tr->_M_next = _M_thread_freelist;
    _M_thread_freelist = __tr;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;
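
  // These explicit instantiations keep the member definitions of
  // __mt_alloc for the narrow and wide character types in the
  // library.  Sketch of use (any standard container can take the
  // allocator as its second template argument):
  //   std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;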
} // namespace __gnu_cxx