// Allocator details.

// Copyright (C) 2004, 2005 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <bits/concurrence.h>
#include <ext/mt_allocator.h>

namespace __gnu_internal
{
#ifdef __GTHREADS
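  // Registry of reusable thread ids, shared by every __pool<true>
  // in the process and guarded by freelist_mutex below.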
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*     _M_thread_freelist;
    _Thread_record*     _M_thread_freelist_array;
    size_t              _M_max_threads;
    __gthread_key_t     _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
        }
    }
  };

  // Ensure freelist is constructed first.
  static __freelist freelist;
  static __glibcxx_mutex_define_initialized(freelist_mutex);

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);
    size_t _M_id = reinterpret_cast<size_t>(__id);

    using namespace __gnu_internal;
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
    __tr->_M_next = freelist._M_thread_freelist;
    freelist._M_thread_freelist = __tr;
  }
#endif
}
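// Lifecycle sketch: __pool<true>::_M_get_thread_id() pops a record off
// freelist._M_thread_freelist and stashes its id in freelist._M_key via
// __gthread_setspecific(); when the thread exits, gthreads invokes
// _M_destroy_thread_key() above, which pushes that record back onto the
// freelist so the id can be reused by a new thread.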

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                               + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;
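    // Each chunk thus holds one _Block_address bookkeeping header followed
    // by __block_count blocks of __bin_size bytes each; e.g., assuming the
    // default tunables (a chunk of close to 4kB, _M_min_bin and _M_align
    // both 8), bin 0 has a __bin_size of 16 and yields roughly 250 blocks
    // per chunk.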

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = NULL;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }
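    // Worked example (assuming the default _M_min_bin of 8 and
    // _M_max_bytes of 128): requests of 0..8 bytes map to bin 0,
    // 9..16 to bin 1, 17..32 to bin 2, 33..64 to bin 3 and 65..128
    // to bin 4, so _M_binmap[__bytes] rounds a request up to the
    // nearest power-of-2 bin in O(1).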

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = NULL;
        __bin._M_address = NULL;
      }
    _M_init = true;
  }


#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const unsigned long __limit = 100 * (_M_bin_size - __which)
                                      * __options._M_freelist_headroom;

        unsigned long __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;
        if (__remove >= __bin._M_used[__thread_id])
          __remove -= __bin._M_used[__thread_id];
        else
          __remove = 0;
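        // __remove is scaled by _M_freelist_headroom here. A rough worked
        // example, assuming a headroom of 10 and a __limit of 1000: with
        // free == 200 and used == 100, __remove == 200 * 10 - 100 == 1900,
        // which exceeds both __limit and the unscaled free count, so
        // 1900 / 10 == 190 blocks get handed back to the global list below.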
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const unsigned long __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        --__bin._M_used[__block->_M_thread_id];

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes: check whether there are free blocks on the global list.
    //   If so, grab up to __block_count blocks under one lock and
    //   change their ownership. If the global list is empty, allocate
    //   a new chunk and add its blocks directly to our own freelist
    //   (with us as owner).
    // - No: all operations go directly to global pool 0, so there is
    //   no need to lock or change ownership, but we still check for
    //   free blocks on the global list (adding new ones if there are
    //   none) and take the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = NULL;
    if (__gthread_active_p())
      {
        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == NULL)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = NULL;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = NULL;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = NULL;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = NULL;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        {
          __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);

          if (!__gnu_internal::freelist._M_thread_freelist_array
              || __gnu_internal::freelist._M_max_threads
                 < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _Thread_record* _M_thread_freelist
                = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!__gnu_internal::freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&__gnu_internal::freelist._M_key,
                                       __gnu_internal::_M_destroy_thread_key);
                  __gnu_internal::freelist._M_thread_freelist
                    = _M_thread_freelist;
                }
              else
                {
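                  // The shared freelist exists but is too small: relink
                  // each old record at the same index in the new, larger
                  // array (preserving list order), chain the tail of the
                  // old list onto the first newly added record, and
                  // release the old array.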
                  _Thread_record* _M_old_freelist
                    = __gnu_internal::freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = __gnu_internal::freelist._M_thread_freelist_array;
                  __gnu_internal::freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = __gnu_internal::freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              __gnu_internal::freelist._M_thread_freelist_array
                = _M_thread_freelist;
              __gnu_internal::freelist._M_max_threads
                = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
            for (size_t __threadn = 0; __threadn < __max_threads; ++__threadn)
              {
                __bin._M_first[__threadn] = NULL;
                __bin._M_free[__threadn] = 0;
                __bin._M_used[__threadn] = 0;
              }
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active, we check the thread
    // key value and return its id, or if it's not yet set we take the
    // first record from _M_thread_freelist, set the key and return
    // its id.
    if (__gthread_active_p())
      {
        void* v = __gthread_getspecific(__gnu_internal::freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);
              if (__gnu_internal::freelist._M_thread_freelist)
                {
                  _M_id = __gnu_internal::freelist._M_thread_freelist->_M_id;
                  __gnu_internal::freelist._M_thread_freelist
                    = __gnu_internal::freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(__gnu_internal::freelist._M_key,
                                  (void*)_M_id);
          }
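        // An id of 0 here means the freelist was exhausted; ids at or
        // beyond this pool's _M_max_threads can occur when the shared
        // freelist was sized by a pool with a larger limit. Either way,
        // fall back to the global pool 0.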
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }

  // XXX GLIBCXX_ABI Deprecated
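  // The per-thread cleanup now happens in
  // __gnu_internal::_M_destroy_thread_key; this stub is kept for
  // ABI compatibility.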
  void
  __pool<true>::_M_destroy_thread_key(void*) { }

  // XXX GLIBCXX_ABI Deprecated
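  // Kept for ABI compatibility; the body below mirrors the current
  // _M_initialize() above.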
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        {
          __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);

          if (!__gnu_internal::freelist._M_thread_freelist_array
              || __gnu_internal::freelist._M_max_threads
                 < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _Thread_record* _M_thread_freelist
                = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!__gnu_internal::freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&__gnu_internal::freelist._M_key,
                                       __gnu_internal::_M_destroy_thread_key);
                  __gnu_internal::freelist._M_thread_freelist
                    = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = __gnu_internal::freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = __gnu_internal::freelist._M_thread_freelist_array;
                  __gnu_internal::freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = __gnu_internal::freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              __gnu_internal::freelist._M_thread_freelist_array
                = _M_thread_freelist;
              __gnu_internal::freelist._M_max_threads
                = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
            for (size_t __threadn = 0; __threadn < __max_threads; ++__threadn)
              {
                __bin._M_first[__threadn] = NULL;
                __bin._M_free[__threadn] = 0;
                __bin._M_used[__threadn] = 0;
              }
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;
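
  // A minimal usage sketch (not part of the library itself), assuming
  // only the public __mt_alloc interface: the allocator plugs into any
  // standard container as its allocator argument.
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   int main()
  //   {
  //     std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
  //     __v.push_back(42);   // allocate() serves this from a pool bin
  //     return 0;            // deallocate() returns blocks to the pool
  //   }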

_GLIBCXX_END_NAMESPACE