// Allocator details.

// Copyright (C) 2004, 2005, 2006, 2009, 2010 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>

namespace
{
#ifdef __GTHREADS
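  // Shared bookkeeping for thread ids.  Every __pool<true> instance
  // draws thread ids from this single process-wide free list; a
  // thread-specific key maps each thread back to its id record so
  // the record can be recycled when the thread exits.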
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*     _M_thread_freelist;
    _Thread_record*     _M_thread_freelist_array;
    size_t              _M_max_threads;
    __gthread_key_t     _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
        }
    }
  };

  __freelist&
  get_freelist()
  {
    static __freelist freelist;
    return freelist;
  }

  __gnu_cxx::__mutex&
  get_freelist_mutex()
  {
    static __gnu_cxx::__mutex freelist_mutex;
    return freelist_mutex;
  }

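  // Registered below as the destructor for the thread-specific key:
  // __id is the value stored via __gthread_setspecific, i.e. the
  // thread's id itself cast to void*, so _M_id - 1 indexes the
  // record array directly.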
  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __freelist& freelist = get_freelist();
    {
      __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
      size_t _M_id = reinterpret_cast<size_t>(__id);

      typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
      _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
      __tr->_M_next = freelist._M_thread_freelist;
      freelist._M_thread_freelist = __tr;
    }
  }
#endif
} // anonymous namespace

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
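    // For example, assuming the default _Tune values from
    // <ext/mt_allocator.h> (_M_min_bin == 8), a 12-byte request
    // rounds up to the 16-byte bin, so _M_binmap[12] == 1.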

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                               + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;
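    // Each chunk starts with a _Block_address bookkeeping record; the
    // rest of the chunk is carved into __block_count blocks of
    // __bin_size bytes each (block payload plus _M_align header bytes).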

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = 0;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = 0;
        __bin._M_address = 0;
      }
    _M_init = true;
  }


#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // __p is known to be non-null; assume it points to a valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
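        // _M_used was allocated in _M_initialize as a size_t array of
        // __max_threads entries immediately followed by an
        // _Atomic_word array of the same length, so __reclaimed_base
        // points at the start of that second array.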
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance's sake we don't resync every time, in
        // order to spare atomic ops.  Note that if __reclaimed increased
        // by, say, 1024, since the last sync, it means that the other
        // threads executed the atomic in the else below at least the
        // same number of times (at least, because _M_reserve_block may
        // have decreased the counter), therefore one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
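        // Heuristic: blocks migrate back to the global list only when
        // this thread's free count, scaled by _M_freelist_headroom (a
        // percentage) and reduced by the net used count, still exceeds
        // both the per-bin __limit above and the raw free count.
        // Smaller bins (lower __which) get a larger __limit, so short
        // freelists of small blocks are left alone.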
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes: check whether there are free blocks on the global
    //   list. If so, grab up to __block_count blocks in one
    //   lock and change ownership. If the global list is
    //   empty, allocate a new chunk and add its blocks
    //   directly to our own freelist (with us as owner).
    // - No: all operations go directly to global pool 0; there
    //   is no need to lock or change ownership, but check for
    //   free blocks on the global list (adding new ones if
    //   necessary) and take the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = 0;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == 0)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = 0;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = 0;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = 0;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = 0;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
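                  // The shared array has grown: re-link the surviving
                  // free records at the same indices in the new array
                  // (ids are stable, id __i lives at index __i - 1),
                  // splice the newly added ids onto the tail via the
                  // chain built above, then release the old array.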
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);

            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active, check the thread
    // key value and return its id; if it's not set, take the first
    // record from _M_thread_freelist, set the key, and return the
    // new id.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        void* v = __gthread_getspecific(freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
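        // The shared freelist may have been grown by another pool
        // instance, so an id can exceed this pool's own
        // _M_max_threads; such threads (and threads left with
        // _M_id == 0 because the freelist was exhausted) are served
        // from the global pool 0.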
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) throw () { }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
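    // Kept only for ABI compatibility: this duplicates
    // _M_initialize() above and ignores the handler argument, always
    // registering ::_M_destroy_thread_key instead.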
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads +
                                 sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;
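
  // A minimal usage sketch (not compiled as part of this file): the
  // pool allocator plugs in as a standard allocator type, e.g.
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   int main()
  //   {
  //     std::vector<int, __gnu_cxx::__mt_alloc<int> > v;
  //     for (int i = 0; i < 1000; ++i)
  //       v.push_back(i);   // blocks come from the per-thread pool
  //   }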

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace