// Allocator details.

// Copyright (C) 2004, 2005, 2006, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>

namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*     _M_thread_freelist;
    _Thread_record*     _M_thread_freelist_array;
    size_t              _M_max_threads;
    __gthread_key_t     _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
        }
    }
  };
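
  // How this is used: get_freelist() below returns the single, lazily
  // constructed instance of this struct, which owns the pool of reusable
  // thread ids.  Ids are handed out by __pool<true>::_M_get_thread_id()
  // and pushed back onto _M_thread_freelist by _M_destroy_thread_key()
  // when a thread exits.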

  __freelist&
  get_freelist()
  {
    static __freelist freelist;
    return freelist;
  }

  __gnu_cxx::__mutex&
  get_freelist_mutex()
  {
    static __gnu_cxx::__mutex freelist_mutex;
    return freelist_mutex;
  }

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __freelist& freelist = get_freelist();
    {
      __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
      size_t _M_id = reinterpret_cast<size_t>(__id);

      typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
      _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
      __tr->_M_next = freelist._M_thread_freelist;
      freelist._M_thread_freelist = __tr;
    }
  }
#endif
} // anonymous namespace

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                               + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;
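
    // Chunk layout: each chunk of __options._M_chunk_size bytes starts
    // with a _Block_address header and is then carved into __block_count
    // blocks of __bin_size bytes; the first _M_align bytes of every block
    // hold the _Block_record link, the rest is handed out to the user.
    // As a rough sketch (assuming the default 8-byte _M_min_bin and
    // _M_align, a 4096-byte chunk and a 16-byte _Block_address), the
    // smallest bin has __bin_size == 8 + 8 == 16 and a chunk yields
    // (4096 - 16) / 16 == 255 blocks.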

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = NULL;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }
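
    // For example (a sketch assuming the default tuning, _M_min_bin == 8
    // and _M_max_bytes == 128): the bins hold blocks of 8, 16, 32, 64 and
    // 128 bytes, _M_bin_size ends up as 5, and _M_binmap[__n] is the index
    // of the smallest bin whose block size is >= __n, e.g. _M_binmap[24]
    // == 2 (the 32-byte bin).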

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = NULL;
        __bin._M_address = NULL;
      }
    _M_init = true;
  }


#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance's sake we don't resync every time, in order
        // to spare atomic ops.  Note that if __reclaimed increased by,
        // say, 1024 since the last sync, it means that the other
        // threads executed the atomic in the else below at least the
        // same number of times (at least, because _M_reserve_block may
        // have decreased the counter), therefore one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
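
        // As a rough worked example (assuming the default
        // _M_freelist_headroom of 10): with 3000 blocks free and a net
        // 500 in use on this thread in bin 0 of a 5-bin pool, __remove is
        // 3000 * 10 - 500 == 29500 and __limit is 100 * 5 * 10 == 5000,
        // so both tests below succeed and 29500 / 10 == 2950 blocks are
        // spliced off this thread's list onto the global list (index 0)
        // under the bin mutex.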
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list. If so, grab up to __block_count blocks in one
    //   lock and change ownership. If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly on global pool 0:
    //   no need to lock or change ownership, but check for free
    //   blocks on the global list (adding new ones if there are
    //   none) and get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = NULL;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == NULL)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = NULL;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = NULL;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = NULL;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = NULL;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);

            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);
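            // The second half of this allocation (past the __max_threads
            // size_t counters) is an array of per-thread _Atomic_word
            // "reclaimed" counters; _M_reserve_block and _M_reclaim_block
            // reach it via __bin._M_used + __max_threads.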

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active, check the thread key
    // value and return its id; if it's not yet set, take the first
    // record from _M_thread_freelist, set the key and return its id.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        void* v = __gthread_getspecific(freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) { }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;
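
  // Usage sketch (illustrative only, not exercised by this file): the
  // allocator can be plugged into any standard container, e.g.
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //   std::vector<int, __gnu_cxx::__mt_alloc<int> > v;
  //   v.push_back(42);  // small requests come from the pool; requests
  //                     // above _M_max_bytes fall back to ::operator new.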

_GLIBCXX_END_NAMESPACE