// Allocator details.

// Copyright (C) 2004, 2005, 2006, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

//
// ISO C++ 14882:
//
#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>

namespace
{
#ifdef __GTHREADS
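  // Process-wide bookkeeping for the reusable thread ids handed out
  // by __pool<true>.  A single instance lives behind get_freelist()
  // below; its destructor runs at program exit and releases the
  // thread key and the id array.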
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*   _M_thread_freelist;
    _Thread_record*   _M_thread_freelist_array;
    size_t            _M_max_threads;
    __gthread_key_t   _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
        }
    }
  };

  __freelist&
  get_freelist()
  {
    static __freelist freelist;
    return freelist;
  }

  __gnu_cxx::__mutex&
  get_freelist_mutex()
  {
    static __gnu_cxx::__mutex freelist_mutex;
    return freelist_mutex;
  }
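
  // Registered below as the destructor for the per-thread key, so it
  // runs when a thread exits and hands that thread's id back to the
  // shared freelist for reuse.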
  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __freelist& freelist = get_freelist();
    {
      __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
      size_t _M_id = reinterpret_cast<size_t>(__id);

      typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
      _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
      __tr->_M_next = freelist._M_thread_freelist;
      freelist._M_thread_freelist = __tr;
    }
  }
#endif
} // anonymous namespace

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                              + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;
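
    // A chunk is laid out as one _Block_address header followed by
    // __block_count blocks of __bin_size bytes each; the division
    // above simply asks how many such blocks fit in the rest of the
    // chunk.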

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = NULL;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
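
    // The loop below records one bin index per request size, so a
    // lookup is a single array access.  For example, with the default
    // 8-byte _M_min_bin, sizes 0-8 map to bin 0, 9-16 to bin 1,
    // 17-32 to bin 2, and so on: each bin covers a power-of-two size
    // range.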
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = NULL;
        __bin._M_address = NULL;
      }
    _M_init = true;
  }

#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;
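
        // Rough numbers, assuming the default headroom of 10: with
        // 200 blocks free and 500 in use, __remove is 2000 - 500 =
        // 1500 after the subtraction below; for the largest bin
        // __limit is 1000, so 1500 / 10 = 150 blocks would be handed
        // back to the global pool 0.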

        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;
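
        // __reclaimed_base points at the shadow array of _Atomic_words
        // that _M_initialize lays out directly after the per-thread
        // size_t counters in the _M_used allocation; other threads
        // bump these counters when they free blocks we own.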

        // NB: For performance sake we don't resync every time, in order
        // to spare atomic ops.  Note that if __reclaimed increased by,
        // say, 1024, since the last sync, it means that the other
        // threads executed the atomic in the else below at least the
        // same number of times (at least, because _M_reserve_block may
        // have decreased the counter), therefore one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;
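
    // For a feel of the sizes involved, assuming the default _Tune
    // values (a chunk of 4096 bytes minus 4 pointers, 8-byte
    // _M_min_bin and 8-byte alignment): bin 0 holds 16-byte blocks,
    // so on a 64-bit target one chunk yields roughly 250 blocks.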

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list. If so, grab up to __block_count blocks in one
    //   lock and change ownership. If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0
    //   no need to lock or change ownership but check for free
    //   blocks on global list (and if not add new ones) and
    //   get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = NULL;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == NULL)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = NULL;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = NULL;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = NULL;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = NULL;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;
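
              // Since ids are 1-based (id 0 belongs to the global
              // pool), the record for id N lives at index N - 1; the
              // migration code below relies on this invariant.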

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
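
                  // Replay the old chain onto the new, larger array,
                  // preserving each record's index.  A record whose
                  // old _M_next was NULL gets linked to the first of
                  // the newly added records instead, which the loop
                  // above has already chained through to the end.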
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);

            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active we check the thread
    // key value and return its id, or if it's not set we take the
    // first record from _M_thread_freelist, set the key and return
    // its id.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        void* v = __gthread_getspecific(freelist._M_key);
        size_t _M_id = (size_t)v;
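
        // An id of 0 means this thread has not been assigned one
        // yet: valid ids handed out by the freelist start at 1, with
        // 0 reserved for the global pool.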
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
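
        // If no id could be reserved (_M_id is still 0 because the
        // freelist was exhausted), or the id exceeds what this pool
        // was configured for (the freelist is shared and may have
        // been sized by a pool with a larger _M_max_threads), fall
        // back to the shared pool 0.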
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) { }

  // XXX GLIBCXX_ABI Deprecated
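  // Kept only for ABI compatibility: note that the __destroy_handler
  // argument is ignored and ::_M_destroy_thread_key is registered
  // instead, exactly as in _M_initialize() above.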
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;

_GLIBCXX_END_NAMESPACE