// Allocator details.

// Copyright (C) 2004-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>

// The include file is needed for uintptr_t. If this file does not compile,
// check to make sure the target has <stdint.h> and that it provides
// uintptr_t.
#include <stdint.h>

namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*     _M_thread_freelist;
    _Thread_record*     _M_thread_freelist_array;
    size_t              _M_max_threads;
    __gthread_key_t     _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
          _M_thread_freelist = 0;
        }
    }
  };

  __freelist&
  get_freelist()
  {
    static __freelist freelist;
    return freelist;
  }

  __gnu_cxx::__mutex&
  get_freelist_mutex()
  {
    static __gnu_cxx::__mutex freelist_mutex;
    return freelist_mutex;
  }

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
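    // The key stores the 1-based thread id cast to a pointer, so the
    // record to recycle lives at index (id - 1) of the freelist array.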
    __freelist& freelist = get_freelist();
    {
      __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
      uintptr_t _M_id = reinterpret_cast<uintptr_t>(__id);

      typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
      _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
      __tr->_M_next = freelist._M_thread_freelist;
      freelist._M_thread_freelist = __tr;
    }
  }
#endif
} // anonymous namespace

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                              + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = 0;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
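    // For instance, with _M_min_bin == 8 and _M_max_bytes == 128 the loop
    // below produces bins serving requests of up to 8, 16, 32, 64 and 128
    // bytes, i.e. _M_bin_size ends up as 5.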
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = 0;
        __bin._M_address = 0;
      }
    _M_init = true;
  }


#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
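        // The per-thread "reclaimed" counters are _Atomic_words placed in
        // the same allocation as _M_used, immediately after its
        // __max_threads size_t entries (see _M_initialize), hence the
        // pointer arithmetic below.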
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance's sake we don't resync every time, in order
        // to spare atomic ops.  Note that if __reclaimed increased by,
        // say, 1024, since the last sync, it means that the other
        // threads executed the atomic in the else below at least the
        // same number of times (at least, because _M_reserve_block may
        // have decreased the counter), therefore one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list. If so, grab up to __block_count blocks in one
    //   lock and change ownership. If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0,
    //   no need to lock or change ownership but check for free
    //   blocks on global list (and if not add new ones) and
    //   get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = 0;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
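        // Only the owning thread writes its own _M_used slot; other threads
        // touch the matching _Atomic_word exclusively through __atomic_add.
        // Subtracting a snapshot of the counter from _M_used and then
        // atomically undoing that same amount therefore stays consistent
        // even if other threads reclaim more blocks in the meantime.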
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == 0)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = 0;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = 0;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = 0;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = 0;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
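    // The bin-count and binmap computations below match
    // __pool<false>::_M_initialize(); the difference is the per-thread
    // bookkeeping (_M_first, _M_free, _M_used and one mutex per bin) set
    // up afterwards, with index 0 reserved for the global pool.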
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
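                  // The key's destructor, ::_M_destroy_thread_key above,
                  // pushes the id back onto the shared freelist when the
                  // owning thread exits.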
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active, we check the thread
    // key value and return its id; if it's not set, we take the first
    // record from _M_thread_freelist, set the key and return its id.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        void* v = __gthread_getspecific(freelist._M_key);
        uintptr_t _M_id = (uintptr_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) throw () { }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace