From a6b417728c0b097bb6dea4802c7beb9fd54d29ca Mon Sep 17 00:00:00 2001
From: bkoz
Date: Fri, 12 Mar 2004 21:11:51 +0000
Subject: [PATCH] 2004-03-12  Benjamin Kosnik

	* testsuite/20_util/allocator/14176.cc: New.
	* include/ext/mt_allocator.h: Formatting fixes.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@79407 138bc75d-0d04-0410-961f-82ee72b054a4
---
 libstdc++-v3/ChangeLog                            |   8 +-
 libstdc++-v3/include/ext/mt_allocator.h           | 317 +++++++++++-----------
 libstdc++-v3/testsuite/20_util/allocator/14176.cc |  42 +++
 3 files changed, 203 insertions(+), 164 deletions(-)
 create mode 100644 libstdc++-v3/testsuite/20_util/allocator/14176.cc

diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index 3213a843428..6f53566c475 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,8 @@
+2004-03-12  Benjamin Kosnik
+
+	* testsuite/20_util/allocator/14176.cc: New.
+	* include/ext/mt_allocator.h: Formatting fixes.
+
 2004-03-11  Dhruv Matani
 
 	* include/Makefile.am (ext_headers): Add
@@ -5,7 +10,8 @@
 	* include/Makefile.in: Regenerate.
 	* docs/html/ext/ballocator_doc.txt: New file.
 	* include/ext/bitmap_allocator.h: New file.
-	* testsuite/performance/20_util/allocator/list_sort_search.cc: New test.
+	* testsuite/performance/20_util/allocator/list_sort_search.cc: Add
+	test.
 	* testsuite/performance/20_util/allocator/map_mt_find.cc: Likewise.
 	* testsuite/performance/20_util/allocator/producer_consumer.cc: Add
 	test for the bitmap_allocator<>.
diff --git a/libstdc++-v3/include/ext/mt_allocator.h b/libstdc++-v3/include/ext/mt_allocator.h
index 7b42300ba46..7204d841745 100644
--- a/libstdc++-v3/include/ext/mt_allocator.h
+++ b/libstdc++-v3/include/ext/mt_allocator.h
@@ -216,10 +216,10 @@ namespace __gnu_cxx
 
       static void
       _S_destroy_thread_key(void* freelist_pos);
+#endif
 
       static size_t
       _S_get_thread_id();
-#endif
 
      struct block_record
      {
@@ -293,18 +293,14 @@ namespace __gnu_cxx
        }
 
      // Round up to power of 2 and figure out which bin to use.
-      size_t bin = _S_binmap[__bytes];
-
-#ifdef __GTHREADS
-      size_t thread_id = _S_get_thread_id();
-#else
-      size_t thread_id = 0;
-#endif
+      const size_t __which = _S_binmap[__bytes];
+      const size_t __thread_id = _S_get_thread_id();
 
      // Find out if we have blocks on our freelist. If so, go ahead
      // and use them directly without having to lock anything.
+      const bin_record& __bin = _S_bin[__which];
      block_record* block = NULL;
-      if (_S_bin[bin].first[thread_id] == NULL)
+      if (__bin.first[__thread_id] == NULL)
        {
          // Are we using threads?
          // - Yes, check if there are free blocks on the global
@@ -319,124 +315,115 @@ namespace __gnu_cxx
 #ifdef __GTHREADS
          if (__gthread_active_p())
            {
-              size_t bin_t = 1 << bin;
-              size_t block_count =
-                _S_options._M_chunk_size /(bin_t + sizeof(block_record));
+              const size_t bin_size = (1 << __which) + sizeof(block_record);
+              size_t block_count = _S_options._M_chunk_size / bin_size;
 
-              __gthread_mutex_lock(_S_bin[bin].mutex);
-
-              if (_S_bin[bin].first[0] == NULL)
+              __gthread_mutex_lock(__bin.mutex);
+              if (__bin.first[0] == NULL)
                {
                  // No need to hold the lock when we are adding a
                  // whole chunk to our own list.
-                  __gthread_mutex_unlock(_S_bin[bin].mutex);
-
-                  _S_bin[bin].first[thread_id] =
-                    static_cast<block_record*>(::operator new(_S_options._M_chunk_size));
-
-                  if (!_S_bin[bin].first[thread_id])
-                    std::__throw_bad_alloc();
+                  __gthread_mutex_unlock(__bin.mutex);
 
-                  _S_bin[bin].free[thread_id] = block_count;
+                  void* v = ::operator new(_S_options._M_chunk_size);
+                  __bin.first[__thread_id] = static_cast<block_record*>(v);
+                  __bin.free[__thread_id] = block_count;
                  block_count--;
-                  block = _S_bin[bin].first[thread_id];
+                  block = __bin.first[__thread_id];
 
                  while (block_count > 0)
                    {
-                      block->next = (block_record*)((char*)block +
-                                    (bin_t + sizeof(block_record)));
-                      block->thread_id = thread_id;
+                      char* c = reinterpret_cast<char*>(block) + bin_size;
+                      block->next = reinterpret_cast<block_record*>(c);
+                      block->thread_id = __thread_id;
                      block = block->next;
                      block_count--;
                    }
 
                  block->next = NULL;
-                  block->thread_id = thread_id;
+                  block->thread_id = __thread_id;
                }
              else
                {
                  size_t global_count = 0;
                  block_record* tmp;
-                  while (_S_bin[bin].first[0] != NULL
-                         && global_count < block_count)
+                  while (__bin.first[0] != NULL && global_count < block_count)
                    {
-                      tmp = _S_bin[bin].first[0]->next;
-                      block = _S_bin[bin].first[0];
+                      tmp = __bin.first[0]->next;
+                      block = __bin.first[0];
 
-                      if (_S_bin[bin].first[thread_id] == NULL)
+                      if (__bin.first[__thread_id] == NULL)
                        {
-                          _S_bin[bin].first[thread_id] = block;
+                          __bin.first[__thread_id] = block;
                          block->next = NULL;
                        }
                      else
                        {
-                          block->next = _S_bin[bin].first[thread_id];
-                          _S_bin[bin].first[thread_id] = block;
+                          block->next = __bin.first[__thread_id];
+                          __bin.first[__thread_id] = block;
                        }
 
-                      block->thread_id = thread_id;
-                      _S_bin[bin].free[thread_id]++;
-                      _S_bin[bin].first[0] = tmp;
+                      block->thread_id = __thread_id;
+                      __bin.free[__thread_id]++;
+                      __bin.first[0] = tmp;
 
                      global_count++;
                    }
-                  __gthread_mutex_unlock(_S_bin[bin].mutex);
+                  __gthread_mutex_unlock(__bin.mutex);
                }
 
              // Return the first newly added block in our list and
              // update the counters
-              block = _S_bin[bin].first[thread_id];
-              _S_bin[bin].first[thread_id] =
-                _S_bin[bin].first[thread_id]->next;
-              _S_bin[bin].free[thread_id]--;
-              _S_bin[bin].used[thread_id]++;
+              block = __bin.first[__thread_id];
+              __bin.first[__thread_id] = __bin.first[__thread_id]->next;
+              __bin.free[__thread_id]--;
+              __bin.used[__thread_id]++;
            }
          else
 #endif
            {
-              _S_bin[bin].first[0] =
-                static_cast<block_record*>(::operator new(_S_options._M_chunk_size));
+              void* __v = ::operator new(_S_options._M_chunk_size);
+              __bin.first[0] = static_cast<block_record*>(__v);
 
-              size_t bin_t = 1 << bin;
-              size_t block_count =
-                _S_options._M_chunk_size / (bin_t + sizeof(block_record));
+              const size_t bin_size = (1 << __which) + sizeof(block_record);
+              size_t block_count = _S_options._M_chunk_size / bin_size;
 
              block_count--;
-              block = _S_bin[bin].first[0];
+              block = __bin.first[0];
 
              while (block_count > 0)
                {
-                  block->next = (block_record*)((char*)block +
-                                (bin_t + sizeof(block_record)));
+                  char* __c = reinterpret_cast<char*>(block) + bin_size;
+                  block->next = reinterpret_cast<block_record*>(__c);
                  block = block->next;
                  block_count--;
                }
 
              block->next = NULL;
-              block = _S_bin[bin].first[0];
 
              // Remove from list.
-              _S_bin[bin].first[0] = _S_bin[bin].first[0]->next;
+              block = __bin.first[0];
+              __bin.first[0] = __bin.first[0]->next;
            }
        }
      else
        {
-          // "Default" operation - we have blocks on our own
-          // freelist grab the first record and update the counters.
-          block = _S_bin[bin].first[thread_id];
-
-          _S_bin[bin].first[thread_id] = _S_bin[bin].first[thread_id]->next;
-
+          // "Default" operation - we have blocks on our own freelist
+          // grab the first record and update the counters.
+          block = __bin.first[__thread_id];
+          __bin.first[__thread_id] = __bin.first[__thread_id]->next;
+
 #ifdef __GTHREADS
          if (__gthread_active_p())
            {
-              _S_bin[bin].free[thread_id]--;
-              _S_bin[bin].used[thread_id]++;
+              __bin.free[__thread_id]--;
+              __bin.used[__thread_id]++;
            }
 #endif
        }
 
-      return static_cast<_Tp*>(static_cast<void*>((char*)block +
-                                                  sizeof(block_record)));
+      char* __c = reinterpret_cast<char*>(block) + sizeof(block_record);
+      return static_cast<_Tp*>(static_cast<void*>(__c));
    }
+
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
@@ -444,83 +431,79 @@ namespace __gnu_cxx
    {
      // Requests larger than _M_max_bytes are handled by operators
      // new/delete directly.
-      if (__n * sizeof(_Tp) > _S_options._M_max_bytes
-          || _S_options._M_force_new)
+      const size_t __bytes = __n * sizeof(_Tp);
+      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
        {
          ::operator delete(__p);
          return;
        }
 
      // Round up to power of 2 and figure out which bin to use.
-      size_t bin = _S_binmap[__n * sizeof(_Tp)];
-
-#ifdef __GTHREADS
-      size_t thread_id = _S_get_thread_id();
-#else
-      size_t thread_id = 0;
-#endif
-
-      block_record* block = (block_record*)((char*)__p
-                                            - sizeof(block_record));
+      const size_t __which = _S_binmap[__bytes];
+      const size_t thread_id = _S_get_thread_id();
+      const bin_record& __bin = _S_bin[__which];
+
+      char* __c = reinterpret_cast<char*>(__p) - sizeof(block_record);
+      block_record* block = reinterpret_cast<block_record*>(__c);
 
 #ifdef __GTHREADS
      if (__gthread_active_p())
        {
          // Calculate the number of records to remove from our freelist.
-          int remove = _S_bin[bin].free[thread_id] -
-            (_S_bin[bin].used[thread_id] / _S_options._M_freelist_headroom);
+          int remove = __bin.free[thread_id] -
+            (__bin.used[thread_id] / _S_options._M_freelist_headroom);
 
          // The calculation above will almost always tell us to
          // remove one or two records at a time, but this creates too
          // much contention when locking and therefore we wait until
          // the number of records is "high enough".
-          if (remove > (int)(100 * (_S_bin_size - bin)) &&
-              remove > (int)(_S_bin[bin].free[thread_id] /
-                             _S_options._M_freelist_headroom))
+          int __cond1 = static_cast<int>(100 * (_S_bin_size - __which));
+          int __cond2 = static_cast<int>(__bin.free[thread_id] / _S_options._M_freelist_headroom);
+          if (remove > __cond1 && remove > __cond2)
            {
-              __gthread_mutex_lock(_S_bin[bin].mutex);
+              __gthread_mutex_lock(__bin.mutex);
              block_record* tmp;
              while (remove > 0)
                {
-                  tmp = _S_bin[bin].first[thread_id]->next;
-                  if (_S_bin[bin].first[0] == NULL)
+                  tmp = __bin.first[thread_id]->next;
+                  if (__bin.first[0] == NULL)
                    {
-                      _S_bin[bin].first[0] = _S_bin[bin].first[thread_id];
-                      _S_bin[bin].first[0]->next = NULL;
+                      __bin.first[0] = __bin.first[thread_id];
+                      __bin.first[0]->next = NULL;
                    }
                  else
                    {
-                      _S_bin[bin].first[thread_id]->next = _S_bin[bin].first[0];
-                      _S_bin[bin].first[0] = _S_bin[bin].first[thread_id];
+                      __bin.first[thread_id]->next = __bin.first[0];
+                      __bin.first[0] = __bin.first[thread_id];
                    }
 
-                  _S_bin[bin].first[thread_id] = tmp;
-                  _S_bin[bin].free[thread_id]--;
+                  __bin.first[thread_id] = tmp;
+                  __bin.free[thread_id]--;
                  remove--;
                }
-              __gthread_mutex_unlock(_S_bin[bin].mutex);
+              __gthread_mutex_unlock(__bin.mutex);
            }
 
          // Return this block to our list and update counters and
          // owner id as needed.
-          if (_S_bin[bin].first[thread_id] == NULL)
+          if (__bin.first[thread_id] == NULL)
            {
-              _S_bin[bin].first[thread_id] = block;
+              __bin.first[thread_id] = block;
              block->next = NULL;
            }
          else
            {
-              block->next = _S_bin[bin].first[thread_id];
-              _S_bin[bin].first[thread_id] = block;
+              block->next = __bin.first[thread_id];
+              __bin.first[thread_id] = block;
            }
 
-          _S_bin[bin].free[thread_id]++;
+          __bin.free[thread_id]++;
 
          if (thread_id == block->thread_id)
-            _S_bin[bin].used[thread_id]--;
+            __bin.used[thread_id]--;
          else
            {
-              _S_bin[bin].used[block->thread_id]--;
+              __bin.used[block->thread_id]--;
              block->thread_id = thread_id;
            }
        }
@@ -528,15 +511,15 @@ namespace __gnu_cxx
 #endif
        {
          // Single threaded application - return to global pool.
-          if (_S_bin[bin].first[0] == NULL)
+          if (__bin.first[0] == NULL)
            {
-              _S_bin[bin].first[0] = block;
+              __bin.first[0] = block;
              block->next = NULL;
            }
          else
            {
-              block->next = _S_bin[bin].first[0];
-              _S_bin[bin].first[0] = block;
+              block->next = __bin.first[0];
+              __bin.first[0] = block;
            }
        }
    }
@@ -551,52 +534,54 @@ namespace __gnu_cxx
 
      // Calculate the number of bins required based on _M_max_bytes.
      // _S_bin_size is statically-initialized to one.
-      size_t bin_size = 1;
-      while (_S_options._M_max_bytes > bin_size)
+      size_t __bin_size = 1;
+      while (_S_options._M_max_bytes > __bin_size)
        {
-          bin_size = bin_size << 1;
+          __bin_size = __bin_size << 1;
          _S_bin_size++;
        }
 
      // Setup the bin map for quick lookup of the relevant bin.
-      const size_t n1 = (_S_options._M_max_bytes + 1) * sizeof(binmap_type);
-      _S_binmap = static_cast<binmap_type*>(::operator new(n1));
+      const size_t __j = (_S_options._M_max_bytes + 1) * sizeof(binmap_type);
+      _S_binmap = static_cast<binmap_type*>(::operator new(__j));
 
-      binmap_type* bp_t = _S_binmap;
-      binmap_type bin_max_t = 1;
-      binmap_type bin_t = 0;
-      for (binmap_type ct = 0; ct <= _S_options._M_max_bytes; ct++)
+      binmap_type* __bp = _S_binmap;
+      binmap_type __bin_max = 1;
+      binmap_type __bint = 0;
+      for (binmap_type __ct = 0; __ct <= _S_options._M_max_bytes; __ct++)
        {
-          if (ct > bin_max_t)
+          if (__ct > __bin_max)
            {
-              bin_max_t <<= 1;
-              bin_t++;
+              __bin_max <<= 1;
+              __bint++;
            }
-          *bp_t++ = bin_t;
+          *__bp++ = __bint;
        }
 
      // If __gthread_active_p() create and initialize the list of
      // free thread ids. Single threaded applications use thread id 0
      // directly and have no need for this.
+      void* __v;
 #ifdef __GTHREADS
      if (__gthread_active_p())
        {
-          const size_t n2 = sizeof(thread_record) * _S_options._M_max_threads;
-          _S_thread_freelist_first = static_cast<thread_record*>(::operator new(n2));
+          const size_t __k = sizeof(thread_record) * _S_options._M_max_threads;
+          __v = ::operator new(__k);
+          _S_thread_freelist_first = static_cast<thread_record*>(__v);
 
          // NOTE! The first assignable thread id is 1 since the
          // global pool uses id 0
-          size_t i;
-          for (i = 1; i < _S_options._M_max_threads; i++)
+          size_t __i;
+          for (__i = 1; __i < _S_options._M_max_threads; __i++)
            {
-              thread_record& tr = _S_thread_freelist_first[i - 1];
-              tr.next = &_S_thread_freelist_first[i];
-              tr.id = i;
+              thread_record& __tr = _S_thread_freelist_first[__i - 1];
+              __tr.next = &_S_thread_freelist_first[__i];
+              __tr.id = __i;
            }
 
          // Set last record.
-          _S_thread_freelist_first[i - 1].next = NULL;
-          _S_thread_freelist_first[i - 1].id = i;
+          _S_thread_freelist_first[__i - 1].next = NULL;
+          _S_thread_freelist_first[__i - 1].id = __i;
 
          // Make sure this is initialized.
@@ -610,50 +595,54 @@ namespace __gnu_cxx
 #endif
 
      // Initialize _S_bin and its members.
-      _S_bin = static_cast<bin_record*>(::operator
-                                        new(sizeof(bin_record) * _S_bin_size));
-
+      __v = ::operator new(sizeof(bin_record) * _S_bin_size);
+      _S_bin = static_cast<bin_record*>(__v);
+
      // Maximum number of threads.
-      size_t __n = 1;
+      size_t __max_threads = 1;
 #ifdef __GTHREADS
      if (__gthread_active_p())
-        __n = _S_options._M_max_threads + 1;
+        __max_threads = _S_options._M_max_threads + 1;
 #endif
 
-      for (size_t bin = 0; bin < _S_bin_size; bin++)
+      for (size_t __n = 0; __n < _S_bin_size; __n++)
        {
-          bin_record& br = _S_bin[bin];
-          br.first = static_cast<block_record**>(::operator new(sizeof(block_record*) * __n));
+          bin_record& __bin = _S_bin[__n];
+          __v = ::operator new(sizeof(block_record*) * __max_threads);
+          __bin.first = static_cast<block_record**>(__v);
 
 #ifdef __GTHREADS
          if (__gthread_active_p())
            {
-              br.free = static_cast<size_t*>(::operator new(sizeof(size_t)
-                                                            * __n));
-              br.used = static_cast<size_t*>(::operator new(sizeof(size_t)
-                                                            * __n));
-              br.mutex = static_cast<__gthread_mutex_t*>(::operator new(sizeof(__gthread_mutex_t)));
+              __v = ::operator new(sizeof(size_t) * __max_threads);
+              __bin.free = static_cast<size_t*>(__v);
+
+              __v = ::operator new(sizeof(size_t) * __max_threads);
+              __bin.used = static_cast<size_t*>(__v);
+
+              __v = ::operator new(sizeof(__gthread_mutex_t));
+              __bin.mutex = static_cast<__gthread_mutex_t*>(__v);
 
 #ifdef __GTHREAD_MUTEX_INIT
              {
                // Do not copy a POSIX/gthr mutex once in use.
                __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
-                *br.mutex = __tmp;
+                *__bin.mutex = __tmp;
              }
 #else
-              { __GTHREAD_MUTEX_INIT_FUNCTION(br.mutex); }
+              { __GTHREAD_MUTEX_INIT_FUNCTION(__bin.mutex); }
 #endif
            }
 #endif
 
-          for (size_t thread = 0; thread < __n; thread++)
+          for (size_t __threadn = 0; __threadn < __max_threads; __threadn++)
            {
-              br.first[thread] = NULL;
+              __bin.first[__threadn] = NULL;
 #ifdef __GTHREADS
              if (__gthread_active_p())
                {
-                  br.free[thread] = 0;
-                  br.used[thread] = 0;
+                  __bin.free[__threadn] = 0;
+                  __bin.used[__threadn] = 0;
                }
 #endif
            }
@@ -661,51 +650,53 @@ namespace __gnu_cxx
 
      _S_init = true;
    }
 
-#ifdef __GTHREADS
-  template<typename _Tp>
-    void
-    __mt_alloc<_Tp>::
-    _S_destroy_thread_key(void* freelist_pos)
-    {
-      // Return this thread id record to front of thread_freelist.
-      __gthread_mutex_lock(&_S_thread_freelist_mutex);
-      ((thread_record*)freelist_pos)->next = _S_thread_freelist_first;
-      _S_thread_freelist_first = (thread_record*)freelist_pos;
-      __gthread_mutex_unlock(&_S_thread_freelist_mutex);
-    }
-
  template<typename _Tp>
    size_t
    __mt_alloc<_Tp>::
    _S_get_thread_id()
    {
+#ifdef __GTHREADS
      // If we have thread support and it's active we check the thread
      // key value and return it's id or if it's not set we take the
      // first record from _S_thread_freelist and sets the key and
      // returns it's id.
      if (__gthread_active_p())
        {
-          thread_record* freelist_pos = static_cast<thread_record*>(__gthread_getspecific(_S_thread_key));
-          if (freelist_pos == NULL)
+          thread_record* __freelist_pos = static_cast<thread_record*>(__gthread_getspecific(_S_thread_key));
+          if (__freelist_pos == NULL)
            {
              // Since _S_options._M_max_threads must be larger than
              // the theoretical max number of threads of the OS the
              // list can never be empty.
              __gthread_mutex_lock(&_S_thread_freelist_mutex);
-              freelist_pos = _S_thread_freelist_first;
+              __freelist_pos = _S_thread_freelist_first;
              _S_thread_freelist_first = _S_thread_freelist_first->next;
              __gthread_mutex_unlock(&_S_thread_freelist_mutex);
 
              __gthread_setspecific(_S_thread_key,
-                                    static_cast<void*>(freelist_pos));
+                                    static_cast<void*>(__freelist_pos));
            }
-          return freelist_pos->id;
+          return __freelist_pos->id;
        }
-
+#endif
      // Otherwise (no thread support or inactive) all requests are
      // served from the global pool 0.
      return 0;
    }
+
+#ifdef __GTHREADS
+  template<typename _Tp>
+    void
+    __mt_alloc<_Tp>::
+    _S_destroy_thread_key(void* __freelist_pos)
+    {
+      // Return this thread id record to front of thread_freelist.
+      __gthread_mutex_lock(&_S_thread_freelist_mutex);
+      thread_record* __tr = static_cast<thread_record*>(__freelist_pos);
+      __tr->next = _S_thread_freelist_first;
+      _S_thread_freelist_first = __tr;
+      __gthread_mutex_unlock(&_S_thread_freelist_mutex);
+    }
 #endif
 
  template<typename _Tp>
diff --git a/libstdc++-v3/testsuite/20_util/allocator/14176.cc b/libstdc++-v3/testsuite/20_util/allocator/14176.cc
new file mode 100644
index 00000000000..cb8a2f5c4bf
--- /dev/null
+++ b/libstdc++-v3/testsuite/20_util/allocator/14176.cc
@@ -0,0 +1,42 @@
+// Copyright (C) 2004 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 2, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING.  If not, write to the Free
+// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+// USA.
+
+// 20.4.1.1 allocator members
+
+#include <memory>
+#include <testsuite_hooks.h>
+
+// libstdc++/14176
+void test02()
+{
+  unsigned int len = 0;
+  std::allocator<int> a;
+  int* p = a.allocate(len);
+  a.deallocate(p, len);
+}
+
+#if !__GXX_WEAK__ && _MT_ALLOCATOR_H
+// Explicitly instantiate for systems with no COMDAT or weak support.
+template class __gnu_cxx::__mt_alloc<int>;
+#endif
+
+int main()
+{
+  test02();
+  return 0;
+}
-- 
2.11.0
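
For readers following the allocate() hunks above: two mechanisms do most of the
work. _S_initialize() builds _S_binmap, a table mapping every request size up
to _M_max_bytes to the index of its power-of-two bin, and allocate() carves a
fresh chunk into a linked list of equal-sized blocks whenever a bin's freelist
runs dry. The standalone sketch below illustrates both ideas only; Pool,
build_binmap and carve_chunk are invented names, the demo constants stand in
for the _Tune values, and the real allocator layers per-thread ownership and
locking on top of this.

#include <cstddef>
#include <new>

struct block { block* next; };

struct Pool
{
  static const std::size_t max_bytes  = 128;   // demo stand-in for _M_max_bytes
  static const std::size_t chunk_size = 4096;  // demo stand-in for _M_chunk_size
  unsigned char binmap[max_bytes + 1];

  // Mirror of the _S_binmap setup: map each byte count to the index of the
  // smallest power-of-two bin that can hold it.
  void build_binmap()
  {
    std::size_t bin_max = 1;
    unsigned char bin = 0;
    for (std::size_t ct = 0; ct <= max_bytes; ct++)
      {
        if (ct > bin_max)
          {
            bin_max <<= 1;
            bin++;
          }
        binmap[ct] = bin;
      }
  }

  // Mirror of the empty-freelist path in allocate(): carve one raw chunk into
  // blocks of (1 << bin) payload bytes plus a header, linked in place.
  block* carve_chunk(std::size_t bin)
  {
    const std::size_t bin_size = (std::size_t(1) << bin) + sizeof(block);
    const std::size_t count = chunk_size / bin_size;
    char* raw = static_cast<char*>(::operator new(chunk_size));
    block* b = reinterpret_cast<block*>(raw);
    for (std::size_t i = 1; i < count; i++)
      {
        b->next = reinterpret_cast<block*>(raw + i * bin_size);
        b = b->next;
      }
    b->next = 0;
    return reinterpret_cast<block*>(raw);
  }
};

A request of n bytes then lands in bin binmap[n], and the pointer handed to
the user is the block address plus sizeof(block) - the same arithmetic as in
allocate()'s return statement and deallocate()'s header recovery.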
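
The deallocate() hunks implement a "freelist headroom" policy: a thread only
returns blocks to the global pool when its free count clearly exceeds a
percentage of its used count, so the bin mutex is not taken for just one or
two blocks at a time. A hedged sketch of that decision follows; the function
and parameter names are invented, and the headroom value of 10 (roughly "keep
10% of used blocks as slack") reflects this era's _Tune default but should be
treated as an assumption.

#include <cstddef>

// How many surplus blocks should a thread hand back to the global pool?
// Mirrors the "remove" computation in deallocate(); returns 0 when the
// surplus is too small to justify taking the bin mutex.
int blocks_to_return(std::size_t free_count, std::size_t used_count,
                     std::size_t headroom,       // cf. _M_freelist_headroom
                     std::size_t bins_remaining) // cf. _S_bin_size - bin index
{
  const int remove = static_cast<int>(free_count)
                     - static_cast<int>(used_count / headroom);
  // Wait until the count is "high enough": above a per-bin threshold and
  // above a fixed fraction of the freelist itself.
  if (remove > static_cast<int>(100 * bins_remaining)
      && remove > static_cast<int>(free_count / headroom))
    return remove;
  return 0;
}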
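
Finally, _S_get_thread_id() and the relocated _S_destroy_thread_key() form a
small id-recycling scheme: thread_record entries are preallocated in
_S_initialize(), each new thread pops one on first use and caches it in a TLS
key, and the key's destructor pushes the record back when the thread exits.
Below is a simplified sketch using raw POSIX threads rather than the gthr
abstraction; names are illustrative and error handling is omitted.

#include <pthread.h>
#include <cstddef>

struct thread_record { thread_record* next; std::size_t id; };

static thread_record*  freelist_first;   // ids 1..max, linked up at startup
static pthread_mutex_t freelist_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_key_t   thread_key;       // created elsewhere with
                                         // pthread_key_create(&thread_key,
                                         //                    destroy_thread_key)

// Key destructor: on thread exit, push the id record back on the freelist.
extern "C" void destroy_thread_key(void* pos)
{
  pthread_mutex_lock(&freelist_mutex);
  thread_record* tr = static_cast<thread_record*>(pos);
  tr->next = freelist_first;
  freelist_first = tr;
  pthread_mutex_unlock(&freelist_mutex);
}

// First call from a thread pops a record and caches it in TLS; later calls
// just read the cached record, as _S_get_thread_id() does via gthr.
std::size_t get_thread_id()
{
  thread_record* pos =
    static_cast<thread_record*>(pthread_getspecific(thread_key));
  if (pos == 0)
    {
      pthread_mutex_lock(&freelist_mutex);
      pos = freelist_first;
      freelist_first = freelist_first->next;
      pthread_mutex_unlock(&freelist_mutex);
      pthread_setspecific(thread_key, pos);
    }
  return pos->id;
}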
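
As for the new test: allocator/14176.cc exercises allocation with a count of
zero through std::allocator, which forwards to __mt_alloc when the library is
configured with --enable-libstdcxx-allocator=mt (hence the _MT_ALLOCATOR_H
guard around the explicit instantiation). The extension allocator can also be
named directly; a minimal usage sketch, assumed typical rather than taken from
the testsuite:

#include <vector>
#include <ext/mt_allocator.h>

int main()
{
  // Any container can be instantiated with the extension allocator.
  std::vector<int, __gnu_cxx::__mt_alloc<int> > v;
  v.push_back(42);
  return v[0] == 42 ? 0 : 1;
}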