diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h
index f284126e247..929f0bb790a 100644
--- a/libstdc++-v3/include/bits/hashtable.h
+++ b/libstdc++-v3/include/bits/hashtable.h
@@ -1,6 +1,7 @@
 // hashtable.h header -*- C++ -*-
 
-// Copyright (C) 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
+// Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012
+// Free Software Foundation, Inc.
 //
 // This file is part of the GNU ISO C++ Library.  This library is free
 // software; you can redistribute it and/or modify it under the
@@ -48,7 +49,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   // value type is Value.  As a conforming extension, we allow for
   // value type != Value.
 
-  // _ExtractKey: function object that takes a object of type Value
+  // _ExtractKey: function object that takes an object of type Value
   // and returns a value of type _Key.
 
   // _Equal: function object that takes two objects of type k and returns
@@ -78,9 +79,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   // count.  If so, returns make_pair(true, n), where n is the new
   // bucket count.  If not, returns make_pair(false, <anything>).
 
-  // ??? Right now it is hard-wired that the number of buckets never
-  // shrinks.  Should we allow _RehashPolicy to change that?
-
   // __cache_hash_code: bool.  true if we store the value of the hash
   // function along with the value.  This is a time-space tradeoff.
   // Storing it may improve lookup speed by reducing the number of times
@@ -94,6 +92,51 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   // is always at most one, false if it may be an arbitrary number.  This
   // is true for unordered_set and unordered_map, false for unordered_multiset
   // and unordered_multimap.
+
+  /**
+   * Here is the _Hashtable data structure; each _Hashtable has:
+   * - _Bucket[]       _M_buckets
+   * - _Hash_node_base _M_before_begin
+   * - size_type       _M_bucket_count
+   * - size_type       _M_element_count
+   *
+   * with _Bucket being _Hash_node* and _Hash_node containing:
+   * - _Hash_node*   _M_next
+   * - Tp            _M_value
+   * - size_t        _M_code if cache_hash_code is true
+   *
+   * In terms of Standard containers the hashtable is like the aggregation of:
+   * - std::forward_list<_Node> containing the elements
+   * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
+   *
+   * The non-empty buckets contain the node before the first bucket node. This
+   * design allows us to implement something like a
+   * std::forward_list::insert_after on container insertion and a
+   * std::forward_list::erase_after on container erase calls. _M_before_begin
+   * is equivalent to std::forward_list::before_begin. Empty buckets contain
+   * nullptr. Note that one of the non-empty buckets contains
+   * &_M_before_begin, which is not a dereferenceable node, so the node
+   * pointers stored in buckets shall never be dereferenced; only their next
+   * node may be.
+   *
+   * Walking through a bucket's nodes requires a check on the hash code to see
+   * if each node is still in the bucket. Such a design relies on a quite
+   * efficient hash functor, and is one of the reasons it is highly advisable
+   * to set __cache_hash_code to true.
+   *
+   * The container iterators are simply built from nodes. This way
+   * incrementing the iterator is perfectly efficient no matter how many empty
+   * buckets there are in the container.
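+   *
+   * For illustration (a hypothetical layout, with invented elements): after
+   * inserting Y (hashing to bucket 0), then X1 and X2 (both hashing to
+   * bucket 2) into a 4-bucket table, the list and bucket array look like:
+   *
+   *   _M_before_begin -> X2 -> X1 -> Y
+   *   _M_buckets[0] == &X1               (the node before Y, bucket 0's head)
+   *   _M_buckets[2] == &_M_before_begin  (bucket 2 holds the list head)
+   *   _M_buckets[1] == _M_buckets[3] == nullptr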
+   *
+   * On insert we compute the element's hash code and use it to find the
+   * bucket index. If the element must be inserted in an empty bucket we add
+   * it at the beginning of the singly linked list and make the bucket point
+   * to _M_before_begin. The bucket that used to point to _M_before_begin, if
+   * any, is updated to point to its new before-begin node.
+   *
+   * On erase, the simple iterator design requires using the hash functor to
+   * get the index of the bucket to update. For this reason, when
+   * __cache_hash_code is set to false, there is a static assertion that the
+   * hash functor shall not throw.
+   */
  template<typename _Key, typename _Value, typename _Allocator,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash,
           typename _RehashPolicy,
           bool __cache_hash_code, bool __constant_iterators, bool __unique_keys>
    class _Hashtable
    : public __detail::_Rehash_base<_RehashPolicy,
                                    _Hashtable<_Key, _Value, _Allocator,
                                               _ExtractKey, _Equal,
                                               _H1, _H2, _Hash, _RehashPolicy,
                                               __cache_hash_code,
                                               __constant_iterators,
                                               __unique_keys> >,
-      public __detail::_Hash_code_base<_Key, _Value, _ExtractKey, _Equal,
+      public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
                                       _H1, _H2, _Hash, __cache_hash_code>,
      public __detail::_Map_base<_Key, _Value, _ExtractKey, __unique_keys,
                                 _Hashtable<_Key, _Value, _Allocator,
@@ -130,6 +173,39 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
                                            __constant_iterators,
                                            __unique_keys> >
    {
+      template<typename _Cond>
+        using __if_hash_code_cached
+          = __or_<__not_<integral_constant<bool, __cache_hash_code>>, _Cond>;
+
+      template<typename _Cond>
+        using __if_hash_code_not_cached
+          = __or_<integral_constant<bool, __cache_hash_code>, _Cond>;
+
+      // When hash codes are not cached the hash functor shall not throw
+      // because it is used in methods (erase, swap...) that shall not throw.
+      static_assert(__if_hash_code_not_cached<__detail::__is_noexcept_hash<_Key,
+                                                                _H1>>::value,
+            "Cache the hash code or qualify your hash functor with noexcept");
+
+      // The following two static assertions are necessary to guarantee that
+      // swapping two hashtable instances won't invalidate associated local
+      // iterators.
+
+      // When hash codes are cached the local iterator only uses _H2, which
+      // must then be empty.
+      static_assert(__if_hash_code_cached<is_empty<_H2>>::value,
+            "Functor used to map hash code to bucket index must be empty");
+
+      typedef __detail::_Hash_code_base<_Key, _Value, _ExtractKey,
+                                        _H1, _H2, _Hash,
+                                        __cache_hash_code> _HCBase;
+
+      // When hash codes are not cached the local iterator uses _HCBase above
+      // to compute the node's bucket index, so _HCBase has to be empty.
+      static_assert(__if_hash_code_not_cached<is_empty<_HCBase>>::value,
+            "Cache the hash code or make functors involved in hash code"
+            " and bucket index computation empty");
+
    public:
      typedef _Allocator                                  allocator_type;
      typedef _Value                                      value_type;
@@ -144,20 +220,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
      typedef std::size_t                                 size_type;
      typedef std::ptrdiff_t                              difference_type;
+      typedef __detail::_Local_iterator<key_type, value_type, _ExtractKey,
+                                        _H1, _H2, _Hash,
+                                        __constant_iterators,
+                                        __cache_hash_code>
+                                                          local_iterator;
+      typedef __detail::_Local_const_iterator<key_type, value_type,
+                                              _ExtractKey, _H1, _H2, _Hash,
+                                              __constant_iterators,
+                                              __cache_hash_code>
+                                                          const_local_iterator;
      typedef __detail::_Node_iterator<value_type, __constant_iterators,
                                       __cache_hash_code>
-                                                          local_iterator;
+                                                          iterator;
      typedef __detail::_Node_const_iterator<value_type,
                                             __constant_iterators,
                                             __cache_hash_code>
-                                                          const_local_iterator;
-
-      typedef __detail::_Hashtable_iterator
-                                                          iterator;
-      typedef __detail::_Hashtable_const_iterator
                                                          const_iterator;

      template _Node;
      typedef typename _Allocator::template rebind<_Node>::other
                                                        _Node_allocator_type;
-      typedef typename _Allocator::template rebind<_Node*>::other
+      typedef __detail::_Hash_node_base _BaseNode;
+      typedef _BaseNode* _Bucket;
+      typedef typename _Allocator::template rebind<_Bucket>::other
                                                        _Bucket_allocator_type;
      typedef typename _Allocator::template rebind<_Value>::other
                                                        _Value_allocator_type;

-      _Node_allocator_type   _M_node_allocator;
-      _Node**                _M_buckets;
-      size_type              _M_bucket_count;
-      size_type              _M_begin_bucket_index; // First non-empty bucket.
- size_type _M_element_count; - _RehashPolicy _M_rehash_policy; + _Node_allocator_type _M_node_allocator; + _Bucket* _M_buckets; + size_type _M_bucket_count; + _BaseNode _M_before_begin; + size_type _M_element_count; + _RehashPolicy _M_rehash_policy; template _Node* @@ -188,14 +269,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION void _M_deallocate_node(_Node* __n); + // Deallocate the linked list of nodes pointed to by __n void - _M_deallocate_nodes(_Node**, size_type); + _M_deallocate_nodes(_Node* __n); - _Node** + _Bucket* _M_allocate_buckets(size_type __n); void - _M_deallocate_buckets(_Node**, size_type __n); + _M_deallocate_buckets(_Bucket*, size_type __n); + + // Gets bucket begin, deals with the fact that non-empty buckets contain + // their before begin node. + _Node* + _M_bucket_begin(size_type __bkt) const; + + _Node* + _M_begin() const + { return static_cast<_Node*>(_M_before_begin._M_nxt); } public: // Constructor, destructor, assignment, swap @@ -233,65 +324,65 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION return *this; } - ~_Hashtable(); + ~_Hashtable() noexcept; void swap(_Hashtable&); // Basic container operations iterator - begin() - { return iterator(_M_buckets + _M_begin_bucket_index); } + begin() noexcept + { return iterator(_M_begin()); } const_iterator - begin() const - { return const_iterator(_M_buckets + _M_begin_bucket_index); } + begin() const noexcept + { return const_iterator(_M_begin()); } iterator - end() - { return iterator(_M_buckets + _M_bucket_count); } + end() noexcept + { return iterator(nullptr); } const_iterator - end() const - { return const_iterator(_M_buckets + _M_bucket_count); } + end() const noexcept + { return const_iterator(nullptr); } const_iterator - cbegin() const - { return const_iterator(_M_buckets + _M_begin_bucket_index); } + cbegin() const noexcept + { return const_iterator(_M_begin()); } const_iterator - cend() const - { return const_iterator(_M_buckets + _M_bucket_count); } + cend() const noexcept + { return const_iterator(nullptr); } size_type - size() const + size() const noexcept { return _M_element_count; } bool - empty() const + empty() const noexcept { return size() == 0; } allocator_type - get_allocator() const + get_allocator() const noexcept { return allocator_type(_M_node_allocator); } size_type - max_size() const + max_size() const noexcept { return _M_node_allocator.max_size(); } // Observers key_equal key_eq() const - { return this->_M_eq; } + { return this->_M_eq(); } // hash_function, if present, comes from _Hash_code_base. 
// Bucket operations size_type - bucket_count() const + bucket_count() const noexcept { return _M_bucket_count; } size_type - max_bucket_count() const + max_bucket_count() const noexcept { return max_size(); } size_type @@ -300,38 +391,38 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION size_type bucket(const key_type& __k) const - { - return this->_M_bucket_index(__k, this->_M_hash_code(__k), - bucket_count()); - } + { return _M_bucket_index(__k, this->_M_hash_code(__k)); } local_iterator begin(size_type __n) - { return local_iterator(_M_buckets[__n]); } + { return local_iterator(_M_bucket_begin(__n), __n, + _M_bucket_count); } local_iterator - end(size_type) - { return local_iterator(0); } + end(size_type __n) + { return local_iterator(nullptr, __n, _M_bucket_count); } const_local_iterator begin(size_type __n) const - { return const_local_iterator(_M_buckets[__n]); } + { return const_local_iterator(_M_bucket_begin(__n), __n, + _M_bucket_count); } const_local_iterator - end(size_type) const - { return const_local_iterator(0); } + end(size_type __n) const + { return const_local_iterator(nullptr, __n, _M_bucket_count); } // DR 691. const_local_iterator cbegin(size_type __n) const - { return const_local_iterator(_M_buckets[__n]); } + { return const_local_iterator(_M_bucket_begin(__n), __n, + _M_bucket_count); } const_local_iterator - cend(size_type) const - { return const_local_iterator(0); } + cend(size_type __n) const + { return const_local_iterator(nullptr, __n, _M_bucket_count); } float - load_factor() const + load_factor() const noexcept { return static_cast(size()) / static_cast(bucket_count()); } @@ -364,23 +455,49 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION equal_range(const key_type& __k) const; private: + // Bucket index computation helpers. + size_type + _M_bucket_index(_Node* __n) const + { return _HCBase::_M_bucket_index(__n, _M_bucket_count); } + + size_type + _M_bucket_index(const key_type& __k, + typename _Hashtable::_Hash_code_type __c) const + { return _HCBase::_M_bucket_index(__k, __c, _M_bucket_count); } + // Find and insert helper functions and types + // Find the node before the one matching the criteria. + _BaseNode* + _M_find_before_node(size_type, const key_type&, + typename _Hashtable::_Hash_code_type) const; + _Node* - _M_find_node(_Node*, const key_type&, - typename _Hashtable::_Hash_code_type) const; + _M_find_node(size_type __bkt, const key_type& __key, + typename _Hashtable::_Hash_code_type __c) const + { + _BaseNode* __before_n = _M_find_before_node(__bkt, __key, __c); + if (__before_n) + return static_cast<_Node*>(__before_n->_M_nxt); + return nullptr; + } - template - iterator - _M_insert_bucket(_Arg&&, size_type, - typename _Hashtable::_Hash_code_type); + // Insert a node at the beginning of a bucket. + void + _M_insert_bucket_begin(size_type, _Node*); - template - std::pair - _M_insert(_Arg&&, std::true_type); + // Remove the bucket first node + void + _M_remove_bucket_begin(size_type __bkt, _Node* __next_n, + size_type __next_bkt); + + // Get the node before __n in the bucket __bkt + _BaseNode* + _M_get_previous_node(size_type __bkt, _BaseNode* __n); template iterator - _M_insert(_Arg&&, std::false_type); + _M_insert_bucket(_Arg&&, size_type, + typename _Hashtable::_Hash_code_type); typedef typename std::conditional<__unique_keys, std::pair, @@ -393,38 +510,57 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION >::type _Insert_Conv_Type; + protected: + template + std::pair + _M_emplace(std::true_type, _Args&&... __args); + + template + iterator + _M_emplace(std::false_type, _Args&&... 
__args); + + template + std::pair + _M_insert(_Arg&&, std::true_type); + + template + iterator + _M_insert(_Arg&&, std::false_type); + public: - // Insert and erase + // Emplace, insert and erase + template + _Insert_Return_Type + emplace(_Args&&... __args) + { return _M_emplace(integral_constant(), + std::forward<_Args>(__args)...); } + + template + iterator + emplace_hint(const_iterator, _Args&&... __args) + { return _Insert_Conv_Type()(emplace(std::forward<_Args>(__args)...)); } + _Insert_Return_Type insert(const value_type& __v) - { return _M_insert(__v, std::integral_constant()); } + { return _M_insert(__v, integral_constant()); } iterator insert(const_iterator, const value_type& __v) { return _Insert_Conv_Type()(insert(__v)); } - _Insert_Return_Type - insert(value_type&& __v) - { return _M_insert(std::move(__v), - std::integral_constant()); } - - iterator - insert(const_iterator, value_type&& __v) - { return _Insert_Conv_Type()(insert(std::move(__v))); } - template::value>::type> + std::enable_if<__and_, + std::is_constructible>::value>::type> _Insert_Return_Type insert(_Pair&& __v) { return _M_insert(std::forward<_Pair>(__v), - std::integral_constant()); } + integral_constant()); } template::value>::type> + std::enable_if<__and_, + std::is_constructible>::value>::type> iterator insert(const_iterator, _Pair&& __v) { return _Insert_Conv_Type()(insert(std::forward<_Pair>(__v))); } @@ -440,6 +576,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION iterator erase(const_iterator); + // LWG 2059. + iterator + erase(iterator __it) + { return erase(const_iterator(__it)); } + size_type erase(const key_type&); @@ -447,7 +588,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION erase(const_iterator, const_iterator); void - clear(); + clear() noexcept; // Set number of buckets to be appropriate for container of n element. void rehash(size_type __n); @@ -456,8 +597,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION // reserve, if present, comes from _Rehash_base. private: - // Unconditionally change size of bucket array to n. - void _M_rehash(size_type __n); + // Helper rehash method used when keys are unique. + void _M_rehash_aux(size_type __n, std::true_type); + + // Helper rehash method used when keys can be non-unique. + void _M_rehash_aux(size_type __n, std::false_type); + + // Unconditionally change size of bucket array to n, restore hash policy + // state to __state on exception. + void _M_rehash(size_type __n, const _RehashPolicyState& __state); }; @@ -478,7 +626,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION __try { _M_node_allocator.construct(__n, std::forward<_Args>(__args)...); - __n->_M_next = 0; return __n; } __catch(...) 
@@ -508,18 +655,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION void _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: - _M_deallocate_nodes(_Node** __array, size_type __n) + _M_deallocate_nodes(_Node* __n) { - for (size_type __i = 0; __i < __n; ++__i) + while (__n) { - _Node* __p = __array[__i]; - while (__p) - { - _Node* __tmp = __p; - __p = __p->_M_next; - _M_deallocate_node(__tmp); - } - __array[__i] = 0; + _Node* __tmp = __n; + __n = __n->_M_next(); + _M_deallocate_node(__tmp); } } @@ -529,18 +671,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION bool __chc, bool __cit, bool __uk> typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, - __chc, __cit, __uk>::_Node** + __chc, __cit, __uk>::_Bucket* _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: _M_allocate_buckets(size_type __n) { _Bucket_allocator_type __alloc(_M_node_allocator); - // We allocate one extra bucket to hold a sentinel, an arbitrary - // non-null pointer. Iterator increment relies on this. - _Node** __p = __alloc.allocate(__n + 1); - std::fill(__p, __p + __n, (_Node*) 0); - __p[__n] = reinterpret_cast<_Node*>(0x1000); + _Bucket* __p = __alloc.allocate(__n); + __builtin_memset(__p, 0, __n * sizeof(_Bucket)); return __p; } @@ -551,10 +690,25 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION void _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: - _M_deallocate_buckets(_Node** __p, size_type __n) + _M_deallocate_buckets(_Bucket* __p, size_type __n) { _Bucket_allocator_type __alloc(_M_node_allocator); - __alloc.deallocate(__p, __n + 1); + __alloc.deallocate(__p, __n); + } + + template + typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, + _Equal, _H1, _H2, _Hash, _RehashPolicy, + __chc, __cit, __uk>::_Node* + _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, + _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: + _M_bucket_begin(size_type __bkt) const + { + _BaseNode* __n = _M_buckets[__bkt]; + return __n ? static_cast<_Node*>(__n->_M_nxt) : nullptr; } template(), - __detail::_Hash_code_base<_Key, _Value, _ExtractKey, _Equal, - _H1, _H2, _Hash, __chc>(__exk, __eq, - __h1, __h2, __h), + __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal, + _H1, _H2, _Hash, __chc>(__exk, __h1, __h2, __h, + __eq), __detail::_Map_base<_Key, _Value, _ExtractKey, __uk, _Hashtable>(), _M_node_allocator(__a), _M_bucket_count(0), @@ -578,8 +732,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _M_rehash_policy() { _M_bucket_count = _M_rehash_policy._M_next_bkt(__bucket_hint); + // We don't want the rehash policy to ask for the hashtable to shrink + // on the first insertion so we need to reset its previous resize level. + _M_rehash_policy._M_prev_resize = 0; _M_buckets = _M_allocate_buckets(_M_bucket_count); - _M_begin_bucket_index = _M_bucket_count; } template(), - __detail::_Hash_code_base<_Key, _Value, _ExtractKey, _Equal, - _H1, _H2, _Hash, __chc>(__exk, __eq, - __h1, __h2, __h), + __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal, + _H1, _H2, _Hash, __chc>(__exk, __h1, __h2, __h, + __eq), __detail::_Map_base<_Key, _Value, _ExtractKey, __uk, _Hashtable>(), _M_node_allocator(__a), _M_bucket_count(0), _M_element_count(0), _M_rehash_policy() { - _M_bucket_count = std::max(_M_rehash_policy._M_next_bkt(__bucket_hint), - _M_rehash_policy. 
-                                 _M_bkt_for_elements(__detail::
-                                                     __distance_fw(__f,
-                                                                   __l)));
+      _M_bucket_count =
+        _M_rehash_policy._M_bkt_for_elements(__detail::__distance_fw(__f,
+                                                                     __l));
+      if (_M_bucket_count <= __bucket_hint)
+        _M_bucket_count = _M_rehash_policy._M_next_bkt(__bucket_hint);
+
+      // We don't want the rehash policy to ask for the hashtable to shrink
+      // on the first insertion so we need to reset its previous resize
+      // level.
+      _M_rehash_policy._M_prev_resize = 0;
      _M_buckets = _M_allocate_buckets(_M_bucket_count);
-      _M_begin_bucket_index = _M_bucket_count;
      __try
        {
          for (; __f != __l; ++__f)
@@ -632,29 +792,39 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
               _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
    _Hashtable(const _Hashtable& __ht)
    : __detail::_Rehash_base<_RehashPolicy, _Hashtable>(__ht),
-      __detail::_Hash_code_base<_Key, _Value, _ExtractKey, _Equal,
+      __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
                                _H1, _H2, _Hash, __chc>(__ht),
      __detail::_Map_base<_Key, _Value, _ExtractKey, __uk, _Hashtable>(__ht),
      _M_node_allocator(__ht._M_node_allocator),
      _M_bucket_count(__ht._M_bucket_count),
-      _M_begin_bucket_index(__ht._M_begin_bucket_index),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      _M_buckets = _M_allocate_buckets(_M_bucket_count);
      __try
        {
-          for (size_type __i = 0; __i < __ht._M_bucket_count; ++__i)
+          if (!__ht._M_before_begin._M_nxt)
+            return;
+
+          // First deal with the special first node pointed to by
+          // _M_before_begin.
+          const _Node* __ht_n = __ht._M_begin();
+          _Node* __this_n = _M_allocate_node(__ht_n->_M_v);
+          this->_M_copy_code(__this_n, __ht_n);
+          _M_before_begin._M_nxt = __this_n;
+          _M_buckets[_M_bucket_index(__this_n)] = &_M_before_begin;
+
+          // Then deal with other nodes.
+          _BaseNode* __prev_n = __this_n;
+          for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
            {
-              _Node* __n = __ht._M_buckets[__i];
-              _Node** __tail = _M_buckets + __i;
-              while (__n)
-                {
-                  *__tail = _M_allocate_node(__n->_M_v);
-                  this->_M_copy_code(*__tail, __n);
-                  __tail = &((*__tail)->_M_next);
-                  __n = __n->_M_next;
-                }
+              __this_n = _M_allocate_node(__ht_n->_M_v);
+              __prev_n->_M_nxt = __this_n;
+              this->_M_copy_code(__this_n, __ht_n);
+              size_type __bkt = _M_bucket_index(__this_n);
+              if (!_M_buckets[__bkt])
+                _M_buckets[__bkt] = __prev_n;
+              __prev_n = __this_n;
            }
        }
      __catch(...)
@@ -673,22 +843,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
    _Hashtable(_Hashtable&& __ht)
    : __detail::_Rehash_base<_RehashPolicy, _Hashtable>(__ht),
-      __detail::_Hash_code_base<_Key, _Value, _ExtractKey, _Equal,
+      __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
                                _H1, _H2, _Hash, __chc>(__ht),
      __detail::_Map_base<_Key, _Value, _ExtractKey, __uk, _Hashtable>(__ht),
-      _M_node_allocator(__ht._M_node_allocator),
+      _M_node_allocator(std::move(__ht._M_node_allocator)),
      _M_buckets(__ht._M_buckets),
      _M_bucket_count(__ht._M_bucket_count),
-      _M_begin_bucket_index(__ht._M_begin_bucket_index),
+      _M_before_begin(__ht._M_before_begin._M_nxt),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
-      size_type __n_bkt = __ht._M_rehash_policy._M_next_bkt(0);
-      __ht._M_buckets = __ht._M_allocate_buckets(__n_bkt);
-      __ht._M_bucket_count = __n_bkt;
-      __ht._M_begin_bucket_index = __ht._M_bucket_count;
-      __ht._M_element_count = 0;
+      // Update, if necessary, the bucket that points to the before-begin
+      // node, which hasn't moved with the stolen bucket array.
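+      // (Hypothetical example: if the stolen array's first element hashes to
+      // bucket 3, that bucket still holds &__ht._M_before_begin and must now
+      // point to this object's _M_before_begin instead.)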
+      if (_M_begin())
+        _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
      __ht._M_rehash_policy = _RehashPolicy();
+      __ht._M_bucket_count = __ht._M_rehash_policy._M_next_bkt(0);
+      __ht._M_buckets = __ht._M_allocate_buckets(__ht._M_bucket_count);
+      __ht._M_before_begin._M_nxt = nullptr;
+      __ht._M_element_count = 0;
    }

  template<typename _Key, typename _Value, typename _Allocator,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           bool __chc, bool __cit, bool __uk>
    _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
-    ~_Hashtable()
+    ~_Hashtable() noexcept
    {
      clear();
      _M_deallocate_buckets(_M_buckets, _M_bucket_count);
@@ -715,8 +887,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
      // The only base class with member variables is hash_code_base.  We
      // define _Hash_code_base::_M_swap because different specializations
      // have different members.
-      __detail::_Hash_code_base<_Key, _Value, _ExtractKey, _Equal,
-                                _H1, _H2, _Hash, __chc>::_M_swap(__x);
+      this->_M_swap(__x);

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 431. Swapping containers with unequal allocators.
@@ -726,8 +897,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
      std::swap(_M_rehash_policy, __x._M_rehash_policy);
      std::swap(_M_buckets, __x._M_buckets);
      std::swap(_M_bucket_count, __x._M_bucket_count);
-      std::swap(_M_begin_bucket_index, __x._M_begin_bucket_index);
+      std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
      std::swap(_M_element_count, __x._M_element_count);
+
+      // Fix buckets containing the _M_before_begin pointers that can't be
+      // swapped.
+      if (_M_begin())
+        _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
+      if (__x._M_begin())
+        __x._M_buckets[__x._M_bucket_index(__x._M_begin())]
+          = &(__x._M_before_begin);
    }

  template<typename _Key, typename _Value, typename _Allocator,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           bool __chc, bool __cit, bool __uk>
    void
    _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
    __rehash_policy(const _RehashPolicy& __pol)
    {
-      _M_rehash_policy = __pol;
      size_type __n_bkt = __pol._M_bkt_for_elements(_M_element_count);
-      if (__n_bkt > _M_bucket_count)
-        _M_rehash(__n_bkt);
+      if (__n_bkt != _M_bucket_count)
+        _M_rehash(__n_bkt, _M_rehash_policy._M_state());
+      _M_rehash_policy = __pol;
    }

  template<typename _Key, typename _Value, typename _Allocator,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           bool __chc, bool __cit, bool __uk>
    typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
                        _H1, _H2, _Hash, _RehashPolicy,
                        __chc, __cit, __uk>::iterator
    _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
    find(const key_type& __k)
    {
      typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
-      std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
-      _Node* __p = _M_find_node(_M_buckets[__n], __k, __code);
-      return __p ? iterator(__p, _M_buckets + __n) : this->end();
+      std::size_t __n = _M_bucket_index(__k, __code);
+      _Node* __p = _M_find_node(__n, __k, __code);
+      return __p ? iterator(__p) : this->end();
    }

  template<typename _Key, typename _Value, typename _Allocator,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           bool __chc, bool __cit, bool __uk>
    typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
                        _H1, _H2, _Hash, _RehashPolicy,
                        __chc, __cit, __uk>::const_iterator
    _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
    find(const key_type& __k) const
    {
      typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
-      std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
-      _Node* __p = _M_find_node(_M_buckets[__n], __k, __code);
-      return __p ? const_iterator(__p, _M_buckets + __n) : this->end();
+      std::size_t __n = _M_bucket_index(__k, __code);
+      _Node* __p = _M_find_node(__n, __k, __code);
+      return __p ? const_iterator(__p) : this->end();
    }

  template<typename _Key, typename _Value, typename _Allocator,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           bool __chc, bool __cit, bool __uk>
    typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
                        _H1, _H2, _Hash, _RehashPolicy,
                        __chc, __cit, __uk>::size_type
    _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
    count(const key_type& __k) const
    {
      typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
-      std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
+      std::size_t __n = _M_bucket_index(__k, __code);
+      _Node* __p = _M_bucket_begin(__n);
+      if (!__p)
+        return 0;
+
      std::size_t __result = 0;
-      for (_Node* __p = _M_buckets[__n]; __p; __p = __p->_M_next)
-        if (this->_M_compare(__k, __code, __p))
-          ++__result;
+      for (;; __p = __p->_M_next())
+        {
+          if (this->_M_equals(__k, __code, __p))
+            ++__result;
+          else if (__result)
+            // All equivalent values are next to each other; once we find a
+            // non-equivalent value after an equivalent one we won't find
+            // any more equivalent values.
+ break; + if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __n) + break; + } return __result; } @@ -816,22 +1007,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION equal_range(const key_type& __k) { typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); - std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count); - _Node** __head = _M_buckets + __n; - _Node* __p = _M_find_node(*__head, __k, __code); + std::size_t __n = _M_bucket_index(__k, __code); + _Node* __p = _M_find_node(__n, __k, __code); if (__p) { - _Node* __p1 = __p->_M_next; - for (; __p1; __p1 = __p1->_M_next) - if (!this->_M_compare(__k, __code, __p1)) - break; - - iterator __first(__p, __head); - iterator __last(__p1, __head); - if (!__p1) - __last._M_incr_bucket(); - return std::make_pair(__first, __last); + _Node* __p1 = __p->_M_next(); + while (__p1 && _M_bucket_index(__p1) == __n + && this->_M_equals(__k, __code, __p1)) + __p1 = __p1->_M_next(); + + return std::make_pair(iterator(__p), iterator(__p1)); } else return std::make_pair(this->end(), this->end()); @@ -854,47 +1040,230 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION equal_range(const key_type& __k) const { typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); - std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count); - _Node** __head = _M_buckets + __n; - _Node* __p = _M_find_node(*__head, __k, __code); + std::size_t __n = _M_bucket_index(__k, __code); + _Node* __p = _M_find_node(__n, __k, __code); if (__p) { - _Node* __p1 = __p->_M_next; - for (; __p1; __p1 = __p1->_M_next) - if (!this->_M_compare(__k, __code, __p1)) - break; - - const_iterator __first(__p, __head); - const_iterator __last(__p1, __head); - if (!__p1) - __last._M_incr_bucket(); - return std::make_pair(__first, __last); + _Node* __p1 = __p->_M_next(); + while (__p1 && _M_bucket_index(__p1) == __n + && this->_M_equals(__k, __code, __p1)) + __p1 = __p1->_M_next(); + + return std::make_pair(const_iterator(__p), const_iterator(__p1)); } else return std::make_pair(this->end(), this->end()); } - // Find the node whose key compares equal to k, beginning the search - // at p (usually the head of a bucket). Return nil if no node is found. + // Find the node whose key compares equal to k in the bucket n. Return nullptr + // if no node is found. template typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, - __chc, __cit, __uk>::_Node* + __chc, __cit, __uk>::_BaseNode* + _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, + _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: + _M_find_before_node(size_type __n, const key_type& __k, + typename _Hashtable::_Hash_code_type __code) const + { + _BaseNode* __prev_p = _M_buckets[__n]; + if (!__prev_p) + return nullptr; + _Node* __p = static_cast<_Node*>(__prev_p->_M_nxt); + for (;; __p = __p->_M_next()) + { + if (this->_M_equals(__k, __code, __p)) + return __prev_p; + if (!(__p->_M_nxt) || _M_bucket_index(__p->_M_next()) != __n) + break; + __prev_p = __p; + } + return nullptr; + } + + template + void + _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, + _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: + _M_insert_bucket_begin(size_type __bkt, _Node* __new_node) + { + if (_M_buckets[__bkt]) + { + // Bucket is not empty, we just need to insert the new node after the + // bucket before begin. 
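+          // (Hypothetical example: a bucket holding [b, x, y], where b is its
+          // before-begin node, becomes [b, new, x, y]; b itself is untouched.)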
+ __new_node->_M_nxt = _M_buckets[__bkt]->_M_nxt; + _M_buckets[__bkt]->_M_nxt = __new_node; + } + else + { + // The bucket is empty, the new node is inserted at the beginning of + // the singly linked list and the bucket will contain _M_before_begin + // pointer. + __new_node->_M_nxt = _M_before_begin._M_nxt; + _M_before_begin._M_nxt = __new_node; + if (__new_node->_M_nxt) + // We must update former begin bucket that is pointing to + // _M_before_begin. + _M_buckets[_M_bucket_index(__new_node->_M_next())] = __new_node; + _M_buckets[__bkt] = &_M_before_begin; + } + } + + template + void + _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, + _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: + _M_remove_bucket_begin(size_type __bkt, _Node* __next, size_type __next_bkt) + { + if (!__next || __next_bkt != __bkt) + { + // Bucket is now empty + // First update next bucket if any + if (__next) + _M_buckets[__next_bkt] = _M_buckets[__bkt]; + // Second update before begin node if necessary + if (&_M_before_begin == _M_buckets[__bkt]) + _M_before_begin._M_nxt = __next; + _M_buckets[__bkt] = nullptr; + } + } + + template + typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, + _Equal, _H1, _H2, _Hash, _RehashPolicy, + __chc, __cit, __uk>::_BaseNode* _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: - _M_find_node(_Node* __p, const key_type& __k, - typename _Hashtable::_Hash_code_type __code) const + _M_get_previous_node(size_type __bkt, _BaseNode* __n) { - for (; __p; __p = __p->_M_next) - if (this->_M_compare(__k, __code, __p)) - return __p; - return false; + _BaseNode* __prev_n = _M_buckets[__bkt]; + while (__prev_n->_M_nxt != __n) + __prev_n = __prev_n->_M_nxt; + return __prev_n; } + template + template + std::pair::iterator, bool> + _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, + _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: + _M_emplace(std::true_type, _Args&&... __args) + { + // First build the node to get access to the hash code + _Node* __new_node = _M_allocate_node(std::forward<_Args>(__args)...); + __try + { + const key_type& __k = this->_M_extract()(__new_node->_M_v); + typename _Hashtable::_Hash_code_type __code + = this->_M_hash_code(__k); + size_type __bkt = _M_bucket_index(__k, __code); + + if (_Node* __p = _M_find_node(__bkt, __k, __code)) + { + // There is already an equivalent node, no insertion + _M_deallocate_node(__new_node); + return std::make_pair(iterator(__p), false); + } + + // We are going to insert this node + this->_M_store_code(__new_node, __code); + const _RehashPolicyState& __saved_state + = _M_rehash_policy._M_state(); + std::pair __do_rehash + = _M_rehash_policy._M_need_rehash(_M_bucket_count, + _M_element_count, 1); + + if (__do_rehash.first) + { + _M_rehash(__do_rehash.second, __saved_state); + __bkt = _M_bucket_index(__k, __code); + } + + _M_insert_bucket_begin(__bkt, __new_node); + ++_M_element_count; + return std::make_pair(iterator(__new_node), true); + } + __catch(...) + { + _M_deallocate_node(__new_node); + __throw_exception_again; + } + } + + template + template + typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, + _H1, _H2, _Hash, _RehashPolicy, + __chc, __cit, __uk>::iterator + _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, + _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: + _M_emplace(std::false_type, _Args&&... 
__args) + { + const _RehashPolicyState& __saved_state = _M_rehash_policy._M_state(); + std::pair __do_rehash + = _M_rehash_policy._M_need_rehash(_M_bucket_count, + _M_element_count, 1); + + // First build the node to get its hash code. + _Node* __new_node = _M_allocate_node(std::forward<_Args>(__args)...); + __try + { + const key_type& __k = this->_M_extract()(__new_node->_M_v); + typename _Hashtable::_Hash_code_type __code + = this->_M_hash_code(__k); + this->_M_store_code(__new_node, __code); + + // Second, do rehash if necessary. + if (__do_rehash.first) + _M_rehash(__do_rehash.second, __saved_state); + + // Third, find the node before an equivalent one. + size_type __bkt = _M_bucket_index(__k, __code); + _BaseNode* __prev = _M_find_before_node(__bkt, __k, __code); + + if (__prev) + { + // Insert after the node before the equivalent one. + __new_node->_M_nxt = __prev->_M_nxt; + __prev->_M_nxt = __new_node; + } + else + // The inserted node has no equivalent in the hashtable. We must + // insert the new node at the beginning of the bucket to preserve + // equivalent elements relative positions. + _M_insert_bucket_begin(__bkt, __new_node); + ++_M_element_count; + return iterator(__new_node); + } + __catch(...) + { + _M_deallocate_node(__new_node); + __throw_exception_again; + } + } + // Insert v in bucket n (assumes no element with its key already present). template __do_rehash = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1); if (__do_rehash.first) { - const key_type& __k = this->_M_extract(__v); - __n = this->_M_bucket_index(__k, __code, __do_rehash.second); + const key_type& __k = this->_M_extract()(__v); + __n = _HCBase::_M_bucket_index(__k, __code, __do_rehash.second); } - // Allocate the new node before doing the rehash so that we don't - // do a rehash if the allocation throws. - _Node* __new_node = _M_allocate_node(std::forward<_Arg>(__v)); - + _Node* __new_node = nullptr; __try { + // Allocate the new node before doing the rehash so that we + // don't do a rehash if the allocation throws. + __new_node = _M_allocate_node(std::forward<_Arg>(__v)); + this->_M_store_code(__new_node, __code); if (__do_rehash.first) - _M_rehash(__do_rehash.second); + _M_rehash(__do_rehash.second, __saved_state); - __new_node->_M_next = _M_buckets[__n]; - this->_M_store_code(__new_node, __code); - _M_buckets[__n] = __new_node; + _M_insert_bucket_begin(__n, __new_node); ++_M_element_count; - if (__n < _M_begin_bucket_index) - _M_begin_bucket_index = __n; - return iterator(__new_node, _M_buckets + __n); + return iterator(__new_node); } __catch(...) 
{ - _M_deallocate_node(__new_node); + if (!__new_node) + _M_rehash_policy._M_reset(__saved_state); + else + _M_deallocate_node(__new_node); __throw_exception_again; } } @@ -957,12 +1327,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: _M_insert(_Arg&& __v, std::true_type) { - const key_type& __k = this->_M_extract(__v); + const key_type& __k = this->_M_extract()(__v); typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); - size_type __n = this->_M_bucket_index(__k, __code, _M_bucket_count); + size_type __n = _M_bucket_index(__k, __code); - if (_Node* __p = _M_find_node(_M_buckets[__n], __k, __code)) - return std::make_pair(iterator(__p, _M_buckets + __n), false); + if (_Node* __p = _M_find_node(__n, __k, __code)) + return std::make_pair(iterator(__p), false); return std::make_pair(_M_insert_bucket(std::forward<_Arg>(__v), __n, __code), true); } @@ -980,36 +1350,51 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: _M_insert(_Arg&& __v, std::false_type) { + const _RehashPolicyState& __saved_state = _M_rehash_policy._M_state(); std::pair __do_rehash = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1); - if (__do_rehash.first) - _M_rehash(__do_rehash.second); - - const key_type& __k = this->_M_extract(__v); - typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); - size_type __n = this->_M_bucket_index(__k, __code, _M_bucket_count); - // First find the node, avoid leaking new_node if compare throws. - _Node* __prev = _M_find_node(_M_buckets[__n], __k, __code); - _Node* __new_node = _M_allocate_node(std::forward<_Arg>(__v)); + // First compute the hash code so that we don't do anything if it throws. + typename _Hashtable::_Hash_code_type __code + = this->_M_hash_code(this->_M_extract()(__v)); - if (__prev) + _Node* __new_node = nullptr; + __try { - __new_node->_M_next = __prev->_M_next; - __prev->_M_next = __new_node; + // Second allocate new node so that we don't rehash if it throws. + __new_node = _M_allocate_node(std::forward<_Arg>(__v)); + this->_M_store_code(__new_node, __code); + if (__do_rehash.first) + _M_rehash(__do_rehash.second, __saved_state); + + // Third, find the node before an equivalent one. + size_type __bkt = _M_bucket_index(__new_node); + _BaseNode* __prev + = _M_find_before_node(__bkt, this->_M_extract()(__new_node->_M_v), + __code); + if (__prev) + { + // Insert after the node before the equivalent one. + __new_node->_M_nxt = __prev->_M_nxt; + __prev->_M_nxt = __new_node; + } + else + // The inserted node has no equivalent in the hashtable. We must + // insert the new node at the beginning of the bucket to preserve + // equivalent elements relative positions. + _M_insert_bucket_begin(__bkt, __new_node); + ++_M_element_count; + return iterator(__new_node); } - else + __catch(...) 
{ - __new_node->_M_next = _M_buckets[__n]; - _M_buckets[__n] = __new_node; - if (__n < _M_begin_bucket_index) - _M_begin_bucket_index = __n; + if (!__new_node) + _M_rehash_policy._M_reset(__saved_state); + else + _M_deallocate_node(__new_node); + __throw_exception_again; } - this->_M_store_code(__new_node, __code); - - ++_M_element_count; - return iterator(__new_node, _M_buckets + __n); } template __do_rehash = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, __n_elt); if (__do_rehash.first) - _M_rehash(__do_rehash.second); + _M_rehash(__do_rehash.second, __saved_state); for (; __first != __last; ++__first) this->insert(*__first); @@ -1044,31 +1430,25 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: erase(const_iterator __it) { - iterator __result(__it._M_cur_node, __it._M_cur_bucket); - ++__result; - - _Node* __cur = *__it._M_cur_bucket; - if (__cur == __it._M_cur_node) + _Node* __n = __it._M_cur; + std::size_t __bkt = _M_bucket_index(__n); + + // Look for previous node to unlink it from the erased one, this is why + // we need buckets to contain the before begin to make this research fast. + _BaseNode* __prev_n = _M_get_previous_node(__bkt, __n); + if (__n == _M_bucket_begin(__bkt)) + _M_remove_bucket_begin(__bkt, __n->_M_next(), + __n->_M_nxt ? _M_bucket_index(__n->_M_next()) : 0); + else if (__n->_M_nxt) { - *__it._M_cur_bucket = __cur->_M_next; - - // If _M_begin_bucket_index no longer indexes the first non-empty - // bucket - its single node is being erased - update it. - if (!_M_buckets[_M_begin_bucket_index]) - _M_begin_bucket_index = __result._M_cur_bucket - _M_buckets; - } - else - { - _Node* __next = __cur->_M_next; - while (__next != __it._M_cur_node) - { - __cur = __next; - __next = __cur->_M_next; - } - __cur->_M_next = __next->_M_next; + size_type __next_bkt = _M_bucket_index(__n->_M_next()); + if (__next_bkt != __bkt) + _M_buckets[__next_bkt] = __prev_n; } - _M_deallocate_node(__it._M_cur_node); + __prev_n->_M_nxt = __n->_M_nxt; + iterator __result(__n->_M_next()); + _M_deallocate_node(__n); --_M_element_count; return __result; @@ -1086,64 +1466,50 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION erase(const key_type& __k) { typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); - std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count); + std::size_t __bkt = _M_bucket_index(__k, __code); + // Look for the node before the first matching node. + _BaseNode* __prev_n = _M_find_before_node(__bkt, __k, __code); + if (!__prev_n) + return 0; + _Node* __n = static_cast<_Node*>(__prev_n->_M_nxt); + bool __is_bucket_begin = _M_buckets[__bkt] == __prev_n; + + // We found a matching node, start deallocation loop from it + std::size_t __next_bkt = __bkt; + _Node* __next_n = __n; size_type __result = 0; - - _Node** __slot = _M_buckets + __n; - while (*__slot && !this->_M_compare(__k, __code, *__slot)) - __slot = &((*__slot)->_M_next); - - _Node** __saved_slot = 0; - while (*__slot && this->_M_compare(__k, __code, *__slot)) + _Node* __saved_n = nullptr; + do { + _Node* __p = __next_n; + __next_n = __p->_M_next(); // _GLIBCXX_RESOLVE_LIB_DEFECTS // 526. Is it undefined if a function in the standard changes // in parameters? 
- if (std::__addressof(this->_M_extract((*__slot)->_M_v)) + if (std::__addressof(this->_M_extract()(__p->_M_v)) != std::__addressof(__k)) - { - _Node* __p = *__slot; - *__slot = __p->_M_next; - _M_deallocate_node(__p); - --_M_element_count; - ++__result; - } + _M_deallocate_node(__p); else - { - __saved_slot = __slot; - __slot = &((*__slot)->_M_next); - } - } - - if (__saved_slot) - { - _Node* __p = *__saved_slot; - *__saved_slot = __p->_M_next; - _M_deallocate_node(__p); + __saved_n = __p; --_M_element_count; ++__result; + if (!__next_n) + break; + __next_bkt = _M_bucket_index(__next_n); } - - // If the entire bucket indexed by _M_begin_bucket_index has been - // erased look forward for the first non-empty bucket. - if (!_M_buckets[_M_begin_bucket_index]) - { - if (!_M_element_count) - _M_begin_bucket_index = _M_bucket_count; - else - { - ++_M_begin_bucket_index; - while (!_M_buckets[_M_begin_bucket_index]) - ++_M_begin_bucket_index; - } - } - + while (__next_bkt == __bkt && this->_M_equals(__k, __code, __next_n)); + + if (__saved_n) + _M_deallocate_node(__saved_n); + if (__is_bucket_begin) + _M_remove_bucket_begin(__bkt, __next_n, __next_bkt); + else if (__next_n && __next_bkt != __bkt) + _M_buckets[__next_bkt] = __prev_n; + if (__prev_n) + __prev_n->_M_nxt = __next_n; return __result; } - // ??? This could be optimized by taking advantage of the bucket - // structure, but it's not clear that it's worth doing. It probably - // wouldn't even be an optimization unless the load factor is large. template:: erase(const_iterator __first, const_iterator __last) { - while (__first != __last) - __first = this->erase(__first); - return iterator(__last._M_cur_node, __last._M_cur_bucket); + _Node* __n = __first._M_cur; + _Node* __last_n = __last._M_cur; + if (__n == __last_n) + return iterator(__n); + + std::size_t __bkt = _M_bucket_index(__n); + + _BaseNode* __prev_n = _M_get_previous_node(__bkt, __n); + bool __is_bucket_begin = __n == _M_bucket_begin(__bkt); + std::size_t __n_bkt = __bkt; + for (;;) + { + do + { + _Node* __tmp = __n; + __n = __n->_M_next(); + _M_deallocate_node(__tmp); + --_M_element_count; + if (!__n) + break; + __n_bkt = _M_bucket_index(__n); + } + while (__n != __last_n && __n_bkt == __bkt); + if (__is_bucket_begin) + _M_remove_bucket_begin(__bkt, __n, __n_bkt); + if (__n == __last_n) + break; + __is_bucket_begin = true; + __bkt = __n_bkt; + } + + if (__n && (__n_bkt != __bkt || __is_bucket_begin)) + _M_buckets[__n_bkt] = __prev_n; + __prev_n->_M_nxt = __n; + return iterator(__n); } template:: - clear() + clear() noexcept { - _M_deallocate_nodes(_M_buckets, _M_bucket_count); + _M_deallocate_nodes(_M_begin()); + __builtin_memset(_M_buckets, 0, _M_bucket_count * sizeof(_Bucket)); _M_element_count = 0; - _M_begin_bucket_index = _M_bucket_count; + _M_before_begin._M_nxt = nullptr; } template:: rehash(size_type __n) { - _M_rehash(std::max(_M_rehash_policy._M_next_bkt(__n), - _M_rehash_policy._M_bkt_for_elements(_M_element_count - + 1))); + const _RehashPolicyState& __saved_state = _M_rehash_policy._M_state(); + std::size_t __buckets + = _M_rehash_policy._M_bkt_for_elements(_M_element_count + 1); + if (__buckets <= __n) + __buckets = _M_rehash_policy._M_next_bkt(__n); + + if (__buckets != _M_bucket_count) + { + _M_rehash(__buckets, __saved_state); + + // We don't want the rehash policy to ask for the hashtable to shrink + // on the next insertion so we need to reset its previous resize + // level. 
+          _M_rehash_policy._M_prev_resize = 0;
+        }
    }

  template<typename _Key, typename _Value, typename _Allocator,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           bool __chc, bool __cit, bool __uk>
    void
    _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
-    _M_rehash(size_type __n)
+    _M_rehash(size_type __n, const _RehashPolicyState& __state)
    {
-      _Node** __new_array = _M_allocate_buckets(__n);
      __try
        {
-          _M_begin_bucket_index = __n;
-          for (size_type __i = 0; __i < _M_bucket_count; ++__i)
-            while (_Node* __p = _M_buckets[__i])
-              {
-                std::size_t __new_index = this->_M_bucket_index(__p, __n);
-                _M_buckets[__i] = __p->_M_next;
-                __p->_M_next = __new_array[__new_index];
-                __new_array[__new_index] = __p;
-                if (__new_index < _M_begin_bucket_index)
-                  _M_begin_bucket_index = __new_index;
-              }
-          _M_deallocate_buckets(_M_buckets, _M_bucket_count);
-          _M_bucket_count = __n;
-          _M_buckets = __new_array;
+          _M_rehash_aux(__n, integral_constant<bool, __uk>());
        }
      __catch(...)
        {
-          // A failure here means that a hash function threw an exception.
-          // We can't restore the previous state without calling the hash
-          // function again, so the only sensible recovery is to delete
-          // everything.
-          _M_deallocate_nodes(__new_array, __n);
-          _M_deallocate_buckets(__new_array, __n);
-          _M_deallocate_nodes(_M_buckets, _M_bucket_count);
-          _M_element_count = 0;
-          _M_begin_bucket_index = _M_bucket_count;
+          // A failure here means that bucket allocation failed.  We only
+          // have to restore the hash policy's previous state.
+          _M_rehash_policy._M_reset(__state);
          __throw_exception_again;
        }
    }

+  // Rehash when there are no equivalent elements.
+  template<typename _Key, typename _Value, typename _Allocator,
+           typename _ExtractKey, typename _Equal,
+           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
+           bool __chc, bool __cit, bool __uk>
+    void
+    _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
+               _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
+    _M_rehash_aux(size_type __n, std::true_type)
+    {
+      _Bucket* __new_buckets = _M_allocate_buckets(__n);
+      _Node* __p = _M_begin();
+      _M_before_begin._M_nxt = nullptr;
+      std::size_t __bbegin_bkt;
+      while (__p)
+        {
+          _Node* __next = __p->_M_next();
+          std::size_t __bkt = _HCBase::_M_bucket_index(__p, __n);
+          if (!__new_buckets[__bkt])
+            {
+              __p->_M_nxt = _M_before_begin._M_nxt;
+              _M_before_begin._M_nxt = __p;
+              __new_buckets[__bkt] = &_M_before_begin;
+              if (__p->_M_nxt)
+                __new_buckets[__bbegin_bkt] = __p;
+              __bbegin_bkt = __bkt;
+            }
+          else
+            {
+              __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
+              __new_buckets[__bkt]->_M_nxt = __p;
+            }
+          __p = __next;
+        }
+      _M_deallocate_buckets(_M_buckets, _M_bucket_count);
+      _M_bucket_count = __n;
+      _M_buckets = __new_buckets;
+    }
+
+  // Rehash when there can be equivalent elements, preserving their relative
+  // order.
+  template<typename _Key, typename _Value, typename _Allocator,
+           typename _ExtractKey, typename _Equal,
+           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
+           bool __chc, bool __cit, bool __uk>
+    void
+    _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
+               _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
+    _M_rehash_aux(size_type __n, std::false_type)
+    {
+      _Bucket* __new_buckets = _M_allocate_buckets(__n);
+
+      _Node* __p = _M_begin();
+      _M_before_begin._M_nxt = nullptr;
+      std::size_t __bbegin_bkt;
+      std::size_t __prev_bkt;
+      _Node* __prev_p = nullptr;
+      bool __check_bucket = false;
+
+      while (__p)
+        {
+          _Node* __next = __p->_M_next();
+          std::size_t __bkt = _HCBase::_M_bucket_index(__p, __n);
+
+          if (__prev_p && __prev_bkt == __bkt)
+            {
+              // Previous insert was already in this bucket, we insert after
+              // the previously inserted one to preserve equivalent elements
+              // relative order.
+              __p->_M_nxt = __prev_p->_M_nxt;
+              __prev_p->_M_nxt = __p;
+
+              // Inserting after a node in a bucket requires checking that we
+              // haven't changed the bucket's last node: if we have, the next
+              // bucket, which stores this bucket's last node as its
+              // before-begin node, must be updated.  We schedule a check as
+              // soon as we move out of the sequence of equivalent nodes to
+              // limit the number of checks.
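+              // (Hypothetical example: if A1 and A2 are appended to bucket 3
+              // and the node after A2 hashes to bucket 5, bucket 5 must be
+              // updated to hold A2, its new before-begin node.)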
+ __check_bucket = true; + } + else + { + if (__check_bucket) + { + // Check if we shall update the next bucket because of insertions + // into __prev_bkt bucket. + if (__prev_p->_M_nxt) + { + std::size_t __next_bkt + = _HCBase::_M_bucket_index(__prev_p->_M_next(), __n); + if (__next_bkt != __prev_bkt) + __new_buckets[__next_bkt] = __prev_p; + } + __check_bucket = false; + } + if (!__new_buckets[__bkt]) + { + __p->_M_nxt = _M_before_begin._M_nxt; + _M_before_begin._M_nxt = __p; + __new_buckets[__bkt] = &_M_before_begin; + if (__p->_M_nxt) + __new_buckets[__bbegin_bkt] = __p; + __bbegin_bkt = __bkt; + } + else + { + __p->_M_nxt = __new_buckets[__bkt]->_M_nxt; + __new_buckets[__bkt]->_M_nxt = __p; + } + } + + __prev_p = __p; + __prev_bkt = __bkt; + __p = __next; + } + + if (__check_bucket && __prev_p->_M_nxt) + { + std::size_t __next_bkt + = _HCBase::_M_bucket_index(__prev_p->_M_next(), __n); + if (__next_bkt != __prev_bkt) + __new_buckets[__next_bkt] = __prev_p; + } + + _M_deallocate_buckets(_M_buckets, _M_bucket_count); + _M_bucket_count = __n; + _M_buckets = __new_buckets; + } + _GLIBCXX_END_NAMESPACE_VERSION } // namespace std
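As an aside, the bucket scheme this patch introduces — each bucket stores the node *before* its first element, with _M_before_begin acting as a global before-begin node — can be demonstrated outside libstdc++. The sketch below is a minimal, hypothetical illustration (MiniTable, NodeBase, and the int-keyed Node are invented here; this is not the libstdc++ implementation) of the same insert and lookup logic with cached hash codes:

  #include <cstddef>
  #include <functional>
  #include <iostream>
  #include <vector>

  // Node layout loosely mirroring _Hash_node_base/_Hash_node with a cached
  // hash code (the __cache_hash_code == true case).
  struct NodeBase { NodeBase* next = nullptr; };

  struct Node : NodeBase {
    int value;          // payload; int keys keep the sketch short
    std::size_t code;   // cached hash code
    explicit Node(int v) : value(v), code(std::hash<int>{}(v)) {}
  };

  class MiniTable {
    NodeBase before_begin_;          // plays the role of _M_before_begin
    std::vector<NodeBase*> buckets_; // plays the role of _M_buckets

    std::size_t index(std::size_t code) const { return code % buckets_.size(); }

  public:
    explicit MiniTable(std::size_t n) : buckets_(n, nullptr) {}
    MiniTable(const MiniTable&) = delete;
    MiniTable& operator=(const MiniTable&) = delete;

    // Mirrors _M_insert_bucket_begin: every bucket stores the node *before*
    // its first element, so insertion is an insert-after on a forward list.
    void insert(int v) {
      Node* n = new Node(v);
      std::size_t bkt = index(n->code);
      if (buckets_[bkt]) {
        // Non-empty bucket: splice right after its before-begin node.
        n->next = buckets_[bkt]->next;
        buckets_[bkt]->next = n;
      } else {
        // Empty bucket: push at the global head.  The bucket that owned the
        // old head must now use the new node as its before-begin node.
        n->next = before_begin_.next;
        before_begin_.next = n;
        if (n->next)
          buckets_[index(static_cast<Node*>(n->next)->code)] = n;
        buckets_[bkt] = &before_begin_;
      }
    }

    // Walks a single bucket and stops once a node's hash maps to another
    // bucket, like the count()/_M_find_before_node loops in the patch.
    bool contains(int v) const {
      std::size_t code = std::hash<int>{}(v);
      std::size_t bkt = index(code);
      if (!buckets_[bkt])
        return false;
      for (Node* p = static_cast<Node*>(buckets_[bkt]->next); p;
           p = static_cast<Node*>(p->next)) {
        if (p->code == code && p->value == v)
          return true;
        if (index(p->code) != bkt)  // left the bucket: not found
          return false;
      }
      return false;
    }

    ~MiniTable() {
      for (NodeBase* p = before_begin_.next; p;) {
        NodeBase* dead = p;
        p = p->next;
        delete static_cast<Node*>(dead);
      }
    }
  };

  int main() {
    MiniTable t(4);
    t.insert(1);
    t.insert(5); // typically collides with 1 (std::hash<int> is identity-like)
    t.insert(2);
    std::cout << t.contains(5) << ' ' << t.contains(3) << '\n'; // prints "1 0"
  }

The point of storing the previous node rather than the first node is visible in insert() and contains(): every link update touches only a node's next pointer, exactly as std::forward_list::insert_after and erase_after do, so buckets never need back pointers and erasure never rescans the whole bucket array.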