// reserve, if present, comes from _Rehash_base.
private:
- // Unconditionally change size of bucket array to n.
- void _M_rehash(size_type __n);
+ // Unconditionally change size of bucket array to n, restore hash policy
+ // resize value to __next_resize on exception.
+ // __next_resize is the value _M_rehash_policy._M_next_resize held before
+ // the caller consulted the policy (see the call sites, which snapshot it).
+ void _M_rehash(size_type __n, size_type __next_resize);
};
_M_rehash_policy = __pol;
size_type __n_bkt = __pol._M_bkt_for_elements(_M_element_count);
if (__n_bkt > _M_bucket_count)
- _M_rehash(__n_bkt);
+ // The policy assignment above is kept even if the rehash throws: on
+ // failure _M_rehash restores _M_next_resize to the new policy's value,
+ // not to the pre-assignment one.
+ _M_rehash(__n_bkt, __pol._M_next_resize);
}
template<typename _Key, typename _Value,
_M_insert_bucket(_Arg&& __v, size_type __n,
typename _Hashtable::_Hash_code_type __code)
{
+ // Snapshot the resize threshold: _M_need_rehash below may advance it,
+ // and it must be rolled back if the node allocation fails before any
+ // rehash has taken place.
+ const size_type __saved_next_resize = _M_rehash_policy._M_next_resize;
std::pair<bool, std::size_t> __do_rehash
= _M_rehash_policy._M_need_rehash(_M_bucket_count,
_M_element_count, 1);
__n = this->_M_bucket_index(__k, __code, __do_rehash.second);
}
- // Allocate the new node before doing the rehash so that we don't
- // do a rehash if the allocation throws.
- _Node* __new_node = _M_allocate_node(std::forward<_Arg>(__v));
-
+ // Null sentinel lets the catch block tell an allocation failure apart
+ // from a failure in _M_rehash or the code after it.
+ _Node* __new_node = 0;
__try
{
+ // Allocate the new node before doing the rehash so that we
+ // don't do a rehash if the allocation throws.
+ __new_node = _M_allocate_node(std::forward<_Arg>(__v));
if (__do_rehash.first)
- _M_rehash(__do_rehash.second);
+ _M_rehash(__do_rehash.second, __saved_next_resize);
__new_node->_M_next = _M_buckets[__n];
this->_M_store_code(__new_node, __code);
}
__catch(...)
{
- _M_deallocate_node(__new_node);
+ // __new_node == 0: the allocation itself threw before any rehash, so
+ // only the saved resize threshold needs restoring. Otherwise the
+ // throw came from _M_rehash (which restores the policy itself) or
+ // from code after it, and the already-allocated node must be freed.
+ if (!__new_node)
+ _M_rehash_policy._M_next_resize = __saved_next_resize;
+ else
+ _M_deallocate_node(__new_node);
__throw_exception_again;
}
}
_H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
_M_insert(_Arg&& __v, std::false_type)
{
+ // Snapshot the resize threshold so _M_rehash can restore it if the
+ // bucket reallocation throws.
+ const size_type __saved_next_resize = _M_rehash_policy._M_next_resize;
std::pair<bool, std::size_t> __do_rehash
= _M_rehash_policy._M_need_rehash(_M_bucket_count,
_M_element_count, 1);
if (__do_rehash.first)
- _M_rehash(__do_rehash.second);
+ _M_rehash(__do_rehash.second, __saved_next_resize);
const key_type& __k = this->_M_extract(__v);
typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
insert(_InputIterator __first, _InputIterator __last)
{
size_type __n_elt = __detail::__distance_fw(__first, __last);
+ // Snapshot the resize threshold so _M_rehash can restore it if the
+ // up-front rehash for __n_elt insertions throws.
+ const size_type __saved_next_resize = _M_rehash_policy._M_next_resize;
std::pair<bool, std::size_t> __do_rehash
= _M_rehash_policy._M_need_rehash(_M_bucket_count,
_M_element_count, __n_elt);
if (__do_rehash.first)
- _M_rehash(__do_rehash.second);
+ _M_rehash(__do_rehash.second, __saved_next_resize);
for (; __first != __last; ++__first)
this->insert(*__first);
_H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
rehash(size_type __n)
{
+ // Saved before querying the policy below — _M_next_bkt presumably
+ // updates _M_next_resize as a side effect (hence this snapshot), so
+ // _M_rehash needs the pre-call value to roll back on failure.
+ const size_type __saved_next_resize = _M_rehash_policy._M_next_resize;
_M_rehash(std::max(_M_rehash_policy._M_next_bkt(__n),
_M_rehash_policy._M_bkt_for_elements(_M_element_count
- + 1)));
+ + 1)),
+ __saved_next_resize);
}
template<typename _Key, typename _Value,
void
_Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
- _M_rehash(size_type __n)
+ _M_rehash(size_type __n, size_type __next_resize)
{
- _Node** __new_array = _M_allocate_buckets(__n);
+ _Node** __new_array = 0;
__try
{
+ // Allocate inside the try block so a bucket-allocation failure is
+ // distinguishable (null __new_array) from a later failure while
+ // relinking nodes, which can come from the user's hash functor.
+ __new_array = _M_allocate_buckets(__n);
_M_begin_bucket_index = __n;
for (size_type __i = 0; __i < _M_bucket_count; ++__i)
while (_Node* __p = _M_buckets[__i])
}
__catch(...)
{
- // A failure here means that a hash function threw an exception.
- // We can't restore the previous state without calling the hash
- // function again, so the only sensible recovery is to delete
- // everything.
- _M_deallocate_nodes(__new_array, __n);
- _M_deallocate_buckets(__new_array, __n);
- _M_deallocate_nodes(_M_buckets, _M_bucket_count);
- _M_element_count = 0;
- _M_begin_bucket_index = _M_bucket_count;
+ if (__new_array)
+ {
+ // A failure here means that a hash function threw an exception.
+ // We can't restore the previous state without calling the hash
+ // function again, so the only sensible recovery is to delete
+ // everything.
+ _M_deallocate_nodes(__new_array, __n);
+ _M_deallocate_buckets(__new_array, __n);
+ _M_deallocate_nodes(_M_buckets, _M_bucket_count);
+ _M_element_count = 0;
+ _M_begin_bucket_index = _M_bucket_count;
+ // The container is now empty, so the old resize threshold is
+ // meaningless; zero it rather than restoring __next_resize.
+ _M_rehash_policy._M_next_resize = 0;
+ }
+ else
+ // A failure here means that buckets allocation failed. We only
+ // have to restore hash policy previous state.
+ _M_rehash_policy._M_next_resize = __next_resize;
__throw_exception_again;
}
}