1 // Internal header for TR1 unordered_set and unordered_map -*- C++ -*-
3 // Copyright (C) 2005 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 2, or (at your option)
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // You should have received a copy of the GNU General Public License along
17 // with this library; see the file COPYING. If not, write to the Free
18 // Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
21 // As a special exception, you may use this file as part of a free software
22 // library without restriction. Specifically, if other files instantiate
23 // templates or use macros or inline functions from this file, or you compile
24 // this file and link it with other files to produce an executable, this
25 // file does not by itself cause the resulting executable to be covered by
26 // the GNU General Public License. This exception does not however
27 // invalidate any other reasons why the executable file might be covered by
28 // the GNU General Public License.
31 * This is a TR1 C++ Library header.
34 // This header file defines std::tr1::hashtable, which is used to
35 // implement std::tr1::unordered_set, std::tr1::unordered_map,
36 // std::tr1::unordered_multiset, and std::tr1::unordered_multimap.
37 // hashtable has many template parameters, partly to accommodate
38 // the differences between those four classes and partly to
39 // accommodate policy choices that go beyond what TR1 calls for.
41 // ??? Arguably this should be Internal::hashtable, not std::tr1::hashtable.
43 // Class template hashtable attempts to encapsulate all reasonable
44 // variation among hash tables that use chaining. It does not handle
48 // M. Austern, "A Proposal to Add Hash Tables to the Standard
49 // Library (revision 4)," WG21 Document N1456=03-0039, 2003.
50 // D. E. Knuth, The Art of Computer Programming, v. 3, Sorting and Searching.
51 // A. Tavori and V. Dreizin, "Generic Associative Containers", 2004.
54 #ifndef GNU_LIBSTDCXX_TR1_HASHTABLE_
55 #define GNU_LIBSTDCXX_TR1_HASHTABLE_
57 #include <utility> // For std::pair
62 #include <tr1/type_traits> // For true_type and false_type
64 //----------------------------------------------------------------------
// Compile-time type selection (the TR1-era equivalent of
// std::conditional): IF<Flag, A, B>::type is A when Flag is true
// and B otherwise.
template <bool Flag, typename IfTrue, typename IfFalse>
struct IF;

// Partial specialization chosen when the flag is true.
template <typename IfTrue, typename IfFalse>
struct IF<true, IfTrue, IfFalse>
{
  typedef IfTrue type;
};

// Partial specialization chosen when the flag is false.
template <typename IfTrue, typename IfFalse>
struct IF<false, IfTrue, IfFalse>
{
  typedef IfFalse type;
};
76 // Helper function: return distance(first, last) for forward
77 // iterators, or 0 for input iterators.
// Tag-dispatched on iterator_category: a range insert can pre-size
// the table for forward iterators, but must not consume an
// input-only sequence just to count it.
79 template <class Iterator>
80 inline typename std::iterator_traits<Iterator>::difference_type
81 distance_fw (Iterator first, Iterator last, std::input_iterator_tag)
// NOTE(review): the body of the input-iterator overload (per the
// comment above it should simply return 0) is not visible here —
// lines appear lost in extraction; confirm against the original file.
86 template <class Iterator>
87 inline typename std::iterator_traits<Iterator>::difference_type
88 distance_fw (Iterator first, Iterator last, std::forward_iterator_tag)
// Forward (or better) iterators: delegate to std::distance.
90 return std::distance(first, last);
93 template <class Iterator>
94 inline typename std::iterator_traits<Iterator>::difference_type
95 distance_fw (Iterator first, Iterator last)
// Dispatcher: pick one of the two overloads above via the tag type.
97 typedef typename std::iterator_traits<Iterator>::iterator_category tag;
98 return distance_fw(first, last, tag());
101 } // namespace Internal
103 //----------------------------------------------------------------------
104 // Auxiliary types used for all instantiations of hashtable: nodes
107 // Nodes, used to wrap elements stored in the hash table. A policy
108 // template parameter of class template hashtable controls whether
109 // nodes also store a hash code. In some cases (e.g. strings) this may
110 // be a performance win.
// Primary declaration; only the two specializations below are defined.
114 template <typename Value, bool cache_hash_code> struct hash_node;
// Specialization that caches the element's hash code in the node,
// so lookups and rehashes can avoid re-hashing the key.
116 template <typename Value>
117 struct hash_node<Value, true> {
119 std::size_t hash_code;
// NOTE(review): the value member (m_v) and the next-pointer (m_next)
// that the rest of this file dereferences are not visible in either
// specialization — lines appear lost in extraction; verify.
123 template <typename Value>
124 struct hash_node<Value, false> {
129 // Local iterators, used to iterate within a bucket but not between
// buckets: they only follow a node's m_next chain.
132 template <typename Value, bool cache>
133 struct node_iterator_base {
134 node_iterator_base(hash_node<Value, cache>* p) : m_cur(p) { }
// Advance to the next node in the same bucket's chain.
135 void incr() { m_cur = m_cur->m_next; }
// Current node; null for an end-of-bucket iterator.
137 hash_node<Value, cache>* m_cur;
// Two local iterators compare equal iff they designate the same node.
140 template <typename Value, bool cache>
141 inline bool operator== (const node_iterator_base<Value, cache>& x,
142 const node_iterator_base<Value, cache>& y)
144 return x.m_cur == y.m_cur;
147 template <typename Value, bool cache>
148 inline bool operator!= (const node_iterator_base<Value, cache>& x,
149 const node_iterator_base<Value, cache>& y)
151 return x.m_cur != y.m_cur;
// Local iterator proper: a forward iterator whose pointer/reference
// const-ness is selected from is_const via the IF metafunction.
154 template <typename Value, bool is_const, bool cache>
155 struct node_iterator : public node_iterator_base<Value, cache> {
156 typedef Value value_type;
157 typedef typename IF<is_const, const Value*, Value*>::type pointer;
158 typedef typename IF<is_const, const Value&, Value&>::type reference;
159 typedef std::ptrdiff_t difference_type;
160 typedef std::forward_iterator_tag iterator_category;
162 explicit node_iterator (hash_node<Value, cache>* p = 0)
163 : node_iterator_base<Value, cache>(p) { }
// Converting constructor from the <Value, true, cache> variant.
// NOTE(review): since is_const == true yields const pointer/reference
// typedefs above, this allows converting a const iterator into a
// mutable one — the usual direction is the opposite; verify intent.
164 node_iterator (const node_iterator<Value, true, cache>& x)
165 : node_iterator_base<Value, cache>(x.m_cur) { }
167 reference operator*() const { return this->m_cur->m_v; }
168 pointer operator->() const { return &this->m_cur->m_v; }
170 node_iterator& operator++() { this->incr(); return *this; }
171 node_iterator operator++(int)
172 { node_iterator tmp(*this); this->incr(); return tmp; }
// Global iterator base: tracks both the current node and the current
// bucket so it can hop to the next non-empty bucket when a bucket's
// chain runs out.
175 template <typename Value, bool cache>
176 struct hashtable_iterator_base {
177 hashtable_iterator_base(hash_node<Value, cache>* node,
178 hash_node<Value, cache>** bucket)
179 : m_cur_node (node), m_cur_bucket (bucket)
// NOTE(review): lines appear missing around here — presumably the
// incr() member that advances m_cur_node and calls m_incr_bucket()
// when the chain ends (hashtable_iterator::operator++ calls incr()
// below); verify against the original file.
183 m_cur_node = m_cur_node->m_next;
188 void m_incr_bucket();
190 hash_node<Value, cache>* m_cur_node;
191 hash_node<Value, cache>** m_cur_bucket;
195 // Global iterators, used for arbitrary iteration within a hash
196 // table. Larger and more expensive than local iterators.
// Advance m_cur_bucket to the next non-empty bucket and point
// m_cur_node at its first node.
198 template <typename Value, bool cache>
199 void hashtable_iterator_base<Value, cache>::m_incr_bucket()
203 // This loop requires the bucket array to have a non-null sentinel
// (installed by m_allocate_buckets below) so it cannot run off the
// end. NOTE(review): the loop body (++m_cur_bucket) is not visible —
// extraction loss; verify.
204 while (!*m_cur_bucket)
206 m_cur_node = *m_cur_bucket;
// Global iterators compare equal iff they designate the same node;
// the bucket pointer is deliberately ignored.
209 template <typename Value, bool cache>
210 inline bool operator== (const hashtable_iterator_base<Value, cache>& x,
211 const hashtable_iterator_base<Value, cache>& y)
213 return x.m_cur_node == y.m_cur_node;
216 template <typename Value, bool cache>
217 inline bool operator!= (const hashtable_iterator_base<Value, cache>& x,
218 const hashtable_iterator_base<Value, cache>& y)
220 return x.m_cur_node != y.m_cur_node;
// Global iterator proper: forward iterator over the whole table, with
// const-ness of pointer/reference chosen from is_const via IF.
223 template <typename Value, bool is_const, bool cache>
224 struct hashtable_iterator : public hashtable_iterator_base<Value, cache>
226 typedef Value value_type;
227 typedef typename IF<is_const, const Value*, Value*>::type pointer;
228 typedef typename IF<is_const, const Value&, Value&>::type reference;
229 typedef std::ptrdiff_t difference_type;
230 typedef std::forward_iterator_tag iterator_category;
// Construct from an explicit node/bucket pair.
232 hashtable_iterator (hash_node<Value, cache>* p, hash_node<Value, cache>** b)
233 : hashtable_iterator_base<Value, cache>(p, b) { }
// Construct from a bucket pointer alone: start at that bucket's head.
234 hashtable_iterator (hash_node<Value, cache>** b)
235 : hashtable_iterator_base<Value, cache>(*b, b) { }
// Converting constructor from the <Value, true, cache> variant
// (see the matching NOTE on node_iterator's converting constructor).
236 hashtable_iterator (const hashtable_iterator<Value, true, cache>& x)
237 : hashtable_iterator_base<Value, cache>(x.m_cur_node, x.m_cur_bucket) { }
239 reference operator*() const { return this->m_cur_node->m_v; }
240 pointer operator->() const { return &this->m_cur_node->m_v; }
242 hashtable_iterator& operator++() { this->incr(); return *this; }
243 hashtable_iterator operator++(int)
244 { hashtable_iterator tmp(*this); this->incr(); return tmp; }
247 } // namespace Internal
249 // ----------------------------------------------------------------------
250 // Many of class template hashtable's template parameters are policy
251 // classes. These are defaults for the policies.
255 // The two key extraction policies used by the *set and *map variants.
// Identity extraction (used by the set variants): the element is its
// own key. NOTE(review): the struct-name line is not visible here —
// extraction loss; verify the functor's name against the original.
256 template <typename T>
258 T operator()(const T& t) const { return t; }
// First-of-pair extraction (used by the map variants); map_base below
// pattern-matches on extract1st<Pair>, so this functor is extract1st.
// NOTE(review): its struct-name line is likewise not visible here.
261 template <typename Pair>
263 typename Pair::first_type operator()(const Pair& p) const { return p.first; }
266 // Default range hashing function: use division to fold a large number
267 // into the range [0, N).
268 struct mod_range_hashing
270 typedef std::size_t first_argument_type;
271 typedef std::size_t second_argument_type;
272 typedef std::size_t result_type;
// NOTE(review): the operator() body is not visible (per the comment
// above, presumably r % N) — verify against the original file.
274 result_type operator() (first_argument_type r, second_argument_type N) const
// Tag type standing in for the default ranged hash function H.
// Conceptually H(k, N) = h2(h1(k), N); building a real function object
// out of H1 and H2 would force extra copies of h1 and h2, so class
// template hashtable recognizes this tag and performs the composition
// itself instead.
struct default_ranged_hash
{
};
285 // Default value for rehash policy. Bucket size is (usually) the
286 // smallest prime that keeps the load factor small enough.
288 struct prime_rehash_policy
// z is the maximum load factor; defaults to 1 element per bucket.
290 prime_rehash_policy (float z = 1.0);
292 float max_load_factor() const;
294 // Return a bucket size no smaller than n.
295 std::size_t next_bkt (std::size_t n) const;
297 // Return a bucket count appropriate for n elements
298 std::size_t bkt_for_elements (std::size_t n) const;
300 // n_bkt is current bucket count, n_elt is current element count,
301 // and n_ins is number of elements to be inserted. Do we need to
302 // increase bucket count? If so, return make_pair(true, n), where n
303 // is the new bucket count. If not, return make_pair(false, 0).
304 std::pair<bool, std::size_t>
305 need_rehash (std::size_t n_bkt, std::size_t n_elt, std::size_t n_ins) const;
307 float m_max_load_factor;
308 float m_growth_factor;
// Cached element count at which the next rehash becomes necessary;
// mutable because the const members above update it (see next_bkt,
// bkt_for_elements and need_rehash below).
309 mutable std::size_t m_next_resize;
312 // XXX This is a hack. prime_rehash_policy's member functions, and
313 // certainly the list of primes, should be defined in a .cc file.
314 // We're temporarily putting them in a header because we don't have a
315 // place to put TR1 .cc files yet. There's no good reason for any of
316 // prime_rehash_policy's member functions to be inline, and there's
317 // certainly no good reason for X<> to exist at all.
// Heterogeneous less-than comparator, used below as lt() with
// std::lower_bound so a float can be compared against unsigned longs.
// NOTE(review): the enclosing struct-name line (lt) is not visible
// here — extraction loss; verify.
320 template <typename X, typename Y> bool operator()(X x, Y y) { return x < y; }
// Static prime table wrapped in class template X<> purely so the
// definitions below can live in this header without ODR violations.
// NOTE(review): the "template <int dummy> struct X {" line is not
// visible here, though X<0>::primes is used below — verify.
325 static const int n_primes = 256;
326 static const unsigned long primes[n_primes + 1];
330 const int X<dummy>::n_primes;
// 256 primes in roughly geometric progression, plus a final sentinel.
333 const unsigned long X<dummy>::primes[n_primes + 1] =
335 2ul, 3ul, 5ul, 7ul, 11ul, 13ul, 17ul, 19ul, 23ul, 29ul, 31ul,
336 37ul, 41ul, 43ul, 47ul, 53ul, 59ul, 61ul, 67ul, 71ul, 73ul, 79ul,
337 83ul, 89ul, 97ul, 103ul, 109ul, 113ul, 127ul, 137ul, 139ul, 149ul,
338 157ul, 167ul, 179ul, 193ul, 199ul, 211ul, 227ul, 241ul, 257ul,
339 277ul, 293ul, 313ul, 337ul, 359ul, 383ul, 409ul, 439ul, 467ul,
340 503ul, 541ul, 577ul, 619ul, 661ul, 709ul, 761ul, 823ul, 887ul,
341 953ul, 1031ul, 1109ul, 1193ul, 1289ul, 1381ul, 1493ul, 1613ul,
342 1741ul, 1879ul, 2029ul, 2179ul, 2357ul, 2549ul, 2753ul, 2971ul,
343 3209ul, 3469ul, 3739ul, 4027ul, 4349ul, 4703ul, 5087ul, 5503ul,
344 5953ul, 6427ul, 6949ul, 7517ul, 8123ul, 8783ul, 9497ul, 10273ul,
345 11113ul, 12011ul, 12983ul, 14033ul, 15173ul, 16411ul, 17749ul,
346 19183ul, 20753ul, 22447ul, 24281ul, 26267ul, 28411ul, 30727ul,
347 33223ul, 35933ul, 38873ul, 42043ul, 45481ul, 49201ul, 53201ul,
348 57557ul, 62233ul, 67307ul, 72817ul, 78779ul, 85229ul, 92203ul,
349 99733ul, 107897ul, 116731ul, 126271ul, 136607ul, 147793ul,
350 159871ul, 172933ul, 187091ul, 202409ul, 218971ul, 236897ul,
351 256279ul, 277261ul, 299951ul, 324503ul, 351061ul, 379787ul,
352 410857ul, 444487ul, 480881ul, 520241ul, 562841ul, 608903ul,
353 658753ul, 712697ul, 771049ul, 834181ul, 902483ul, 976369ul,
354 1056323ul, 1142821ul, 1236397ul, 1337629ul, 1447153ul, 1565659ul,
355 1693859ul, 1832561ul, 1982627ul, 2144977ul, 2320627ul, 2510653ul,
356 2716249ul, 2938679ul, 3179303ul, 3439651ul, 3721303ul, 4026031ul,
357 4355707ul, 4712381ul, 5098259ul, 5515729ul, 5967347ul, 6456007ul,
358 6984629ul, 7556579ul, 8175383ul, 8844859ul, 9569143ul, 10352717ul,
359 11200489ul, 12117689ul, 13109983ul, 14183539ul, 15345007ul,
360 16601593ul, 17961079ul, 19431899ul, 21023161ul, 22744717ul,
361 24607243ul, 26622317ul, 28802401ul, 31160981ul, 33712729ul,
362 36473443ul, 39460231ul, 42691603ul, 46187573ul, 49969847ul,
363 54061849ul, 58488943ul, 63278561ul, 68460391ul, 74066549ul,
364 80131819ul, 86693767ul, 93793069ul, 101473717ul, 109783337ul,
365 118773397ul, 128499677ul, 139022417ul, 150406843ul, 162723577ul,
366 176048909ul, 190465427ul, 206062531ul, 222936881ul, 241193053ul,
367 260944219ul, 282312799ul, 305431229ul, 330442829ul, 357502601ul,
368 386778277ul, 418451333ul, 452718089ul, 489790921ul, 529899637ul,
369 573292817ul, 620239453ul, 671030513ul, 725980837ul, 785430967ul,
370 849749479ul, 919334987ul, 994618837ul, 1076067617ul, 1164186217ul,
371 1259520799ul, 1362662261ul, 1474249943ul, 1594975441ul,
372 1725587117ul, 1866894511ul, 2019773507ul, 2185171673ul,
373 2364114217ul, 2557710269ul, 2767159799ul, 2993761039ul,
374 3238918481ul, 3504151727ul, 3791104843ul, 4101556399ul,
376 4294967291ul // sentinel so we don't have to test result of lower_bound
// Constructor: record the target load factor; growth factor is fixed
// at 2 (the table at least doubles when it grows).
379 inline prime_rehash_policy::prime_rehash_policy (float z)
380 : m_max_load_factor(z),
381 m_growth_factor (2.f),
// NOTE(review): the initializer for m_next_resize and the closing
// brace are not visible here — extraction loss; verify.
385 inline float prime_rehash_policy::max_load_factor() const
387 return m_max_load_factor;
390 // Return a prime no smaller than n.
// Binary-search the prime table; the sentinel guarantees lower_bound
// never returns the past-the-end pointer. Also refreshes the cached
// rehash threshold (m_next_resize) for the chosen bucket count.
391 inline std::size_t prime_rehash_policy::next_bkt (std::size_t n) const
393 const unsigned long* const last = X<0>::primes + X<0>::n_primes;
394 const unsigned long* p = std::lower_bound (X<0>::primes, last, n);
395 m_next_resize = static_cast<std::size_t>(std::ceil(*p * m_max_load_factor));
399 // Return the smallest prime p such that alpha p >= n, where alpha
400 // is the load factor.
401 inline std::size_t prime_rehash_policy::bkt_for_elements (std::size_t n) const
403 const unsigned long* const last = X<0>::primes + X<0>::n_primes;
// Minimum bucket count implied by n elements at the max load factor;
// lt() lets lower_bound compare the float against unsigned longs.
404 const float min_bkts = n / m_max_load_factor;
405 const unsigned long* p = std::lower_bound (X<0>::primes, last, min_bkts, lt());
406 m_next_resize = static_cast<std::size_t>(std::ceil(*p * m_max_load_factor));
410 // Finds the smallest prime p such that alpha p > n_elt + n_ins.
411 // If p > n_bkt, return make_pair(true, p); otherwise return
412 // make_pair(false, 0). In principle this isn't very different from
415 // The only tricky part is that we're caching the element count at
416 // which we need to rehash, so we don't have to do a floating-point
417 // multiply for every insertion.
419 inline std::pair<bool, std::size_t>
// NOTE(review): the "prime_rehash_policy" qualifier line preceding
// "::need_rehash" is not visible here — extraction loss; verify.
421 ::need_rehash (std::size_t n_bkt, std::size_t n_elt, std::size_t n_ins) const
// Fast path: still below the cached threshold, nothing to do.
423 if (n_elt + n_ins > m_next_resize) {
424 float min_bkts = (float(n_ins) + float(n_elt)) / m_max_load_factor;
425 if (min_bkts > n_bkt) {
// Grow by at least m_growth_factor, then round up to the next prime
// and refresh the cached threshold for the new bucket count.
426 min_bkts = std::max (min_bkts, m_growth_factor * n_bkt);
427 const unsigned long* const last = X<0>::primes + X<0>::n_primes;
428 const unsigned long* p = std::lower_bound (X<0>::primes, last, min_bkts, lt());
429 m_next_resize = static_cast<std::size_t>(std::ceil(*p * m_max_load_factor));
430 return std::make_pair(true, *p);
// Threshold was stale but the current bucket count still suffices:
// recompute the threshold from n_bkt and report "no rehash".
433 m_next_resize = static_cast<std::size_t>(std::ceil(n_bkt * m_max_load_factor));
434 return std::make_pair(false, 0);
438 return std::make_pair(false, 0);
441 } // namespace Internal
443 //----------------------------------------------------------------------
444 // Base classes for std::tr1::hashtable. We define these base classes
445 // because in some cases we want to do different things depending on
446 // the value of a policy class. In some cases the policy class affects
447 // which member functions and nested typedefs are defined; we handle that
448 // by specializing base class templates. Several of the base class templates
449 // need to access other members of class template hashtable, so we use
450 // the "curiously recurring template pattern" for them.
454 // class template map_base. If the hashtable has a value type of the
455 // form pair<T1, T2> and a key extraction policy that returns the
456 // first part of the pair, the hashtable gets a mapped_type typedef.
457 // If it satisfies those criteria and also has unique keys, then it
458 // also gets an operator[].
// Primary template: empty for non-map instantiations. NOTE(review):
// its "struct map_base { };" definition line is not visible here.
460 template <typename K, typename V, typename Ex, bool unique, typename Hashtable>
// multimap-like case (non-unique keys): mapped_type only, no operator[].
463 template <typename K, typename Pair, typename Hashtable>
464 struct map_base<K, Pair, extract1st<Pair>, false, Hashtable>
466 typedef typename Pair::second_type mapped_type;
// map-like case (unique keys): also provides operator[], implemented
// via CRTP downcast to the full hashtable.
469 template <typename K, typename Pair, typename Hashtable>
470 struct map_base<K, Pair, extract1st<Pair>, true, Hashtable>
472 typedef typename Pair::second_type mapped_type;
473 mapped_type& operator[](const K& k) {
474 Hashtable* h = static_cast<Hashtable*>(this);
// Insert a default-constructed mapped value if k is absent; either
// way .first points at the element for k. NOTE(review): the trailing
// "return it->second;" line is not visible here — extraction loss.
475 typename Hashtable::iterator it = h->insert(std::make_pair(k, mapped_type())).first;
// Class template rehash_base. The primary template deliberately adds
// nothing: only the specialization below (for prime_rehash_policy)
// grafts the max_load_factor() accessors onto hashtable. With any
// other rehash policy the hashtable simply lacks those members.
template <typename RehashPolicy, typename Hashtable>
struct rehash_base
{
};
// Specialization for the default policy: exposes TR1's
// max_load_factor getter/setter by CRTP-downcasting to the hashtable.
485 template <typename Hashtable>
486 struct rehash_base<prime_rehash_policy, Hashtable>
488 float max_load_factor() const {
489 const Hashtable* This = static_cast<const Hashtable*>(this);
// NOTE(review): hashtable::rehash_policy() (below) returns
// const RehashPolicy&, not a pointer, so "->max_load_factor()" looks
// wrong — it should be ".max_load_factor()". Verify and fix.
490 return This->rehash_policy()->max_load_factor();
// Setter: replace the whole policy with one built from the new z.
493 void max_load_factor(float z) {
494 Hashtable* This = static_cast<Hashtable*>(this);
495 This->rehash_policy(prime_rehash_policy(z));
499 // Class template hash_code_base. Encapsulates two policy issues that
500 // aren't quite orthogonal.
501 // (1) the difference between using a ranged hash function and using
502 // the combination of a hash function and a range-hashing function.
503 // In the former case we don't have such things as hash codes, so
504 // we have a dummy type as placeholder.
505 // (2) Whether or not we cache hash codes. Caching hash codes is
506 // meaningless if we have a ranged hash function.
507 // We also put the key extraction and equality comparison function
508 // objects here, for convenience.
510 // Primary template: unused except as a hook for specializations.
// Declared but never defined; the three specializations below supply
// all usable combinations of (H, cache_hash_code).
512 template <typename Key, typename Value,
513 typename ExtractKey, typename Equal,
514 typename H1, typename H2, typename H,
515 bool cache_hash_code>
516 struct hash_code_base;
518 // Specialization: ranged hash function, no caching hash codes. H1
519 // and H2 are provided but ignored. We define a dummy hash code type.
520 template <typename Key, typename Value,
521 typename ExtractKey, typename Equal,
522 typename H1, typename H2, typename H>
523 struct hash_code_base <Key, Value, ExtractKey, Equal, H1, H2, H, false>
526 hash_code_base (const ExtractKey& ex, const Equal& eq,
527 const H1&, const H2&, const H& h)
528 : m_extract(ex), m_eq(eq), m_ranged_hash(h) { }
// Dummy hash code: with a ranged hash there is no separate code, so
// m_hash_code always yields a null placeholder.
530 typedef void* hash_code_t;
531 hash_code_t m_hash_code (const Key& k) const { return 0; }
// Bucket selection goes straight through the ranged hash h(k, N).
532 std::size_t bucket_index (const Key& k, hash_code_t, std::size_t N) const
533 { return m_ranged_hash (k, N); }
534 std::size_t bucket_index (const hash_node<Value, false>* p, std::size_t N) const {
535 return m_ranged_hash (m_extract (p->m_v), N);
// Equality is key-only; the (dummy) hash code is ignored.
538 bool compare (const Key& k, hash_code_t, hash_node<Value, false>* n) const
539 { return m_eq (k, m_extract(n->m_v)); }
// Nothing cached, so copying a node's code is a no-op.
541 void copy_code (hash_node<Value, false>*, const hash_node<Value, false>*) const { }
543 void m_swap(hash_code_base& x) {
// NOTE(review): this m_swap body looks truncated, and
// "m_ranged_hash.m_swap(x)" (swapping a functor with the whole base?)
// is suspicious — expected element-wise swaps of m_extract, m_eq and
// m_ranged_hash; verify against the original file.
546 m_ranged_hash.m_swap(x);
// NOTE(review): the Equal (m_eq) and H (m_ranged_hash) data members
// used above are not visible below this point — extraction loss.
550 ExtractKey m_extract;
556 // No specialization for ranged hash function while caching hash codes.
557 // That combination is meaningless, and trying to do it is an error.
560 // Specialization: ranged hash function, cache hash codes. This
561 // combination is meaningless, so we provide only a declaration
562 // and no definition.
// Instantiating hashtable with a non-default H and cache_hash_code ==
// true therefore fails at compile time (incomplete base class).
564 template <typename Key, typename Value,
565 typename ExtractKey, typename Equal,
566 typename H1, typename H2, typename H>
567 struct hash_code_base <Key, Value, ExtractKey, Equal, H1, H2, H, true>;
570 // Specialization: hash function and range-hashing function, no
571 // caching of hash codes. H is provided but ignored. Provides
572 // typedef and accessor required by TR1.
574 template <typename Key, typename Value,
575 typename ExtractKey, typename Equal,
576 typename H1, typename H2>
577 struct hash_code_base <Key, Value, ExtractKey, Equal, H1, H2, default_ranged_hash, false>
// TR1-required accessor. NOTE(review): the "typedef H1 hasher;" line
// that this return type relies on is not visible here — extraction
// loss; verify.
580 hasher hash_function() const { return m_h1; }
583 hash_code_base (const ExtractKey& ex, const Equal& eq,
584 const H1& h1, const H2& h2, const default_ranged_hash&)
585 : m_extract(ex), m_eq(eq), m_h1(h1), m_h2(h2) { }
// Real hash codes: code = h1(key); bucket = h2(code, N).
587 typedef std::size_t hash_code_t;
588 hash_code_t m_hash_code (const Key& k) const { return m_h1(k); }
589 std::size_t bucket_index (const Key&, hash_code_t c, std::size_t N) const
590 { return m_h2 (c, N); }
// Nodes don't cache the code, so recompute it from the stored value.
591 std::size_t bucket_index (const hash_node<Value, false>* p, std::size_t N) const {
592 return m_h2 (m_h1 (m_extract (p->m_v)), N);
// Equality is key-only; the hash code argument is ignored.
595 bool compare (const Key& k, hash_code_t, hash_node<Value, false>* n) const
596 { return m_eq (k, m_extract(n->m_v)); }
598 void copy_code (hash_node<Value, false>*, const hash_node<Value, false>*) const { }
// NOTE(review): the m_swap body and the Equal/H1/H2 data members are
// not visible below this point — extraction loss; verify.
600 void m_swap(hash_code_base& x) {
608 ExtractKey m_extract;
614 // Specialization: hash function and range-hashing function,
615 // caching hash codes. H is provided but ignored. Provides
616 // typedef and accessor required by TR1.
617 template <typename Key, typename Value,
618 typename ExtractKey, typename Equal,
619 typename H1, typename H2>
620 struct hash_code_base <Key, Value, ExtractKey, Equal, H1, H2, default_ranged_hash, true>
// TR1-required accessor. NOTE(review): the "typedef H1 hasher;" line
// this relies on is not visible here — extraction loss; verify.
623 hasher hash_function() const { return m_h1; }
626 hash_code_base (const ExtractKey& ex, const Equal& eq,
627 const H1& h1, const H2& h2, const default_ranged_hash&)
628 : m_extract(ex), m_eq(eq), m_h1(h1), m_h2(h2) { }
630 typedef std::size_t hash_code_t;
631 hash_code_t m_hash_code (const Key& k) const { return m_h1(k); }
632 std::size_t bucket_index (const Key&, hash_code_t c, std::size_t N) const
633 { return m_h2 (c, N); }
// Cached variant: read the code stored in the node instead of
// re-hashing the key.
635 std::size_t bucket_index (const hash_node<Value, true>* p, std::size_t N) const {
636 return m_h2 (p->hash_code, N);
// Compare the cached codes first — a cheap reject before the
// potentially expensive key equality (the win for e.g. strings).
639 bool compare (const Key& k, hash_code_t c, hash_node<Value, true>* n) const
640 { return c == n->hash_code && m_eq (k, m_extract(n->m_v)); }
642 void copy_code (hash_node<Value, true>* to, const hash_node<Value, true>* from) const
643 { to->hash_code = from->hash_code; }
// NOTE(review): the m_swap body and the Equal/H1/H2 data members are
// not visible below this point — extraction loss; verify.
645 void m_swap(hash_code_base& x) {
653 ExtractKey m_extract;
659 } // namespace Internal
661 namespace std { namespace tr1 {
663 //----------------------------------------------------------------------
664 // Class template hashtable, class definition.
666 // Meaning of class template hashtable's template parameters
668 // Key and Value: arbitrary CopyConstructible types.
670 // Allocator: an allocator type ([lib.allocator.requirements]) whose
671 // value type is Value.
673 // ExtractKey: function object that takes an object of type Value
674 // and returns a value of type Key.
676 // Equal: function object that takes two objects of type k and returns
677 // a bool-like value that is true if the two objects are considered equal.
679 // H1: the hash function. A unary function object with argument type
680 // Key and result type size_t. Return values should be distributed
681 // over the entire range [0, numeric_limits<size_t>::max()].
683 // H2: the range-hashing function (in the terminology of Tavori and
684 // Dreizin). A binary function object whose argument types and result
685 // type are all size_t. Given arguments r and N, the return value is
686 // in the range [0, N).
688 // H: the ranged hash function (Tavori and Dreizin). A binary function
689 // whose argument types are Key and size_t and whose result type is
690 // size_t. Given arguments k and N, the return value is in the range
691 // [0, N). Default: h(k, N) = h2(h1(k), N). If H is anything other
692 // than the default, H1 and H2 are ignored.
694 // RehashPolicy: Policy class with three members, all of which govern
695 // the bucket count. n_bkt(n) returns a bucket count no smaller
696 // than n. bkt_for_elements(n) returns a bucket count appropriate
697 // for an element count of n. need_rehash(n_bkt, n_elt, n_ins)
698 // determines whether, if the current bucket count is n_bkt and the
699 // current element count is n_elt, we need to increase the bucket
700 // count. If so, returns make_pair(true, n), where n is the new
701 // bucket count. If not, returns make_pair(false, <anything>).
703 // ??? Right now it is hard-wired that the number of buckets never
704 // shrinks. Should we allow RehashPolicy to change that?
706 // cache_hash_code: bool. true if we store the value of the hash
707 // function along with the value. This is a time-space tradeoff.
708 // Storing it may improve lookup speed by reducing the number of times
709 // we need to call the Equal function.
711 // mutable_iterators: bool. true if hashtable::iterator is a mutable
712 // iterator, false if iterator and const_iterator are both const
713 // iterators. This is true for unordered_map and unordered_multimap,
714 // false for unordered_set and unordered_multiset.
716 // unique_keys: bool. true if the return value of hashtable::count(k)
717 // is always at most one, false if it may be an arbitrary number. This is
718 // true for unordered_set and unordered_map, false for unordered_multiset
719 // and unordered_multimap.
// The hashtable class template itself. NOTE(review): the "typename
// Allocator," template parameter (used throughout the typedefs below)
// and the "class hashtable" line introducing the base-clause are not
// visible here — extraction loss; verify against the original file.
721 template <typename Key, typename Value,
723 typename ExtractKey, typename Equal,
724 typename H1, typename H2,
725 typename H, typename RehashPolicy,
726 bool cache_hash_code,
727 bool mutable_iterators,
730 : public Internal::rehash_base<RehashPolicy, hashtable<Key, Value, Allocator, ExtractKey, Equal, H1, H2, H, RehashPolicy, cache_hash_code, mutable_iterators, unique_keys> >,
731 public Internal::hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, cache_hash_code>,
732 public Internal::map_base<Key, Value, ExtractKey, unique_keys, hashtable<Key, Value, Allocator, ExtractKey, Equal, H1, H2, H, RehashPolicy, cache_hash_code, mutable_iterators, unique_keys> >
735 typedef Allocator allocator_type;
736 typedef Value value_type;
737 typedef Key key_type;
738 typedef Equal key_equal;
739 // mapped_type, if present, comes from map_base.
740 // hasher, if present, comes from hash_code_base.
741 typedef typename Allocator::difference_type difference_type;
742 typedef typename Allocator::size_type size_type;
743 typedef typename Allocator::reference reference;
744 typedef typename Allocator::const_reference const_reference;
// NOTE(review): the const_* typedefs below instantiate the iterator
// templates with is_const == false, which per node_iterator's IF
// typedefs yields mutable pointer/reference — looks inverted (expect
// true); verify. Also the "local_iterator;"/"iterator;" name lines
// are not visible here.
746 typedef Internal::node_iterator<value_type, !mutable_iterators, cache_hash_code>
748 typedef Internal::node_iterator<value_type, false, cache_hash_code>
749 const_local_iterator;
751 typedef Internal::hashtable_iterator<value_type, !mutable_iterators, cache_hash_code>
753 typedef Internal::hashtable_iterator<value_type, false, cache_hash_code>
// Node and rebound allocator types: one allocator for nodes, one for
// the bucket array of node pointers.
757 typedef Internal::hash_node<Value, cache_hash_code> node;
758 typedef typename Allocator::template rebind<node>::other node_allocator_t;
759 typedef typename Allocator::template rebind<node*>::other bucket_allocator_t;
// Data members. NOTE(review): the m_buckets (node**) member used by
// begin()/end() and the bucket operations below is not visible here.
762 node_allocator_t m_node_allocator;
764 size_type m_bucket_count;
765 size_type m_element_count;
766 RehashPolicy m_rehash_policy;
// Node/bucket allocation helpers; defined out of line below.
768 node* m_allocate_node (const value_type& v);
769 void m_deallocate_node (node* n);
770 void m_deallocate_nodes (node**, size_type);
772 node** m_allocate_buckets (size_type n);
773 void m_deallocate_buckets (node**, size_type n);
775 public: // Constructor, destructor, assignment, swap
776 hashtable(size_type bucket_hint,
777 const H1&, const H2&, const H&,
778 const Equal&, const ExtractKey&,
779 const allocator_type&);
781 template <typename InIter>
782 hashtable(InIter first, InIter last,
783 size_type bucket_hint,
784 const H1&, const H2&, const H&,
785 const Equal&, const ExtractKey&,
786 const allocator_type&);
788 hashtable(const hashtable&);
789 hashtable& operator=(const hashtable&);
792 void swap(hashtable&);
794 public: // Basic container operations
// NOTE(review): the begin() signatures/bodies around here are only
// partially visible (the non-const begin() header and both returns
// are missing) — extraction loss; verify.
796 iterator i(m_buckets);
802 const_iterator begin() const {
803 const_iterator i(m_buckets);
810 { return iterator(m_buckets + m_bucket_count); }
811 const_iterator end() const
812 { return const_iterator(m_buckets + m_bucket_count); }
814 size_type size() const { return m_element_count; }
815 bool empty() const { return size() == 0; }
817 allocator_type get_allocator() const { return m_node_allocator; }
818 size_type max_size() const { return m_node_allocator.max_size(); }
820 public: // Bucket operations
821 size_type bucket_count() const
822 { return m_bucket_count; }
823 size_type max_bucket_count() const
824 { return max_size(); }
// O(chain length): counts the nodes in bucket n.
825 size_type bucket_size (size_type n) const
826 { return std::distance(begin(n), end(n)); }
// NOTE(review): "this->m_hash_code" is passed uncalled (it is a
// member function); expected "this->m_hash_code(k)" — verify.
827 size_type bucket (const key_type& k) const
828 { return this->bucket_index (k, this->m_hash_code, this->m_bucket_count); }
// Local (per-bucket) iteration; end is the null-node iterator.
830 local_iterator begin(size_type n)
831 { return local_iterator(m_buckets[n]); }
832 local_iterator end(size_type n)
833 { return local_iterator(0); }
834 const_local_iterator begin(size_type n) const
835 { return const_local_iterator(m_buckets[n]); }
836 const_local_iterator end(size_type n) const
837 { return const_local_iterator(0); }
839 float load_factor() const
840 { return static_cast<float>(size()) / static_cast<float>(bucket_count()); }
841 // max_load_factor, if present, comes from rehash_base.
843 // Generalization of max_load_factor. Extension, not found in TR1. Only
844 // useful if RehashPolicy is something other than the default.
845 const RehashPolicy& rehash_policy() const { return m_rehash_policy; }
846 void rehash_policy (const RehashPolicy&);
// Lookup.
849 iterator find(const key_type&);
850 const_iterator find(const key_type& k) const;
851 size_type count(const key_type& k) const;
852 std::pair<iterator, iterator> equal_range(const key_type& k);
853 std::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
855 private: // Insert and erase helper functions
856 // ??? This dispatching is a workaround for the fact that we don't
857 // have partial specialization of member templates; it would be
858 // better to just specialize insert on unique_keys. There may be a
859 // cleaner workaround.
// pair<iterator,bool> for unique keys, plain iterator otherwise.
// NOTE(review): the "Insert_Return_Type;" name line is not visible.
860 typedef typename Internal::IF<unique_keys, std::pair<iterator, bool>, iterator>::type
863 node* find_node (node* p, const key_type& k, typename hashtable::hash_code_t c);
// Tag-dispatched implementations: true_type = unique keys.
865 std::pair<iterator, bool> insert (const value_type&, std::tr1::true_type);
866 iterator insert (const value_type&, std::tr1::false_type);
868 public: // Insert and erase
869 Insert_Return_Type insert (const value_type& v)
870 { return this->insert (v, std::tr1::integral_constant<bool, unique_keys>()); }
// Hint-taking overload required by TR1; the hint is ignored.
871 Insert_Return_Type insert (const_iterator, const value_type& v)
872 { return this->insert(v); }
874 template <typename InIter> void insert(InIter first, InIter last);
876 void erase(const_iterator);
877 size_type erase(const key_type&);
878 void erase(const_iterator, const_iterator);
882 // Set number of buckets to be appropriate for a container of n elements.
883 void rehash (size_type n);
886 // Unconditionally change size of bucket array to n.
887 void m_rehash (size_type n);
890 //----------------------------------------------------------------------
891 // Definitions of class template hashtable's out-of-line member functions.
// Allocate one node and copy-construct the value v into it.  The final
// deallocate below is the exception-cleanup path: if constructing the
// element throws, the raw node storage is returned before rethrowing.
893 template <typename K, typename V,
894 typename A, typename Ex, typename Eq,
895 typename H1, typename H2, typename H, typename RP,
896 bool c, bool m, bool u>
897 typename hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::node*
898 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::m_allocate_node (const value_type& v)
// Raw storage comes from the node allocator (rebound from A)...
900 node* n = m_node_allocator.allocate(1);
// ...the element itself is constructed via the element allocator.
902 get_allocator().construct(&n->m_v, v);
// Cleanup if the element's copy constructor threw.
907 m_node_allocator.deallocate(n, 1);
// Destroy one node's element and return its storage to the node
// allocator; exact inverse of m_allocate_node.
912 template <typename K, typename V,
913 typename A, typename Ex, typename Eq,
914 typename H1, typename H2, typename H, typename RP,
915 bool c, bool m, bool u>
917 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::m_deallocate_node (node* n)
919 get_allocator().destroy(&n->m_v);
920 m_node_allocator.deallocate(n, 1);
// Walk every bucket chain in array[0..n) and deallocate each node.
// Used by clear(), the destructor, and rehash failure recovery.  The
// chain is consumed head-first; the bucket pointers themselves are not
// freed here.
923 template <typename K, typename V,
924 typename A, typename Ex, typename Eq,
925 typename H1, typename H2, typename H, typename RP,
926 bool c, bool m, bool u>
928 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>
929 ::m_deallocate_nodes (node** array, size_type n)
931 for (size_type i = 0; i < n; ++i) {
// tmp is the node just unlinked from the chain head (unlink step not
// visible in this excerpt); destroy it and continue down the chain.
936 m_deallocate_node (tmp);
// Allocate an array of n+1 bucket pointers: n real buckets, zero-filled
// (empty), plus one past-the-end sentinel slot.
942 template <typename K, typename V,
943 typename A, typename Ex, typename Eq,
944 typename H1, typename H2, typename H, typename RP,
945 bool c, bool m, bool u>
946 typename hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::node**
947 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::m_allocate_buckets (size_type n)
// Bucket storage shares state with the node allocator.
949 bucket_allocator_t alloc(m_node_allocator);
951 // We allocate one extra bucket to hold a sentinel, an arbitrary
952 // non-null pointer. Iterator increment relies on this.
953 node** p = alloc.allocate(n+1);
954 std::fill(p, p+n, (node*) 0);
// Arbitrary non-null value; never dereferenced, only tested non-null so
// the end iterator's bucket scan terminates.
955 p[n] = reinterpret_cast<node*>(0x1000);
// Release a bucket array obtained from m_allocate_buckets; n+1 matches
// the extra sentinel slot allocated there.  Does not touch the nodes.
959 template <typename K, typename V,
960 typename A, typename Ex, typename Eq,
961 typename H1, typename H2, typename H, typename RP,
962 bool c, bool m, bool u>
964 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>
965 ::m_deallocate_buckets (node** p, size_type n)
967 bucket_allocator_t alloc(m_node_allocator);
968 alloc.deallocate(p, n+1);
// Construct an empty hashtable.  The functor/policy state lives in the
// base classes; bucket_hint is rounded up to a policy-approved bucket
// count before the single bucket-array allocation.
971 template <typename K, typename V,
972 typename A, typename Ex, typename Eq,
973 typename H1, typename H2, typename H, typename RP,
974 bool c, bool m, bool u>
975 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>
976 ::hashtable(size_type bucket_hint,
977 const H1& h1, const H2& h2, const H& h,
978 const Eq& eq, const Ex& exk,
979 const allocator_type& a)
980 : Internal::rehash_base<RP,hashtable> (),
981 Internal::hash_code_base<K,V,Ex,Eq,H1,H2,H,c> (exk, eq, h1, h2, h),
982 Internal::map_base<K,V,Ex,u,hashtable> (),
// Let the policy pick the actual bucket count (e.g. next prime).
988 m_bucket_count = m_rehash_policy.next_bkt(bucket_hint);
989 m_buckets = m_allocate_buckets (m_bucket_count);
// Construct from the range [f, l).  The initial bucket count is the
// larger of the rounded-up hint and what the policy wants for
// distance(f, l) elements, so inserting the range normally needs no
// rehash.
992 template <typename K, typename V,
993 typename A, typename Ex, typename Eq,
994 typename H1, typename H2, typename H, typename RP,
995 bool c, bool m, bool u>
996 template <typename InIter>
997 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>
998 ::hashtable(InIter f, InIter l,
999 size_type bucket_hint,
1000 const H1& h1, const H2& h2, const H& h,
1001 const Eq& eq, const Ex& exk,
1002 const allocator_type& a)
1003 : Internal::rehash_base<RP,hashtable> (),
1004 Internal::hash_code_base<K,V,Ex,Eq,H1,H2,H,c> (exk, eq, h1, h2, h),
1005 Internal::map_base<K,V,Ex,u,hashtable> (),
1006 m_node_allocator(a),
1008 m_element_count (0),
// distance_fw is distance() for forward iterators, 0 for pure input
// iterators (per its use elsewhere in this file).
1011 m_bucket_count = std::max(m_rehash_policy.next_bkt(bucket_hint),
1012 m_rehash_policy.bkt_for_elements(Internal::distance_fw(f, l)));
1013 m_buckets = m_allocate_buckets (m_bucket_count);
// Exception-cleanup path: if inserting the range throws, release the
// bucket array before rethrowing.
1020 m_deallocate_buckets (m_buckets, m_bucket_count);
// Copy constructor.  Functor, policy, and cached-hash configuration are
// copied via the base classes; then each bucket chain is deep-copied,
// preserving the order of elements within every bucket.
1025 template <typename K, typename V,
1026 typename A, typename Ex, typename Eq,
1027 typename H1, typename H2, typename H, typename RP,
1028 bool c, bool m, bool u>
1029 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>
1030 ::hashtable(const hashtable& ht)
1031 : Internal::rehash_base<RP,hashtable> (ht),
1032 Internal::hash_code_base<K,V,Ex,Eq,H1,H2,H,c> (ht),
1033 Internal::map_base<K,V,Ex,u,hashtable> (ht),
1034 m_node_allocator(ht.get_allocator()),
1035 m_bucket_count (ht.m_bucket_count),
1036 m_element_count (ht.m_element_count),
1037 m_rehash_policy (ht.m_rehash_policy)
1039 m_buckets = m_allocate_buckets (m_bucket_count);
1041 for (size_t i = 0; i < ht.m_bucket_count; ++i) {
1042 node* n = ht.m_buckets[i];
// tail always addresses the link through which the next copied node is
// appended, keeping source chain order.
1043 node** tail = m_buckets + i;
// NOTE(review): m_allocate_node is declared above to take a value_type;
// passing the node pointer n presumably relies on an overload or should
// be n->m_v -- confirm against the full source.
1045 *tail = m_allocate_node (n);
// NOTE(review): *tail is a node*, so member access should be '->' not
// '.'; as written this line should not compile -- verify.
1046 (*tail).copy_code_from (n);
1047 tail = &((*tail)->m_next);
// Exception-cleanup path: destroy what was copied so far and release
// the bucket array before rethrowing.
1054 m_deallocate_buckets (m_buckets, m_bucket_count);
// Copy assignment.  The body is not visible in this excerpt; presumably
// the usual copy-and-swap (build a temporary copy of ht, swap with
// *this, return *this) -- TODO confirm against the full source.
1059 template <typename K, typename V,
1060 typename A, typename Ex, typename Eq,
1061 typename H1, typename H2, typename H, typename RP,
1062 bool c, bool m, bool u>
1063 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>&
1064 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::operator= (const hashtable& ht)
// Destructor.  Releases the bucket pointer array; destruction of the
// nodes themselves happens before this line (not visible in this
// excerpt).
1071 template <typename K, typename V,
1072 typename A, typename Ex, typename Eq,
1073 typename H1, typename H2, typename H, typename RP,
1074 bool c, bool m, bool u>
1075 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::~hashtable()
1078 m_deallocate_buckets(m_buckets, m_bucket_count);
// Exchange all mutable state with x in O(1): base-class functor state,
// then the four members.  No element is copied or moved.
1081 template <typename K, typename V,
1082 typename A, typename Ex, typename Eq,
1083 typename H1, typename H2, typename H, typename RP,
1084 bool c, bool m, bool u>
1085 void hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::swap (hashtable& x)
1087 // The only base class with member variables is hash_code_base. We
1088 // define hash_code_base::m_swap because different specializations
1089 // have different members.
1090 Internal::hash_code_base<K, V, Ex, Eq, H1, H2, H, c>::m_swap(x);
// Allocators are deliberately not swapped while LWG issue 431 (swapping
// containers with unequal allocators) remains open.
1092 // open LWG issue 431
1093 // std::swap(m_node_allocator, x.m_node_allocator);
1094 std::swap (m_rehash_policy, x.m_rehash_policy);
1095 std::swap (m_buckets, x.m_buckets);
1096 std::swap (m_bucket_count, x.m_bucket_count);
1097 std::swap (m_element_count, x.m_element_count);
// Install a new rehash policy.  If the new policy wants more buckets
// for the current element count, grow immediately (the m_rehash call
// guarded by this 'if' is on a line not visible in this excerpt).
// Never shrinks.
1100 template <typename K, typename V,
1101 typename A, typename Ex, typename Eq,
1102 typename H1, typename H2, typename H, typename RP,
1103 bool c, bool m, bool u>
1105 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::rehash_policy (const RP& pol)
1107 m_rehash_policy = pol;
1108 size_type n_bkt = pol.bkt_for_elements(m_element_count);
1109 if (n_bkt > m_bucket_count)
// Find an element with key k.  Hash once, map the code to a bucket,
// then scan just that bucket's chain; end() when no match exists.
1113 template <typename K, typename V,
1114 typename A, typename Ex, typename Eq,
1115 typename H1, typename H2, typename H, typename RP,
1116 bool c, bool m, bool u>
1117 typename hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::iterator
1118 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::find (const key_type& k)
1120 typename hashtable::hash_code_t code = this->m_hash_code (k);
1121 std::size_t n = this->bucket_index (k, code, this->bucket_count());
1122 node* p = find_node (m_buckets[n], k, code);
// The iterator also carries the bucket slot so it can advance across
// bucket boundaries.
1123 return p ? iterator(p, m_buckets + n) : this->end();
// const overload of find; identical logic, returns a const_iterator.
1126 template <typename K, typename V,
1127 typename A, typename Ex, typename Eq,
1128 typename H1, typename H2, typename H, typename RP,
1129 bool c, bool m, bool u>
1130 typename hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::const_iterator
1131 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::find (const key_type& k) const
1133 typename hashtable::hash_code_t code = this->m_hash_code (k);
1134 std::size_t n = this->bucket_index (k, code, this->bucket_count());
1135 node* p = find_node (m_buckets[n], k, code);
1136 return p ? const_iterator(p, m_buckets + n) : this->end();
// Count elements with key k by scanning the single bucket k hashes to
// and tallying matches (the counter increment and final return are on
// lines not visible in this excerpt).  0 or 1 for unique-key tables.
1139 template <typename K, typename V,
1140 typename A, typename Ex, typename Eq,
1141 typename H1, typename H2, typename H, typename RP,
1142 bool c, bool m, bool u>
1143 typename hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::size_type
1144 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::count (const key_type& k) const
1146 typename hashtable::hash_code_t code = this->m_hash_code (k);
1147 std::size_t n = this->bucket_index (k, code, this->bucket_count());
// compare() checks the cached hash code first (when cached), then the
// key equality predicate.
1149 for (node* p = m_buckets[n]; p ; p = p->m_next)
1150 if (this->compare (k, code, p))
// Return the maximal range [first, last) of elements equal to k.
// Equal elements always sit adjacent within one bucket, so the range
// is the run starting at the first match and ending at the first
// non-match in that bucket's chain.
1155 template <typename K, typename V,
1156 typename A, typename Ex, typename Eq,
1157 typename H1, typename H2, typename H, typename RP,
1158 bool c, bool m, bool u>
1159 std::pair<typename hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::iterator,
1160 typename hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::iterator>
1161 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::equal_range (const key_type& k)
1163 typename hashtable::hash_code_t code = this->m_hash_code (k);
1164 std::size_t n = this->bucket_index (k, code, this->bucket_count());
1165 node** head = m_buckets + n;
1166 node* p = find_node (*head, k, code);
// p1 walks past the run of equal elements; it ends null if the run
// reaches the end of the chain.
1169 node* p1 = p->m_next;
1170 for (; p1 ; p1 = p1->m_next)
1171 if (!this->compare (k, code, p1))
1173 iterator first(p, head);
1174 iterator last(p1, head);
// Reached only when the run was chain-terminal (guard not visible in
// this excerpt): step last into the next non-empty bucket.
1176 last.m_incr_bucket();
1177 return std::make_pair(first, last);
// No match at all: empty range at end().
1180 return std::make_pair (this->end(), this->end());
// const overload of equal_range; identical logic with const_iterators.
1183 template <typename K, typename V,
1184 typename A, typename Ex, typename Eq,
1185 typename H1, typename H2, typename H, typename RP,
1186 bool c, bool m, bool u>
1187 std::pair<typename hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::const_iterator,
1188 typename hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::const_iterator>
1189 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::equal_range (const key_type& k) const
1191 typename hashtable::hash_code_t code = this->m_hash_code (k);
1192 std::size_t n = this->bucket_index (k, code, this->bucket_count());
1193 node** head = m_buckets + n;
1194 node* p = find_node (*head, k, code);
// Scan past the adjacent run of elements equal to k.
1197 node* p1 = p->m_next;
1198 for (; p1 ; p1 = p1->m_next)
1199 if (!this->compare (k, code, p1))
1201 const_iterator first(p, head);
1202 const_iterator last(p1, head);
// Reached only when the run ended the chain (guard not visible here).
1204 last.m_incr_bucket();
1205 return std::make_pair(first, last);
1208 return std::make_pair (this->end(), this->end());
1211 // Find the node whose key compares equal to k, beginning the search
1212 // at p (usually the head of a bucket). Return nil if no node is found.
1213 template <typename K, typename V,
1214 typename A, typename Ex, typename Eq,
1215 typename H1, typename H2, typename H, typename RP,
1216 bool c, bool m, bool u>
1217 typename hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::node*
1218 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>
1219 ::find_node (node* p, const key_type& k, typename hashtable::hash_code_t code)
// Linear scan of one chain; compare() uses the cached code when
// available, avoiding key comparisons on most mismatches.  The matching
// and not-found returns are on lines not visible in this excerpt.
1221 for ( ; p ; p = p->m_next)
1222 if (this->compare (k, code, p))
1227 // Insert v if no element with its key is already present.
1228 template <typename K, typename V,
1229 typename A, typename Ex, typename Eq,
1230 typename H1, typename H2, typename H, typename RP,
1231 bool c, bool m, bool u>
1232 std::pair<typename hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::iterator, bool>
1233 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>
1234 ::insert (const value_type& v, std::tr1::true_type)
1236 const key_type& k = this->m_extract(v);
1237 typename hashtable::hash_code_t code = this->m_hash_code (k);
1238 size_type n = this->bucket_index (k, code, m_bucket_count);
// Duplicate check happens before any allocation or rehash, so a failed
// insert leaves the table completely untouched.
1240 if (node* p = find_node (m_buckets[n], k, code))
1241 return std::make_pair(iterator(p, m_buckets + n), false);
1243 std::pair<bool, size_t> do_rehash
1244 = m_rehash_policy.need_rehash(m_bucket_count, m_element_count, 1);
1246 // Allocate the new node before doing the rehash so that we don't
1247 // do a rehash if the allocation throws.
1248 node* new_node = m_allocate_node (v);
1251 if (do_rehash.first) {
// Recompute the bucket index for the post-rehash bucket count before
// actually rehashing.
1252 n = this->bucket_index (k, code, do_rehash.second);
1253 m_rehash(do_rehash.second);
// Link the new node at the head of its bucket chain.
1256 new_node->m_next = m_buckets[n];
1257 m_buckets[n] = new_node;
1259 return std::make_pair(iterator (new_node, m_buckets + n), true);
// Exception-cleanup path: undo the node allocation before rethrowing.
1262 m_deallocate_node (new_node);
1267 // Insert v unconditionally.
1268 template <typename K, typename V,
1269 typename A, typename Ex, typename Eq,
1270 typename H1, typename H2, typename H, typename RP,
1271 bool c, bool m, bool u>
1272 typename hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::iterator
1273 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>
1274 ::insert (const value_type& v, std::tr1::false_type)
// Rehash first (if needed) so the bucket index computed below is valid
// for the final bucket count.
1276 std::pair<bool, std::size_t> do_rehash
1277 = m_rehash_policy.need_rehash(m_bucket_count, m_element_count, 1);
1278 if (do_rehash.first)
1279 m_rehash(do_rehash.second);
1281 const key_type& k = this->m_extract(v);
1282 typename hashtable::hash_code_t code = this->m_hash_code (k);
1283 size_type n = this->bucket_index (k, code, m_bucket_count);
1285 node* new_node = m_allocate_node (v);
// If an equal element already exists, splice the new node right after
// it so equal elements stay adjacent (equal_range depends on this);
// otherwise (else-branch guard not visible here) link at bucket head.
1286 node* prev = find_node (m_buckets[n], k, code);
1288 new_node->m_next = prev->m_next;
1289 prev->m_next = new_node;
1292 new_node->m_next = m_buckets[n];
1293 m_buckets[n] = new_node;
1297 return iterator (new_node, m_buckets + n);
// Insert the range [first, last).  One up-front rehash sized for the
// whole range (for forward iterators) avoids repeated growth, then the
// elements go through the single-element insert.
1300 template <typename K, typename V,
1301 typename A, typename Ex, typename Eq,
1302 typename H1, typename H2, typename H, typename RP,
1303 bool c, bool m, bool u>
1304 template <typename InIter>
1306 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::insert(InIter first, InIter last)
1308 size_type n_elt = Internal::distance_fw (first, last);
1309 std::pair<bool, std::size_t> do_rehash
1310 = m_rehash_policy.need_rehash(m_bucket_count, m_element_count, n_elt);
1311 if (do_rehash.first)
1312 m_rehash(do_rehash.second);
// Dispatches to the unique/multi single-element insert as appropriate.
1314 for (; first != last; ++first)
1315 this->insert (*first);
1318 // XXX We're following the TR in giving this a return type of void,
1319 // but that ought to change. The return type should be const_iterator,
1320 // and it should return the iterator following the one we've erased.
1321 // That would simplify range erase.
1322 template <typename K, typename V,
1323 typename A, typename Ex, typename Eq,
1324 typename H1, typename H2, typename H, typename RP,
1325 bool c, bool m, bool u>
1326 void hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::erase (const_iterator i)
// Singly-linked chains: unlinking p requires finding its predecessor.
1328 node* p = i.m_cur_node;
1329 node* cur = *i.m_cur_bucket;
// Case 1 (guard not visible here): p is the chain head -- relink the
// bucket slot past it.
1331 *i.m_cur_bucket = cur->m_next;
// Case 2: walk the chain (loop frame not visible here) until next is
// p, then splice it out of its predecessor.
1333 node* next = cur->m_next;
1338 cur->m_next = next->m_next;
// Finally destroy the unlinked node (element count bookkeeping is on a
// line not visible in this excerpt).
1341 m_deallocate_node (p);
// Erase every element with key k; returns the number erased.  Because
// equal elements are adjacent in one chain, this is: advance slot to
// the first match, then unlink matches until the run ends.
1345 template <typename K, typename V,
1346 typename A, typename Ex, typename Eq,
1347 typename H1, typename H2, typename H, typename RP,
1348 bool c, bool m, bool u>
1349 typename hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::size_type
1350 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::erase(const key_type& k)
1352 typename hashtable::hash_code_t code = this->m_hash_code (k);
1353 size_type n = this->bucket_index (k, code, m_bucket_count);
// slot is a pointer-to-link so unlinking works uniformly whether the
// node is the bucket head or mid-chain.
1355 node** slot = m_buckets + n;
1356 while (*slot && ! this->compare (k, code, *slot))
1357 slot = &((*slot)->m_next);
1359 while (*slot && this->compare (k, code, *slot)) {
// NOTE(review): the only 'n' visible in this excerpt is the size_type
// bucket index above; the full source presumably declares a local
// node* (shadowing or otherwise) holding the unlinked node for this
// call -- confirm before relying on this line.
1362 m_deallocate_node (n);
1367 // ??? This could be optimized by taking advantage of the bucket
1368 // structure, but it's not clear that it's worth doing. It probably
1369 // wouldn't even be an optimization unless the load factor is large.
1370 template <typename K, typename V,
1371 typename A, typename Ex, typename Eq,
1372 typename H1, typename H2, typename H, typename RP,
1373 bool c, bool m, bool u>
1374 void hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>
1375 ::erase(const_iterator first, const_iterator last)
// Erase one element at a time: snapshot the successor of 'first'
// before erasing it (single-element erase invalidates 'first'), then
// step 'first' to the snapshot.  The advance/erase/step lines are not
// visible in this excerpt.
1377 while (first != last) {
1378 const_iterator next = first;
// Destroy every element and reset the element count.  The bucket array
// is kept at its current size (its pointers are presumably re-zeroed on
// a line not visible in this excerpt); bucket_count() is unchanged.
1385 template <typename K, typename V,
1386 typename A, typename Ex, typename Eq,
1387 typename H1, typename H2, typename H, typename RP,
1388 bool c, bool m, bool u>
1389 void hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::clear()
1391 m_deallocate_nodes (m_buckets, m_bucket_count);
1392 m_element_count = 0;
// Unconditionally resize the bucket array to N buckets, relinking every
// existing node (no element is copied or reallocated).  Nodes are
// pushed onto the head of their new chain, so each chain's order is
// reversed relative to the old table -- harmless for unordered
// containers.
1395 template <typename K, typename V,
1396 typename A, typename Ex, typename Eq,
1397 typename H1, typename H2, typename H, typename RP,
1398 bool c, bool m, bool u>
1400 hashtable<K,V,A,Ex,Eq,H1,H2,H,RP,c,m,u>::m_rehash (size_type N)
1402 node** new_array = m_allocate_buckets (N);
1404 for (size_type i = 0; i < m_bucket_count; ++i)
// Pop each node off the old chain and push it onto its new bucket.
1405 while (node* p = m_buckets[i]) {
// Recomputes (or reuses the cached) hash code to index into N buckets.
1406 size_type new_index = this->bucket_index (p, N);
1407 m_buckets[i] = p->m_next;
1408 p->m_next = new_array[new_index];
1409 new_array[new_index] = p;
1411 m_deallocate_buckets (m_buckets, m_bucket_count);
1413 m_buckets = new_array;
1416 // A failure here means that a hash function threw an exception.
1417 // We can't restore the previous state without calling the hash
1418 // function again, so the only sensible recovery is to delete
// Exception-recovery path: both the partially-filled new array and the
// remaining old nodes are destroyed, leaving an empty but valid table.
1420 m_deallocate_nodes (new_array, N);
1421 m_deallocate_buckets (new_array, N);
1422 m_deallocate_nodes (m_buckets, m_bucket_count);
1423 m_element_count = 0;
1428 } } // Namespace std::tr1
1430 #endif /* GNU_LIBSTDCXX_TR1_HASHTABLE_ */