1 // Internal header for TR1 unordered_set and unordered_map -*- C++ -*-
3 // Copyright (C) 2005, 2006 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 2, or (at your option)
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // You should have received a copy of the GNU General Public License along
17 // with this library; see the file COPYING. If not, write to the Free
18 // Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
21 // As a special exception, you may use this file as part of a free software
22 // library without restriction. Specifically, if other files instantiate
23 // templates or use macros or inline functions from this file, or you compile
24 // this file and link it with other files to produce an executable, this
25 // file does not by itself cause the resulting executable to be covered by
26 // the GNU General Public License. This exception does not however
27 // invalidate any other reasons why the executable file might be covered by
28 // the GNU General Public License.
31 * This is a TR1 C++ Library header.
34 // This header file defines std::tr1::hashtable, which is used to
35 // implement std::tr1::unordered_set, std::tr1::unordered_map,
36 // std::tr1::unordered_multiset, and std::tr1::unordered_multimap.
37 // hashtable has many template parameters, partly to accommodate
38 // the differences between those four classes and partly to
39 // accommodate policy choices that go beyond what TR1 calls for.
41 // ??? Arguably this should be Internal::hashtable, not std::tr1::hashtable.
43 // Class template hashtable attempts to encapsulate all reasonable
44 // variation among hash tables that use chaining. It does not handle
48 // M. Austern, "A Proposal to Add Hash Tables to the Standard
49 // Library (revision 4)," WG21 Document N1456=03-0039, 2003.
50 // D. E. Knuth, The Art of Computer Programming, v. 3, Sorting and Searching.
51 // A. Tavori and V. Dreizin, "Generic Associative Containers", 2004.
54 #ifndef GNU_LIBSTDCXX_TR1_HASHTABLE_
55 #define GNU_LIBSTDCXX_TR1_HASHTABLE_
57 #include <utility> // For std::pair
63 #include <bits/functexcept.h>
64 #include <tr1/type_traits> // For true_type and false_type
66 //----------------------------------------------------------------------
// Compile-time type selector: IF<b, T, F>::type is T when b is true
// and F otherwise.  (This TR1-era helper predates std::conditional.)
// The primary template is declared but never defined; only the two
// boolean specializations are usable.
template<bool Flag, typename IfTrue, typename IfFalse>
struct IF;

template<typename IfTrue, typename IfFalse>
struct IF<true, IfTrue, IfFalse>
{ typedef IfTrue type; };

template<typename IfTrue, typename IfFalse>
struct IF<false, IfTrue, IfFalse>
{ typedef IfFalse type; };
82 // Helper function: return distance(first, last) for forward
83 // iterators, or 0 for input iterators.
// Input-iterator overload: the range cannot be traversed without
// consuming it, so report 0 (callers treat 0 as "size unknown").
template<class Iterator>
inline typename std::iterator_traits<Iterator>::difference_type
distance_fw(Iterator first, Iterator last, std::input_iterator_tag)
{ return 0; }
// Forward (or stronger) iterators: the range is multi-pass, so the
// true distance is cheap to compute; delegate to std::distance.
template<class Iterator>
inline typename std::iterator_traits<Iterator>::difference_type
distance_fw(Iterator begin, Iterator end, std::forward_iterator_tag)
{
  return std::distance(begin, end);
}
// Dispatcher: picks the overload above based on the iterator
// category, so forward-or-better iterators report the real distance
// and input iterators report 0.
template<class Iterator>
inline typename std::iterator_traits<Iterator>::difference_type
distance_fw(Iterator first, Iterator last)
{
  typedef typename std::iterator_traits<Iterator>::iterator_category tag;
  return distance_fw(first, last, tag());
}
102 } // namespace Internal
105 //----------------------------------------------------------------------
106 // Auxiliary types used for all instantiations of hashtable: nodes
109 // Nodes, used to wrap elements stored in the hash table. A policy
110 // template parameter of class template hashtable controls whether
111 // nodes also store a hash code. In some cases (e.g. strings) this may
112 // be a performance win.
// A node in a bucket chain.  The cache_hash_code policy parameter
// selects whether the node also stores the element's hash code,
// which can be a performance win when hashing is expensive (e.g.
// strings).  Both specializations expose m_v (the element) and
// m_next (the chain link), as required by the iterator code below.
template<typename Value, bool cache_hash_code>
struct hash_node;

// Node that caches the hash code alongside the value.
template<typename Value>
struct hash_node<Value, true>
{
  Value m_v;
  std::size_t hash_code;
  hash_node* m_next;
};

// Node that stores only the value and the chain pointer.
template<typename Value>
struct hash_node<Value, false>
{
  Value m_v;
  hash_node* m_next;
};
134 // Local iterators, used to iterate within a bucket but not between
// Base for the bucket-local iterators: wraps a pointer to the
// current node within a single bucket's chain.
// NOTE(review): fragmentary listing — the constructor's initializer
// and the name/signature of the increment member are not visible here.
137 template<typename Value, bool cache>
138 struct node_iterator_base
// A null node pointer serves as the past-the-end local iterator.
140 node_iterator_base(hash_node<Value, cache>* p)
// Step to the next node in the same bucket chain.
145 { m_cur = m_cur->m_next; }
// The node currently designated by this iterator.
147 hash_node<Value, cache>* m_cur;
150 template<typename Value, bool cache>
152 operator==(const node_iterator_base<Value, cache>& x,
153 const node_iterator_base<Value, cache>& y)
154 { return x.m_cur == y.m_cur; }
156 template<typename Value, bool cache>
158 operator!=(const node_iterator_base<Value, cache>& x,
159 const node_iterator_base<Value, cache>& y)
160 { return x.m_cur != y.m_cur; }
// Mutable local iterator over one bucket's chain.  pointer and
// reference collapse to their const flavours when constant_iterators
// is true (the set-like containers).
// NOTE(review): fragmentary listing — the struct header line and the
// operator*/operator->/operator++ signatures are not visible here.
162 template<typename Value, bool constant_iterators, bool cache>
164 : public node_iterator_base<Value, cache>
166 typedef Value value_type;
167 typedef typename IF<constant_iterators, const Value*, Value*>::type
169 typedef typename IF<constant_iterators, const Value&, Value&>::type
171 typedef std::ptrdiff_t difference_type;
172 typedef std::forward_iterator_tag iterator_category;
175 : node_iterator_base<Value, cache>(0) { }
178 node_iterator(hash_node<Value, cache>* p)
179 : node_iterator_base<Value, cache>(p) { }
// Dereference yields the element stored in the current node.
183 { return this->m_cur->m_v; }
187 { return &this->m_cur->m_v; }
// Post-increment: copy, advance, return the copy.
199 node_iterator tmp(*this);
// Const local iterator; also implicitly convertible from the mutable
// node_iterator (the converting constructor below).
// NOTE(review): fragmentary listing — operator*/->/++ signatures and
// some bodies are not visible here.
205 template<typename Value, bool constant_iterators, bool cache>
206 struct node_const_iterator
207 : public node_iterator_base<Value, cache>
209 typedef Value value_type;
210 typedef const Value* pointer;
211 typedef const Value& reference;
212 typedef std::ptrdiff_t difference_type;
213 typedef std::forward_iterator_tag iterator_category;
215 node_const_iterator()
216 : node_iterator_base<Value, cache>(0) { }
219 node_const_iterator(hash_node<Value, cache>* p)
220 : node_iterator_base<Value, cache>(p) { }
// Conversion from the mutable local iterator.
222 node_const_iterator(const node_iterator<Value, constant_iterators,
224 : node_iterator_base<Value, cache>(x.m_cur) { }
228 { return this->m_cur->m_v; }
232 { return &this->m_cur->m_v; }
// Post-increment: copy, advance, return the copy.
244 node_const_iterator tmp(*this);
// Base for the whole-table iterators: carries both the current node
// and the current bucket slot, so it can hop to the next non-empty
// bucket when a chain is exhausted.
// NOTE(review): fragmentary listing — the increment member's
// signature and part of its body are not visible here.
250 template<typename Value, bool cache>
251 struct hashtable_iterator_base
253 hashtable_iterator_base(hash_node<Value, cache>* node,
254 hash_node<Value, cache>** bucket)
255 : m_cur_node(node), m_cur_bucket(bucket) { }
// Advance within the current bucket's chain.
260 m_cur_node = m_cur_node->m_next;
268 hash_node<Value, cache>* m_cur_node;
269 hash_node<Value, cache>** m_cur_bucket;
272 // Global iterators, used for arbitrary iteration within a hash
273 // table. Larger and more expensive than local iterators.
// Out-of-line member of hashtable_iterator_base: scan forward to the
// next non-empty bucket and land on its first node.
// NOTE(review): fragmentary listing — the member's name, return type,
// and the loop body (presumably advancing m_cur_bucket) are not
// visible here; confirm against the full header before editing.
274 template<typename Value, bool cache>
276 hashtable_iterator_base<Value, cache>::
281 // This loop requires the bucket array to have a non-null sentinel.
282 while (!*m_cur_bucket)
284 m_cur_node = *m_cur_bucket;
287 template<typename Value, bool cache>
289 operator==(const hashtable_iterator_base<Value, cache>& x,
290 const hashtable_iterator_base<Value, cache>& y)
291 { return x.m_cur_node == y.m_cur_node; }
293 template<typename Value, bool cache>
295 operator!=(const hashtable_iterator_base<Value, cache>& x,
296 const hashtable_iterator_base<Value, cache>& y)
297 { return x.m_cur_node != y.m_cur_node; }
// Mutable whole-table iterator.  Larger than the local iterators
// because it also tracks the bucket slot (see comment above).
// NOTE(review): fragmentary listing — operator*/->/++ signatures and
// the default constructor's declaration line are not visible here.
299 template<typename Value, bool constant_iterators, bool cache>
300 struct hashtable_iterator
301 : public hashtable_iterator_base<Value, cache>
303 typedef Value value_type;
304 typedef typename IF<constant_iterators, const Value*, Value*>::type
306 typedef typename IF<constant_iterators, const Value&, Value&>::type
308 typedef std::ptrdiff_t difference_type;
309 typedef std::forward_iterator_tag iterator_category;
312 : hashtable_iterator_base<Value, cache>(0, 0) { }
314 hashtable_iterator(hash_node<Value, cache>* p,
315 hash_node<Value, cache>** b)
316 : hashtable_iterator_base<Value, cache>(p, b) { }
// Position at the first node of bucket b.
319 hashtable_iterator(hash_node<Value, cache>** b)
320 : hashtable_iterator_base<Value, cache>(*b, b) { }
324 { return this->m_cur_node->m_v; }
328 { return &this->m_cur_node->m_v; }
// Post-increment: copy, advance, return the copy.
340 hashtable_iterator tmp(*this);
// Const whole-table iterator; implicitly convertible from the
// mutable hashtable_iterator (converting constructor below).
// NOTE(review): fragmentary listing — operator bodies for ++ and the
// declarations around lines 379/386 are not fully visible here.
346 template<typename Value, bool constant_iterators, bool cache>
347 struct hashtable_const_iterator
348 : public hashtable_iterator_base<Value, cache>
350 typedef Value value_type;
351 typedef const Value* pointer;
352 typedef const Value& reference;
353 typedef std::ptrdiff_t difference_type;
354 typedef std::forward_iterator_tag iterator_category;
356 hashtable_const_iterator()
357 : hashtable_iterator_base<Value, cache>(0, 0) { }
359 hashtable_const_iterator(hash_node<Value, cache>* p,
360 hash_node<Value, cache>** b)
361 : hashtable_iterator_base<Value, cache>(p, b) { }
// Position at the first node of bucket b.
364 hashtable_const_iterator(hash_node<Value, cache>** b)
365 : hashtable_iterator_base<Value, cache>(*b, b) { }
// Conversion from the mutable table iterator.
367 hashtable_const_iterator(const hashtable_iterator<Value,
368 constant_iterators, cache>& x)
369 : hashtable_iterator_base<Value, cache>(x.m_cur_node, x.m_cur_bucket) { }
373 { return this->m_cur_node->m_v; }
377 { return &this->m_cur_node->m_v; }
// Pre-increment returns *this by reference.
379 hashtable_const_iterator&
// Post-increment returns a copy of the prior position.
386 hashtable_const_iterator
389 hashtable_const_iterator tmp(*this);
394 } // namespace Internal
397 // ----------------------------------------------------------------------
398 // Many of class template hashtable's template parameters are policy
399 // classes. These are defaults for the policies.
403 // The two key extraction policies used by the *set and *map variants.
// Set-variant extractor: the element is its own key (presumably the
// "identity" functor — its struct header is not visible in this
// listing; confirm against the full header).
408 operator()(const T& t) const
// Map-variant extractor (extract1st, per its use in map_base below):
// the key is the first member of the stored pair.
412 template<typename Pair>
415 const typename Pair::first_type&
416 operator()(const Pair& p) const
420 // Default range hashing function: use division to fold a large number
421 // into the range [0, N).
// Default range-hashing function: folds an arbitrary hash value r
// into [0, N) by taking the remainder modulo N, as stated by the
// comment above.  (The listing was missing the operator() body and
// closing braces; restored to match that documented contract.)
struct mod_range_hashing
{
  typedef std::size_t first_argument_type;
  typedef std::size_t second_argument_type;
  typedef std::size_t result_type;

  result_type
  operator()(first_argument_type r, second_argument_type N) const
  { return r % N; }
};
433 // Default ranged hash function H. In principle it should be a
434 // function object composed from objects of type H1 and H2 such that
435 // h(k, N) = h2(h1(k), N), but that would mean making extra copies of
436 // h1 and h2. So instead we'll just use a tag to tell class template
437 // hashtable to do that composition.
// Empty tag type: signals class template hashtable to compose the
// ranged hash itself as h(k, N) = h2(h1(k), N), avoiding the extra
// copies of h1 and h2 a real composed function object would need.
struct default_ranged_hash { };
440 // Default value for rehash policy. Bucket size is (usually) the
441 // smallest prime that keeps the load factor small enough.
// Default rehash policy: bucket counts are drawn from a table of
// primes, growth is geometric, and the element count that forces the
// next resize is cached in m_next_resize (mutable so const queries
// can refresh it).
// NOTE(review): fragmentary listing — return-type lines for the
// member declarations are not visible here.
442 struct prime_rehash_policy
// z is the maximum load factor; defaults to 1 element per bucket.
444 prime_rehash_policy(float z = 1.0);
447 max_load_factor() const;
449 // Return a bucket size no smaller than n.
451 next_bkt(std::size_t n) const;
453 // Return a bucket count appropriate for n elements
455 bkt_for_elements(std::size_t n) const;
457 // n_bkt is current bucket count, n_elt is current element count,
458 // and n_ins is number of elements to be inserted. Do we need to
459 // increase bucket count? If so, return make_pair(true, n), where n
460 // is the new bucket count. If not, return make_pair(false, 0).
461 std::pair<bool, std::size_t>
462 need_rehash(std::size_t n_bkt, std::size_t n_elt, std::size_t n_ins) const;
464 float m_max_load_factor;
465 float m_growth_factor;
// Cached element count at which the next rehash becomes necessary.
466 mutable std::size_t m_next_resize;
469 // XXX This is a hack. prime_rehash_policy's member functions, and
470 // certainly the list of primes, should be defined in a .cc file.
471 // We're temporarily putting them in a header because we don't have a
472 // place to put TR1 .cc files yet. There's no good reason for any of
473 // prime_rehash_policy's member functions to be inline, and there's
474 // certainly no good reason for X<> to exist at all.
// Prime table used by prime_rehash_policy.  X<> exists only so the
// table can live in a header (see the XXX comment above): as a class
// template's static member it gets one definition across TUs.  On
// 64-bit targets (sizeof(unsigned long) == 8) an extra 48 primes
// extend the table beyond 2^32.
// NOTE(review): fragmentary listing — the struct header for X, the
// lt/helper template at line 478, and the braces around the array
// initializer are not visible here.
478 template<typename X, typename Y>
484 template<int ulongsize = sizeof(unsigned long)>
487 static const int n_primes = ulongsize != 8 ? 256 : 256 + 48;
488 static const unsigned long primes[256 + 48 + 1];
// Out-of-line definitions of the static members.
491 template<int ulongsize>
492 const int X<ulongsize>::n_primes;
494 template<int ulongsize>
495 const unsigned long X<ulongsize>::primes[256 + 48 + 1] =
497 2ul, 3ul, 5ul, 7ul, 11ul, 13ul, 17ul, 19ul, 23ul, 29ul, 31ul,
498 37ul, 41ul, 43ul, 47ul, 53ul, 59ul, 61ul, 67ul, 71ul, 73ul, 79ul,
499 83ul, 89ul, 97ul, 103ul, 109ul, 113ul, 127ul, 137ul, 139ul, 149ul,
500 157ul, 167ul, 179ul, 193ul, 199ul, 211ul, 227ul, 241ul, 257ul,
501 277ul, 293ul, 313ul, 337ul, 359ul, 383ul, 409ul, 439ul, 467ul,
502 503ul, 541ul, 577ul, 619ul, 661ul, 709ul, 761ul, 823ul, 887ul,
503 953ul, 1031ul, 1109ul, 1193ul, 1289ul, 1381ul, 1493ul, 1613ul,
504 1741ul, 1879ul, 2029ul, 2179ul, 2357ul, 2549ul, 2753ul, 2971ul,
505 3209ul, 3469ul, 3739ul, 4027ul, 4349ul, 4703ul, 5087ul, 5503ul,
506 5953ul, 6427ul, 6949ul, 7517ul, 8123ul, 8783ul, 9497ul, 10273ul,
507 11113ul, 12011ul, 12983ul, 14033ul, 15173ul, 16411ul, 17749ul,
508 19183ul, 20753ul, 22447ul, 24281ul, 26267ul, 28411ul, 30727ul,
509 33223ul, 35933ul, 38873ul, 42043ul, 45481ul, 49201ul, 53201ul,
510 57557ul, 62233ul, 67307ul, 72817ul, 78779ul, 85229ul, 92203ul,
511 99733ul, 107897ul, 116731ul, 126271ul, 136607ul, 147793ul,
512 159871ul, 172933ul, 187091ul, 202409ul, 218971ul, 236897ul,
513 256279ul, 277261ul, 299951ul, 324503ul, 351061ul, 379787ul,
514 410857ul, 444487ul, 480881ul, 520241ul, 562841ul, 608903ul,
515 658753ul, 712697ul, 771049ul, 834181ul, 902483ul, 976369ul,
516 1056323ul, 1142821ul, 1236397ul, 1337629ul, 1447153ul, 1565659ul,
517 1693859ul, 1832561ul, 1982627ul, 2144977ul, 2320627ul, 2510653ul,
518 2716249ul, 2938679ul, 3179303ul, 3439651ul, 3721303ul, 4026031ul,
519 4355707ul, 4712381ul, 5098259ul, 5515729ul, 5967347ul, 6456007ul,
520 6984629ul, 7556579ul, 8175383ul, 8844859ul, 9569143ul, 10352717ul,
521 11200489ul, 12117689ul, 13109983ul, 14183539ul, 15345007ul,
522 16601593ul, 17961079ul, 19431899ul, 21023161ul, 22744717ul,
523 24607243ul, 26622317ul, 28802401ul, 31160981ul, 33712729ul,
524 36473443ul, 39460231ul, 42691603ul, 46187573ul, 49969847ul,
525 54061849ul, 58488943ul, 63278561ul, 68460391ul, 74066549ul,
526 80131819ul, 86693767ul, 93793069ul, 101473717ul, 109783337ul,
527 118773397ul, 128499677ul, 139022417ul, 150406843ul, 162723577ul,
528 176048909ul, 190465427ul, 206062531ul, 222936881ul, 241193053ul,
529 260944219ul, 282312799ul, 305431229ul, 330442829ul, 357502601ul,
530 386778277ul, 418451333ul, 452718089ul, 489790921ul, 529899637ul,
531 573292817ul, 620239453ul, 671030513ul, 725980837ul, 785430967ul,
532 849749479ul, 919334987ul, 994618837ul, 1076067617ul, 1164186217ul,
533 1259520799ul, 1362662261ul, 1474249943ul, 1594975441ul,
534 1725587117ul, 1866894511ul, 2019773507ul, 2185171673ul,
535 2364114217ul, 2557710269ul, 2767159799ul, 2993761039ul,
536 3238918481ul, 3504151727ul, 3791104843ul, 4101556399ul,
538 // Sentinel, so we don't have to test the result of lower_bound,
539 // or, on 64-bit machines, rest of the table.
540 ulongsize != 8 ? 4294967291ul : (unsigned long)6442450933ull,
541 (unsigned long)8589934583ull,
542 (unsigned long)12884901857ull, (unsigned long)17179869143ull,
543 (unsigned long)25769803693ull, (unsigned long)34359738337ull,
544 (unsigned long)51539607367ull, (unsigned long)68719476731ull,
545 (unsigned long)103079215087ull, (unsigned long)137438953447ull,
546 (unsigned long)206158430123ull, (unsigned long)274877906899ull,
547 (unsigned long)412316860387ull, (unsigned long)549755813881ull,
548 (unsigned long)824633720731ull, (unsigned long)1099511627689ull,
549 (unsigned long)1649267441579ull, (unsigned long)2199023255531ull,
550 (unsigned long)3298534883309ull, (unsigned long)4398046511093ull,
551 (unsigned long)6597069766607ull, (unsigned long)8796093022151ull,
552 (unsigned long)13194139533241ull, (unsigned long)17592186044399ull,
553 (unsigned long)26388279066581ull, (unsigned long)35184372088777ull,
554 (unsigned long)52776558133177ull, (unsigned long)70368744177643ull,
555 (unsigned long)105553116266399ull, (unsigned long)140737488355213ull,
556 (unsigned long)211106232532861ull, (unsigned long)281474976710597ull,
557 (unsigned long)562949953421231ull, (unsigned long)1125899906842597ull,
558 (unsigned long)2251799813685119ull, (unsigned long)4503599627370449ull,
559 (unsigned long)9007199254740881ull, (unsigned long)18014398509481951ull,
560 (unsigned long)36028797018963913ull, (unsigned long)72057594037927931ull,
561 (unsigned long)144115188075855859ull,
562 (unsigned long)288230376151711717ull,
563 (unsigned long)576460752303423433ull,
564 (unsigned long)1152921504606846883ull,
565 (unsigned long)2305843009213693951ull,
566 (unsigned long)4611686018427387847ull,
567 (unsigned long)9223372036854775783ull,
568 (unsigned long)18446744073709551557ull,
// Final entry repeated: the 64-bit sentinel.
569 (unsigned long)18446744073709551557ull
// Constructor: store the requested max load factor, grow by a factor
// of 2 on rehash, and start with no cached resize threshold.
// NOTE(review): fragmentary listing — "inline" specifiers, the empty
// ctor body, and max_load_factor's return-type line are not visible.
573 prime_rehash_policy::
574 prime_rehash_policy(float z)
575 : m_max_load_factor(z), m_growth_factor(2.f), m_next_resize(0)
// Accessor for the configured maximum load factor.
579 prime_rehash_policy::
580 max_load_factor() const
581 { return m_max_load_factor; }
583 // Return a prime no smaller than n.
// Smallest tabulated prime >= n.  The trailing sentinel guarantees
// lower_bound never returns the end pointer.  Also refreshes the
// cached resize threshold (*p buckets can hold *p * max_load_factor
// elements before the next rehash).
// NOTE(review): fragmentary listing — the return-type line, braces,
// and the final "return *p;" are not visible here.
585 prime_rehash_policy::
586 next_bkt(std::size_t n) const
588 const unsigned long* const last = X<>::primes + X<>::n_primes;
589 const unsigned long* p = std::lower_bound(X<>::primes, last, n);
590 m_next_resize = static_cast<std::size_t>(std::ceil(*p * m_max_load_factor));
594 // Return the smallest prime p such that alpha p >= n, where alpha
595 // is the load factor.
// Smallest tabulated prime p with p * max_load_factor >= n, i.e. a
// bucket count that can hold n elements without exceeding the load
// factor.  Refreshes the cached resize threshold as a side effect.
// NOTE(review): fragmentary listing — the return-type line, braces,
// the lower_bound key argument, and the final return are not visible.
597 prime_rehash_policy::
598 bkt_for_elements(std::size_t n) const
600 const unsigned long* const last = X<>::primes + X<>::n_primes;
601 const float min_bkts = n / m_max_load_factor;
602 const unsigned long* p = std::lower_bound(X<>::primes, last,
604 m_next_resize = static_cast<std::size_t>(std::ceil(*p * m_max_load_factor));
608 // Finds the smallest prime p such that alpha p > n_elt + n_ins.
609 // If p > n_bkt, return make_pair(true, p); otherwise return
610 // make_pair(false, 0). In principle this isn't very different from
613 // The only tricky part is that we're caching the element count at
614 // which we need to rehash, so we don't have to do a floating-point
615 // multiply for every insertion.
// Decide whether inserting n_ins more elements into a table of n_bkt
// buckets / n_elt elements requires a rehash.  Fast path: compare
// against the cached m_next_resize so no floating-point multiply is
// needed per insertion (see comment above).  When growth is needed,
// the new count is at least m_growth_factor * n_bkt.
// NOTE(review): fragmentary listing — braces, the lower_bound key
// argument, and the assignments feeding m_next_resize are not fully
// visible here.
617 inline std::pair<bool, std::size_t>
618 prime_rehash_policy::
619 need_rehash(std::size_t n_bkt, std::size_t n_elt, std::size_t n_ins) const
// Slow path: cached threshold would be exceeded; recompute exactly.
621 if (n_elt + n_ins > m_next_resize)
623 float min_bkts = (float(n_ins) + float(n_elt)) / m_max_load_factor;
624 if (min_bkts > n_bkt)
// Grow geometrically at minimum, then round up to a tabulated prime.
626 min_bkts = std::max(min_bkts, m_growth_factor * n_bkt);
627 const unsigned long* const last = X<>::primes + X<>::n_primes;
628 const unsigned long* p = std::lower_bound(X<>::primes, last,
631 static_cast<std::size_t>(std::ceil(*p * m_max_load_factor));
632 return std::make_pair(true, *p);
// Threshold crossed but current bucket count still suffices: just
// refresh the cache.
637 static_cast<std::size_t>(std::ceil(n_bkt * m_max_load_factor));
638 return std::make_pair(false, 0);
// Fast path: still under the cached threshold.
642 return std::make_pair(false, 0);
645 } // namespace Internal
648 //----------------------------------------------------------------------
649 // Base classes for std::tr1::hashtable. We define these base classes
650 // because in some cases we want to do different things depending on
651 // the value of a policy class. In some cases the policy class affects
652 // which member functions and nested typedefs are defined; we handle that
653 // by specializing base class templates. Several of the base class templates
654 // need to access other members of class template hashtable, so we use
655 // the "curiously recurring template pattern" for them.
659 // class template map_base. If the hashtable has a value type of the
660 // form pair<T1, T2> and a key extraction policy that returns the
661 // first part of the pair, the hashtable gets a mapped_type typedef.
662 // If it satisfies those criteria and also has unique keys, then it
663 // also gets an operator[].
// map_base: gives the map-like instantiations (value type pair<T1,T2>
// extracted by extract1st) a mapped_type typedef, and — when keys are
// unique — an operator[] that inserts a default-constructed value on
// miss.  Uses CRTP: Hashtable is the derived hashtable itself.
// NOTE(review): fragmentary listing — braces, the primary template's
// (empty) definition, and operator[]'s return-type / miss-check /
// final return lines are not visible here.
665 template<typename K, typename V, typename Ex, bool unique, typename Hashtable>
// Non-unique keys: mapped_type only, no operator[].
668 template<typename K, typename Pair, typename Hashtable>
669 struct map_base<K, Pair, extract1st<Pair>, false, Hashtable>
671 typedef typename Pair::second_type mapped_type;
// Unique keys: mapped_type plus operator[].
674 template<typename K, typename Pair, typename Hashtable>
675 struct map_base<K, Pair, extract1st<Pair>, true, Hashtable>
677 typedef typename Pair::second_type mapped_type;
680 operator[](const K& k)
// CRTP downcast to reach the hashtable's find/insert machinery.
682 Hashtable* h = static_cast<Hashtable*>(this);
683 typename Hashtable::hash_code_t code = h->m_hash_code(k);
684 typename Hashtable::iterator it = h->m_find(k, code);
// On miss, insert (k, mapped_type()) into the bucket found above.
686 it = h->m_insert_bucket(std::make_pair(k, mapped_type()),
687 it.m_cur_bucket - h->m_buckets, code);
692 // class template rehash_base. Give hashtable the max_load_factor
693 // functions iff the rehash policy is prime_rehash_policy.
// rehash_base: adds max_load_factor() accessors to hashtable iff the
// rehash policy is prime_rehash_policy (CRTP, like map_base).
// NOTE(review): fragmentary listing — braces and the "float"/"void"
// return-type lines of the two members are not visible here.
694 template<typename RehashPolicy, typename Hashtable>
695 struct rehash_base { };
697 template<typename Hashtable>
698 struct rehash_base<prime_rehash_policy, Hashtable>
// Getter: forward to the policy object held by the derived table.
701 max_load_factor() const
703 const Hashtable* This = static_cast<const Hashtable*>(this);
704 return This->rehash_policy().max_load_factor();
// Setter: install a fresh policy with the requested load factor.
708 max_load_factor(float z)
710 Hashtable* This = static_cast<Hashtable*>(this);
711 This->rehash_policy(prime_rehash_policy(z));
715 // Class template hash_code_base. Encapsulates two policy issues that
716 // aren't quite orthogonal.
717 // (1) the difference between using a ranged hash function and using
718 // the combination of a hash function and a range-hashing function.
719 // In the former case we don't have such things as hash codes, so
720 // we have a dummy type as placeholder.
721 // (2) Whether or not we cache hash codes. Caching hash codes is
722 // meaningless if we have a ranged hash function.
723 // We also put the key extraction and equality comparison function
724 // objects here, for convenience.
726 // Primary template: unused except as a hook for specializations.
// Primary template: never defined; the valid policy combinations are
// covered by the specializations below.
728 template<typename Key, typename Value,
729 typename ExtractKey, typename Equal,
730 typename H1, typename H2, typename H,
731 bool cache_hash_code>
732 struct hash_code_base;
734 // Specialization: ranged hash function, no caching hash codes. H1
735 // and H2 are provided but ignored. We define a dummy hash code type.
// Specialization: user-supplied ranged hash H, no cached codes.  With
// a ranged hash there is no separate "hash code" stage, so
// hash_code_t is a dummy (void*) and store/copy_code are no-ops.
// NOTE(review): fragmentary listing — braces, several return-type
// lines, m_hash_code's body, and the m_eq/m_ranged_hash member
// declarations after line 781 are not visible here.
736 template<typename Key, typename Value,
737 typename ExtractKey, typename Equal,
738 typename H1, typename H2, typename H>
739 struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, false>
// H1 and H2 are accepted but ignored in this configuration.
742 hash_code_base(const ExtractKey& ex, const Equal& eq,
743 const H1&, const H2&, const H& h)
744 : m_extract(ex), m_eq(eq), m_ranged_hash(h) { }
// Dummy hash-code type: nothing meaningful to compute or cache.
746 typedef void* hash_code_t;
749 m_hash_code(const Key& k) const
// Bucket selection goes straight through the ranged hash.
753 bucket_index(const Key& k, hash_code_t, std::size_t N) const
754 { return m_ranged_hash(k, N); }
757 bucket_index(const hash_node<Value, false>* p, std::size_t N) const
758 { return m_ranged_hash(m_extract(p->m_v), N); }
// Equality only — the dummy code carries no information.
761 compare(const Key& k, hash_code_t, hash_node<Value, false>* n) const
762 { return m_eq(k, m_extract(n->m_v)); }
// No cached code to store or copy in this configuration.
765 store_code(hash_node<Value, false>*, hash_code_t) const
769 copy_code(hash_node<Value, false>*, const hash_node<Value, false>*) const
// Member-wise swap of the three function objects.
773 m_swap(hash_code_base& x)
775 std::swap(m_extract, x.m_extract);
776 std::swap(m_eq, x.m_eq);
777 std::swap(m_ranged_hash, x.m_ranged_hash);
781 ExtractKey m_extract;
787 // No specialization for ranged hash function while caching hash codes.
788 // That combination is meaningless, and trying to do it is an error.
791 // Specialization: ranged hash function, cache hash codes. This
792 // combination is meaningless, so we provide only a declaration
793 // and no definition.
// Ranged hash plus cached hash codes is a meaningless combination
// (see comment above), so this specialization is declared but never
// defined: instantiating it is a compile-time error.
795 template<typename Key, typename Value,
796 typename ExtractKey, typename Equal,
797 typename H1, typename H2, typename H>
798 struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, true>;
801 // Specialization: hash function and range-hashing function, no
802 // caching of hash codes. H is provided but ignored. Provides
803 // typedef and accessor required by TR1.
// Specialization: separate hash (H1) + range-hashing (H2) functions,
// no caching.  Real size_t hash codes exist, but nodes don't store
// them, so store/copy_code are still no-ops.  Provides the
// hash_function() accessor TR1 requires.
// NOTE(review): fragmentary listing — braces, return-type lines,
// m_hash_code's body (presumably m_h1(k)), and the m_eq/m_h1/m_h2
// member declarations after line 858 are not visible here.
805 template<typename Key, typename Value,
806 typename ExtractKey, typename Equal,
807 typename H1, typename H2>
808 struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2,
809 default_ranged_hash, false>
// TR1-mandated accessor for the user's hash function.
814 hash_function() const
// The default_ranged_hash tag argument is accepted but unused.
818 hash_code_base(const ExtractKey& ex, const Equal& eq,
819 const H1& h1, const H2& h2, const default_ranged_hash&)
820 : m_extract(ex), m_eq(eq), m_h1(h1), m_h2(h2) { }
822 typedef std::size_t hash_code_t;
825 m_hash_code(const Key& k) const
// Fold a precomputed code into [0, N).
829 bucket_index(const Key&, hash_code_t c, std::size_t N) const
830 { return m_h2(c, N); }
// No cached code on the node: rehash the stored element.
833 bucket_index(const hash_node<Value, false>* p, std::size_t N) const
834 { return m_h2(m_h1(m_extract(p->m_v)), N); }
837 compare(const Key& k, hash_code_t, hash_node<Value, false>* n) const
838 { return m_eq(k, m_extract(n->m_v)); }
// Nodes don't cache codes in this configuration: no-ops.
841 store_code(hash_node<Value, false>*, hash_code_t) const
845 copy_code(hash_node<Value, false>*, const hash_node<Value, false>*) const
// Member-wise swap of the four function objects.
849 m_swap(hash_code_base& x)
851 std::swap(m_extract, x.m_extract);
852 std::swap(m_eq, x.m_eq);
853 std::swap(m_h1, x.m_h1);
854 std::swap(m_h2, x.m_h2);
858 ExtractKey m_extract;
864 // Specialization: hash function and range-hashing function,
865 // caching hash codes. H is provided but ignored. Provides
866 // typedef and accessor required by TR1.
// Specialization: separate H1/H2 functions WITH cached hash codes.
// Nodes store their code, so bucket_index can reuse it, compare can
// short-circuit on code mismatch before calling Equal, and
// store/copy_code actually write the node's hash_code field.
// NOTE(review): fragmentary listing — braces, return-type lines, and
// the m_eq/m_h1/m_h2 member declarations after line 921 are not
// visible here.
867 template<typename Key, typename Value,
868 typename ExtractKey, typename Equal,
869 typename H1, typename H2>
870 struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2,
871 default_ranged_hash, true>
// TR1-mandated accessor for the user's hash function.
876 hash_function() const
880 hash_code_base(const ExtractKey& ex, const Equal& eq,
881 const H1& h1, const H2& h2, const default_ranged_hash&)
882 : m_extract(ex), m_eq(eq), m_h1(h1), m_h2(h2) { }
884 typedef std::size_t hash_code_t;
887 m_hash_code(const Key& k) const
891 bucket_index(const Key&, hash_code_t c, std::size_t N) const
892 { return m_h2(c, N); }
// Reuse the code cached on the node — no rehash of the element.
895 bucket_index(const hash_node<Value, true>* p, std::size_t N) const
896 { return m_h2(p->hash_code, N); }
// Cheap code comparison first; Equal only on a code match.
899 compare(const Key& k, hash_code_t c, hash_node<Value, true>* n) const
900 { return c == n->hash_code && m_eq(k, m_extract(n->m_v)); }
903 store_code(hash_node<Value, true>* n, hash_code_t c) const
904 { n->hash_code = c; }
907 copy_code(hash_node<Value, true>* to,
908 const hash_node<Value, true>* from) const
909 { to->hash_code = from->hash_code; }
// Member-wise swap of the four function objects.
912 m_swap(hash_code_base& x)
914 std::swap(m_extract, x.m_extract);
915 std::swap(m_eq, x.m_eq);
916 std::swap(m_h1, x.m_h1);
917 std::swap(m_h2, x.m_h2);
921 ExtractKey m_extract;
927 } // namespace Internal
931 _GLIBCXX_BEGIN_NAMESPACE(tr1)
933 //----------------------------------------------------------------------
934 // Class template hashtable, class definition.
936 // Meaning of class template hashtable's template parameters
938 // Key and Value: arbitrary CopyConstructible types.
940 // Allocator: an allocator type ([lib.allocator.requirements]) whose
941 // value type is Value.
943 // ExtractKey: function object that takes a object of type Value
944 // and returns a value of type Key.
946 // Equal: function object that takes two objects of type k and returns
947 // a bool-like value that is true if the two objects are considered equal.
949 // H1: the hash function. A unary function object with argument type
950 // Key and result type size_t. Return values should be distributed
951 // over the entire range [0, numeric_limits<size_t>::max()].
953 // H2: the range-hashing function (in the terminology of Tavori and
954 // Dreizin). A binary function object whose argument types and result
955 // type are all size_t. Given arguments r and N, the return value is
956 // in the range [0, N).
958 // H: the ranged hash function (Tavori and Dreizin). A binary function
959 // whose argument types are Key and size_t and whose result type is
960 // size_t. Given arguments k and N, the return value is in the range
961 // [0, N). Default: h(k, N) = h2(h1(k), N). If H is anything other
962 // than the default, H1 and H2 are ignored.
964 // RehashPolicy: Policy class with three members, all of which govern
965 // the bucket count. n_bkt(n) returns a bucket count no smaller
966 // than n. bkt_for_elements(n) returns a bucket count appropriate
967 // for an element count of n. need_rehash(n_bkt, n_elt, n_ins)
968 // determines whether, if the current bucket count is n_bkt and the
969 // current element count is n_elt, we need to increase the bucket
970 // count. If so, returns make_pair(true, n), where n is the new
971 // bucket count. If not, returns make_pair(false, <anything>).
973 // ??? Right now it is hard-wired that the number of buckets never
974 // shrinks. Should we allow RehashPolicy to change that?
976 // cache_hash_code: bool. true if we store the value of the hash
977 // function along with the value. This is a time-space tradeoff.
978 // Storing it may improve lookup speed by reducing the number of times
979 // we need to call the Equal function.
981 // constant_iterators: bool. true if iterator and const_iterator are
982 // both constant iterator types. This is true for unordered_set and
983 // unordered_multiset, false for unordered_map and unordered_multimap.
985 // unique_keys: bool. true if the return value of hashtable::count(k)
986 // is always at most one, false if it may be an arbitrary number. This
987 // is true for unordered_set and unordered_map, false for unordered_multiset
988 // and unordered_multimap.
990 template<typename Key, typename Value,
992 typename ExtractKey, typename Equal,
993 typename H1, typename H2,
994 typename H, typename RehashPolicy,
995 bool cache_hash_code,
996 bool constant_iterators,
999 : public Internal::rehash_base<RehashPolicy,
1000 hashtable<Key, Value, Allocator, ExtractKey,
1001 Equal, H1, H2, H, RehashPolicy,
1002 cache_hash_code, constant_iterators,
1004 public Internal::hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H,
1006 public Internal::map_base<Key, Value, ExtractKey, unique_keys,
1007 hashtable<Key, Value, Allocator, ExtractKey,
1008 Equal, H1, H2, H, RehashPolicy,
1009 cache_hash_code, constant_iterators,
1013 typedef Allocator allocator_type;
1014 typedef Value value_type;
1015 typedef Key key_type;
1016 typedef Equal key_equal;
1017 // mapped_type, if present, comes from map_base.
1018 // hasher, if present, comes from hash_code_base.
1019 typedef typename Allocator::difference_type difference_type;
1020 typedef typename Allocator::size_type size_type;
1021 typedef typename Allocator::reference reference;
1022 typedef typename Allocator::const_reference const_reference;
1024 typedef Internal::node_iterator<value_type, constant_iterators,
1027 typedef Internal::node_const_iterator<value_type, constant_iterators,
1029 const_local_iterator;
1031 typedef Internal::hashtable_iterator<value_type, constant_iterators,
1034 typedef Internal::hashtable_const_iterator<value_type, constant_iterators,
1038 template<typename K, typename Pair, typename Hashtable>
1039 friend struct Internal::map_base;
1042 typedef Internal::hash_node<Value, cache_hash_code> node;
1043 typedef typename Allocator::template rebind<node>::other
1045 typedef typename Allocator::template rebind<node*>::other
1049 node_allocator_t m_node_allocator;
1051 size_type m_bucket_count;
1052 size_type m_element_count;
1053 RehashPolicy m_rehash_policy;
1056 m_allocate_node(const value_type& v);
1059 m_deallocate_node(node* n);
1062 m_deallocate_nodes(node**, size_type);
1065 m_allocate_buckets(size_type n);
1068 m_deallocate_buckets(node**, size_type n);
1070 public: // Constructor, destructor, assignment, swap
// Construct an empty table.  bucket_hint is only a hint; the actual
// bucket count is chosen by the rehash policy (see definition below).
1071 hashtable(size_type bucket_hint,
1072 const H1&, const H2&, const H&,
1073 const Equal&, const ExtractKey&,
1074 const allocator_type&);
// Construct from an input range [first, last).
1076 template<typename InIter>
1077 hashtable(InIter first, InIter last,
1078 size_type bucket_hint,
1079 const H1&, const H2&, const H&,
1080 const Equal&, const ExtractKey&,
1081 const allocator_type&);
1083 hashtable(const hashtable&);
1086 operator=(const hashtable&);
1090 void swap(hashtable&);
1092 public: // Basic container operations
// begin(): an iterator over the first bucket (the iterator presumably
// skips empty buckets itself -- elided lines; confirm in the iterator).
1096 iterator i(m_buckets);
1105 const_iterator i(m_buckets);
// end() points one past the bucket array, at the sentinel slot.
1113 { return iterator(m_buckets + m_bucket_count); }
1117 { return const_iterator(m_buckets + m_bucket_count); }
1121 { return m_element_count; }
1125 { return size() == 0; }
// The node allocator converts back to the user-visible allocator_type.
1128 get_allocator() const
1129 { return m_node_allocator; }
1133 { return m_node_allocator.max_size(); }
1135 public: // Observers
// key_eq(): m_eq lives in the hash_code_base base class.
1138 { return this->m_eq; }
1140 // hash_function, if present, comes from hash_code_base.
1142 public: // Bucket operations
1144 bucket_count() const
1145 { return m_bucket_count; }
// No hard cap beyond what the allocator supports.
1148 max_bucket_count() const
1149 { return max_size(); }
// O(chain length): counts the nodes in bucket n.
1152 bucket_size(size_type n) const
1153 { return std::distance(begin(n), end(n)); }
// Index of the bucket that would hold key k.
1156 bucket(const key_type& k) const
1158 return this->bucket_index(k, this->m_hash_code(k),
1159 this->m_bucket_count);
// Local iteration over bucket n; end is the null chain terminator.
1164 { return local_iterator(m_buckets[n]); }
1168 { return local_iterator(0); }
1170 const_local_iterator
1171 begin(size_type n) const
1172 { return const_local_iterator(m_buckets[n]); }
1174 const_local_iterator
1175 end(size_type) const
1176 { return const_local_iterator(0); }
// load_factor(): average elements per bucket.
1181 return static_cast<float>(size()) / static_cast<float>(bucket_count());
1184 // max_load_factor, if present, comes from rehash_base.
1186 // Generalization of max_load_factor. Extension, not found in TR1. Only
1187 // useful if RehashPolicy is something other than the default.
1189 rehash_policy() const
1190 { return m_rehash_policy; }
1193 rehash_policy(const RehashPolicy&);
// Lookup interface; definitions appear out of line below.
1197 find(const key_type& k);
1200 find(const key_type& k) const;
1203 count(const key_type& k) const;
1205 std::pair<iterator, iterator>
1206 equal_range(const key_type& k);
1208 std::pair<const_iterator, const_iterator>
1209 equal_range(const key_type& k) const;
1211 private: // Find, insert and erase helper functions
1212 // ??? This dispatching is a workaround for the fact that we don't
1213 // have partial specialization of member templates; it would be
1214 // better to just specialize insert on unique_keys. There may be a
1215 // cleaner workaround.
// insert() returns pair<iterator,bool> for unique keys, plain
// iterator for multi containers; selected at compile time.
1216 typedef typename Internal::IF<unique_keys,
1217 std::pair<iterator, bool>, iterator>::type
// Converter that extracts the iterator from whichever return type
// the insert above produced (first of the pair, or identity).
1220 typedef typename Internal::IF<unique_keys,
1221 Internal::extract1st<Insert_Return_Type>,
1222 Internal::identity<Insert_Return_Type>
// Lookup primitives taking a precomputed hash code.
1227 m_find(const key_type&, typename hashtable::hash_code_t) const;
1230 m_find_node(node*, const key_type&,
1231 typename hashtable::hash_code_t) const;
// Unchecked insert into a known bucket (no duplicate search).
1234 m_insert_bucket(const value_type&, size_type,
1235 typename hashtable::hash_code_t);
// Tag-dispatched insert: true_type = unique keys, false_type = multi.
1237 std::pair<iterator, bool>
1238 m_insert(const value_type&, std::tr1::true_type);
1241 m_insert(const value_type&, std::tr1::false_type);
// Unlink and destroy one node from bucket chain b.
1244 m_erase_node(node*, node**);
1246 public: // Insert and erase
// Dispatch on unique_keys via integral_constant tag.
1248 insert(const value_type& v)
1249 { return m_insert(v, std::tr1::integral_constant<bool, unique_keys>()); }
// Hinted inserts ignore the hint; the hash decides placement.
1252 insert(iterator, const value_type& v)
1253 { return iterator(Insert_Conv_Type()(this->insert(v))); }
1256 insert(const_iterator, const value_type& v)
1257 { return const_iterator(Insert_Conv_Type()(this->insert(v))); }
1259 template<typename InIter>
1261 insert(InIter first, InIter last);
1267 erase(const_iterator);
1270 erase(const key_type&);
1273 erase(iterator, iterator);
1276 erase(const_iterator, const_iterator);
1282 // Set number of buckets to be appropriate for container of n element.
1283 void rehash(size_type n);
1286 // Unconditionally change size of bucket array to n.
1287 void m_rehash(size_type n);
1291 //----------------------------------------------------------------------
1292 // Definitions of class template hashtable's out-of-line member functions.
// Allocate storage for one node and copy-construct v into its m_v.
// If the copy constructor throws, the raw storage is returned to the
// node allocator before the exception is rethrown, so nothing leaks.
1294 template<typename K, typename V,
1295 typename A, typename Ex, typename Eq,
1296 typename H1, typename H2, typename H, typename RP,
1297 bool c, bool ci, bool u>
1298 typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::node*
1299 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1300 m_allocate_node(const value_type& v)
1302 node* n = m_node_allocator.allocate(1);
// Construct the element through the user-visible allocator.
1305 get_allocator().construct(&n->m_v, v);
// Exception path: free the uninitialized node and rethrow.
1311 m_node_allocator.deallocate(n, 1);
1312 __throw_exception_again;
// Destroy the node's stored value, then return its storage to the
// node allocator.  Inverse of m_allocate_node.
1316 template<typename K, typename V,
1317 typename A, typename Ex, typename Eq,
1318 typename H1, typename H2, typename H, typename RP,
1319 bool c, bool ci, bool u>
1321 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1322 m_deallocate_node(node* n)
1324 get_allocator().destroy(&n->m_v);
1325 m_node_allocator.deallocate(n, 1);
// Destroy and free every node in each of the n bucket chains of
// `array`.  (Chain-walking details are on elided lines; presumably
// each bucket slot is also reset while its chain is consumed.)
1328 template<typename K, typename V,
1329 typename A, typename Ex, typename Eq,
1330 typename H1, typename H2, typename H, typename RP,
1331 bool c, bool ci, bool u>
1333 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1334 m_deallocate_nodes(node** array, size_type n)
1336 for (size_type i = 0; i < n; ++i)
1343 m_deallocate_node(tmp);
// Allocate a bucket array of n null chain heads plus one sentinel
// slot.  The sentinel is an arbitrary non-null pointer (never
// dereferenced) that lets iterator increment detect the array end.
1349 template<typename K, typename V,
1350 typename A, typename Ex, typename Eq,
1351 typename H1, typename H2, typename H, typename RP,
1352 bool c, bool ci, bool u>
1353 typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::node**
1354 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1355 m_allocate_buckets(size_type n)
// The bucket allocator shares state with the node allocator.
1357 bucket_allocator_t alloc(m_node_allocator);
1359 // We allocate one extra bucket to hold a sentinel, an arbitrary
1360 // non-null pointer. Iterator increment relies on this.
1361 node** p = alloc.allocate(n + 1);
1362 std::fill(p, p + n, (node*) 0);
// 0x1000 is arbitrary; any non-null value would do.
1363 p[n] = reinterpret_cast<node*>(0x1000);
// Free a bucket array previously obtained from m_allocate_buckets.
// n + 1 accounts for the extra sentinel slot allocated there.
1367 template<typename K, typename V,
1368 typename A, typename Ex, typename Eq,
1369 typename H1, typename H2, typename H, typename RP,
1370 bool c, bool ci, bool u>
1372 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1373 m_deallocate_buckets(node** p, size_type n)
1375 bucket_allocator_t alloc(m_node_allocator);
1376 alloc.deallocate(p, n + 1);
// Construct an empty table.  bucket_hint is only a hint: the rehash
// policy's next_bkt() chooses the actual initial bucket count.
1379 template<typename K, typename V,
1380 typename A, typename Ex, typename Eq,
1381 typename H1, typename H2, typename H, typename RP,
1382 bool c, bool ci, bool u>
1383 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1384 hashtable(size_type bucket_hint,
1385 const H1& h1, const H2& h2, const H& h,
1386 const Eq& eq, const Ex& exk,
1387 const allocator_type& a)
// Function objects live in the hash_code_base base class.
1388 : Internal::rehash_base<RP, hashtable>(),
1389 Internal::hash_code_base<K, V, Ex, Eq, H1, H2, H, c>(exk, eq, h1, h2, h),
1390 Internal::map_base<K, V, Ex, u, hashtable>(),
1391 m_node_allocator(a),
1396 m_bucket_count = m_rehash_policy.next_bkt(bucket_hint);
1397 m_buckets = m_allocate_buckets(m_bucket_count);
// Construct from the range [f, l).  The initial bucket count is the
// larger of what the hint suggests and what the policy predicts for
// distance_fw(f, l) elements, avoiding rehashes during the fill.
// If inserting the range throws, the bucket array is released before
// the exception propagates (elements presumably cleared on an elided
// line -- confirm).
1400 template<typename K, typename V,
1401 typename A, typename Ex, typename Eq,
1402 typename H1, typename H2, typename H, typename RP,
1403 bool c, bool ci, bool u>
1404 template<typename InIter>
1405 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1406 hashtable(InIter f, InIter l,
1407 size_type bucket_hint,
1408 const H1& h1, const H2& h2, const H& h,
1409 const Eq& eq, const Ex& exk,
1410 const allocator_type& a)
1411 : Internal::rehash_base<RP, hashtable>(),
1412 Internal::hash_code_base<K, V, Ex, Eq, H1, H2, H, c>(exk, eq,
1414 Internal::map_base<K, V, Ex, u, hashtable>(),
1415 m_node_allocator(a),
1420 m_bucket_count = std::max(m_rehash_policy.next_bkt(bucket_hint),
1422 bkt_for_elements(Internal::
1423 distance_fw(f, l)));
1424 m_buckets = m_allocate_buckets(m_bucket_count);
// Exception path: release the bucket array and rethrow.
1433 m_deallocate_buckets(m_buckets, m_bucket_count);
1434 __throw_exception_again;
// Copy constructor: deep-copies every bucket chain, preserving the
// relative order of nodes within each chain (tail insertion).
// copy_code copies the cached hash code when the policy caches it.
// On exception, already-built nodes and the bucket array are released
// (cleanup partially on elided lines) and the exception is rethrown.
1438 template<typename K, typename V,
1439 typename A, typename Ex, typename Eq,
1440 typename H1, typename H2, typename H, typename RP,
1441 bool c, bool ci, bool u>
1442 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1443 hashtable(const hashtable& ht)
1444 : Internal::rehash_base<RP, hashtable>(ht),
1445 Internal::hash_code_base<K, V, Ex, Eq, H1, H2, H, c>(ht),
1446 Internal::map_base<K, V, Ex, u, hashtable>(ht),
1447 m_node_allocator(ht.get_allocator()),
1448 m_bucket_count(ht.m_bucket_count),
1449 m_element_count(ht.m_element_count),
1450 m_rehash_policy(ht.m_rehash_policy)
1452 m_buckets = m_allocate_buckets(m_bucket_count);
1455 for (size_type i = 0; i < ht.m_bucket_count; ++i)
1457 node* n = ht.m_buckets[i];
// tail always points at the link to append through, so chain
// order matches the source table's.
1458 node** tail = m_buckets + i;
1461 *tail = m_allocate_node(n->m_v);
1462 this->copy_code(*tail, n);
1463 tail = &((*tail)->m_next);
// Exception path: free what was built so far and rethrow.
1471 m_deallocate_buckets(m_buckets, m_bucket_count);
1472 __throw_exception_again;
// Copy assignment.  The implementation body is on elided lines in
// this listing; presumably copy-and-swap or equivalent -- confirm
// against the full header before relying on its exception guarantee.
1476 template<typename K, typename V,
1477 typename A, typename Ex, typename Eq,
1478 typename H1, typename H2, typename H, typename RP,
1479 bool c, bool ci, bool u>
1480 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>&
1481 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1482 operator=(const hashtable& ht)
// Destructor: releases the bucket array.  The element nodes are
// presumably destroyed first by clear() on an elided line -- confirm.
1489 template<typename K, typename V,
1490 typename A, typename Ex, typename Eq,
1491 typename H1, typename H2, typename H, typename RP,
1492 bool c, bool ci, bool u>
1493 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1497 m_deallocate_buckets(m_buckets, m_bucket_count);
// Swap all state with x: function objects (via the hash_code_base
// base), allocators (per LWG DR 431), rehash policy, and the bucket
// array with its counts.  No element is copied or moved.
1500 template<typename K, typename V,
1501 typename A, typename Ex, typename Eq,
1502 typename H1, typename H2, typename H, typename RP,
1503 bool c, bool ci, bool u>
1505 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1508 // The only base class with member variables is hash_code_base. We
1509 // define hash_code_base::m_swap because different specializations
1510 // have different members.
1511 Internal::hash_code_base<K, V, Ex, Eq, H1, H2, H, c>::m_swap(x);
1513 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1514 // 431. Swapping containers with unequal allocators.
1515 std::__alloc_swap<node_allocator_t>::_S_do_it(m_node_allocator,
1516 x.m_node_allocator);
1518 std::swap(m_rehash_policy, x.m_rehash_policy);
1519 std::swap(m_buckets, x.m_buckets);
1520 std::swap(m_bucket_count, x.m_bucket_count);
1521 std::swap(m_element_count, x.m_element_count);
// Install a new rehash policy.  If the new policy wants more buckets
// for the current element count, the table grows (the m_rehash call
// is on an elided line following the comparison).
1524 template<typename K, typename V,
1525 typename A, typename Ex, typename Eq,
1526 typename H1, typename H2, typename H, typename RP,
1527 bool c, bool ci, bool u>
1529 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1530 rehash_policy(const RP& pol)
1532 m_rehash_policy = pol;
1533 size_type n_bkt = pol.bkt_for_elements(m_element_count);
1534 if (n_bkt > m_bucket_count)
// Internal lookup with a precomputed hash code.  Returns an iterator
// whose node pointer is null when the key is absent; callers (find)
// translate that into end().
1538 template<typename K, typename V,
1539 typename A, typename Ex, typename Eq,
1540 typename H1, typename H2, typename H, typename RP,
1541 bool c, bool ci, bool u>
1542 typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::iterator
1543 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1544 m_find(const key_type& k, typename hashtable::hash_code_t code) const
1546 std::size_t n = this->bucket_index(k, code, this->bucket_count());
1547 node* p = m_find_node(m_buckets[n], k, code);
1548 return iterator(p, m_buckets + n);
// Public find: hash once, delegate to m_find, and map a null node
// result to end().
1551 template<typename K, typename V,
1552 typename A, typename Ex, typename Eq,
1553 typename H1, typename H2, typename H, typename RP,
1554 bool c, bool ci, bool u>
1555 typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::iterator
1556 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1557 find(const key_type& k)
1559 typename hashtable::hash_code_t code = this->m_hash_code(k);
1560 iterator i = m_find(k, code);
1561 return i.m_cur_node ? i : this->end();
// Const overload of find; converts the mutable internal iterator to
// const_iterator before the null-node check.
1564 template<typename K, typename V,
1565 typename A, typename Ex, typename Eq,
1566 typename H1, typename H2, typename H, typename RP,
1567 bool c, bool ci, bool u>
1568 typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::const_iterator
1569 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1570 find(const key_type& k) const
1572 typename hashtable::hash_code_t code = this->m_hash_code(k);
1573 const_iterator i = const_iterator(m_find(k, code));
1574 return i.m_cur_node ? i : this->end();
// Count elements with key k: walk k's bucket chain and tally the
// nodes that compare equal (result increment/return are on elided
// lines).  For unique-key tables this is 0 or 1.
1577 template<typename K, typename V,
1578 typename A, typename Ex, typename Eq,
1579 typename H1, typename H2, typename H, typename RP,
1580 bool c, bool ci, bool u>
1581 typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::size_type
1582 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1583 count(const key_type& k) const
1585 typename hashtable::hash_code_t code = this->m_hash_code(k);
1586 std::size_t n = this->bucket_index(k, code, this->bucket_count());
1587 std::size_t result = 0;
1588 for (node* p = m_buckets[n]; p; p = p->m_next)
1589 if (this->compare(k, code, p))
// Range of elements equal to k.  Relies on the invariant (maintained
// by the multi insert path) that equal elements are adjacent in a
// chain: find the first match, then advance p1 past the run of
// matches.  If the run reaches the end of the chain, the last
// iterator is bumped to the next non-empty bucket (the guard before
// m_incr_bucket is on an elided line).
1594 template<typename K, typename V,
1595 typename A, typename Ex, typename Eq,
1596 typename H1, typename H2, typename H, typename RP,
1597 bool c, bool ci, bool u>
1598 std::pair<typename hashtable<K, V, A, Ex, Eq, H1,
1599 H2, H, RP, c, ci, u>::iterator,
1600 typename hashtable<K, V, A, Ex, Eq, H1,
1601 H2, H, RP, c, ci, u>::iterator>
1602 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1603 equal_range(const key_type& k)
1605 typename hashtable::hash_code_t code = this->m_hash_code(k);
1606 std::size_t n = this->bucket_index(k, code, this->bucket_count());
1607 node** head = m_buckets + n;
1608 node* p = m_find_node(*head, k, code);
1612 node* p1 = p->m_next;
1613 for (; p1; p1 = p1->m_next)
1614 if (!this->compare(k, code, p1))
1617 iterator first(p, head);
1618 iterator last(p1, head);
1620 last.m_incr_bucket();
1621 return std::make_pair(first, last);
// No match: an empty range at end().
1624 return std::make_pair(this->end(), this->end());
// Const overload of equal_range; identical algorithm to the mutable
// version above, producing const_iterators.
1627 template<typename K, typename V,
1628 typename A, typename Ex, typename Eq,
1629 typename H1, typename H2, typename H, typename RP,
1630 bool c, bool ci, bool u>
1631 std::pair<typename hashtable<K, V, A, Ex, Eq, H1,
1632 H2, H, RP, c, ci, u>::const_iterator,
1633 typename hashtable<K, V, A, Ex, Eq, H1,
1634 H2, H, RP, c, ci, u>::const_iterator>
1635 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1636 equal_range(const key_type& k) const
1638 typename hashtable::hash_code_t code = this->m_hash_code(k);
1639 std::size_t n = this->bucket_index(k, code, this->bucket_count());
1640 node** head = m_buckets + n;
1641 node* p = m_find_node(*head, k, code);
// Scan past the contiguous run of elements equal to k.
1645 node* p1 = p->m_next;
1646 for (; p1; p1 = p1->m_next)
1647 if (!this->compare(k, code, p1))
1650 const_iterator first(p, head);
1651 const_iterator last(p1, head);
1653 last.m_incr_bucket();
1654 return std::make_pair(first, last);
// No match: an empty range at end().
1657 return std::make_pair(this->end(), this->end());
1660 // Find the node whose key compares equal to k, beginning the search
1661 // at p (usually the head of a bucket). Return nil if no node is found.
// Linear scan of one chain; compare() uses the cached hash code when
// the policy caches it, avoiding equality tests on mismatched hashes.
1662 template<typename K, typename V,
1663 typename A, typename Ex, typename Eq,
1664 typename H1, typename H2, typename H, typename RP,
1665 bool c, bool ci, bool u>
1666 typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::node*
1667 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1668 m_find_node(node* p, const key_type& k,
1669 typename hashtable::hash_code_t code) const
1671 for (; p; p = p->m_next)
1672 if (this->compare(k, code, p))
1677 // Insert v in bucket n (assumes no element with its key already present).
// Exception-safety ordering matters here: the node is allocated
// BEFORE any rehash, so an allocation failure leaves the table
// untouched; and if the rehash itself throws, the catch block frees
// the new node before rethrowing.
1678 template<typename K, typename V,
1679 typename A, typename Ex, typename Eq,
1680 typename H1, typename H2, typename H, typename RP,
1681 bool c, bool ci, bool u>
1682 typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::iterator
1683 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1684 m_insert_bucket(const value_type& v, size_type n,
1685 typename hashtable::hash_code_t code)
1687 std::pair<bool, std::size_t> do_rehash
1688 = m_rehash_policy.need_rehash(m_bucket_count, m_element_count, 1);
1690 // Allocate the new node before doing the rehash so that we don't
1691 // do a rehash if the allocation throws.
1692 node* new_node = m_allocate_node(v);
1696 if (do_rehash.first)
// The bucket index must be recomputed for the post-rehash count.
1698 const key_type& k = this->m_extract(v);
1699 n = this->bucket_index(k, code, do_rehash.second);
1700 m_rehash(do_rehash.second);
// Link at the head of bucket n; store_code caches the hash if the
// policy asks for it.  (Element count increment is on an elided line.)
1703 new_node->m_next = m_buckets[n];
1704 this->store_code(new_node, code);
1705 m_buckets[n] = new_node;
1707 return iterator(new_node, m_buckets + n);
// Exception path (rehash threw): drop the new node and rethrow.
1711 m_deallocate_node(new_node);
1712 __throw_exception_again;
1716 // Insert v if no element with its key is already present.
// Unique-keys path (true_type tag): look for an existing equal key
// first; if found, return (iterator-to-it, false) without inserting,
// otherwise delegate to m_insert_bucket and report success.
1717 template<typename K, typename V,
1718 typename A, typename Ex, typename Eq,
1719 typename H1, typename H2, typename H, typename RP,
1720 bool c, bool ci, bool u>
1721 std::pair<typename hashtable<K, V, A, Ex, Eq, H1,
1722 H2, H, RP, c, ci, u>::iterator, bool>
1723 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1724 m_insert(const value_type& v, std::tr1::true_type)
1726 const key_type& k = this->m_extract(v);
1727 typename hashtable::hash_code_t code = this->m_hash_code(k);
1728 size_type n = this->bucket_index(k, code, m_bucket_count);
1730 if (node* p = m_find_node(m_buckets[n], k, code))
1731 return std::make_pair(iterator(p, m_buckets + n), false);
1732 return std::make_pair(m_insert_bucket(v, n, code), true);
1735 // Insert v unconditionally.
// Multi-keys path (false_type tag).  If an element with an equal key
// exists, the new node is spliced in immediately after it -- this
// keeps equal keys adjacent, which equal_range and erase(key) rely
// on.  Otherwise the node goes at the head of its bucket.
1736 template<typename K, typename V,
1737 typename A, typename Ex, typename Eq,
1738 typename H1, typename H2, typename H, typename RP,
1739 bool c, bool ci, bool u>
1740 typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::iterator
1741 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1742 m_insert(const value_type& v, std::tr1::false_type)
1744 std::pair<bool, std::size_t> do_rehash
1745 = m_rehash_policy.need_rehash(m_bucket_count, m_element_count, 1);
1746 if (do_rehash.first)
1747 m_rehash(do_rehash.second);
// Bucket index computed AFTER any rehash, against the new count.
1749 const key_type& k = this->m_extract(v);
1750 typename hashtable::hash_code_t code = this->m_hash_code(k);
1751 size_type n = this->bucket_index(k, code, m_bucket_count);
1753 node* new_node = m_allocate_node(v);
1754 node* prev = m_find_node(m_buckets[n], k, code);
// Equal key found: splice after it.
1757 new_node->m_next = prev->m_next;
1758 prev->m_next = new_node;
// No equal key: push at the bucket head.
1762 new_node->m_next = m_buckets[n];
1763 m_buckets[n] = new_node;
1765 this->store_code(new_node, code);
1768 return iterator(new_node, m_buckets + n);
1771 // For erase(iterator) and erase(const_iterator).
// Unlink node p from bucket chain b and destroy it.  Since the chain
// is singly linked, the predecessor is found by walking from *b
// (walk details on elided lines); element count bookkeeping is
// presumably handled nearby -- confirm against the full header.
1772 template<typename K, typename V,
1773 typename A, typename Ex, typename Eq,
1774 typename H1, typename H2, typename H, typename RP,
1775 bool c, bool ci, bool u>
1777 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1778 m_erase_node(node* p, node** b)
1785 node* next = cur->m_next;
1791 cur->m_next = next->m_next;
1794 m_deallocate_node(p);
// Range insert: ask the rehash policy once, up front, for room for
// all distance_fw(first, last) elements, so the per-element inserts
// below don't trigger repeated rehashes; then insert one by one.
1798 template<typename K, typename V,
1799 typename A, typename Ex, typename Eq,
1800 typename H1, typename H2, typename H, typename RP,
1801 bool c, bool ci, bool u>
1802 template<typename InIter>
1804 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1805 insert(InIter first, InIter last)
1807 size_type n_elt = Internal::distance_fw(first, last);
1808 std::pair<bool, std::size_t> do_rehash
1809 = m_rehash_policy.need_rehash(m_bucket_count, m_element_count, n_elt);
1810 if (do_rehash.first)
1811 m_rehash(do_rehash.second);
1813 for (; first != last; ++first)
1814 this->insert(*first);
// erase(iterator): save a copy of it, presumably advance the copy to
// the following element (elided line), erase the node, and return
// the successor -- confirm the increment against the full header.
1817 template<typename K, typename V,
1818 typename A, typename Ex, typename Eq,
1819 typename H1, typename H2, typename H, typename RP,
1820 bool c, bool ci, bool u>
1821 typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::iterator
1822 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1825 iterator result = it;
1827 m_erase_node(it.m_cur_node, it.m_cur_bucket);
// erase(const_iterator): same shape as the mutable overload above --
// copy, presumably advance (elided line), unlink, return successor.
1831 template<typename K, typename V,
1832 typename A, typename Ex, typename Eq,
1833 typename H1, typename H2, typename H, typename RP,
1834 bool c, bool ci, bool u>
1835 typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::const_iterator
1836 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1837 erase(const_iterator it)
1839 const_iterator result = it;
1841 m_erase_node(it.m_cur_node, it.m_cur_bucket);
// erase(key): two-phase scan of k's bucket.  First skip the chain
// prefix of non-matching nodes, then delete the contiguous run of
// matching nodes (relies on equal keys being adjacent -- see the
// multi insert path).  Returns the number of elements removed
// (unlink/count updates are partly on elided lines).
1845 template<typename K, typename V,
1846 typename A, typename Ex, typename Eq,
1847 typename H1, typename H2, typename H, typename RP,
1848 bool c, bool ci, bool u>
1849 typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::size_type
1850 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1851 erase(const key_type& k)
1853 typename hashtable::hash_code_t code = this->m_hash_code(k);
1854 std::size_t n = this->bucket_index(k, code, m_bucket_count);
1855 size_type result = 0;
// slot is a pointer-to-link so unlinking needs no predecessor node.
1857 node** slot = m_buckets + n;
1858 while (*slot && !this->compare(k, code, *slot))
1859 slot = &((*slot)->m_next);
1861 while (*slot && this->compare(k, code, *slot))
1865 m_deallocate_node(p);
1873 // ??? This could be optimized by taking advantage of the bucket
1874 // structure, but it's not clear that it's worth doing. It probably
1875 // wouldn't even be an optimization unless the load factor is large.
// Range erase: repeatedly erase the first element of the range,
// using the returned successor iterator, until last is reached.
1876 template<typename K, typename V,
1877 typename A, typename Ex, typename Eq,
1878 typename H1, typename H2, typename H, typename RP,
1879 bool c, bool ci, bool u>
1880 typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::iterator
1881 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1882 erase(iterator first, iterator last)
1884 while (first != last)
1885 first = this->erase(first);
// Const-iterator overload of range erase; same erase-and-advance
// loop as the mutable overload above.
1889 template<typename K, typename V,
1890 typename A, typename Ex, typename Eq,
1891 typename H1, typename H2, typename H, typename RP,
1892 bool c, bool ci, bool u>
1893 typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::const_iterator
1894 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1895 erase(const_iterator first, const_iterator last)
1897 while (first != last)
1898 first = this->erase(first);
// clear(): destroy and free every element node and zero the size.
// The bucket array itself is retained (bucket_count is unchanged).
1902 template<typename K, typename V,
1903 typename A, typename Ex, typename Eq,
1904 typename H1, typename H2, typename H, typename RP,
1905 bool c, bool ci, bool u>
1907 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1910 m_deallocate_nodes(m_buckets, m_bucket_count);
1911 m_element_count = 0;
// Public rehash: delegate to m_rehash with the larger of the
// policy's bucket count for the hint n and the count the policy
// requires for the current element total (the tail of the
// expression continues on elided lines), so the table never shrinks
// below what its contents need.
1914 template<typename K, typename V,
1915 typename A, typename Ex, typename Eq,
1916 typename H1, typename H2, typename H, typename RP,
1917 bool c, bool ci, bool u>
1919 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1922 m_rehash(std::max(m_rehash_policy.next_bkt(n),
1923 m_rehash_policy.bkt_for_elements(m_element_count
// Unconditionally resize the bucket array to n buckets.  Existing
// nodes are RELINKED into the new array (no element is copied or
// reallocated); note the relinking reverses each chain's order,
// which is fine for an unordered container.
1927 template<typename K, typename V,
1928 typename A, typename Ex, typename Eq,
1929 typename H1, typename H2, typename H, typename RP,
1930 bool c, bool ci, bool u>
1932 hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
1933 m_rehash(size_type n)
1935 node** new_array = m_allocate_buckets(n);
1938 for (size_type i = 0; i < m_bucket_count; ++i)
1939 while (node* p = m_buckets[i])
// bucket_index here re-derives the index from the node (using the
// cached hash code when available) against the new count n.
1941 std::size_t new_index = this->bucket_index(p, n);
1942 m_buckets[i] = p->m_next;
1943 p->m_next = new_array[new_index];
1944 new_array[new_index] = p;
1946 m_deallocate_buckets(m_buckets, m_bucket_count);
1948 m_buckets = new_array;
1952 // A failure here means that a hash function threw an exception.
1953 // We can't restore the previous state without calling the hash
1954 // function again, so the only sensible recovery is to delete
// Exception path: drop everything -- nodes already moved to the new
// array, the new array itself, and the nodes still in the old array
// -- leaving an empty but valid table, then rethrow.
1956 m_deallocate_nodes(new_array, n);
1957 m_deallocate_buckets(new_array, n);
1958 m_deallocate_nodes(m_buckets, m_bucket_count);
1959 m_element_count = 0;
1960 __throw_exception_again;
1964 _GLIBCXX_END_NAMESPACE
1965 } // Namespace std::tr1
1967 #endif /* GNU_LIBSTDCXX_TR1_HASHTABLE_ */