libstdc++
hashtable_policy.h
// Internal policy header for TR1 unordered_set and unordered_map -*- C++ -*-

// Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file tr1_impl/hashtable_policy.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

namespace std
{
_GLIBCXX_BEGIN_NAMESPACE_TR1

namespace __detail
{
  // Helper function: return distance(first, last) for forward
  // iterators, or 0 for input iterators.
  template<class _Iterator>
    inline typename std::iterator_traits<_Iterator>::difference_type
    __distance_fw(_Iterator __first, _Iterator __last,
                  std::input_iterator_tag)
    { return 0; }

  template<class _Iterator>
    inline typename std::iterator_traits<_Iterator>::difference_type
    __distance_fw(_Iterator __first, _Iterator __last,
                  std::forward_iterator_tag)
    { return std::distance(__first, __last); }

  template<class _Iterator>
    inline typename std::iterator_traits<_Iterator>::difference_type
    __distance_fw(_Iterator __first, _Iterator __last)
    {
      typedef typename std::iterator_traits<_Iterator>::iterator_category _Tag;
      return __distance_fw(__first, __last, _Tag());
    }
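
  // Illustrative note (added commentary, not part of the original header):
  // __distance_fw dispatches on the iterator category, so a forward (or
  // stronger) range is measured up front, while a single-pass input range
  // reports 0 and the caller grows the table incrementally instead.
  // A sketch of the two overloads in use, with hypothetical objects:
  //
  //   std::vector<int> __v(10);
  //   __distance_fw(__v.begin(), __v.end());
  //   // forward_iterator_tag overload: returns 10
  //
  //   std::istringstream __in("1 2 3");
  //   std::istream_iterator<int> __b(__in), __e;
  //   __distance_fw(__b, __e);
  //   // input_iterator_tag overload: returns 0, the stream is untouched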

  template<typename _RAIter, typename _Tp>
    _RAIter
    __lower_bound(_RAIter __first, _RAIter __last, const _Tp& __val)
    {
      typedef typename std::iterator_traits<_RAIter>::difference_type _DType;

      _DType __len = __last - __first;
      while (__len > 0)
        {
          _DType __half = __len >> 1;
          _RAIter __middle = __first + __half;
          if (*__middle < __val)
            {
              __first = __middle;
              ++__first;
              __len = __len - __half - 1;
            }
          else
            __len = __half;
        }
      return __first;
    }
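
  // Illustrative note (added commentary): __lower_bound is a pared-down
  // std::lower_bound, used below to binary-search the sorted __prime_list.
  // For example, with a hypothetical sorted array:
  //
  //   const unsigned long __primes[] = { 2, 5, 11, 23, 47 };
  //   const unsigned long* __p = __lower_bound(__primes, __primes + 5, 12ul);
  //   // *__p == 23, the first element not less than 12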

  // Auxiliary types used for all instantiations of _Hashtable: nodes
  // and iterators.

  // Nodes, used to wrap elements stored in the hash table. A policy
  // template parameter of class template _Hashtable controls whether
  // nodes also store a hash code. In some cases (e.g. strings) this
  // may be a performance win.
  template<typename _Value, bool __cache_hash_code>
    struct _Hash_node;

  template<typename _Value>
    struct _Hash_node<_Value, true>
    {
      _Value       _M_v;
      std::size_t  _M_hash_code;
      _Hash_node*  _M_next;

#ifdef _GLIBCXX_INCLUDE_AS_CXX0X
      template<typename... _Args>
        _Hash_node(_Args&&... __args)
        : _M_v(std::forward<_Args>(__args)...),
          _M_hash_code(), _M_next() { }
#endif
    };

  template<typename _Value>
    struct _Hash_node<_Value, false>
    {
      _Value       _M_v;
      _Hash_node*  _M_next;

#ifdef _GLIBCXX_INCLUDE_AS_CXX0X
      template<typename... _Args>
        _Hash_node(_Args&&... __args)
        : _M_v(std::forward<_Args>(__args)...),
          _M_next() { }
#endif
    };
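
  // Illustrative note (added commentary): the __cache_hash_code flag
  // selects between the two node layouts above.  Caching pays off when
  // recomputing the hash is expensive, e.g. for string keys:
  //
  //   // node without caching: just the value plus the chain pointer
  //   typedef _Hash_node<int, false>          __int_node;
  //   // node with caching: value, cached hash code, chain pointer, so
  //   // rehashing can reuse _M_hash_code instead of re-hashing the string
  //   typedef _Hash_node<std::string, true>   __string_node;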

  // Local iterators, used to iterate within a bucket but not between
  // buckets.
  template<typename _Value, bool __cache>
    struct _Node_iterator_base
    {
      _Node_iterator_base(_Hash_node<_Value, __cache>* __p)
      : _M_cur(__p) { }

      void
      _M_incr()
      { _M_cur = _M_cur->_M_next; }

      _Hash_node<_Value, __cache>* _M_cur;
    };

  template<typename _Value, bool __cache>
    inline bool
    operator==(const _Node_iterator_base<_Value, __cache>& __x,
               const _Node_iterator_base<_Value, __cache>& __y)
    { return __x._M_cur == __y._M_cur; }

  template<typename _Value, bool __cache>
    inline bool
    operator!=(const _Node_iterator_base<_Value, __cache>& __x,
               const _Node_iterator_base<_Value, __cache>& __y)
    { return __x._M_cur != __y._M_cur; }

  template<typename _Value, bool __constant_iterators, bool __cache>
    struct _Node_iterator
    : public _Node_iterator_base<_Value, __cache>
    {
      typedef _Value value_type;
      typedef typename
      __gnu_cxx::__conditional_type<__constant_iterators,
                                    const _Value*, _Value*>::__type
        pointer;
      typedef typename
      __gnu_cxx::__conditional_type<__constant_iterators,
                                    const _Value&, _Value&>::__type
        reference;
      typedef std::ptrdiff_t difference_type;
      typedef std::forward_iterator_tag iterator_category;

      _Node_iterator()
      : _Node_iterator_base<_Value, __cache>(0) { }

      explicit
      _Node_iterator(_Hash_node<_Value, __cache>* __p)
      : _Node_iterator_base<_Value, __cache>(__p) { }

      reference
      operator*() const
      { return this->_M_cur->_M_v; }

      pointer
      operator->() const
      { return &this->_M_cur->_M_v; }

      _Node_iterator&
      operator++()
      {
        this->_M_incr();
        return *this;
      }

      _Node_iterator
      operator++(int)
      {
        _Node_iterator __tmp(*this);
        this->_M_incr();
        return __tmp;
      }
    };

  template<typename _Value, bool __constant_iterators, bool __cache>
    struct _Node_const_iterator
    : public _Node_iterator_base<_Value, __cache>
    {
      typedef _Value value_type;
      typedef const _Value* pointer;
      typedef const _Value& reference;
      typedef std::ptrdiff_t difference_type;
      typedef std::forward_iterator_tag iterator_category;

      _Node_const_iterator()
      : _Node_iterator_base<_Value, __cache>(0) { }

      explicit
      _Node_const_iterator(_Hash_node<_Value, __cache>* __p)
      : _Node_iterator_base<_Value, __cache>(__p) { }

      _Node_const_iterator(const _Node_iterator<_Value, __constant_iterators,
                           __cache>& __x)
      : _Node_iterator_base<_Value, __cache>(__x._M_cur) { }

      reference
      operator*() const
      { return this->_M_cur->_M_v; }

      pointer
      operator->() const
      { return &this->_M_cur->_M_v; }

      _Node_const_iterator&
      operator++()
      {
        this->_M_incr();
        return *this;
      }

      _Node_const_iterator
      operator++(int)
      {
        _Node_const_iterator __tmp(*this);
        this->_M_incr();
        return __tmp;
      }
    };
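
  // Illustrative note (added commentary): a "local" iterator simply
  // follows the _M_next chain of a single bucket, so it stops at the
  // end of that bucket instead of moving on to the next one.  Sketch,
  // given the (hypothetical) head node of one bucket:
  //
  //   _Hash_node<int, false>* __bucket_head = 0;  // head of some bucket
  //   _Node_iterator<int, false, false> __it(__bucket_head), __end;
  //   for (; __it != __end; ++__it)
  //     { /* visits only the elements chained in this bucket */ }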

  template<typename _Value, bool __cache>
    struct _Hashtable_iterator_base
    {
      _Hashtable_iterator_base(_Hash_node<_Value, __cache>* __node,
                               _Hash_node<_Value, __cache>** __bucket)
      : _M_cur_node(__node), _M_cur_bucket(__bucket) { }

      void
      _M_incr()
      {
        _M_cur_node = _M_cur_node->_M_next;
        if (!_M_cur_node)
          _M_incr_bucket();
      }

      void
      _M_incr_bucket();

      _Hash_node<_Value, __cache>*   _M_cur_node;
      _Hash_node<_Value, __cache>**  _M_cur_bucket;
    };

  // Global iterators, used for arbitrary iteration within a hash
  // table. Larger and more expensive than local iterators.
  template<typename _Value, bool __cache>
    void
    _Hashtable_iterator_base<_Value, __cache>::
    _M_incr_bucket()
    {
      ++_M_cur_bucket;

      // This loop requires the bucket array to have a non-null sentinel.
      while (!*_M_cur_bucket)
        ++_M_cur_bucket;
      _M_cur_node = *_M_cur_bucket;
    }
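
  // Illustrative note (added commentary): the loop above has no
  // end-of-array check; it relies on the owning _Hashtable (defined in
  // the companion hashtable header) keeping one extra, never-null
  // sentinel slot after the last real bucket.  Layout sketch, with a
  // hypothetical bucket array of __n real buckets:
  //
  //   __buckets[0] .. __buckets[__n - 1]   may be null (empty buckets)
  //   __buckets[__n]                       non-null sentinel
  //
  // so _M_incr_bucket always terminates, and an iterator that has
  // advanced onto the sentinel acts as the past-the-end position.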

  template<typename _Value, bool __cache>
    inline bool
    operator==(const _Hashtable_iterator_base<_Value, __cache>& __x,
               const _Hashtable_iterator_base<_Value, __cache>& __y)
    { return __x._M_cur_node == __y._M_cur_node; }

  template<typename _Value, bool __cache>
    inline bool
    operator!=(const _Hashtable_iterator_base<_Value, __cache>& __x,
               const _Hashtable_iterator_base<_Value, __cache>& __y)
    { return __x._M_cur_node != __y._M_cur_node; }

  template<typename _Value, bool __constant_iterators, bool __cache>
    struct _Hashtable_iterator
    : public _Hashtable_iterator_base<_Value, __cache>
    {
      typedef _Value value_type;
      typedef typename
      __gnu_cxx::__conditional_type<__constant_iterators,
                                    const _Value*, _Value*>::__type
        pointer;
      typedef typename
      __gnu_cxx::__conditional_type<__constant_iterators,
                                    const _Value&, _Value&>::__type
        reference;
      typedef std::ptrdiff_t difference_type;
      typedef std::forward_iterator_tag iterator_category;

      _Hashtable_iterator()
      : _Hashtable_iterator_base<_Value, __cache>(0, 0) { }

      _Hashtable_iterator(_Hash_node<_Value, __cache>* __p,
                          _Hash_node<_Value, __cache>** __b)
      : _Hashtable_iterator_base<_Value, __cache>(__p, __b) { }

      explicit
      _Hashtable_iterator(_Hash_node<_Value, __cache>** __b)
      : _Hashtable_iterator_base<_Value, __cache>(*__b, __b) { }

      reference
      operator*() const
      { return this->_M_cur_node->_M_v; }

      pointer
      operator->() const
      { return &this->_M_cur_node->_M_v; }

      _Hashtable_iterator&
      operator++()
      {
        this->_M_incr();
        return *this;
      }

      _Hashtable_iterator
      operator++(int)
      {
        _Hashtable_iterator __tmp(*this);
        this->_M_incr();
        return __tmp;
      }
    };

  template<typename _Value, bool __constant_iterators, bool __cache>
    struct _Hashtable_const_iterator
    : public _Hashtable_iterator_base<_Value, __cache>
    {
      typedef _Value value_type;
      typedef const _Value* pointer;
      typedef const _Value& reference;
      typedef std::ptrdiff_t difference_type;
      typedef std::forward_iterator_tag iterator_category;

      _Hashtable_const_iterator()
      : _Hashtable_iterator_base<_Value, __cache>(0, 0) { }

      _Hashtable_const_iterator(_Hash_node<_Value, __cache>* __p,
                                _Hash_node<_Value, __cache>** __b)
      : _Hashtable_iterator_base<_Value, __cache>(__p, __b) { }

      explicit
      _Hashtable_const_iterator(_Hash_node<_Value, __cache>** __b)
      : _Hashtable_iterator_base<_Value, __cache>(*__b, __b) { }

      _Hashtable_const_iterator(const _Hashtable_iterator<_Value,
                                __constant_iterators, __cache>& __x)
      : _Hashtable_iterator_base<_Value, __cache>(__x._M_cur_node,
                                                  __x._M_cur_bucket) { }

      reference
      operator*() const
      { return this->_M_cur_node->_M_v; }

      pointer
      operator->() const
      { return &this->_M_cur_node->_M_v; }

      _Hashtable_const_iterator&
      operator++()
      {
        this->_M_incr();
        return *this;
      }

      _Hashtable_const_iterator
      operator++(int)
      {
        _Hashtable_const_iterator __tmp(*this);
        this->_M_incr();
        return __tmp;
      }
    };


  // Many of class template _Hashtable's template parameters are policy
  // classes. These are defaults for the policies.

  // Default range hashing function: use division to fold a large number
  // into the range [0, N).
  struct _Mod_range_hashing
  {
    typedef std::size_t first_argument_type;
    typedef std::size_t second_argument_type;
    typedef std::size_t result_type;

    result_type
    operator()(first_argument_type __num, second_argument_type __den) const
    { return __num % __den; }
  };
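
  // Illustrative note (added commentary): _Mod_range_hashing only folds
  // an already-computed hash code into a bucket index, e.g.:
  //
  //   _Mod_range_hashing __mod;
  //   std::size_t __code = 1000;               // hypothetical hash code
  //   std::size_t __bkt  = __mod(__code, 97);  // 1000 % 97 == 30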

  // Default ranged hash function H. In principle it should be a
  // function object composed from objects of type H1 and H2 such that
  // h(k, N) = h2(h1(k), N), but that would mean making extra copies of
  // h1 and h2. So instead we'll just use a tag to tell class template
  // hashtable to do that composition.
  struct _Default_ranged_hash { };
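
  // Illustrative note (added commentary): with this tag the table
  // performs the composition h(k, N) = h2(h1(k), N) itself.  With the
  // usual TR1 defaults that looks like:
  //
  //   std::tr1::hash<int> __h1;        // H1: key -> hash code
  //   _Mod_range_hashing  __h2;        // H2: hash code -> bucket index
  //   std::size_t __n = 11;            // bucket count (hypothetical)
  //   std::size_t __bkt = __h2(__h1(42), __n);   // == h2(h1(k), N)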

  // Default value for rehash policy. Bucket size is (usually) the
  // smallest prime that keeps the load factor small enough.
  struct _Prime_rehash_policy
  {
    _Prime_rehash_policy(float __z = 1.0)
    : _M_max_load_factor(__z), _M_growth_factor(2.f), _M_next_resize(0) { }

    float
    max_load_factor() const
    { return _M_max_load_factor; }

    // Return a bucket size no smaller than n.
    std::size_t
    _M_next_bkt(std::size_t __n) const;

    // Return a bucket count appropriate for n elements.
    std::size_t
    _M_bkt_for_elements(std::size_t __n) const;

    // __n_bkt is current bucket count, __n_elt is current element count,
    // and __n_ins is number of elements to be inserted. Do we need to
    // increase bucket count? If so, return make_pair(true, n), where n
    // is the new bucket count. If not, return make_pair(false, 0).
    std::pair<bool, std::size_t>
    _M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
                   std::size_t __n_ins) const;

    enum { _S_n_primes = sizeof(unsigned long) != 8 ? 256 : 256 + 48 };

    float _M_max_load_factor;
    float _M_growth_factor;
    mutable std::size_t _M_next_resize;
  };
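
  // Illustrative note (added commentary): with the default
  // _M_max_load_factor of 1.0 the policy maintains
  //
  //   element_count <= max_load_factor * bucket_count
  //
  // by growing to a suitable prime from __prime_list.  For instance
  // (exact results depend on the prime table, so treat these as a
  // sketch):
  //
  //   _Prime_rehash_policy __pol;        // max_load_factor == 1.0
  //   __pol._M_bkt_for_elements(100);    // smallest listed prime >= 100
  //   __pol._M_next_bkt(60);             // smallest listed prime >= 60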

  extern const unsigned long __prime_list[];

  // XXX This is a hack. There's no good reason for any of
  // _Prime_rehash_policy's member functions to be inline.

  // Return a prime no smaller than n.
  inline std::size_t
  _Prime_rehash_policy::
  _M_next_bkt(std::size_t __n) const
  {
    const unsigned long* __p = __lower_bound(__prime_list, __prime_list
                                             + _S_n_primes, __n);
    _M_next_resize =
      static_cast<std::size_t>(__builtin_ceil(*__p * _M_max_load_factor));
    return *__p;
  }

  // Return the smallest prime p such that alpha p >= n, where alpha
  // is the load factor.
  inline std::size_t
  _Prime_rehash_policy::
  _M_bkt_for_elements(std::size_t __n) const
  {
    const float __min_bkts = __n / _M_max_load_factor;
    const unsigned long* __p = __lower_bound(__prime_list, __prime_list
                                             + _S_n_primes, __min_bkts);
    _M_next_resize =
      static_cast<std::size_t>(__builtin_ceil(*__p * _M_max_load_factor));
    return *__p;
  }

  // Finds the smallest prime p such that alpha p > __n_elt + __n_ins.
  // If p > __n_bkt, return make_pair(true, p); otherwise return
  // make_pair(false, 0). In principle this isn't very different from
  // _M_bkt_for_elements.

  // The only tricky part is that we're caching the element count at
  // which we need to rehash, so we don't have to do a floating-point
  // multiply for every insertion.

  inline std::pair<bool, std::size_t>
  _Prime_rehash_policy::
  _M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
                 std::size_t __n_ins) const
  {
    if (__n_elt + __n_ins > _M_next_resize)
      {
        float __min_bkts = ((float(__n_ins) + float(__n_elt))
                            / _M_max_load_factor);
        if (__min_bkts > __n_bkt)
          {
            __min_bkts = std::max(__min_bkts, _M_growth_factor * __n_bkt);
            const unsigned long* __p =
              __lower_bound(__prime_list, __prime_list + _S_n_primes,
                            __min_bkts);
            _M_next_resize = static_cast<std::size_t>
              (__builtin_ceil(*__p * _M_max_load_factor));
            return std::make_pair(true, *__p);
          }
        else
          {
            _M_next_resize = static_cast<std::size_t>
              (__builtin_ceil(__n_bkt * _M_max_load_factor));
            return std::make_pair(false, 0);
          }
      }
    else
      return std::make_pair(false, 0);
  }
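
  // Illustrative note (added commentary): a worked example of the
  // decision above, with hypothetical numbers and max_load_factor 1.0:
  //
  //   _Prime_rehash_policy __pol;
  //   // 11 buckets, 11 elements, 1 element about to be inserted:
  //   //   min_bkts = (11 + 1) / 1.0 = 12 > 11, grown to
  //   //   max(12, 2 * 11) = 22, then rounded up to the next listed prime.
  //   std::pair<bool, std::size_t> __r = __pol._M_need_rehash(11, 11, 1);
  //   // __r.first == true, __r.second is that prime (23 in the usual list)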

  // Base classes for std::tr1::_Hashtable. We define these base
  // classes because in some cases we want to do different things
  // depending on the value of a policy class. In some cases the
  // policy class affects which member functions and nested typedefs
  // are defined; we handle that by specializing base class templates.
  // Several of the base class templates need to access other members
  // of class template _Hashtable, so we use the "curiously recurring
  // template pattern" for them.
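
  // Illustrative note (added commentary): a minimal sketch of the
  // "curiously recurring template pattern" as it is used below, with
  // hypothetical names:
  //
  //   template<typename _Derived>
  //     struct _Policy_base
  //     {
  //       void
  //       _M_do_something()
  //       {
  //         // The base can reach members of the derived hashtable
  //         // because the derived type is a template parameter.
  //         _Derived* __this = static_cast<_Derived*>(this);
  //         __this->_M_some_hashtable_member();
  //       }
  //     };
  //
  //   struct _My_hashtable : _Policy_base<_My_hashtable>
  //   { void _M_some_hashtable_member() { } };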

  // class template _Map_base. If the hashtable has a value type of the
  // form pair<T1, T2> and a key extraction policy that returns the
  // first part of the pair, the hashtable gets a mapped_type typedef.
  // If it satisfies those criteria and also has unique keys, then it
  // also gets an operator[].
  template<typename _Key, typename _Value, typename _Ex, bool __unique,
           typename _Hashtable>
    struct _Map_base { };

  template<typename _Key, typename _Pair, typename _Hashtable>
    struct _Map_base<_Key, _Pair, std::_Select1st<_Pair>, false, _Hashtable>
    {
      typedef typename _Pair::second_type mapped_type;
    };

  template<typename _Key, typename _Pair, typename _Hashtable>
    struct _Map_base<_Key, _Pair, std::_Select1st<_Pair>, true, _Hashtable>
    {
      typedef typename _Pair::second_type mapped_type;

      mapped_type&
      operator[](const _Key& __k);

#ifdef _GLIBCXX_INCLUDE_AS_CXX0X
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // DR 761. unordered_map needs an at() member function.
      mapped_type&
      at(const _Key& __k);

      const mapped_type&
      at(const _Key& __k) const;
#endif
    };

  template<typename _Key, typename _Pair, typename _Hashtable>
    typename _Map_base<_Key, _Pair, std::_Select1st<_Pair>,
                       true, _Hashtable>::mapped_type&
    _Map_base<_Key, _Pair, std::_Select1st<_Pair>, true, _Hashtable>::
    operator[](const _Key& __k)
    {
      _Hashtable* __h = static_cast<_Hashtable*>(this);
      typename _Hashtable::_Hash_code_type __code = __h->_M_hash_code(__k);
      std::size_t __n = __h->_M_bucket_index(__k, __code,
                                             __h->_M_bucket_count);

      typename _Hashtable::_Node* __p =
        __h->_M_find_node(__h->_M_buckets[__n], __k, __code);
      if (!__p)
        return __h->_M_insert_bucket(std::make_pair(__k, mapped_type()),
                                     __n, __code)->second;
      return (__p->_M_v).second;
    }
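
  // Illustrative note (added commentary): this is the lookup-or-insert
  // behaviour observable through the map interface, e.g.:
  //
  //   std::tr1::unordered_map<std::string, int> __m;
  //   __m["apple"] += 1;   // key absent: value-initialized to 0, then 1
  //   __m["apple"] += 1;   // key found in its bucket: now 2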

#ifdef _GLIBCXX_INCLUDE_AS_CXX0X
  template<typename _Key, typename _Pair, typename _Hashtable>
    typename _Map_base<_Key, _Pair, std::_Select1st<_Pair>,
                       true, _Hashtable>::mapped_type&
    _Map_base<_Key, _Pair, std::_Select1st<_Pair>, true, _Hashtable>::
    at(const _Key& __k)
    {
      _Hashtable* __h = static_cast<_Hashtable*>(this);
      typename _Hashtable::_Hash_code_type __code = __h->_M_hash_code(__k);
      std::size_t __n = __h->_M_bucket_index(__k, __code,
                                             __h->_M_bucket_count);

      typename _Hashtable::_Node* __p =
        __h->_M_find_node(__h->_M_buckets[__n], __k, __code);
      if (!__p)
        __throw_out_of_range(__N("_Map_base::at"));
      return (__p->_M_v).second;
    }

  template<typename _Key, typename _Pair, typename _Hashtable>
    const typename _Map_base<_Key, _Pair, std::_Select1st<_Pair>,
                             true, _Hashtable>::mapped_type&
    _Map_base<_Key, _Pair, std::_Select1st<_Pair>, true, _Hashtable>::
    at(const _Key& __k) const
    {
      const _Hashtable* __h = static_cast<const _Hashtable*>(this);
      typename _Hashtable::_Hash_code_type __code = __h->_M_hash_code(__k);
      std::size_t __n = __h->_M_bucket_index(__k, __code,
                                             __h->_M_bucket_count);

      typename _Hashtable::_Node* __p =
        __h->_M_find_node(__h->_M_buckets[__n], __k, __code);
      if (!__p)
        __throw_out_of_range(__N("_Map_base::at"));
      return (__p->_M_v).second;
    }
#endif

  // class template _Rehash_base. Give hashtable the max_load_factor
  // functions iff the rehash policy is _Prime_rehash_policy.
  template<typename _RehashPolicy, typename _Hashtable>
    struct _Rehash_base { };

  template<typename _Hashtable>
    struct _Rehash_base<_Prime_rehash_policy, _Hashtable>
    {
      float
      max_load_factor() const
      {
        const _Hashtable* __this = static_cast<const _Hashtable*>(this);
        return __this->__rehash_policy().max_load_factor();
      }

      void
      max_load_factor(float __z)
      {
        _Hashtable* __this = static_cast<_Hashtable*>(this);
        __this->__rehash_policy(_Prime_rehash_policy(__z));
      }
    };
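
  // Illustrative note (added commentary): because the default rehash
  // policy is _Prime_rehash_policy, containers built on _Hashtable
  // expose these members, e.g.:
  //
  //   std::tr1::unordered_set<int> __s;
  //   float __lf = __s.max_load_factor();   // getter from _Rehash_base
  //   __s.max_load_factor(0.5f);            // setter: installs a new policy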

  // Class template _Hash_code_base. Encapsulates two policy issues that
  // aren't quite orthogonal.
  //   (1) the difference between using a ranged hash function and using
  //       the combination of a hash function and a range-hashing function.
  //       In the former case we don't have such things as hash codes, so
  //       we have a dummy type as placeholder.
  //   (2) Whether or not we cache hash codes. Caching hash codes is
  //       meaningless if we have a ranged hash function.
  // We also put the key extraction and equality comparison function
  // objects here, for convenience.

  // Primary template: unused except as a hook for specializations.
  template<typename _Key, typename _Value,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash,
           bool __cache_hash_code>
    struct _Hash_code_base;

  // Specialization: ranged hash function, no caching hash codes. H1
  // and H2 are provided but ignored. We define a dummy hash code type.
  template<typename _Key, typename _Value,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash>
    struct _Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2,
                           _Hash, false>
    {
    protected:
      _Hash_code_base(const _ExtractKey& __ex, const _Equal& __eq,
                      const _H1&, const _H2&, const _Hash& __h)
      : _M_extract(__ex), _M_eq(__eq), _M_ranged_hash(__h) { }

      typedef void* _Hash_code_type;

      _Hash_code_type
      _M_hash_code(const _Key& __key) const
      { return 0; }

      std::size_t
      _M_bucket_index(const _Key& __k, _Hash_code_type,
                      std::size_t __n) const
      { return _M_ranged_hash(__k, __n); }

      std::size_t
      _M_bucket_index(const _Hash_node<_Value, false>* __p,
                      std::size_t __n) const
      { return _M_ranged_hash(_M_extract(__p->_M_v), __n); }

      bool
      _M_compare(const _Key& __k, _Hash_code_type,
                 _Hash_node<_Value, false>* __n) const
      { return _M_eq(__k, _M_extract(__n->_M_v)); }

      void
      _M_store_code(_Hash_node<_Value, false>*, _Hash_code_type) const
      { }

      void
      _M_copy_code(_Hash_node<_Value, false>*,
                   const _Hash_node<_Value, false>*) const
      { }

      void
      _M_swap(_Hash_code_base& __x)
      {
        std::swap(_M_extract, __x._M_extract);
        std::swap(_M_eq, __x._M_eq);
        std::swap(_M_ranged_hash, __x._M_ranged_hash);
      }

    protected:
      _ExtractKey _M_extract;
      _Equal _M_eq;
      _Hash _M_ranged_hash;
    };


  // No specialization for ranged hash function while caching hash codes.
  // That combination is meaningless, and trying to do it is an error.


  // Specialization: ranged hash function, cache hash codes. This
  // combination is meaningless, so we provide only a declaration
  // and no definition.
  template<typename _Key, typename _Value,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash>
    struct _Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2,
                           _Hash, true>;

  // Specialization: hash function and range-hashing function, no
  // caching of hash codes. H is provided but ignored. Provides
  // typedef and accessor required by TR1.
  template<typename _Key, typename _Value,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2>
    struct _Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2,
                           _Default_ranged_hash, false>
    {
      typedef _H1 hasher;

      hasher
      hash_function() const
      { return _M_h1; }

    protected:
      _Hash_code_base(const _ExtractKey& __ex, const _Equal& __eq,
                      const _H1& __h1, const _H2& __h2,
                      const _Default_ranged_hash&)
      : _M_extract(__ex), _M_eq(__eq), _M_h1(__h1), _M_h2(__h2) { }

      typedef std::size_t _Hash_code_type;

      _Hash_code_type
      _M_hash_code(const _Key& __k) const
      { return _M_h1(__k); }

      std::size_t
      _M_bucket_index(const _Key&, _Hash_code_type __c,
                      std::size_t __n) const
      { return _M_h2(__c, __n); }

      std::size_t
      _M_bucket_index(const _Hash_node<_Value, false>* __p,
                      std::size_t __n) const
      { return _M_h2(_M_h1(_M_extract(__p->_M_v)), __n); }

      bool
      _M_compare(const _Key& __k, _Hash_code_type,
                 _Hash_node<_Value, false>* __n) const
      { return _M_eq(__k, _M_extract(__n->_M_v)); }

      void
      _M_store_code(_Hash_node<_Value, false>*, _Hash_code_type) const
      { }

      void
      _M_copy_code(_Hash_node<_Value, false>*,
                   const _Hash_node<_Value, false>*) const
      { }

      void
      _M_swap(_Hash_code_base& __x)
      {
        std::swap(_M_extract, __x._M_extract);
        std::swap(_M_eq, __x._M_eq);
        std::swap(_M_h1, __x._M_h1);
        std::swap(_M_h2, __x._M_h2);
      }

    protected:
      _ExtractKey _M_extract;
      _Equal _M_eq;
      _H1 _M_h1;
      _H2 _M_h2;
    };

  // Specialization: hash function and range-hashing function,
  // caching hash codes. H is provided but ignored. Provides
  // typedef and accessor required by TR1.
  template<typename _Key, typename _Value,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2>
    struct _Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2,
                           _Default_ranged_hash, true>
    {
      typedef _H1 hasher;

      hasher
      hash_function() const
      { return _M_h1; }

    protected:
      _Hash_code_base(const _ExtractKey& __ex, const _Equal& __eq,
                      const _H1& __h1, const _H2& __h2,
                      const _Default_ranged_hash&)
      : _M_extract(__ex), _M_eq(__eq), _M_h1(__h1), _M_h2(__h2) { }

      typedef std::size_t _Hash_code_type;

      _Hash_code_type
      _M_hash_code(const _Key& __k) const
      { return _M_h1(__k); }

      std::size_t
      _M_bucket_index(const _Key&, _Hash_code_type __c,
                      std::size_t __n) const
      { return _M_h2(__c, __n); }

      std::size_t
      _M_bucket_index(const _Hash_node<_Value, true>* __p,
                      std::size_t __n) const
      { return _M_h2(__p->_M_hash_code, __n); }

      bool
      _M_compare(const _Key& __k, _Hash_code_type __c,
                 _Hash_node<_Value, true>* __n) const
      { return __c == __n->_M_hash_code && _M_eq(__k, _M_extract(__n->_M_v)); }

      void
      _M_store_code(_Hash_node<_Value, true>* __n, _Hash_code_type __c) const
      { __n->_M_hash_code = __c; }

      void
      _M_copy_code(_Hash_node<_Value, true>* __to,
                   const _Hash_node<_Value, true>* __from) const
      { __to->_M_hash_code = __from->_M_hash_code; }

      void
      _M_swap(_Hash_code_base& __x)
      {
        std::swap(_M_extract, __x._M_extract);
        std::swap(_M_eq, __x._M_eq);
        std::swap(_M_h1, __x._M_h1);
        std::swap(_M_h2, __x._M_h2);
      }

    protected:
      _ExtractKey _M_extract;
      _Equal _M_eq;
      _H1 _M_h1;
      _H2 _M_h2;
    };
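
  // Illustrative note (added commentary): in the caching specialization
  // above, _M_compare checks the stored hash code before calling the
  // (possibly expensive) key-equality predicate, so colliding-but-unequal
  // elements are usually rejected by a single integer comparison.
  // Sketch, for string keys and hypothetical nodes __n in a bucket:
  //
  //   std::tr1::hash<std::string> __h1;
  //   std::size_t __c = __h1("spam");
  //   // lookup tests:  __c == __n->_M_hash_code
  //   //                && __k == _M_extract(__n->_M_v)
  //   // and the string comparison runs only when the cached codes match.
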
} // namespace __detail

_GLIBCXX_END_NAMESPACE_TR1
}