// value type is Value. As a conforming extension, we allow for
// value type != Value.
- // _ExtractKey: function object that takes a object of type Value
+ // _ExtractKey: function object that takes an object of type Value
// and returns a value of type _Key.
// _Equal: function object that takes two objects of type k and returns
// count. If so, returns make_pair(true, n), where n is the new
// bucket count. If not, returns make_pair(false, <anything>).
- // ??? Right now it is hard-wired that the number of buckets never
- // shrinks. Should we allow _RehashPolicy to change that?
-
// __cache_hash_code: bool. true if we store the value of the hash
// function along with the value. This is a time-space tradeoff.
// Storing it may improve lookup speed by reducing the number of times
// is always at most one, false if it may be an arbitrary number. This
// true for unordered_set and unordered_map, false for unordered_multiset
// and unordered_multimap.
+ /**
+ * Here's _Hashtable data structure, each _Hashtable has:
+ * - _Bucket[] _M_buckets
+ * - size_type _M_bucket_count
+ * - size_type _M_begin_bucket_index
+ * - size_type _M_element_count
+ *
+ * with _Bucket being _Node* and _Node:
+ * - _Node* _M_next
+ * - Tp _M_value
+ * - size_t _M_code if cache_hash_code is true
+ *
+ * In terms of Standard containers the hashtable is like the aggregation of:
+ * - std::forward_list<_Node> containing the elements
+ * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
+ *
+ * The first non-empty bucket with index _M_begin_bucket_index contains the
+ * first container node which is also the first bucket node whereas other
+ * non-empty buckets contain the node before the first bucket node. This is so
+ * to implement something like a std::forward_list::erase_after on container
+ * erase calls.
+ *
+ * Access to a bucket's last element requires a check on the hash code to see
+ * if the node is still in the bucket. Such a design demands a quite efficient
+ * hash functor and is one of the reasons it is highly advised to set
+ * __cache_hash_code to true.
+ *
+ * The container iterators are simply built from nodes. This way incrementing
+ * the iterator is perfectly efficient no matter how many empty buckets there
+ * are in the container.
+ *
+ * On insert we compute the element's hash code and use it to find the bucket
+ * index. If the element is the first one in the bucket we must find the
+ * previous non-empty bucket where the previous node resides. To keep this
+ * loop minimal it is important that the number of buckets is not too high
+ * compared to the number of elements. So the hash policy must be carefully
+ * designed so that it computes a bucket count large enough to respect the
+ * user-defined load factor but also not too large, to limit the impact on
+ * the insert operation.
+ *
+ * On erase, the simple iterator design requires using the hash functor to
+ * get the index of the bucket to update. For this reason, when
+ * __cache_hash_code is set to false, there is a static assertion that the
+ * hash functor cannot throw.
+ *
+ * _M_begin_bucket_index is used to offer constant time access to the
+ * container's begin iterator.
+ */
template<typename _Key, typename _Value, typename _Allocator,
typename _ExtractKey, typename _Equal,
__constant_iterators,
__unique_keys> >
{
+ static_assert(__or_<integral_constant<bool, __cache_hash_code>,
+ __detail::__is_noexcept_hash<_Key, _H1>>::value,
+ "Cache the hash code or qualify your hash functor with noexcept");
public:
typedef _Allocator allocator_type;
typedef _Value value_type;
__cache_hash_code>
const_local_iterator;
- typedef __detail::_Hashtable_iterator<value_type, __constant_iterators,
- __cache_hash_code>
- iterator;
- typedef __detail::_Hashtable_const_iterator<value_type,
- __constant_iterators,
- __cache_hash_code>
- const_iterator;
+ typedef local_iterator iterator;
+ typedef const_local_iterator const_iterator;
template<typename _Key2, typename _Value2, typename _Ex2, bool __unique2,
typename _Hashtable2>
friend struct __detail::_Map_base;
private:
+ typedef typename _RehashPolicy::_State _RehashPolicyState;
typedef __detail::_Hash_node<_Value, __cache_hash_code> _Node;
typedef typename _Allocator::template rebind<_Node>::other
_Node_allocator_type;
- typedef typename _Allocator::template rebind<_Node*>::other
+ typedef _Node* _Bucket;
+ //typedef __detail::_Bucket<_Value, __cache_hash_code> _Bucket;
+ typedef typename _Allocator::template rebind<_Bucket>::other
_Bucket_allocator_type;
typedef typename _Allocator::template rebind<_Value>::other
_Value_allocator_type;
_Node_allocator_type _M_node_allocator;
- _Node** _M_buckets;
+ _Bucket* _M_buckets;
size_type _M_bucket_count;
size_type _M_begin_bucket_index; // First non-empty bucket.
size_type _M_element_count;
void
_M_deallocate_node(_Node* __n);
+ // Deallocate all nodes contained in the bucket array, buckets' nodes
+ // are not linked to each other
+ void
+ _M_deallocate_nodes(_Bucket*, size_type);
+
+ // Deallocate the linked list of nodes pointed to by __n
void
- _M_deallocate_nodes(_Node**, size_type);
+ _M_deallocate_nodes(_Node* __n);
- _Node**
+ _Bucket*
_M_allocate_buckets(size_type __n);
void
- _M_deallocate_buckets(_Node**, size_type __n);
+ _M_deallocate_buckets(_Bucket*, size_type __n);
+
+ // Gets the bucket's first node, dealing with the difference between the
+ // first non-empty bucket, which contains the first container node, and the
+ // other non-empty buckets, whose slot holds the node before the one
+ // belonging to the bucket.
+ _Node*
+ _M_bucket_begin(size_type __bkt) const;
+
+ // Gets the bucket last node if any
+ _Node*
+ _M_bucket_end(size_type __bkt) const;
+
+ // Gets the bucket node after the last if any
+ _Node*
+ _M_bucket_past_the_end(size_type __bkt) const
+ {
+ _Node* __end = _M_bucket_end(__bkt);
+ return __end ? __end->_M_next : nullptr;
+ }
public:
// Constructor, destructor, assignment, swap
// Basic container operations
iterator
begin() noexcept
- { return iterator(_M_buckets + _M_begin_bucket_index); }
+ { return iterator(_M_buckets[_M_begin_bucket_index]); }
const_iterator
begin() const noexcept
- { return const_iterator(_M_buckets + _M_begin_bucket_index); }
+ { return const_iterator(_M_buckets[_M_begin_bucket_index]); }
iterator
end() noexcept
- { return iterator(_M_buckets + _M_bucket_count); }
+ { return iterator(nullptr); }
const_iterator
end() const noexcept
- { return const_iterator(_M_buckets + _M_bucket_count); }
+ { return const_iterator(nullptr); }
const_iterator
cbegin() const noexcept
- { return const_iterator(_M_buckets + _M_begin_bucket_index); }
+ { return const_iterator(_M_buckets[_M_begin_bucket_index]); }
const_iterator
cend() const noexcept
- { return const_iterator(_M_buckets + _M_bucket_count); }
+ { return const_iterator(nullptr); }
size_type
size() const noexcept
local_iterator
begin(size_type __n)
- { return local_iterator(_M_buckets[__n]); }
+ { return local_iterator(_M_bucket_begin(__n)); }
local_iterator
- end(size_type)
- { return local_iterator(0); }
+ end(size_type __n)
+ { return local_iterator(_M_bucket_past_the_end(__n)); }
const_local_iterator
begin(size_type __n) const
- { return const_local_iterator(_M_buckets[__n]); }
+ { return const_local_iterator(_M_bucket_begin(__n)); }
const_local_iterator
- end(size_type) const
- { return const_local_iterator(0); }
+ end(size_type __n) const
+ { return const_local_iterator(_M_bucket_past_the_end(__n)); }
// DR 691.
const_local_iterator
cbegin(size_type __n) const
- { return const_local_iterator(_M_buckets[__n]); }
+ { return const_local_iterator(_M_bucket_begin(__n)); }
const_local_iterator
- cend(size_type) const
- { return const_local_iterator(0); }
+ cend(size_type __n) const
+ { return const_local_iterator(_M_bucket_past_the_end(__n)); }
float
load_factor() const noexcept
private:
// Find and insert helper functions and types
_Node*
- _M_find_node(_Node*, const key_type&,
+ _M_find_node(size_type, const key_type&,
typename _Hashtable::_Hash_code_type) const;
+ // Insert a node in an empty bucket
+ void
+ _M_insert_bucket_begin(size_type, _Node*);
+
+ // Insert a node after another one in a non-empty bucket
+ void
+ _M_insert_after(size_type, _Node*, _Node*);
+
+ // Remove the bucket first node
+ void
+ _M_remove_bucket_begin(size_type __bkt, _Node* __next_n,
+ size_type __next_bkt);
+
+ // Get the node before __n in the bucket __bkt
+ _Node*
+ _M_get_previous_node(size_type __bkt, _Node* __n);
+
template<typename _Arg>
iterator
_M_insert_bucket(_Arg&&, size_type,
private:
// Unconditionally change size of bucket array to n, restore hash policy
- // resize value to __next_resize on exception.
- void _M_rehash(size_type __n, size_type __next_resize);
+ // state to __state on exception.
+ void _M_rehash(size_type __n, const _RehashPolicyState& __state);
};
__try
{
_M_node_allocator.construct(__n, std::forward<_Args>(__args)...);
- __n->_M_next = 0;
return __n;
}
__catch(...)
void
_Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
- _M_deallocate_nodes(_Node** __array, size_type __n)
+ _M_deallocate_nodes(_Bucket* __buckets, size_type __n)
{
- for (size_type __i = 0; __i < __n; ++__i)
+ for (size_type __i = 0; __i != __n; ++__i)
+ _M_deallocate_nodes(__buckets[__i]);
+ }
+
+ template<typename _Key, typename _Value,
+ typename _Allocator, typename _ExtractKey, typename _Equal,
+ typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
+ bool __chc, bool __cit, bool __uk>
+ void
+ _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
+ _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
+ _M_deallocate_nodes(_Node* __n)
+ {
+ // Walk the singly-linked list headed by __n, freeing each node in turn.
+ // The list may span several buckets since all container nodes are chained.
+ while (__n)
{
- _Node* __p = __array[__i];
- while (__p)
- {
- _Node* __tmp = __p;
- __p = __p->_M_next;
- _M_deallocate_node(__tmp);
- }
- __array[__i] = 0;
+ _Node* __tmp = __n;
+ __n = __n->_M_next;
+ _M_deallocate_node(__tmp);
}
}
bool __chc, bool __cit, bool __uk>
typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy,
- __chc, __cit, __uk>::_Node**
+ __chc, __cit, __uk>::_Bucket*
_Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
_M_allocate_buckets(size_type __n)
{
_Bucket_allocator_type __alloc(_M_node_allocator);
- // We allocate one extra bucket to hold a sentinel, an arbitrary
- // non-null pointer. Iterator increment relies on this.
- _Node** __p = __alloc.allocate(__n + 1);
- std::fill(__p, __p + __n, (_Node*) 0);
- __p[__n] = reinterpret_cast<_Node*>(0x1000);
+ // We allocate one extra bucket to have _M_begin_bucket_index
+ // point to it as long as the container is empty
+ _Bucket* __p = __alloc.allocate(__n + 1);
+ __builtin_memset(__p, 0, (__n + 1) * sizeof(_Bucket));
return __p;
}
void
_Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
- _M_deallocate_buckets(_Node** __p, size_type __n)
+ _M_deallocate_buckets(_Bucket* __p, size_type __n)
{
_Bucket_allocator_type __alloc(_M_node_allocator);
__alloc.deallocate(__p, __n + 1);
typename _Allocator, typename _ExtractKey, typename _Equal,
typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
bool __chc, bool __cit, bool __uk>
+ typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey,
+ _Equal, _H1, _H2, _Hash, _RehashPolicy,
+ __chc, __cit, __uk>::_Node*
+ _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
+ _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
+ _M_bucket_begin(size_type __bkt) const
+ {
+ if (__bkt == _M_begin_bucket_index)
+ return _M_buckets[__bkt];
+ _Node* __n = _M_buckets[__bkt];
+ return __n ? __n->_M_next : nullptr;
+ }
+
+ template<typename _Key, typename _Value,
+ typename _Allocator, typename _ExtractKey, typename _Equal,
+ typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
+ bool __chc, bool __cit, bool __uk>
+ typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey,
+ _Equal, _H1, _H2, _Hash, _RehashPolicy,
+ __chc, __cit, __uk>::_Node*
+ _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
+ _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
+ _M_bucket_end(size_type __bkt) const
+ {
+ // Returns the last node of bucket __bkt, or nullptr if the bucket is
+ // empty. Advances while the successor still hashes into __bkt, so each
+ // step costs a _M_bucket_index computation (cheap when the hash code is
+ // cached).
+ _Node* __n = _M_bucket_begin(__bkt);
+ if (__n)
+ for (;; __n = __n->_M_next)
+ if (!__n->_M_next
+ || this->_M_bucket_index(__n->_M_next, _M_bucket_count)
+ != __bkt)
+ break;
+ return __n;
+ }
+
+ template<typename _Key, typename _Value,
+ typename _Allocator, typename _ExtractKey, typename _Equal,
+ typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
+ bool __chc, bool __cit, bool __uk>
_Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
_Hashtable(size_type __bucket_hint,
_M_rehash_policy()
{
_M_bucket_count = _M_rehash_policy._M_next_bkt(__bucket_hint);
+ // We don't want the rehash policy to ask for the hashtable to shrink
+ // on the first insertion so we need to reset its previous resize level.
+ _M_rehash_policy._M_prev_resize = 0;
_M_buckets = _M_allocate_buckets(_M_bucket_count);
_M_begin_bucket_index = _M_bucket_count;
}
_M_bkt_for_elements(__detail::
__distance_fw(__f,
__l)));
+ // We don't want the rehash policy to ask for the hashtable to shrink
+ // on the first insertion so we need to reset its previous resize
+ // level.
+ _M_rehash_policy._M_prev_resize = 0;
_M_buckets = _M_allocate_buckets(_M_bucket_count);
_M_begin_bucket_index = _M_bucket_count;
__try
_M_buckets = _M_allocate_buckets(_M_bucket_count);
__try
{
- for (size_type __i = 0; __i < __ht._M_bucket_count; ++__i)
+ const _Node* __ht_n = __ht._M_buckets[__ht._M_begin_bucket_index];
+ if (!__ht_n)
+ return;
+
+ // Note that the copy constructor does not rely on hash code usage.
+ // First deal with the special first node that is directly stored in
+ // the first non-empty bucket
+ _Node* __this_n = _M_allocate_node(__ht_n->_M_v);
+ this->_M_copy_code(__this_n, __ht_n);
+ _M_buckets[_M_begin_bucket_index] = __this_n;
+ __ht_n = __ht_n->_M_next;
+ // Second, deal with the following non-empty buckets, each containing
+ // the node preceding its first bucket node.
+ for (size_type __i = __ht._M_begin_bucket_index + 1;
+ __i != __ht._M_bucket_count; ++__i)
{
- _Node* __n = __ht._M_buckets[__i];
- _Node** __tail = _M_buckets + __i;
- while (__n)
+ if (!__ht._M_buckets[__i])
+ continue;
+
+ for (; __ht_n != __ht._M_buckets[__i]->_M_next;
+ __ht_n = __ht_n->_M_next)
{
- *__tail = _M_allocate_node(__n->_M_v);
- this->_M_copy_code(*__tail, __n);
- __tail = &((*__tail)->_M_next);
- __n = __n->_M_next;
+ __this_n->_M_next = _M_allocate_node(__ht_n->_M_v);
+ this->_M_copy_code(__this_n->_M_next, __ht_n);
+ __this_n = __this_n->_M_next;
}
+
+ _M_buckets[__i] = __this_n;
+ }
+ // Last finalize copy of the nodes of the last non-empty bucket
+ for (; __ht_n; __ht_n = __ht_n->_M_next)
+ {
+ __this_n->_M_next = _M_allocate_node(__ht_n->_M_v);
+ this->_M_copy_code(__this_n->_M_next, __ht_n);
+ __this_n = __this_n->_M_next;
}
}
__catch(...)
__rehash_policy(const _RehashPolicy& __pol)
{
size_type __n_bkt = __pol._M_bkt_for_elements(_M_element_count);
- if (__n_bkt > _M_bucket_count)
- _M_rehash(__n_bkt, _M_rehash_policy._M_next_resize);
+ if (__n_bkt != _M_bucket_count)
+ _M_rehash(__n_bkt, _M_rehash_policy._M_state());
_M_rehash_policy = __pol;
}
{
typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
- _Node* __p = _M_find_node(_M_buckets[__n], __k, __code);
- return __p ? iterator(__p, _M_buckets + __n) : this->end();
+ _Node* __p = _M_find_node(__n, __k, __code);
+ return __p ? iterator(__p) : this->end();
}
template<typename _Key, typename _Value,
{
typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
- _Node* __p = _M_find_node(_M_buckets[__n], __k, __code);
- return __p ? const_iterator(__p, _M_buckets + __n) : this->end();
+ _Node* __p = _M_find_node(__n, __k, __code);
+ return __p ? const_iterator(__p) : this->end();
}
template<typename _Key, typename _Value,
{
typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
+ _Node* __p = _M_bucket_begin(__n);
+ if (!__p)
+ return 0;
+
std::size_t __result = 0;
- for (_Node* __p = _M_buckets[__n]; __p; __p = __p->_M_next)
- if (this->_M_compare(__k, __code, __p))
- ++__result;
+ for (;; __p = __p->_M_next)
+ {
+ if (this->_M_compare(__k, __code, __p))
+ ++__result;
+ else if (__result)
+ // All equivalent values are next to each other; if we find a
+ // non-equivalent value after an equivalent one it means that we
+ // won't find any more equivalent values.
+ break;
+ if (!__p->_M_next
+ || this->_M_bucket_index(__p->_M_next, _M_bucket_count)
+ != __n)
+ break;
+ }
return __result;
}
{
typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
- _Node** __head = _M_buckets + __n;
- _Node* __p = _M_find_node(*__head, __k, __code);
+ _Node* __p = _M_find_node(__n, __k, __code);
if (__p)
{
_Node* __p1 = __p->_M_next;
- for (; __p1; __p1 = __p1->_M_next)
- if (!this->_M_compare(__k, __code, __p1))
- break;
+ while (__p1
+ && this->_M_bucket_index(__p1, _M_bucket_count) == __n
+ && this->_M_compare(__k, __code, __p1))
+ __p1 = __p1->_M_next;
- iterator __first(__p, __head);
- iterator __last(__p1, __head);
- if (!__p1)
- __last._M_incr_bucket();
- return std::make_pair(__first, __last);
+ return std::make_pair(iterator(__p), iterator(__p1));
}
else
return std::make_pair(this->end(), this->end());
{
typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
- _Node** __head = _M_buckets + __n;
- _Node* __p = _M_find_node(*__head, __k, __code);
+ _Node* __p = _M_find_node(__n, __k, __code);
if (__p)
{
_Node* __p1 = __p->_M_next;
- for (; __p1; __p1 = __p1->_M_next)
- if (!this->_M_compare(__k, __code, __p1))
- break;
+ while (__p1
+ && this->_M_bucket_index(__p1, _M_bucket_count) == __n
+ && this->_M_compare(__k, __code, __p1))
+ __p1 = __p1->_M_next;
- const_iterator __first(__p, __head);
- const_iterator __last(__p1, __head);
- if (!__p1)
- __last._M_incr_bucket();
- return std::make_pair(__first, __last);
+ return std::make_pair(const_iterator(__p), const_iterator(__p1));
}
else
return std::make_pair(this->end(), this->end());
}
- // Find the node whose key compares equal to k, beginning the search
- // at p (usually the head of a bucket). Return nullptr if no node is found.
+ // Find the node whose key compares equal to k in the bucket n. Return nullptr
+ // if no node is found.
template<typename _Key, typename _Value,
typename _Allocator, typename _ExtractKey, typename _Equal,
typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
__chc, __cit, __uk>::_Node*
_Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
- _M_find_node(_Node* __p, const key_type& __k,
+ _M_find_node(size_type __n, const key_type& __k,
typename _Hashtable::_Hash_code_type __code) const
{
- for (; __p; __p = __p->_M_next)
- if (this->_M_compare(__k, __code, __p))
- return __p;
+ _Node* __p = _M_bucket_begin(__n);
+ if (!__p)
+ return nullptr;
+ for (;; __p = __p->_M_next)
+ {
+ if (this->_M_compare(__k, __code, __p))
+ return __p;
+ if (!(__p->_M_next)
+ || this->_M_bucket_index(__p->_M_next, _M_bucket_count) != __n)
+ break;
+ }
return nullptr;
}
+ template<typename _Key, typename _Value,
+ typename _Allocator, typename _ExtractKey, typename _Equal,
+ typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
+ bool __chc, bool __cit, bool __uk>
+ void
+ _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
+ _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
+ _M_insert_bucket_begin(size_type __bkt, _Node* __new_node)
+ {
+ // Insert __new_node as the first (and only) element of the empty bucket
+ // __bkt. On exit _M_buckets[__bkt] holds the node before __new_node, or
+ // __new_node itself when __bkt becomes the begin bucket.
+ _Node* __prev_n;
+ if (__bkt < _M_begin_bucket_index)
+ {
+ // __new_node becomes the container's first node: chain the old first
+ // node (if any) after it and make __bkt the new begin bucket.
+ if (_M_begin_bucket_index != _M_bucket_count)
+ {
+ __new_node->_M_next = _M_buckets[_M_begin_bucket_index];
+ _M_buckets[_M_begin_bucket_index] = __new_node;
+ }
+ __prev_n = __new_node;
+ _M_begin_bucket_index = __bkt;
+ }
+ else
+ {
+ // We need to find previous non-empty bucket to link the new node.
+ // There are several ways to find this previous bucket:
+ // 1. Move backward until we find it (the current method)
+ // 2. Start from the begin bucket index and move forward until we
+ // cross __n position.
+ // 3. Move forward until we find a non-empty bucket that will
+ // contain the previous node.
+ // In this branch _M_begin_bucket_index <= __bkt and that bucket is
+ // non-empty (an empty container takes the branch above), so the
+ // backward scan always finds a non-empty bucket before wrapping.
+ size_type __prev_bkt;
+ for (__prev_bkt = __bkt; __prev_bkt-- != 0;)
+ if (_M_buckets[__prev_bkt])
+ break;
+ __prev_n = _M_bucket_end(__prev_bkt);
+ _M_insert_after(__prev_bkt, __prev_n, __new_node);
+ }
+ _M_buckets[__bkt] = __prev_n;
+ }
+
+ template<typename _Key, typename _Value,
+ typename _Allocator, typename _ExtractKey, typename _Equal,
+ typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
+ bool __chc, bool __cit, bool __uk>
+ void
+ _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
+ _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
+ _M_insert_after(size_type __bkt, _Node* __prev_n, _Node* __new_n)
+ {
+ // Link __new_n immediately after __prev_n, a node of bucket __bkt.
+ if (__prev_n->_M_next)
+ {
+ size_type __next_bkt =
+ this->_M_bucket_index(__prev_n->_M_next, _M_bucket_count);
+ // If the old successor begins another bucket, that bucket's slot
+ // held __prev_n as its before-begin node; the role passes to __new_n.
+ if (__next_bkt != __bkt)
+ _M_buckets[__next_bkt] = __new_n;
+ }
+ __new_n->_M_next = __prev_n->_M_next;
+ __prev_n->_M_next = __new_n;
+ }
+
+ template<typename _Key, typename _Value,
+ typename _Allocator, typename _ExtractKey, typename _Equal,
+ typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
+ bool __chc, bool __cit, bool __uk>
+ void
+ _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
+ _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
+ _M_remove_bucket_begin(size_type __bkt, _Node* __next, size_type __next_bkt)
+ {
+ // The first node of bucket __bkt is being erased. __next is the node
+ // following it in the container (may be null); __next_bkt is __next's
+ // bucket index, only meaningful when __next is non-null.
+ if (!__next || __next_bkt != __bkt)
+ {
+ // Bucket is now empty
+ if (__next && __next_bkt != __bkt)
+ // The erased node was the before-begin node of bucket __next_bkt;
+ // hand that role to the node stored in __bkt's slot.
+ _M_buckets[__next_bkt] = _M_buckets[__bkt];
+ _M_buckets[__bkt] = nullptr;
+ if (__bkt == _M_begin_bucket_index)
+ // We need to update the begin bucket index
+ if (__next)
+ {
+ _M_begin_bucket_index = __next_bkt;
+ _M_buckets[_M_begin_bucket_index] = __next;
+ }
+ else
+ // Container is now empty: park the index on the sentinel bucket.
+ _M_begin_bucket_index = _M_bucket_count;
+ }
+ else if (__bkt == _M_begin_bucket_index)
+ // Begin bucket stores its first node directly, so update it.
+ _M_buckets[__bkt] = __next;
+ }
+
+ template<typename _Key, typename _Value,
+ typename _Allocator, typename _ExtractKey, typename _Equal,
+ typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
+ bool __chc, bool __cit, bool __uk>
+ typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey,
+ _Equal, _H1, _H2, _Hash, _RehashPolicy,
+ __chc, __cit, __uk>::_Node*
+ _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
+ _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
+ _M_get_previous_node(size_type __bkt, _Node* __n)
+ {
+ // Returns the node preceding __n in the container, or nullptr when __n
+ // is the very first node (stored directly in the begin bucket).
+ _Node* __prev_n = nullptr;
+ if (__bkt != _M_begin_bucket_index || __n != _M_buckets[__bkt])
+ {
+ // _M_buckets[__bkt] holds the node before the bucket's first node,
+ // so the chase starts at most one bucket's length behind __n.
+ __prev_n = _M_buckets[__bkt];
+ while (__prev_n->_M_next != __n)
+ __prev_n = __prev_n->_M_next;
+ }
+ return __prev_n;
+ }
+
// Insert v in bucket n (assumes no element with its key already present).
template<typename _Key, typename _Value,
typename _Allocator, typename _ExtractKey, typename _Equal,
_M_insert_bucket(_Arg&& __v, size_type __n,
typename _Hashtable::_Hash_code_type __code)
{
- const size_type __saved_next_resize = _M_rehash_policy._M_next_resize;
+ const _RehashPolicyState& __saved_state = _M_rehash_policy._M_state();
std::pair<bool, std::size_t> __do_rehash
= _M_rehash_policy._M_need_rehash(_M_bucket_count,
_M_element_count, 1);
__n = this->_M_bucket_index(__k, __code, __do_rehash.second);
}
- _Node* __new_node = 0;
+ _Node* __new_node = nullptr;
__try
{
// Allocate the new node before doing the rehash so that we
// don't do a rehash if the allocation throws.
__new_node = _M_allocate_node(std::forward<_Arg>(__v));
+ this->_M_store_code(__new_node, __code);
if (__do_rehash.first)
- _M_rehash(__do_rehash.second, __saved_next_resize);
+ _M_rehash(__do_rehash.second, __saved_state);
- __new_node->_M_next = _M_buckets[__n];
- this->_M_store_code(__new_node, __code);
- _M_buckets[__n] = __new_node;
+ if (_M_buckets[__n])
+ _M_insert_after(__n, _M_buckets[__n], __new_node);
+ else
+ _M_insert_bucket_begin(__n, __new_node);
++_M_element_count;
- if (__n < _M_begin_bucket_index)
- _M_begin_bucket_index = __n;
- return iterator(__new_node, _M_buckets + __n);
+ return iterator(__new_node);
}
__catch(...)
{
if (!__new_node)
- _M_rehash_policy._M_next_resize = __saved_next_resize;
+ _M_rehash_policy._M_reset(__saved_state);
else
_M_deallocate_node(__new_node);
__throw_exception_again;
typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
size_type __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
- if (_Node* __p = _M_find_node(_M_buckets[__n], __k, __code))
- return std::make_pair(iterator(__p, _M_buckets + __n), false);
+ if (_Node* __p = _M_find_node(__n, __k, __code))
+ return std::make_pair(iterator(__p), false);
return std::make_pair(_M_insert_bucket(std::forward<_Arg>(__v),
__n, __code), true);
}
_H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
_M_insert(_Arg&& __v, std::false_type)
{
- const size_type __saved_next_resize = _M_rehash_policy._M_next_resize;
+ const _RehashPolicyState& __saved_state = _M_rehash_policy._M_state();
std::pair<bool, std::size_t> __do_rehash
= _M_rehash_policy._M_need_rehash(_M_bucket_count,
_M_element_count, 1);
- if (__do_rehash.first)
- _M_rehash(__do_rehash.second, __saved_next_resize);
const key_type& __k = this->_M_extract(__v);
typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
size_type __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
// First find the node, avoid leaking new_node if compare throws.
- _Node* __prev = _M_find_node(_M_buckets[__n], __k, __code);
- _Node* __new_node = _M_allocate_node(std::forward<_Arg>(__v));
-
- if (__prev)
+ _Node* __prev = _M_find_node(__n, __k, __code);
+ _Node* __new_node = nullptr;
+ __try
{
- __new_node->_M_next = __prev->_M_next;
- __prev->_M_next = __new_node;
+ // Second allocate new node so that we don't rehash if it throws
+ __new_node = _M_allocate_node(std::forward<_Arg>(__v));
+ this->_M_store_code(__new_node, __code);
+ if (__do_rehash.first)
+ {
+ _M_rehash(__do_rehash.second, __saved_state);
+ __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
+ // __prev is still valid because rehash does not invalidate nodes
+ }
+
+ if (__prev)
+ // Insert after the previous equivalent node
+ _M_insert_after(__n, __prev, __new_node);
+ else if (_M_buckets[__n])
+ // Bucket is not empty and the inserted node has no equivalent in
+ // the hashtable. We must insert the new node at the beginning or
+ // end of the bucket to preserve equivalent elements relative
+ // positions.
+ if (__n != _M_begin_bucket_index)
+ // We insert the new node at the beginning
+ _M_insert_after(__n, _M_buckets[__n], __new_node);
+ else
+ // We insert the new node at the end
+ _M_insert_after(__n, _M_bucket_end(__n), __new_node);
+ else
+ _M_insert_bucket_begin(__n, __new_node);
+ ++_M_element_count;
+ return iterator(__new_node);
}
- else
+ __catch(...)
{
- __new_node->_M_next = _M_buckets[__n];
- _M_buckets[__n] = __new_node;
- if (__n < _M_begin_bucket_index)
- _M_begin_bucket_index = __n;
+ if (!__new_node)
+ _M_rehash_policy._M_reset(__saved_state);
+ else
+ _M_deallocate_node(__new_node);
+ __throw_exception_again;
}
- this->_M_store_code(__new_node, __code);
- ++_M_element_count;
- return iterator(__new_node, _M_buckets + __n);
}
template<typename _Key, typename _Value,
insert(_InputIterator __first, _InputIterator __last)
{
size_type __n_elt = __detail::__distance_fw(__first, __last);
- const size_type __saved_next_resize = _M_rehash_policy._M_next_resize;
+ const _RehashPolicyState& __saved_state = _M_rehash_policy._M_state();
std::pair<bool, std::size_t> __do_rehash
= _M_rehash_policy._M_need_rehash(_M_bucket_count,
_M_element_count, __n_elt);
if (__do_rehash.first)
- _M_rehash(__do_rehash.second, __saved_next_resize);
+ _M_rehash(__do_rehash.second, __saved_state);
for (; __first != __last; ++__first)
this->insert(*__first);
_H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
erase(const_iterator __it)
{
- iterator __result(__it._M_cur_node, __it._M_cur_bucket);
- ++__result;
-
- _Node* __cur = *__it._M_cur_bucket;
- if (__cur == __it._M_cur_node)
+ _Node* __n = __it._M_cur;
+ std::size_t __bkt = this->_M_bucket_index(__n, _M_bucket_count);
+
+ // Look for the previous node to unlink it from the erased one; this is
+ // why buckets need to contain the node before the bucket's first node,
+ // to make this search fast.
+ _Node* __prev_n = _M_get_previous_node(__bkt, __n);
+ if (__n == _M_bucket_begin(__bkt))
+ _M_remove_bucket_begin(__bkt, __n->_M_next,
+ __n->_M_next ? this->_M_bucket_index(__n->_M_next, _M_bucket_count)
+ : 0);
+ else if (__n->_M_next)
{
- *__it._M_cur_bucket = __cur->_M_next;
-
- // If _M_begin_bucket_index no longer indexes the first non-empty
- // bucket - its single node is being erased - update it.
- if (!_M_buckets[_M_begin_bucket_index])
- _M_begin_bucket_index = __result._M_cur_bucket - _M_buckets;
- }
- else
- {
- _Node* __next = __cur->_M_next;
- while (__next != __it._M_cur_node)
- {
- __cur = __next;
- __next = __cur->_M_next;
- }
- __cur->_M_next = __next->_M_next;
+ size_type __next_bkt =
+ this->_M_bucket_index(__n->_M_next, _M_bucket_count);
+ if (__next_bkt != __bkt)
+ _M_buckets[__next_bkt] = __prev_n;
}
- _M_deallocate_node(__it._M_cur_node);
+ if (__prev_n)
+ __prev_n->_M_next = __n->_M_next;
+ iterator __result(__n->_M_next);
+ _M_deallocate_node(__n);
--_M_element_count;
return __result;
erase(const key_type& __k)
{
typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
- std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count);
- size_type __result = 0;
-
- _Node** __slot = _M_buckets + __n;
- while (*__slot && !this->_M_compare(__k, __code, *__slot))
- __slot = &((*__slot)->_M_next);
+ std::size_t __bkt = this->_M_bucket_index(__k, __code, _M_bucket_count);
+ // Look for the first matching node, tracking its previous node at the
+ // same time
+ _Node* __n = _M_buckets[__bkt];
+ if (!__n)
+ return 0;
+ _Node* __prev_n = nullptr;
+ if (__bkt != _M_begin_bucket_index)
+ {
+ __prev_n = __n;
+ __n = __n->_M_next;
+ }
+ bool __is_bucket_begin = true;
+ for (;; __prev_n = __n, __n = __n->_M_next)
+ {
+ if (this->_M_compare(__k, __code, __n))
+ break;
+ if (!(__n->_M_next)
+ || this->_M_bucket_index(__n->_M_next, _M_bucket_count) != __bkt)
+ return 0;
+ __is_bucket_begin = false;
+ }
- _Node** __saved_slot = 0;
- while (*__slot && this->_M_compare(__k, __code, *__slot))
+ // We found a matching node, start deallocation loop from it
+ std::size_t __next_bkt = __bkt;
+ _Node* __next_n = __n;
+ size_type __result = 0;
+ _Node* __saved_n = nullptr;
+ do
{
+ _Node* __p = __next_n;
+ __next_n = __p->_M_next;
// _GLIBCXX_RESOLVE_LIB_DEFECTS
// 526. Is it undefined if a function in the standard changes
// in parameters?
- if (std::__addressof(this->_M_extract((*__slot)->_M_v))
+ if (std::__addressof(this->_M_extract(__p->_M_v))
!= std::__addressof(__k))
- {
- _Node* __p = *__slot;
- *__slot = __p->_M_next;
- _M_deallocate_node(__p);
- --_M_element_count;
- ++__result;
- }
+ _M_deallocate_node(__p);
else
- {
- __saved_slot = __slot;
- __slot = &((*__slot)->_M_next);
- }
- }
-
- if (__saved_slot)
- {
- _Node* __p = *__saved_slot;
- *__saved_slot = __p->_M_next;
- _M_deallocate_node(__p);
+ __saved_n = __p;
--_M_element_count;
++__result;
+ if (!__next_n)
+ break;
+ __next_bkt = this->_M_bucket_index(__next_n, _M_bucket_count);
}
-
- // If the entire bucket indexed by _M_begin_bucket_index has been
- // erased look forward for the first non-empty bucket.
- if (!_M_buckets[_M_begin_bucket_index])
- {
- if (!_M_element_count)
- _M_begin_bucket_index = _M_bucket_count;
- else
- {
- ++_M_begin_bucket_index;
- while (!_M_buckets[_M_begin_bucket_index])
- ++_M_begin_bucket_index;
- }
- }
-
+ while (__next_bkt == __bkt && this->_M_compare(__k, __code, __next_n));
+
+ if (__saved_n)
+ _M_deallocate_node(__saved_n);
+ if (__is_bucket_begin)
+ _M_remove_bucket_begin(__bkt, __next_n, __next_bkt);
+ else if (__next_n && __next_bkt != __bkt)
+ _M_buckets[__next_bkt] = __prev_n;
+ if (__prev_n)
+ __prev_n->_M_next = __next_n;
return __result;
}
- // ??? This could be optimized by taking advantage of the bucket
- // structure, but it's not clear that it's worth doing. It probably
- // wouldn't even be an optimization unless the load factor is large.
template<typename _Key, typename _Value,
typename _Allocator, typename _ExtractKey, typename _Equal,
typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
_H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
erase(const_iterator __first, const_iterator __last)
{
- while (__first != __last)
- __first = this->erase(__first);
- return iterator(__last._M_cur_node, __last._M_cur_bucket);
+ _Node* __n = __first._M_cur;
+ _Node* __last_n = __last._M_cur;
+ if (__n == __last_n)
+ return iterator(__n);
+
+ std::size_t __bkt = this->_M_bucket_index(__n, _M_bucket_count);
+
+ _Node* __prev_n = _M_get_previous_node(__bkt, __n);
+ bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
+ std::size_t __n_bkt = __bkt;
+ for (;;)
+ {
+ do
+ {
+ _Node* __tmp = __n;
+ __n = __n->_M_next;
+ _M_deallocate_node(__tmp);
+ --_M_element_count;
+ if (!__n)
+ break;
+ __n_bkt = this->_M_bucket_index(__n, _M_bucket_count);
+ }
+ while (__n != __last_n && __n_bkt == __bkt);
+ if (__is_bucket_begin)
+ _M_remove_bucket_begin(__bkt, __n, __n_bkt);
+ if (__n == __last_n)
+ break;
+ __is_bucket_begin = true;
+ __bkt = __n_bkt;
+ }
+
+ if (__n && __n_bkt != __bkt)
+ _M_buckets[__n_bkt] = __prev_n;
+ if (__prev_n)
+ __prev_n->_M_next = __n;
+ return iterator(__n);
}
template<typename _Key, typename _Value,
_H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
clear() noexcept
{
- _M_deallocate_nodes(_M_buckets, _M_bucket_count);
+ _M_deallocate_nodes(_M_buckets[_M_begin_bucket_index]);
+ __builtin_memset(_M_buckets, 0, _M_bucket_count * sizeof(_Bucket));
_M_element_count = 0;
_M_begin_bucket_index = _M_bucket_count;
}
_H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
rehash(size_type __n)
{
- const size_type __saved_next_resize = _M_rehash_policy._M_next_resize;
+ const _RehashPolicyState& __saved_state = _M_rehash_policy._M_state();
_M_rehash(std::max(_M_rehash_policy._M_next_bkt(__n),
_M_rehash_policy._M_bkt_for_elements(_M_element_count
+ 1)),
- __saved_next_resize);
+ __saved_state);
}
template<typename _Key, typename _Value,
void
_Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
- _M_rehash(size_type __n, size_type __next_resize)
+ _M_rehash(size_type __n, const _RehashPolicyState& __state)
{
- _Node** __new_array = 0;
+ _Bucket* __new_buckets = nullptr;
+ _Node* __p = _M_buckets[_M_begin_bucket_index];
__try
{
- __new_array = _M_allocate_buckets(__n);
+ __new_buckets = _M_allocate_buckets(__n);
+ // First loop to store each node in its new bucket
+ while (__p)
+ {
+ _Node* __next = __p->_M_next;
+ std::size_t __new_index = this->_M_bucket_index(__p, __n);
+ if (!__new_buckets[__new_index])
+ // Temporarily store the bucket end node in _M_buckets if possible.
+ // This will speed up the second loop, where we need to access the
+ // bucket end node quickly.
+ if (__new_index < _M_bucket_count)
+ _M_buckets[__new_index] = __p;
+ __p->_M_next = __new_buckets[__new_index];
+ __new_buckets[__new_index] = __p;
+ __p = __next;
+ }
_M_begin_bucket_index = __n;
- for (size_type __i = 0; __i < _M_bucket_count; ++__i)
- while (_Node* __p = _M_buckets[__i])
+ _Node* __prev_node = nullptr;
+ // Second loop to link all nodes together and to fix bucket values so
+ // that they contain the before-begin node of the bucket.
+ for (size_type __i = 0; __i != __n; ++__i)
+ if (__new_buckets[__i])
{
- std::size_t __new_index = this->_M_bucket_index(__p, __n);
- _M_buckets[__i] = __p->_M_next;
- __p->_M_next = __new_array[__new_index];
- __new_array[__new_index] = __p;
- if (__new_index < _M_begin_bucket_index)
- _M_begin_bucket_index = __new_index;
+ if (__prev_node)
+ {
+ __prev_node->_M_next = __new_buckets[__i];
+ __new_buckets[__i] = __prev_node;
+ }
+ else
+ _M_begin_bucket_index = __i;
+ if (__i < _M_bucket_count)
+ __prev_node = _M_buckets[__i];
+ else
+ {
+ __prev_node = __new_buckets[__i];
+ while (__prev_node->_M_next)
+ __prev_node = __prev_node->_M_next;
+ }
}
_M_deallocate_buckets(_M_buckets, _M_bucket_count);
_M_bucket_count = __n;
- _M_buckets = __new_array;
+ _M_buckets = __new_buckets;
}
__catch(...)
{
- if (__new_array)
+ if (__new_buckets)
{
// A failure here means that a hash function threw an exception.
// We can't restore the previous state without calling the hash
// function again, so the only sensible recovery is to delete
// everything.
- _M_deallocate_nodes(__new_array, __n);
- _M_deallocate_buckets(__new_array, __n);
- _M_deallocate_nodes(_M_buckets, _M_bucket_count);
+ _M_deallocate_nodes(__new_buckets, __n);
+ _M_deallocate_buckets(__new_buckets, __n);
+ _M_deallocate_nodes(__p);
+ __builtin_memset(_M_buckets, 0, sizeof(_Bucket) * _M_bucket_count);
_M_element_count = 0;
_M_begin_bucket_index = _M_bucket_count;
- _M_rehash_policy._M_next_resize = 0;
+ _M_rehash_policy._M_reset(_RehashPolicyState());
}
else
// A failure here means that buckets allocation failed. We only
// have to restore hash policy previous state.
- _M_rehash_policy._M_next_resize = __next_resize;
+ _M_rehash_policy._M_reset(__state);
__throw_exception_again;
}
}
return __distance_fw(__first, __last, _Tag());
}
+ // Helper type used to detect whether the hash functor is noexcept
+ // qualified or not
+ template <typename _Key, typename _Hash>
+ struct __is_noexcept_hash : std::integral_constant<bool,
+ noexcept(declval<const _Hash&>()(declval<const _Key&>()))>
+ {};
+
// Auxiliary types used for all instantiations of _Hashtable: nodes
// and iterators.
}
};
- template<typename _Value, bool __cache>
- struct _Hashtable_iterator_base
- {
- _Hashtable_iterator_base(_Hash_node<_Value, __cache>* __node,
- _Hash_node<_Value, __cache>** __bucket)
- : _M_cur_node(__node), _M_cur_bucket(__bucket) { }
-
- void
- _M_incr()
- {
- _M_cur_node = _M_cur_node->_M_next;
- if (!_M_cur_node)
- _M_incr_bucket();
- }
-
- void
- _M_incr_bucket();
-
- _Hash_node<_Value, __cache>* _M_cur_node;
- _Hash_node<_Value, __cache>** _M_cur_bucket;
- };
-
- // Global iterators, used for arbitrary iteration within a hash
- // table. Larger and more expensive than local iterators.
- template<typename _Value, bool __cache>
- void
- _Hashtable_iterator_base<_Value, __cache>::
- _M_incr_bucket()
- {
- ++_M_cur_bucket;
-
- // This loop requires the bucket array to have a non-null sentinel.
- while (!*_M_cur_bucket)
- ++_M_cur_bucket;
- _M_cur_node = *_M_cur_bucket;
- }
-
- template<typename _Value, bool __cache>
- inline bool
- operator==(const _Hashtable_iterator_base<_Value, __cache>& __x,
- const _Hashtable_iterator_base<_Value, __cache>& __y)
- { return __x._M_cur_node == __y._M_cur_node; }
-
- template<typename _Value, bool __cache>
- inline bool
- operator!=(const _Hashtable_iterator_base<_Value, __cache>& __x,
- const _Hashtable_iterator_base<_Value, __cache>& __y)
- { return __x._M_cur_node != __y._M_cur_node; }
-
- template<typename _Value, bool __constant_iterators, bool __cache>
- struct _Hashtable_iterator
- : public _Hashtable_iterator_base<_Value, __cache>
- {
- typedef _Value value_type;
- typedef typename std::conditional<__constant_iterators,
- const _Value*, _Value*>::type
- pointer;
- typedef typename std::conditional<__constant_iterators,
- const _Value&, _Value&>::type
- reference;
- typedef std::ptrdiff_t difference_type;
- typedef std::forward_iterator_tag iterator_category;
-
- _Hashtable_iterator()
- : _Hashtable_iterator_base<_Value, __cache>(0, 0) { }
-
- _Hashtable_iterator(_Hash_node<_Value, __cache>* __p,
- _Hash_node<_Value, __cache>** __b)
- : _Hashtable_iterator_base<_Value, __cache>(__p, __b) { }
-
- explicit
- _Hashtable_iterator(_Hash_node<_Value, __cache>** __b)
- : _Hashtable_iterator_base<_Value, __cache>(*__b, __b) { }
-
- reference
- operator*() const
- { return this->_M_cur_node->_M_v; }
-
- pointer
- operator->() const
- { return std::__addressof(this->_M_cur_node->_M_v); }
-
- _Hashtable_iterator&
- operator++()
- {
- this->_M_incr();
- return *this;
- }
-
- _Hashtable_iterator
- operator++(int)
- {
- _Hashtable_iterator __tmp(*this);
- this->_M_incr();
- return __tmp;
- }
- };
-
- template<typename _Value, bool __constant_iterators, bool __cache>
- struct _Hashtable_const_iterator
- : public _Hashtable_iterator_base<_Value, __cache>
- {
- typedef _Value value_type;
- typedef const _Value* pointer;
- typedef const _Value& reference;
- typedef std::ptrdiff_t difference_type;
- typedef std::forward_iterator_tag iterator_category;
-
- _Hashtable_const_iterator()
- : _Hashtable_iterator_base<_Value, __cache>(0, 0) { }
-
- _Hashtable_const_iterator(_Hash_node<_Value, __cache>* __p,
- _Hash_node<_Value, __cache>** __b)
- : _Hashtable_iterator_base<_Value, __cache>(__p, __b) { }
-
- explicit
- _Hashtable_const_iterator(_Hash_node<_Value, __cache>** __b)
- : _Hashtable_iterator_base<_Value, __cache>(*__b, __b) { }
-
- _Hashtable_const_iterator(const _Hashtable_iterator<_Value,
- __constant_iterators, __cache>& __x)
- : _Hashtable_iterator_base<_Value, __cache>(__x._M_cur_node,
- __x._M_cur_bucket) { }
-
- reference
- operator*() const
- { return this->_M_cur_node->_M_v; }
-
- pointer
- operator->() const
- { return std::__addressof(this->_M_cur_node->_M_v); }
-
- _Hashtable_const_iterator&
- operator++()
- {
- this->_M_incr();
- return *this;
- }
-
- _Hashtable_const_iterator
- operator++(int)
- {
- _Hashtable_const_iterator __tmp(*this);
- this->_M_incr();
- return __tmp;
- }
- };
-
-
// Many of class template _Hashtable's template parameters are policy
// classes. These are defaults for the policies.
struct _Prime_rehash_policy
{
_Prime_rehash_policy(float __z = 1.0)
- : _M_max_load_factor(__z), _M_growth_factor(2.f), _M_next_resize(0) { }
+ : _M_max_load_factor(__z), _M_prev_resize(0), _M_next_resize(0) { }
float
max_load_factor() const noexcept
_M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
std::size_t __n_ins) const;
+ typedef std::pair<std::size_t, std::size_t> _State;
+
+ _State
+ _M_state() const
+ { return std::make_pair(_M_prev_resize, _M_next_resize); }
+
+ void
+ _M_reset(const _State& __state)
+ {
+ _M_prev_resize = __state.first;
+ _M_next_resize = __state.second;
+ }
+
enum { _S_n_primes = sizeof(unsigned long) != 8 ? 256 : 256 + 48 };
float _M_max_load_factor;
- float _M_growth_factor;
+ mutable std::size_t _M_prev_resize;
mutable std::size_t _M_next_resize;
};
{
// Optimize lookups involving the first elements of __prime_list.
// (useful to speed-up, eg, constructors)
- static const unsigned char __fast_bkt[12]
+ static const unsigned long __fast_bkt[12]
= { 2, 2, 2, 3, 5, 5, 7, 7, 11, 11, 11, 11 };
- const unsigned long __p
- = __n <= 11 ? __fast_bkt[__n]
- : *std::lower_bound(__prime_list + 5,
- __prime_list + _S_n_primes, __n);
- _M_next_resize = __builtin_floor(__p * (long double)_M_max_load_factor);
- return __p;
+ const unsigned long* __p
+ = __n <= 11 ? __fast_bkt + __n
+ : std::lower_bound(__prime_list + 5,
+ __prime_list + _S_n_primes, __n);
+
+ _M_prev_resize = __builtin_floor(*__p * (long double)_M_max_load_factor);
+ if (__p != __fast_bkt)
+ _M_prev_resize = std::min(_M_prev_resize,
+ static_cast<std::size_t>(*(__p - 1)));
+ // Let's guarantee a minimal growth step of 11:
+ if (*__p - __n < 11)
+ __p = std::lower_bound(__prime_list + 5,
+ __prime_list + _S_n_primes, __n + 11);
+ _M_next_resize = __builtin_floor(*__p * (long double)_M_max_load_factor);
+ return *__p;
}
// Return the smallest prime p such that alpha p >= n, where alpha
_M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
std::size_t __n_ins) const
{
- if (__n_elt + __n_ins > _M_next_resize)
+ if (__n_elt + __n_ins >= _M_next_resize)
{
- long double __min_bkts = ((__n_elt + __n_ins)
- / (long double)_M_max_load_factor);
- if (__min_bkts > __n_bkt)
- {
- __min_bkts = std::max(__min_bkts, (long double)_M_growth_factor
- * __n_bkt);
- return std::make_pair(true,
- _M_next_bkt(__builtin_ceil(__min_bkts)));
- }
+ long double __min_bkts = (__n_elt + __n_ins)
+ / (long double)_M_max_load_factor;
+ if (__min_bkts >= __n_bkt)
+ return std::make_pair(true,
+ _M_next_bkt(__builtin_floor(__min_bkts) + 1));
else
{
_M_next_resize
return std::make_pair(false, 0);
}
}
+ else if (__n_elt + __n_ins < _M_prev_resize)
+ {
+ long double __min_bkts = (__n_elt + __n_ins)
+ / (long double)_M_max_load_factor;
+ return std::make_pair(true,
+ _M_next_bkt(__builtin_floor(__min_bkts) + 1));
+ }
else
return std::make_pair(false, 0);
}
std::size_t __n = __h->_M_bucket_index(__k, __code,
__h->_M_bucket_count);
- typename _Hashtable::_Node* __p =
- __h->_M_find_node(__h->_M_buckets[__n], __k, __code);
+ typename _Hashtable::_Node* __p = __h->_M_find_node(__n, __k, __code);
if (!__p)
return __h->_M_insert_bucket(std::make_pair(__k, mapped_type()),
__n, __code)->second;
std::size_t __n = __h->_M_bucket_index(__k, __code,
__h->_M_bucket_count);
- typename _Hashtable::_Node* __p =
- __h->_M_find_node(__h->_M_buckets[__n], __k, __code);
+ typename _Hashtable::_Node* __p = __h->_M_find_node(__n, __k, __code);
if (!__p)
return __h->_M_insert_bucket(std::make_pair(std::move(__k),
mapped_type()),
std::size_t __n = __h->_M_bucket_index(__k, __code,
__h->_M_bucket_count);
- typename _Hashtable::_Node* __p =
- __h->_M_find_node(__h->_M_buckets[__n], __k, __code);
+ typename _Hashtable::_Node* __p = __h->_M_find_node(__n, __k, __code);
if (!__p)
__throw_out_of_range(__N("_Map_base::at"));
return (__p->_M_v).second;
std::size_t __n = __h->_M_bucket_index(__k, __code,
__h->_M_bucket_count);
- typename _Hashtable::_Node* __p =
- __h->_M_find_node(__h->_M_buckets[__n], __k, __code);
+ typename _Hashtable::_Node* __p = __h->_M_find_node(__n, __k, __code);
if (!__p)
__throw_out_of_range(__N("_Map_base::at"));
return (__p->_M_v).second;