2 Copyright (c) 2005-2019 Intel Corporation
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
8 http://www.apache.org/licenses/LICENSE-2.0
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
17 /* Container implementations in this header are based on PPL implementations
18 provided by Microsoft. */
20 #ifndef __TBB__concurrent_unordered_impl_H
21 #define __TBB__concurrent_unordered_impl_H
22 #if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H) && !defined(__TBB_concurrent_hash_map_H)
23 #error Do not #include this internal file directly; use public TBB headers instead.
26 #include "../tbb_stddef.h"
29 #include <utility> // Need std::pair
30 #include <functional> // Need std::equal_to (in ../concurrent_unordered_*.h)
31 #include <string> // For tbb_hasher
32 #include <cstring> // Need std::memset
33 #include __TBB_STD_SWAP_HEADER
35 #include "../atomic.h"
36 #include "../tbb_exception.h"
37 #include "../tbb_allocator.h"
39 #if __TBB_INITIALIZER_LISTS_PRESENT
40 #include <initializer_list>
43 #if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_COPY_DELETION_BROKEN
44 #define __TBB_UNORDERED_NODE_HANDLE_PRESENT 1
47 #include "_allocator_traits.h"
48 #include "_tbb_hash_compare_impl.h"
49 #include "_template_helpers.h"
51 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
52 #include "_node_handle_impl.h"
53 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
56 namespace interface5 {
60 template <typename T, typename Allocator>
61 class split_ordered_list;
62 template <typename Traits>
63 class concurrent_unordered_base;
// Forward list iterators (without skipping dummy elements)
template<class Solist, typename Value>
class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
{
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template <typename Traits>
    friend class concurrent_unordered_base;
    template<class M, typename V>
    friend class flist_iterator;

    // Raw pointer type for nodes of the underlying split-ordered list.
    typedef typename Solist::nodeptr_t nodeptr_t;
public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    flist_iterator() : my_node_ptr(0) {}
    flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
        : my_node_ptr(other.my_node_ptr) {}

    flist_iterator& operator=( const flist_iterator<Solist, typename Solist::value_type> &other ) {
        my_node_ptr = other.my_node_ptr;
        return *this;
    }

    reference operator*() const { return my_node_ptr->my_element; }
    pointer operator->() const { return &**this; }

    // Pre-increment: advance to the next node (dummy nodes are NOT skipped).
    flist_iterator& operator++() {
        my_node_ptr = my_node_ptr->my_next;
        return *this;
    }

    // Post-increment: advance, returning the previous position.
    flist_iterator operator++(int) {
        flist_iterator tmp = *this;
        ++*this;
        return tmp;
    }

protected:
    // Construction from a raw node pointer is reserved for the containers.
    flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
    nodeptr_t get_node_ptr() const { return my_node_ptr; }

    nodeptr_t my_node_ptr;

    template<typename M, typename T, typename U>
    friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
};
118 template<typename Solist, typename T, typename U>
119 bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
120 return i.my_node_ptr == j.my_node_ptr;
122 template<typename Solist, typename T, typename U>
123 bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
124 return i.my_node_ptr != j.my_node_ptr;
127 // Split-order list iterators, needed to skip dummy elements
128 template<class Solist, typename Value>
129 class solist_iterator : public flist_iterator<Solist, Value>
131 typedef flist_iterator<Solist, Value> base_type;
132 typedef typename Solist::nodeptr_t nodeptr_t;
133 using base_type::get_node_ptr;
134 template <typename T, typename Allocator>
135 friend class split_ordered_list;
136 template<class M, typename V>
137 friend class solist_iterator;
138 template <typename Traits>
139 friend class concurrent_unordered_base;
140 template<typename M, typename T, typename U>
141 friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
142 template<typename M, typename T, typename U>
143 friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
145 const Solist *my_list_ptr;
146 solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}
149 typedef typename Solist::value_type value_type;
150 typedef typename Solist::difference_type difference_type;
151 typedef typename Solist::pointer pointer;
152 typedef typename Solist::reference reference;
155 solist_iterator( const solist_iterator<Solist, typename Solist::value_type> &other )
156 : base_type(other), my_list_ptr(other.my_list_ptr) {}
158 solist_iterator& operator=( const solist_iterator<Solist, typename Solist::value_type> &other ) {
159 base_type::my_node_ptr = other.get_node_ptr();
160 my_list_ptr = other.my_list_ptr;
164 reference operator*() const {
165 return this->base_type::operator*();
168 pointer operator->() const {
172 solist_iterator& operator++() {
173 do ++(*(base_type *)this);
174 while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
179 solist_iterator operator++(int) {
180 solist_iterator tmp = *this;
182 while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
188 template<typename Solist, typename T, typename U>
189 bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
190 return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
192 template<typename Solist, typename T, typename U>
193 bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
194 return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
197 // Forward type and class definitions
198 typedef size_t sokey_t;
201 // Forward list in which elements are sorted in a split-order
202 template <typename T, typename Allocator>
203 class split_ordered_list
206 typedef split_ordered_list<T, Allocator> self_type;
208 typedef typename tbb::internal::allocator_rebind<Allocator, T>::type allocator_type;
211 typedef node *nodeptr_t;
213 typedef typename tbb::internal::allocator_traits<allocator_type>::value_type value_type;
214 typedef typename tbb::internal::allocator_traits<allocator_type>::size_type size_type;
215 typedef typename tbb::internal::allocator_traits<allocator_type>::difference_type difference_type;
216 typedef typename tbb::internal::allocator_traits<allocator_type>::pointer pointer;
217 typedef typename tbb::internal::allocator_traits<allocator_type>::const_pointer const_pointer;
218 // No support for reference/const_reference in allocator traits
219 typedef value_type& reference;
220 typedef const value_type& const_reference;
222 typedef solist_iterator<self_type, const value_type> const_iterator;
223 typedef solist_iterator<self_type, value_type> iterator;
224 typedef flist_iterator<self_type, const value_type> raw_const_iterator;
225 typedef flist_iterator<self_type, value_type> raw_iterator;
227 // Node that holds the element in a split-ordered list
228 struct node : tbb::internal::no_assign
231 // for compilers that try to generate default constructors though they are not needed.
232 node(); // VS 2008, 2010, 2012
234 // Initialize the node with the given order key
235 void init(sokey_t order_key) {
236 my_order_key = order_key;
240 // Return the order key (needed for hashing)
241 sokey_t get_order_key() const { // TODO: remove
245 // get() and value() is a common interface for getting access to node`s element (required by node_handle)
246 value_type* storage() {
247 return reinterpret_cast<value_type*>(&my_element);
250 value_type& value() {
254 // Inserts the new element in the list in an atomic fashion
255 nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
257 // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
258 nodeptr_t exchange_node = tbb::internal::as_atomic(my_next).compare_and_swap(new_node, current_node);
260 if (exchange_node == current_node) // TODO: why this branch?
262 // Operation succeeded, return the new node
267 // Operation failed, return the "interfering" node
268 return exchange_node;
272 // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets
273 // in the hash table to quickly index into the right subsection of the split-ordered list.
274 bool is_dummy() const {
275 return (my_order_key & 0x1) == 0;
279 nodeptr_t my_next; // Next element in the list
280 value_type my_element; // Element storage
281 sokey_t my_order_key; // Order key for this element
284 // Allocate a new node with the given order key; used to allocate dummy nodes
285 nodeptr_t create_node(sokey_t order_key) {
286 nodeptr_t pnode = my_node_allocator.allocate(1);
287 pnode->init(order_key);
291 // Allocate a new node with the given order key and value
292 template<typename Arg>
293 nodeptr_t create_node(sokey_t order_key, __TBB_FORWARDING_REF(Arg) t,
294 /*AllowCreate=*/tbb::internal::true_type=tbb::internal::true_type()){
295 nodeptr_t pnode = my_node_allocator.allocate(1);
297 //TODO: use RAII scoped guard instead of explicit catch
299 new(static_cast<void*>(&pnode->my_element)) T(tbb::internal::forward<Arg>(t));
300 pnode->init(order_key);
302 my_node_allocator.deallocate(pnode, 1);
309 // A helper to avoid excessive requiremens in internal_insert
310 template<typename Arg>
311 nodeptr_t create_node(sokey_t, __TBB_FORWARDING_REF(Arg),
312 /*AllowCreate=*/tbb::internal::false_type){
313 __TBB_ASSERT(false, "This compile-time helper should never get called");
317 // Allocate a new node with the given parameters for constructing value
318 template<typename __TBB_PARAMETER_PACK Args>
319 nodeptr_t create_node_v( __TBB_FORWARDING_REF(Args) __TBB_PARAMETER_PACK args){
320 nodeptr_t pnode = my_node_allocator.allocate(1);
322 //TODO: use RAII scoped guard instead of explicit catch
324 new(static_cast<void*>(&pnode->my_element)) T(__TBB_PACK_EXPANSION(tbb::internal::forward<Args>(args)));
326 my_node_allocator.deallocate(pnode, 1);
333 split_ordered_list(allocator_type a = allocator_type())
334 : my_node_allocator(a), my_element_count(0)
336 // Immediately allocate a dummy node with order key of 0. This node
337 // will always be the head of the list.
338 my_head = create_node(sokey_t(0));
341 ~split_ordered_list()
346 // Remove the head element which is not cleared by clear()
347 nodeptr_t pnode = my_head;
350 __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");
355 // Common forward list functions
357 allocator_type get_allocator() const {
358 return (my_node_allocator);
363 nodeptr_t pnode = my_head;
365 __TBB_ASSERT(my_head != NULL, "Invalid head list node");
366 pnext = pnode->my_next;
367 pnode->my_next = NULL;
370 while (pnode != NULL)
372 pnext = pnode->my_next;
377 my_element_count = 0;
380 // Returns a first non-dummy element in the SOL
382 return first_real_iterator(raw_begin());
385 // Returns a first non-dummy element in the SOL
386 const_iterator begin() const {
387 return first_real_iterator(raw_begin());
391 return (iterator(0, this));
394 const_iterator end() const {
395 return (const_iterator(0, this));
398 const_iterator cbegin() const {
399 return (((const self_type *)this)->begin());
402 const_iterator cend() const {
403 return (((const self_type *)this)->end());
406 // Checks if the number of elements (non-dummy) is 0
408 return (my_element_count == 0);
411 // Returns the number of non-dummy elements in the list
412 size_type size() const {
413 return my_element_count;
416 // Returns the maximum size of the list, determined by the allocator
417 size_type max_size() const {
418 return my_node_allocator.max_size();
421 // Swaps 'this' list with the passed in one
422 void swap(self_type& other)
430 std::swap(my_element_count, other.my_element_count);
431 std::swap(my_head, other.my_head);
434 // Split-order list functions
436 // Returns a first element in the SOL, which is always a dummy
437 raw_iterator raw_begin() {
438 return raw_iterator(my_head);
441 // Returns a first element in the SOL, which is always a dummy
442 raw_const_iterator raw_begin() const {
443 return raw_const_iterator(my_head);
446 raw_iterator raw_end() {
447 return raw_iterator(0);
450 raw_const_iterator raw_end() const {
451 return raw_const_iterator(0);
454 static sokey_t get_order_key(const raw_const_iterator& it) {
455 return it.get_node_ptr()->get_order_key();
458 static sokey_t get_safe_order_key(const raw_const_iterator& it) {
459 if( !it.get_node_ptr() ) return ~sokey_t(0);
460 return it.get_node_ptr()->get_order_key();
463 // Returns a public iterator version of the internal iterator. Public iterator must not
464 // be a dummy private iterator.
465 iterator get_iterator(raw_iterator it) {
466 __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
467 return iterator(it.get_node_ptr(), this);
470 // Returns a public iterator version of the internal iterator. Public iterator must not
471 // be a dummy private iterator.
472 const_iterator get_iterator(raw_const_iterator it) const {
473 __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
474 return const_iterator(it.get_node_ptr(), this);
477 // Returns a non-const version of the raw_iterator
478 raw_iterator get_iterator(raw_const_iterator it) {
479 return raw_iterator(it.get_node_ptr());
482 // Returns a non-const version of the iterator
483 static iterator get_iterator(const_iterator it) {
484 return iterator(it.my_node_ptr, it.my_list_ptr);
487 // Returns a public iterator version of a first non-dummy internal iterator at or after
488 // the passed in internal iterator.
489 iterator first_real_iterator(raw_iterator it)
491 // Skip all dummy, internal only iterators
492 while (it != raw_end() && it.get_node_ptr()->is_dummy())
495 return iterator(it.get_node_ptr(), this);
498 // Returns a public iterator version of a first non-dummy internal iterator at or after
499 // the passed in internal iterator.
500 const_iterator first_real_iterator(raw_const_iterator it) const
502 // Skip all dummy, internal only iterators
503 while (it != raw_end() && it.get_node_ptr()->is_dummy())
506 return const_iterator(it.get_node_ptr(), this);
509 // Erase an element using the allocator
510 void destroy_node(nodeptr_t pnode) {
511 if (!pnode->is_dummy()) my_node_allocator.destroy(pnode);
512 my_node_allocator.deallocate(pnode, 1);
515 // Try to insert a new element in the list.
516 // If insert fails, return the node that was inserted instead.
517 static nodeptr_t try_insert_atomic(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
518 new_node->my_next = current_node;
519 return previous->atomic_set_next(new_node, current_node);
522 // Insert a new element between passed in iterators
523 std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, nodeptr_t pnode, size_type *new_count)
525 nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), pnode, next.get_node_ptr());
527 if (inserted_node == pnode)
529 // If the insert succeeded, check that the order is correct and increment the element count
530 check_range(it, next);
531 *new_count = tbb::internal::as_atomic(my_element_count).fetch_and_increment();
532 return std::pair<iterator, bool>(iterator(pnode, this), true);
536 return std::pair<iterator, bool>(end(), false);
540 // Insert a new dummy element, starting search at a parent dummy element
541 raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
543 raw_iterator last = raw_end();
544 raw_iterator where = it;
546 __TBB_ASSERT(where != last, "Invalid head node");
550 // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
551 nodeptr_t dummy_node = create_node(order_key);
555 __TBB_ASSERT(it != last, "Invalid head list node");
557 // If the head iterator is at the end of the list, or past the point where this dummy
558 // node needs to be inserted, then try to insert it.
559 if (where == last || get_order_key(where) > order_key)
561 __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");
563 // Try to insert it in the right place
564 nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), dummy_node, where.get_node_ptr());
566 if (inserted_node == dummy_node)
568 // Insertion succeeded, check the list for order violations
569 check_range(it, where);
570 return raw_iterator(dummy_node);
574 // Insertion failed: either dummy node was inserted by another thread, or
575 // a real element was inserted at exactly the same place as dummy node.
576 // Proceed with the search from the previous location where order key was
577 // known to be larger (note: this is legal only because there is no safe
578 // concurrent erase operation supported).
584 else if (get_order_key(where) == order_key)
586 // Another dummy node with the same value found, discard the new one.
587 destroy_node(dummy_node);
591 // Move the iterator forward
598 nodeptr_t erase_node_impl(raw_iterator previous, raw_const_iterator& where) {
599 nodeptr_t pnode = (where++).get_node_ptr();
600 nodeptr_t prevnode = previous.get_node_ptr();
601 __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
602 prevnode->my_next = pnode->my_next;
606 // This erase function can handle both real and dummy nodes
607 void erase_node(raw_iterator previous, raw_const_iterator& where,
608 /*allow_destroy*/tbb::internal::true_type)
610 nodeptr_t pnode = erase_node_impl(previous, where);
614 void erase_node(raw_iterator previous, raw_const_iterator& where,
615 /*allow_destroy*/tbb::internal::false_type)
617 erase_node_impl(previous, where);
620 void erase_node(raw_iterator previous, raw_const_iterator& where) {
621 erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type());
624 // Erase the element (previous node needs to be passed because this is a forward only list)
625 template<typename AllowDestroy>
626 iterator erase_node(raw_iterator previous, const_iterator where, AllowDestroy)
628 raw_const_iterator it = where;
629 erase_node(previous, it, AllowDestroy());
632 return get_iterator(first_real_iterator(it));
635 iterator erase_node(raw_iterator previous, const_iterator& where) {
636 return erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type());
641 // Move all elements from the passed in split-ordered list to this one
642 void move_all(self_type& source)
644 raw_const_iterator first = source.raw_begin();
645 raw_const_iterator last = source.raw_end();
650 nodeptr_t previous_node = my_head;
651 raw_const_iterator begin_iterator = first++;
653 // Move all elements one by one, including dummy ones
654 for (raw_const_iterator it = first; it != last;)
656 nodeptr_t pnode = it.get_node_ptr();
658 nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
659 previous_node = try_insert_atomic(previous_node, dummy_node, NULL);
660 __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
661 raw_const_iterator where = it++;
662 source.erase_node(get_iterator(begin_iterator), where);
669 //Need to setup private fields of split_ordered_list in move constructor and assignment of concurrent_unordered_base
670 template <typename Traits>
671 friend class concurrent_unordered_base;
673 // Check the list for order violations
674 void check_range( raw_iterator first, raw_iterator last )
677 for (raw_iterator it = first; it != last; ++it)
679 raw_iterator next = it;
682 __TBB_ASSERT(next == raw_end() || get_order_key(next) >= get_order_key(it), "!!! List order inconsistency !!!");
685 tbb::internal::suppress_unused_warning(first, last);
691 check_range( raw_begin(), raw_end() );
695 typename tbb::internal::allocator_rebind<allocator_type, node>::type my_node_allocator; // allocator object for nodes
696 size_type my_element_count; // Total item count, not counting dummy nodes
697 nodeptr_t my_head; // pointer to head node
700 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
701 #pragma warning(push)
702 #pragma warning(disable: 4127) // warning C4127: conditional expression is constant
705 template <typename Traits>
706 class concurrent_unordered_base : public Traits
710 typedef concurrent_unordered_base<Traits> self_type;
711 typedef typename Traits::value_type value_type;
712 typedef typename Traits::key_type key_type;
713 typedef typename Traits::hash_compare hash_compare;
714 typedef typename Traits::allocator_type allocator_type;
715 typedef typename hash_compare::hasher hasher;
716 typedef typename hash_compare::key_equal key_equal;
718 typedef typename tbb::internal::allocator_traits<allocator_type>::size_type size_type;
719 typedef typename tbb::internal::allocator_traits<allocator_type>::difference_type difference_type;
720 typedef typename tbb::internal::allocator_traits<allocator_type>::pointer pointer;
721 typedef typename tbb::internal::allocator_traits<allocator_type>::const_pointer const_pointer;
722 // No support for reference/const_reference in allocator
723 typedef typename allocator_type::value_type& reference;
724 typedef const typename allocator_type::value_type& const_reference;
726 typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
727 typedef typename solist_t::nodeptr_t nodeptr_t;
728 // Iterators that walk the entire split-order list, including dummy nodes
729 typedef typename solist_t::raw_iterator raw_iterator;
730 typedef typename solist_t::raw_const_iterator raw_const_iterator;
731 typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
732 typedef typename solist_t::const_iterator const_iterator;
733 typedef iterator local_iterator;
734 typedef const_iterator const_local_iterator;
735 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
736 typedef typename Traits::node_type node_type;
737 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
738 using Traits::my_hash_compare;
739 using Traits::get_key;
740 using Traits::allow_multimapping;
742 static const size_type initial_bucket_number = 8; // Initial number of buckets
745 template<typename OtherTraits>
746 friend class concurrent_unordered_base;
748 typedef std::pair<iterator, iterator> pairii_t;
749 typedef std::pair<const_iterator, const_iterator> paircc_t;
751 static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit
752 static const size_type initial_bucket_load = 4; // Initial maximum number of elements per bucket
754 struct call_internal_clear_on_exit{
755 concurrent_unordered_base* my_instance;
756 call_internal_clear_on_exit(concurrent_unordered_base* instance) : my_instance(instance) {}
757 void dismiss(){ my_instance = NULL;}
758 ~call_internal_clear_on_exit(){
760 my_instance->internal_clear();
765 // Constructors/Destructors
766 concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
767 const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
768 : Traits(hc), my_solist(a),
769 my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
771 if( n_of_buckets == 0) ++n_of_buckets;
772 my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2
776 concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
777 : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
780 internal_copy(right);
783 concurrent_unordered_base(const concurrent_unordered_base& right)
784 : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
786 //FIXME:exception safety seems to be broken here
788 internal_copy(right);
791 #if __TBB_CPP11_RVALUE_REF_PRESENT
792 concurrent_unordered_base(concurrent_unordered_base&& right)
793 : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator()),
794 my_maximum_bucket_size(float(initial_bucket_load))
796 my_number_of_buckets = initial_bucket_number;
801 concurrent_unordered_base(concurrent_unordered_base&& right, const allocator_type& a)
802 : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
804 call_internal_clear_on_exit clear_buckets_on_exception(this);
807 if (a == right.get_allocator()){
808 my_number_of_buckets = initial_bucket_number;
809 my_maximum_bucket_size = float(initial_bucket_load);
812 my_maximum_bucket_size = right.my_maximum_bucket_size;
813 my_number_of_buckets = right.my_number_of_buckets;
814 my_solist.my_element_count = right.my_solist.my_element_count;
816 if (! right.my_solist.empty()){
817 nodeptr_t previous_node = my_solist.my_head;
819 // Move all elements one by one, including dummy ones
820 for (raw_const_iterator it = ++(right.my_solist.raw_begin()), last = right.my_solist.raw_end(); it != last; ++it)
822 const nodeptr_t pnode = it.get_node_ptr();
824 if (pnode->is_dummy()) {
825 node = my_solist.create_node(pnode->get_order_key());
826 size_type bucket = __TBB_ReverseBits(pnode->get_order_key()) % my_number_of_buckets;
827 set_bucket(bucket, node);
829 node = my_solist.create_node(pnode->get_order_key(), std::move(pnode->my_element));
832 previous_node = my_solist.try_insert_atomic(previous_node, node, NULL);
833 __TBB_ASSERT(previous_node != NULL, "Insertion of node failed. Concurrent inserts in constructor ?");
835 my_solist.check_range();
839 clear_buckets_on_exception.dismiss();
842 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
844 concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
846 internal_copy(right);
#if __TBB_CPP11_RVALUE_REF_PRESENT
// Move assignment; honors propagate_on_container_move_assignment of the allocator.
concurrent_unordered_base& operator=(concurrent_unordered_base&& other)
{
    if(this != &other){
        typedef typename tbb::internal::allocator_traits<allocator_type>::propagate_on_container_move_assignment pocma_t;
        if(pocma_t::value || this->my_allocator == other.my_allocator) {
            // Move current contents aside so they are destroyed on scope exit.
            concurrent_unordered_base trash (std::move(*this));
            swap(other);
            if (pocma_t::value) {
                using std::swap;
                //TODO: swapping allocators here may be a problem, replace with single direction moving
                swap(this->my_solist.my_node_allocator, other.my_solist.my_node_allocator);
                swap(this->my_allocator, other.my_allocator);
            }
        } else {
            // Allocators differ and do not propagate: element-wise move is required.
            concurrent_unordered_base moved_copy(std::move(other),this->my_allocator);
            this->swap(moved_copy);
        }
    }
    return *this;
}

#endif // __TBB_CPP11_RVALUE_REF_PRESENT
#if __TBB_INITIALIZER_LISTS_PRESENT
//! assignment operator from initializer_list
concurrent_unordered_base& operator=(std::initializer_list<value_type> il)
{
    this->clear();
    this->insert(il.begin(),il.end());
    return (*this);
}
#endif // __TBB_INITIALIZER_LISTS_PRESENT
885 ~concurrent_unordered_base() {
886 // Delete all node segments
#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
// Merge the nodes of 'source' into this container (C++17-style merge):
// each transferable node is extracted and re-inserted without copying its element.
template<typename SourceType>
void internal_merge(SourceType& source) {
    typedef typename SourceType::iterator source_iterator;
    __TBB_STATIC_ASSERT((tbb::internal::is_same_type<node_type,
                        typename SourceType::node_type>::value),
                        "Incompatible containers cannot be merged");

    for(source_iterator it = source.begin(); it != source.end();) {
        source_iterator where = it++;
        if (allow_multimapping || find(get_key(*where)) == end()) {
            std::pair<node_type, raw_iterator> extract_result = source.internal_extract(where);

            // Remember the old order key
            sokey_t old_order_key = extract_result.first.my_node->get_order_key();

            // If the insertion fails, it returns ownership of the node to extract_result.first
            // extract_result.first remains valid node handle
            if (!insert(std::move(extract_result.first)).second) {
                raw_iterator next = extract_result.second;
                raw_iterator current = next++;

                // Revert order key to old value
                extract_result.first.my_node->init(old_order_key);

                __TBB_ASSERT(extract_result.first.my_node->get_order_key() >= current.get_node_ptr()->get_order_key(),
                            "Wrong nodes order in source container");
                __TBB_ASSERT(next==source.my_solist.raw_end() ||
                            extract_result.first.my_node->get_order_key() <= next.get_node_ptr()->get_order_key(),
                            "Wrong nodes order in source container");

                size_t new_count = 0;// To use try_insert()
                bool insert_result =
                    source.my_solist.try_insert(current, next, extract_result.first.my_node, &new_count).second;
                __TBB_ASSERT_EX(insert_result, "Return to source must be successful. "
                                "Changing source container while merging is unsafe.");
            }
            extract_result.first.deactivate();
        }
    }
}
#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
934 allocator_type get_allocator() const {
935 return my_solist.get_allocator();
938 // Size and capacity function
940 return my_solist.empty();
943 size_type size() const {
944 return my_solist.size();
947 size_type max_size() const {
948 return my_solist.max_size();
953 return my_solist.begin();
956 const_iterator begin() const {
957 return my_solist.begin();
961 return my_solist.end();
964 const_iterator end() const {
965 return my_solist.end();
968 const_iterator cbegin() const {
969 return my_solist.cbegin();
972 const_iterator cend() const {
973 return my_solist.cend();
976 // Parallel traversal support
977 class const_range_type : tbb::internal::no_assign {
978 const concurrent_unordered_base &my_table;
979 raw_const_iterator my_begin_node;
980 raw_const_iterator my_end_node;
981 mutable raw_const_iterator my_midpoint_node;
983 //! Type for size of a range
984 typedef typename concurrent_unordered_base::size_type size_type;
985 typedef typename concurrent_unordered_base::value_type value_type;
986 typedef typename concurrent_unordered_base::reference reference;
987 typedef typename concurrent_unordered_base::difference_type difference_type;
988 typedef typename concurrent_unordered_base::const_iterator iterator;
990 //! True if range is empty.
991 bool empty() const {return my_begin_node == my_end_node;}
993 //! True if range can be partitioned into two subranges.
994 bool is_divisible() const {
995 return my_midpoint_node != my_end_node;
998 const_range_type( const_range_type &r, split ) :
999 my_table(r.my_table), my_end_node(r.my_end_node)
1001 r.my_end_node = my_begin_node = r.my_midpoint_node;
1002 __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
1003 __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
1007 //! Init range with container and grainsize specified
1008 const_range_type( const concurrent_unordered_base &a_table ) :
1009 my_table(a_table), my_begin_node(a_table.my_solist.begin()),
1010 my_end_node(a_table.my_solist.end())
1014 iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
1015 iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
1016 //! The grain size for this range.
1017 size_type grainsize() const { return 1; }
1019 //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node.
1020 void set_midpoint() const {
1021 if( my_begin_node == my_end_node ) // not divisible
1022 my_midpoint_node = my_end_node;
// Bisect in reversed order-key space: pick the key halfway between the two
// endpoints, map it back to a bucket, then climb to the nearest bucket that
// is actually initialized (has a dummy node in the list).
1024 sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
1025 sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
1026 size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
1027 while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
1028 if(__TBB_ReverseBits(mid_bucket) > begin_key) {
1029 // found a dummy_node between begin and end
1030 my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));
1033 // didn't find a dummy node between begin and end.
1034 my_midpoint_node = my_end_node;
// Debug-only sanity checks: the midpoint must fall strictly after begin and
// no later than end in order-key space.
1038 sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
1039 __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
1040 __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
1042 #endif // TBB_USE_ASSERT
// Mutable range over the container; reuses const_range_type's traversal
// machinery and rewraps the iterators as non-const.
1047 class range_type : public const_range_type {
1049 typedef typename concurrent_unordered_base::iterator iterator;
1051 range_type( range_type &r, split ) : const_range_type( r, split() ) {}
1052 //! Init range with container and grainsize specified
1053 range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}
1055 iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
1056 iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
// Range factories consumed by tbb::parallel_for / parallel_reduce etc.
1059 range_type range() {
1060 return range_type( *this );
1063 const_range_type range() const {
1064 return const_range_type( *this );
// Insert a copy of 'value'. Returns an iterator to the inserted (or already
// present) element and a flag telling whether insertion took place.
1068 std::pair<iterator, bool> insert(const value_type& value) {
1069 return internal_insert</*AllowCreate=*/tbb::internal::true_type,
1070 /*AllowDestroy=*/tbb::internal::true_type>(value);
// Hinted overload; the hint iterator is ignored.
1073 iterator insert(const_iterator, const value_type& value) {
1075 return insert(value).first;
1078 #if __TBB_CPP11_RVALUE_REF_PRESENT
// Move-inserting overloads (same semantics, value is moved into the node).
1079 std::pair<iterator, bool> insert(value_type&& value) {
1080 return internal_insert</*AllowCreate=*/tbb::internal::true_type,
1081 /*AllowDestroy=*/tbb::internal::true_type>(std::move(value));
1084 iterator insert(const_iterator, value_type&& value) {
1086 return insert(std::move(value)).first;
1088 #endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/
1090 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
// Insert a node owned by a node handle. AllowCreate=false: the node already
// exists; AllowDestroy=false: on a duplicate key the node must survive so
// ownership stays with 'nh'.
1091 std::pair<iterator, bool> insert(node_type&& nh) {
1093 nodeptr_t handled_node = nh.my_node;
1094 std::pair<iterator, bool> insert_result =
1095 internal_insert</*AllowCreate=*/tbb::internal::false_type,
1096 /*AllowDestroy=*/tbb::internal::false_type>
1097 (handled_node->my_element, handled_node);
1098 if (insert_result.second)
1100 return insert_result;
1102 return std::pair<iterator, bool>(end(), false);
1105 iterator insert(const_iterator, node_type&& nh) {
1106 return insert(std::move(nh)).first;
1108 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
1110 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT
// Emplace: the element is constructed in a fresh node up front, then linked
// in. AllowDestroy=true lets internal_insert free the node if an equal key
// already exists (unique-key containers).
1111 template<typename... Args>
1112 std::pair<iterator, bool> emplace(Args&&... args) {
1113 nodeptr_t pnode = my_solist.create_node_v(tbb::internal::forward<Args>(args)...);
1115 return internal_insert</*AllowCreate=*/tbb::internal::false_type,
1116 /*AllowDestroy=*/tbb::internal::true_type>(pnode->my_element, pnode);
1119 template<typename... Args>
1120 iterator emplace_hint(const_iterator, Args&&... args) {
// Hint is ignored, consistent with the other hinted overloads.
1122 return emplace(tbb::internal::forward<Args>(args)...).first;
1124 #endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT
// Insert every element of [first, last); per-element insertion rules apply
// (duplicates handled by the container's multimapping policy).
1127 template<class Iterator>
1128 void insert(Iterator first, Iterator last) {
1129 for (Iterator it = first; it != last; ++it)
1133 #if __TBB_INITIALIZER_LISTS_PRESENT
1134 //! Insert initializer list
1135 void insert(std::initializer_list<value_type> il) {
1136 insert(il.begin(), il.end());
// Erase operations are NOT safe to run concurrently with other operations
// (hence the unsafe_ prefix).
1140 iterator unsafe_erase(const_iterator where) {
1141 return internal_erase(where);
1144 iterator unsafe_erase(const_iterator first, const_iterator last) {
// Post-increment keeps 'first' valid while the current element is erased.
1145 while (first != last)
1146 unsafe_erase(first++);
// Here first == last; convert it to a mutable iterator for the return.
1147 return my_solist.get_iterator(first);
// Erase all elements equal to 'key'; result is the number removed.
1150 size_type unsafe_erase(const key_type& key) {
1151 pairii_t where = equal_range(key);
1152 size_type item_count = internal_distance(where.first, where.second);
1153 unsafe_erase(where.first, where.second);
1157 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
// Unlink an element and hand it back as a node handle (not concurrency-safe).
1158 node_type unsafe_extract(const_iterator where) {
1159 return internal_extract(where).first;
1162 node_type unsafe_extract(const key_type& key) {
1163 pairii_t where = equal_range(key);
1164 if (where.first == end()) return node_type(); // element was not found
1165 return internal_extract(where.first).first;
1167 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
// Swap complete container state with 'right' (not concurrency-safe). The
// bucket segment pointers are exchanged elementwise by internal_swap_buckets.
1169 void swap(concurrent_unordered_base& right) {
1170 if (this != &right) {
1171 std::swap(my_hash_compare, right.my_hash_compare);
1172 my_solist.swap(right.my_solist);
1173 internal_swap_buckets(right);
1174 std::swap(my_number_of_buckets, right.my_number_of_buckets);
1175 std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
// Observers: return copies of the stored hash and key-equality functors.
1180 hasher hash_function() const {
1181 return my_hash_compare.my_hash_object;
1184 key_equal key_eq() const {
1185 return my_hash_compare.my_key_compare_object;
// Bucket 0 must exist before any lookup: it is anchored at the list's
// leading dummy node. (NOTE(review): the enclosing function header for this
// fragment is outside the visible chunk.)
1195 // Initialize bucket 0
1196 __TBB_ASSERT(my_buckets[0] == NULL, NULL);
1197 raw_iterator dummy_node = my_solist.raw_begin();
1198 set_bucket(0, dummy_node);
// Lookup: the const overload forwards to the non-const implementation via
// const_cast; internal_find does not modify logical state on lookup.
1202 iterator find(const key_type& key) {
1203 return internal_find(key);
1206 const_iterator find(const key_type& key) const {
1207 return const_cast<self_type*>(this)->internal_find(key);
// Number of elements matching 'key'; at most 1 unless multimapping is allowed.
1210 size_type count(const key_type& key) const {
1211 if(allow_multimapping) {
1212 paircc_t answer = equal_range(key);
1213 size_type item_count = internal_distance(answer.first, answer.second);
1216 return const_cast<self_type*>(this)->internal_find(key) == end()?0:1;
1220 std::pair<iterator, iterator> equal_range(const key_type& key) {
1221 return internal_equal_range(key);
1224 std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
1225 return const_cast<self_type*>(this)->internal_equal_range(key);
1228 // Bucket interface - for debugging
1229 size_type unsafe_bucket_count() const {
1230 return my_number_of_buckets;
1233 size_type unsafe_max_bucket_count() const {
1234 return segment_size(pointers_per_table-1);
// Count elements in 'bucket' by walking from its dummy head to the next
// dummy node — O(bucket size).
1237 size_type unsafe_bucket_size(size_type bucket) {
1238 size_type item_count = 0;
1239 if (is_initialized(bucket)) {
1240 raw_iterator it = get_bucket(bucket);
1242 for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
// Map a key to its bucket index under the current bucket count.
1248 size_type unsafe_bucket(const key_type& key) const {
1249 sokey_t order_key = (sokey_t) my_hash_compare(key);
1250 size_type bucket = order_key % my_number_of_buckets;
1254 // If the bucket is initialized, return a first non-dummy element in it
1255 local_iterator unsafe_begin(size_type bucket) {
1256 if (!is_initialized(bucket))
1259 raw_iterator it = get_bucket(bucket);
1260 return my_solist.first_real_iterator(it);
1263 // If the bucket is initialized, return a first non-dummy element in it
1264 const_local_iterator unsafe_begin(size_type bucket) const
1266 if (!is_initialized(bucket))
1269 raw_const_iterator it = get_bucket(bucket);
1270 return my_solist.first_real_iterator(it);
1273 // @REVIEW: Takes O(n)
1274 // Returns the iterator after the last non-dummy element in the bucket
1275 local_iterator unsafe_end(size_type bucket)
1277 if (!is_initialized(bucket))
1280 raw_iterator it = get_bucket(bucket);
1282 // Find the end of the bucket, denoted by the dummy element
1284 while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
1286 // Return the first real element past the end of the bucket
1287 return my_solist.first_real_iterator(it);
1290 // @REVIEW: Takes O(n)
1291 // Returns the iterator after the last non-dummy element in the bucket
1292 const_local_iterator unsafe_end(size_type bucket) const
1294 if (!is_initialized(bucket))
1297 raw_const_iterator it = get_bucket(bucket);
1299 // Find the end of the bucket, denoted by the dummy element
1301 while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
1303 // Return the first real element past the end of the bucket
1304 return my_solist.first_real_iterator(it);
// cbegin/cend dispatch to the const overloads above via a const self cast.
1307 const_local_iterator unsafe_cbegin(size_type bucket) const {
1308 return ((const self_type *) this)->unsafe_begin(bucket);
1311 const_local_iterator unsafe_cend(size_type bucket) const {
1312 return ((const self_type *) this)->unsafe_end(bucket);
// Average number of elements per bucket.
1316 float load_factor() const {
1317 return (float) size() / (float) unsafe_bucket_count();
1320 float max_load_factor() const {
1321 return my_maximum_bucket_size;
1324 void max_load_factor(float newmax) {
// 'newmax != newmax' rejects NaN (a NaN compares unequal to itself);
// negative values are likewise invalid.
1325 if (newmax != newmax || newmax < 0)
1326 tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
1327 my_maximum_bucket_size = newmax;
1330 // This function is a noop, because the underlying split-ordered list
1331 // is already sorted, so an increase in the bucket number will be
1332 // reflected next time this bucket is touched.
1333 void rehash(size_type buckets) {
1334 size_type current_buckets = my_number_of_buckets;
// Only ever grows the table; the request is rounded up to a power of two.
1335 if (current_buckets >= buckets)
1337 my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2
1342 // Initialize the hash and keep the first bucket open
1343 void internal_init() {
1344 // Initialize the array of segment pointers
1345 memset(my_buckets, 0, sizeof(my_buckets));
1347 // Initialize bucket 0
// Bucket 0 is anchored at the list's leading dummy node.
1348 raw_iterator dummy_node = my_solist.raw_begin();
1349 set_bucket(0, dummy_node);
// Destroy and free every allocated bucket segment; segment k has
// segment_size(k) slots.
1352 void internal_clear() {
1353 for (size_type index = 0; index < pointers_per_table; ++index) {
1354 if (my_buckets[index] != NULL) {
1355 size_type sz = segment_size(index);
1356 for (size_type index2 = 0; index2 < sz; ++index2)
1357 my_allocator.destroy(&my_buckets[index][index2]);
1358 my_allocator.deallocate(my_buckets[index], sz);
1359 my_buckets[index] = 0;
// Copy configuration and all elements from 'right' (not concurrency-safe);
// element copying goes through the regular range insert.
1364 void internal_copy(const self_type& right) {
1367 my_maximum_bucket_size = right.my_maximum_bucket_size;
1368 my_number_of_buckets = right.my_number_of_buckets;
1371 insert(right.begin(), right.end());
1372 my_hash_compare = right.my_hash_compare;
1373 } __TBB_CATCH(...) {
// Exchange the bucket segment pointers with 'right', one segment at a time.
1379 void internal_swap_buckets(concurrent_unordered_base& right)
1381 // Swap all node segments
1382 for (size_type index = 0; index < pointers_per_table; ++index)
1384 raw_iterator * iterator_pointer = my_buckets[index];
1385 my_buckets[index] = right.my_buckets[index];
1386 right.my_buckets[index] = iterator_pointer;
1390 //TODO: why not use std::distance?
// Count elements in [first, last) by linear traversal.
1392 static size_type internal_distance(const_iterator first, const_iterator last)
1396 for (const_iterator it = first; it != last; ++it)
1402 // Insert an element in the hash given its value
// AllowCreate: build the node from 'value' lazily, only when an insertion
// point has been found. AllowDestroy: free 'pnode' if an equal key already
// exists (true for emplace; false for node-handle insert, which must keep
// the node alive so ownership can return to the handle).
1403 template<typename AllowCreate, typename AllowDestroy, typename ValueType>
1404 std::pair<iterator, bool> internal_insert(__TBB_FORWARDING_REF(ValueType) value, nodeptr_t pnode = NULL)
1406 const key_type *pkey = &get_key(value);
1407 sokey_t hash_key = (sokey_t) my_hash_compare(*pkey);
1408 size_type new_count = 0;
1409 sokey_t order_key = split_order_key_regular(hash_key);
1410 raw_iterator previous = prepare_bucket(hash_key);
1411 raw_iterator last = my_solist.raw_end();
1412 __TBB_ASSERT(previous != last, "Invalid head node");
1415 // Set new order_key to node
1416 pnode->init(order_key);
1419 // First node is a dummy node
// Scan forward from the bucket's dummy head for the ordered position.
1420 for (raw_iterator where = previous;;)
1423 if (where == last || solist_t::get_order_key(where) > order_key ||
1424 // if multimapped, stop at the first item equal to us.
1425 (allow_multimapping && solist_t::get_order_key(where) == order_key &&
1426 !my_hash_compare(get_key(*where), *pkey))) // TODO: fix negation
// Create the node only now, when an insertion is actually attempted.
1429 pnode = my_solist.create_node(order_key, tbb::internal::forward<ValueType>(value), AllowCreate());
1430 // If the value was moved, the known reference to key might be invalid
1431 pkey = &get_key(pnode->my_element);
1434 // Try to insert 'pnode' between 'previous' and 'where'
1435 std::pair<iterator, bool> result = my_solist.try_insert(previous, where, pnode, &new_count);
1439 // Insertion succeeded, adjust the table size, if needed
1440 adjust_table_size(new_count, my_number_of_buckets);
1445 // Insertion failed: either the same node was inserted by another thread, or
1446 // another element was inserted at exactly the same place as this node.
1447 // Proceed with the search from the previous location where order key was
1448 // known to be larger (note: this is legal only because there is no safe
1449 // concurrent erase operation supported).
// Unique-key case: an equal element was found, so report failure and free
// the speculatively created node if we own it.
1454 else if (!allow_multimapping && solist_t::get_order_key(where) == order_key &&
1455 !my_hash_compare(get_key(*where), *pkey)) // TODO: fix negation
1456 { // Element already in the list, return it
1457 if (pnode && AllowDestroy::value)
1458 my_solist.destroy_node(pnode);
1459 return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
1461 // Move the iterator forward
1466 // Find the element in the split-ordered list
1467 iterator internal_find(const key_type& key)
1469 sokey_t hash_key = (sokey_t) my_hash_compare(key);
1470 sokey_t order_key = split_order_key_regular(hash_key);
1471 raw_iterator last = my_solist.raw_end();
// Scan from the key's bucket head; the list is sorted by order key, so the
// search can stop as soon as a larger order key is seen.
1473 for (raw_iterator it = prepare_bucket(hash_key); it != last; ++it)
1475 if (solist_t::get_order_key(it) > order_key)
1477 // If the order key is smaller than the current order key, the element
1478 // is not in the hash.
1481 else if (solist_t::get_order_key(it) == order_key)
1483 // The fact that order keys match does not mean that the element is found.
1484 // Key function comparison has to be performed to check whether this is the
1485 // right element. If not, keep searching while order key is the same.
1486 if (!my_hash_compare(get_key(*it), key)) // TODO: fix negation
1487 return my_solist.get_iterator(it);
1494 // Erase an element from the list. This is not a concurrency safe function.
1495 iterator internal_erase(const_iterator it)
1497 sokey_t hash_key = (sokey_t) my_hash_compare(get_key(*it));
1498 raw_iterator previous = prepare_bucket(hash_key);
1499 raw_iterator last = my_solist.raw_end();
1500 __TBB_ASSERT(previous != last, "Invalid head node");
1502 // First node is a dummy node
// Walk the bucket keeping the predecessor so the node can be unlinked.
1503 for (raw_iterator where = previous; where != last; previous = where) {
1505 if (my_solist.get_iterator(where) == it)
1506 return my_solist.erase_node(previous, it);
1511 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
// Unlink the node at 'it' WITHOUT destroying it and wrap it in a node
// handle; not concurrency-safe.
1512 std::pair<node_type, raw_iterator> internal_extract(const_iterator it) {
1513 sokey_t hash_key = sokey_t(my_hash_compare(get_key(*it)));
1514 raw_iterator previous = prepare_bucket(hash_key);
1515 raw_iterator last = my_solist.raw_end();
1516 __TBB_ASSERT(previous != last, "Invalid head node");
1518 for(raw_iterator where = previous; where != last; previous = where) {
1520 if (my_solist.get_iterator(where) == it) {
1521 const_iterator result = it;
// allow_destroy=false: the list node survives and moves into the handle.
1522 my_solist.erase_node(previous, it, /*allow_destroy*/tbb::internal::false_type());
1523 return std::pair<node_type, raw_iterator>( node_type(result.get_node_ptr()),
// NOTE(review): this return spells pair<node_type, iterator> while the
// declared result is pair<node_type, raw_iterator> — presumably iterator
// converts to raw_iterator; verify against the full source.
1527 return std::pair<node_type, iterator>(node_type(), end());
1529 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
1531 // Return the [begin, end) pair of iterators with the same key values.
1532 // This operation makes sense only if mapping is many-to-one.
1533 pairii_t internal_equal_range(const key_type& key)
1535 sokey_t hash_key = (sokey_t) my_hash_compare(key);
1536 sokey_t order_key = split_order_key_regular(hash_key);
1537 raw_iterator end_it = my_solist.raw_end();
1539 for (raw_iterator it = prepare_bucket(hash_key); it != end_it; ++it)
1541 if (solist_t::get_order_key(it) > order_key)
1543 // There is no element with the given key
1544 return pairii_t(end(), end());
1546 else if (solist_t::get_order_key(it) == order_key &&
1547 !my_hash_compare(get_key(*it), key)) // TODO: fix negation; also below
1549 iterator first = my_solist.get_iterator(it);
1550 iterator last = first;
// Extend 'last' over the run of equal keys when multimapping is allowed.
1551 do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) );
1552 return pairii_t(first, last);
1556 return pairii_t(end(), end());
// Lazily initialize 'bucket': recursively ensure its parent bucket exists,
// then splice a new dummy node for this bucket after the parent's dummy.
1560 void init_bucket(size_type bucket)
1562 // Bucket 0 has no parent.
1563 __TBB_ASSERT( bucket != 0, "The first bucket must always be initialized");
1565 size_type parent_bucket = get_parent(bucket);
1567 // All parent_bucket buckets have to be initialized before this bucket is
1568 if (!is_initialized(parent_bucket))
1569 init_bucket(parent_bucket);
1571 raw_iterator parent = get_bucket(parent_bucket);
1573 // Create a dummy first node in this bucket
1574 raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
1575 set_bucket(bucket, dummy_node);
// Double the bucket count when the load factor is exceeded; called after a
// successful insertion.
1578 void adjust_table_size(size_type total_elements, size_type current_size)
1580 // Grow the table by a factor of 2 if possible and needed
1581 if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
1583 // Double the size of the hash only if size has not changed in between loads
1584 my_number_of_buckets.compare_and_swap(2u*current_size, current_size);
1585 //Simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8
1586 //due to overzealous compiler warnings in /Wp64 mode
// Parent bucket index, per the split-ordered-list scheme.
1590 size_type get_parent(size_type bucket) const
1592 // Unsets bucket's most significant turned-on bit
1593 size_type msb = __TBB_Log2((uintptr_t)bucket);
1594 return bucket & ~(size_type(1) << msb);
1598 // Dynamic sized array (segments)
1599 //! @return segment index of given index in the array
// index|1 makes indices 0 and 1 both land in segment 0.
1600 static size_type segment_index_of( size_type index ) {
1601 return size_type( __TBB_Log2( uintptr_t(index|1) ) );
1604 //! @return the first array index of given segment
1605 static size_type segment_base( size_type k ) {
1606 return (size_type(1)<<k & ~size_type(1));
1609 //! @return segment size
// Segment k holds 2^k slots, except segment 0 which holds 2.
1610 static size_type segment_size( size_type k ) {
1611 return k? size_type(1)<<k : 2;
// Fetch the dummy-node iterator heading 'bucket'; the segment must already
// be allocated (asserted below).
1614 raw_iterator get_bucket(size_type bucket) const {
1615 size_type segment = segment_index_of(bucket);
1616 bucket -= segment_base(segment);
1617 __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
1618 return my_buckets[segment][bucket];
// Map 'hash_key' to its bucket head, initializing the bucket on first touch.
1621 raw_iterator prepare_bucket(sokey_t hash_key) {
1622 size_type bucket = hash_key % my_number_of_buckets;
1623 size_type segment = segment_index_of(bucket);
1624 size_type index = bucket - segment_base(segment);
1625 if (my_buckets[segment] == NULL || my_buckets[segment][index].get_node_ptr() == NULL)
1626 init_bucket(bucket);
1627 return my_buckets[segment][index];
// Publish 'dummy_head' as the head of 'bucket', allocating the containing
// segment on demand.
1630 void set_bucket(size_type bucket, raw_iterator dummy_head) {
1631 size_type segment = segment_index_of(bucket);
1632 bucket -= segment_base(segment);
1634 if (my_buckets[segment] == NULL) {
1635 size_type sz = segment_size(segment);
1636 raw_iterator * new_segment = my_allocator.allocate(sz);
1637 std::memset(static_cast<void*>(new_segment), 0, sz*sizeof(raw_iterator));
// CAS arbitrates concurrent segment allocation; the losing thread frees
// its freshly allocated segment.
1639 if (my_buckets[segment].compare_and_swap( new_segment, NULL) != NULL)
1640 my_allocator.deallocate(new_segment, sz);
1643 my_buckets[segment][bucket] = dummy_head;
// A bucket is initialized once its segment exists and its slot holds a
// non-null dummy-node iterator.
1646 bool is_initialized(size_type bucket) const {
1647 size_type segment = segment_index_of(bucket);
1648 bucket -= segment_base(segment);
1650 if (my_buckets[segment] == NULL)
1653 raw_iterator it = my_buckets[segment][bucket];
1654 return (it.get_node_ptr() != NULL);
1657 // Utilities for keys
1659 // A regular order key has its original hash value reversed and the last bit set
1660 sokey_t split_order_key_regular(sokey_t order_key) const {
1661 return __TBB_ReverseBits(order_key) | 0x1;
1664 // A dummy order key has its original hash value reversed and the last bit unset
1665 sokey_t split_order_key_dummy(sokey_t order_key) const {
1666 return __TBB_ReverseBits(order_key) & ~sokey_t(0x1);
// Container state. my_number_of_buckets and the segment pointers are atomic
// because they are read/written concurrently during insert-driven growth.
1670 atomic<size_type> my_number_of_buckets; // Current table size
1671 solist_t my_solist; // List where all the elements are kept
1672 typename tbb::internal::allocator_rebind<allocator_type, raw_iterator>::type my_allocator; // Allocator object for segments
1673 float my_maximum_bucket_size; // Maximum size of the bucket
1674 atomic<raw_iterator*> my_buckets[pointers_per_table]; // The segment table
1676 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1677 #pragma warning(pop) // warning 4127 is back
1680 } // namespace internal
1682 } // namespace interface5
1684 #endif // __TBB__concurrent_unordered_impl_H