2 * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights reserved.
3 * Copyright (C) 2008 David Levin <levin@chromium.org>
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
* Library General Public License for more details.
13 * You should have received a copy of the GNU Library General Public License
14 * along with this library; see the file COPYING.LIB. If not, write to
15 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
16 * Boston, MA 02110-1301, USA.
20 #ifndef WTF_HashTable_h
21 #define WTF_HashTable_h
23 #include "wtf/Alignment.h"
24 #include "wtf/Assertions.h"
25 #include "wtf/DefaultAllocator.h"
26 #include "wtf/HashTraits.h"
// Compile-time switches for hash table instrumentation: global counters
// (DUMP_HASHTABLE_STATS) and per-table counters (DUMP_HASHTABLE_STATS_PER_TABLE).
#define DUMP_HASHTABLE_STATS 0
#define DUMP_HASHTABLE_STATS_PER_TABLE 0

#if DUMP_HASHTABLE_STATS_PER_TABLE
#include "wtf/DataLog.h"
#endif

// UPDATE_ACCESS_COUNTS() declares the probe counter(s) used by
// UPDATE_PROBE_COUNTS(), so the former must appear before the probe loop.
#if DUMP_HASHTABLE_STATS
#if DUMP_HASHTABLE_STATS_PER_TABLE
#define UPDATE_PROBE_COUNTS() \
    ++probeCount; \
    HashTableStats::recordCollisionAtCount(probeCount); \
    ++perTableProbeCount; \
    m_stats->recordCollisionAtCount(perTableProbeCount)
#define UPDATE_ACCESS_COUNTS() \
    atomicIncrement(&HashTableStats::numAccesses); \
    int probeCount = 0; \
    ++m_stats->numAccesses; \
    int perTableProbeCount = 0
#else
#define UPDATE_PROBE_COUNTS() \
    ++probeCount; \
    HashTableStats::recordCollisionAtCount(probeCount)
#define UPDATE_ACCESS_COUNTS() \
    atomicIncrement(&HashTableStats::numAccesses); \
    int probeCount = 0
#endif
#else
#if DUMP_HASHTABLE_STATS_PER_TABLE
#define UPDATE_PROBE_COUNTS() \
    ++perTableProbeCount; \
    m_stats->recordCollisionAtCount(perTableProbeCount)
#define UPDATE_ACCESS_COUNTS() \
    ++m_stats->numAccesses; \
    int perTableProbeCount = 0
#else
#define UPDATE_PROBE_COUNTS() do { } while (0)
#define UPDATE_ACCESS_COUNTS() do { } while (0)
#endif
#endif
#if DUMP_HASHTABLE_STATS

// Process-wide counters for hash table instrumentation; definitions live in
// the corresponding .cpp.
struct HashTableStats {
    // The following variables are all atomically incremented when modified.
    static int numAccesses;
    static int numRehashes;
    static int numRemoves;
    static int numReinserts;

    // The following variables are only modified in the recordCollisionAtCount method within a mutex.
    static int maxCollisions;
    static int numCollisions;
    static int collisionGraph[4096];

    static void recordCollisionAtCount(int count);
    static void dumpStats();
};

#endif
92 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
94 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
95 class HashTableIterator;
96 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
97 class HashTableConstIterator;
98 template<typename Value, typename HashFunctions, typename HashTraits, typename Allocator>
100 template<WeakHandlingFlag x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z>
101 struct WeakProcessingHashTableHelper;
// Tag argument for iterator constructors: the supplied position is already
// known to hold a live entry, so the empty/deleted-bucket scan is skipped.
typedef enum { HashItemKnownGood } HashItemKnownGoodTag;
105 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
106 class HashTableConstIterator {
108 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
109 typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
110 typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
111 typedef Value ValueType;
112 typedef typename Traits::IteratorConstGetType GetType;
113 typedef const ValueType* PointerType;
115 friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;
116 friend class HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;
118 void skipEmptyBuckets()
120 while (m_position != m_endPosition && HashTableType::isEmptyOrDeletedBucket(*m_position))
124 HashTableConstIterator(PointerType position, PointerType endPosition, const HashTableType* container)
125 : m_position(position)
126 , m_endPosition(endPosition)
128 , m_container(container)
129 , m_containerModifications(container->modifications())
135 HashTableConstIterator(PointerType position, PointerType endPosition, const HashTableType* container, HashItemKnownGoodTag)
136 : m_position(position)
137 , m_endPosition(endPosition)
139 , m_container(container)
140 , m_containerModifications(container->modifications())
143 ASSERT(m_containerModifications == m_container->modifications());
146 void checkModifications() const
148 // HashTable and collections that build on it do not support
149 // modifications while there is an iterator in use. The exception
150 // is ListHashSet, which has its own iterators that tolerate
151 // modification of the underlying set.
152 ASSERT(m_containerModifications == m_container->modifications());
156 HashTableConstIterator()
162 checkModifications();
165 typename Traits::IteratorConstReferenceType operator*() const { return Traits::getToReferenceConstConversion(get()); }
166 GetType operator->() const { return get(); }
168 const_iterator& operator++()
170 ASSERT(m_position != m_endPosition);
171 checkModifications();
177 // postfix ++ intentionally omitted
180 bool operator==(const const_iterator& other) const
182 return m_position == other.m_position;
184 bool operator!=(const const_iterator& other) const
186 return m_position != other.m_position;
188 bool operator==(const iterator& other) const
190 return *this == static_cast<const_iterator>(other);
192 bool operator!=(const iterator& other) const
194 return *this != static_cast<const_iterator>(other);
198 PointerType m_position;
199 PointerType m_endPosition;
201 const HashTableType* m_container;
202 int64_t m_containerModifications;
206 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
207 class HashTableIterator {
209 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
210 typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
211 typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
212 typedef Value ValueType;
213 typedef typename Traits::IteratorGetType GetType;
214 typedef ValueType* PointerType;
216 friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;
218 HashTableIterator(PointerType pos, PointerType end, const HashTableType* container) : m_iterator(pos, end, container) { }
219 HashTableIterator(PointerType pos, PointerType end, const HashTableType* container, HashItemKnownGoodTag tag) : m_iterator(pos, end, container, tag) { }
222 HashTableIterator() { }
224 // default copy, assignment and destructor are OK
226 GetType get() const { return const_cast<GetType>(m_iterator.get()); }
227 typename Traits::IteratorReferenceType operator*() const { return Traits::getToReferenceConversion(get()); }
228 GetType operator->() const { return get(); }
230 iterator& operator++() { ++m_iterator; return *this; }
232 // postfix ++ intentionally omitted
235 bool operator==(const iterator& other) const { return m_iterator == other.m_iterator; }
236 bool operator!=(const iterator& other) const { return m_iterator != other.m_iterator; }
237 bool operator==(const const_iterator& other) const { return m_iterator == other; }
238 bool operator!=(const const_iterator& other) const { return m_iterator != other; }
240 operator const_iterator() const { return m_iterator; }
243 const_iterator m_iterator;
248 // Work around MSVC's standard library, whose swap for pairs does not swap by component.
249 template<typename T> inline void hashTableSwap(T& a, T& b)
254 template<typename T, typename U> inline void hashTableSwap(KeyValuePair<T, U>& a, KeyValuePair<T, U>& b)
257 swap(a.value, b.value);
// Moves an entry between buckets during rehash: swap-based when the trait
// requests it (instantiated with Traits::needsDestruction, so the vacated
// bucket stays destructible), plain assignment otherwise.
template<typename T, bool useSwap> struct Mover;
template<typename T> struct Mover<T, true> { static void move(T& from, T& to) { hashTableSwap(from, to); } };
template<typename T> struct Mover<T, false> { static void move(T& from, T& to) { to = from; } };
// Translator used when the lookup key is the stored key itself: hashing and
// equality defer to the table's HashFunctions, and translate() stores the
// value directly.
template<typename HashFunctions> class IdentityHashTranslator {
public:
    template<typename T> static unsigned hash(const T& key) { return HashFunctions::hash(key); }
    template<typename T, typename U> static bool equal(const T& a, const U& b) { return HashFunctions::equal(a, b); }
    template<typename T, typename U, typename V> static void translate(T& location, const U&, const V& value) { location = value; }
};
271 template<typename HashTableType, typename ValueType> struct HashTableAddResult {
272 HashTableAddResult(const HashTableType* container, ValueType* storedValue, bool isNewEntry)
273 : storedValue(storedValue)
274 , isNewEntry(isNewEntry)
275 #if ENABLE(SECURITY_ASSERT)
276 , m_container(container)
277 , m_containerModifications(container->modifications())
280 ASSERT_UNUSED(container, container);
283 ~HashTableAddResult()
285 // If rehash happened before accessing storedValue, it's
286 // use-after-free. Any modification may cause a rehash, so we check
287 // for modifications here.
288 // Rehash after accessing storedValue is harmless but will assert if
289 // the AddResult destructor takes place after a modification. You
290 // may need to limit the scope of the AddResult.
291 ASSERT_WITH_SECURITY_IMPLICATION(m_containerModifications == m_container->modifications());
294 ValueType* storedValue;
297 #if ENABLE(SECURITY_ASSERT)
299 const HashTableType* m_container;
300 const int64_t m_containerModifications;
304 template<typename Value, typename Extractor, typename KeyTraits>
305 struct HashTableHelper {
306 static bool isEmptyBucket(const Value& value) { return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); }
307 static bool isDeletedBucket(const Value& value) { return KeyTraits::isDeletedValue(Extractor::extract(value)); }
308 static bool isEmptyOrDeletedBucket(const Value& value) { return isEmptyBucket(value) || isDeletedBucket(value); }
// Debug helper: rejects lookup keys that compare equal to the empty value,
// which would corrupt the table if inserted.
template<typename HashTranslator, typename KeyTraits, bool safeToCompareToEmptyOrDeleted>
struct HashTableKeyChecker {
    // There's no simple generic way to make this check if safeToCompareToEmptyOrDeleted is false,
    // so the check always passes.
    template <typename T>
    static bool checkKey(const T&) { return true; }
};

template<typename HashTranslator, typename KeyTraits>
struct HashTableKeyChecker<HashTranslator, KeyTraits, true> {
    template <typename T>
    static bool checkKey(const T& key)
    {
        // FIXME : Check also equality to the deleted value.
        return !HashTranslator::equal(KeyTraits::emptyValue(), key);
    }
};
// Don't declare a destructor for HeapAllocated hash tables.
template<typename Derived, bool isGarbageCollected>
class HashTableDestructorBase;

// Garbage-collected tables: the heap reclaims storage, no destructor needed.
template<typename Derived>
class HashTableDestructorBase<Derived, true> { };

// Manually managed tables: forward destruction to Derived::finalize().
template<typename Derived>
class HashTableDestructorBase<Derived, false> {
public:
    ~HashTableDestructorBase() { static_cast<Derived*>(this)->finalize(); }
};
342 // Note: empty or deleted key values are not allowed, using them may lead to undefined behavior.
343 // For pointer keys this means that null pointers are not allowed unless you supply custom key traits.
344 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
345 class HashTable : public HashTableDestructorBase<HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>, Allocator::isGarbageCollected> {
347 typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
348 typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
349 typedef Traits ValueTraits;
351 typedef typename KeyTraits::PeekInType KeyPeekInType;
352 typedef typename KeyTraits::PassInType KeyPassInType;
353 typedef Value ValueType;
354 typedef Extractor ExtractorType;
355 typedef KeyTraits KeyTraitsType;
356 typedef typename Traits::PassInType ValuePassInType;
357 typedef IdentityHashTranslator<HashFunctions> IdentityTranslatorType;
358 typedef HashTableAddResult<HashTable, ValueType> AddResult;
360 #if DUMP_HASHTABLE_STATS_PER_TABLE
380 int collisionGraph[4096];
382 void recordCollisionAtCount(int count)
384 if (count > maxCollisions)
385 maxCollisions = count;
387 collisionGraph[count]++;
392 dataLogF("\nWTF::HashTable::Stats dump\n\n");
393 dataLogF("%d accesses\n", numAccesses);
394 dataLogF("%d total collisions, average %.2f probes per access\n", numCollisions, 1.0 * (numAccesses + numCollisions) / numAccesses);
395 dataLogF("longest collision chain: %d\n", maxCollisions);
396 for (int i = 1; i <= maxCollisions; i++) {
397 dataLogF(" %d lookups with exactly %d collisions (%.2f%% , %.2f%% with this many or more)\n", collisionGraph[i], i, 100.0 * (collisionGraph[i] - collisionGraph[i+1]) / numAccesses, 100.0 * collisionGraph[i] / numAccesses);
399 dataLogF("%d rehashes\n", numRehashes);
400 dataLogF("%d reinserts\n", numReinserts);
408 ASSERT(!Allocator::isGarbageCollected);
409 if (LIKELY(!m_table))
411 deleteAllBucketsAndDeallocate(m_table, m_tableSize);
415 HashTable(const HashTable&);
416 void swap(HashTable&);
417 HashTable& operator=(const HashTable&);
419 // When the hash table is empty, just return the same iterator for end as for begin.
420 // This is more efficient because we don't have to skip all the empty and deleted
421 // buckets, and iterating an empty table is a common case that's worth optimizing.
422 iterator begin() { return isEmpty() ? end() : makeIterator(m_table); }
423 iterator end() { return makeKnownGoodIterator(m_table + m_tableSize); }
424 const_iterator begin() const { return isEmpty() ? end() : makeConstIterator(m_table); }
425 const_iterator end() const { return makeKnownGoodConstIterator(m_table + m_tableSize); }
427 unsigned size() const { return m_keyCount; }
428 unsigned capacity() const { return m_tableSize; }
429 bool isEmpty() const { return !m_keyCount; }
431 AddResult add(ValuePassInType value)
433 return add<IdentityTranslatorType>(Extractor::extract(value), value);
436 // A special version of add() that finds the object by hashing and comparing
437 // with some other type, to avoid the cost of type conversion if the object is already
439 template<typename HashTranslator, typename T, typename Extra> AddResult add(const T& key, const Extra&);
440 template<typename HashTranslator, typename T, typename Extra> AddResult addPassingHashCode(const T& key, const Extra&);
442 iterator find(KeyPeekInType key) { return find<IdentityTranslatorType>(key); }
443 const_iterator find(KeyPeekInType key) const { return find<IdentityTranslatorType>(key); }
444 bool contains(KeyPeekInType key) const { return contains<IdentityTranslatorType>(key); }
446 template<typename HashTranslator, typename T> iterator find(const T&);
447 template<typename HashTranslator, typename T> const_iterator find(const T&) const;
448 template<typename HashTranslator, typename T> bool contains(const T&) const;
450 void remove(KeyPeekInType);
451 void remove(iterator);
452 void remove(const_iterator);
455 static bool isEmptyBucket(const ValueType& value) { return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); }
456 static bool isDeletedBucket(const ValueType& value) { return KeyTraits::isDeletedValue(Extractor::extract(value)); }
457 static bool isEmptyOrDeletedBucket(const ValueType& value) { return HashTableHelper<ValueType, Extractor, KeyTraits>:: isEmptyOrDeletedBucket(value); }
459 ValueType* lookup(KeyPeekInType key) { return lookup<IdentityTranslatorType, KeyPeekInType>(key); }
460 template<typename HashTranslator, typename T> ValueType* lookup(T);
461 template<typename HashTranslator, typename T> const ValueType* lookup(T) const;
463 void trace(typename Allocator::Visitor*);
466 int64_t modifications() const { return m_modifications; }
467 void registerModification() { m_modifications++; }
468 // HashTable and collections that build on it do not support
469 // modifications while there is an iterator in use. The exception is
470 // ListHashSet, which has its own iterators that tolerate modification
471 // of the underlying set.
472 void checkModifications(int64_t mods) const { ASSERT(mods == m_modifications); }
474 int64_t modifications() const { return 0; }
475 void registerModification() { }
476 void checkModifications(int64_t mods) const { }
480 static ValueType* allocateTable(unsigned size);
481 static void deleteAllBucketsAndDeallocate(ValueType* table, unsigned size);
483 typedef std::pair<ValueType*, bool> LookupType;
484 typedef std::pair<LookupType, unsigned> FullLookupType;
486 LookupType lookupForWriting(const Key& key) { return lookupForWriting<IdentityTranslatorType>(key); };
487 template<typename HashTranslator, typename T> FullLookupType fullLookupForWriting(const T&);
488 template<typename HashTranslator, typename T> LookupType lookupForWriting(const T&);
490 void remove(ValueType*);
492 bool shouldExpand() const { return (m_keyCount + m_deletedCount) * m_maxLoad >= m_tableSize; }
493 bool mustRehashInPlace() const { return m_keyCount * m_minLoad < m_tableSize * 2; }
494 bool shouldShrink() const
496 // isAllocationAllowed check should be at the last because it's
498 return m_keyCount * m_minLoad < m_tableSize
499 && m_tableSize > KeyTraits::minimumTableSize
500 && Allocator::isAllocationAllowed();
502 ValueType* expand(ValueType* entry = 0);
503 void shrink() { rehash(m_tableSize / 2, 0); }
505 ValueType* rehash(unsigned newTableSize, ValueType* entry);
506 ValueType* reinsert(ValueType&);
508 static void initializeBucket(ValueType& bucket);
509 static void deleteBucket(ValueType& bucket) { bucket.~ValueType(); Traits::constructDeletedValue(bucket); }
511 FullLookupType makeLookupResult(ValueType* position, bool found, unsigned hash)
512 { return FullLookupType(LookupType(position, found), hash); }
514 iterator makeIterator(ValueType* pos) { return iterator(pos, m_table + m_tableSize, this); }
515 const_iterator makeConstIterator(ValueType* pos) const { return const_iterator(pos, m_table + m_tableSize, this); }
516 iterator makeKnownGoodIterator(ValueType* pos) { return iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }
517 const_iterator makeKnownGoodConstIterator(ValueType* pos) const { return const_iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }
519 static const unsigned m_maxLoad = 2;
520 static const unsigned m_minLoad = 6;
522 unsigned tableSizeMask() const
524 size_t mask = m_tableSize - 1;
525 ASSERT((mask & m_tableSize) == 0);
529 void setEnqueued() { m_queueFlag = true; }
530 void clearEnqueued() { m_queueFlag = false; }
531 bool enqueued() { return m_queueFlag; }
534 unsigned m_tableSize;
536 unsigned m_deletedCount:31;
539 unsigned m_modifications;
542 #if DUMP_HASHTABLE_STATS_PER_TABLE
544 mutable OwnPtr<Stats> m_stats;
547 template<WeakHandlingFlag x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z> friend struct WeakProcessingHashTableHelper;
548 template<typename T, typename U, typename V, typename W> friend class LinkedHashSet;
// Set all the bits to one after the most significant bit: 00110101010 -> 00111111111.
template<unsigned size> struct OneifyLowBits;
template<>
struct OneifyLowBits<0> {
    static const unsigned value = 0;
};
template<unsigned number>
struct OneifyLowBits {
    static const unsigned value = number | OneifyLowBits<(number >> 1)>::value;
};

// Compute the first power of two integer that is an upper bound of the parameter 'number'.
template<unsigned number>
struct UpperPowerOfTwoBound {
    // OneifyLowBits<number - 1> + 1 is the smallest power of two >= number;
    // the result is doubled for use in capacity computations below.
    static const unsigned value = (OneifyLowBits<number - 1>::value + 1) * 2;
};
567 // Because power of two numbers are the limit of maxLoad, their capacity is twice the
568 // UpperPowerOfTwoBound, or 4 times their values.
569 template<unsigned size, bool isPowerOfTwo> struct HashTableCapacityForSizeSplitter;
570 template<unsigned size>
571 struct HashTableCapacityForSizeSplitter<size, true> {
572 static const unsigned value = size * 4;
574 template<unsigned size>
575 struct HashTableCapacityForSizeSplitter<size, false> {
576 static const unsigned value = UpperPowerOfTwoBound<size>::value;
579 // HashTableCapacityForSize computes the upper power of two capacity to hold the size parameter.
580 // This is done at compile time to initialize the HashTraits.
581 template<unsigned size>
582 struct HashTableCapacityForSize {
583 static const unsigned value = HashTableCapacityForSizeSplitter<size, !(size & (size - 1))>::value;
584 COMPILE_ASSERT(size > 0, HashTableNonZeroMinimumCapacity);
585 COMPILE_ASSERT(!static_cast<int>(value >> 31), HashTableNoCapacityOverflow);
586 COMPILE_ASSERT(value > (2 * size), HashTableCapacityHoldsContentSize);
589 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
590 inline HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::HashTable()
599 #if DUMP_HASHTABLE_STATS_PER_TABLE
600 , m_stats(adoptPtr(new Stats))
// Secondary hash used as the probe increment for double hashing; spreads the
// bits of the primary hash so colliding keys follow different probe sequences.
inline unsigned doubleHash(unsigned key)
{
    key = ~key + (key >> 23);
    key ^= (key << 12);
    key ^= (key >> 7);
    key ^= (key << 2);
    key ^= (key >> 20);
    return key;
}
615 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
616 template<typename HashTranslator, typename T>
617 inline Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key)
619 return const_cast<Value*>(const_cast<const HashTable*>(this)->lookup<HashTranslator, T>(key));
622 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
623 template<typename HashTranslator, typename T>
624 inline const Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key) const
626 ASSERT((HashTableKeyChecker<HashTranslator, KeyTraits, HashFunctions::safeToCompareToEmptyOrDeleted>::checkKey(key)));
627 const ValueType* table = m_table;
632 size_t sizeMask = tableSizeMask();
633 unsigned h = HashTranslator::hash(key);
634 size_t i = h & sizeMask;
636 UPDATE_ACCESS_COUNTS();
639 const ValueType* entry = table + i;
641 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
642 if (HashTranslator::equal(Extractor::extract(*entry), key))
645 if (isEmptyBucket(*entry))
648 if (isEmptyBucket(*entry))
651 if (!isDeletedBucket(*entry) && HashTranslator::equal(Extractor::extract(*entry), key))
654 UPDATE_PROBE_COUNTS();
656 k = 1 | doubleHash(h);
657 i = (i + k) & sizeMask;
661 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
662 template<typename HashTranslator, typename T>
663 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::LookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookupForWriting(const T& key)
666 registerModification();
668 ValueType* table = m_table;
670 size_t sizeMask = tableSizeMask();
671 unsigned h = HashTranslator::hash(key);
672 size_t i = h & sizeMask;
674 UPDATE_ACCESS_COUNTS();
676 ValueType* deletedEntry = 0;
679 ValueType* entry = table + i;
681 if (isEmptyBucket(*entry))
682 return LookupType(deletedEntry ? deletedEntry : entry, false);
684 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
685 if (HashTranslator::equal(Extractor::extract(*entry), key))
686 return LookupType(entry, true);
688 if (isDeletedBucket(*entry))
689 deletedEntry = entry;
691 if (isDeletedBucket(*entry))
692 deletedEntry = entry;
693 else if (HashTranslator::equal(Extractor::extract(*entry), key))
694 return LookupType(entry, true);
696 UPDATE_PROBE_COUNTS();
698 k = 1 | doubleHash(h);
699 i = (i + k) & sizeMask;
703 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
704 template<typename HashTranslator, typename T>
705 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::FullLookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::fullLookupForWriting(const T& key)
708 registerModification();
710 ValueType* table = m_table;
712 size_t sizeMask = tableSizeMask();
713 unsigned h = HashTranslator::hash(key);
714 size_t i = h & sizeMask;
716 UPDATE_ACCESS_COUNTS();
718 ValueType* deletedEntry = 0;
721 ValueType* entry = table + i;
723 if (isEmptyBucket(*entry))
724 return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h);
726 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
727 if (HashTranslator::equal(Extractor::extract(*entry), key))
728 return makeLookupResult(entry, true, h);
730 if (isDeletedBucket(*entry))
731 deletedEntry = entry;
733 if (isDeletedBucket(*entry))
734 deletedEntry = entry;
735 else if (HashTranslator::equal(Extractor::extract(*entry), key))
736 return makeLookupResult(entry, true, h);
738 UPDATE_PROBE_COUNTS();
740 k = 1 | doubleHash(h);
741 i = (i + k) & sizeMask;
745 template<bool emptyValueIsZero> struct HashTableBucketInitializer;
747 template<> struct HashTableBucketInitializer<false> {
748 template<typename Traits, typename Value> static void initialize(Value& bucket)
750 new (NotNull, &bucket) Value(Traits::emptyValue());
754 template<> struct HashTableBucketInitializer<true> {
755 template<typename Traits, typename Value> static void initialize(Value& bucket)
757 // This initializes the bucket without copying the empty value.
758 // That makes it possible to use this with types that don't support copying.
759 // The memset to 0 looks like a slow operation but is optimized by the compilers.
760 memset(&bucket, 0, sizeof(bucket));
764 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
765 inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::initializeBucket(ValueType& bucket)
767 HashTableBucketInitializer<Traits::emptyValueIsZero>::template initialize<Traits>(bucket);
770 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
771 template<typename HashTranslator, typename T, typename Extra>
772 typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::add(const T& key, const Extra& extra)
774 ASSERT(Allocator::isAllocationAllowed());
780 ValueType* table = m_table;
782 size_t sizeMask = tableSizeMask();
783 unsigned h = HashTranslator::hash(key);
784 size_t i = h & sizeMask;
786 UPDATE_ACCESS_COUNTS();
788 ValueType* deletedEntry = 0;
793 if (isEmptyBucket(*entry))
796 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
797 if (HashTranslator::equal(Extractor::extract(*entry), key))
798 return AddResult(this, entry, false);
800 if (isDeletedBucket(*entry))
801 deletedEntry = entry;
803 if (isDeletedBucket(*entry))
804 deletedEntry = entry;
805 else if (HashTranslator::equal(Extractor::extract(*entry), key))
806 return AddResult(this, entry, false);
808 UPDATE_PROBE_COUNTS();
810 k = 1 | doubleHash(h);
811 i = (i + k) & sizeMask;
814 registerModification();
817 initializeBucket(*deletedEntry);
818 entry = deletedEntry;
822 HashTranslator::translate(*entry, key, extra);
823 ASSERT(!isEmptyOrDeletedBucket(*entry));
828 entry = expand(entry);
830 return AddResult(this, entry, true);
833 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
834 template<typename HashTranslator, typename T, typename Extra>
835 typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::addPassingHashCode(const T& key, const Extra& extra)
837 ASSERT(Allocator::isAllocationAllowed());
841 FullLookupType lookupResult = fullLookupForWriting<HashTranslator>(key);
843 ValueType* entry = lookupResult.first.first;
844 bool found = lookupResult.first.second;
845 unsigned h = lookupResult.second;
848 return AddResult(this, entry, false);
850 registerModification();
852 if (isDeletedBucket(*entry)) {
853 initializeBucket(*entry);
857 HashTranslator::translate(*entry, key, extra, h);
858 ASSERT(!isEmptyOrDeletedBucket(*entry));
862 entry = expand(entry);
864 return AddResult(this, entry, true);
867 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
868 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::reinsert(ValueType& entry)
871 registerModification();
872 ASSERT(!lookupForWriting(Extractor::extract(entry)).second);
873 ASSERT(!isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first)));
874 #if DUMP_HASHTABLE_STATS
875 atomicIncrement(&HashTableStats::numReinserts);
877 #if DUMP_HASHTABLE_STATS_PER_TABLE
878 ++m_stats->numReinserts;
880 Value* newEntry = lookupForWriting(Extractor::extract(entry)).first;
881 Mover<ValueType, Traits::needsDestruction>::move(entry, *newEntry);
886 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
887 template <typename HashTranslator, typename T>
888 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::find(const T& key)
890 ValueType* entry = lookup<HashTranslator>(key);
894 return makeKnownGoodIterator(entry);
897 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
898 template <typename HashTranslator, typename T>
899 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::const_iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::find(const T& key) const
901 ValueType* entry = const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
905 return makeKnownGoodConstIterator(entry);
908 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
909 template <typename HashTranslator, typename T>
910 bool HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::contains(const T& key) const
912 return const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
915 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
916 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(ValueType* pos)
918 registerModification();
919 #if DUMP_HASHTABLE_STATS
920 atomicIncrement(&HashTableStats::numRemoves);
922 #if DUMP_HASHTABLE_STATS_PER_TABLE
923 ++m_stats->numRemoves;
// Removes the element an iterator points at by forwarding its underlying
// bucket position to remove(ValueType*).
// NOTE(review): lines 936-939 are missing from this extract; presumably an
// it == end() guard precedes the call below -- confirm in the full file.
934 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
935 inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(iterator it)
940 remove(const_cast<ValueType*>(it.m_iterator.m_position));
// const_iterator overload: forwards the bucket position to remove(ValueType*).
// NOTE(review): lines 945-948 are missing from this extract; presumably an
// it == end() guard precedes the call below -- confirm in the full file.
943 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
944 inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(const_iterator it)
949 remove(const_cast<ValueType*>(it.m_position));
// Removes the element whose key equals |key|, if present.
// NOTE(review): the body (lines 954-956) is not in this extract; presumably it
// looks the key up and delegates to one of the remove overloads above --
// confirm in the full file.
952 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
953 inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(KeyPeekInType key)
// Allocates backing storage for |size| buckets through the Allocator policy.
// Fast path: when the trait guarantees an all-zero bit pattern is the empty
// value, a zero-filled allocation needs no per-bucket construction. Otherwise
// each bucket must be explicitly initialized.
958 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
959 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::allocateTable(unsigned size)
961 typedef typename Allocator::template HashTableBackingHelper<HashTable>::Type HashTableBacking;
963 size_t allocSize = size * sizeof(ValueType);
// A zeroed allocation would leave a vtable pointer null, so the zero-is-empty
// optimization is statically forbidden for polymorphic value types.
965 COMPILE_ASSERT(!Traits::emptyValueIsZero || !IsPolymorphic<ValueType>::value, EmptyValueCannotBeZeroForThingsWithAVtable);
966 if (Traits::emptyValueIsZero) {
967 result = Allocator::template zeroedBackingMalloc<ValueType*, HashTableBacking>(allocSize);
969 result = Allocator::template backingMalloc<ValueType*, HashTableBacking>(allocSize);
970 for (unsigned i = 0; i < size; i++)
971 initializeBucket(result[i]);
// NOTE(review): the declaration of |result|, the else keyword, and the return
// statement fall on lines omitted from this extract (960-974) -- confirm in
// the full file.
// Destroys every live bucket in |table| (when the value type needs
// destruction) and then frees the backing store through the Allocator.
976 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
977 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::deleteAllBucketsAndDeallocate(ValueType* table, unsigned size)
979 if (Traits::needsDestruction) {
980 for (unsigned i = 0; i < size; ++i) {
981 // This code is called when the hash table is cleared or
982 // resized. We have allocated a new backing store and we need
983 // to run the destructors on the old backing store, as it is
984 // being freed. If we are GCing we need to both call the
985 // destructor and mark the bucket as deleted, otherwise the
986 // destructor gets called again when the GC finds the backing
987 // store. With the default allocator it's enough to call the
988 // destructor, since we will free the memory explicitly and
989 // we won't see the memory with the bucket again.
990 if (!isEmptyOrDeletedBucket(table[i])) {
991 if (Allocator::isGarbageCollected)
992 deleteBucket(table[i]);
// NOTE(review): line 993 is missing from this extract; given the comment
// above it is presumably the else for the non-GC destructor-only path --
// confirm in the full file.
994 table[i].~ValueType();
998 Allocator::backingFree(table);
// Picks a new table size and rehashes into it: the minimum size for a fresh
// table, the same size when only deleted buckets need purging
// (mustRehashInPlace), otherwise double the current size. |entry| is threaded
// through rehash() so the caller gets its post-rehash address back.
1001 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1002 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::expand(Value* entry)
1006 newSize = KeyTraits::minimumTableSize;
1007 } else if (mustRehashInPlace()) {
1008 newSize = m_tableSize;
1010 newSize = m_tableSize * 2;
// Guards against unsigned overflow of the doubled size.
1011 RELEASE_ASSERT(newSize > m_tableSize);
// NOTE(review): the declaration of |newSize|, the empty-table condition, and
// the final else fall on lines omitted from this extract (1003-1005, 1009) --
// confirm in the full file.
1014 return rehash(newSize, entry);
// Allocates a new backing store of |newTableSize| buckets, reinserts every
// live bucket from the old store, and frees the old store. While reinserting
// it watches for |entry| (a caller-held bucket pointer) and records its new
// address so the caller's pointer stays usable after the move.
1017 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1018 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::rehash(unsigned newTableSize, Value* entry)
1020 unsigned oldTableSize = m_tableSize;
1021 ValueType* oldTable = m_table;
1023 #if DUMP_HASHTABLE_STATS
1024 if (oldTableSize != 0)
1025 atomicIncrement(&HashTableStats::numRehashes);
1028 #if DUMP_HASHTABLE_STATS_PER_TABLE
1029 if (oldTableSize != 0)
1030 ++m_stats->numRehashes;
// Install the new table before reinserting: reinsert() operates on m_table.
1033 m_table = allocateTable(newTableSize);
1034 m_tableSize = newTableSize;
1036 Value* newEntry = 0;
1037 for (unsigned i = 0; i != oldTableSize; ++i) {
1038 if (isEmptyOrDeletedBucket(oldTable[i])) {
// An empty/deleted bucket cannot be the caller's tracked entry.
1039 ASSERT(&oldTable[i] != entry);
1043 Value* reinsertedEntry = reinsert(oldTable[i]);
1044 if (&oldTable[i] == entry) {
1046 newEntry = reinsertedEntry;
1052 deleteAllBucketsAndDeallocate(oldTable, oldTableSize);
// NOTE(review): interior lines (e.g. the continue for the empty branch, the
// deleted-count reset, and the return of |newEntry|) are omitted from this
// extract -- confirm in the full file.
// Empties the table: invalidates iterators, then destroys every bucket and
// frees the backing store.
1057 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1058 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::clear()
1060 registerModification();
1064 deleteAllBucketsAndDeallocate(m_table, m_tableSize);
// NOTE(review): lines 1061-1063 and 1065-1067 are missing from this extract;
// presumably a !m_table early return precedes the deallocation and the member
// pointers/counters are reset after it -- confirm in the full file.
// Copy constructor: starts from a default-initialized state and re-adds every
// element of |other| one at a time.
1070 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1071 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::HashTable(const HashTable& other)
1076 , m_queueFlag(false)
1078 , m_modifications(0)
1080 #if DUMP_HASHTABLE_STATS_PER_TABLE
// Per-table statistics are deep-copied so the two tables track independently.
1081 , m_stats(adoptPtr(new Stats(*other.m_stats)))
1084 // Copy the hash table the dumb way, by adding each element to the new table.
1085 // It might be more efficient to copy the table slots, but it's not clear that efficiency is needed.
1086 const_iterator end = other.end();
1087 for (const_iterator it = other.begin(); it != end; ++it)
// NOTE(review): the loop body (line 1088, presumably add(*it)) and several
// initializer-list entries are omitted from this extract -- confirm in the
// full file.
// Swaps the complete state of two tables member by member. O(1): only
// pointers and counters move, never the elements themselves.
1091 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1092 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::swap(HashTable& other)
1094 std::swap(m_table, other.m_table);
1095 std::swap(m_tableSize, other.m_tableSize);
1096 std::swap(m_keyCount, other.m_keyCount);
1097 // std::swap does not work for bit fields.
1098 unsigned deleted = m_deletedCount;
1099 m_deletedCount = other.m_deletedCount;
1100 other.m_deletedCount = deleted;
// Neither table may be queued for weak-processing iteration while swapping.
1101 ASSERT(!m_queueFlag);
1102 ASSERT(!other.m_queueFlag);
1105 std::swap(m_modifications, other.m_modifications);
1108 #if DUMP_HASHTABLE_STATS_PER_TABLE
1109 m_stats.swap(other.m_stats);
// Copy assignment via the copy-and-swap idiom: copy-construct a temporary,
// then exchange state with it, so the old state is released when |tmp| dies.
1113 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1114 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>& HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::operator=(const HashTable& other)
1116 HashTable tmp(other);
// NOTE(review): the swap(tmp) and return *this lines (1117-1118) are omitted
// from this extract -- confirm in the full file.
// Primary template, specialized below on WeakHandlingFlag: bundles the GC
// visitor callbacks (process / ephemeronIteration / ephemeronIterationDone)
// for a given HashTable instantiation.
1121 template<WeakHandlingFlag weakHandlingFlag, typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1122 struct WeakProcessingHashTableHelper;
// Specialization for tables with no weak members: every callback is a no-op.
1124 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1125 struct WeakProcessingHashTableHelper<NoWeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
1126 static void process(typename Allocator::Visitor* visitor, void* closure) { }
1127 static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure) { }
1128 static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure) { }
// Specialization for tables containing weak members. |closure| is the
// HashTable itself, registered with the visitor in HashTable::trace() below.
1131 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1132 struct WeakProcessingHashTableHelper<WeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
1133 // Used for purely weak and for weak-and-strong tables (ephemerons).
1134 static void process(typename Allocator::Visitor* visitor, void* closure)
1136 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
1137 HashTableType* table = reinterpret_cast<HashTableType*>(closure);
1138 if (table->m_table) {
1139 // This just marks it live and does not push anything onto the
1141 Allocator::markNoTracing(visitor, table->m_table);
1142 // Now perform weak processing (this is a no-op if the backing
1143 // was accessible through an iterator and was already marked
1145 typedef typename HashTableType::ValueType ValueType;
1146 for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
1147 if (!HashTableType::isEmptyOrDeletedBucket(*element)) {
1148 // At this stage calling trace can make no difference
1149 // (everything is already traced), but we use the
1150 // return value to remove things from the collection.
1151 if (TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element)) {
1152 table->registerModification();
1153 HashTableType::deleteBucket(*element); // Also calls the destructor.
1154 table->m_deletedCount++;
1155 table->m_keyCount--;
1156 // We don't rehash the backing until the next add
1157 // or delete, because that would cause allocation
1165 // Called repeatedly for tables that have both weak and strong pointers.
1166 static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure)
1168 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
1169 HashTableType* table = reinterpret_cast<HashTableType*>(closure);
1170 if (table->m_table) {
1171 // Check the hash table for elements that we now know will not
1172 // be removed by weak processing. Those elements need to have
1173 // their strong pointers traced.
1174 typedef typename HashTableType::ValueType ValueType;
1175 for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
1176 if (!HashTableType::isEmptyOrDeletedBucket(*element))
1177 TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element);
1182 // Called when the ephemeron iteration is done and before running the per thread
1183 // weak processing. It is guaranteed to be called before any thread is resumed.
1184 static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure)
1186 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
1187 HashTableType* table = reinterpret_cast<HashTableType*>(closure);
1188 ASSERT(Allocator::weakTableRegistered(visitor, table));
// Clears the queued-for-ephemeron-iteration flag so the table can be
// re-enqueued on the next GC cycle.
1189 table->clearEnqueued();
// NOTE(review): several interior lines (closing braces and parts of comments)
// are omitted from this extract -- confirm structure in the full file.
// GC tracing entry point for the table. Marks the backing store and either
// traces elements strongly (no weak members) or registers the weak-processing
// and ephemeron callbacks defined in WeakProcessingHashTableHelper above.
1193 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1194 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::trace(typename Allocator::Visitor* visitor)
1196 // If someone else already marked the backing and queued up the trace
1197 // and/or weak callback then we are done. This optimization does not
1198 // happen for ListHashSet since its iterator does not point at the
1200 if (!m_table || visitor->isAlive(m_table))
1202 // Normally, we mark the backing store without performing trace. This
1203 // means it is marked live, but the pointers inside it are not marked.
1204 // Instead we will mark the pointers below. However, for backing
1205 // stores that contain weak pointers the handling is rather different.
1206 // We don't mark the backing store here, so the marking GC will leave
1207 // the backing unmarked. If the backing is found in any other way than
1208 // through its HashTable (ie from an iterator) then the mark bit will
1209 // be set and the pointers will be marked strongly, avoiding problems
1210 // with iterating over things that disappear due to weak processing
1211 // while we are iterating over them. The weakProcessing callback will
1212 // mark the backing as a void pointer, and will perform weak processing
1214 if (Traits::weakHandlingFlag == NoWeakHandlingInCollections)
1215 Allocator::markNoTracing(visitor, m_table);
1217 Allocator::registerWeakMembers(visitor, this, m_table, WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::process);
1218 if (ShouldBeTraced<Traits>::value) {
1219 if (Traits::weakHandlingFlag == WeakHandlingInCollections) {
1220 // If we have both strong and weak pointers in the collection
1221 // then we queue up the collection for fixed point iteration a
1223 // http://dl.acm.org/citation.cfm?doid=263698.263733 - see also
1224 // http://www.jucs.org/jucs_14_21/eliminating_cycles_in_weak
1225 ASSERT(!enqueued() || Allocator::weakTableRegistered(visitor, this));
1227 Allocator::registerWeakTable(visitor, this,
1228 WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIteration,
1229 WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIterationDone);
1232 // We don't need to trace the elements here, since registering
1233 // as a weak table above will cause them to be traced (perhaps
1234 // several times). It's better to wait until everything else is
1235 // traced before tracing the elements for the first time; this
1236 // may reduce (by one) the number of iterations needed to get
1237 // to a fixed point.
// Strong-only path: trace every live bucket directly.
1240 for (ValueType* element = m_table + m_tableSize - 1; element >= m_table; element--) {
1241 if (!isEmptyOrDeletedBucket(*element))
1242 Allocator::template trace<ValueType, Traits>(visitor, *element);
// NOTE(review): interior lines (e.g. the early return after the isAlive
// check, an else keyword, and closing braces) are omitted from this extract
// -- confirm in the full file.
1247 // iterator adapters
// Wraps a HashTable::const_iterator and converts the raw bucket value to the
// user-facing type via the Traits conversion hooks (used by HashMap/HashSet
// to expose pairs/values instead of raw buckets).
1249 template<typename HashTableType, typename Traits> struct HashTableConstIteratorAdapter {
1250 HashTableConstIteratorAdapter() {}
1251 HashTableConstIteratorAdapter(const typename HashTableType::const_iterator& impl) : m_impl(impl) {}
1252 typedef typename Traits::IteratorConstGetType GetType;
1253 typedef typename HashTableType::ValueTraits::IteratorConstGetType SourceGetType;
// get() bridges the table's storage-level pointer type to the adapter's
// user-level pointer type.
1255 GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
1256 typename Traits::IteratorConstReferenceType operator*() const { return Traits::getToReferenceConstConversion(get()); }
1257 GetType operator->() const { return get(); }
1259 HashTableConstIteratorAdapter& operator++() { ++m_impl; return *this; }
1260 // postfix ++ intentionally omitted
1262 typename HashTableType::const_iterator m_impl;
// Mutable-iterator counterpart of HashTableConstIteratorAdapter; additionally
// convertible to the const adapter (mirroring iterator -> const_iterator).
1265 template<typename HashTableType, typename Traits> struct HashTableIteratorAdapter {
1266 typedef typename Traits::IteratorGetType GetType;
1267 typedef typename HashTableType::ValueTraits::IteratorGetType SourceGetType;
1269 HashTableIteratorAdapter() {}
1270 HashTableIteratorAdapter(const typename HashTableType::iterator& impl) : m_impl(impl) {}
1272 GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
1273 typename Traits::IteratorReferenceType operator*() const { return Traits::getToReferenceConversion(get()); }
1274 GetType operator->() const { return get(); }
1276 HashTableIteratorAdapter& operator++() { ++m_impl; return *this; }
1277 // postfix ++ intentionally omitted
// Implicit conversion to the const adapter, built from the underlying
// iterator's own const conversion.
1279 operator HashTableConstIteratorAdapter<HashTableType, Traits>()
1281 typename HashTableType::const_iterator i = m_impl;
1285 typename HashTableType::iterator m_impl;
// NOTE(review): the conversion operator's return statement (line 1282) is
// omitted from this extract; presumably it returns the converted iterator --
// confirm in the full file.
// Equality/inequality for same-kind adapters: delegate to the wrapped
// HashTable iterators.
1288 template<typename T, typename U>
1289 inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
1291 return a.m_impl == b.m_impl;
1294 template<typename T, typename U>
1295 inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
1297 return a.m_impl != b.m_impl;
1300 template<typename T, typename U>
1301 inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
1303 return a.m_impl == b.m_impl;
1306 template<typename T, typename U>
1307 inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
1309 return a.m_impl != b.m_impl;
// Mixed const/non-const comparisons; the non-const m_impl converts implicitly
// to a const_iterator for the comparison.
1312 // All 4 combinations of ==, != and Const,non const.
1313 template<typename T, typename U>
1314 inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
1316 return a.m_impl == b.m_impl;
1319 template<typename T, typename U>
1320 inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
1322 return a.m_impl != b.m_impl;
1325 template<typename T, typename U>
1326 inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
1328 return a.m_impl == b.m_impl;
1331 template<typename T, typename U>
1332 inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
1334 return a.m_impl != b.m_impl;
// Removes every element of |toBeRemoved| from |collection|. Early-outs when
// either side is empty so no iterators are constructed needlessly.
1337 template<typename Collection1, typename Collection2>
1338 inline void removeAll(Collection1& collection, const Collection2& toBeRemoved)
1340 if (collection.isEmpty() || toBeRemoved.isEmpty())
1342 typedef typename Collection2::const_iterator CollectionIterator;
1343 CollectionIterator end(toBeRemoved.end());
1344 for (CollectionIterator it(toBeRemoved.begin()); it != end; ++it)
1345 collection.remove(*it);
1350 #include "wtf/HashIterators.h"
1352 #endif // WTF_HashTable_h