2 * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights reserved.
3 * Copyright (C) 2008 David Levin <levin@chromium.org>
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details.
13 * You should have received a copy of the GNU Library General Public License
14 * along with this library; see the file COPYING.LIB. If not, write to
15 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
16 * Boston, MA 02110-1301, USA.
20 #ifndef WTF_HashTable_h
21 #define WTF_HashTable_h
23 #include "wtf/Alignment.h"
24 #include "wtf/Assertions.h"
25 #include "wtf/DefaultAllocator.h"
26 #include "wtf/HashTraits.h"
// Compile-time switches for hash table statistics gathering. Both default to
// off; flip to 1 locally when diagnosing probe/collision behavior.
29 #define DUMP_HASHTABLE_STATS 0
30 #define DUMP_HASHTABLE_STATS_PER_TABLE 0
32 #if DUMP_HASHTABLE_STATS_PER_TABLE
33 #include "wtf/DataLog.h"
// UPDATE_PROBE_COUNTS / UPDATE_ACCESS_COUNTS expand to the bookkeeping
// appropriate for the enabled combination of the two switches above, and to
// no-ops when both are off.
// NOTE(review): several #else/#endif lines appear to be missing from this
// excerpt - verify the conditional structure against the full header.
36 #if DUMP_HASHTABLE_STATS
37 #if DUMP_HASHTABLE_STATS_PER_TABLE
// Both global and per-table stats: record the probe globally and in m_stats.
38 #define UPDATE_PROBE_COUNTS() \
40 HashTableStats::recordCollisionAtCount(probeCount); \
41 ++perTableProbeCount; \
42 m_stats->recordCollisionAtCount(perTableProbeCount)
43 #define UPDATE_ACCESS_COUNTS() \
44 atomicIncrement(&HashTableStats::numAccesses); \
46 ++m_stats->numAccesses; \
47 int perTableProbeCount = 0
// Global stats only.
49 #define UPDATE_PROBE_COUNTS() \
51 HashTableStats::recordCollisionAtCount(probeCount)
52 #define UPDATE_ACCESS_COUNTS() \
53 atomicIncrement(&HashTableStats::numAccesses); \
57 #if DUMP_HASHTABLE_STATS_PER_TABLE
// Per-table stats only.
58 #define UPDATE_PROBE_COUNTS() \
59 ++perTableProbeCount; \
60 m_stats->recordCollisionAtCount(perTableProbeCount)
61 #define UPDATE_ACCESS_COUNTS() \
62 ++m_stats->numAccesses; \
63 int perTableProbeCount = 0
// Stats disabled: both macros are no-ops.
65 #define UPDATE_PROBE_COUNTS() do { } while (0)
66 #define UPDATE_ACCESS_COUNTS() do { } while (0)
72 #if DUMP_HASHTABLE_STATS
// Process-wide counters describing hash table behavior across all tables.
// Populated by the UPDATE_* macros above; printed by dumpStats().
74 struct HashTableStats {
75 // The following variables are all atomically incremented when modified.
76 static int numAccesses;
77 static int numRehashes;
78 static int numRemoves;
79 static int numReinserts;
81 // The following variables are only modified in the recordCollisionAtCount method within a mutex.
82 static int maxCollisions;
83 static int numCollisions;
// Histogram: collisionGraph[i] presumably counts lookups with i collisions -
// confirm against recordCollisionAtCount's definition.
84 static int collisionGraph[4096];
86 static void recordCollisionAtCount(int count);
87 static void dumpStats();
// Forward declarations so the iterator classes and helpers below can refer to
// each other (and to HashTable) before their full definitions.
92 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
94 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
95 class HashTableIterator;
96 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
97 class HashTableConstIterator;
98 template<typename Value, typename HashFunctions, typename HashTraits, typename Allocator>
100 template<WeakHandlingFlag x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z>
101 struct WeakProcessingHashTableHelper;
// Tag type passed to iterator constructors when the target slot is already
// known to hold a live entry, so empty/deleted-bucket skipping can be skipped.
103 typedef enum { HashItemKnownGood } HashItemKnownGoodTag;
// Const iterator over a HashTable's backing array. Walks raw bucket pointers
// from m_position to m_endPosition, skipping empty and deleted buckets, and
// asserts (in debug) that the container was not modified while iterating.
105 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
106 class HashTableConstIterator {
108 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
109 typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
110 typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
111 typedef Value ValueType;
112 typedef typename Traits::IteratorConstGetType GetType;
113 typedef const ValueType* PointerType;
115 friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;
116 friend class HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;
// Advance m_position past buckets that hold no live entry.
118 void skipEmptyBuckets()
120 while (m_position != m_endPosition && HashTableType::isEmptyOrDeletedBucket(*m_position))
// Constructor for an arbitrary position: the pointed-at bucket may be empty
// or deleted, so the body (not visible here) presumably skips to the first
// live bucket - confirm against the full header.
124 HashTableConstIterator(PointerType position, PointerType endPosition, const HashTableType* container)
125 : m_position(position)
126 , m_endPosition(endPosition)
128 , m_container(container)
129 , m_containerModifications(container->modifications())
// Constructor for a slot already known good (see HashItemKnownGoodTag):
// no bucket skipping is needed.
135 HashTableConstIterator(PointerType position, PointerType endPosition, const HashTableType* container, HashItemKnownGoodTag)
136 : m_position(position)
137 , m_endPosition(endPosition)
139 , m_container(container)
140 , m_containerModifications(container->modifications())
143 ASSERT(m_containerModifications == m_container->modifications());
146 void checkModifications() const
148 // HashTable and collections that build on it do not support
149 // modifications while there is an iterator in use. The exception
150 // is ListHashSet, which has its own iterators that tolerate
151 // modification of the underlying set.
152 ASSERT(m_containerModifications == m_container->modifications());
156 HashTableConstIterator()
162 checkModifications();
165 typename Traits::IteratorConstReferenceType operator*() const { return Traits::getToReferenceConstConversion(get()); }
166 GetType operator->() const { return get(); }
168 const_iterator& operator++()
170 ASSERT(m_position != m_endPosition);
171 checkModifications();
177 // postfix ++ intentionally omitted
// Comparison is by bucket position only; the container/modification-count
// fields do not participate.
180 bool operator==(const const_iterator& other) const
182 return m_position == other.m_position;
184 bool operator!=(const const_iterator& other) const
186 return m_position != other.m_position;
188 bool operator==(const iterator& other) const
190 return *this == static_cast<const_iterator>(other);
192 bool operator!=(const iterator& other) const
194 return *this != static_cast<const_iterator>(other);
// Current bucket and one-past-the-end of the backing array.
198 PointerType m_position;
199 PointerType m_endPosition;
// Modification count snapshot taken at construction, checked on each access.
201 const HashTableType* m_container;
202 int64_t m_containerModifications;
// Mutable iterator: a thin adapter over HashTableConstIterator that
// const_casts the stored pointer back to a mutable GetType on access.
206 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
207 class HashTableIterator {
209 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
210 typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
211 typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
212 typedef Value ValueType;
213 typedef typename Traits::IteratorGetType GetType;
214 typedef ValueType* PointerType;
216 friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;
// Both constructors simply delegate to the wrapped const_iterator.
218 HashTableIterator(PointerType pos, PointerType end, const HashTableType* container) : m_iterator(pos, end, container) { }
219 HashTableIterator(PointerType pos, PointerType end, const HashTableType* container, HashItemKnownGoodTag tag) : m_iterator(pos, end, container, tag) { }
222 HashTableIterator() { }
224 // default copy, assignment and destructor are OK
// const_cast is safe here: a mutable iterator can only be obtained from a
// non-const HashTable.
226 GetType get() const { return const_cast<GetType>(m_iterator.get()); }
227 typename Traits::IteratorReferenceType operator*() const { return Traits::getToReferenceConversion(get()); }
228 GetType operator->() const { return get(); }
230 iterator& operator++() { ++m_iterator; return *this; }
232 // postfix ++ intentionally omitted
235 bool operator==(const iterator& other) const { return m_iterator == other.m_iterator; }
236 bool operator!=(const iterator& other) const { return m_iterator != other.m_iterator; }
237 bool operator==(const const_iterator& other) const { return m_iterator == other; }
238 bool operator!=(const const_iterator& other) const { return m_iterator != other; }
// Implicit conversion to const_iterator exposes the wrapped iterator.
240 operator const_iterator() const { return m_iterator; }
243 const_iterator m_iterator;
248 // Work around MSVC's standard library, whose swap for pairs does not swap by component.
249 template<typename T> inline void hashTableSwap(T& a, T& b)
// KeyValuePair overload: swap key and value individually so each component's
// own swap (if any) is used.
254 template<typename T, typename U> inline void hashTableSwap(KeyValuePair<T, U>& a, KeyValuePair<T, U>& b)
257 swap(a.value, b.value);
// Moves an entry between backing stores during rehash. The useSwap flag
// selects swapping (for types needing destruction) versus plain assignment.
260 template<typename T, typename Allocator, bool useSwap> struct Mover;
261 template<typename T, typename Allocator> struct Mover<T, Allocator, true> {
262 static void move(T& from, T& to)
264 // A swap operation should not normally allocate, but it may do so
265 // if it is falling back on some sort of triple assignment in the
266 // style of t = a; a = b; b = t because there is no overloaded swap
267 // operation. We can't allow allocation both because it is slower
268 // than a true swap operation, but also because allocation implies
269 // allowing GC: We cannot allow a GC after swapping only the key.
270 // The value is only traced if the key is present and therefore the
271 // GC will not see the value in the old backing if the key has been
272 // moved to the new backing. Therefore, we cannot allow GC until
273 // after both key and value have been moved.
274 Allocator::enterNoAllocationScope();
275 hashTableSwap(from, to);
276 Allocator::leaveNoAllocationScope();
// Trivial case: a plain copy-assignment is sufficient.
279 template<typename T, typename Allocator> struct Mover<T, Allocator, false> {
280 static void move(T& from, T& to) { to = from; }
// Default translator: the lookup key IS the stored key, so hashing and
// equality forward straight to HashFunctions and translate() stores the value.
283 template<typename HashFunctions> class IdentityHashTranslator {
285 template<typename T> static unsigned hash(const T& key) { return HashFunctions::hash(key); }
286 template<typename T, typename U> static bool equal(const T& a, const U& b) { return HashFunctions::equal(a, b); }
287 template<typename T, typename U, typename V> static void translate(T& location, const U&, const V& value) { location = value; }
// Result of HashTable::add(): a pointer to the stored entry plus whether the
// entry was newly inserted. With SECURITY_ASSERT enabled it also snapshots
// the table's modification count to detect use of storedValue after a rehash.
290 template<typename HashTableType, typename ValueType> struct HashTableAddResult {
291 HashTableAddResult(const HashTableType* container, ValueType* storedValue, bool isNewEntry)
292 : storedValue(storedValue)
293 , isNewEntry(isNewEntry)
294 #if ENABLE(SECURITY_ASSERT)
295 , m_container(container)
296 , m_containerModifications(container->modifications())
// container is only read by the assert variants; silence unused warnings.
299 ASSERT_UNUSED(container, container);
302 ~HashTableAddResult()
304 // If rehash happened before accessing storedValue, it's
305 // use-after-free. Any modification may cause a rehash, so we check
306 // for modifications here.
307 // Rehash after accessing storedValue is harmless but will assert if
308 // the AddResult destructor takes place after a modification. You
309 // may need to limit the scope of the AddResult.
310 ASSERT_WITH_SECURITY_IMPLICATION(m_containerModifications == m_container->modifications());
313 ValueType* storedValue;
316 #if ENABLE(SECURITY_ASSERT)
318 const HashTableType* m_container;
319 const int64_t m_containerModifications;
// Classifies buckets as empty / deleted / live by inspecting the extracted
// key through KeyTraits. Shared by HashTable and its iterators.
323 template<typename Value, typename Extractor, typename KeyTraits>
324 struct HashTableHelper {
325 static bool isEmptyBucket(const Value& value) { return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); }
326 static bool isDeletedBucket(const Value& value) { return KeyTraits::isDeletedValue(Extractor::extract(value)); }
327 static bool isEmptyOrDeletedBucket(const Value& value) { return isEmptyBucket(value) || isDeletedBucket(value); }
// Debug sanity check: a lookup key must never compare equal to the traits'
// empty value. Only enforceable when safeToCompareToEmptyOrDeleted is true.
330 template<typename HashTranslator, typename KeyTraits, bool safeToCompareToEmptyOrDeleted>
331 struct HashTableKeyChecker {
332 // There's no simple generic way to make this check if safeToCompareToEmptyOrDeleted is false,
333 // so the check always passes.
334 template <typename T>
335 static bool checkKey(const T&) { return true; }
338 template<typename HashTranslator, typename KeyTraits>
339 struct HashTableKeyChecker<HashTranslator, KeyTraits, true> {
340 template <typename T>
341 static bool checkKey(const T& key)
343 // FIXME : Check also equality to the deleted value.
344 return !HashTranslator::equal(KeyTraits::emptyValue(), key);
348 // Don't declare a destructor for HeapAllocated hash tables.
// CRTP base: for non-garbage-collected tables the destructor calls the
// derived class's finalize(); for GC'd tables it is intentionally absent so
// the heap can reclaim the backing without running a destructor.
349 template<typename Derived, bool isGarbageCollected>
350 class HashTableDestructorBase;
352 template<typename Derived>
353 class HashTableDestructorBase<Derived, true> { };
355 template<typename Derived>
356 class HashTableDestructorBase<Derived, false> {
358 ~HashTableDestructorBase() { static_cast<Derived*>(this)->finalize(); }
361 // Note: empty or deleted key values are not allowed, using them may lead to undefined behavior.
362 // For pointer keys this means that null pointers are not allowed unless you supply custom key traits.
// Open-addressed hash table parameterized over key extraction, hashing,
// traits and allocation policy. This is the engine behind WTF::HashMap,
// HashSet, etc.
// NOTE(review): access specifiers (public/private) are not visible in this
// excerpt - consult the full header for member visibility.
363 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
364 class HashTable : public HashTableDestructorBase<HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>, Allocator::isGarbageCollected> {
366 typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
367 typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
368 typedef Traits ValueTraits;
370 typedef typename KeyTraits::PeekInType KeyPeekInType;
371 typedef typename KeyTraits::PassInType KeyPassInType;
372 typedef Value ValueType;
373 typedef Extractor ExtractorType;
374 typedef KeyTraits KeyTraitsType;
375 typedef typename Traits::PassInType ValuePassInType;
376 typedef IdentityHashTranslator<HashFunctions> IdentityTranslatorType;
377 typedef HashTableAddResult<HashTable, ValueType> AddResult;
// Per-table mirror of HashTableStats; only compiled in when
// DUMP_HASHTABLE_STATS_PER_TABLE is enabled.
379 #if DUMP_HASHTABLE_STATS_PER_TABLE
399 int collisionGraph[4096];
401 void recordCollisionAtCount(int count)
403 if (count > maxCollisions)
404 maxCollisions = count;
406 collisionGraph[count]++;
411 dataLogF("\nWTF::HashTable::Stats dump\n\n");
412 dataLogF("%d accesses\n", numAccesses);
413 dataLogF("%d total collisions, average %.2f probes per access\n", numCollisions, 1.0 * (numAccesses + numCollisions) / numAccesses);
414 dataLogF("longest collision chain: %d\n", maxCollisions);
415 for (int i = 1; i <= maxCollisions; i++) {
416 dataLogF("    %d lookups with exactly %d collisions (%.2f%% , %.2f%% with this many or more)\n", collisionGraph[i], i, 100.0 * (collisionGraph[i] - collisionGraph[i+1]) / numAccesses, 100.0 * collisionGraph[i] / numAccesses);
418 dataLogF("%d rehashes\n", numRehashes);
419 dataLogF("%d reinserts\n", numReinserts);
// NOTE(review): the lines below belong to finalize(), whose signature is not
// visible in this excerpt. It frees the backing store and is only meaningful
// for non-garbage-collected allocators.
427 ASSERT(!Allocator::isGarbageCollected);
428 if (LIKELY(!m_table))
430 deleteAllBucketsAndDeallocate(m_table, m_tableSize);
434 HashTable(const HashTable&);
435 void swap(HashTable&);
436 HashTable& operator=(const HashTable&);
438 // When the hash table is empty, just return the same iterator for end as for begin.
439 // This is more efficient because we don't have to skip all the empty and deleted
440 // buckets, and iterating an empty table is a common case that's worth optimizing.
441 iterator begin() { return isEmpty() ? end() : makeIterator(m_table); }
442 iterator end() { return makeKnownGoodIterator(m_table + m_tableSize); }
443 const_iterator begin() const { return isEmpty() ? end() : makeConstIterator(m_table); }
444 const_iterator end() const { return makeKnownGoodConstIterator(m_table + m_tableSize); }
// size() counts live entries; capacity() is the backing array's bucket count.
446 unsigned size() const { return m_keyCount; }
447 unsigned capacity() const { return m_tableSize; }
448 bool isEmpty() const { return !m_keyCount; }
450 AddResult add(ValuePassInType value)
452 return add<IdentityTranslatorType>(Extractor::extract(value), value);
455 // A special version of add() that finds the object by hashing and comparing
456 // with some other type, to avoid the cost of type conversion if the object is already
458 template<typename HashTranslator, typename T, typename Extra> AddResult add(const T& key, const Extra&);
459 template<typename HashTranslator, typename T, typename Extra> AddResult addPassingHashCode(const T& key, const Extra&);
461 iterator find(KeyPeekInType key) { return find<IdentityTranslatorType>(key); }
462 const_iterator find(KeyPeekInType key) const { return find<IdentityTranslatorType>(key); }
463 bool contains(KeyPeekInType key) const { return contains<IdentityTranslatorType>(key); }
// Heterogeneous-key variants: look up by any type a HashTranslator supports.
465 template<typename HashTranslator, typename T> iterator find(const T&);
466 template<typename HashTranslator, typename T> const_iterator find(const T&) const;
467 template<typename HashTranslator, typename T> bool contains(const T&) const;
469 void remove(KeyPeekInType);
470 void remove(iterator);
471 void remove(const_iterator);
474 static bool isEmptyBucket(const ValueType& value) { return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); }
475 static bool isDeletedBucket(const ValueType& value) { return KeyTraits::isDeletedValue(Extractor::extract(value)); }
476 static bool isEmptyOrDeletedBucket(const ValueType& value) { return HashTableHelper<ValueType, Extractor, KeyTraits>:: isEmptyOrDeletedBucket(value); }
478 ValueType* lookup(KeyPeekInType key) { return lookup<IdentityTranslatorType, KeyPeekInType>(key); }
479 template<typename HashTranslator, typename T> ValueType* lookup(T);
480 template<typename HashTranslator, typename T> const ValueType* lookup(T) const;
482 void trace(typename Allocator::Visitor*);
// Modification tracking backs the iterator-invalidation asserts; it is
// presumably compiled out in release builds (the #if/#else lines are not
// visible in this excerpt).
485 int64_t modifications() const { return m_modifications; }
486 void registerModification() { m_modifications++; }
487 // HashTable and collections that build on it do not support
488 // modifications while there is an iterator in use. The exception is
489 // ListHashSet, which has its own iterators that tolerate modification
490 // of the underlying set.
491 void checkModifications(int64_t mods) const { ASSERT(mods == m_modifications); }
493 int64_t modifications() const { return 0; }
494 void registerModification() { }
495 void checkModifications(int64_t mods) const { }
499 static ValueType* allocateTable(unsigned size);
500 static void deleteAllBucketsAndDeallocate(ValueType* table, unsigned size);
// LookupType pairs the found/target slot with a was-found flag;
// FullLookupType additionally carries the computed hash.
502 typedef std::pair<ValueType*, bool> LookupType;
503 typedef std::pair<LookupType, unsigned> FullLookupType;
505 LookupType lookupForWriting(const Key& key) { return lookupForWriting<IdentityTranslatorType>(key); };
506 template<typename HashTranslator, typename T> FullLookupType fullLookupForWriting(const T&);
507 template<typename HashTranslator, typename T> LookupType lookupForWriting(const T&);
509 void remove(ValueType*);
// Load-factor policy: expand when (live + deleted) * 2 >= capacity;
// shrink when live * 6 < capacity (see m_maxLoad / m_minLoad below).
511 bool shouldExpand() const { return (m_keyCount + m_deletedCount) * m_maxLoad >= m_tableSize; }
512 bool mustRehashInPlace() const { return m_keyCount * m_minLoad < m_tableSize * 2; }
513 bool shouldShrink() const
515 // isAllocationAllowed check should be at the last because it's
517 return m_keyCount * m_minLoad < m_tableSize
518 && m_tableSize > KeyTraits::minimumTableSize
519 && Allocator::isAllocationAllowed();
521 ValueType* expand(ValueType* entry = 0);
522 void shrink() { rehash(m_tableSize / 2, 0); }
524 ValueType* rehash(unsigned newTableSize, ValueType* entry);
525 ValueType* reinsert(ValueType&);
527 static void initializeBucket(ValueType& bucket);
// Deleting a bucket destroys the value and marks the slot with the traits'
// deleted value so probe chains remain intact.
528 static void deleteBucket(ValueType& bucket) { bucket.~ValueType(); Traits::constructDeletedValue(bucket, Allocator::isGarbageCollected); }
530 FullLookupType makeLookupResult(ValueType* position, bool found, unsigned hash)
531 { return FullLookupType(LookupType(position, found), hash); }
533 iterator makeIterator(ValueType* pos) { return iterator(pos, m_table + m_tableSize, this); }
534 const_iterator makeConstIterator(ValueType* pos) const { return const_iterator(pos, m_table + m_tableSize, this); }
535 iterator makeKnownGoodIterator(ValueType* pos) { return iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }
536 const_iterator makeKnownGoodConstIterator(ValueType* pos) const { return const_iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }
// Load factor constants used by shouldExpand()/shouldShrink() above.
538 static const unsigned m_maxLoad = 2;
539 static const unsigned m_minLoad = 6;
// Capacity is always a power of two, so (size - 1) is a valid bit mask.
541 unsigned tableSizeMask() const
543 size_t mask = m_tableSize - 1;
544 ASSERT((mask & m_tableSize) == 0);
// Queue flag - presumably used by weak processing / LinkedHashSet support;
// confirm against the full header.
548 void setEnqueued() { m_queueFlag = true; }
549 void clearEnqueued() { m_queueFlag = false; }
550 bool enqueued() { return m_queueFlag; }
553 unsigned m_tableSize;
// Bit-field: deleted-bucket count packed alongside other flags.
555 unsigned m_deletedCount:31;
558 unsigned m_modifications;
561 #if DUMP_HASHTABLE_STATS_PER_TABLE
563 mutable OwnPtr<Stats> m_stats;
566 template<WeakHandlingFlag x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z> friend struct WeakProcessingHashTableHelper;
567 template<typename T, typename U, typename V, typename W> friend class LinkedHashSet;
570 // Set all the bits to one after the most significant bit: 00110101010 -> 00111111111.
571 template<unsigned size> struct OneifyLowBits;
573 struct OneifyLowBits<0> {
574 static const unsigned value = 0;
// Recursive OR-fold of right shifts smears the top set bit downward.
576 template<unsigned number>
577 struct OneifyLowBits {
578 static const unsigned value = number | OneifyLowBits<(number >> 1)>::value;
580 // Compute the first power of two integer that is an upper bound of the parameter 'number'.
581 template<unsigned number>
582 struct UpperPowerOfTwoBound {
583 static const unsigned value = (OneifyLowBits<number - 1>::value + 1) * 2;
586 // Because power of two numbers are the limit of maxLoad, their capacity is twice the
587 // UpperPowerOfTwoBound, or 4 times their values.
588 template<unsigned size, bool isPowerOfTwo> struct HashTableCapacityForSizeSplitter;
589 template<unsigned size>
590 struct HashTableCapacityForSizeSplitter<size, true> {
591 static const unsigned value = size * 4;
593 template<unsigned size>
594 struct HashTableCapacityForSizeSplitter<size, false> {
595 static const unsigned value = UpperPowerOfTwoBound<size>::value;
598 // HashTableCapacityForSize computes the upper power of two capacity to hold the size parameter.
599 // This is done at compile time to initialize the HashTraits.
600 template<unsigned size>
601 struct HashTableCapacityForSize {
// The !(size & (size - 1)) test selects the power-of-two specialization.
602 static const unsigned value = HashTableCapacityForSizeSplitter<size, !(size & (size - 1))>::value;
603 COMPILE_ASSERT(size > 0, HashTableNonZeroMinimumCapacity);
604 COMPILE_ASSERT(!static_cast<int>(value >> 31), HashTableNoCapacityOverflow);
605 COMPILE_ASSERT(value > (2 * size), HashTableCapacityHoldsContentSize);
// Default constructor. NOTE(review): the member initializer list is mostly
// missing from this excerpt; presumably it zero-initializes the table
// pointer, sizes and counters - confirm against the full header.
608 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
609 inline HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::HashTable()
618 #if DUMP_HASHTABLE_STATS_PER_TABLE
619 , m_stats(adoptPtr(new Stats))
// Secondary hash used to derive the probe step for double hashing (callers
// compute k = 1 | doubleHash(h)). NOTE(review): only the first mixing step of
// the function body is visible in this excerpt.
624 inline unsigned doubleHash(unsigned key)
626 key = ~key + (key >> 23);
// Non-const lookup: delegates to the const overload and casts away constness,
// avoiding duplication of the probe loop.
634 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
635 template<typename HashTranslator, typename T>
636 inline Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key)
638 return const_cast<Value*>(const_cast<const HashTable*>(this)->lookup<HashTranslator, T>(key));
// Read-only probe loop: starts at h & sizeMask and advances by a double-hash
// step until the key or an empty bucket is found. Returns the entry pointer
// or (presumably, in lines not visible here) null on miss.
641 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
642 template<typename HashTranslator, typename T>
643 inline const Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key) const
645 ASSERT((HashTableKeyChecker<HashTranslator, KeyTraits, HashFunctions::safeToCompareToEmptyOrDeleted>::checkKey(key)));
646 const ValueType* table = m_table;
651 size_t sizeMask = tableSizeMask();
652 unsigned h = HashTranslator::hash(key);
653 size_t i = h & sizeMask;
655 UPDATE_ACCESS_COUNTS();
658 const ValueType* entry = table + i;
// When comparing against empty/deleted slots is safe, test equality first
// (the common case); otherwise an empty/deleted check must precede it.
660 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
661 if (HashTranslator::equal(Extractor::extract(*entry), key))
664 if (isEmptyBucket(*entry))
667 if (isEmptyBucket(*entry))
670 if (!isDeletedBucket(*entry) && HashTranslator::equal(Extractor::extract(*entry), key))
673 UPDATE_PROBE_COUNTS();
// Probe step is odd (1 | ...) so it is coprime with the power-of-two table
// size and the probe sequence visits every bucket.
675 k = 1 | doubleHash(h);
676 i = (i + k) & sizeMask;
// Probe loop for insertion: like lookup(), but remembers the first deleted
// bucket seen so a miss can reuse it. Returns (slot, found).
680 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
681 template<typename HashTranslator, typename T>
682 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::LookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookupForWriting(const T& key)
// Counted as a modification because the returned slot may be written to.
685 registerModification();
687 ValueType* table = m_table;
689 size_t sizeMask = tableSizeMask();
690 unsigned h = HashTranslator::hash(key);
691 size_t i = h & sizeMask;
693 UPDATE_ACCESS_COUNTS();
695 ValueType* deletedEntry = 0;
698 ValueType* entry = table + i;
// Empty bucket terminates the probe chain: report a miss, preferring a
// previously-seen deleted slot as the insertion point.
700 if (isEmptyBucket(*entry))
701 return LookupType(deletedEntry ? deletedEntry : entry, false);
703 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
704 if (HashTranslator::equal(Extractor::extract(*entry), key))
705 return LookupType(entry, true);
707 if (isDeletedBucket(*entry))
708 deletedEntry = entry;
710 if (isDeletedBucket(*entry))
711 deletedEntry = entry;
712 else if (HashTranslator::equal(Extractor::extract(*entry), key))
713 return LookupType(entry, true);
715 UPDATE_PROBE_COUNTS();
717 k = 1 | doubleHash(h);
718 i = (i + k) & sizeMask;
// Same probe loop as lookupForWriting(), but also returns the computed hash
// (FullLookupType) so addPassingHashCode() can hand it to the translator.
722 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
723 template<typename HashTranslator, typename T>
724 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::FullLookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::fullLookupForWriting(const T& key)
727 registerModification();
729 ValueType* table = m_table;
731 size_t sizeMask = tableSizeMask();
732 unsigned h = HashTranslator::hash(key);
733 size_t i = h & sizeMask;
735 UPDATE_ACCESS_COUNTS();
737 ValueType* deletedEntry = 0;
740 ValueType* entry = table + i;
// Miss: hand back the first reusable (deleted) slot if one was seen.
742 if (isEmptyBucket(*entry))
743 return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h);
745 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
746 if (HashTranslator::equal(Extractor::extract(*entry), key))
747 return makeLookupResult(entry, true, h);
749 if (isDeletedBucket(*entry))
750 deletedEntry = entry;
752 if (isDeletedBucket(*entry))
753 deletedEntry = entry;
754 else if (HashTranslator::equal(Extractor::extract(*entry), key))
755 return makeLookupResult(entry, true, h);
757 UPDATE_PROBE_COUNTS();
759 k = 1 | doubleHash(h);
760 i = (i + k) & sizeMask;
// Initializes a bucket to the traits' empty value. Split on emptyValueIsZero
// so all-zero empty values can be produced by memset instead of copying.
764 template<bool emptyValueIsZero> struct HashTableBucketInitializer;
766 template<> struct HashTableBucketInitializer<false> {
767 template<typename Traits, typename Value> static void initialize(Value& bucket)
// Placement-new copy of the traits' empty value into the (NotNull) slot.
769 new (NotNull, &bucket) Value(Traits::emptyValue());
773 template<> struct HashTableBucketInitializer<true> {
774 template<typename Traits, typename Value> static void initialize(Value& bucket)
776 // This initializes the bucket without copying the empty value.
777 // That makes it possible to use this with types that don't support copying.
778 // The memset to 0 looks like a slow operation but is optimized by the compilers.
779 memset(&bucket, 0, sizeof(bucket));
// Resets one bucket to the empty value inside a no-allocation scope so GC
// cannot observe a half-initialized slot.
783 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
784 inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::initializeBucket(ValueType& bucket)
786 // For hash maps the key and value cannot be initialized simultaneously,
787 // and it would be wrong to have a GC when only one was initialized and
788 // the other still contained garbage (e.g. from a previous use of the
789 // same slot). Therefore we forbid allocation (and thus GC) while the
790 // slot is initialized to an empty value.
791 Allocator::enterNoAllocationScope();
792 HashTableBucketInitializer<Traits::emptyValueIsZero>::template initialize<Traits>(bucket);
793 Allocator::leaveNoAllocationScope();
// Inserts (key, extra) via the given HashTranslator. Inlines its own probe
// loop (rather than calling lookupForWriting) so the common hit path stays
// cheap. Returns AddResult{entry, isNewEntry}; expands afterwards if the
// load factor demands it.
796 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
797 template<typename HashTranslator, typename T, typename Extra>
798 typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::add(const T& key, const Extra& extra)
800 ASSERT(Allocator::isAllocationAllowed());
806 ValueType* table = m_table;
808 size_t sizeMask = tableSizeMask();
809 unsigned h = HashTranslator::hash(key);
810 size_t i = h & sizeMask;
812 UPDATE_ACCESS_COUNTS();
// First deleted bucket seen along the probe chain, reusable on a miss.
814 ValueType* deletedEntry = 0;
819 if (isEmptyBucket(*entry))
822 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
// Key already present: report the existing entry, no insertion.
823 if (HashTranslator::equal(Extractor::extract(*entry), key))
824 return AddResult(this, entry, false);
826 if (isDeletedBucket(*entry))
827 deletedEntry = entry;
829 if (isDeletedBucket(*entry))
830 deletedEntry = entry;
831 else if (HashTranslator::equal(Extractor::extract(*entry), key))
832 return AddResult(this, entry, false);
834 UPDATE_PROBE_COUNTS();
836 k = 1 | doubleHash(h);
837 i = (i + k) & sizeMask;
// Miss: we are about to mutate the table.
840 registerModification();
843 // Overwrite any data left over from last use, using placement new
845 initializeBucket(*deletedEntry);
846 entry = deletedEntry;
850 HashTranslator::translate(*entry, key, extra);
851 ASSERT(!isEmptyOrDeletedBucket(*entry));
// Expanding may rehash and move the entry; expand() returns its new address.
856 entry = expand(entry);
858 return AddResult(this, entry, true);
// Variant of add() for translators whose translate() wants the hash code
// (e.g. to cache it in the stored entry). Uses fullLookupForWriting() so the
// hash computed during probing is reused rather than recomputed.
861 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
862 template<typename HashTranslator, typename T, typename Extra>
863 typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::addPassingHashCode(const T& key, const Extra& extra)
865 ASSERT(Allocator::isAllocationAllowed());
869 FullLookupType lookupResult = fullLookupForWriting<HashTranslator>(key);
// Unpack (slot, found) plus the hash from the nested pair.
871 ValueType* entry = lookupResult.first.first;
872 bool found = lookupResult.first.second;
873 unsigned h = lookupResult.second;
876 return AddResult(this, entry, false);
878 registerModification();
// Reusing a deleted slot requires re-initialization before translate().
880 if (isDeletedBucket(*entry)) {
881 initializeBucket(*entry);
// Four-argument translate: the precomputed hash is forwarded.
885 HashTranslator::translate(*entry, key, extra, h);
886 ASSERT(!isEmptyOrDeletedBucket(*entry));
890 entry = expand(entry);
892 return AddResult(this, entry, true);
// Moves an existing entry into the current backing store during rehash.
// The entry must not already be present (asserted), and the target slot must
// not be a deleted bucket.
895 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
896 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::reinsert(ValueType& entry)
899 registerModification();
900 ASSERT(!lookupForWriting(Extractor::extract(entry)).second);
901 ASSERT(!isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first)));
902 #if DUMP_HASHTABLE_STATS
903 atomicIncrement(&HashTableStats::numReinserts);
905 #if DUMP_HASHTABLE_STATS_PER_TABLE
906 ++m_stats->numReinserts;
// Mover swaps (types needing destruction) or assigns the entry into place.
908 Value* newEntry = lookupForWriting(Extractor::extract(entry)).first;
909 Mover<ValueType, Allocator, Traits::needsDestruction>::move(entry, *newEntry);
// find(): wraps lookup() in an iterator. The null-miss branch (presumably
// returning end()) is not visible in this excerpt.
914 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
915 template <typename HashTranslator, typename T>
916 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::find(const T& key)
918 ValueType* entry = lookup<HashTranslator>(key);
// The entry is live, so a known-good iterator avoids bucket skipping.
922 return makeKnownGoodIterator(entry);
// Const find(): same shape as the non-const overload; the const_cast only
// selects the non-const lookup() entry point, which itself delegates back to
// the const probe loop.
925 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
926 template <typename HashTranslator, typename T>
927 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::const_iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::find(const T& key) const
929 ValueType* entry = const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
933 return makeKnownGoodConstIterator(entry);
936 template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
937 template <typename HashTranslator, typename T>
938 bool HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::contains(const T& key) const
940 return const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
// Removes the bucket at |pos|, updating removal statistics.
// NOTE(review): the actual deleteBucket/bookkeeping statements that follow
// the counters appear elided in this view.
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(ValueType* pos)
    registerModification();
#if DUMP_HASHTABLE_STATS
    atomicIncrement(&HashTableStats::numRemoves);
#if DUMP_HASHTABLE_STATS_PER_TABLE
    ++m_stats->numRemoves;
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
// Removes the entry an iterator points at by delegating to the pointer
// overload. The const_cast is safe: this is a non-const member mutating
// its own backing store.
inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(iterator it)
    remove(const_cast<ValueType*>(it.m_iterator.m_position));
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
// const_iterator overload of remove(); forwards the underlying bucket
// pointer to the pointer overload.
inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(const_iterator it)
    remove(const_cast<ValueType*>(it.m_position));
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
// Removes the entry for |key| if present.
// NOTE(review): the body is elided in this view -- presumably a find(key)
// followed by remove(iterator); confirm against the upstream file.
inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(KeyPeekInType key)
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
// Allocates a backing store of |size| buckets via the Allocator. Buckets are
// zero-filled when the empty value is all-zero bits; otherwise each bucket
// is explicitly initialized.
Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::allocateTable(unsigned size)
    typedef typename Allocator::template HashTableBackingHelper<HashTable>::Type HashTableBacking;
    size_t allocSize = size * sizeof(ValueType);
    // Assert that we will not use memset on things with a vtable entry.
    // The compiler will also check this on some platforms. We would
    // like to check this on the whole value (key-value pair), but
    // IsPolymorphic will return false for a pair of two types, even if
    // one of the components is polymorphic.
    COMPILE_ASSERT(!Traits::emptyValueIsZero || !IsPolymorphic<KeyType>::value, EmptyValueCannotBeZeroForThingsWithAVtable);
    // NOTE(review): the declaration of |result| (and the "} else {" between
    // the two malloc branches) appears elided in this view.
    if (Traits::emptyValueIsZero) {
        result = Allocator::template zeroedBackingMalloc<ValueType*, HashTableBacking>(allocSize);
        result = Allocator::template backingMalloc<ValueType*, HashTableBacking>(allocSize);
        for (unsigned i = 0; i < size; i++)
            initializeBucket(result[i]);
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
// Destroys every live bucket in |table| (when the traits require it) and
// returns the backing store to the allocator.
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::deleteAllBucketsAndDeallocate(ValueType* table, unsigned size)
    if (Traits::needsDestruction) {
        for (unsigned i = 0; i < size; ++i) {
            // This code is called when the hash table is cleared or
            // resized. We have allocated a new backing store and we need
            // to run the destructors on the old backing store, as it is
            // being freed. If we are GCing we need to both call the
            // destructor and mark the bucket as deleted, otherwise the
            // destructor gets called again when the GC finds the backing
            // store. With the default allocator it's enough to call the
            // destructor, since we will free the memory explicitly and
            // we won't see the memory with the bucket again.
            if (!isEmptyOrDeletedBucket(table[i])) {
                if (Allocator::isGarbageCollected)
                    deleteBucket(table[i]);
                    table[i].~ValueType();
    Allocator::backingFree(table);
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
// Chooses a new table size and rehashes. An empty table starts at the
// minimum size; when tombstone cleanup alone suffices the size is kept;
// otherwise the table doubles. Returns the relocated |entry|.
// NOTE(review): the declaration of |newSize| and the leading condition of the
// if-chain appear elided in this view.
Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::expand(Value* entry)
        newSize = KeyTraits::minimumTableSize;
    } else if (mustRehashInPlace()) {
        newSize = m_tableSize;
        newSize = m_tableSize * 2;
        // Doubling must not overflow the unsigned size.
        RELEASE_ASSERT(newSize > m_tableSize);
    return rehash(newSize, entry);
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
// Allocates a table of |newTableSize|, reinserts every live bucket from the
// old backing, frees the old backing, and returns the new location of
// |entry| (so callers can keep a stable pointer across the rehash).
Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::rehash(unsigned newTableSize, Value* entry)
    unsigned oldTableSize = m_tableSize;
    ValueType* oldTable = m_table;
#if DUMP_HASHTABLE_STATS
    if (oldTableSize != 0)
        atomicIncrement(&HashTableStats::numRehashes);
#if DUMP_HASHTABLE_STATS_PER_TABLE
    if (oldTableSize != 0)
        ++m_stats->numRehashes;
    m_table = allocateTable(newTableSize);
    m_tableSize = newTableSize;
    Value* newEntry = 0;
    for (unsigned i = 0; i != oldTableSize; ++i) {
        // Empty/deleted buckets are skipped; |entry| is expected to be live.
        if (isEmptyOrDeletedBucket(oldTable[i])) {
            ASSERT(&oldTable[i] != entry);
        Value* reinsertedEntry = reinsert(oldTable[i]);
        // Track where the caller's bucket of interest landed.
        if (&oldTable[i] == entry) {
            newEntry = reinsertedEntry;
    deleteAllBucketsAndDeallocate(oldTable, oldTableSize);
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
// Drops all entries and releases the backing store.
// NOTE(review): the early return for a null m_table and the member resets
// (m_table/m_tableSize/m_keyCount/m_deletedCount) appear elided in this view.
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::clear()
    registerModification();
    deleteAllBucketsAndDeallocate(m_table, m_tableSize);
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
// Copy constructor: starts from an empty table and re-adds every element.
// NOTE(review): the leading member initializers (m_table, m_tableSize,
// m_keyCount, m_deletedCount) and the loop body's add() call appear elided
// in this view.
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::HashTable(const HashTable& other)
    , m_queueFlag(false)
    , m_modifications(0)
#if DUMP_HASHTABLE_STATS_PER_TABLE
    , m_stats(adoptPtr(new Stats(*other.m_stats)))
    // Copy the hash table the dumb way, by adding each element to the new table.
    // It might be more efficient to copy the table slots, but it's not clear that efficiency is needed.
    const_iterator end = other.end();
    for (const_iterator it = other.begin(); it != end; ++it)
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
// Exchanges the entire state of two tables in O(1); no elements are copied.
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::swap(HashTable& other)
    std::swap(m_table, other.m_table);
    std::swap(m_tableSize, other.m_tableSize);
    std::swap(m_keyCount, other.m_keyCount);
    // std::swap does not work for bit fields.
    unsigned deleted = m_deletedCount;
    m_deletedCount = other.m_deletedCount;
    other.m_deletedCount = deleted;
    // Neither side may be enqueued for weak processing while being swapped.
    ASSERT(!m_queueFlag);
    ASSERT(!other.m_queueFlag);
    std::swap(m_modifications, other.m_modifications);
#if DUMP_HASHTABLE_STATS_PER_TABLE
    m_stats.swap(other.m_stats);
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
// Copy assignment via copy-and-swap, giving the strong exception guarantee.
// NOTE(review): the swap(tmp) call and "return *this;" appear elided in this
// view.
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>& HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::operator=(const HashTable& other)
    HashTable tmp(other);
// Helper dispatched on the traits' weak-handling flag; the GC invokes these
// callbacks during weak processing and ephemeron iteration.
template<WeakHandlingFlag weakHandlingFlag, typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
struct WeakProcessingHashTableHelper;

// Specialization for tables with no weak references: all callbacks are
// intentionally no-ops.
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
struct WeakProcessingHashTableHelper<NoWeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
    static void process(typename Allocator::Visitor* visitor, void* closure) { }
    static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure) { }
    static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure) { }
// Specialization for tables holding weak references (including ephemerons):
// performs per-element weak callbacks on behalf of the GC. |closure| is the
// HashTable itself, passed as void* through the allocator's callback API.
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
struct WeakProcessingHashTableHelper<WeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
    // Used for purely weak and for weak-and-strong tables (ephemerons).
    static void process(typename Allocator::Visitor* visitor, void* closure)
        typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
        HashTableType* table = reinterpret_cast<HashTableType*>(closure);
        if (table->m_table) {
            // This is run as part of weak processing after full
            // marking. The backing store is therefore marked if
            ASSERT(visitor->isAlive(table->m_table));
            // Now perform weak processing (this is a no-op if the backing
            // was accessible through an iterator and was already marked
            typedef typename HashTableType::ValueType ValueType;
            // Iterate backwards over every bucket in the backing store.
            for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
                if (!HashTableType::isEmptyOrDeletedBucket(*element)) {
                    // At this stage calling trace can make no difference
                    // (everything is already traced), but we use the
                    // return value to remove things from the collection.
                    if (TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element)) {
                        table->registerModification();
                        HashTableType::deleteBucket(*element); // Also calls the destructor.
                        table->m_deletedCount++;
                        table->m_keyCount--;
                        // We don't rehash the backing until the next add
                        // or delete, because that would cause allocation
    // Called repeatedly for tables that have both weak and strong pointers.
    static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure)
        typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
        HashTableType* table = reinterpret_cast<HashTableType*>(closure);
        if (table->m_table) {
            // Check the hash table for elements that we now know will not
            // be removed by weak processing. Those elements need to have
            // their strong pointers traced.
            typedef typename HashTableType::ValueType ValueType;
            for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
                if (!HashTableType::isEmptyOrDeletedBucket(*element))
                    TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element);
    // Called when the ephemeron iteration is done and before running the per thread
    // weak processing. It is guaranteed to be called before any thread is resumed.
    static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure)
        typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
        HashTableType* table = reinterpret_cast<HashTableType*>(closure);
        ASSERT(Allocator::weakTableRegistered(visitor, table));
        // Allow the table to be queued again on the next GC cycle.
        table->clearEnqueued();
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
// GC trace entry point: marks (or registers for delayed marking) the backing
// store, registers weak-processing/ephemeron callbacks as the traits demand,
// and traces live elements for strongly-traced tables.
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::trace(typename Allocator::Visitor* visitor)
    // If someone else already marked the backing and queued up the trace
    // and/or weak callback then we are done. This optimization does not
    // happen for ListHashSet since its iterator does not point at the
    if (!m_table || visitor->isAlive(m_table))
    // Normally, we mark the backing store without performing trace. This
    // means it is marked live, but the pointers inside it are not marked.
    // Instead we will mark the pointers below. However, for backing
    // stores that contain weak pointers the handling is rather different.
    // We don't mark the backing store here, so the marking GC will leave
    // the backing unmarked. If the backing is found in any other way than
    // through its HashTable (ie from an iterator) then the mark bit will
    // be set and the pointers will be marked strongly, avoiding problems
    // with iterating over things that disappear due to weak processing
    // while we are iterating over them. We register the backing store
    // pointer for delayed marking which will take place after we know if
    // the backing is reachable from elsewhere. We also register a
    // weakProcessing callback which will perform weak processing if needed.
    if (Traits::weakHandlingFlag == NoWeakHandlingInCollections) {
        Allocator::markNoTracing(visitor, m_table);
        Allocator::registerDelayedMarkNoTracing(visitor, m_table);
        Allocator::registerWeakMembers(visitor, this, m_table, WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::process);
    if (ShouldBeTraced<Traits>::value) {
        if (Traits::weakHandlingFlag == WeakHandlingInCollections) {
            // If we have both strong and weak pointers in the collection
            // then we queue up the collection for fixed point iteration a
            // http://dl.acm.org/citation.cfm?doid=263698.263733 - see also
            // http://www.jucs.org/jucs_14_21/eliminating_cycles_in_weak
            ASSERT(!enqueued() || Allocator::weakTableRegistered(visitor, this));
            Allocator::registerWeakTable(visitor, this,
                WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIteration,
                WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIterationDone);
            // We don't need to trace the elements here, since registering
            // as a weak table above will cause them to be traced (perhaps
            // several times). It's better to wait until everything else is
            // traced before tracing the elements for the first time; this
            // may reduce (by one) the number of iterations needed to get
            // to a fixed point.
    // Strongly trace every live bucket (non-weak tables reach here).
    for (ValueType* element = m_table + m_tableSize - 1; element >= m_table; element--) {
        if (!isEmptyOrDeletedBucket(*element))
            Allocator::template trace<ValueType, Traits>(visitor, *element);
1284 // iterator adapters
// Wraps a raw hash-table const_iterator, converting the stored value through
// the user-facing Traits' const get/reference types on dereference.
template<typename HashTableType, typename Traits> struct HashTableConstIteratorAdapter {
    HashTableConstIteratorAdapter() {}
    HashTableConstIteratorAdapter(const typename HashTableType::const_iterator& impl) : m_impl(impl) {}
    typedef typename Traits::IteratorConstGetType GetType;
    typedef typename HashTableType::ValueTraits::IteratorConstGetType SourceGetType;
    // Converts the underlying storage pointer to the user-facing type.
    GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
    typename Traits::IteratorConstReferenceType operator*() const { return Traits::getToReferenceConstConversion(get()); }
    GetType operator->() const { return get(); }
    HashTableConstIteratorAdapter& operator++() { ++m_impl; return *this; }
    // postfix ++ intentionally omitted
    // The wrapped raw iterator.
    typename HashTableType::const_iterator m_impl;
// Mutable counterpart of HashTableConstIteratorAdapter; additionally
// converts implicitly to the const adapter.
template<typename HashTableType, typename Traits> struct HashTableIteratorAdapter {
    typedef typename Traits::IteratorGetType GetType;
    typedef typename HashTableType::ValueTraits::IteratorGetType SourceGetType;
    HashTableIteratorAdapter() {}
    HashTableIteratorAdapter(const typename HashTableType::iterator& impl) : m_impl(impl) {}
    // Converts the underlying storage pointer to the user-facing type.
    GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
    typename Traits::IteratorReferenceType operator*() const { return Traits::getToReferenceConversion(get()); }
    GetType operator->() const { return get(); }
    HashTableIteratorAdapter& operator++() { ++m_impl; return *this; }
    // postfix ++ intentionally omitted
    // Implicit iterator -> const_iterator conversion, mirroring STL behavior.
    operator HashTableConstIteratorAdapter<HashTableType, Traits>()
        typename HashTableType::const_iterator i = m_impl;
    // The wrapped raw iterator.
    typename HashTableType::iterator m_impl;
1325 template<typename T, typename U>
1326 inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
1328 return a.m_impl == b.m_impl;
1331 template<typename T, typename U>
1332 inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
1334 return a.m_impl != b.m_impl;
1337 template<typename T, typename U>
1338 inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
1340 return a.m_impl == b.m_impl;
1343 template<typename T, typename U>
1344 inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
1346 return a.m_impl != b.m_impl;
1349 // All 4 combinations of ==, != and Const,non const.
1350 template<typename T, typename U>
1351 inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
1353 return a.m_impl == b.m_impl;
1356 template<typename T, typename U>
1357 inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
1359 return a.m_impl != b.m_impl;
1362 template<typename T, typename U>
1363 inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
1365 return a.m_impl == b.m_impl;
1368 template<typename T, typename U>
1369 inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
1371 return a.m_impl != b.m_impl;
// Removes from |collection| every element contained in |toBeRemoved|.
// Both containers must expose isEmpty(); |toBeRemoved| must be iterable and
// |collection| must provide remove().
template<typename Collection1, typename Collection2>
inline void removeAll(Collection1& collection, const Collection2& toBeRemoved)
{
    // Nothing to do when either side is empty.
    if (collection.isEmpty())
        return;
    if (toBeRemoved.isEmpty())
        return;
    typename Collection2::const_iterator it = toBeRemoved.begin();
    const typename Collection2::const_iterator stop = toBeRemoved.end();
    for (; it != stop; ++it)
        collection.remove(*it);
}
1387 #include "wtf/HashIterators.h"
1389 #endif // WTF_HashTable_h