1 /*
2  * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 David Levin <levin@chromium.org>
4  *
5  * This library is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU Library General Public
7  * License as published by the Free Software Foundation; either
8  * version 2 of the License, or (at your option) any later version.
9  *
10  * This library is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public License for more details.
12  *
13  * You should have received a copy of the GNU Library General Public License
14  * along with this library; see the file COPYING.LIB.  If not, write to
15  * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
16  * Boston, MA 02110-1301, USA.
17  *
18  */
19
20 #ifndef WTF_HashTable_h
21 #define WTF_HashTable_h
22
23 #include "wtf/Alignment.h"
24 #include "wtf/Assertions.h"
25 #include "wtf/DefaultAllocator.h"
26 #include "wtf/HashTraits.h"
27 #include "wtf/WTF.h"
28
29 #define DUMP_HASHTABLE_STATS 0
30 #define DUMP_HASHTABLE_STATS_PER_TABLE 0
31
32 #if DUMP_HASHTABLE_STATS_PER_TABLE
33 #include "wtf/DataLog.h"
34 #endif
35
36 #if DUMP_HASHTABLE_STATS
37 #if DUMP_HASHTABLE_STATS_PER_TABLE
38 #define UPDATE_PROBE_COUNTS()                            \
39     ++probeCount;                                        \
40     HashTableStats::recordCollisionAtCount(probeCount);  \
41     ++perTableProbeCount;                                \
42     m_stats->recordCollisionAtCount(perTableProbeCount)
43 #define UPDATE_ACCESS_COUNTS()                           \
44     atomicIncrement(&HashTableStats::numAccesses);       \
45     int probeCount = 0;                                  \
46     ++m_stats->numAccesses;                              \
47     int perTableProbeCount = 0
48 #else
49 #define UPDATE_PROBE_COUNTS()                            \
50     ++probeCount;                                        \
51     HashTableStats::recordCollisionAtCount(probeCount)
52 #define UPDATE_ACCESS_COUNTS()                           \
53     atomicIncrement(&HashTableStats::numAccesses);       \
54     int probeCount = 0
55 #endif
56 #else
57 #if DUMP_HASHTABLE_STATS_PER_TABLE
58 #define UPDATE_PROBE_COUNTS()                            \
59     ++perTableProbeCount;                                \
60     m_stats->recordCollisionAtCount(perTableProbeCount)
61 #define UPDATE_ACCESS_COUNTS()                           \
62     ++m_stats->numAccesses;                              \
63     int perTableProbeCount = 0
64 #else
65 #define UPDATE_PROBE_COUNTS() do { } while (0)
66 #define UPDATE_ACCESS_COUNTS() do { } while (0)
67 #endif
68 #endif
69
70 namespace WTF {
71
72 #if DUMP_HASHTABLE_STATS
73
74     struct HashTableStats {
75         // The following variables are all atomically incremented when modified.
76         static int numAccesses;
77         static int numRehashes;
78         static int numRemoves;
79         static int numReinserts;
80
81         // The following variables are only modified in the recordCollisionAtCount method, while holding a mutex.
82         static int maxCollisions;
83         static int numCollisions;
84         static int collisionGraph[4096];
85
86         static void recordCollisionAtCount(int count);
87         static void dumpStats();
88     };
89
90 #endif
91
92     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
93     class HashTable;
94     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
95     class HashTableIterator;
96     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
97     class HashTableConstIterator;
98     template<typename Value, typename HashFunctions, typename HashTraits, typename Allocator>
99     class LinkedHashSet;
100     template<WeakHandlingFlag x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z>
101     struct WeakProcessingHashTableHelper;
102
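    // Tag passed to the iterator constructors when the caller already knows
    // that the position points at an occupied bucket (or one past the end),
    // e.g. the result of a successful lookup, so the scan for the next
    // non-empty bucket can be skipped.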
103     typedef enum { HashItemKnownGood } HashItemKnownGoodTag;
104
105     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
106     class HashTableConstIterator {
107     private:
108         typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
109         typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
110         typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
111         typedef Value ValueType;
112         typedef typename Traits::IteratorConstGetType GetType;
113         typedef const ValueType* PointerType;
114
115         friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;
116         friend class HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;
117
118         void skipEmptyBuckets()
119         {
120             while (m_position != m_endPosition && HashTableType::isEmptyOrDeletedBucket(*m_position))
121                 ++m_position;
122         }
123
124         HashTableConstIterator(PointerType position, PointerType endPosition, const HashTableType* container)
125             : m_position(position)
126             , m_endPosition(endPosition)
127 #if ENABLE(ASSERT)
128             , m_container(container)
129             , m_containerModifications(container->modifications())
130 #endif
131         {
132             skipEmptyBuckets();
133         }
134
135         HashTableConstIterator(PointerType position, PointerType endPosition, const HashTableType* container, HashItemKnownGoodTag)
136             : m_position(position)
137             , m_endPosition(endPosition)
138 #if ENABLE(ASSERT)
139             , m_container(container)
140             , m_containerModifications(container->modifications())
141 #endif
142         {
143             ASSERT(m_containerModifications == m_container->modifications());
144         }
145
146         void checkModifications() const
147         {
148             // HashTable and collections that build on it do not support
149             // modifications while there is an iterator in use. The exception
150             // is ListHashSet, which has its own iterators that tolerate
151             // modification of the underlying set.
152             ASSERT(m_containerModifications == m_container->modifications());
153         }
154
155     public:
156         HashTableConstIterator()
157         {
158         }
159
160         GetType get() const
161         {
162             checkModifications();
163             return m_position;
164         }
165         typename Traits::IteratorConstReferenceType operator*() const { return Traits::getToReferenceConstConversion(get()); }
166         GetType operator->() const { return get(); }
167
168         const_iterator& operator++()
169         {
170             ASSERT(m_position != m_endPosition);
171             checkModifications();
172             ++m_position;
173             skipEmptyBuckets();
174             return *this;
175         }
176
177         // postfix ++ intentionally omitted
178
179         // Comparison.
180         bool operator==(const const_iterator& other) const
181         {
182             return m_position == other.m_position;
183         }
184         bool operator!=(const const_iterator& other) const
185         {
186             return m_position != other.m_position;
187         }
188         bool operator==(const iterator& other) const
189         {
190             return *this == static_cast<const_iterator>(other);
191         }
192         bool operator!=(const iterator& other) const
193         {
194             return *this != static_cast<const_iterator>(other);
195         }
196
197     private:
198         PointerType m_position;
199         PointerType m_endPosition;
200 #if ENABLE(ASSERT)
201         const HashTableType* m_container;
202         int64_t m_containerModifications;
203 #endif
204     };
205
206     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
207     class HashTableIterator {
208     private:
209         typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
210         typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
211         typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
212         typedef Value ValueType;
213         typedef typename Traits::IteratorGetType GetType;
214         typedef ValueType* PointerType;
215
216         friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;
217
218         HashTableIterator(PointerType pos, PointerType end, const HashTableType* container) : m_iterator(pos, end, container) { }
219         HashTableIterator(PointerType pos, PointerType end, const HashTableType* container, HashItemKnownGoodTag tag) : m_iterator(pos, end, container, tag) { }
220
221     public:
222         HashTableIterator() { }
223
224         // default copy, assignment and destructor are OK
225
226         GetType get() const { return const_cast<GetType>(m_iterator.get()); }
227         typename Traits::IteratorReferenceType operator*() const { return Traits::getToReferenceConversion(get()); }
228         GetType operator->() const { return get(); }
229
230         iterator& operator++() { ++m_iterator; return *this; }
231
232         // postfix ++ intentionally omitted
233
234         // Comparison.
235         bool operator==(const iterator& other) const { return m_iterator == other.m_iterator; }
236         bool operator!=(const iterator& other) const { return m_iterator != other.m_iterator; }
237         bool operator==(const const_iterator& other) const { return m_iterator == other; }
238         bool operator!=(const const_iterator& other) const { return m_iterator != other; }
239
240         operator const_iterator() const { return m_iterator; }
241
242     private:
243         const_iterator m_iterator;
244     };
245
246     using std::swap;
247
248     // Work around MSVC's standard library, whose swap for pairs does not swap by component.
249     template<typename T> inline void hashTableSwap(T& a, T& b)
250     {
251         swap(a, b);
252     }
253
254     template<typename T, typename U> inline void hashTableSwap(KeyValuePair<T, U>& a, KeyValuePair<T, U>& b)
255     {
256         swap(a.key, b.key);
257         swap(a.value, b.value);
258     }
259
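    // Mover is used by reinsert() to relocate entries during a rehash. When the
    // value type needs destruction, moving by swap leaves the old bucket holding
    // the (empty) value that was in the new bucket, so no destructor runs on the
    // moved-from slot when the old backing store is torn down; when no
    // destruction is needed, a plain copy suffices.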
260     template<typename T, bool useSwap> struct Mover;
261     template<typename T> struct Mover<T, true> { static void move(T& from, T& to) { hashTableSwap(from, to); } };
262     template<typename T> struct Mover<T, false> { static void move(T& from, T& to) { to = from; } };
263
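    // A HashTranslator lets the templated add() and find() members below be
    // driven by a lookup type other than the stored key, avoiding a temporary
    // conversion when the element is already present. Roughly (illustrative
    // signatures, not a definition from this file):
    //
    //     static unsigned hash(const LookupType&);
    //     static bool equal(const StoredKey&, const LookupType&);
    //     static void translate(Value& location, const LookupType&, const Extra&);
    //
    // addPassingHashCode() additionally hands the already-computed hash code to
    // translate(). IdentityHashTranslator below is the trivial case: the lookup
    // type is the key type itself and translate() simply stores the value.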
264     template<typename HashFunctions> class IdentityHashTranslator {
265     public:
266         template<typename T> static unsigned hash(const T& key) { return HashFunctions::hash(key); }
267         template<typename T, typename U> static bool equal(const T& a, const U& b) { return HashFunctions::equal(a, b); }
268         template<typename T, typename U, typename V> static void translate(T& location, const U&, const V& value) { location = value; }
269     };
270
271     template<typename HashTableType, typename ValueType> struct HashTableAddResult {
272         HashTableAddResult(const HashTableType* container, ValueType* storedValue, bool isNewEntry)
273             : storedValue(storedValue)
274             , isNewEntry(isNewEntry)
275 #if ENABLE(SECURITY_ASSERT)
276             , m_container(container)
277             , m_containerModifications(container->modifications())
278 #endif
279         {
280             ASSERT_UNUSED(container, container);
281         }
282
283         ~HashTableAddResult()
284         {
285         // If a rehash happened before storedValue was accessed, the pointer
286         // is stale and dereferencing it is a use-after-free. Any modification
287         // may cause a rehash, so we check for modifications here.
288         // A rehash after accessing storedValue is harmless, but will still
289         // assert if the AddResult destructor runs after a modification. You
290         // may need to limit the scope of the AddResult.
291             ASSERT_WITH_SECURITY_IMPLICATION(m_containerModifications == m_container->modifications());
292         }
293
294         ValueType* storedValue;
295         bool isNewEntry;
296
297 #if ENABLE(SECURITY_ASSERT)
298     private:
299         const HashTableType* m_container;
300         const int64_t m_containerModifications;
301 #endif
302     };
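    // Typical use of AddResult (an illustrative sketch only; 'set', 'Widget' and
    // 'didAddNewWidget' are hypothetical):
    //
    //     {
    //         HashSet<Widget>::AddResult result = set.add(widget);
    //         if (result.isNewEntry)
    //             didAddNewWidget(*result.storedValue);
    //     } // Let the AddResult die before the next mutation of 'set'.
    //
    // storedValue points into the backing table, so neither it nor the AddResult
    // itself may outlive a modification that could trigger a rehash.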
303
304     template<typename Value, typename Extractor, typename KeyTraits>
305     struct HashTableHelper {
306         static bool isEmptyBucket(const Value& value) { return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); }
307         static bool isDeletedBucket(const Value& value) { return KeyTraits::isDeletedValue(Extractor::extract(value)); }
308         static bool isEmptyOrDeletedBucket(const Value& value) { return isEmptyBucket(value) || isDeletedBucket(value); }
309     };
310
311     template<typename HashTranslator, typename KeyTraits, bool safeToCompareToEmptyOrDeleted>
312     struct HashTableKeyChecker {
313         // There's no simple generic way to make this check if safeToCompareToEmptyOrDeleted is false,
314         // so the check always passes.
315         template <typename T>
316         static bool checkKey(const T&) { return true; }
317     };
318
319     template<typename HashTranslator, typename KeyTraits>
320     struct HashTableKeyChecker<HashTranslator, KeyTraits, true> {
321         template <typename T>
322         static bool checkKey(const T& key)
323         {
324             // FIXME: Also check equality to the deleted value.
325             return !HashTranslator::equal(KeyTraits::emptyValue(), key);
326         }
327     };
328
329     // Don't declare a destructor for HeapAllocated hash tables.
330     template<typename Derived, bool isGarbageCollected>
331     class HashTableDestructorBase;
332
333     template<typename Derived>
334     class HashTableDestructorBase<Derived, true> { };
335
336     template<typename Derived>
337     class HashTableDestructorBase<Derived, false> {
338     public:
339         ~HashTableDestructorBase() { static_cast<Derived*>(this)->finalize(); }
340     };
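    // The CRTP above gives non-garbage-collected tables a destructor that
    // forwards to HashTable::finalize(), which frees the backing store, while
    // the garbage-collected specialization adds no destructor and leaves the
    // backing store to the garbage collector (see the note above).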
341
342     // Note: empty or deleted key values are not allowed, using them may lead to undefined behavior.
343     // For pointer keys this means that null pointers are not allowed unless you supply custom key traits.
344     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
345     class HashTable : public HashTableDestructorBase<HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>, Allocator::isGarbageCollected> {
346     public:
347         typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
348         typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
349         typedef Traits ValueTraits;
350         typedef Key KeyType;
351         typedef typename KeyTraits::PeekInType KeyPeekInType;
352         typedef typename KeyTraits::PassInType KeyPassInType;
353         typedef Value ValueType;
354         typedef Extractor ExtractorType;
355         typedef KeyTraits KeyTraitsType;
356         typedef typename Traits::PassInType ValuePassInType;
357         typedef IdentityHashTranslator<HashFunctions> IdentityTranslatorType;
358         typedef HashTableAddResult<HashTable, ValueType> AddResult;
359
360 #if DUMP_HASHTABLE_STATS_PER_TABLE
361         struct Stats {
362             Stats()
363                 : numAccesses(0)
364                 , numRehashes(0)
365                 , numRemoves(0)
366                 , numReinserts(0)
367                 , maxCollisions(0)
368                 , numCollisions(0)
369                 , collisionGraph()
370             {
371             }
372
373             int numAccesses;
374             int numRehashes;
375             int numRemoves;
376             int numReinserts;
377
378             int maxCollisions;
379             int numCollisions;
380             int collisionGraph[4096];
381
382             void recordCollisionAtCount(int count)
383             {
384                 if (count > maxCollisions)
385                     maxCollisions = count;
386                 numCollisions++;
387                 collisionGraph[count]++;
388             }
389
390             void dumpStats()
391             {
392                 dataLogF("\nWTF::HashTable::Stats dump\n\n");
393                 dataLogF("%d accesses\n", numAccesses);
394                 dataLogF("%d total collisions, average %.2f probes per access\n", numCollisions, 1.0 * (numAccesses + numCollisions) / numAccesses);
395                 dataLogF("longest collision chain: %d\n", maxCollisions);
396                 for (int i = 1; i <= maxCollisions; i++) {
397                     dataLogF("  %d lookups with exactly %d collisions (%.2f%% , %.2f%% with this many or more)\n", collisionGraph[i], i, 100.0 * (collisionGraph[i] - collisionGraph[i+1]) / numAccesses, 100.0 * collisionGraph[i] / numAccesses);
398                 }
399                 dataLogF("%d rehashes\n", numRehashes);
400                 dataLogF("%d reinserts\n", numReinserts);
401             }
402         };
403 #endif
404
405         HashTable();
406         void finalize()
407         {
408             ASSERT(!Allocator::isGarbageCollected);
409             if (LIKELY(!m_table))
410                 return;
411             deleteAllBucketsAndDeallocate(m_table, m_tableSize);
412             m_table = 0;
413         }
414
415         HashTable(const HashTable&);
416         void swap(HashTable&);
417         HashTable& operator=(const HashTable&);
418
419         // When the hash table is empty, just return the same iterator for end as for begin.
420         // This is more efficient because we don't have to skip all the empty and deleted
421         // buckets, and iterating an empty table is a common case that's worth optimizing.
422         iterator begin() { return isEmpty() ? end() : makeIterator(m_table); }
423         iterator end() { return makeKnownGoodIterator(m_table + m_tableSize); }
424         const_iterator begin() const { return isEmpty() ? end() : makeConstIterator(m_table); }
425         const_iterator end() const { return makeKnownGoodConstIterator(m_table + m_tableSize); }
426
427         unsigned size() const { return m_keyCount; }
428         unsigned capacity() const { return m_tableSize; }
429         bool isEmpty() const { return !m_keyCount; }
430
431         AddResult add(ValuePassInType value)
432         {
433             return add<IdentityTranslatorType>(Extractor::extract(value), value);
434         }
435
436         // A special version of add() that finds the object by hashing and comparing
437         // with some other type, to avoid the cost of type conversion if the object is already
438         // in the table.
439         template<typename HashTranslator, typename T, typename Extra> AddResult add(const T& key, const Extra&);
440         template<typename HashTranslator, typename T, typename Extra> AddResult addPassingHashCode(const T& key, const Extra&);
441
442         iterator find(KeyPeekInType key) { return find<IdentityTranslatorType>(key); }
443         const_iterator find(KeyPeekInType key) const { return find<IdentityTranslatorType>(key); }
444         bool contains(KeyPeekInType key) const { return contains<IdentityTranslatorType>(key); }
445
446         template<typename HashTranslator, typename T> iterator find(const T&);
447         template<typename HashTranslator, typename T> const_iterator find(const T&) const;
448         template<typename HashTranslator, typename T> bool contains(const T&) const;
449
450         void remove(KeyPeekInType);
451         void remove(iterator);
452         void remove(const_iterator);
453         void clear();
454
455         static bool isEmptyBucket(const ValueType& value) { return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); }
456         static bool isDeletedBucket(const ValueType& value) { return KeyTraits::isDeletedValue(Extractor::extract(value)); }
457         static bool isEmptyOrDeletedBucket(const ValueType& value) { return HashTableHelper<ValueType, Extractor, KeyTraits>::isEmptyOrDeletedBucket(value); }
458
459         ValueType* lookup(KeyPeekInType key) { return lookup<IdentityTranslatorType, KeyPeekInType>(key); }
460         template<typename HashTranslator, typename T> ValueType* lookup(T);
461         template<typename HashTranslator, typename T> const ValueType* lookup(T) const;
462
463         void trace(typename Allocator::Visitor*);
464
465 #if ENABLE(ASSERT)
466         int64_t modifications() const { return m_modifications; }
467         void registerModification() { m_modifications++; }
468         // HashTable and collections that build on it do not support
469         // modifications while there is an iterator in use. The exception is
470         // ListHashSet, which has its own iterators that tolerate modification
471         // of the underlying set.
472         void checkModifications(int64_t mods) const { ASSERT(mods == m_modifications); }
473 #else
474         int64_t modifications() const { return 0; }
475         void registerModification() { }
476         void checkModifications(int64_t mods) const { }
477 #endif
478
479     private:
480         static ValueType* allocateTable(unsigned size);
481         static void deleteAllBucketsAndDeallocate(ValueType* table, unsigned size);
482
483         typedef std::pair<ValueType*, bool> LookupType;
484         typedef std::pair<LookupType, unsigned> FullLookupType;
485
486         LookupType lookupForWriting(const Key& key) { return lookupForWriting<IdentityTranslatorType>(key); }
487         template<typename HashTranslator, typename T> FullLookupType fullLookupForWriting(const T&);
488         template<typename HashTranslator, typename T> LookupType lookupForWriting(const T&);
489
490         void remove(ValueType*);
491
492         bool shouldExpand() const { return (m_keyCount + m_deletedCount) * m_maxLoad >= m_tableSize; }
493         bool mustRehashInPlace() const { return m_keyCount * m_minLoad < m_tableSize * 2; }
494         bool shouldShrink() const
495         {
496             // The isAllocationAllowed check should come last because it is
497             // expensive.
498             return m_keyCount * m_minLoad < m_tableSize
499                 && m_tableSize > KeyTraits::minimumTableSize
500                 && Allocator::isAllocationAllowed();
501         }
502         ValueType* expand(ValueType* entry = 0);
503         void shrink() { rehash(m_tableSize / 2, 0); }
504
505         ValueType* rehash(unsigned newTableSize, ValueType* entry);
506         ValueType* reinsert(ValueType&);
507
508         static void initializeBucket(ValueType& bucket);
509         static void deleteBucket(ValueType& bucket) { bucket.~ValueType(); Traits::constructDeletedValue(bucket); }
510
511         FullLookupType makeLookupResult(ValueType* position, bool found, unsigned hash)
512             { return FullLookupType(LookupType(position, found), hash); }
513
514         iterator makeIterator(ValueType* pos) { return iterator(pos, m_table + m_tableSize, this); }
515         const_iterator makeConstIterator(ValueType* pos) const { return const_iterator(pos, m_table + m_tableSize, this); }
516         iterator makeKnownGoodIterator(ValueType* pos) { return iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }
517         const_iterator makeKnownGoodConstIterator(ValueType* pos) const { return const_iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }
518
519         static const unsigned m_maxLoad = 2;
520         static const unsigned m_minLoad = 6;
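        // Load-factor policy implied by these constants (see shouldExpand(),
        // mustRehashInPlace() and shouldShrink() above):
        //   - expand when (keyCount + deletedCount) * 2 >= tableSize, i.e. the
        //     table is kept at most half full, counting deleted buckets;
        //   - shrink to half the size when keyCount * 6 < tableSize, i.e. the
        //     table has dropped below one sixth full, subject to the minimum
        //     table size and to allocation being allowed;
        //   - rehash in place when keyCount * 6 < tableSize * 2, i.e. live keys
        //     fill less than a third of the table, so compacting away deleted
        //     buckets is preferable to doubling the allocation.
        // For example, a 64-slot table expands once live plus deleted entries
        // reach 32, and shrinks to 32 slots once live entries drop below 11.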
521
522         unsigned tableSizeMask() const
523         {
524             size_t mask = m_tableSize - 1;
525             ASSERT((mask & m_tableSize) == 0);
526             return mask;
527         }
528
529         void setEnqueued() { m_queueFlag = true; }
530         void clearEnqueued() { m_queueFlag = false; }
531         bool enqueued() { return m_queueFlag; }
532
533         ValueType* m_table;
534         unsigned m_tableSize;
535         unsigned m_keyCount;
536         unsigned m_deletedCount:31;
537         bool m_queueFlag:1;
538 #if ENABLE(ASSERT)
539         unsigned m_modifications;
540 #endif
541
542 #if DUMP_HASHTABLE_STATS_PER_TABLE
543     public:
544         mutable OwnPtr<Stats> m_stats;
545 #endif
546
547         template<WeakHandlingFlag x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z> friend struct WeakProcessingHashTableHelper;
548         template<typename T, typename U, typename V, typename W> friend class LinkedHashSet;
549     };
550
551     // Set to one all the bits at and below the most significant set bit: 00110101010 -> 00111111111.
552     template<unsigned size> struct OneifyLowBits;
553     template<>
554     struct OneifyLowBits<0> {
555         static const unsigned value = 0;
556     };
557     template<unsigned number>
558     struct OneifyLowBits {
559         static const unsigned value = number | OneifyLowBits<(number >> 1)>::value;
560     };
561     // Compute a power-of-two upper bound for the parameter 'number': twice the smallest power of two that is greater than or equal to 'number'.
562     template<unsigned number>
563     struct UpperPowerOfTwoBound {
564         static const unsigned value = (OneifyLowBits<number - 1>::value + 1) * 2;
565     };
566
567     // Because a power-of-two size sits exactly at the maxLoad limit, its capacity is twice
568     // the UpperPowerOfTwoBound, i.e. 4 times the size.
569     template<unsigned size, bool isPowerOfTwo> struct HashTableCapacityForSizeSplitter;
570     template<unsigned size>
571     struct HashTableCapacityForSizeSplitter<size, true> {
572         static const unsigned value = size * 4;
573     };
574     template<unsigned size>
575     struct HashTableCapacityForSizeSplitter<size, false> {
576         static const unsigned value = UpperPowerOfTwoBound<size>::value;
577     };
578
579     // HashTableCapacityForSize computes the upper power of two capacity to hold the size parameter.
580     // This is done at compile time to initialize the HashTraits.
581     template<unsigned size>
582     struct HashTableCapacityForSize {
583         static const unsigned value = HashTableCapacityForSizeSplitter<size, !(size & (size - 1))>::value;
584         COMPILE_ASSERT(size > 0, HashTableNonZeroMinimumCapacity);
585         COMPILE_ASSERT(!static_cast<int>(value >> 31), HashTableNoCapacityOverflow);
586         COMPILE_ASSERT(value > (2 * size), HashTableCapacityHoldsContentSize);
587     };
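    // Worked examples of the capacity computation above (for illustration):
    //   HashTableCapacityForSize<1>::value == 4    (power of two: 1 * 4)
    //   HashTableCapacityForSize<8>::value == 32   (power of two: 8 * 4)
    //   HashTableCapacityForSize<5>::value == 16   (UpperPowerOfTwoBound<5> == (7 + 1) * 2)
    // In each case the capacity is a power of two strictly greater than twice the
    // requested size, matching the COMPILE_ASSERTs above and the maxLoad limit of
    // one half.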
588
589     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
590     inline HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::HashTable()
591         : m_table(0)
592         , m_tableSize(0)
593         , m_keyCount(0)
594         , m_deletedCount(0)
595         , m_queueFlag(false)
596 #if ENABLE(ASSERT)
597         , m_modifications(0)
598 #endif
599 #if DUMP_HASHTABLE_STATS_PER_TABLE
600         , m_stats(adoptPtr(new Stats))
601 #endif
602     {
603     }
604
605     inline unsigned doubleHash(unsigned key)
606     {
607         key = ~key + (key >> 23);
608         key ^= (key << 12);
609         key ^= (key >> 7);
610         key ^= (key << 2);
611         key ^= (key >> 20);
612         return key;
613     }
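    // doubleHash() supplies the probe step for the open-addressing loops below.
    // The first probe lands at (hash & tableSizeMask()); after a collision the
    // index advances by k = 1 | doubleHash(hash) on every iteration, modulo the
    // table size. The table size is a power of two and k is forced to be odd, so
    // k is coprime with the size: the probe sequence visits every bucket before
    // repeating, which guarantees termination because the load factor keeps the
    // table from ever being completely full.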
614
615     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
616     template<typename HashTranslator, typename T>
617     inline Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key)
618     {
619         return const_cast<Value*>(const_cast<const HashTable*>(this)->lookup<HashTranslator, T>(key));
620     }
621
622     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
623     template<typename HashTranslator, typename T>
624     inline const Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key) const
625     {
626         ASSERT((HashTableKeyChecker<HashTranslator, KeyTraits, HashFunctions::safeToCompareToEmptyOrDeleted>::checkKey(key)));
627         const ValueType* table = m_table;
628         if (!table)
629             return 0;
630
631         size_t k = 0;
632         size_t sizeMask = tableSizeMask();
633         unsigned h = HashTranslator::hash(key);
634         size_t i = h & sizeMask;
635
636         UPDATE_ACCESS_COUNTS();
637
638         while (1) {
639             const ValueType* entry = table + i;
640
641             if (HashFunctions::safeToCompareToEmptyOrDeleted) {
642                 if (HashTranslator::equal(Extractor::extract(*entry), key))
643                     return entry;
644
645                 if (isEmptyBucket(*entry))
646                     return 0;
647             } else {
648                 if (isEmptyBucket(*entry))
649                     return 0;
650
651                 if (!isDeletedBucket(*entry) && HashTranslator::equal(Extractor::extract(*entry), key))
652                     return entry;
653             }
654             UPDATE_PROBE_COUNTS();
655             if (!k)
656                 k = 1 | doubleHash(h);
657             i = (i + k) & sizeMask;
658         }
659     }
660
661     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
662     template<typename HashTranslator, typename T>
663     inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::LookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookupForWriting(const T& key)
664     {
665         ASSERT(m_table);
666         registerModification();
667
668         ValueType* table = m_table;
669         size_t k = 0;
670         size_t sizeMask = tableSizeMask();
671         unsigned h = HashTranslator::hash(key);
672         size_t i = h & sizeMask;
673
674         UPDATE_ACCESS_COUNTS();
675
676         ValueType* deletedEntry = 0;
677
678         while (1) {
679             ValueType* entry = table + i;
680
681             if (isEmptyBucket(*entry))
682                 return LookupType(deletedEntry ? deletedEntry : entry, false);
683
684             if (HashFunctions::safeToCompareToEmptyOrDeleted) {
685                 if (HashTranslator::equal(Extractor::extract(*entry), key))
686                     return LookupType(entry, true);
687
688                 if (isDeletedBucket(*entry))
689                     deletedEntry = entry;
690             } else {
691                 if (isDeletedBucket(*entry))
692                     deletedEntry = entry;
693                 else if (HashTranslator::equal(Extractor::extract(*entry), key))
694                     return LookupType(entry, true);
695             }
696             UPDATE_PROBE_COUNTS();
697             if (!k)
698                 k = 1 | doubleHash(h);
699             i = (i + k) & sizeMask;
700         }
701     }
702
703     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
704     template<typename HashTranslator, typename T>
705     inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::FullLookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::fullLookupForWriting(const T& key)
706     {
707         ASSERT(m_table);
708         registerModification();
709
710         ValueType* table = m_table;
711         size_t k = 0;
712         size_t sizeMask = tableSizeMask();
713         unsigned h = HashTranslator::hash(key);
714         size_t i = h & sizeMask;
715
716         UPDATE_ACCESS_COUNTS();
717
718         ValueType* deletedEntry = 0;
719
720         while (1) {
721             ValueType* entry = table + i;
722
723             if (isEmptyBucket(*entry))
724                 return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h);
725
726             if (HashFunctions::safeToCompareToEmptyOrDeleted) {
727                 if (HashTranslator::equal(Extractor::extract(*entry), key))
728                     return makeLookupResult(entry, true, h);
729
730                 if (isDeletedBucket(*entry))
731                     deletedEntry = entry;
732             } else {
733                 if (isDeletedBucket(*entry))
734                     deletedEntry = entry;
735                 else if (HashTranslator::equal(Extractor::extract(*entry), key))
736                     return makeLookupResult(entry, true, h);
737             }
738             UPDATE_PROBE_COUNTS();
739             if (!k)
740                 k = 1 | doubleHash(h);
741             i = (i + k) & sizeMask;
742         }
743     }
744
745     template<bool emptyValueIsZero> struct HashTableBucketInitializer;
746
747     template<> struct HashTableBucketInitializer<false> {
748         template<typename Traits, typename Value> static void initialize(Value& bucket)
749         {
750             new (NotNull, &bucket) Value(Traits::emptyValue());
751         }
752     };
753
754     template<> struct HashTableBucketInitializer<true> {
755         template<typename Traits, typename Value> static void initialize(Value& bucket)
756         {
757             // This initializes the bucket without copying the empty value.
758             // That makes it possible to use this with types that don't support copying.
759             // The memset to 0 looks like a slow operation but is optimized by the compilers.
760             memset(&bucket, 0, sizeof(bucket));
761         }
762     };
763
764     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
765     inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::initializeBucket(ValueType& bucket)
766     {
767         HashTableBucketInitializer<Traits::emptyValueIsZero>::template initialize<Traits>(bucket);
768     }
769
770     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
771     template<typename HashTranslator, typename T, typename Extra>
772     typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::add(const T& key, const Extra& extra)
773     {
774         ASSERT(Allocator::isAllocationAllowed());
775         if (!m_table)
776             expand();
777
778         ASSERT(m_table);
779
780         ValueType* table = m_table;
781         size_t k = 0;
782         size_t sizeMask = tableSizeMask();
783         unsigned h = HashTranslator::hash(key);
784         size_t i = h & sizeMask;
785
786         UPDATE_ACCESS_COUNTS();
787
788         ValueType* deletedEntry = 0;
789         ValueType* entry;
790         while (1) {
791             entry = table + i;
792
793             if (isEmptyBucket(*entry))
794                 break;
795
796             if (HashFunctions::safeToCompareToEmptyOrDeleted) {
797                 if (HashTranslator::equal(Extractor::extract(*entry), key))
798                     return AddResult(this, entry, false);
799
800                 if (isDeletedBucket(*entry))
801                     deletedEntry = entry;
802             } else {
803                 if (isDeletedBucket(*entry))
804                     deletedEntry = entry;
805                 else if (HashTranslator::equal(Extractor::extract(*entry), key))
806                     return AddResult(this, entry, false);
807             }
808             UPDATE_PROBE_COUNTS();
809             if (!k)
810                 k = 1 | doubleHash(h);
811             i = (i + k) & sizeMask;
812         }
813
814         registerModification();
815
816         if (deletedEntry) {
817             initializeBucket(*deletedEntry);
818             entry = deletedEntry;
819             --m_deletedCount;
820         }
821
822         HashTranslator::translate(*entry, key, extra);
823         ASSERT(!isEmptyOrDeletedBucket(*entry));
824
825         ++m_keyCount;
826
827         if (shouldExpand())
828             entry = expand(entry);
829
830         return AddResult(this, entry, true);
831     }
832
833     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
834     template<typename HashTranslator, typename T, typename Extra>
835     typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::addPassingHashCode(const T& key, const Extra& extra)
836     {
837         ASSERT(Allocator::isAllocationAllowed());
838         if (!m_table)
839             expand();
840
841         FullLookupType lookupResult = fullLookupForWriting<HashTranslator>(key);
842
843         ValueType* entry = lookupResult.first.first;
844         bool found = lookupResult.first.second;
845         unsigned h = lookupResult.second;
846
847         if (found)
848             return AddResult(this, entry, false);
849
850         registerModification();
851
852         if (isDeletedBucket(*entry)) {
853             initializeBucket(*entry);
854             --m_deletedCount;
855         }
856
857         HashTranslator::translate(*entry, key, extra, h);
858         ASSERT(!isEmptyOrDeletedBucket(*entry));
859
860         ++m_keyCount;
861         if (shouldExpand())
862             entry = expand(entry);
863
864         return AddResult(this, entry, true);
865     }
866
867     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
868     Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::reinsert(ValueType& entry)
869     {
870         ASSERT(m_table);
871         registerModification();
872         ASSERT(!lookupForWriting(Extractor::extract(entry)).second);
873         ASSERT(!isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first)));
874 #if DUMP_HASHTABLE_STATS
875         atomicIncrement(&HashTableStats::numReinserts);
876 #endif
877 #if DUMP_HASHTABLE_STATS_PER_TABLE
878         ++m_stats->numReinserts;
879 #endif
880         Value* newEntry = lookupForWriting(Extractor::extract(entry)).first;
881         Mover<ValueType, Traits::needsDestruction>::move(entry, *newEntry);
882
883         return newEntry;
884     }
885
886     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
887     template <typename HashTranslator, typename T>
888     inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::find(const T& key)
889     {
890         ValueType* entry = lookup<HashTranslator>(key);
891         if (!entry)
892             return end();
893
894         return makeKnownGoodIterator(entry);
895     }
896
897     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
898     template <typename HashTranslator, typename T>
899     inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::const_iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::find(const T& key) const
900     {
901         ValueType* entry = const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
902         if (!entry)
903             return end();
904
905         return makeKnownGoodConstIterator(entry);
906     }
907
908     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
909     template <typename HashTranslator, typename T>
910     bool HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::contains(const T& key) const
911     {
912         return const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
913     }
914
915     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
916     void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(ValueType* pos)
917     {
918         registerModification();
919 #if DUMP_HASHTABLE_STATS
920         atomicIncrement(&HashTableStats::numRemoves);
921 #endif
922 #if DUMP_HASHTABLE_STATS_PER_TABLE
923         ++m_stats->numRemoves;
924 #endif
925
926         deleteBucket(*pos);
927         ++m_deletedCount;
928         --m_keyCount;
929
930         if (shouldShrink())
931             shrink();
932     }
933
934     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
935     inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(iterator it)
936     {
937         if (it == end())
938             return;
939
940         remove(const_cast<ValueType*>(it.m_iterator.m_position));
941     }
942
943     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
944     inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(const_iterator it)
945     {
946         if (it == end())
947             return;
948
949         remove(const_cast<ValueType*>(it.m_position));
950     }
951
952     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
953     inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(KeyPeekInType key)
954     {
955         remove(find(key));
956     }
957
958     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
959     Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::allocateTable(unsigned size)
960     {
961         typedef typename Allocator::template HashTableBackingHelper<HashTable>::Type HashTableBacking;
962
963         size_t allocSize = size * sizeof(ValueType);
964         ValueType* result;
965         COMPILE_ASSERT(!Traits::emptyValueIsZero || !IsPolymorphic<ValueType>::value, EmptyValueCannotBeZeroForThingsWithAVtable);
966         if (Traits::emptyValueIsZero) {
967             result = Allocator::template zeroedBackingMalloc<ValueType*, HashTableBacking>(allocSize);
968         } else {
969             result = Allocator::template backingMalloc<ValueType*, HashTableBacking>(allocSize);
970             for (unsigned i = 0; i < size; i++)
971                 initializeBucket(result[i]);
972         }
973         return result;
974     }
975
976     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
977     void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::deleteAllBucketsAndDeallocate(ValueType* table, unsigned size)
978     {
979         if (Traits::needsDestruction) {
980             for (unsigned i = 0; i < size; ++i) {
981                 // This code is called when the hash table is cleared or
982                 // resized. We have allocated a new backing store and we need
983                 // to run the destructors on the old backing store, as it is
984                 // being freed. If we are GCing we need to both call the
985                 // destructor and mark the bucket as deleted, otherwise the
986                 // destructor gets called again when the GC finds the backing
987                 // store. With the default allocator it's enough to call the
988                 // destructor, since we will free the memory explicitly and
989                 // we won't see the memory with the bucket again.
990                 if (!isEmptyOrDeletedBucket(table[i])) {
991                     if (Allocator::isGarbageCollected)
992                         deleteBucket(table[i]);
993                     else
994                         table[i].~ValueType();
995                 }
996             }
997         }
998         Allocator::backingFree(table);
999     }
1000
1001     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1002     Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::expand(Value* entry)
1003     {
1004         unsigned newSize;
1005         if (!m_tableSize) {
1006             newSize = KeyTraits::minimumTableSize;
1007         } else if (mustRehashInPlace()) {
1008             newSize = m_tableSize;
1009         } else {
1010             newSize = m_tableSize * 2;
1011             RELEASE_ASSERT(newSize > m_tableSize);
1012         }
1013
1014         return rehash(newSize, entry);
1015     }
1016
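    // rehash() moves every live entry into a freshly allocated backing store of
    // newTableSize buckets (which may equal the old size when only deleted
    // buckets need compacting) and returns the new location of 'entry', if one
    // was passed, so that add() can still hand out a valid pointer in its
    // AddResult after an expansion.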
1017     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1018     Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::rehash(unsigned newTableSize, Value* entry)
1019     {
1020         unsigned oldTableSize = m_tableSize;
1021         ValueType* oldTable = m_table;
1022
1023 #if DUMP_HASHTABLE_STATS
1024         if (oldTableSize != 0)
1025             atomicIncrement(&HashTableStats::numRehashes);
1026 #endif
1027
1028 #if DUMP_HASHTABLE_STATS_PER_TABLE
1029         if (oldTableSize != 0)
1030             ++m_stats->numRehashes;
1031 #endif
1032
1033         m_table = allocateTable(newTableSize);
1034         m_tableSize = newTableSize;
1035
1036         Value* newEntry = 0;
1037         for (unsigned i = 0; i != oldTableSize; ++i) {
1038             if (isEmptyOrDeletedBucket(oldTable[i])) {
1039                 ASSERT(&oldTable[i] != entry);
1040                 continue;
1041             }
1042
1043             Value* reinsertedEntry = reinsert(oldTable[i]);
1044             if (&oldTable[i] == entry) {
1045                 ASSERT(!newEntry);
1046                 newEntry = reinsertedEntry;
1047             }
1048         }
1049
1050         m_deletedCount = 0;
1051
1052         deleteAllBucketsAndDeallocate(oldTable, oldTableSize);
1053
1054         return newEntry;
1055     }
1056
1057     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1058     void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::clear()
1059     {
1060         registerModification();
1061         if (!m_table)
1062             return;
1063
1064         deleteAllBucketsAndDeallocate(m_table, m_tableSize);
1065         m_table = 0;
1066         m_tableSize = 0;
1067         m_keyCount = 0;
1068     }
1069
1070     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1071     HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::HashTable(const HashTable& other)
1072         : m_table(0)
1073         , m_tableSize(0)
1074         , m_keyCount(0)
1075         , m_deletedCount(0)
1076         , m_queueFlag(false)
1077 #if ENABLE(ASSERT)
1078         , m_modifications(0)
1079 #endif
1080 #if DUMP_HASHTABLE_STATS_PER_TABLE
1081         , m_stats(adoptPtr(new Stats(*other.m_stats)))
1082 #endif
1083     {
1084         // Copy the hash table the dumb way, by adding each element to the new table.
1085         // It might be more efficient to copy the table slots, but it's not clear that efficiency is needed.
1086         const_iterator end = other.end();
1087         for (const_iterator it = other.begin(); it != end; ++it)
1088             add(*it);
1089     }
1090
1091     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1092     void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::swap(HashTable& other)
1093     {
1094         std::swap(m_table, other.m_table);
1095         std::swap(m_tableSize, other.m_tableSize);
1096         std::swap(m_keyCount, other.m_keyCount);
1097         // std::swap does not work for bit fields.
1098         unsigned deleted = m_deletedCount;
1099         m_deletedCount = other.m_deletedCount;
1100         other.m_deletedCount = deleted;
1101         ASSERT(!m_queueFlag);
1102         ASSERT(!other.m_queueFlag);
1103
1104 #if ENABLE(ASSERT)
1105         std::swap(m_modifications, other.m_modifications);
1106 #endif
1107
1108 #if DUMP_HASHTABLE_STATS_PER_TABLE
1109         m_stats.swap(other.m_stats);
1110 #endif
1111     }
1112
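    // Assignment uses the copy-and-swap idiom: build a copy, then swap it into
    // place, so this table is left unchanged if copying the other one fails.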
1113     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1114     HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>& HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::operator=(const HashTable& other)
1115     {
1116         HashTable tmp(other);
1117         swap(tmp);
1118         return *this;
1119     }
1120
1121     template<WeakHandlingFlag weakHandlingFlag, typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1122     struct WeakProcessingHashTableHelper;
1123
1124     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1125     struct WeakProcessingHashTableHelper<NoWeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
1126         static void process(typename Allocator::Visitor* visitor, void* closure) { }
1127         static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure) { }
1128         static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure) { }
1129     };
1130
1131     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1132     struct WeakProcessingHashTableHelper<WeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
1133         // Used for purely weak and for weak-and-strong tables (ephemerons).
1134         static void process(typename Allocator::Visitor* visitor, void* closure)
1135         {
1136             typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
1137             HashTableType* table = reinterpret_cast<HashTableType*>(closure);
1138             if (table->m_table) {
1139                 // This just marks it live and does not push anything onto the
1140                 // marking stack.
1141                 Allocator::markNoTracing(visitor, table->m_table);
1142                 // Now perform weak processing (this is a no-op if the backing
1143                 // was accessible through an iterator and was already marked
1144                 // strongly).
1145                 typedef typename HashTableType::ValueType ValueType;
1146                 for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
1147                     if (!HashTableType::isEmptyOrDeletedBucket(*element)) {
1148                         // At this stage calling trace can make no difference
1149                         // (everything is already traced), but we use the
1150                         // return value to remove things from the collection.
1151                         if (TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element)) {
1152                             table->registerModification();
1153                             HashTableType::deleteBucket(*element); // Also calls the destructor.
1154                             table->m_deletedCount++;
1155                             table->m_keyCount--;
1156                             // We don't rehash the backing until the next add
1157                             // or delete, because that would cause allocation
1158                             // during GC.
1159                         }
1160                     }
1161                 }
1162             }
1163         }
1164
1165         // Called repeatedly for tables that have both weak and strong pointers.
1166         static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure)
1167         {
1168             typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
1169             HashTableType* table = reinterpret_cast<HashTableType*>(closure);
1170             if (table->m_table) {
1171                 // Check the hash table for elements that we now know will not
1172                 // be removed by weak processing. Those elements need to have
1173                 // their strong pointers traced.
1174                 typedef typename HashTableType::ValueType ValueType;
1175                 for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
1176                     if (!HashTableType::isEmptyOrDeletedBucket(*element))
1177                         TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element);
1178                 }
1179             }
1180         }
1181
1182         // Called when the ephemeron iteration is done and before running the per thread
1183         // weak processing. It is guaranteed to be called before any thread is resumed.
1184         static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure)
1185         {
1186             typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
1187             HashTableType* table = reinterpret_cast<HashTableType*>(closure);
1188             ASSERT(Allocator::weakTableRegistered(visitor, table));
1189             table->clearEnqueued();
1190         }
1191     };
1192
1193     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1194     void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::trace(typename Allocator::Visitor* visitor)
1195     {
1196         // If someone else already marked the backing and queued up the trace
1197         // and/or weak callback then we are done. This optimization does not
1198         // happen for ListHashSet since its iterator does not point at the
1199         // backing.
1200         if (!m_table || visitor->isAlive(m_table))
1201             return;
1202         // Normally, we mark the backing store without performing trace. This
1203         // means it is marked live, but the pointers inside it are not marked.
1204         // Instead we will mark the pointers below. However, for backing
1205         // stores that contain weak pointers the handling is rather different.
1206         // We don't mark the backing store here, so the marking GC will leave
1207         // the backing unmarked. If the backing is found in any other way than
1208         // through its HashTable (i.e. from an iterator) then the mark bit will
1209         // be set and the pointers will be marked strongly, avoiding problems
1210         // with iterating over things that disappear due to weak processing
1211         // while we are iterating over them. The weakProcessing callback will
1212         // mark the backing as a void pointer, and will perform weak processing
1213         // if needed.
1214         if (Traits::weakHandlingFlag == NoWeakHandlingInCollections)
1215             Allocator::markNoTracing(visitor, m_table);
1216         else
1217             Allocator::registerWeakMembers(visitor, this, m_table, WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::process);
1218         if (ShouldBeTraced<Traits>::value) {
1219             if (Traits::weakHandlingFlag == WeakHandlingInCollections) {
1220                 // If we have both strong and weak pointers in the collection
1221                 // then we queue up the collection for fixed-point iteration a
1222                 // la Ephemerons:
1223                 // http://dl.acm.org/citation.cfm?doid=263698.263733 - see also
1224                 // http://www.jucs.org/jucs_14_21/eliminating_cycles_in_weak
1225                 ASSERT(!enqueued() || Allocator::weakTableRegistered(visitor, this));
1226                 if (!enqueued()) {
1227                     Allocator::registerWeakTable(visitor, this,
1228                         WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIteration,
1229                         WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIterationDone);
1230                     setEnqueued();
1231                 }
1232                 // We don't need to trace the elements here, since registering
1233                 // as a weak table above will cause them to be traced (perhaps
1234                 // several times). It's better to wait until everything else is
1235                 // traced before tracing the elements for the first time; this
1236                 // may reduce (by one) the number of iterations needed to get
1237                 // to a fixed point.
1238                 return;
1239             }
1240             for (ValueType* element = m_table + m_tableSize - 1; element >= m_table; element--) {
1241                 if (!isEmptyOrDeletedBucket(*element))
1242                     Allocator::template trace<ValueType, Traits>(visitor, *element);
1243             }
1244         }
1245     }
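
    // Rough sketch of the two paths taken by trace() above (a non-normative
    // note; HeapHashMap, Member and WeakMember are assumed to be the usual
    // Oilpan collection and handle types, they are not defined in this header):
    //
    //   HeapHashMap<Member<K>, Member<V>>      - no weak handling: the backing
    //       is marked here and every live bucket is traced immediately.
    //   HeapHashMap<WeakMember<K>, Member<V>>  - weak handling: the backing is
    //       left unmarked, the table is registered as a weak/ephemeron table,
    //       and buckets are traced during the fixed-point iteration instead.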
1246
1247     // iterator adapters
1248
1249     template<typename HashTableType, typename Traits> struct HashTableConstIteratorAdapter {
1250         HashTableConstIteratorAdapter() {}
1251         HashTableConstIteratorAdapter(const typename HashTableType::const_iterator& impl) : m_impl(impl) {}
1252         typedef typename Traits::IteratorConstGetType GetType;
1253         typedef typename HashTableType::ValueTraits::IteratorConstGetType SourceGetType;
1254
1255         GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
1256         typename Traits::IteratorConstReferenceType operator*() const { return Traits::getToReferenceConstConversion(get()); }
1257         GetType operator->() const { return get(); }
1258
1259         HashTableConstIteratorAdapter& operator++() { ++m_impl; return *this; }
1260         // postfix ++ intentionally omitted
1261
1262         typename HashTableType::const_iterator m_impl;
1263     };
1264
1265     template<typename HashTableType, typename Traits> struct HashTableIteratorAdapter {
1266         typedef typename Traits::IteratorGetType GetType;
1267         typedef typename HashTableType::ValueTraits::IteratorGetType SourceGetType;
1268
1269         HashTableIteratorAdapter() {}
1270         HashTableIteratorAdapter(const typename HashTableType::iterator& impl) : m_impl(impl) {}
1271
1272         GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
1273         typename Traits::IteratorReferenceType operator*() const { return Traits::getToReferenceConversion(get()); }
1274         GetType operator->() const { return get(); }
1275
1276         HashTableIteratorAdapter& operator++() { ++m_impl; return *this; }
1277         // postfix ++ intentionally omitted
1278
1279         operator HashTableConstIteratorAdapter<HashTableType, Traits>()
1280         {
1281             typename HashTableType::const_iterator i = m_impl;
1282             return i;
1283         }
1284
1285         typename HashTableType::iterator m_impl;
1286     };
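
    // Usage sketch for the adapters above (illustrative only): the higher-level
    // collections expose these as their iterator types, so the traits
    // conversion in get()/operator* decides what callers see when they walk a
    // HashSet or HashMap. For example (names local to this comment):
    //
    //   HashSet<int> numbers;
    //   numbers.add(42);
    //   for (HashSet<int>::const_iterator it = numbers.begin(); it != numbers.end(); ++it)
    //       use(*it); // dereference goes through HashTableConstIteratorAdapter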
1287
1288     template<typename T, typename U>
1289     inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
1290     {
1291         return a.m_impl == b.m_impl;
1292     }
1293
1294     template<typename T, typename U>
1295     inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
1296     {
1297         return a.m_impl != b.m_impl;
1298     }
1299
1300     template<typename T, typename U>
1301     inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
1302     {
1303         return a.m_impl == b.m_impl;
1304     }
1305
1306     template<typename T, typename U>
1307     inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
1308     {
1309         return a.m_impl != b.m_impl;
1310     }
1311
1312     // All 4 mixed combinations of ==, != with const and non-const iterator adapters.
1313     template<typename T, typename U>
1314     inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
1315     {
1316         return a.m_impl == b.m_impl;
1317     }
1318
1319     template<typename T, typename U>
1320     inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
1321     {
1322         return a.m_impl != b.m_impl;
1323     }
1324
1325     template<typename T, typename U>
1326     inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
1327     {
1328         return a.m_impl == b.m_impl;
1329     }
1330
1331     template<typename T, typename U>
1332     inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
1333     {
1334         return a.m_impl != b.m_impl;
1335     }
1336
1337     template<typename Collection1, typename Collection2>
1338     inline void removeAll(Collection1& collection, const Collection2& toBeRemoved)
1339     {
1340         if (collection.isEmpty() || toBeRemoved.isEmpty())
1341             return;
1342         typedef typename Collection2::const_iterator CollectionIterator;
1343         CollectionIterator end(toBeRemoved.end());
1344         for (CollectionIterator it(toBeRemoved.begin()); it != end; ++it)
1345             collection.remove(*it);
1346     }
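
    // Usage sketch for removeAll() (illustrative; the containers are just an
    // example, any pair supporting isEmpty(), const iteration and remove()
    // will do):
    //
    //   HashSet<int> open;
    //   HashSet<int> closed;
    //   ...
    //   removeAll(open, closed); // drops every element of 'closed' from 'open'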
1347
1348 } // namespace WTF
1349
1350 #include "wtf/HashIterators.h"
1351
1352 #endif // WTF_HashTable_h