Upstream version 10.39.225.0
src/third_party/WebKit/Source/wtf/HashTable.h
index e5f01ac..ba73d84 100644
 #include "wtf/DataLog.h"
 #endif
 
+#if DUMP_HASHTABLE_STATS
+#if DUMP_HASHTABLE_STATS_PER_TABLE
+#define UPDATE_PROBE_COUNTS()                            \
+    ++probeCount;                                        \
+    HashTableStats::recordCollisionAtCount(probeCount);  \
+    ++perTableProbeCount;                                \
+    m_stats->recordCollisionAtCount(perTableProbeCount)
+#define UPDATE_ACCESS_COUNTS()                           \
+    atomicIncrement(&HashTableStats::numAccesses);       \
+    int probeCount = 0;                                  \
+    ++m_stats->numAccesses;                              \
+    int perTableProbeCount = 0
+#else
+#define UPDATE_PROBE_COUNTS()                            \
+    ++probeCount;                                        \
+    HashTableStats::recordCollisionAtCount(probeCount)
+#define UPDATE_ACCESS_COUNTS()                           \
+    atomicIncrement(&HashTableStats::numAccesses);       \
+    int probeCount = 0
+#endif
+#else
+#if DUMP_HASHTABLE_STATS_PER_TABLE
+#define UPDATE_PROBE_COUNTS()                            \
+    ++perTableProbeCount;                                \
+    m_stats->recordCollisionAtCount(perTableProbeCount)
+#define UPDATE_ACCESS_COUNTS()                           \
+    ++m_stats->numAccesses;                              \
+    int perTableProbeCount = 0
+#else
+#define UPDATE_PROBE_COUNTS() do { } while (0)
+#define UPDATE_ACCESS_COUNTS() do { } while (0)
+#endif
+#endif
+
 namespace WTF {
 
 #if DUMP_HASHTABLE_STATS
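
The nested #if ladder above yields four expansions of the same two macros; call sites stay identical in every configuration because UPDATE_ACCESS_COUNTS() declares its own counter locals and both macros compile down to no-ops when stats are off. A standalone sketch of the same pattern (flag and names invented for illustration):

    #include <cstdio>

    #define DUMP_STATS 1
    #if DUMP_STATS
    #define UPDATE_ACCESS() int probeCount = 0
    #define UPDATE_PROBE() std::printf("collision %d\n", ++probeCount)
    #else
    #define UPDATE_ACCESS() do { } while (0)
    #define UPDATE_PROBE() do { } while (0)
    #endif

    int main()
    {
        UPDATE_ACCESS();  // declares probeCount only when stats are enabled
        UPDATE_PROBE();   // each collision bumps and records the counter
        UPDATE_PROBE();
        return 0;
    }
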
@@ -61,7 +95,9 @@ namespace WTF {
     class HashTableIterator;
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
     class HashTableConstIterator;
-    template<bool x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z>
+    template<typename Value, typename HashFunctions, typename HashTraits, typename Allocator>
+    class LinkedHashSet;
+    template<WeakHandlingFlag x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z>
     struct WeakProcessingHashTableHelper;
 
     typedef enum { HashItemKnownGood } HashItemKnownGoodTag;
@@ -73,7 +109,7 @@ namespace WTF {
         typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
         typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
         typedef Value ValueType;
-        typedef const ValueType& ReferenceType;
+        typedef typename Traits::IteratorConstGetType GetType;
         typedef const ValueType* PointerType;
 
         friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;
@@ -85,15 +121,35 @@ namespace WTF {
                 ++m_position;
         }
 
-        HashTableConstIterator(const HashTableType* table, PointerType position, PointerType endPosition)
-            : m_position(position), m_endPosition(endPosition)
+        HashTableConstIterator(PointerType position, PointerType endPosition, const HashTableType* container)
+            : m_position(position)
+            , m_endPosition(endPosition)
+#if ENABLE(ASSERT)
+            , m_container(container)
+            , m_containerModifications(container->modifications())
+#endif
         {
             skipEmptyBuckets();
         }
 
-        HashTableConstIterator(const HashTableType* table, PointerType position, PointerType endPosition, HashItemKnownGoodTag)
-            : m_position(position), m_endPosition(endPosition)
+        HashTableConstIterator(PointerType position, PointerType endPosition, const HashTableType* container, HashItemKnownGoodTag)
+            : m_position(position)
+            , m_endPosition(endPosition)
+#if ENABLE(ASSERT)
+            , m_container(container)
+            , m_containerModifications(container->modifications())
+#endif
         {
+            ASSERT(m_containerModifications == m_container->modifications());
+        }
+
+        void checkModifications() const
+        {
+            // HashTable and collections that build on it do not support
+            // modifications while there is an iterator in use. The exception
+            // is ListHashSet, which has its own iterators that tolerate
+            // modification of the underlying set.
+            ASSERT(m_containerModifications == m_container->modifications());
         }
 
     public:
@@ -101,16 +157,18 @@ namespace WTF {
         {
         }
 
-        PointerType get() const
+        GetType get() const
         {
+            checkModifications();
             return m_position;
         }
-        ReferenceType operator*() const { return *get(); }
-        PointerType operator->() const { return get(); }
+        typename Traits::IteratorConstReferenceType operator*() const { return Traits::getToReferenceConstConversion(get()); }
+        GetType operator->() const { return get(); }
 
         const_iterator& operator++()
         {
             ASSERT(m_position != m_endPosition);
+            checkModifications();
             ++m_position;
             skipEmptyBuckets();
             return *this;
@@ -139,6 +197,10 @@ namespace WTF {
     private:
         PointerType m_position;
         PointerType m_endPosition;
+#if ENABLE(ASSERT)
+        const HashTableType* m_container;
+        int64_t m_containerModifications;
+#endif
     };
 
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
@@ -153,8 +215,8 @@ namespace WTF {
 
         friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;
 
-        HashTableIterator(HashTableType* table, PointerType pos, PointerType end) : m_iterator(table, pos, end) { }
-        HashTableIterator(HashTableType* table, PointerType pos, PointerType end, HashItemKnownGoodTag tag) : m_iterator(table, pos, end, tag) { }
+        HashTableIterator(PointerType pos, PointerType end, const HashTableType* container) : m_iterator(pos, end, container) { }
+        HashTableIterator(PointerType pos, PointerType end, const HashTableType* container, HashItemKnownGoodTag tag) : m_iterator(pos, end, container, tag) { }
 
     public:
         HashTableIterator() { }
@@ -195,21 +257,67 @@ namespace WTF {
         swap(a.value, b.value);
     }
 
-    template<typename T, bool useSwap> struct Mover;
-    template<typename T> struct Mover<T, true> { static void move(T& from, T& to) { hashTableSwap(from, to); } };
-    template<typename T> struct Mover<T, false> { static void move(T& from, T& to) { to = from; } };
+    template<typename T, typename Allocator, bool useSwap> struct Mover;
+    template<typename T, typename Allocator> struct Mover<T, Allocator, true> {
+        static void move(T& from, T& to)
+        {
+            // A swap operation should not normally allocate, but it may fall
+            // back on a triple assignment in the style of t = a; a = b; b = t
+            // if there is no overloaded swap operation. We cannot allow
+            // allocation here, not only because it is slower than a true
+            // swap, but also because allocation implies allowing GC: we
+            // cannot allow a GC after swapping only the key. The value is
+            // only traced if the key is present, so the GC would not see the
+            // value in the old backing once the key has moved to the new
+            // backing. Therefore, GC must be disallowed until both key and
+            // value have been moved.
+            Allocator::enterNoAllocationScope();
+            hashTableSwap(from, to);
+            Allocator::leaveNoAllocationScope();
+        }
+    };
+    template<typename T, typename Allocator> struct Mover<T, Allocator, false> {
+        static void move(T& from, T& to) { to = from; }
+    };
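
The two specializations above select the move strategy at compile time from the useSwap flag. A self-contained sketch of the same dispatch (names invented; the allocator no-allocation scope guard is omitted):

    #include <cassert>
    #include <utility>

    template<typename T, bool useSwap> struct MoverSketch;
    template<typename T> struct MoverSketch<T, true> {
        static void move(T& from, T& to) { std::swap(from, to); }  // swap path
    };
    template<typename T> struct MoverSketch<T, false> {
        static void move(T& from, T& to) { to = from; }            // copy path
    };

    int main()
    {
        int a = 1, b = 2;
        MoverSketch<int, true>::move(a, b);   // swaps: a == 2, b == 1
        assert(a == 2 && b == 1);
        MoverSketch<int, false>::move(a, b);  // copies: b == a == 2
        assert(b == 2);
        return 0;
    }
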
 
     template<typename HashFunctions> class IdentityHashTranslator {
     public:
         template<typename T> static unsigned hash(const T& key) { return HashFunctions::hash(key); }
         template<typename T, typename U> static bool equal(const T& a, const U& b) { return HashFunctions::equal(a, b); }
-        template<typename T, typename U> static void translate(T& location, const U&, const T& value) { location = value; }
+        template<typename T, typename U, typename V> static void translate(T& location, const U&, const V& value) { location = value; }
     };
 
-    template<typename IteratorType> struct HashTableAddResult {
-        HashTableAddResult(IteratorType iter, bool isNewEntry) : iterator(iter), isNewEntry(isNewEntry) { }
-        IteratorType iterator;
+    template<typename HashTableType, typename ValueType> struct HashTableAddResult {
+        HashTableAddResult(const HashTableType* container, ValueType* storedValue, bool isNewEntry)
+            : storedValue(storedValue)
+            , isNewEntry(isNewEntry)
+#if ENABLE(SECURITY_ASSERT)
+            , m_container(container)
+            , m_containerModifications(container->modifications())
+#endif
+        {
+            ASSERT_UNUSED(container, container);
+        }
+
+        ~HashTableAddResult()
+        {
+            // If a rehash happened before storedValue was accessed, that
+            // access was a use-after-free. Any modification may cause a
+            // rehash, so we check for modifications here. A rehash after
+            // storedValue has been accessed is harmless, but this will still
+            // assert if the AddResult destructor runs after a modification;
+            // you may need to limit the scope of the AddResult.
+            ASSERT_WITH_SECURITY_IMPLICATION(m_containerModifications == m_container->modifications());
+        }
+
+        ValueType* storedValue;
         bool isNewEntry;
+
+#if ENABLE(SECURITY_ASSERT)
+    private:
+        const HashTableType* m_container;
+        const int64_t m_containerModifications;
+#endif
     };
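
A hedged usage sketch of the lifetime rule this destructor enforces (WTF::HashMap is assumed here purely for illustration; any collection built on this HashTable behaves the same):

    void example(WTF::HashMap<int, int>& map)
    {
        {
            // Keep the AddResult's scope tight: its destructor asserts that
            // no modification (and hence no possible rehash) happened while
            // it was alive.
            WTF::HashMap<int, int>::AddResult result = map.add(1, 100);
            if (!result.isNewEntry)
                result.storedValue->value = 100;  // safe: no rehash since add()
        }  // AddResult destroyed here, before any further mutation
        map.add(2, 200);  // fine now; inside the block above it would assert
    }
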
 
     template<typename Value, typename Extractor, typename KeyTraits>
@@ -219,8 +327,41 @@ namespace WTF {
         static bool isEmptyOrDeletedBucket(const Value& value) { return isEmptyBucket(value) || isDeletedBucket(value); }
     };
 
+    template<typename HashTranslator, typename KeyTraits, bool safeToCompareToEmptyOrDeleted>
+    struct HashTableKeyChecker {
+        // There is no simple, generic way to perform this check when
+        // safeToCompareToEmptyOrDeleted is false, so the check always passes.
+        template <typename T>
+        static bool checkKey(const T&) { return true; }
+    };
+
+    template<typename HashTranslator, typename KeyTraits>
+    struct HashTableKeyChecker<HashTranslator, KeyTraits, true> {
+        template <typename T>
+        static bool checkKey(const T& key)
+        {
+            // FIXME: Also check equality to the deleted value.
+            return !HashTranslator::equal(KeyTraits::emptyValue(), key);
+        }
+    };
+
+    // Don't declare a destructor for HeapAllocated hash tables.
+    template<typename Derived, bool isGarbageCollected>
+    class HashTableDestructorBase;
+
+    template<typename Derived>
+    class HashTableDestructorBase<Derived, true> { };
+
+    template<typename Derived>
+    class HashTableDestructorBase<Derived, false> {
+    public:
+        ~HashTableDestructorBase() { static_cast<Derived*>(this)->finalize(); }
+    };
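
The same trick standalone (names invented): deriving from the false specialization injects a destructor that forwards to finalize(), while the true specialization deliberately adds nothing, so garbage-collected tables never run one.

    #include <cstdio>

    template<typename Derived, bool isGarbageCollected> class DtorBase;
    template<typename Derived> class DtorBase<Derived, true> { };
    template<typename Derived> class DtorBase<Derived, false> {
    public:
        ~DtorBase() { static_cast<Derived*>(this)->finalize(); }
    };

    struct Table : DtorBase<Table, false> {
        void finalize() { std::puts("freed backing store"); }
    };

    int main()
    {
        Table t;  // prints "freed backing store" when t leaves scope
        return 0;
    }
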
+
+    // Note: empty or deleted key values are not allowed; using them may lead to undefined behavior.
+    // For pointer keys this means that null pointers are not allowed unless you supply custom key traits.
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
-    class HashTable {
+    class HashTable : public HashTableDestructorBase<HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>, Allocator::isGarbageCollected> {
     public:
         typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
         typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
@@ -229,9 +370,11 @@ namespace WTF {
         typedef typename KeyTraits::PeekInType KeyPeekInType;
         typedef typename KeyTraits::PassInType KeyPassInType;
         typedef Value ValueType;
-        typedef typename Traits::PeekInType ValuePeekInType;
+        typedef Extractor ExtractorType;
+        typedef KeyTraits KeyTraitsType;
+        typedef typename Traits::PassInType ValuePassInType;
         typedef IdentityHashTranslator<HashFunctions> IdentityTranslatorType;
-        typedef HashTableAddResult<iterator> AddResult;
+        typedef HashTableAddResult<HashTable, ValueType> AddResult;
 
 #if DUMP_HASHTABLE_STATS_PER_TABLE
         struct Stats {
@@ -279,11 +422,12 @@ namespace WTF {
 #endif
 
         HashTable();
-        ~HashTable()
+        void finalize()
         {
+            ASSERT(!Allocator::isGarbageCollected);
             if (LIKELY(!m_table))
                 return;
-            deallocateTable(m_table, m_tableSize);
+            deleteAllBucketsAndDeallocate(m_table, m_tableSize);
             m_table = 0;
         }
 
@@ -303,7 +447,7 @@ namespace WTF {
         unsigned capacity() const { return m_tableSize; }
         bool isEmpty() const { return !m_keyCount; }
 
-        AddResult add(ValuePeekInType value)
+        AddResult add(ValuePassInType value)
         {
             return add<IdentityTranslatorType>(Extractor::extract(value), value);
         }
@@ -331,14 +475,29 @@ namespace WTF {
         static bool isDeletedBucket(const ValueType& value) { return KeyTraits::isDeletedValue(Extractor::extract(value)); }
         static bool isEmptyOrDeletedBucket(const ValueType& value) { return HashTableHelper<ValueType, Extractor, KeyTraits>::isEmptyOrDeletedBucket(value); }
 
-        ValueType* lookup(KeyPeekInType key) { return lookup<IdentityTranslatorType>(key); }
-        template<typename HashTranslator, typename T> ValueType* lookup(const T&);
+        ValueType* lookup(KeyPeekInType key) { return lookup<IdentityTranslatorType, KeyPeekInType>(key); }
+        template<typename HashTranslator, typename T> ValueType* lookup(T);
+        template<typename HashTranslator, typename T> const ValueType* lookup(T) const;
 
         void trace(typename Allocator::Visitor*);
 
+#if ENABLE(ASSERT)
+        int64_t modifications() const { return m_modifications; }
+        void registerModification() { m_modifications++; }
+        // HashTable and collections that build on it do not support
+        // modifications while there is an iterator in use. The exception is
+        // ListHashSet, which has its own iterators that tolerate modification
+        // of the underlying set.
+        void checkModifications(int64_t mods) const { ASSERT(mods == m_modifications); }
+#else
+        int64_t modifications() const { return 0; }
+        void registerModification() { }
+        void checkModifications(int64_t mods) const { }
+#endif
+
     private:
         static ValueType* allocateTable(unsigned size);
-        static void deallocateTable(ValueType* table, unsigned size);
+        static void deleteAllBucketsAndDeallocate(ValueType* table, unsigned size);
 
         typedef std::pair<ValueType*, bool> LookupType;
         typedef std::pair<LookupType, unsigned> FullLookupType;
@@ -351,39 +510,61 @@ namespace WTF {
 
         bool shouldExpand() const { return (m_keyCount + m_deletedCount) * m_maxLoad >= m_tableSize; }
         bool mustRehashInPlace() const { return m_keyCount * m_minLoad < m_tableSize * 2; }
-        bool shouldShrink() const { return m_keyCount * m_minLoad < m_tableSize && m_tableSize > KeyTraits::minimumTableSize; }
-        void expand();
-        void shrink() { rehash(m_tableSize / 2); }
+        bool shouldShrink() const
+        {
+            // The isAllocationAllowed check comes last because it is
+            // expensive.
+            return m_keyCount * m_minLoad < m_tableSize
+                && m_tableSize > KeyTraits::minimumTableSize
+                && Allocator::isAllocationAllowed();
+        }
+        ValueType* expand(ValueType* entry = 0);
+        void shrink() { rehash(m_tableSize / 2, 0); }
 
-        void rehash(unsigned newTableSize);
-        void reinsert(ValueType&);
+        ValueType* rehash(unsigned newTableSize, ValueType* entry);
+        ValueType* reinsert(ValueType&);
 
         static void initializeBucket(ValueType& bucket);
-        static void deleteBucket(ValueType& bucket) { bucket.~ValueType(); Traits::constructDeletedValue(bucket); }
+        static void deleteBucket(ValueType& bucket) { bucket.~ValueType(); Traits::constructDeletedValue(bucket, Allocator::isGarbageCollected); }
 
         FullLookupType makeLookupResult(ValueType* position, bool found, unsigned hash)
             { return FullLookupType(LookupType(position, found), hash); }
 
-        iterator makeIterator(ValueType* pos) { return iterator(this, pos, m_table + m_tableSize); }
-        const_iterator makeConstIterator(ValueType* pos) const { return const_iterator(this, pos, m_table + m_tableSize); }
-        iterator makeKnownGoodIterator(ValueType* pos) { return iterator(this, pos, m_table + m_tableSize, HashItemKnownGood); }
-        const_iterator makeKnownGoodConstIterator(ValueType* pos) const { return const_iterator(this, pos, m_table + m_tableSize, HashItemKnownGood); }
+        iterator makeIterator(ValueType* pos) { return iterator(pos, m_table + m_tableSize, this); }
+        const_iterator makeConstIterator(ValueType* pos) const { return const_iterator(pos, m_table + m_tableSize, this); }
+        iterator makeKnownGoodIterator(ValueType* pos) { return iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }
+        const_iterator makeKnownGoodConstIterator(ValueType* pos) const { return const_iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }
 
         static const unsigned m_maxLoad = 2;
         static const unsigned m_minLoad = 6;
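
Concretely: with m_maxLoad = 2, shouldExpand() fires once (m_keyCount + m_deletedCount) * 2 >= m_tableSize, i.e. at 50% occupancy counting tombstones; with m_minLoad = 6, shouldShrink() fires once m_keyCount * 6 < m_tableSize, i.e. below one-sixth occupancy. For a 64-slot table that means expanding at 32 occupied-or-deleted slots and shrinking to 32 slots at 10 or fewer live entries.
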
 
+        unsigned tableSizeMask() const
+        {
+            size_t mask = m_tableSize - 1;
+            ASSERT((mask & m_tableSize) == 0);
+            return mask;
+        }
+
+        void setEnqueued() { m_queueFlag = true; }
+        void clearEnqueued() { m_queueFlag = false; }
+        bool enqueued() { return m_queueFlag; }
+
         ValueType* m_table;
         unsigned m_tableSize;
-        unsigned m_tableSizeMask;
         unsigned m_keyCount;
-        unsigned m_deletedCount;
+        unsigned m_deletedCount:31;
+        bool m_queueFlag:1;
+#if ENABLE(ASSERT)
+        unsigned m_modifications;
+#endif
 
 #if DUMP_HASHTABLE_STATS_PER_TABLE
     public:
         mutable OwnPtr<Stats> m_stats;
 #endif
 
-        template<bool x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z> friend struct WeakProcessingHashTableHelper;
+        template<WeakHandlingFlag x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z> friend struct WeakProcessingHashTableHelper;
+        template<typename T, typename U, typename V, typename W> friend class LinkedHashSet;
     };
 
     // Set all the bits to one after the most significant bit: 00110101010 -> 00111111111.
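
The standard trick behind that transformation smears the most significant set bit into every lower position by repeated shift-and-OR. A standalone sketch (function name invented; assumes 32-bit unsigned):

    #include <cassert>

    unsigned fillBitsBelowMostSignificant(unsigned v)
    {
        v |= v >> 1;   // the top run of set bits doubles in width each step
        v |= v >> 2;
        v |= v >> 4;
        v |= v >> 8;
        v |= v >> 16;  // five steps cover all 32 bits
        return v;
    }

    int main()
    {
        assert(fillBitsBelowMostSignificant(0x1AA) == 0x1FF);  // 00110101010 -> 00111111111
        return 0;
    }
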
@@ -428,9 +609,12 @@ namespace WTF {
     inline HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::HashTable()
         : m_table(0)
         , m_tableSize(0)
-        , m_tableSizeMask(0)
         , m_keyCount(0)
         , m_deletedCount(0)
+        , m_queueFlag(false)
+#if ENABLE(ASSERT)
+        , m_modifications(0)
+#endif
 #if DUMP_HASHTABLE_STATS_PER_TABLE
         , m_stats(adoptPtr(new Stats))
 #endif
@@ -449,31 +633,30 @@ namespace WTF {
 
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
     template<typename HashTranslator, typename T>
-    inline Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(const T& key)
+    inline Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key)
     {
-        ValueType* table = m_table;
+        return const_cast<Value*>(const_cast<const HashTable*>(this)->lookup<HashTranslator, T>(key));
+    }
+
+    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
+    template<typename HashTranslator, typename T>
+    inline const Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key) const
+    {
+        ASSERT((HashTableKeyChecker<HashTranslator, KeyTraits, HashFunctions::safeToCompareToEmptyOrDeleted>::checkKey(key)));
+        const ValueType* table = m_table;
         if (!table)
             return 0;
 
         size_t k = 0;
-        size_t sizeMask = m_tableSizeMask;
+        size_t sizeMask = tableSizeMask();
         unsigned h = HashTranslator::hash(key);
         size_t i = h & sizeMask;
 
-#if DUMP_HASHTABLE_STATS
-        atomicIncrement(&HashTableStats::numAccesses);
-        int probeCount = 0;
-#endif
-
-#if DUMP_HASHTABLE_STATS_PER_TABLE
-        ++m_stats->numAccesses;
-        int perTableProbeCount = 0;
-#endif
+        UPDATE_ACCESS_COUNTS();
 
         while (1) {
-            ValueType* entry = table + i;
+            const ValueType* entry = table + i;
 
-            // we count on the compiler to optimize out this branch
             if (HashFunctions::safeToCompareToEmptyOrDeleted) {
                 if (HashTranslator::equal(Extractor::extract(*entry), key))
                     return entry;
@@ -487,16 +670,7 @@ namespace WTF {
                 if (!isDeletedBucket(*entry) && HashTranslator::equal(Extractor::extract(*entry), key))
                     return entry;
             }
-#if DUMP_HASHTABLE_STATS
-            ++probeCount;
-            HashTableStats::recordCollisionAtCount(probeCount);
-#endif
-
-#if DUMP_HASHTABLE_STATS_PER_TABLE
-            ++perTableProbeCount;
-            m_stats->recordCollisionAtCount(perTableProbeCount);
-#endif
-
+            UPDATE_PROBE_COUNTS();
             if (!k)
                 k = 1 | doubleHash(h);
             i = (i + k) & sizeMask;
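
The loop above is plain double hashing: the step k is computed lazily on the first collision and forced odd by OR-ing in 1, so it is coprime with the power-of-two table size and the probe walk visits every slot before repeating. A standalone trace with toy values (doubleHash() is not shown in this hunk, so the sketch substitutes an arbitrary odd step):

    #include <cstdio>

    int main()
    {
        const unsigned sizeMask = 7;  // table size 8, a power of two
        unsigned h = 42;              // primary hash
        unsigned k = 0;
        unsigned i = h & sizeMask;
        for (int probe = 0; probe < 8; ++probe) {
            std::printf("probe %d -> slot %u\n", probe, i);
            if (!k)
                k = 1 | (h >> 3);     // stand-in for 1 | doubleHash(h)
            i = (i + k) & sizeMask;   // visits slots 2, 7, 4, 1, 6, 3, 0, 5
        }
        return 0;
    }
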
@@ -508,57 +682,37 @@ namespace WTF {
     inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::LookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookupForWriting(const T& key)
     {
         ASSERT(m_table);
+        registerModification();
 
-        size_t k = 0;
         ValueType* table = m_table;
-        size_t sizeMask = m_tableSizeMask;
+        size_t k = 0;
+        size_t sizeMask = tableSizeMask();
         unsigned h = HashTranslator::hash(key);
         size_t i = h & sizeMask;
 
-#if DUMP_HASHTABLE_STATS
-        atomicIncrement(&HashTableStats::numAccesses);
-        int probeCount = 0;
-#endif
-
-#if DUMP_HASHTABLE_STATS_PER_TABLE
-        ++m_stats->numAccesses;
-        int perTableProbeCount = 0;
-#endif
+        UPDATE_ACCESS_COUNTS();
 
         ValueType* deletedEntry = 0;
 
         while (1) {
             ValueType* entry = table + i;
 
-            // we count on the compiler to optimize out this branch
-            if (HashFunctions::safeToCompareToEmptyOrDeleted) {
-                if (isEmptyBucket(*entry))
-                    return LookupType(deletedEntry ? deletedEntry : entry, false);
+            if (isEmptyBucket(*entry))
+                return LookupType(deletedEntry ? deletedEntry : entry, false);
 
+            if (HashFunctions::safeToCompareToEmptyOrDeleted) {
                 if (HashTranslator::equal(Extractor::extract(*entry), key))
                     return LookupType(entry, true);
 
                 if (isDeletedBucket(*entry))
                     deletedEntry = entry;
             } else {
-                if (isEmptyBucket(*entry))
-                    return LookupType(deletedEntry ? deletedEntry : entry, false);
-
                 if (isDeletedBucket(*entry))
                     deletedEntry = entry;
                 else if (HashTranslator::equal(Extractor::extract(*entry), key))
                     return LookupType(entry, true);
             }
-#if DUMP_HASHTABLE_STATS
-            ++probeCount;
-            HashTableStats::recordCollisionAtCount(probeCount);
-#endif
-
-#if DUMP_HASHTABLE_STATS_PER_TABLE
-            ++perTableProbeCount;
-            m_stats->recordCollisionAtCount(perTableProbeCount);
-#endif
-
+            UPDATE_PROBE_COUNTS();
             if (!k)
                 k = 1 | doubleHash(h);
             i = (i + k) & sizeMask;
@@ -570,57 +724,37 @@ namespace WTF {
     inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::FullLookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::fullLookupForWriting(const T& key)
     {
         ASSERT(m_table);
+        registerModification();
 
-        size_t k = 0;
         ValueType* table = m_table;
-        size_t sizeMask = m_tableSizeMask;
+        size_t k = 0;
+        size_t sizeMask = tableSizeMask();
         unsigned h = HashTranslator::hash(key);
         size_t i = h & sizeMask;
 
-#if DUMP_HASHTABLE_STATS
-        atomicIncrement(&HashTableStats::numAccesses);
-        int probeCount = 0;
-#endif
-
-#if DUMP_HASHTABLE_STATS_PER_TABLE
-        ++m_stats->numAccesses;
-        int perTableProbeCount = 0;
-#endif
+        UPDATE_ACCESS_COUNTS();
 
         ValueType* deletedEntry = 0;
 
         while (1) {
             ValueType* entry = table + i;
 
-            // we count on the compiler to optimize out this branch
-            if (HashFunctions::safeToCompareToEmptyOrDeleted) {
-                if (isEmptyBucket(*entry))
-                    return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h);
+            if (isEmptyBucket(*entry))
+                return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h);
 
+            if (HashFunctions::safeToCompareToEmptyOrDeleted) {
                 if (HashTranslator::equal(Extractor::extract(*entry), key))
                     return makeLookupResult(entry, true, h);
 
                 if (isDeletedBucket(*entry))
                     deletedEntry = entry;
             } else {
-                if (isEmptyBucket(*entry))
-                    return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h);
-
                 if (isDeletedBucket(*entry))
                     deletedEntry = entry;
                 else if (HashTranslator::equal(Extractor::extract(*entry), key))
                     return makeLookupResult(entry, true, h);
             }
-#if DUMP_HASHTABLE_STATS
-            ++probeCount;
-            HashTableStats::recordCollisionAtCount(probeCount);
-#endif
-
-#if DUMP_HASHTABLE_STATS_PER_TABLE
-            ++perTableProbeCount;
-            m_stats->recordCollisionAtCount(perTableProbeCount);
-#endif
-
+            UPDATE_PROBE_COUNTS();
             if (!k)
                 k = 1 | doubleHash(h);
             i = (i + k) & sizeMask;
@@ -649,101 +783,86 @@ namespace WTF {
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
     inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::initializeBucket(ValueType& bucket)
     {
+        // For hash maps the key and value cannot be initialized simultaneously,
+        // and it would be wrong to have a GC when only one was initialized and
+        // the other still contained garbage (e.g. from a previous use of the
+        // same slot). Therefore we forbid allocation (and thus GC) while the
+        // slot is initialized to an empty value.
+        Allocator::enterNoAllocationScope();
         HashTableBucketInitializer<Traits::emptyValueIsZero>::template initialize<Traits>(bucket);
+        Allocator::leaveNoAllocationScope();
     }
 
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
     template<typename HashTranslator, typename T, typename Extra>
     typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::add(const T& key, const Extra& extra)
     {
+        ASSERT(Allocator::isAllocationAllowed());
         if (!m_table)
             expand();
 
         ASSERT(m_table);
 
-        size_t k = 0;
         ValueType* table = m_table;
-        size_t sizeMask = m_tableSizeMask;
+        size_t k = 0;
+        size_t sizeMask = tableSizeMask();
         unsigned h = HashTranslator::hash(key);
         size_t i = h & sizeMask;
 
-#if DUMP_HASHTABLE_STATS
-        atomicIncrement(&HashTableStats::numAccesses);
-        int probeCount = 0;
-#endif
-
-#if DUMP_HASHTABLE_STATS_PER_TABLE
-        ++m_stats->numAccesses;
-        int perTableProbeCount = 0;
-#endif
+        UPDATE_ACCESS_COUNTS();
 
         ValueType* deletedEntry = 0;
         ValueType* entry;
         while (1) {
             entry = table + i;
 
-            // we count on the compiler to optimize out this branch
-            if (HashFunctions::safeToCompareToEmptyOrDeleted) {
-                if (isEmptyBucket(*entry))
-                    break;
+            if (isEmptyBucket(*entry))
+                break;
 
+            if (HashFunctions::safeToCompareToEmptyOrDeleted) {
                 if (HashTranslator::equal(Extractor::extract(*entry), key))
-                    return AddResult(makeKnownGoodIterator(entry), false);
+                    return AddResult(this, entry, false);
 
                 if (isDeletedBucket(*entry))
                     deletedEntry = entry;
             } else {
-                if (isEmptyBucket(*entry))
-                    break;
-
                 if (isDeletedBucket(*entry))
                     deletedEntry = entry;
                 else if (HashTranslator::equal(Extractor::extract(*entry), key))
-                    return AddResult(makeKnownGoodIterator(entry), false);
+                    return AddResult(this, entry, false);
             }
-#if DUMP_HASHTABLE_STATS
-            ++probeCount;
-            HashTableStats::recordCollisionAtCount(probeCount);
-#endif
-
-#if DUMP_HASHTABLE_STATS_PER_TABLE
-            ++perTableProbeCount;
-            m_stats->recordCollisionAtCount(perTableProbeCount);
-#endif
-
+            UPDATE_PROBE_COUNTS();
             if (!k)
                 k = 1 | doubleHash(h);
             i = (i + k) & sizeMask;
         }
 
+        registerModification();
+
         if (deletedEntry) {
+            // Overwrite any data left over from last use, using placement new
+            // or memset.
             initializeBucket(*deletedEntry);
             entry = deletedEntry;
             --m_deletedCount;
         }
 
         HashTranslator::translate(*entry, key, extra);
+        ASSERT(!isEmptyOrDeletedBucket(*entry));
 
         ++m_keyCount;
 
-        if (shouldExpand()) {
-            // FIXME: This makes an extra copy on expand. Probably not that bad since
-            // expand is rare, but would be better to have a version of expand that can
-            // follow a pivot entry and return the new position.
-            typename WTF::RemoveReference<KeyPassInType>::Type enteredKey = Extractor::extract(*entry);
-            expand();
-            AddResult result(find(enteredKey), true);
-            ASSERT(result.iterator != end());
-            return result;
-        }
+        if (shouldExpand())
+            entry = expand(entry);
 
-        return AddResult(makeKnownGoodIterator(entry), true);
+        return AddResult(this, entry, true);
     }
 
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
     template<typename HashTranslator, typename T, typename Extra>
     typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::addPassingHashCode(const T& key, const Extra& extra)
     {
+        ASSERT(Allocator::isAllocationAllowed());
         if (!m_table)
             expand();
 
@@ -754,7 +873,9 @@ namespace WTF {
         unsigned h = lookupResult.second;
 
         if (found)
-            return AddResult(makeKnownGoodIterator(entry), false);
+            return AddResult(this, entry, false);
+
+        registerModification();
 
         if (isDeletedBucket(*entry)) {
             initializeBucket(*entry);
@@ -762,25 +883,20 @@ namespace WTF {
         }
 
         HashTranslator::translate(*entry, key, extra, h);
+        ASSERT(!isEmptyOrDeletedBucket(*entry));
+
         ++m_keyCount;
-        if (shouldExpand()) {
-            // FIXME: This makes an extra copy on expand. Probably not that bad since
-            // expand is rare, but would be better to have a version of expand that can
-            // follow a pivot entry and return the new position.
-            typename WTF::RemoveReference<KeyPassInType>::Type enteredKey = Extractor::extract(*entry);
-            expand();
-            AddResult result(find(enteredKey), true);
-            ASSERT(result.iterator != end());
-            return result;
-        }
+        if (shouldExpand())
+            entry = expand(entry);
 
-        return AddResult(makeKnownGoodIterator(entry), true);
+        return AddResult(this, entry, true);
     }
 
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
-    inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::reinsert(ValueType& entry)
+    Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::reinsert(ValueType& entry)
     {
         ASSERT(m_table);
+        registerModification();
         ASSERT(!lookupForWriting(Extractor::extract(entry)).second);
         ASSERT(!isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first)));
 #if DUMP_HASHTABLE_STATS
@@ -789,8 +905,10 @@ namespace WTF {
 #if DUMP_HASHTABLE_STATS_PER_TABLE
         ++m_stats->numReinserts;
 #endif
+        Value* newEntry = lookupForWriting(Extractor::extract(entry)).first;
+        Mover<ValueType, Allocator, Traits::needsDestruction>::move(entry, *newEntry);
 
-        Mover<ValueType, Traits::needsDestruction>::move(entry, *lookupForWriting(Extractor::extract(entry)).first);
+        return newEntry;
     }
 
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
@@ -825,6 +943,7 @@ namespace WTF {
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
     void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(ValueType* pos)
     {
+        registerModification();
 #if DUMP_HASHTABLE_STATS
         atomicIncrement(&HashTableStats::numRemoves);
 #endif
@@ -867,10 +986,16 @@ namespace WTF {
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
     Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::allocateTable(unsigned size)
     {
-        typedef typename Allocator::template HashTableBackingHelper<Key, Value, Extractor, Traits, KeyTraits>::Type HashTableBacking;
+        typedef typename Allocator::template HashTableBackingHelper<HashTable>::Type HashTableBacking;
 
         size_t allocSize = size * sizeof(ValueType);
         ValueType* result;
+        // Assert that we will not use memset on things with a vtable entry.
+        // The compiler will also check this on some platforms. We would
+        // like to check this on the whole value (key-value pair), but
+        // IsPolymorphic will return false for a pair of two types, even if
+        // one of the components is polymorphic.
+        COMPILE_ASSERT(!Traits::emptyValueIsZero || !IsPolymorphic<KeyType>::value, EmptyValueCannotBeZeroForThingsWithAVtable);
         if (Traits::emptyValueIsZero) {
             result = Allocator::template zeroedBackingMalloc<ValueType*, HashTableBacking>(allocSize);
         } else {
@@ -882,19 +1007,32 @@ namespace WTF {
     }
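
The hazard the COMPILE_ASSERT above guards against, shown with the standard-library analogue of IsPolymorphic (toy types invented): zero-filled storage leaves a polymorphic object's vptr null, so an all-zero empty value is only legal for non-polymorphic types.

    #include <type_traits>

    struct Plain { int x; };                              // all-zero bytes form a valid empty value
    struct WithVtable { virtual ~WithVtable(); int x; };  // zero-filling would null the vptr

    static_assert(!std::is_polymorphic<Plain>::value, "memset initialization is safe");
    static_assert(std::is_polymorphic<WithVtable>::value, "needs the placement-new path");

    int main() { return 0; }
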
 
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
-    void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::deallocateTable(ValueType* table, unsigned size)
+    void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::deleteAllBucketsAndDeallocate(ValueType* table, unsigned size)
     {
         if (Traits::needsDestruction) {
             for (unsigned i = 0; i < size; ++i) {
-                if (!isDeletedBucket(table[i]))
-                    table[i].~ValueType();
+                // This code is called when the hash table is cleared or
+                // resized. We have allocated a new backing store and we need
+                // to run the destructors on the old backing store, as it is
+                // being freed. If we are GCing we need to both call the
+                // destructor and mark the bucket as deleted; otherwise the
+                // destructor gets called again when the GC finds the backing
+                // store. With the default allocator it is enough to call the
+                // destructor, since we free the memory explicitly and will
+                // never visit those buckets again.
+                if (!isEmptyOrDeletedBucket(table[i])) {
+                    if (Allocator::isGarbageCollected)
+                        deleteBucket(table[i]);
+                    else
+                        table[i].~ValueType();
+                }
             }
         }
         Allocator::backingFree(table);
     }
 
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
-    void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::expand()
+    Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::expand(Value* entry)
     {
         unsigned newSize;
         if (!m_tableSize) {
@@ -906,11 +1044,11 @@ namespace WTF {
             RELEASE_ASSERT(newSize > m_tableSize);
         }
 
-        rehash(newSize);
+        return rehash(newSize, entry);
     }
 
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
-    void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::rehash(unsigned newTableSize)
+    Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::rehash(unsigned newTableSize, Value* entry)
     {
         unsigned oldTableSize = m_tableSize;
         ValueType* oldTable = m_table;
@@ -927,27 +1065,38 @@ namespace WTF {
 
         m_table = allocateTable(newTableSize);
         m_tableSize = newTableSize;
-        m_tableSizeMask = newTableSize - 1;
 
-        for (unsigned i = 0; i != oldTableSize; ++i)
-            if (!isEmptyOrDeletedBucket(oldTable[i]))
-                reinsert(oldTable[i]);
+        Value* newEntry = 0;
+        for (unsigned i = 0; i != oldTableSize; ++i) {
+            if (isEmptyOrDeletedBucket(oldTable[i])) {
+                ASSERT(&oldTable[i] != entry);
+                continue;
+            }
+
+            Value* reinsertedEntry = reinsert(oldTable[i]);
+            if (&oldTable[i] == entry) {
+                ASSERT(!newEntry);
+                newEntry = reinsertedEntry;
+            }
+        }
 
         m_deletedCount = 0;
 
-        deallocateTable(oldTable, oldTableSize);
+        deleteAllBucketsAndDeallocate(oldTable, oldTableSize);
+
+        return newEntry;
     }
 
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
     void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::clear()
     {
+        registerModification();
         if (!m_table)
             return;
 
-        deallocateTable(m_table, m_tableSize);
+        deleteAllBucketsAndDeallocate(m_table, m_tableSize);
         m_table = 0;
         m_tableSize = 0;
-        m_tableSizeMask = 0;
         m_keyCount = 0;
     }
 
@@ -955,9 +1104,12 @@ namespace WTF {
     HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::HashTable(const HashTable& other)
         : m_table(0)
         , m_tableSize(0)
-        , m_tableSizeMask(0)
         , m_keyCount(0)
         , m_deletedCount(0)
+        , m_queueFlag(false)
+#if ENABLE(ASSERT)
+        , m_modifications(0)
+#endif
 #if DUMP_HASHTABLE_STATS_PER_TABLE
         , m_stats(adoptPtr(new Stats(*other.m_stats)))
 #endif
@@ -972,25 +1124,19 @@ namespace WTF {
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
     void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::swap(HashTable& other)
     {
-        ValueType* tmpTable = m_table;
-        m_table = other.m_table;
-        other.m_table = tmpTable;
-
-        size_t tmpTableSize = m_tableSize;
-        m_tableSize = other.m_tableSize;
-        other.m_tableSize = tmpTableSize;
-
-        size_t tmpTableSizeMask = m_tableSizeMask;
-        m_tableSizeMask = other.m_tableSizeMask;
-        other.m_tableSizeMask = tmpTableSizeMask;
-
-        size_t tmpKeyCount = m_keyCount;
-        m_keyCount = other.m_keyCount;
-        other.m_keyCount = tmpKeyCount;
-
-        size_t tmpDeletedCount = m_deletedCount;
+        std::swap(m_table, other.m_table);
+        std::swap(m_tableSize, other.m_tableSize);
+        std::swap(m_keyCount, other.m_keyCount);
+        // std::swap does not work for bit fields.
+        unsigned deleted = m_deletedCount;
         m_deletedCount = other.m_deletedCount;
-        other.m_deletedCount = tmpDeletedCount;
+        other.m_deletedCount = deleted;
+        ASSERT(!m_queueFlag);
+        ASSERT(!other.m_queueFlag);
+
+#if ENABLE(ASSERT)
+        std::swap(m_modifications, other.m_modifications);
+#endif
 
 #if DUMP_HASHTABLE_STATS_PER_TABLE
         m_stats.swap(other.m_stats);
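
The manual three-step swap is needed because a bit field cannot bind to the reference parameters of std::swap. A standalone illustration (toy struct invented):

    struct Counts {
        unsigned deleted : 31;
        bool queued : 1;
    };

    int main()
    {
        Counts a = { 1, false };
        Counts b = { 2, true };
        // std::swap(a.deleted, b.deleted);  // ill-formed: cannot bind a
        //                                   // reference to a bit field
        unsigned tmp = a.deleted;            // so swap by hand instead
        a.deleted = b.deleted;
        b.deleted = tmp;
        return 0;
    }
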
@@ -1005,30 +1151,39 @@ namespace WTF {
         return *this;
     }
 
-    template<bool isWeak, typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
+    template<WeakHandlingFlag weakHandlingFlag, typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
     struct WeakProcessingHashTableHelper;
 
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
-    struct WeakProcessingHashTableHelper<false, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
+    struct WeakProcessingHashTableHelper<NoWeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
         static void process(typename Allocator::Visitor* visitor, void* closure) { }
+        static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure) { }
+        static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure) { }
     };
 
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
-    struct WeakProcessingHashTableHelper<true, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
+    struct WeakProcessingHashTableHelper<WeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
+        // Used for purely weak and for weak-and-strong tables (ephemerons).
         static void process(typename Allocator::Visitor* visitor, void* closure)
         {
             typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
             HashTableType* table = reinterpret_cast<HashTableType*>(closure);
             if (table->m_table) {
-                // This just marks it live and does not push anything onto the
-                // marking stack.
-                Allocator::markNoTracing(visitor, table->m_table);
+                // This is run as part of weak processing after full
+                // marking. The backing store is therefore marked if
+                // we get here.
+                ASSERT(visitor->isAlive(table->m_table));
                 // Now perform weak processing (this is a no-op if the backing
                 // was accessible through an iterator and was already marked
                 // strongly).
-                for (typename HashTableType::ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
+                typedef typename HashTableType::ValueType ValueType;
+                for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
                     if (!HashTableType::isEmptyOrDeletedBucket(*element)) {
-                        if (Allocator::hasDeadMember(visitor, *element)) {
+                        // At this stage calling trace can make no difference
+                        // (everything is already traced), but we use the
+                        // return value to remove things from the collection.
+                        if (TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element)) {
+                            table->registerModification();
                             HashTableType::deleteBucket(*element); // Also calls the destructor.
                             table->m_deletedCount++;
                             table->m_keyCount--;
@@ -1040,13 +1195,42 @@ namespace WTF {
                 }
             }
         }
+
+        // Called repeatedly for tables that have both weak and strong pointers.
+        static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure)
+        {
+            typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
+            HashTableType* table = reinterpret_cast<HashTableType*>(closure);
+            if (table->m_table) {
+                // Check the hash table for elements that we now know will not
+                // be removed by weak processing. Those elements need to have
+                // their strong pointers traced.
+                typedef typename HashTableType::ValueType ValueType;
+                for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
+                    if (!HashTableType::isEmptyOrDeletedBucket(*element))
+                        TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element);
+                }
+            }
+        }
+
+        // Called when the ephemeron iteration is done and before running the
+        // per-thread weak processing. It is guaranteed to be called before any
+        // thread is resumed.
+        static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure)
+        {
+            typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
+            HashTableType* table = reinterpret_cast<HashTableType*>(closure);
+            ASSERT(Allocator::weakTableRegistered(visitor, table));
+            table->clearEnqueued();
+        }
     };
 
     template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
     void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::trace(typename Allocator::Visitor* visitor)
     {
         // If someone else already marked the backing and queued up the trace
-        // and/or weak callback then we are done.
+        // and/or weak callback then we are done. This optimization does not
+        // happen for ListHashSet since its iterator does not point at the
+        // backing.
         if (!m_table || visitor->isAlive(m_table))
             return;
         // Normally, we mark the backing store without performing trace. This
@@ -1058,17 +1242,41 @@ namespace WTF {
         // through its HashTable (ie from an iterator) then the mark bit will
         // be set and the pointers will be marked strongly, avoiding problems
         // with iterating over things that disappear due to weak processing
-        // while we are iterating over them. The weakProcessing callback will
-        // mark the backing as a void pointer, and will perform weak processing
-        // if needed.
-        if (!Traits::isWeak)
+        // while we are iterating over them. We register the backing store
+        // pointer for delayed marking which will take place after we know if
+        // the backing is reachable from elsewhere. We also register a
+        // weakProcessing callback which will perform weak processing if needed.
+        if (Traits::weakHandlingFlag == NoWeakHandlingInCollections) {
             Allocator::markNoTracing(visitor, m_table);
-        else
-            Allocator::registerWeakMembers(visitor, this, WeakProcessingHashTableHelper<Traits::isWeak, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::process);
-        if (Traits::needsTracing) {
+        } else {
+            Allocator::registerDelayedMarkNoTracing(visitor, m_table);
+            Allocator::registerWeakMembers(visitor, this, m_table, WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::process);
+        }
+        if (ShouldBeTraced<Traits>::value) {
+            if (Traits::weakHandlingFlag == WeakHandlingInCollections) {
+                // If we have both strong and weak pointers in the collection
+                // then we queue up the collection for fixed point iteration a
+                // la Ephemerons:
+                // http://dl.acm.org/citation.cfm?doid=263698.263733 - see also
+                // http://www.jucs.org/jucs_14_21/eliminating_cycles_in_weak
+                ASSERT(!enqueued() || Allocator::weakTableRegistered(visitor, this));
+                if (!enqueued()) {
+                    Allocator::registerWeakTable(visitor, this,
+                        WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIteration,
+                        WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIterationDone);
+                    setEnqueued();
+                }
+                // We don't need to trace the elements here, since registering
+                // as a weak table above will cause them to be traced (perhaps
+                // several times). It's better to wait until everything else is
+                // traced before tracing the elements for the first time; this
+                // may reduce (by one) the number of iterations needed to get
+                // to a fixed point.
+                return;
+            }
             for (ValueType* element = m_table + m_tableSize - 1; element >= m_table; element--) {
                 if (!isEmptyOrDeletedBucket(*element))
-                    Allocator::template mark<ValueType, Traits>(visitor, *element);
+                    Allocator::template trace<ValueType, Traits>(visitor, *element);
             }
         }
     }
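
A conceptual sketch of the fixed-point loop that the registerWeakTable() call above sets up. The real driver lives in the garbage collector, not in this file, and everything below is invented for illustration: ephemeronIteration() is re-run until marking stops making progress, after which ephemeronIterationDone() and the weak-processing callback each run once.

    #include <vector>

    struct RegisteredWeakTable {
        // Invented signature: returns true if this pass marked anything new.
        bool (*iterate)(void* visitor, void* table);
        void* table;
    };

    inline void runEphemeronFixedPoint(void* visitor, std::vector<RegisteredWeakTable>& tables)
    {
        bool progressed = true;
        while (progressed) {
            progressed = false;
            for (size_t i = 0; i < tables.size(); ++i)
                progressed |= tables[i].iterate(visitor, tables[i].table);
        }
        // ...then the done callbacks and weak processing run once each.
    }
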
@@ -1081,7 +1289,7 @@ namespace WTF {
         typedef typename Traits::IteratorConstGetType GetType;
         typedef typename HashTableType::ValueTraits::IteratorConstGetType SourceGetType;
 
-        GetType get() const { return (GetType)SourceGetType(m_impl.get()); }
+        GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
         typename Traits::IteratorConstReferenceType operator*() const { return Traits::getToReferenceConstConversion(get()); }
         GetType operator->() const { return get(); }
 
@@ -1098,7 +1306,7 @@ namespace WTF {
         HashTableIteratorAdapter() {}
         HashTableIteratorAdapter(const typename HashTableType::iterator& impl) : m_impl(impl) {}
 
-        GetType get() const { return (GetType)SourceGetType(m_impl.get()); }
+        GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
         typename Traits::IteratorReferenceType operator*() const { return Traits::getToReferenceConversion(get()); }
         GetType operator->() const { return get(); }
 
@@ -1163,6 +1371,17 @@ namespace WTF {
         return a.m_impl != b.m_impl;
     }
 
+    template<typename Collection1, typename Collection2>
+    inline void removeAll(Collection1& collection, const Collection2& toBeRemoved)
+    {
+        if (collection.isEmpty() || toBeRemoved.isEmpty())
+            return;
+        typedef typename Collection2::const_iterator CollectionIterator;
+        CollectionIterator end(toBeRemoved.end());
+        for (CollectionIterator it(toBeRemoved.begin()); it != end; ++it)
+            collection.remove(*it);
+    }
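
A hedged usage sketch of the new helper (assumes WTF::HashSet, which provides the isEmpty(), begin()/end(), and remove() operations that removeAll relies on):

    void pruneDoomed(WTF::HashSet<int>& live, const WTF::HashSet<int>& doomed)
    {
        // Removes every member of doomed from live; a no-op when either is empty.
        WTF::removeAll(live, doomed);
    }
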
+
 } // namespace WTF
 
 #include "wtf/HashIterators.h"