2 * Copyright (C) 2013 Google Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include "platform/PlatformExport.h"
35 #include "platform/heap/AddressSanitizer.h"
36 #include "platform/heap/ThreadState.h"
37 #include "platform/heap/Visitor.h"
39 #include "wtf/Assertions.h"
40 #include "wtf/HashCountedSet.h"
41 #include "wtf/LinkedHashSet.h"
42 #include "wtf/ListHashSet.h"
43 #include "wtf/OwnPtr.h"
44 #include "wtf/PassRefPtr.h"
45 #include "wtf/ThreadSafeRefCounted.h"
// Layout constants for the Blink (Oilpan) garbage-collected heap.
// A Blink page is 2^17 bytes = 128KB.
51 const size_t blinkPageSizeLog2 = 17;
52 const size_t blinkPageSize = 1 << blinkPageSizeLog2;
// Masks extracting the offset within, and the base of, a Blink page.
53 const size_t blinkPageOffsetMask = blinkPageSize - 1;
54 const size_t blinkPageBaseMask = ~blinkPageOffsetMask;
55 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte
56 // align all allocations even on 32 bit.
57 const size_t allocationGranularity = 8;
58 const size_t allocationMask = allocationGranularity - 1;
// One bit per allocation granule on a page, rounded up to whole bytes...
59 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity);
// ...then rounded up to the allocation granularity so what follows the
// bitmap stays 8-byte aligned.
60 const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask);
61 const size_t maxHeapObjectSize = 1 << 27;
// Flag bits stored in the low bits of an object header's encoded size;
// sizes are 8-byte aligned so the low three bits are free (see sizeMask).
63 const size_t markBitMask = 1;
64 const size_t freeListMask = 2;
65 const size_t debugBitMask = 4;
66 const size_t sizeMask = ~7;
// Byte values used to zap freed / finalized memory in debug builds.
67 const uint8_t freelistZapValue = 42;
68 const uint8_t finalizedZapValue = 24;
// Forward declarations for the persistent-handle machinery.
72 template<ThreadAffinity affinity> class ThreadLocalPersistents;
73 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity > > class Persistent;
74 template<typename T> class CrossThreadPersistent;
// Size of an OS page (also the guard-page size); defined out of line.
76 PLATFORM_EXPORT size_t osPageSize();
78 // Blink heap pages are set up with a guard page before and after the
80 inline size_t blinkPagePayloadSize()
82 return blinkPageSize - 2 * osPageSize();
85 // Blink heap pages are aligned to the Blink heap page size.
86 // Therefore, the start of a Blink page can be obtained by
87 // rounding down to the Blink page size.
88 inline Address roundToBlinkPageStart(Address address)
90 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
93 inline Address roundToBlinkPageEnd(Address address)
95 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) & blinkPageBaseMask) + blinkPageSize;
98 // Compute the amount of padding we have to add to a header to make
99 // the size of the header plus the padding a multiple of 8 bytes.
100 template<typename Header>
101 inline size_t headerPadding()
103 return (allocationGranularity - (sizeof(Header) % allocationGranularity)) % allocationGranularity;
106 // Masks an address down to the enclosing blink page base address.
107 inline Address blinkPageAddress(Address address)
109 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
114 // Sanity check for a page header address: the address of the page
115 // header should be OS page size away from being Blink page size
117 inline bool isPageHeaderAddress(Address address)
119 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize());
123 // Mask an address down to the enclosing oilpan heap page base address.
124 // All oilpan heap pages are aligned at blinkPageBase plus an OS page size.
125 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps.
126 // This is only exported to enable tests in HeapTest.cpp.
127 PLATFORM_EXPORT inline Address pageHeaderAddress(Address address)
129 return blinkPageAddress(address) + osPageSize();
132 // Common header for heap pages.
// NOTE(review): this numbered dump is missing interleaved lines (class
// braces, access specifiers, some member initializers); comments below
// annotate only the visible fragments — confirm against the full file.
135 BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state)
138 , m_threadState(state)
// Page headers must sit one OS (guard) page past a Blink-page-aligned
// address; see isPageHeaderAddress().
141 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
144 // Check if the given address points to an object in this
145 // heap page. If so, find the start of that object and mark it
146 // using the given Visitor. Otherwise do nothing. The pointer must
147 // be within the same aligned blinkPageSize as the this-pointer.
149 // This is used during conservative stack scanning to
150 // conservatively mark all objects that could be referenced from
152 virtual void checkAndMarkPointer(Visitor*, Address) = 0;
154 #if ENABLE(GC_TRACING)
155 virtual const GCInfo* findGCInfo(Address) = 0;
// The page header object is placed at the very start of the page memory.
158 Address address() { return reinterpret_cast<Address>(this); }
159 PageMemory* storage() const { return m_storage; }
160 ThreadState* threadState() const { return m_threadState; }
161 const GCInfo* gcInfo() { return m_gcInfo; }
// Overridden to return true by LargeHeapObject.
162 virtual bool isLargeObject() { return false; }
165 // Accessor to silence unused warnings.
166 void* padding() const { return m_padding; }
168 PageMemory* m_storage;
169 const GCInfo* m_gcInfo;
170 ThreadState* m_threadState;
171 // Free word only needed to ensure proper alignment of the
176 // Large allocations are allocated as separate objects and linked in a
179 // In order to use the same memory allocation routines for everything
180 // allocated in the heap, large objects are considered heap pages
181 // containing only one object.
183 // The layout of a large heap object is as follows:
185 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload |
186 template<typename Header>
187 class LargeHeapObject : public BaseHeapPage {
189 LargeHeapObject(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) : BaseHeapPage(storage, gcInfo, state)
// The header region (this object) must itself be 8-byte aligned so the
// object header / payload that follow keep the allocation granularity.
191 COMPILE_ASSERT(!(sizeof(LargeHeapObject<Header>) & allocationMask), large_heap_object_header_misaligned);
194 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE;
195 virtual bool isLargeObject() OVERRIDE { return true; }
197 #if ENABLE(GC_TRACING)
198 virtual const GCInfo* findGCInfo(Address address)
200 if (!objectContains(address))
// Insert this large object at the head of the singly linked list
// rooted at *previousNext.
206 void link(LargeHeapObject<Header>** previousNext)
208 m_next = *previousNext;
209 *previousNext = this;
// Remove this large object from the list; the caller passes the
// location of the pointer that currently points at it.
212 void unlink(LargeHeapObject<Header>** previousNext)
214 *previousNext = m_next;
217 // The LargeHeapObject pseudo-page contains one actual object. Determine
218 // whether the pointer is within that object.
219 bool objectContains(Address object)
221 return (payload() <= object) && (object < address() + size());
224 // Returns true for any address that is on one of the pages that this
225 // large object uses. That ensures that we can use a negative result to
226 // populate the negative page cache.
227 bool contains(Address object)
229 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size());
232 LargeHeapObject<Header>* next()
// NOTE(review): lines are missing between next() and this return; in
// the upstream source this return belongs to size() — confirm.
239 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
242 Address payload() { return heapObjectHeader()->payload(); }
243 size_t payloadSize() { return heapObjectHeader()->payloadSize(); }
// The object header lives immediately after this pseudo-page header
// plus any alignment padding.
245 Header* heapObjectHeader()
247 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
248 return reinterpret_cast<Header*>(headerAddress);
253 void getStats(HeapStats&);
258 friend class ThreadHeap<Header>;
// Next large object in this thread heap's singly linked list.
260 LargeHeapObject<Header>* m_next;
263 // The BasicObjectHeader is the minimal object header. It is used when
264 // encountering heap space of size allocationGranularity to mark it
265 // as a freelist entry.
266 class PLATFORM_EXPORT BasicObjectHeader {
// The encoded size carries flag bits (freeListMask etc.) in its low bits.
269 explicit BasicObjectHeader(size_t encodedSize)
270 : m_size(encodedSize) { }
// Tag a size value as belonging to a freelist entry.
272 static size_t freeListEncodedSize(size_t size) { return size | freeListMask; }
275 bool isFree() { return m_size & freeListMask; }
// Strip the flag bits to recover the actual size.
278 size_t size() const { return m_size & sizeMask; }
284 // Our heap object layout is layered with the HeapObjectHeader closest
285 // to the payload, this can be wrapped in a FinalizedObjectHeader if the
286 // object is on the GeneralHeap and not on a specific TypedHeap.
287 // Finally if the object is a large object (> blinkPageSize/2) then it is
288 // wrapped with a LargeObjectHeader.
290 // Object memory layout:
291 // [ LargeObjectHeader | ] [ FinalizedObjectHeader | ] HeapObjectHeader | payload
292 // The [ ] notation denotes that the LargeObjectHeader and the FinalizedObjectHeader
293 // are independently optional.
294 class PLATFORM_EXPORT HeapObjectHeader : public BasicObjectHeader {
297 explicit HeapObjectHeader(size_t encodedSize)
298 : BasicObjectHeader(encodedSize)
// The GCInfo argument is unused here; typed heaps keep it on the page.
305 HeapObjectHeader(size_t encodedSize, const GCInfo*)
306 : BasicObjectHeader(encodedSize)
309 inline void checkHeader() const;
313 inline bool isMarked() const;
316 inline void unmark();
// Plain HeapObjectHeaders carry no per-object GCInfo (cf.
// FinalizedHeapObjectHeader::gcInfo()).
318 inline const GCInfo* gcInfo() { return 0; }
320 inline Address payload();
321 inline size_t payloadSize();
322 inline Address payloadEnd();
324 inline void setDebugMark();
325 inline void clearDebugMark();
326 inline bool hasDebugMark() const;
328 // Zap magic number with a new magic number that means there was once an
329 // object allocated here, but it was freed because nobody marked it during
// the last garbage collection.
333 static void finalize(const GCInfo*, Address, size_t);
334 static HeapObjectHeader* fromPayload(const void*);
// Debug magic values used to detect use of freed headers.
336 static const intptr_t magic = 0xc0de247;
337 static const intptr_t zappedMagic = 0xC0DEdead;
338 // The zap value for vtables should be < 4K to ensure it cannot be
339 // used for dispatch.
340 static const intptr_t zappedVTable = 0xd0d;
348 const size_t objectHeaderSize = sizeof(HeapObjectHeader);
350 // Each object on the GeneralHeap needs to carry a pointer to its
351 // own GCInfo structure for tracing and potential finalization.
352 class PLATFORM_EXPORT FinalizedHeapObjectHeader : public HeapObjectHeader {
355 FinalizedHeapObjectHeader(size_t encodedSize, const GCInfo* gcInfo)
356 : HeapObjectHeader(encodedSize)
361 inline Address payload();
362 inline size_t payloadSize();
// Shadows HeapObjectHeader::gcInfo(), which returns 0.
365 const GCInfo* gcInfo() { return m_gcInfo; }
368 TraceCallback traceCallback() { return m_gcInfo->m_trace; }
373 inline bool hasFinalizer() { return m_gcInfo->hasFinalizer(); }
375 static FinalizedHeapObjectHeader* fromPayload(const void*);
378 bool hasVTable() { return m_gcInfo->hasVTable(); }
381 const GCInfo* m_gcInfo;
384 const size_t finalizedHeaderSize = sizeof(FinalizedHeapObjectHeader);
// A piece of unused heap memory, kept in a per-size-bucket freelist.
// The entry reuses the HeapObjectHeader with the freeListMask bit set.
386 class FreeListEntry : public HeapObjectHeader {
389 explicit FreeListEntry(size_t size)
390 : HeapObjectHeader(freeListEncodedSize(size))
393 #if !defined(NDEBUG) && !ASAN
394 // Zap free area with asterisks, aka 0x2a2a2a2a.
395 // For ASAN don't zap since we keep accounting in the freelist entry.
396 for (size_t i = sizeof(*this); i < size; i++)
397 reinterpret_cast<Address>(this)[i] = freelistZapValue;
398 ASSERT(size >= objectHeaderSize);
403 Address address() { return reinterpret_cast<Address>(this); }
406 void unlink(FreeListEntry** prevNext)
413 void link(FreeListEntry** prevNext)
419 #if defined(ADDRESS_SANITIZER)
// Under ASan, memory reuse is deferred: m_asanMagic holds a magic value
// plus a countdown, and the entry only rejoins the freelist once the
// countdown reaches zero.
421 bool shouldAddToFreeList()
423 // Init if not already magic.
424 if ((m_asanMagic & ~asanDeferMemoryReuseMask) != asanMagic) {
425 m_asanMagic = asanMagic | asanDeferMemoryReuseCount;
428 // Decrement if count part of asanMagic > 0.
429 if (m_asanMagic & asanDeferMemoryReuseMask)
431 return !(m_asanMagic & asanDeferMemoryReuseMask);
436 FreeListEntry* m_next;
437 #if defined(ADDRESS_SANITIZER)
438 unsigned m_asanMagic;
442 // Representation of Blink heap pages.
444 // Pages are specialized on the type of header on the object they
445 // contain. If a heap page only contains a certain type of object all
446 // of the objects will have the same GCInfo pointer and therefore that
447 // pointer can be stored in the HeapPage instead of in the header of
448 // each object. In that case objects have only a HeapObjectHeader and
449 // not a FinalizedHeapObjectHeader saving a word per object.
450 template<typename Header>
451 class HeapPage : public BaseHeapPage {
453 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*);
// Intrusive singly linked list management for the owning thread heap.
455 void link(HeapPage**);
456 static void unlink(HeapPage*, HeapPage**);
460 // Returns true for the whole blinkPageSize page that the page is on, even
461 // for the header, and the unmapped guard page at the start. That ensures
462 // the result can be used to populate the negative page cache.
463 bool contains(Address addr)
465 Address blinkPageStart = roundToBlinkPageStart(address());
466 ASSERT(blinkPageStart = address() - osPageSize()); // Page is at aligned address plus guard page size.
467 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize;
470 HeapPage* next() { return m_next; }
// NOTE(review): the signature line for this return is missing from the
// dump; upstream it is the body of payload() — confirm.
474 return address() + sizeof(*this) + headerPadding<Header>();
// Usable payload bytes: Blink page payload minus the page header and
// its alignment padding, rounded down to the allocation granularity.
477 static size_t payloadSize()
479 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask;
482 Address end() { return payload() + payloadSize(); }
484 void getStats(HeapStats&);
487 void clearObjectStartBitMap();
488 void finalize(Header*);
489 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE;
490 #if ENABLE(GC_TRACING)
491 const GCInfo* findGCInfo(Address) OVERRIDE;
493 ThreadHeap<Header>* heap() { return m_heap; }
494 #if defined(ADDRESS_SANITIZER)
495 void poisonUnmarkedObjects();
// The object-start bitmap is built lazily; it maps allocation granules
// to object start positions for conservative pointer lookup.
499 Header* findHeaderFromAddress(Address);
500 void populateObjectStartBitMap();
501 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; }
502 TraceCallback traceCallback(Header*);
503 bool hasVTable(Header*);
505 HeapPage<Header>* m_next;
506 ThreadHeap<Header>* m_heap;
507 bool m_objectStartBitMapComputed;
508 uint8_t m_objectStartBitMap[reservedForObjectBitMap];
510 friend class ThreadHeap<Header>;
// Cache-entry types for HeapExtentCache below.
// NOTE(review): the "class AddressEntry {" line is missing from this dump.
515 AddressEntry() : m_address(0) { }
517 explicit AddressEntry(Address address) : m_address(address) { }
519 Address address() const { return m_address; }
// Positive entry: records the page that contains the cached address.
525 class PositiveEntry : public AddressEntry {
529 , m_containingPage(0)
533 PositiveEntry(Address address, BaseHeapPage* containingPage)
534 : AddressEntry(address)
535 , m_containingPage(containingPage)
539 BaseHeapPage* result() const { return m_containingPage; }
541 typedef BaseHeapPage* LookupResult;
544 BaseHeapPage* m_containingPage;
// Negative entry: presence in the cache is the whole answer, so the
// result is simply true.
547 class NegativeEntry : public AddressEntry {
549 NegativeEntry() : AddressEntry() { }
551 NegativeEntry(Address address, bool) : AddressEntry(address) { }
553 bool result() const { return true; }
555 typedef bool LookupResult;
558 // A HeapExtentCache provides a fast way of taking an arbitrary
559 // pointer-sized word, and determining whether it can be interpreted
560 // as a pointer to an area that is managed by the garbage collected
561 // Blink heap. There is a cache of 'pages' that have previously been
562 // determined to be wholly inside the heap. The size of these pages must be
563 // smaller than the allocation alignment of the heap pages. We determine
564 // on-heap-ness by rounding down the pointer to the nearest page and looking up
565 // the page in the cache. If there is a miss in the cache we can ask the heap
566 // to determine the status of the pointer by iterating over all of the heap.
567 // The result is then cached in the two-way associative page cache.
569 // A HeapContainsCache is a positive cache. Therefore, it must be flushed when
570 // memory is removed from the Blink heap. The HeapDoesNotContainCache is a
571 // negative cache, so it must be flushed when memory is added to the heap.
572 template<typename Entry>
573 class HeapExtentCache {
576 : m_entries(adoptArrayPtr(new Entry[HeapExtentCache::numberOfEntries]))
577 , m_hasEntries(false)
582 bool contains(Address);
583 bool isEmpty() { return !m_hasEntries; }
585 // Perform a lookup in the cache.
587 // If lookup returns null/false the argument address was not found in
588 // the cache and it is unknown if the address is in the Blink
591 // If lookup returns true/a page, the argument address was found in the
592 // cache. For the HeapContainsCache this means the address is in the heap.
593 // For the HeapDoesNotContainCache this means the address is not in the
595 PLATFORM_EXPORT typename Entry::LookupResult lookup(Address);
597 // Add an entry to the cache.
598 PLATFORM_EXPORT void addEntry(Address, typename Entry::LookupResult);
// 2^12 = 4096 entries.
601 static const int numberOfEntriesLog2 = 12;
602 static const int numberOfEntries = 1 << numberOfEntriesLog2;
604 static size_t hash(Address);
606 WTF::OwnPtr<Entry[]> m_entries;
609 friend class ThreadState;
612 // Normally these would be typedefs instead of subclasses, but that makes them
613 // very hard to forward declare.
614 class HeapContainsCache : public HeapExtentCache<PositiveEntry> {
616 BaseHeapPage* lookup(Address);
617 void addEntry(Address, BaseHeapPage*);
620 class HeapDoesNotContainCache : public HeapExtentCache<NegativeEntry> { };
622 // FIXME: This is currently used by the WebAudio code.
623 // We should attempt to restructure the WebAudio code so that the main thread
624 // alone determines life-time and receives messages about life-time from the
// NOTE(review): the "template<typename T>" line preceding this class is
// missing from the dump — confirm against the full file.
627 class ThreadSafeRefCountedGarbageCollected : public GarbageCollectedFinalized<T>, public WTF::ThreadSafeRefCountedBase {
628 WTF_MAKE_NONCOPYABLE(ThreadSafeRefCountedGarbageCollected);
// While the ref count is non-zero, a CrossThreadPersistent keeps the
// object alive against garbage collection.
631 ThreadSafeRefCountedGarbageCollected()
633 m_keepAlive = adoptPtr(new CrossThreadPersistent<T>(static_cast<T*>(this)));
636 // Override ref to deal with a case where a reference count goes up
637 // from 0 to 1. This can happen in the following scenario:
638 // (1) The reference count becomes 0, but on-stack pointers keep references to the object.
639 // (2) The on-stack pointer is assigned to a RefPtr. The reference count becomes 1.
640 // In this case, we have to resurrect m_keepAlive.
643 MutexLocker lock(m_mutex);
644 if (UNLIKELY(!refCount())) {
645 ASSERT(!m_keepAlive);
646 m_keepAlive = adoptPtr(new CrossThreadPersistent<T>(static_cast<T*>(this)));
648 WTF::ThreadSafeRefCountedBase::ref();
651 // Override deref to deal with our own deallocation based on ref counting.
654 MutexLocker lock(m_mutex);
// Allocation must go through the garbage-collected heap.
661 using GarbageCollectedFinalized<T>::operator new;
662 using GarbageCollectedFinalized<T>::operator delete;
665 ~ThreadSafeRefCountedGarbageCollected() { }
668 OwnPtr<CrossThreadPersistent<T> > m_keepAlive;
// Guards the ref count / m_keepAlive resurrection race described above.
669 mutable Mutex m_mutex;
672 // The CallbackStack contains all the visitor callbacks used to trace and mark
673 // objects. A specific CallbackStack instance contains at most bufferSize elements.
674 // If more space is needed a new CallbackStack instance is created and chained
675 // together with the former instance. I.e. a logical CallbackStack can be made of
676 // multiple chained CallbackStack object instances.
677 // There are two logical callback stacks. One containing all the marking callbacks and
678 // one containing the weak pointer callbacks.
679 class CallbackStack {
// New blocks are pushed at the front of the chain rooted at *first.
681 CallbackStack(CallbackStack** first)
682 : m_limit(&(m_buffer[bufferSize]))
683 , m_current(&(m_buffer[0]))
695 void assertIsEmpty();
// One queued callback: the object it applies to plus the callback.
700 Item(void* object, VisitorCallback callback)
702 , m_callback(callback)
705 void* object() { return m_object; }
706 VisitorCallback callback() { return m_callback; }
710 VisitorCallback m_callback;
713 static void init(CallbackStack** first);
714 static void shutdown(CallbackStack** first);
715 bool popAndInvokeCallback(CallbackStack** first, Visitor*);
// Returns a slot in this block, or chains a fresh block when full.
717 Item* allocateEntry(CallbackStack** first)
719 if (m_current < m_limit)
721 return (new CallbackStack(first))->allocateEntry(first);
725 static const size_t bufferSize = 8000;
726 Item m_buffer[bufferSize];
729 CallbackStack* m_next;
732 // Non-template super class used to pass a heap around to other classes.
// NOTE(review): the "class BaseHeap {" line is missing from this dump.
735 virtual ~BaseHeap() { }
737 // Find the page in this thread heap containing the given
738 // address. Returns 0 if the address is not contained in any
739 // page in this thread heap.
740 virtual BaseHeapPage* heapPageFromAddress(Address) = 0;
742 #if ENABLE(GC_TRACING)
743 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0;
746 // Sweep this part of the Blink heap. This finalizes dead objects
747 // and builds freelists for all the unused memory.
748 virtual void sweep() = 0;
750 // Forcefully finalize all objects in this part of the Blink heap
751 // (potentially with the exception of one object). This is used
752 // during thread termination to make sure that all objects for the
753 // dying thread are finalized.
754 virtual void assertEmpty() = 0;
756 virtual void clearFreeLists() = 0;
757 virtual void clearMarks() = 0;
759 virtual void getScannedStats(HeapStats&) = 0;
762 virtual void makeConsistentForGC() = 0;
763 virtual bool isConsistentForGC() = 0;
765 // Returns a bucket number for inserting a FreeListEntry of a
766 // given size. All FreeListEntries in the given bucket, n, have
768 static int bucketIndexForSize(size_t);
771 // Thread heaps represent a part of the per-thread Blink heap.
773 // Each Blink thread has a number of thread heaps: one general heap
774 // that contains any type of object and a number of heaps specialized
775 // for specific object types (such as Node).
777 // Each thread heap contains the functionality to allocate new objects
778 // (potentially adding new pages to the heap), to find and mark
779 // objects during conservative stack scanning and to sweep the set of
781 template<typename Header>
782 class ThreadHeap : public BaseHeap {
784 ThreadHeap(ThreadState*);
785 virtual ~ThreadHeap();
// BaseHeap interface (see comments on BaseHeap above).
787 virtual BaseHeapPage* heapPageFromAddress(Address);
788 #if ENABLE(GC_TRACING)
789 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address);
791 virtual void sweep();
792 virtual void assertEmpty();
793 virtual void clearFreeLists();
794 virtual void clearMarks();
796 virtual void getScannedStats(HeapStats&);
799 virtual void makeConsistentForGC();
800 virtual bool isConsistentForGC();
802 ThreadState* threadState() { return m_threadState; }
803 HeapStats& stats() { return m_threadState->stats(); }
// The contains-cache is positive, so it must be flushed whenever pages
// leave this heap.
804 void flushHeapContainsCache()
806 m_threadState->heapContainsCache()->flush();
809 inline Address allocate(size_t, const GCInfo*);
810 void addToFreeList(Address, size_t);
811 void addPageToPool(HeapPage<Header>*);
// Payload bytes available for an allocation request of `size` bytes
// (the header's share of the rounded allocation size is subtracted).
812 inline static size_t roundedAllocationSize(size_t size)
814 return allocationSizeFromSize(size) - sizeof(Header);
818 // Once pages have been used for one thread heap they will never
819 // be reused for another thread heap. Instead of unmapping, we add
820 // the pages to a pool of pages to be reused later by this thread
821 // heap. This is done as a security feature to avoid type
822 // confusion. The heap is type segregated by having separate
823 // thread heaps for various types of objects. Holding on to pages
824 // ensures that the same virtual address space cannot be used for
825 // objects of another type than the type contained in this thread
827 class PagePoolEntry {
829 PagePoolEntry(PageMemory* storage, PagePoolEntry* next)
834 PageMemory* storage() { return m_storage; }
835 PagePoolEntry* next() { return m_next; }
838 PageMemory* m_storage;
839 PagePoolEntry* m_next;
// Slow allocation paths used when the bump-pointer area is exhausted.
842 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*);
843 static size_t allocationSizeFromSize(size_t);
844 void addPageToHeap(const GCInfo*);
845 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*);
// Bump-pointer allocation area within the current page.
846 Address currentAllocationPoint() const { return m_currentAllocationPoint; }
847 size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
848 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); }
849 void setAllocationPoint(Address point, size_t size)
851 ASSERT(!point || heapPageFromAddress(point));
852 ASSERT(size <= HeapPage<Header>::payloadSize());
853 m_currentAllocationPoint = point;
854 m_remainingAllocationSize = size;
856 void ensureCurrentAllocation(size_t, const GCInfo*);
857 bool allocateFromFreeList(size_t);
859 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**);
861 void allocatePage(const GCInfo*);
862 PageMemory* takePageFromPool();
863 void clearPagePool();
866 Address m_currentAllocationPoint;
867 size_t m_remainingAllocationSize;
// Heads of the page list and the large-object list owned by this heap.
869 HeapPage<Header>* m_firstPage;
870 LargeHeapObject<Header>* m_firstLargeHeapObject;
872 int m_biggestFreeListIndex;
873 ThreadState* m_threadState;
875 // All FreeListEntries in the nth list have size >= 2^n.
876 FreeListEntry* m_freeLists[blinkPageSizeLog2];
878 // List of pages that have been previously allocated, but are now
880 PagePoolEntry* m_pagePool;
// Static entry points to the process-wide Blink garbage collector.
883 class PLATFORM_EXPORT Heap {
886 static void shutdown();
887 static void doShutdown();
// Returns the heap page containing the address, or 0 if the address is
// not in any Blink heap page.
889 static BaseHeapPage* contains(Address);
890 static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); }
891 static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); }
893 // Push a trace callback on the marking stack.
894 static void pushTraceCallback(void* containerObject, TraceCallback);
896 // Add a weak pointer callback to the weak callback work list. General
897 // object pointer callbacks are added to a thread local weak callback work
898 // list and the callback is called on the thread that owns the object, with
899 // the closure pointer as an argument. Most of the time, the closure and
900 // the containerObject can be the same thing, but the containerObject is
901 // constrained to be on the heap, since the heap is used to identify the
903 static void pushWeakObjectPointerCallback(void* closure, void* containerObject, WeakPointerCallback);
905 // Similar to the more general pushWeakObjectPointerCallback, but cell
906 // pointer callbacks are added to a static callback work list and the weak
907 // callback is performed on the thread performing garbage collection. This
908 // is OK because cells are just cleared and no deallocation can happen.
909 static void pushWeakCellPointerCallback(void** cell, WeakPointerCallback);
911 // Pop the top of the marking stack and call the callback with the visitor
912 // and the object. Returns false when there is nothing more to do.
913 static bool popAndInvokeTraceCallback(Visitor*);
915 // Remove an item from the weak callback work list and call the callback
916 // with the visitor and the closure pointer. Returns false when there is
917 // nothing more to do.
918 static bool popAndInvokeWeakPointerCallback(Visitor*);
920 template<typename T> static Address allocate(size_t);
921 template<typename T> static Address reallocate(void* previous, size_t);
923 static void collectGarbage(ThreadState::StackState);
924 static void collectAllGarbage();
925 static void setForcePreciseGCForTesting();
927 static void prepareForGC();
929 // Conservatively checks whether an address is a pointer in any of the thread
930 // heaps. If so marks the object pointed to as live.
931 static Address checkAndMarkPointer(Visitor*, Address);
933 #if ENABLE(GC_TRACING)
934 // Dump the path to specified object on the next GC. This method is to be invoked from GDB.
935 static void dumpPathToObjectOnNextGC(void* p);
937 // Forcibly find GCInfo of the object at Address.
938 // This is slow and should only be used for debug purposes.
939 // It involves finding the heap page and scanning the heap page for an object header.
940 static const GCInfo* findGCInfo(Address);
943 // Collect heap stats for all threads attached to the Blink
944 // garbage collector. Should only be called during garbage
945 // collection where threads are known to be at safe points.
946 static void getStats(HeapStats*);
948 static bool isConsistentForGC();
949 static void makeConsistentForGC();
// The does-not-contain cache is negative, so it must be flushed when
// memory is added to the heap.
951 static void flushHeapDoesNotContainCache();
952 static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCache->isEmpty(); }
// Process-global GC state.
955 static Visitor* s_markingVisitor;
957 static CallbackStack* s_markingStack;
958 static CallbackStack* s_weakCallbackStack;
959 static HeapDoesNotContainCache* s_heapDoesNotContainCache;
960 static bool s_shutdownCalled;
961 friend class ThreadState;
964 // The NoAllocationScope class is used in debug mode to catch unwanted
965 // allocations. E.g. allocations during GC.
966 template<ThreadAffinity Affinity>
967 class NoAllocationScope {
969 NoAllocationScope() : m_active(true) { enter(); }
// An inactive scope (active == false) is a no-op; useful when the
// restriction is conditional.
971 explicit NoAllocationScope(bool active) : m_active(active) { enter(); }
973 NoAllocationScope(const NoAllocationScope& other) : m_active(other.m_active) { enter(); }
975 NoAllocationScope& operator=(const NoAllocationScope& other)
978 m_active = other.m_active;
983 ~NoAllocationScope() { release(); }
// Scope entry/exit is delegated to the thread state's counter.
988 ThreadStateFor<Affinity>::state()->leaveNoAllocationScope();
997 ThreadStateFor<Affinity>::state()->enterNoAllocationScope();
1003 // Base class for objects allocated in the Blink garbage-collected
1006 // Defines a 'new' operator that allocates the memory in the
1007 // heap. 'delete' should not be called on objects that inherit from
1008 // GarbageCollected.
1010 // Instances of GarbageCollected will *NOT* get finalized. Their
1011 // destructor will not be called. Therefore, only classes that have
1012 // trivial destructors with no semantic meaning (including all their
1013 // subclasses) should inherit from GarbageCollected. If there are
1014 // non-trivial destructors in a given class or any of its subclasses,
1015 // GarbageCollectedFinalized should be used which guarantees that the
1016 // destructor is called on an instance when the garbage collector
1017 // determines that it is no longer reachable.
1018 template<typename T>
1019 class GarbageCollected {
1020 WTF_MAKE_NONCOPYABLE(GarbageCollected);
1022 // For now direct allocation of arrays on the heap is not allowed.
1023 void* operator new[](size_t size);
1024 #if OS(WIN) && COMPILER(MSVC)
1025 // Due to some quirkiness in the MSVC compiler we have to provide
1026 // the delete[] operator in the GarbageCollected subclasses as it
1027 // is called when a class is exported in a DLL.
1029 void operator delete[](void* p)
1031 ASSERT_NOT_REACHED();
1034 void operator delete[](void* p);
1037 typedef T GarbageCollectedBase;
// Allocation is routed to the garbage-collected heap; T selects the
// thread heap via its threading/type traits.
1039 void* operator new(size_t size)
1041 return Heap::allocate<T>(size);
// Objects are reclaimed by the GC; explicit delete is a bug.
1044 void operator delete(void* p)
1046 ASSERT_NOT_REACHED();
1055 // Base class for objects allocated in the Blink garbage-collected
1058 // Defines a 'new' operator that allocates the memory in the
1059 // heap. 'delete' should not be called on objects that inherit from
1060 // GarbageCollected.
1062 // Instances of GarbageCollectedFinalized will have their destructor
1063 // called when the garbage collector determines that the object is no
1064 // longer reachable.
1065 template<typename T>
1066 class GarbageCollectedFinalized : public GarbageCollected<T> {
1067 WTF_MAKE_NONCOPYABLE(GarbageCollectedFinalized);
1070 // finalizeGarbageCollectedObject is called when the object is
1071 // freed from the heap. By default finalization means calling the
1072 // destructor on the object. finalizeGarbageCollectedObject can be
1073 // overridden to support calling the destructor of a
1074 // subclass. This is useful for objects without vtables that
1075 // require explicit dispatching. The name is intentionally a bit
1076 // long to make name conflicts less likely.
1077 void finalizeGarbageCollectedObject()
1079 static_cast<T*>(this)->~T();
1082 GarbageCollectedFinalized() { }
1083 ~GarbageCollectedFinalized() { }
// Finalizer traits need access to finalizeGarbageCollectedObject.
1085 template<typename U> friend struct HasFinalizer;
1086 template<typename U, bool> friend struct FinalizerTraitImpl;
// Base class for objects that are in the Blink garbage-collected heap
// and are still reference counted.
// This class should be used sparingly and only to gradually move
// objects from being reference counted to being managed by the Blink
// garbage collector.
// While the current reference counting keeps one of these objects
// alive it will have a Persistent handle to itself allocated so we
// will not reclaim the memory. When the reference count reaches 0 the
// persistent handle will be deleted. When the garbage collector
// determines that there are no other references to the object it will
// be reclaimed and the destructor of the reclaimed object will be
// called at that time.
template<typename T>
class RefCountedGarbageCollected : public GarbageCollectedFinalized<T> {
WTF_MAKE_NONCOPYABLE(RefCountedGarbageCollected);
RefCountedGarbageCollected()
// Implement method to increase reference count for use with
// In contrast to the normal WTF::RefCounted, the reference count
// can reach 0 and increase again. This happens in the following
// (1) The reference count becomes 0, but members, persistents, or
// on-stack pointers keep references to the object.
// (2) The pointer is assigned to a RefPtr again and the reference
// In this case, we have to resurrect m_keepAlive.
if (UNLIKELY(!m_refCount)) {
// Resurrection is only legal while the object is still contained
// in this thread's heap; assert that before re-pinning it.
ASSERT(ThreadStateFor<ThreadingTrait<T>::Affinity>::state()->contains(reinterpret_cast<Address>(this)));
// Implement method to decrease reference count for use with
// In contrast to the normal WTF::RefCounted implementation, the
// object itself is not deleted when the reference count reaches
// 0. Instead, the keep-alive persistent handle is deallocated so
// that the object can be reclaimed when the garbage collector
// determines that there are no other references to the object.
ASSERT(m_refCount > 0);
if (!--m_refCount) {
// NOTE(review): the enclosing hasOneRef() signature is elided in
// this listing; this is its body.
return m_refCount == 1;
// Destruction happens only via GC finalization (GarbageCollectedFinalized).
~RefCountedGarbageCollected() { }
// Allocates the self-referencing Persistent that pins this object in
// the heap while its reference count is non-zero.
void makeKeepAlive()
ASSERT(!m_keepAlive);
m_keepAlive = new Persistent<T>(static_cast<T*>(this));
// Self-pinning handle; deallocated when the count drops to 0 (see
// the deref comment above).
Persistent<T>* m_keepAlive;
// Adopts a freshly created object whose reference count is exactly 1
// (asserted below), e.g. adoptRefCountedGarbageCollected(new T(...)).
// NOTE(review): the tail of this function is elided in this listing;
// presumably it sets up the keep-alive state before returning ptr.
template<typename T>
T* adoptRefCountedGarbageCollected(T* ptr)
ASSERT(ptr->hasOneRef());
// Classes that contain heap references but aren't themselves heap
// allocated, have some extra macros available which allows their use
// to be restricted to cases where the garbage collector is able
// to discover their heap references.
// STACK_ALLOCATED(): Use if the object is only stack allocated. Heap objects
// should be in Members but you do not need the trace method as they are on
// the stack. (Down the line these might turn in to raw pointers, but for
// now Members indicates that we have thought about them and explicitly
// taken care of them.)
// DISALLOW_ALLOCATION(): Cannot be allocated with new operators but can
// be a part object. If it has Members you need a trace method and the
// containing object needs to call that trace method.
// ALLOW_ONLY_INLINE_ALLOCATION(): Allows only placement new operator.
// This disallows general allocation of this object but allows to put
// the object as a value object in collections. If these have Members you
// need to have a trace method. That trace method will be called
// automatically by the Heap collections.
// With C++11 deleted functions, misuse of these operators is a
// compile-time error instead of a link-time error.
#if COMPILER_SUPPORTS(CXX_DELETED_FUNCTIONS)
#define DISALLOW_ALLOCATION() \
void* operator new(size_t) = delete; \
void* operator new(size_t, NotNullTag, void*) = delete; \
void* operator new(size_t, void*) = delete;
#define ALLOW_ONLY_INLINE_ALLOCATION() \
void* operator new(size_t, NotNullTag, void* location) { return location; } \
void* operator new(size_t, void* location) { return location; } \
void* operator new(size_t) = delete;
// NOTE(review): the body of STATIC_ONLY and the #else branch separator
// are elided from this listing; in the fallback branch below the
// operators are declared but left undefined to force a link error.
#define STATIC_ONLY(Type) \
#define DISALLOW_ALLOCATION() \
void* operator new(size_t); \
void* operator new(size_t, NotNullTag, void*); \
void* operator new(size_t, void*)
#define ALLOW_ONLY_INLINE_ALLOCATION() \
void* operator new(size_t, NotNullTag, void* location) { return location; } \
void* operator new(size_t, void* location) { return location; } \
void* operator new(size_t);
#define STATIC_ONLY(Type) \
// These macros insert annotations that the Blink GC plugin for clang uses for
// verification. STACK_ALLOCATED is used to declare that objects of this type
// are always stack allocated. GC_PLUGIN_IGNORE is used to make the plugin
// ignore a particular class or field when checking for proper usage. When using
// GC_PLUGIN_IGNORE a bug-number should be provided as an argument where the
// bug describes what needs to happen to remove the GC_PLUGIN_IGNORE again.
#define STACK_ALLOCATED() \
__attribute__((annotate("blink_stack_allocated"))) \
void* operator new(size_t) = delete; \
void* operator new(size_t, NotNullTag, void*) = delete; \
void* operator new(size_t, void*) = delete;
#define GC_PLUGIN_IGNORE(bug) \
__attribute__((annotate("blink_gc_plugin_ignore")))
// Fallback definitions for compilers without the annotate attribute:
// no plugin verification, but heap allocation is still disallowed.
// NOTE(review): the surrounding #if/#else/#endif lines are elided here.
#define STACK_ALLOCATED() DISALLOW_ALLOCATION()
#define GC_PLUGIN_IGNORE(bug)
// Verifies the magic cookie stamped into every HeapObjectHeader; catches
// use of a pointer that does not actually refer to a heap object header.
void HeapObjectHeader::checkHeader() const
ASSERT(m_magic == magic);
// First byte of the object itself, directly after the header.
Address HeapObjectHeader::payload()
return reinterpret_cast<Address>(this) + objectHeaderSize;
// Object size excluding the header.
size_t HeapObjectHeader::payloadSize()
return size() - objectHeaderSize;
// One past the last byte of the object.
Address HeapObjectHeader::payloadEnd()
return reinterpret_cast<Address>(this) + size();
// Sets the mark bit kept inside the size field; used during GC marking.
void HeapObjectHeader::mark()
m_size |= markBitMask;
// Same accessors for objects carrying the larger finalized header.
Address FinalizedHeapObjectHeader::payload()
return reinterpret_cast<Address>(this) + finalizedHeaderSize;
size_t FinalizedHeapObjectHeader::payloadSize()
return size() - finalizedHeaderSize;
1302 template<typename Header>
1303 size_t ThreadHeap<Header>::allocationSizeFromSize(size_t size)
1305 // Check the size before computing the actual allocation size. The
1306 // allocation size calculation can overflow for large sizes and
1307 // the check therefore has to happen before any calculation on the
1309 RELEASE_ASSERT(size < maxHeapObjectSize);
1311 // Add space for header.
1312 size_t allocationSize = size + sizeof(Header);
1313 // Align size with allocation granularity.
1314 allocationSize = (allocationSize + allocationMask) & ~allocationMask;
1315 return allocationSize;
template<typename Header>
Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo)
size_t allocationSize = allocationSizeFromSize(size);
// Objects larger than half a blink page go to the large-object space.
bool isLargeObject = allocationSize > blinkPageSize / 2;
// NOTE(review): the guarding 'if (isLargeObject)' line is elided in
// this listing; the return below is conditional in the full source.
return allocateLargeObject(allocationSize, gcInfo);
// Slow path: the current bump-allocation area cannot satisfy the request.
if (m_remainingAllocationSize < allocationSize)
return outOfLineAllocate(size, gcInfo);
// Fast path: bump-pointer allocation from the current area.
Address headerAddress = m_currentAllocationPoint;
m_currentAllocationPoint += allocationSize;
m_remainingAllocationSize -= allocationSize;
// Construct the header in place in front of the payload.
Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo);
size_t payloadSize = allocationSize - sizeof(Header);
stats().increaseObjectSpace(payloadSize);
Address result = headerAddress + sizeof(*header);
ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
// Unpoison the memory used for the object (payload).
ASAN_UNPOISON_MEMORY_REGION(result, payloadSize);
// Objects are handed out zero-filled.
memset(result, 0, payloadSize);
ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1));
// FIXME: Allocate objects that do not need finalization separately
// and use separate sweeping to not have to check for finalizers.
template<typename T>
Address Heap::allocate(size_t size)
// Select the thread state and heap bucket for T, then delegate to the
// typed heap's allocate().
ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
ASSERT(state->isAllocationAllowed());
BaseHeap* heap = state->heap(HeapTrait<T>::index);
// NOTE(review): the 'return' keyword of this statement appears to be
// on an elided portion of the line in this listing.
static_cast<typename HeapTrait<T>::HeapType*>(heap)->allocate(size, GCInfoTrait<T>::get());
// FIXME: Allocate objects that do not need finalization separately
// and use separate sweeping to not have to check for finalizers.
template<typename T>
Address Heap::reallocate(void* previous, size_t size)
// If the new size is 0 this is equivalent to either
// free(previous) or malloc(0). In both cases we do
// nothing and return 0.
ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
ASSERT(state->isAllocationAllowed());
// FIXME: Currently only supports raw allocation on the
// GeneralHeap. Hence we assume the header is a
// FinalizedHeapObjectHeader.
ASSERT(HeapTrait<T>::index == GeneralHeap);
BaseHeap* heap = state->heap(HeapTrait<T>::index);
Address address = static_cast<typename HeapTrait<T>::HeapType*>(heap)->allocate(size, GCInfoTrait<T>::get());
// This is equivalent to malloc(size).
// NOTE(review): the '!previous' early-return guard is elided here.
FinalizedHeapObjectHeader* previousHeader = FinalizedHeapObjectHeader::fromPayload(previous);
ASSERT(!previousHeader->hasFinalizer());
ASSERT(previousHeader->gcInfo() == GCInfoTrait<T>::get());
// Copy at most the old payload size into the new allocation.
size_t copySize = previousHeader->payloadSize();
if (copySize > size)
// NOTE(review): the 'copySize = size;' clamp line is elided here.
memcpy(address, previous, copySize);
// Computes rounded allocation sizes for collection backing stores so
// growth policies can account for the heap's actual granularity.
class HeapAllocatorQuantizer {
template<typename T>
static size_t quantizedSize(size_t count)
// Guard against overflow of 'count * sizeof(T)' below.
RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T));
return HeapTrait<T>::HeapType::roundedAllocationSize(count * sizeof(T));
static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize;
// This is a static-only class used as a trait on collections to make them heap allocated.
// However see also HeapListHashSetAllocator.
class HeapAllocator {
typedef HeapAllocatorQuantizer Quantizer;
typedef WebCore::Visitor Visitor;
static const bool isGarbageCollected = true;
// Backing-store allocations go through the GC heap. The heap zero-fills
// allocations (see ThreadHeap::allocate), so the "zeroed" variant can
// share the plain malloc implementation.
template <typename Return, typename Metadata>
static Return backingMalloc(size_t size)
return malloc<Return, Metadata>(size);
template <typename Return, typename Metadata>
static Return zeroedBackingMalloc(size_t size)
return malloc<Return, Metadata>(size);
template <typename Return, typename Metadata>
static Return malloc(size_t size)
return reinterpret_cast<Return>(Heap::allocate<Metadata>(size));
// Explicit frees are no-ops: the garbage collector reclaims memory.
static void backingFree(void* address) { }
static void free(void* address) { }
// Array new/delete are not supported for heap-backed collections.
template<typename T>
static void* newArray(size_t bytes)
ASSERT_NOT_REACHED();
static void deleteArray(void* ptr)
ASSERT_NOT_REACHED();
// Marks a backing store using the trace callback recorded in its
// finalized heap object header.
static void markUsingGCInfo(Visitor* visitor, const void* buffer)
visitor->mark(buffer, FinalizedHeapObjectHeader::fromPayload(buffer)->traceCallback());
static void markNoTracing(Visitor* visitor, const void* t) { visitor->markNoTracing(t); }
template<typename T, typename Traits>
static void trace(Visitor* visitor, T& t)
CollectionBackingTraceTrait<WTF::ShouldBeTraced<Traits>::value, Traits::isWeak, WeakPointersActWeak, T, Traits>::trace(visitor, t);
// hasDeadMember overloads: used during weak processing to decide which
// entries must be dropped. Types that are not (weak) members are never dead.
template<typename T>
static bool hasDeadMember(Visitor*, const T&)
// A strong Member must still be alive at this point (asserted).
template<typename T>
static bool hasDeadMember(Visitor* visitor, const Member<T>& t)
ASSERT(visitor->isAlive(t));
// A WeakMember is dead exactly when its target is no longer marked alive.
template<typename T>
static bool hasDeadMember(Visitor* visitor, const WeakMember<T>& t)
return !visitor->isAlive(t);
// A pair is dead if either of its halves is dead.
template<typename T, typename U>
static bool hasDeadMember(Visitor* visitor, const WTF::KeyValuePair<T, U>& t)
return hasDeadMember(visitor, t.key) || hasDeadMember(visitor, t.value);
// Defined later in this file, after LinkedHashSetNode is usable.
template<typename T>
static bool hasDeadMember(Visitor*, const WTF::LinkedHashSetNode<T>&);
static void registerWeakMembers(Visitor* visitor, const void* closure, const void* object, WeakPointerCallback callback)
visitor->registerWeakMembers(closure, object, callback);
// NOTE(review): the declaration this template header belongs to is
// elided from this listing.
template<typename T>
// The WTF classes use Allocator::VectorBackingHelper in order to find a
// class to template their backing allocation operation on. For off-heap
// allocations the VectorBackingHelper is a dummy class, since the class is
// not used during allocation of backing. For on-heap allocations this
// typedef ensures that the allocation is done with the correct templated
// instantiation of the allocation function. This ensures the correct GC
// map is written when backing allocations take place.
template<typename T, typename Traits>
struct VectorBackingHelper {
typedef HeapVectorBacking<T, Traits> Type;
// Like the VectorBackingHelper, but this type is used for HashSet and
// HashMap, both of which are implemented using HashTable.
template<typename Table>
struct HashTableBackingHelper {
typedef HeapHashTableBacking<Table> Type;
// NOTE(review): the declaration under this template header is elided
// from this listing.
template<typename T>
template<typename T>
static T& getOther(T* other)
static bool isAlive(Visitor* visitor, void* pointer) { return visitor->isAlive(pointer); }
// WTF collections need access to the private helper types above.
template<typename T, size_t u, typename V> friend class WTF::Vector;
template<typename T, typename U, typename V, typename W> friend class WTF::HashSet;
template<typename T, typename U, typename V, typename W, typename X, typename Y> friend class WTF::HashMap;
// Traces a value stored inside a (Heap)ListHashSet node.
template<typename Value>
static void traceListHashSetValue(Visitor* visitor, Value& value)
// We use the default hash traits for the value in the node, because
// ListHashSet does not let you specify any specific ones.
// We don't allow ListHashSet of WeakMember, so we set that one false
// (there's an assert elsewhere), but we have to specify some value for the
// strongify template argument, so we specify WeakPointersActWeak,
CollectionBackingTraceTrait<WTF::ShouldBeTraced<WTF::HashTraits<Value> >::value, false, WeakPointersActWeak, Value, WTF::HashTraits<Value> >::trace(visitor, value);
// The inline capacity is just a dummy template argument to match the off-heap
// This inherits from the static-only HeapAllocator trait class, but we do
// declare pointers to instances. These pointers are always null, and no
// objects are instantiated.
template<typename ValueArg, size_t inlineCapacity>
struct HeapListHashSetAllocator : public HeapAllocator {
typedef HeapAllocator TableAllocator;
typedef WTF::ListHashSetNode<ValueArg, HeapListHashSetAllocator> Node;
// Stand-in for ListHashSet's allocator provider; a heap-backed set
// never needs a real allocator instance.
class AllocatorProvider {
// For the heap allocation we don't need an actual allocator object, so we just
HeapListHashSetAllocator* get() const { return 0; }
// No allocator object is needed.
void createAllocatorIfNeeded() { }
// There is no allocator object in the HeapListHashSet (unlike in
// the regular ListHashSet) so there is nothing to swap.
void swap(AllocatorProvider& other) { }
// Nodes are garbage collected; explicit deallocation is a no-op.
void deallocate(void* dummy) { }
// This is not a static method even though it could be, because it
// needs to match the one that the (off-heap) ListHashSetAllocator
// has. The 'this' pointer will always be null.
void* allocateNode()
COMPILE_ASSERT(!WTF::IsWeak<ValueArg>::value, WeakPointersInAListHashSetWillJustResultInNullEntriesInTheSetThatsNotWhatYouWantConsiderUsingLinkedHashSetInstead);
return malloc<void*, Node>(sizeof(Node));
static void traceValue(Visitor* visitor, Node* node)
traceListHashSetValue(visitor, node->m_value);
// FIXME: These should just be template aliases:
// template<typename T, size_t inlineCapacity = 0>
// using HeapVector = Vector<T, inlineCapacity, HeapAllocator>;
// as soon as all the compilers we care about support that.
// MSVC supports it only in MSVC 2013.
// Thin wrapper classes binding the WTF collections to HeapAllocator.
// NOTE(review): the leading 'template<...' lines of the wrappers below
// are partially elided in this listing.
typename HashArg = typename DefaultHash<KeyArg>::Hash,
typename KeyTraitsArg = HashTraits<KeyArg>,
typename MappedTraitsArg = HashTraits<MappedArg> >
class HeapHashMap : public HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg, HeapAllocator> { };
typename HashArg = typename DefaultHash<ValueArg>::Hash,
typename TraitsArg = HashTraits<ValueArg> >
class HeapHashSet : public HashSet<ValueArg, HashArg, TraitsArg, HeapAllocator> { };
typename HashArg = typename DefaultHash<ValueArg>::Hash,
typename TraitsArg = HashTraits<ValueArg> >
class HeapLinkedHashSet : public LinkedHashSet<ValueArg, HashArg, TraitsArg, HeapAllocator> { };
size_t inlineCapacity = 0, // The inlineCapacity is just a dummy to match ListHashSet (off-heap).
typename HashArg = typename DefaultHash<ValueArg>::Hash>
class HeapListHashSet : public ListHashSet<ValueArg, inlineCapacity, HashArg, HeapListHashSetAllocator<ValueArg, inlineCapacity> > { };
typename HashFunctions = typename DefaultHash<Value>::Hash,
typename Traits = HashTraits<Value> >
class HeapHashCountedSet : public HashCountedSet<Value, HashFunctions, Traits, HeapAllocator> { };
// A WTF::Vector whose backing store is allocated from the Blink GC
// heap via HeapAllocator.
template<typename T, size_t inlineCapacity = 0>
class HeapVector : public Vector<T, inlineCapacity, HeapAllocator> {
explicit HeapVector(size_t size) : Vector<T, inlineCapacity, HeapAllocator>(size)
HeapVector(size_t size, const T& val) : Vector<T, inlineCapacity, HeapAllocator>(size, val)
// Copying from a HeapVector with a different inline capacity is allowed.
template<size_t otherCapacity>
HeapVector(const HeapVector<T, otherCapacity>& other)
: Vector<T, inlineCapacity, HeapAllocator>(other)
template<typename U>
void append(const U& other)
Vector<T, inlineCapacity, HeapAllocator>::append(other);
template<typename U, size_t otherCapacity>
void appendVector(const HeapVector<U, otherCapacity>& other)
// View as the base Vector type so the base appendVector overload applies.
const Vector<U, otherCapacity, HeapAllocator>& otherVector = other;
Vector<T, inlineCapacity, HeapAllocator>::appendVector(otherVector);
// A WTF::Deque whose backing store is allocated from the Blink GC
// heap via HeapAllocator.
template<typename T, size_t inlineCapacity = 0>
class HeapDeque : public Deque<T, inlineCapacity, HeapAllocator> {
explicit HeapDeque(size_t size) : Deque<T, inlineCapacity, HeapAllocator>(size)
HeapDeque(size_t size, const T& val) : Deque<T, inlineCapacity, HeapAllocator>(size, val)
// FIXME: Doesn't work if there is an inline buffer, due to crbug.com/360572
// NOTE(review): presumably copy-and-swap assignment; the swap/return
// lines are elided in this listing — confirm against the full source.
HeapDeque<T, 0>& operator=(const HeapDeque& other)
HeapDeque<T> copy(other);
// FIXME: Doesn't work if there is an inline buffer, due to crbug.com/360572
inline void swap(HeapDeque& other)
Deque<T, inlineCapacity, HeapAllocator>::swap(other);
// Copying from a HeapDeque with a different inline capacity is allowed.
template<size_t otherCapacity>
HeapDeque(const HeapDeque<T, otherCapacity>& other)
: Deque<T, inlineCapacity, HeapAllocator>(other)
template<typename U>
void append(const U& other)
Deque<T, inlineCapacity, HeapAllocator>::append(other);
// A LinkedHashSetNode is dead exactly when the value it wraps is dead.
template<typename T>
bool HeapAllocator::hasDeadMember(Visitor* visitor, const WTF::LinkedHashSetNode<T>& t)
return hasDeadMember(visitor, t.m_value);
// ThreadingTrait specializations: a wrapper type or collection has the
// same thread affinity as the type(s) it holds; compound types are
// MainThreadOnly only when every component type is.
template<typename T>
struct ThreadingTrait<Member<T> > {
static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
template<typename T>
struct ThreadingTrait<WeakMember<T> > {
static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
template<typename Key, typename Value, typename T, typename U, typename V>
struct ThreadingTrait<HashMap<Key, Value, T, U, V, HeapAllocator> > {
static const ThreadAffinity Affinity =
(ThreadingTrait<Key>::Affinity == MainThreadOnly)
&& (ThreadingTrait<Value>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
template<typename First, typename Second>
struct ThreadingTrait<WTF::KeyValuePair<First, Second> > {
static const ThreadAffinity Affinity =
(ThreadingTrait<First>::Affinity == MainThreadOnly)
&& (ThreadingTrait<Second>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
template<typename T, typename U, typename V>
struct ThreadingTrait<HashSet<T, U, V, HeapAllocator> > {
static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
template<typename T, size_t inlineCapacity>
struct ThreadingTrait<Vector<T, inlineCapacity, HeapAllocator> > {
static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
template<typename T, typename Traits>
struct ThreadingTrait<HeapVectorBacking<T, Traits> > {
static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
template<typename T, size_t inlineCapacity>
struct ThreadingTrait<Deque<T, inlineCapacity, HeapAllocator> > {
static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
template<typename T, typename U, typename V>
struct ThreadingTrait<HashCountedSet<T, U, V, HeapAllocator> > {
static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
template<typename Table>
struct ThreadingTrait<HeapHashTableBacking<Table> > {
typedef typename Table::KeyType Key;
typedef typename Table::ValueType Value;
static const ThreadAffinity Affinity =
(ThreadingTrait<Key>::Affinity == MainThreadOnly)
&& (ThreadingTrait<Value>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
// The Heap* wrappers simply delegate to the specializations for the
// underlying WTF collections above.
template<typename T, typename U, typename V, typename W, typename X>
struct ThreadingTrait<HeapHashMap<T, U, V, W, X> > : public ThreadingTrait<HashMap<T, U, V, W, X, HeapAllocator> > { };
template<typename T, typename U, typename V>
struct ThreadingTrait<HeapHashSet<T, U, V> > : public ThreadingTrait<HashSet<T, U, V, HeapAllocator> > { };
template<typename T, size_t inlineCapacity>
struct ThreadingTrait<HeapVector<T, inlineCapacity> > : public ThreadingTrait<Vector<T, inlineCapacity, HeapAllocator> > { };
template<typename T, size_t inlineCapacity>
struct ThreadingTrait<HeapDeque<T, inlineCapacity> > : public ThreadingTrait<Deque<T, inlineCapacity, HeapAllocator> > { };
template<typename T, typename U, typename V>
struct ThreadingTrait<HeapHashCountedSet<T, U, V> > : public ThreadingTrait<HashCountedSet<T, U, V, HeapAllocator> > { };
// The standard implementation of GCInfoTrait<T>::get() just returns a static
// from the class T, but we can't do that for HashMap, HashSet, Vector, etc.
// because they are in WTF and know nothing of GCInfos. Instead we have a
// specialization of GCInfoTrait for these four classes here.
// NOTE(review): several structural lines of the specializations below
// (some finalizer fields of the GCInfo initializers, #endif lines,
// 'return &info;' statements and closing braces) are elided from this
// listing; the comments here describe only what is visible.
template<typename Key, typename Value, typename T, typename U, typename V>
struct GCInfoTrait<HashMap<Key, Value, T, U, V, HeapAllocator> > {
static const GCInfo* get()
typedef HashMap<Key, Value, T, U, V, HeapAllocator> TargetType;
static const GCInfo info = {
TraceTrait<TargetType>::trace,
false, // HashMap needs no finalizer.
VTableTrait<TargetType>::hasVTable,
#if ENABLE(GC_TRACING)
TypenameStringTrait<TargetType>::get()
template<typename T, typename U, typename V>
struct GCInfoTrait<HashSet<T, U, V, HeapAllocator> > {
static const GCInfo* get()
typedef HashSet<T, U, V, HeapAllocator> TargetType;
static const GCInfo info = {
TraceTrait<TargetType>::trace,
false, // HashSet needs no finalizer.
VTableTrait<TargetType>::hasVTable,
#if ENABLE(GC_TRACING)
TypenameStringTrait<TargetType>::get()
template<typename T, typename U, typename V>
struct GCInfoTrait<LinkedHashSet<T, U, V, HeapAllocator> > {
static const GCInfo* get()
typedef LinkedHashSet<T, U, V, HeapAllocator> TargetType;
static const GCInfo info = {
TraceTrait<TargetType>::trace,
LinkedHashSet<T, U, V, HeapAllocator>::finalize,
true, // Needs finalization. The anchor needs to unlink itself from the chain.
VTableTrait<TargetType>::hasVTable,
#if ENABLE(GC_TRACING)
TypenameStringTrait<TargetType>::get()
template<typename ValueArg, size_t inlineCapacity, typename U>
struct GCInfoTrait<ListHashSet<ValueArg, inlineCapacity, U, HeapListHashSetAllocator<ValueArg, inlineCapacity> > > {
static const GCInfo* get()
typedef WTF::ListHashSet<ValueArg, inlineCapacity, U, HeapListHashSetAllocator<ValueArg, inlineCapacity> > TargetType;
static const GCInfo info = {
TraceTrait<TargetType>::trace,
false, // ListHashSet needs no finalization though its backing might.
false, // no vtable.
#if ENABLE(GC_TRACING)
TypenameStringTrait<TargetType>::get()
template<typename T, typename Allocator>
struct GCInfoTrait<WTF::ListHashSetNode<T, Allocator> > {
static const GCInfo* get()
typedef WTF::ListHashSetNode<T, Allocator> TargetType;
static const GCInfo info = {
TraceTrait<TargetType>::trace,
TargetType::finalize,
WTF::HashTraits<T>::needsDestruction, // The node needs destruction if its data does.
false, // no vtable.
#if ENABLE(GC_TRACING)
TypenameStringTrait<TargetType>::get()
template<typename T>
struct GCInfoTrait<Vector<T, 0, HeapAllocator> > {
static const GCInfo* get()
#if ENABLE(GC_TRACING)
typedef Vector<T, 0, HeapAllocator> TargetType;
static const GCInfo info = {
TraceTrait<Vector<T, 0, HeapAllocator> >::trace,
false, // Vector needs no finalizer if it has no inline capacity.
VTableTrait<Vector<T, 0, HeapAllocator> >::hasVTable,
#if ENABLE(GC_TRACING)
TypenameStringTrait<TargetType>::get()
// Vectors with inline capacity need a finalizer so elements living in
// the inline buffer are destructed.
template<typename T, size_t inlineCapacity>
struct FinalizerTrait<Vector<T, inlineCapacity, HeapAllocator> > : public FinalizerTraitImpl<Vector<T, inlineCapacity, HeapAllocator>, true> { };
template<typename T, size_t inlineCapacity>
struct GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > {
static const GCInfo* get()
typedef Vector<T, inlineCapacity, HeapAllocator> TargetType;
static const GCInfo info = {
TraceTrait<TargetType>::trace,
FinalizerTrait<TargetType>::finalize,
// Finalizer is needed to destruct things stored in the inline capacity.
inlineCapacity && VectorTraits<T>::needsDestruction,
VTableTrait<TargetType>::hasVTable,
#if ENABLE(GC_TRACING)
TypenameStringTrait<TargetType>::get()
template<typename T>
struct GCInfoTrait<Deque<T, 0, HeapAllocator> > {
static const GCInfo* get()
typedef Deque<T, 0, HeapAllocator> TargetType;
static const GCInfo info = {
TraceTrait<TargetType>::trace,
false, // Deque needs no finalizer if it has no inline capacity.
VTableTrait<TargetType>::hasVTable,
#if ENABLE(GC_TRACING)
TypenameStringTrait<TargetType>::get()
static const GCInfo info;
template<typename T, typename U, typename V>
struct GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator> > {
static const GCInfo* get()
typedef HashCountedSet<T, U, V, HeapAllocator> TargetType;
static const GCInfo info = {
TraceTrait<TargetType>::trace,
false, // HashCountedSet is just a HashTable, and needs no finalizer.
VTableTrait<TargetType>::hasVTable,
#if ENABLE(GC_TRACING)
TypenameStringTrait<TargetType>::get()
static const GCInfo info;
// As with Vector: inline-capacity Deques need a finalizer.
template<typename T, size_t inlineCapacity>
struct FinalizerTrait<Deque<T, inlineCapacity, HeapAllocator> > : public FinalizerTraitImpl<Deque<T, inlineCapacity, HeapAllocator>, true> { };
template<typename T, size_t inlineCapacity>
struct GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator> > {
static const GCInfo* get()
typedef Deque<T, inlineCapacity, HeapAllocator> TargetType;
static const GCInfo info = {
TraceTrait<TargetType>::trace,
FinalizerTrait<TargetType>::finalize,
// Finalizer is needed to destruct things stored in the inline capacity.
inlineCapacity && VectorTraits<T>::needsDestruction,
VTableTrait<TargetType>::hasVTable,
#if ENABLE(GC_TRACING)
TypenameStringTrait<TargetType>::get()
static const GCInfo info;
// Backing stores take their finalization needs from the element traits.
template<typename T, typename Traits>
struct GCInfoTrait<HeapVectorBacking<T, Traits> > {
static const GCInfo* get()
typedef HeapVectorBacking<T, Traits> TargetType;
static const GCInfo info = {
TraceTrait<TargetType>::trace,
FinalizerTrait<TargetType>::finalize,
Traits::needsDestruction,
false, // We don't support embedded objects in HeapVectors with vtables.
#if ENABLE(GC_TRACING)
TypenameStringTrait<TargetType>::get()
template<typename Table>
struct GCInfoTrait<HeapHashTableBacking<Table> > {
static const GCInfo* get()
typedef HeapHashTableBacking<Table> TargetType;
static const GCInfo info = {
TraceTrait<TargetType>::trace,
HeapHashTableBacking<Table>::finalize,
Table::ValueTraits::needsDestruction,
VTableTrait<TargetType>::hasVTable,
#if ENABLE(GC_TRACING)
TypenameStringTrait<TargetType>::get()
// Traces a vector backing store by visiting every element slot.
template<typename T, typename Traits>
struct BaseVisitVectorBackingTrait {
static void trace(WebCore::Visitor* visitor, void* self)
// The allocator can oversize the allocation a little, according to
// the allocation granularity. The extra size is included in the
// payloadSize call below, since there is nowhere to store the
// originally allocated memory. This assert ensures that visiting the
// last bit of memory can't cause trouble.
COMPILE_ASSERT(!WTF::ShouldBeTraced<Traits>::value || sizeof(T) > allocationGranularity || Traits::canInitializeWithMemset, HeapOverallocationCanCauseSpuriousVisits);
T* array = reinterpret_cast<T*>(self);
WebCore::FinalizedHeapObjectHeader* header = WebCore::FinalizedHeapObjectHeader::fromPayload(self);
// Use the payload size as recorded by the heap to determine how many
// elements to mark.
size_t length = header->payloadSize() / sizeof(T);
for (size_t i = 0; i < length; i++)
// Weak pointers in vector backings are traced strongly here
// (WeakPointersActStrong).
CollectionBackingTraceTrait<WTF::ShouldBeTraced<Traits>::value, Traits::isWeak, WeakPointersActStrong, T, Traits>::trace(visitor, array[i]);
// Almost all hash table backings are visited with this specialization.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename Table>
struct BaseVisitHashTableBackingTrait {
typedef typename Table::ValueType Value;
typedef typename Table::ValueTraits Traits;
static void trace(WebCore::Visitor* visitor, void* self)
Value* array = reinterpret_cast<Value*>(self);
WebCore::FinalizedHeapObjectHeader* header = WebCore::FinalizedHeapObjectHeader::fromPayload(self);
// The heap-recorded payload size determines the bucket count.
size_t length = header->payloadSize() / sizeof(Value);
for (size_t i = 0; i < length; i++) {
// Skip empty and deleted buckets; only live entries are traced.
if (!WTF::HashTableHelper<Value, typename Table::ExtractorType, typename Table::KeyTraitsType>::isEmptyOrDeletedBucket(array[i]))
CollectionBackingTraceTrait<WTF::ShouldBeTraced<Traits>::value, Traits::isWeak, strongify, Value, Traits>::trace(visitor, array[i]);
// This specialization of BaseVisitHashTableBackingTrait is for the backing of
// HeapListHashSet. This is for the case that we find a reference to the
// backing from the stack. That probably means we have a GC while we are in a
// ListHashSet method since normal API use does not put pointers to the backing
template<ShouldWeakPointersBeMarkedStrongly strongify, typename NodeContents, size_t inlineCapacity, typename T, typename U, typename V, typename W, typename X, typename Y>
struct BaseVisitHashTableBackingTrait<strongify, WTF::HashTable<WTF::ListHashSetNode<NodeContents, HeapListHashSetAllocator<T, inlineCapacity> >*, U, V, W, X, Y, HeapAllocator> > {
typedef WTF::ListHashSetNode<NodeContents, HeapListHashSetAllocator<T, inlineCapacity> > Node;
typedef WTF::HashTable<Node*, U, V, W, X, Y, HeapAllocator> Table;
static void trace(WebCore::Visitor* visitor, void* self)
// The backing stores pointers to heap-allocated nodes.
Node** array = reinterpret_cast<Node**>(self);
WebCore::FinalizedHeapObjectHeader* header = WebCore::FinalizedHeapObjectHeader::fromPayload(self);
size_t length = header->payloadSize() / sizeof(Node*);
for (size_t i = 0; i < length; i++) {
if (!WTF::HashTableHelper<Node*, typename Table::ExtractorType, typename Table::KeyTraitsType>::isEmptyOrDeletedBucket(array[i])) {
// Trace the value held by the node itself.
traceListHashSetValue(visitor, array[i]->m_value);
// Just mark the node without tracing because we already traced
// the contents, and there is no need to trace the next and
// prev fields since iterating over the hash table backing will
// find the whole chain.
visitor->markNoTracing(array[i]);
2070 template<ShouldWeakPointersBeMarkedStrongly strongify, typename Key, typename Value, typename Traits>
2071 struct BaseVisitKeyValuePairTrait {
2072 static void trace(WebCore::Visitor* visitor, WTF::KeyValuePair<Key, Value>& self)
2074 ASSERT(WTF::ShouldBeTraced<Traits>::value || (Traits::isWeak && strongify == WeakPointersActStrong));
2075 CollectionBackingTraceTrait<WTF::ShouldBeTraced<typename Traits::KeyTraits>::value, Traits::KeyTraits::isWeak, strongify, Key, typename Traits::KeyTraits>::trace(visitor, self.key);
2076 CollectionBackingTraceTrait<WTF::ShouldBeTraced<typename Traits::ValueTraits>::value, Traits::ValueTraits::isWeak, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.value);
2080 template<ShouldWeakPointersBeMarkedStrongly strongify, typename Value, typename Traits>
2081 struct BaseVisitLinkedNodeTrait {
2082 static void trace(WebCore::Visitor* visitor, WTF::LinkedHashSetNode<Value>& self)
2084 ASSERT(WTF::ShouldBeTraced<Traits>::value || (Traits::isWeak && strongify == WeakPointersActStrong));
2085 CollectionBackingTraceTrait<WTF::ShouldBeTraced<Traits>::value, Traits::isWeak, strongify, Value, Traits>::trace(visitor, self.m_value);
2089 // Catch-all for things that don't need marking and have no weak pointers. We
2090 // do nothing, even if WeakPointersActStrong.
2091 template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename U>
2092 struct CollectionBackingTraceTrait<false, false, strongify, T, U> {
2093 static void trace(Visitor*, const T&) { }
2094 static void trace(Visitor*, const void*) { }
2097 // Catch-all for things that don't need marking. They have weak pointers, but
2098 // we are not marking weak pointers in this object in this GC.
2099 template<typename T, typename U>
2100 struct CollectionBackingTraceTrait<false, true, WeakPointersActWeak, T, U> {
2101 static void trace(Visitor*, const T&) { }
2102 static void trace(Visitor*, const void*) { }
2105 // For each type that we understand we have the strongified case and the
2106 // needsMarking case. The strongified case is where we would not normally need
2107 // to mark it, but it has weak pointers, and we are marking them as strong
2108 // because there is a live iterator that would be disturbed if the collection
2109 // were subject to weak processing right now. The needsMarking case is the
2110 // case for things that need marking, which may also be strongified (eg. for
2111 // a map pair that has a key that is weak and a value that needs marking).
2113 // The body is the same for most objects, but there are two template
2114 // specializations because there is no way to express needsMarking || (isWeak
2115 // && WeakPointersActStrong) in a single specialization.
2117 // Hash table that would not normally need marking, but strongified.
2118 template<typename Table>
2119 struct CollectionBackingTraceTrait<false, true, WeakPointersActStrong, HeapHashTableBacking<Table>, void> : public BaseVisitHashTableBackingTrait<WeakPointersActStrong, Table> {
2122 // Hash table that needs marking, optionally also strongified.
2123 template<bool isWeak, ShouldWeakPointersBeMarkedStrongly strongify, typename Table>
2124 struct CollectionBackingTraceTrait<true, isWeak, strongify, HeapHashTableBacking<Table>, void> : public BaseVisitHashTableBackingTrait<strongify, Table> {
2127 // Key-value pair that would not normally need marking, but strongified.
2128 template<typename Key, typename Value, typename Traits>
2129 struct CollectionBackingTraceTrait<false, true, WeakPointersActStrong, WTF::KeyValuePair<Key, Value>, Traits> : public BaseVisitKeyValuePairTrait<WeakPointersActStrong, Key, Value, Traits> {
2132 // Key value pair that needs marking, optionally also strongified.
2133 template<bool isWeak, ShouldWeakPointersBeMarkedStrongly strongify, typename Key, typename Value, typename Traits>
2134 struct CollectionBackingTraceTrait<true, isWeak, strongify, WTF::KeyValuePair<Key, Value>, Traits> : public BaseVisitKeyValuePairTrait<strongify, Key, Value, Traits> {
2137 // Linked hash set node that would not normally need marking, but strongified.
2138 template<typename Value, typename Traits>
2139 struct CollectionBackingTraceTrait<false, true, WeakPointersActStrong, WTF::LinkedHashSetNode<Value>, Traits> : public BaseVisitLinkedNodeTrait<WeakPointersActStrong, Value, Traits> {
2142 // Linked hash set node that needs marking, optionally also strongified.
2143 template<bool isWeak, ShouldWeakPointersBeMarkedStrongly strongify, typename Value, typename Traits>
2144 struct CollectionBackingTraceTrait<true, isWeak, strongify, WTF::LinkedHashSetNode<Value>, Traits> : public BaseVisitLinkedNodeTrait<strongify, Value, Traits> {
2147 // ListHashSetNode pointers (a ListHashSet is implemented as a hash table of these pointers).
2148 template<ShouldWeakPointersBeMarkedStrongly strongify, typename Value, size_t inlineCapacity, typename Traits>
2149 struct CollectionBackingTraceTrait<true, false, strongify, WTF::ListHashSetNode<Value, HeapListHashSetAllocator<Value, inlineCapacity> >*, Traits> {
2150 typedef WTF::ListHashSetNode<Value, HeapListHashSetAllocator<Value, inlineCapacity> > Node;
2151 static void trace(WebCore::Visitor* visitor, Node* node)
2153 traceListHashSetValue(visitor, node->m_value);
2154 // Just mark the node without tracing because we already traced the
2155 // contents, and there is no need to trace the next and prev fields
2156 // since iterating over the hash table backing will find the whole
2158 visitor->markNoTracing(node);
2162 // Vector backing that needs marking. We don't support weak members in vectors,
2163 // so we don't need a strongified variant here.
2164 template<bool isWeak, ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
2165 struct CollectionBackingTraceTrait<true, isWeak, strongify, HeapVectorBacking<T, Traits>, void> : public BaseVisitVectorBackingTrait<T, Traits> {
2168 // Member always needs marking, never weak.
2169 template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
2170 struct CollectionBackingTraceTrait<true, false, strongify, Member<T>, Traits> {
2171 static void trace(WebCore::Visitor* visitor, Member<T> self)
2173 visitor->mark(self.get());
2177 // Weak member never has needsMarking, always weak, strongified case.
2178 template<typename T, typename Traits>
2179 struct CollectionBackingTraceTrait<false, true, WeakPointersActStrong, WeakMember<T>, Traits> {
2180 static void trace(WebCore::Visitor* visitor, WeakMember<T> self)
2182 // This can mark weak members as if they were strong. The reason we
2183 // need this is that we don't do weak processing unless we reach the
2184 // backing only through the hash table. Reaching it in any other way
2185 // makes it impossible to update the size and deleted slot count of the
2186 // table, and exposes us to weak processing during iteration issues.
2187 visitor->mark(self.get());
2191 // Catch-all for things that have a way to trace. For things that contain weak
2192 // pointers they will generally be visited weakly even if
2193 // WeakPointersActStrong. This is a deliberate choice: We currently don't
2194 // strongify weak members that are embedded in larger structures, they are just
2195 // zeroed during weak processing without the whole structure being
2196 // removed from the collection.
2197 template<bool isWeak, ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
2198 struct CollectionBackingTraceTrait<true, isWeak, strongify, T, Traits> {
2199 static void trace(WebCore::Visitor* visitor, T& t)
2201 TraceTrait<T>::trace(visitor, &t);
2205 template<typename T, typename Traits>
2206 struct TraceTrait<HeapVectorBacking<T, Traits> > {
2207 typedef HeapVectorBacking<T, Traits> Backing;
2208 static void trace(WebCore::Visitor* visitor, void* self)
2210 COMPILE_ASSERT(!Traits::isWeak, WeDontSupportWeaknessInHeapVectorsOrDeques);
2211 if (WTF::ShouldBeTraced<Traits>::value)
2212 CollectionBackingTraceTrait<WTF::ShouldBeTraced<Traits>::value, false, WeakPointersActWeak, HeapVectorBacking<T, Traits>, void>::trace(visitor, self);
2214 static void mark(Visitor* visitor, const Backing* backing)
2216 visitor->mark(backing, &trace);
2218 static void checkGCInfo(Visitor* visitor, const Backing* backing)
2221 visitor->checkGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing>::get());
2226 // The trace trait for the heap hashtable backing is used when we find a
2227 // direct pointer to the backing from the conservative stack scanner. This
2228 // normally indicates that there is an ongoing iteration over the table, and so
2229 // we disable weak processing of table entries. When the backing is found
2230 // through the owning hash table we mark differently, in order to do weak
2232 template<typename Table>
2233 struct TraceTrait<HeapHashTableBacking<Table> > {
2234 typedef HeapHashTableBacking<Table> Backing;
2235 typedef typename Table::ValueTraits Traits;
2236 static void trace(WebCore::Visitor* visitor, void* self)
2238 if (WTF::ShouldBeTraced<Traits>::value || Traits::isWeak)
2239 CollectionBackingTraceTrait<WTF::ShouldBeTraced<Traits>::value, Traits::isWeak, WeakPointersActStrong, Backing, void>::trace(visitor, self);
2241 static void mark(Visitor* visitor, const Backing* backing)
2243 if (WTF::ShouldBeTraced<Traits>::value || Traits::isWeak)
2244 visitor->mark(backing, &trace);
2246 visitor->markNoTracing(backing); // If we know the trace function will do nothing there is no need to call it.
2248 static void checkGCInfo(Visitor* visitor, const Backing* backing)
2251 visitor->checkGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing>::get());
2256 template<typename Table>
2257 void HeapHashTableBacking<Table>::finalize(void* pointer)
2259 typedef typename Table::ValueType Value;
2260 ASSERT(Table::ValueTraits::needsDestruction);
2261 FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(pointer);
2262 // Use the payload size as recorded by the heap to determine how many
2263 // elements to finalize.
2264 size_t length = header->payloadSize() / sizeof(Value);
2265 Value* table = reinterpret_cast<Value*>(pointer);
2266 for (unsigned i = 0; i < length; i++) {
2267 if (!Table::isEmptyOrDeletedBucket(table[i]))
2272 template<typename T, typename U, typename V, typename W, typename X>
2273 struct GCInfoTrait<HeapHashMap<T, U, V, W, X> > : public GCInfoTrait<HashMap<T, U, V, W, X, HeapAllocator> > { };
2274 template<typename T, typename U, typename V>
2275 struct GCInfoTrait<HeapHashSet<T, U, V> > : public GCInfoTrait<HashSet<T, U, V, HeapAllocator> > { };
2276 template<typename T, typename U, typename V>
2277 struct GCInfoTrait<HeapLinkedHashSet<T, U, V> > : public GCInfoTrait<LinkedHashSet<T, U, V, HeapAllocator> > { };
2278 template<typename T, size_t inlineCapacity, typename U>
2279 struct GCInfoTrait<HeapListHashSet<T, inlineCapacity, U> > : public GCInfoTrait<ListHashSet<T, inlineCapacity, U, HeapListHashSetAllocator<T, inlineCapacity> > > { };
2280 template<typename T, size_t inlineCapacity>
2281 struct GCInfoTrait<HeapVector<T, inlineCapacity> > : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > { };
2282 template<typename T, size_t inlineCapacity>
2283 struct GCInfoTrait<HeapDeque<T, inlineCapacity> > : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator> > { };
2284 template<typename T, typename U, typename V>
2285 struct GCInfoTrait<HeapHashCountedSet<T, U, V> > : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator> > { };
2287 template<typename T>
2288 struct IfWeakMember;
2290 template<typename T>
2291 struct IfWeakMember {
2292 template<typename U>
2293 static bool isDead(Visitor*, const U&) { return false; }
2296 template<typename T>
2297 struct IfWeakMember<WeakMember<T> > {
2298 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); }
2302 // Clang does not export the symbols that we have explicitly asked it
2303 // to export. This forces it to export all the methods from ThreadHeap.
2304 template<> void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo*);
2305 template<> void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo*);
2306 extern template class PLATFORM_EXPORT ThreadHeap<FinalizedHeapObjectHeader>;
2307 extern template class PLATFORM_EXPORT ThreadHeap<HeapObjectHeader>;