/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef Heap_h
#define Heap_h

#include "platform/PlatformExport.h"
#include "platform/heap/AddressSanitizer.h"
#include "platform/heap/ThreadState.h"
#include "platform/heap/Visitor.h"
#include "public/platform/WebThread.h"
#include "wtf/Assertions.h"
#include "wtf/HashCountedSet.h"
#include "wtf/LinkedHashSet.h"
#include "wtf/ListHashSet.h"
#include "wtf/OwnPtr.h"
#include "wtf/PassRefPtr.h"
#include "wtf/ThreadSafeRefCounted.h"

namespace blink {

const size_t blinkPageSizeLog2 = 17;
const size_t blinkPageSize = 1 << blinkPageSizeLog2;
const size_t blinkPageOffsetMask = blinkPageSize - 1;
const size_t blinkPageBaseMask = ~blinkPageOffsetMask;

// We allocate pages at random addresses but in groups of blinkPagesPerRegion
// at a given random address. We group pages so that we do not spread out too
// much over the address space, which would blow away the page tables and
// lead to bad performance.
const size_t blinkPagesPerRegion = 10;

// Double precision floats are more efficient when 8-byte aligned, so we
// 8-byte align all allocations even on 32 bit.
const size_t allocationGranularity = 8;
const size_t allocationMask = allocationGranularity - 1;
const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity);
const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask);
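
// Illustrative compile-time checks of the arithmetic above (added for
// illustration; not in the original header): with the default 128KB
// (1 << 17) Blink page and 8-byte granularity, each bitmap byte covers
// 8 * 8 = 64 bytes of page, so the object start bitmap needs
// 131072 / 64 = 2048 bytes, a size that is already allocation aligned.
COMPILE_ASSERT(objectStartBitMapSize == 2048, object_start_bit_map_size_matches_worked_example);
COMPILE_ASSERT(reservedForObjectBitMap == 2048, reserved_bit_map_area_is_allocation_aligned);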
const size_t maxHeapObjectSizeLog2 = 27;
const size_t maxHeapObjectSize = 1 << maxHeapObjectSizeLog2;

const size_t markBitMask = 1;
const size_t freeListMask = 2;
// The dead bit is used for objects that have gone through a GC marking, but did
// not get swept before a new GC started. In that case we set the dead bit on
// objects that were not marked in the previous GC to ensure we are not tracing
// them via a conservatively found pointer. Tracing dead objects could lead to
// tracing of already finalized objects in another thread's heap, which is a
// use-after-free situation.
const size_t deadBitMask = 4;
// On free-list entries we reuse the dead bit to distinguish a normal free-list
// entry from one that has been promptly freed.
const size_t promptlyFreedMask = freeListMask | deadBitMask;

#if ENABLE(GC_PROFILE_HEAP)
const size_t heapObjectGenerations = 8;
const size_t maxHeapObjectAge = heapObjectGenerations - 1;
const size_t heapObjectAgeMask = ~(maxHeapObjectSize - 1);
const size_t sizeMask = ~heapObjectAgeMask & ~static_cast<size_t>(7);
#else
const size_t sizeMask = ~static_cast<size_t>(7);
#endif

const uint8_t freelistZapValue = 42;
const uint8_t finalizedZapValue = 24;
// The orphaned zap value must be zero in the lowest bits to allow for using
// the mark bit when tracing.
const uint8_t orphanedZapValue = 240;

const int numberOfMarkingThreads = 2;

const int numberOfPagesToConsiderForCoalescing = 100;

enum CallbackInvocationMode {
    GlobalMarking,
    ThreadLocalMarking,
};

template<ThreadAffinity affinity> class ThreadLocalPersistents;
template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity > > class Persistent;
template<typename T> class CrossThreadPersistent;

#if ENABLE(GC_PROFILE_HEAP)
class TracedValue;
#endif

PLATFORM_EXPORT size_t osPageSize();

// Blink heap pages are set up with a guard page before and after the
// payload.
inline size_t blinkPagePayloadSize()
{
    return blinkPageSize - 2 * osPageSize();
}

// Blink heap pages are aligned to the Blink heap page size.
// Therefore, the start of a Blink page can be obtained by
// rounding down to the Blink page size.
inline Address roundToBlinkPageStart(Address address)
{
    return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
}

inline Address roundToBlinkPageEnd(Address address)
{
    return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) & blinkPageBaseMask) + blinkPageSize;
}

// Compute the amount of padding we have to add to a header to make
// the size of the header plus the padding a multiple of 8 bytes.
template<typename Header>
inline size_t headerPadding()
{
    return (allocationGranularity - (sizeof(Header) % allocationGranularity)) % allocationGranularity;
}
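
// Worked example (added for illustration, not in the original header): for a
// hypothetical 12-byte Header, headerPadding() is (8 - (12 % 8)) % 8 == 4,
// so header plus padding occupies 16 bytes. The outer modulo matters for
// headers that are already aligned: for an 8-byte Header the result is
// (8 - 0) % 8 == 0 rather than 8.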
// Masks an address down to the enclosing blink page base address.
inline Address blinkPageAddress(Address address)
{
    return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
}

// Sanity check for a page header address: the address of the page
// header should be OS page size away from being Blink page size
// aligned.
inline bool isPageHeaderAddress(Address address)
{
    return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize());
}

// Mask an address down to the enclosing oilpan heap base page.
// All oilpan heap pages are aligned at blinkPageBase plus an OS page size.
// FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps.
// This is only exported to enable tests in HeapTest.cpp.
PLATFORM_EXPORT inline BaseHeapPage* pageHeaderFromObject(const void* object)
{
    Address address = reinterpret_cast<Address>(const_cast<void*>(object));
    return reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + osPageSize());
}

// Large allocations are allocated as separate objects and linked in a
// list.
//
// In order to use the same memory allocation routines for everything
// allocated in the heap, large objects are considered heap pages
// containing only one object.
//
// The layout of a large heap object is as follows:
//
// | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload |
template<typename Header>
class LargeHeapObject : public BaseHeapPage {
public:
    LargeHeapObject(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) : BaseHeapPage(storage, gcInfo, state)
    {
        COMPILE_ASSERT(!(sizeof(LargeHeapObject<Header>) & allocationMask), large_heap_object_header_misaligned);
    }

    virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE;
    virtual bool isLargeObject() OVERRIDE { return true; }

#if ENABLE(GC_PROFILE_MARKING)
    virtual const GCInfo* findGCInfo(Address address)
    {
        if (!objectContains(address))
            return 0;
        return gcInfo();
    }
#endif

#if ENABLE(GC_PROFILE_HEAP)
    void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
#endif

    void link(LargeHeapObject<Header>** previousNext)
    {
        m_next = *previousNext;
        *previousNext = this;
    }

    void unlink(LargeHeapObject<Header>** previousNext)
    {
        *previousNext = m_next;
    }

    // The LargeHeapObject pseudo-page contains one actual object. Determine
    // whether the pointer is within that object.
    bool objectContains(Address object)
    {
        return (payload() <= object) && (object < address() + size());
    }

    // Returns true for any address that is on one of the pages that this
    // large object uses. That ensures that we can use a negative result to
    // populate the negative page cache.
    virtual bool contains(Address object) OVERRIDE
    {
        return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size());
    }

    LargeHeapObject<Header>* next()
    {
        return m_next;
    }

    size_t size()
    {
        return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
    }

    Address payload() { return heapObjectHeader()->payload(); }
    size_t payloadSize() { return heapObjectHeader()->payloadSize(); }

    Header* heapObjectHeader()
    {
        Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
        return reinterpret_cast<Header*>(headerAddress);
    }

    void getStats(HeapStats&);

    virtual void markOrphaned()
    {
        // Zap the payload with a recognizable value to detect any incorrect
        // cross thread pointer usage.
        memset(payload(), orphanedZapValue, payloadSize());
        BaseHeapPage::markOrphaned();
    }

private:
    friend class ThreadHeap<Header>;

    LargeHeapObject<Header>* m_next;
};

// The BasicObjectHeader is the minimal object header. It is used when
// encountering heap space of size allocationGranularity to mark it as
// a freelist entry.
class PLATFORM_EXPORT BasicObjectHeader {
public:
    NO_SANITIZE_ADDRESS
    explicit BasicObjectHeader(size_t encodedSize)
        : m_size(encodedSize) { }

    static size_t freeListEncodedSize(size_t size) { return size | freeListMask; }

    NO_SANITIZE_ADDRESS
    bool isFree() { return m_size & freeListMask; }

    NO_SANITIZE_ADDRESS
    bool isPromptlyFreed() { return (m_size & promptlyFreedMask) == promptlyFreedMask; }

    NO_SANITIZE_ADDRESS
    void markPromptlyFreed() { m_size |= promptlyFreedMask; }

    NO_SANITIZE_ADDRESS
    size_t size() const { return m_size & sizeMask; }

#if ENABLE(GC_PROFILE_HEAP)
    NO_SANITIZE_ADDRESS
    size_t encodedSize() const { return m_size; }

    NO_SANITIZE_ADDRESS
    size_t age() const { return m_size >> maxHeapObjectSizeLog2; }

    NO_SANITIZE_ADDRESS
    void incAge()
    {
        size_t current = age();
        if (current < maxHeapObjectAge)
            m_size = ((current + 1) << maxHeapObjectSizeLog2) | (m_size & ~heapObjectAgeMask);
    }
#endif

protected:
    volatile unsigned m_size;
};

// Our heap object layout is layered with the HeapObjectHeader closest
// to the payload, this can be wrapped in a FinalizedObjectHeader if the
// object is on the GeneralHeap and not on a specific TypedHeap.
// Finally if the object is a large object (> blinkPageSize/2) then it is
// wrapped with a LargeObjectHeader.
//
// Object memory layout:
// [ LargeObjectHeader | ] [ FinalizedObjectHeader | ] HeapObjectHeader | payload
// The [ ] notation denotes that the LargeObjectHeader and the FinalizedObjectHeader
// are independently optional.
class PLATFORM_EXPORT HeapObjectHeader : public BasicObjectHeader {
public:
    NO_SANITIZE_ADDRESS
    explicit HeapObjectHeader(size_t encodedSize)
        : BasicObjectHeader(encodedSize)
#if ENABLE(ASSERT)
        , m_magic(magic)
#endif
    { }

    NO_SANITIZE_ADDRESS
    HeapObjectHeader(size_t encodedSize, const GCInfo*)
        : BasicObjectHeader(encodedSize)
#if ENABLE(ASSERT)
        , m_magic(magic)
#endif
    { }

    inline void checkHeader() const;
    inline bool isMarked() const;

    inline void mark();
    inline void unmark();

    inline const GCInfo* gcInfo() { return 0; }

    inline Address payload();
    inline size_t payloadSize();
    inline Address payloadEnd();

    inline void setDeadMark();
    inline void clearDeadMark();
    inline bool hasDeadMark() const;

    // Zap magic number with a new magic number that means there was once an
    // object allocated here, but it was freed because nobody marked it during
    // GC.
    void zapMagic();

    static void finalize(const GCInfo*, Address, size_t);
    static HeapObjectHeader* fromPayload(const void*);

    static const intptr_t magic = 0xc0de247;
    static const intptr_t zappedMagic = 0xC0DEdead;
    // The zap value for vtables should be < 4K to ensure it cannot be
    // used for dispatch.
    static const intptr_t zappedVTable = 0xd0d;

private:
#if ENABLE(ASSERT)
    intptr_t m_magic;
#endif
};

const size_t objectHeaderSize = sizeof(HeapObjectHeader);

// Each object on the GeneralHeap needs to carry a pointer to its
// own GCInfo structure for tracing and potential finalization.
class PLATFORM_EXPORT FinalizedHeapObjectHeader : public HeapObjectHeader {
public:
    NO_SANITIZE_ADDRESS
    FinalizedHeapObjectHeader(size_t encodedSize, const GCInfo* gcInfo)
        : HeapObjectHeader(encodedSize)
        , m_gcInfo(gcInfo)
    { }

    inline Address payload();
    inline size_t payloadSize();

    NO_SANITIZE_ADDRESS
    const GCInfo* gcInfo() { return m_gcInfo; }

    NO_SANITIZE_ADDRESS
    TraceCallback traceCallback() { return m_gcInfo->m_trace; }

    void finalize();

    NO_SANITIZE_ADDRESS
    inline bool hasFinalizer() { return m_gcInfo->hasFinalizer(); }

    static FinalizedHeapObjectHeader* fromPayload(const void*);

    NO_SANITIZE_ADDRESS
    bool hasVTable() { return m_gcInfo->hasVTable(); }

private:
    const GCInfo* m_gcInfo;
};

const size_t finalizedHeaderSize = sizeof(FinalizedHeapObjectHeader);

class FreeListEntry : public HeapObjectHeader {
public:
    NO_SANITIZE_ADDRESS
    explicit FreeListEntry(size_t size)
        : HeapObjectHeader(freeListEncodedSize(size))
        , m_next(0)
    {
#if ENABLE(ASSERT) && !defined(ADDRESS_SANITIZER)
        // Zap free area with asterisks, aka 0x2a2a2a2a.
        // For ASan don't zap since we keep accounting in the freelist entry.
        for (size_t i = sizeof(*this); i < size; i++)
            reinterpret_cast<Address>(this)[i] = freelistZapValue;
        ASSERT(size >= objectHeaderSize);
        zapMagic();
#endif
    }

    Address address() { return reinterpret_cast<Address>(this); }

    NO_SANITIZE_ADDRESS
    void unlink(FreeListEntry** prevNext)
    {
        *prevNext = m_next;
        m_next = 0;
    }

    NO_SANITIZE_ADDRESS
    void link(FreeListEntry** prevNext)
    {
        m_next = *prevNext;
        *prevNext = this;
    }

    NO_SANITIZE_ADDRESS
    FreeListEntry* next() const { return m_next; }

    NO_SANITIZE_ADDRESS
    void append(FreeListEntry* next)
    {
        ASSERT(!m_next);
        m_next = next;
    }

#if defined(ADDRESS_SANITIZER)
    NO_SANITIZE_ADDRESS
    bool shouldAddToFreeList()
    {
        // Init if not already magic.
        if ((m_asanMagic & ~asanDeferMemoryReuseMask) != asanMagic) {
            m_asanMagic = asanMagic | asanDeferMemoryReuseCount;
            return false;
        }
        // Decrement if count part of asanMagic > 0.
        if (m_asanMagic & asanDeferMemoryReuseMask)
            m_asanMagic--;
        return !(m_asanMagic & asanDeferMemoryReuseMask);
    }
#endif

private:
    FreeListEntry* m_next;
#if defined(ADDRESS_SANITIZER)
    unsigned m_asanMagic;
#endif
};

// Representation of Blink heap pages.
//
// Pages are specialized on the type of header on the object they
// contain. If a heap page only contains a certain type of object all
// of the objects will have the same GCInfo pointer and therefore that
// pointer can be stored in the HeapPage instead of in the header of
// each object. In that case objects have only a HeapObjectHeader and
// not a FinalizedHeapObjectHeader saving a word per object.
template<typename Header>
class HeapPage : public BaseHeapPage {
public:
    HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*);

    void link(HeapPage**);
    static void unlink(ThreadHeap<Header>*, HeapPage*, HeapPage**);

    bool isEmpty();

    // Returns true for the whole blinkPageSize page that the page is on, even
    // for the header, and the unmapped guard page at the start. That ensures
    // the result can be used to populate the negative page cache.
    virtual bool contains(Address addr) OVERRIDE
    {
        Address blinkPageStart = roundToBlinkPageStart(address());
        ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size.
        return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize;
    }

    HeapPage* next() { return m_next; }

    Address payload()
    {
        return address() + sizeof(*this) + headerPadding<Header>();
    }

    static size_t payloadSize()
    {
        return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask;
    }

    Address end() { return payload() + payloadSize(); }

    void getStats(HeapStats&);
    void clearLiveAndMarkDead();
    void sweep(HeapStats*, ThreadHeap<Header>*);
    void clearObjectStartBitMap();
    void finalize(Header*);
    virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE;
#if ENABLE(GC_PROFILE_MARKING)
    const GCInfo* findGCInfo(Address) OVERRIDE;
#endif
#if ENABLE(GC_PROFILE_HEAP)
    virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
#endif

#if defined(ADDRESS_SANITIZER)
    void poisonUnmarkedObjects();
#endif

    NO_SANITIZE_ADDRESS
    virtual void markOrphaned()
    {
        // Zap the payload with a recognizable value to detect any incorrect
        // cross thread pointer usage.
#if defined(ADDRESS_SANITIZER)
        // Don't use memset when running with ASan since this needs to zap
        // poisoned memory as well and the NO_SANITIZE_ADDRESS annotation
        // only works for code in this method and not for calls to memset.
        for (Address current = payload(); current < payload() + payloadSize(); ++current)
            *current = orphanedZapValue;
#else
        memset(payload(), orphanedZapValue, payloadSize());
#endif
        BaseHeapPage::markOrphaned();
    }

private:
    Header* findHeaderFromAddress(Address);
    void populateObjectStartBitMap();
    bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; }
    TraceCallback traceCallback(Header*);
    bool hasVTable(Header*);

    intptr_t padding() const { return m_padding; }

    HeapPage<Header>* m_next;
    intptr_t m_padding; // Preserve 8-byte alignment on 32-bit systems.
    bool m_objectStartBitMapComputed;
    uint8_t m_objectStartBitMap[reservedForObjectBitMap];

    friend class ThreadHeap<Header>;
};

class AddressEntry {
public:
    AddressEntry() : m_address(0) { }

    explicit AddressEntry(Address address) : m_address(address) { }

    Address address() const { return m_address; }

private:
    Address m_address;
};

class PositiveEntry : public AddressEntry {
public:
    PositiveEntry()
        : AddressEntry()
        , m_containingPage(0)
    {
    }

    PositiveEntry(Address address, BaseHeapPage* containingPage)
        : AddressEntry(address)
        , m_containingPage(containingPage)
    {
    }

    BaseHeapPage* result() const { return m_containingPage; }

    typedef BaseHeapPage* LookupResult;

private:
    BaseHeapPage* m_containingPage;
};

class NegativeEntry : public AddressEntry {
public:
    NegativeEntry() : AddressEntry() { }

    NegativeEntry(Address address, bool) : AddressEntry(address) { }

    bool result() const { return true; }

    typedef bool LookupResult;
};

// A HeapExtentCache provides a fast way of taking an arbitrary
// pointer-sized word, and determining whether it can be interpreted
// as a pointer to an area that is managed by the garbage collected
// Blink heap. There is a cache of 'pages' that have previously been
// determined to be wholly inside the heap. The size of these pages must be
// smaller than the allocation alignment of the heap pages. We determine
// on-heap-ness by rounding down the pointer to the nearest page and looking up
// the page in the cache. If there is a miss in the cache we can ask the heap
// to determine the status of the pointer by iterating over all of the heap.
// The result is then cached in the two-way associative page cache.
//
// A HeapContainsCache is a positive cache. Therefore, it must be flushed when
// memory is removed from the Blink heap. The HeapDoesNotContainCache is a
// negative cache, so it must be flushed when memory is added to the heap.
template<typename Entry>
class HeapExtentCache {
public:
    HeapExtentCache()
        : m_entries(adoptArrayPtr(new Entry[HeapExtentCache::numberOfEntries]))
        , m_hasEntries(false)
    {
    }

    void flush();
    bool contains(Address);
    bool isEmpty() { return !m_hasEntries; }

    // Perform a lookup in the cache.
    //
    // If lookup returns null/false the argument address was not found in
    // the cache and it is unknown if the address is in the Blink
    // heap.
    //
    // If lookup returns true/a page, the argument address was found in the
    // cache. For the HeapContainsCache this means the address is in the heap.
    // For the HeapDoesNotContainCache this means the address is not in the
    // heap.
    PLATFORM_EXPORT typename Entry::LookupResult lookup(Address);

    // Add an entry to the cache.
    PLATFORM_EXPORT void addEntry(Address, typename Entry::LookupResult);

private:
    static const int numberOfEntriesLog2 = 12;
    static const int numberOfEntries = 1 << numberOfEntriesLog2;

    static size_t hash(Address);

    WTF::OwnPtr<Entry[]> m_entries;
    bool m_hasEntries;

    friend class ThreadState;
};

// Normally these would be typedefs instead of subclasses, but that makes them
// very hard to forward declare.
class HeapContainsCache : public HeapExtentCache<PositiveEntry> {
public:
    BaseHeapPage* lookup(Address);
    void addEntry(Address, BaseHeapPage*);
};

class HeapDoesNotContainCache : public HeapExtentCache<NegativeEntry> { };
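
// Illustrative lookup protocol for the two caches (a sketch of assumed usage,
// not code from this file; the variable names are hypothetical): a
// conservative pointer check can consult the positive cache, then the
// negative cache, and only fall back to iterating the heap on a double miss:
//
//     if (BaseHeapPage* page = containsCache->lookup(address))
//         return page; // fast path: address known to be on a heap page
//     if (doesNotContainCache->lookup(address))
//         return 0; // fast path: address known not to be in the heap
//     // Slow path: iterate the heap and record the verdict in a cache.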

// FIXME: This is currently used by the WebAudio code.
// We should attempt to restructure the WebAudio code so that the main thread
// alone determines life-time and receives messages about life-time from the
// audio thread.
template<typename T>
class ThreadSafeRefCountedGarbageCollected : public GarbageCollectedFinalized<T>, public WTF::ThreadSafeRefCountedBase {
    WTF_MAKE_NONCOPYABLE(ThreadSafeRefCountedGarbageCollected);

public:
    ThreadSafeRefCountedGarbageCollected()
    {
        makeKeepAlive();
    }

    // Override ref to deal with a case where a reference count goes up
    // from 0 to 1. This can happen in the following scenario:
    // (1) The reference count becomes 0, but on-stack pointers keep references to the object.
    // (2) The on-stack pointer is assigned to a RefPtr. The reference count becomes 1.
    // In this case, we have to resurrect m_keepAlive.
    void ref()
    {
        MutexLocker lock(m_mutex);
        if (UNLIKELY(!refCount())) {
            makeKeepAlive();
        }
        WTF::ThreadSafeRefCountedBase::ref();
    }

    // Override deref to deal with our own deallocation based on ref counting.
    void deref()
    {
        MutexLocker lock(m_mutex);
        if (derefBase()) {
            ASSERT(m_keepAlive);
            m_keepAlive.clear();
        }
    }

    using GarbageCollectedFinalized<T>::operator new;
    using GarbageCollectedFinalized<T>::operator delete;

protected:
    ~ThreadSafeRefCountedGarbageCollected() { }

private:
    void makeKeepAlive()
    {
        ASSERT(!m_keepAlive);
        m_keepAlive = adoptPtr(new CrossThreadPersistent<T>(static_cast<T*>(this)));
    }

    OwnPtr<CrossThreadPersistent<T> > m_keepAlive;
    mutable Mutex m_mutex;
};
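
// Illustrative usage (a sketch; AudioBus is a hypothetical type): an object
// shared across threads by RefPtr while also living in the GC heap. The ref
// count keeps m_keepAlive, and thus the object, alive; once the count drops
// to zero the garbage collector is free to reclaim it:
//
//     class AudioBus : public ThreadSafeRefCountedGarbageCollected<AudioBus> {
//     public:
//         void trace(Visitor*) { }
//     };
//
//     RefPtr<AudioBus> bus = adoptRef(new AudioBus);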

template<typename DataType>
class PagePool {
protected:
    PagePool();

    class PoolEntry {
    public:
        PoolEntry(DataType* data, PoolEntry* next)
            : data(data)
            , next(next)
        { }

        DataType* data;
        PoolEntry* next;
    };

    PoolEntry* m_pool[NumberOfHeaps];
};

// Once pages have been used for one type of thread heap they will never be
// reused for another type of thread heap. Instead of unmapping, we add the
// pages to a pool of pages to be reused later by a thread heap of the same
// type. This is done as a security feature to avoid type confusion. The
// heaps are type segregated by having separate thread heaps for different
// types of objects. Holding on to pages ensures that the same virtual address
// space cannot be used for objects of another type than the type contained
// in this page to begin with.
class FreePagePool : public PagePool<PageMemory> {
public:
    ~FreePagePool();
    void addFreePage(int, PageMemory*);
    PageMemory* takeFreePage(int);

private:
    Mutex m_mutex[NumberOfHeaps];
};

class OrphanedPagePool : public PagePool<BaseHeapPage> {
public:
    ~OrphanedPagePool();
    void addOrphanedPage(int, BaseHeapPage*);
    void decommitOrphanedPages();
    bool contains(void*);

private:
    void clearMemory(PageMemory*);
};

// Non-template super class used to pass a heap around to other classes.
class BaseHeap {
public:
    virtual ~BaseHeap() { }
    virtual void cleanupPages() = 0;

    // Find the page in this thread heap containing the given
    // address. Returns 0 if the address is not contained in any
    // page in this thread heap.
    virtual BaseHeapPage* heapPageFromAddress(Address) = 0;

#if ENABLE(GC_PROFILE_MARKING)
    virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0;
#endif

#if ENABLE(GC_PROFILE_HEAP)
    virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0;
#endif

    // Sweep this part of the Blink heap. This finalizes dead objects
    // and builds freelists for all the unused memory.
    virtual void sweep(HeapStats*) = 0;
    virtual void postSweepProcessing() = 0;

    virtual void clearFreeLists() = 0;
    virtual void clearLiveAndMarkDead() = 0;

    virtual void makeConsistentForSweeping() = 0;

#if ENABLE(ASSERT)
    virtual bool isConsistentForSweeping() = 0;
#endif

    virtual void getScannedStats(HeapStats&) = 0;

    virtual void prepareHeapForTermination() = 0;

    virtual int normalPageCount() = 0;

    virtual BaseHeap* split(int normalPages) = 0;
    virtual void merge(BaseHeap* other) = 0;

    // Returns a bucket number for inserting a FreeListEntry of a
    // given size. All FreeListEntries in the given bucket, n, have
    // size >= 2^n.
    static int bucketIndexForSize(size_t);
};
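
// A minimal sketch of a bucketIndexForSize-style computation (an assumption
// added for illustration; the real definition lives out of line): the bucket
// is the index of the highest set bit, so that every FreeListEntry in bucket
// n has size >= 2^n.
//
//     int bucketIndexForSize(size_t size)
//     {
//         ASSERT(size > 0);
//         int index = -1;
//         while (size) {
//             size >>= 1;
//             ++index;
//         }
//         return index;
//     }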

// Thread heaps represent a part of the per-thread Blink heap.
//
// Each Blink thread has a number of thread heaps: one general heap
// that contains any type of object and a number of heaps specialized
// for specific object types (such as Node).
//
// Each thread heap contains the functionality to allocate new objects
// (potentially adding new pages to the heap), to find and mark
// objects during conservative stack scanning and to sweep the set of
// heap pages after a GC.
template<typename Header>
class ThreadHeap : public BaseHeap {
public:
    ThreadHeap(ThreadState*, int);
    virtual ~ThreadHeap();
    virtual void cleanupPages();

    virtual BaseHeapPage* heapPageFromAddress(Address);
#if ENABLE(GC_PROFILE_MARKING)
    virtual const GCInfo* findGCInfoOfLargeHeapObject(Address);
#endif
#if ENABLE(GC_PROFILE_HEAP)
    virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
#endif

    virtual void sweep(HeapStats*);
    virtual void postSweepProcessing();

    virtual void clearFreeLists();
    virtual void clearLiveAndMarkDead();

    virtual void makeConsistentForSweeping();

#if ENABLE(ASSERT)
    virtual bool isConsistentForSweeping();
#endif

    virtual void getScannedStats(HeapStats&);

    ThreadState* threadState() { return m_threadState; }
    HeapStats& stats() { return m_threadState->stats(); }
    void flushHeapContainsCache()
    {
        m_threadState->heapContainsCache()->flush();
    }

    inline Address allocate(size_t, const GCInfo*);
    void addToFreeList(Address, size_t);
    inline static size_t roundedAllocationSize(size_t size)
    {
        return allocationSizeFromSize(size) - sizeof(Header);
    }

    virtual void prepareHeapForTermination();

    virtual int normalPageCount() { return m_numberOfNormalPages; }

    virtual BaseHeap* split(int numberOfNormalPages);
    virtual void merge(BaseHeap* splitOffBase);

    void removePageFromHeap(HeapPage<Header>*);

    PLATFORM_EXPORT void promptlyFreeObject(Header*);

private:
    void addPageToHeap(const GCInfo*);
    PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*);
    static size_t allocationSizeFromSize(size_t);
    PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*);
    Address currentAllocationPoint() const { return m_currentAllocationPoint; }
    size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
    bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); }
    void setAllocationPoint(Address point, size_t size)
    {
        ASSERT(!point || heapPageFromAddress(point));
        ASSERT(size <= HeapPage<Header>::payloadSize());
        m_currentAllocationPoint = point;
        m_remainingAllocationSize = size;
    }
    void ensureCurrentAllocation(size_t, const GCInfo*);
    bool allocateFromFreeList(size_t);

    void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**);
    void allocatePage(const GCInfo*);

#if ENABLE(ASSERT)
    bool pagesToBeSweptContains(Address);
    bool pagesAllocatedDuringSweepingContains(Address);
#endif

    void sweepNormalPages(HeapStats*);
    void sweepLargePages(HeapStats*);
    bool coalesce(size_t);

    Address m_currentAllocationPoint;
    size_t m_remainingAllocationSize;

    HeapPage<Header>* m_firstPage;
    LargeHeapObject<Header>* m_firstLargeHeapObject;

    HeapPage<Header>* m_firstPageAllocatedDuringSweeping;
    HeapPage<Header>* m_lastPageAllocatedDuringSweeping;

    // Merge point for parallel sweep.
    HeapPage<Header>* m_mergePoint;

    int m_biggestFreeListIndex;

    ThreadState* m_threadState;

    // All FreeListEntries in the nth list have size >= 2^n.
    FreeListEntry* m_freeLists[blinkPageSizeLog2];
    FreeListEntry* m_lastFreeListEntries[blinkPageSizeLog2];

    // Index into the page pools. This is used to ensure that the pages of the
    // same type go into the correct page pool and thus avoid type confusion.
    int m_index;

    int m_numberOfNormalPages;

    // The promptly freed count contains the number of promptly freed objects
    // since the last sweep or since it was manually reset to delay coalescing.
    size_t m_promptlyFreedCount;
};

class PLATFORM_EXPORT Heap {
public:
    static void init();
    static void shutdown();
    static void doShutdown();

    static BaseHeapPage* contains(Address);
    static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); }
    static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); }
#if ENABLE(ASSERT)
    static bool containedInHeapOrOrphanedPage(void*);
#endif

    // Push a trace callback on the marking stack.
    static void pushTraceCallback(CallbackStack*, void* containerObject, TraceCallback);

    // Push a trace callback on the post-marking callback stack. These callbacks
    // are called after normal marking (including ephemeron iteration).
    static void pushPostMarkingCallback(void*, TraceCallback);

    // Add a weak pointer callback to the weak callback work list. General
    // object pointer callbacks are added to a thread local weak callback work
    // list and the callback is called on the thread that owns the object, with
    // the closure pointer as an argument. Most of the time, the closure and
    // the containerObject can be the same thing, but the containerObject is
    // constrained to be on the heap, since the heap is used to identify the
    // correct thread.
    static void pushWeakObjectPointerCallback(void* closure, void* containerObject, WeakPointerCallback);

    // Similar to the more general pushWeakObjectPointerCallback, but cell
    // pointer callbacks are added to a static callback work list and the weak
    // callback is performed on the thread performing garbage collection. This
    // is OK because cells are just cleared and no deallocation can happen.
    static void pushWeakCellPointerCallback(void** cell, WeakPointerCallback);

    // Pop the top of a marking stack and call the callback with the visitor
    // and the object. Returns false when there is nothing more to do.
    template<CallbackInvocationMode Mode> static bool popAndInvokeTraceCallback(CallbackStack*, Visitor*);

    // Remove an item from the post-marking callback stack and call
    // the callback with the visitor and the object pointer. Returns
    // false when there is nothing more to do.
    static bool popAndInvokePostMarkingCallback(Visitor*);

    // Remove an item from the weak callback work list and call the callback
    // with the visitor and the closure pointer. Returns false when there is
    // nothing more to do.
    static bool popAndInvokeWeakPointerCallback(Visitor*);

    // Register an ephemeron table for fixed-point iteration.
    static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback);
#if ENABLE(ASSERT)
    static bool weakTableRegistered(const void*);
#endif

    template<typename T, typename HeapTraits> static Address allocate(size_t);
    // FIXME: remove this once c++11 is allowed everywhere:
    template<typename T> static Address allocate(size_t);

    template<typename T> static Address reallocate(void* previous, size_t);

    static void collectGarbage(ThreadState::StackState, ThreadState::CauseOfGC = ThreadState::NormalGC);
    static void collectGarbageForTerminatingThread(ThreadState*);
    static void collectAllGarbage();
    static void processMarkingStackEntries(int* numberOfMarkingThreads);
    static void processMarkingStackOnMultipleThreads();
    static void processMarkingStackInParallel();
    template<CallbackInvocationMode Mode> static void processMarkingStack();
    static void postMarkingProcessing();
    static void globalWeakProcessing();
    static void setForcePreciseGCForTesting();

    static void prepareForGC();

    // Conservatively checks whether an address is a pointer in any of the thread
    // heaps. If so marks the object pointed to as live.
    static Address checkAndMarkPointer(Visitor*, Address);

#if ENABLE(GC_PROFILE_MARKING)
    // Dump the path to specified object on the next GC. This method is to be invoked from GDB.
    static void dumpPathToObjectOnNextGC(void* p);

    // Forcibly find GCInfo of the object at Address.
    // This is slow and should only be used for debug purposes.
    // It involves finding the heap page and scanning the heap page for an object header.
    static const GCInfo* findGCInfo(Address);

    static String createBacktraceString();
#endif

    // Collect heap stats for all threads attached to the Blink
    // garbage collector. Should only be called during garbage
    // collection where threads are known to be at safe points.
    static void getStats(HeapStats*);

    static void getHeapSpaceSize(uint64_t*, uint64_t*);

    static void makeConsistentForSweeping();

#if ENABLE(ASSERT)
    static bool isConsistentForSweeping();
#endif

    static void flushHeapDoesNotContainCache();
    static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCache->isEmpty(); }

    // Return true if the last GC found a pointer into a heap page
    // during conservative scanning.
    static bool lastGCWasConservative() { return s_lastGCWasConservative; }

    static FreePagePool* freePagePool() { return s_freePagePool; }
    static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; }

private:
    static Visitor* s_markingVisitor;
    static Vector<OwnPtr<blink::WebThread> >* s_markingThreads;
    static CallbackStack* s_markingStack;
    static CallbackStack* s_postMarkingCallbackStack;
    static CallbackStack* s_weakCallbackStack;
    static CallbackStack* s_ephemeronStack;
    static HeapDoesNotContainCache* s_heapDoesNotContainCache;
    static bool s_shutdownCalled;
    static bool s_lastGCWasConservative;
    static FreePagePool* s_freePagePool;
    static OrphanedPagePool* s_orphanedPagePool;

    friend class ThreadState;
};

// The NoAllocationScope class is used in debug mode to catch unwanted
// allocations. E.g. allocations during GC.
template<ThreadAffinity Affinity>
class NoAllocationScope {
public:
    NoAllocationScope() : m_active(true) { enter(); }

    explicit NoAllocationScope(bool active) : m_active(active) { enter(); }

    NoAllocationScope(const NoAllocationScope& other) : m_active(other.m_active) { enter(); }

    NoAllocationScope& operator=(const NoAllocationScope& other)
    {
        release();
        m_active = other.m_active;
        enter();
        return *this;
    }

    ~NoAllocationScope() { release(); }

    void release()
    {
        if (m_active) {
            ThreadStateFor<Affinity>::state()->leaveNoAllocationScope();
            m_active = false;
        }
    }

private:
    void enter() const
    {
        if (m_active)
            ThreadStateFor<Affinity>::state()->enterNoAllocationScope();
    }

    bool m_active;
};
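
// Illustrative usage (not in the original header): a scope in which heap
// allocation asserts in debug builds, e.g. while the collector is marking:
//
//     {
//         NoAllocationScope<AnyThread> scope;
//         // ... code that must not allocate Blink GC memory ...
//     } // leaving the scope makes allocation legal again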

// Base class for objects allocated in the Blink garbage-collected
// heap.
//
// Defines a 'new' operator that allocates the memory in the
// heap. 'delete' should not be called on objects that inherit from
// GarbageCollected.
//
// Instances of GarbageCollected will *NOT* get finalized. Their
// destructor will not be called. Therefore, only classes that have
// trivial destructors with no semantic meaning (including all their
// subclasses) should inherit from GarbageCollected. If there are
// non-trivial destructors in a given class or any of its subclasses,
// GarbageCollectedFinalized should be used which guarantees that the
// destructor is called on an instance when the garbage collector
// determines that it is no longer reachable.
template<typename T>
class GarbageCollected {
    WTF_MAKE_NONCOPYABLE(GarbageCollected);

    // For now direct allocation of arrays on the heap is not allowed.
    void* operator new[](size_t size);
#if OS(WIN) && COMPILER(MSVC)
protected:
    // Due to some quirkiness in the MSVC compiler we have to provide
    // the delete[] operator in the GarbageCollected subclasses as it
    // is called when a class is exported in a DLL.
    void operator delete[](void* p)
    {
        ASSERT_NOT_REACHED();
    }
#else
    void operator delete[](void* p);
#endif

public:
    typedef T GarbageCollectedBase;

    void* operator new(size_t size)
    {
        return Heap::allocate<T>(size);
    }

    void operator delete(void* p)
    {
        ASSERT_NOT_REACHED();
    }

protected:
    GarbageCollected()
    {
    }
};
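
// Illustrative usage (not in the original header), assuming a hypothetical
// type with a trivial destructor:
//
//     class Point : public GarbageCollected<Point> {
//     public:
//         void trace(Visitor*) { } // no Members to trace in this example
//         int m_x;
//         int m_y;
//     };
//
//     Point* point = new Point; // heap allocated; never explicitly deleted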

// Base class for objects allocated in the Blink garbage-collected
// heap.
//
// Defines a 'new' operator that allocates the memory in the
// heap. 'delete' should not be called on objects that inherit from
// GarbageCollected.
//
// Instances of GarbageCollectedFinalized will have their destructor
// called when the garbage collector determines that the object is no
// longer reachable.
template<typename T>
class GarbageCollectedFinalized : public GarbageCollected<T> {
    WTF_MAKE_NONCOPYABLE(GarbageCollectedFinalized);

protected:
    // finalizeGarbageCollectedObject is called when the object is
    // freed from the heap. By default finalization means calling the
    // destructor on the object. finalizeGarbageCollectedObject can be
    // overridden to support calling the destructor of a
    // subclass. This is useful for objects without vtables that
    // require explicit dispatching. The name is intentionally a bit
    // long to make name conflicts less likely.
    void finalizeGarbageCollectedObject()
    {
        static_cast<T*>(this)->~T();
    }

    GarbageCollectedFinalized() { }
    ~GarbageCollectedFinalized() { }

    template<typename U> friend struct HasFinalizer;
    template<typename U, bool> friend struct FinalizerTraitImpl;
};
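
// Illustrative usage (not in the original header): a hypothetical type whose
// destructor must run when the object is reclaimed:
//
//     class Connection : public GarbageCollectedFinalized<Connection> {
//     public:
//         ~Connection() { closeSocket(); } // hypothetical cleanup work
//         void trace(Visitor*) { }
//     };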

// Base class for objects that are in the Blink garbage-collected heap
// and are still reference counted.
//
// This class should be used sparingly and only to gradually move
// objects from being reference counted to being managed by the blink
// garbage collector.
//
// While the current reference counting keeps one of these objects
// alive it will have a Persistent handle to itself allocated so we
// will not reclaim the memory. When the reference count reaches 0 the
// persistent handle will be deleted. When the garbage collector
// determines that there are no other references to the object it will
// be reclaimed and the destructor of the reclaimed object will be
// called at that time.
template<typename T>
class RefCountedGarbageCollected : public GarbageCollectedFinalized<T> {
    WTF_MAKE_NONCOPYABLE(RefCountedGarbageCollected);

public:
    RefCountedGarbageCollected()
        : m_refCount(1)
    {
        makeKeepAlive();
    }

    // Implement method to increase reference count for use with
    // RefPtr.
    //
    // In contrast to the normal WTF::RefCounted, the reference count
    // can reach 0 and increase again. This happens in the following
    // scenario:
    //
    // (1) The reference count becomes 0, but members, persistents, or
    //     on-stack pointers keep references to the object.
    //
    // (2) The pointer is assigned to a RefPtr again and the reference
    //     count becomes 1.
    //
    // In this case, we have to resurrect m_keepAlive.
    void ref()
    {
        if (UNLIKELY(!m_refCount)) {
            ASSERT(ThreadStateFor<ThreadingTrait<T>::Affinity>::state()->contains(reinterpret_cast<Address>(this)));
            makeKeepAlive();
        }
        ++m_refCount;
    }

    // Implement method to decrease reference count for use with
    // RefPtr.
    //
    // In contrast to the normal WTF::RefCounted implementation, the
    // object itself is not deleted when the reference count reaches
    // 0. Instead, the keep-alive persistent handle is deallocated so
    // that the object can be reclaimed when the garbage collector
    // determines that there are no other references to the object.
    void deref()
    {
        ASSERT(m_refCount > 0);
        if (!--m_refCount) {
            delete m_keepAlive;
            m_keepAlive = 0;
        }
    }

    bool hasOneRef()
    {
        return m_refCount == 1;
    }

protected:
    ~RefCountedGarbageCollected() { }

private:
    void makeKeepAlive()
    {
        ASSERT(!m_keepAlive);
        m_keepAlive = new Persistent<T>(static_cast<T*>(this));
    }

    int m_refCount;
    Persistent<T>* m_keepAlive;
};

template<typename T>
T* adoptRefCountedGarbageCollected(T* ptr)
{
    ASSERT(ptr->hasOneRef());
    return ptr;
}

// Classes that contain heap references but aren't themselves heap
// allocated, have some extra macros available which allow their use
// to be restricted to cases where the garbage collector is able
// to discover their heap references.
//
// STACK_ALLOCATED(): Use if the object is only stack allocated. Heap objects
// should be in Members but you do not need the trace method as they are on
// the stack. (Down the line these might turn in to raw pointers, but for
// now Members indicates that we have thought about them and explicitly
// taken care of them.)
//
// DISALLOW_ALLOCATION(): Cannot be allocated with new operators but can
// be a part object. If it has Members you need a trace method and the
// containing object needs to call that trace method.
//
// ALLOW_ONLY_INLINE_ALLOCATION(): Allows only placement new operator.
// This disallows general allocation of this object but allows putting
// the object as a value object in collections. If these have Members you
// need to have a trace method. That trace method will be called
// automatically by the Heap collections.
//
#if COMPILER_SUPPORTS(CXX_DELETED_FUNCTIONS)
#define DISALLOW_ALLOCATION()                                   \
    private:                                                    \
        void* operator new(size_t) = delete;                    \
        void* operator new(size_t, NotNullTag, void*) = delete; \
        void* operator new(size_t, void*) = delete;

#define ALLOW_ONLY_INLINE_ALLOCATION()                                               \
    public:                                                                          \
        void* operator new(size_t, NotNullTag, void* location) { return location; }  \
        void* operator new(size_t, void* location) { return location; }              \
    private:                                                                         \
        void* operator new(size_t) = delete;

#define STATIC_ONLY(Type) \
    private:              \
        Type() = delete;
#else
#define DISALLOW_ALLOCATION()                          \
    private:                                           \
        void* operator new(size_t);                    \
        void* operator new(size_t, NotNullTag, void*); \
        void* operator new(size_t, void*)

#define ALLOW_ONLY_INLINE_ALLOCATION()                                               \
    public:                                                                          \
        void* operator new(size_t, NotNullTag, void* location) { return location; }  \
        void* operator new(size_t, void* location) { return location; }              \
    private:                                                                         \
        void* operator new(size_t);

#define STATIC_ONLY(Type) \
    private:              \
        Type();
#endif

// These macros insert annotations that the Blink GC plugin for clang uses for
// verification. STACK_ALLOCATED is used to declare that objects of this type
// are always stack allocated. GC_PLUGIN_IGNORE is used to make the plugin
// ignore a particular class or field when checking for proper usage. When using
// GC_PLUGIN_IGNORE a bug-number should be provided as an argument where the
// bug describes what needs to happen to remove the GC_PLUGIN_IGNORE again.
#if COMPILER(CLANG)
#define STACK_ALLOCATED()                                       \
    private:                                                    \
        __attribute__((annotate("blink_stack_allocated")))      \
        void* operator new(size_t) = delete;                    \
        void* operator new(size_t, NotNullTag, void*) = delete; \
        void* operator new(size_t, void*) = delete;

#define GC_PLUGIN_IGNORE(bug) \
    __attribute__((annotate("blink_gc_plugin_ignore")))
#else
#define STACK_ALLOCATED() DISALLOW_ALLOCATION()
#define GC_PLUGIN_IGNORE(bug)
#endif
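
// Illustrative usage of the annotation macros (not in the original header);
// both classes below are hypothetical:
//
//     class TreeWalker {
//         STACK_ALLOCATED(); // only ever lives on the stack
//     public:
//         Member<Node> m_current; // no trace method needed on the stack
//     };
//
//     class IntRect {
//         DISALLOW_ALLOCATION(); // may only be used as a part object
//     public:
//         void trace(Visitor*) { } // called by the owning object's trace
//     };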

NO_SANITIZE_ADDRESS
void HeapObjectHeader::checkHeader() const
{
#if ENABLE(ASSERT)
    BaseHeapPage* page = pageHeaderFromObject(this);
    ASSERT(page->orphaned() || m_magic == magic);
#endif
}

Address HeapObjectHeader::payload()
{
    return reinterpret_cast<Address>(this) + objectHeaderSize;
}

size_t HeapObjectHeader::payloadSize()
{
    return size() - objectHeaderSize;
}

Address HeapObjectHeader::payloadEnd()
{
    return reinterpret_cast<Address>(this) + size();
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::mark()
{
    checkHeader();
    // The use of atomic ops guarantees that the reads and writes are
    // atomic and that no memory operation reorderings take place.
    // Multiple threads can still read the old value and all store the
    // new value. However, the new value will be the same for all of
    // the threads and the end result is therefore consistent.
    unsigned size = asanUnsafeAcquireLoad(&m_size);
    asanUnsafeReleaseStore(&m_size, size | markBitMask);
}

Address FinalizedHeapObjectHeader::payload()
{
    return reinterpret_cast<Address>(this) + finalizedHeaderSize;
}

size_t FinalizedHeapObjectHeader::payloadSize()
{
    return size() - finalizedHeaderSize;
}

template<typename Header>
size_t ThreadHeap<Header>::allocationSizeFromSize(size_t size)
{
    // Check the size before computing the actual allocation size. The
    // allocation size calculation can overflow for large sizes and
    // the check therefore has to happen before any calculation on the
    // size.
    RELEASE_ASSERT(size < maxHeapObjectSize);

    // Add space for header.
    size_t allocationSize = size + sizeof(Header);
    // Align size with allocation granularity.
    allocationSize = (allocationSize + allocationMask) & ~allocationMask;
    return allocationSize;
}
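
// Worked example (added for illustration, not in the original header): for a
// hypothetical 8-byte Header, a request for 17 bytes becomes 17 + 8 = 25,
// which is then rounded up to the 8-byte allocation granularity:
// (25 + 7) & ~7 == 32.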

template<typename Header>
Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo)
{
    size_t allocationSize = allocationSizeFromSize(size);
    bool isLargeObject = allocationSize > blinkPageSize / 2;
    if (isLargeObject)
        return allocateLargeObject(allocationSize, gcInfo);
    if (m_remainingAllocationSize < allocationSize)
        return outOfLineAllocate(size, gcInfo);
    Address headerAddress = m_currentAllocationPoint;
    m_currentAllocationPoint += allocationSize;
    m_remainingAllocationSize -= allocationSize;
    Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo);
    size_t payloadSize = allocationSize - sizeof(Header);
    stats().increaseObjectSpace(payloadSize);
    Address result = headerAddress + sizeof(*header);
    ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
    // Unpoison the memory used for the object (payload).
    ASAN_UNPOISON_MEMORY_REGION(result, payloadSize);
#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
    memset(result, 0, payloadSize);
#endif
    ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1));
    return result;
}

template<typename T, typename HeapTraits>
Address Heap::allocate(size_t size)
{
    ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
    ASSERT(state->isAllocationAllowed());
    const GCInfo* gcInfo = GCInfoTrait<T>::get();
    int heapIndex = HeapTraits::index(gcInfo->hasFinalizer());
    BaseHeap* heap = state->heap(heapIndex);
    return static_cast<typename HeapTraits::HeapType*>(heap)->allocate(size, gcInfo);
}

template<typename T>
Address Heap::allocate(size_t size)
{
    return allocate<T, HeapTypeTrait<T> >(size);
}

template<typename T>
Address Heap::reallocate(void* previous, size_t size)
{
    if (!size) {
        // If the new size is 0 this is equivalent to either
        // free(previous) or malloc(0). In both cases we do
        // nothing and return 0.
        return 0;
    }
    ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
    ASSERT(state->isAllocationAllowed());
    const GCInfo* gcInfo = GCInfoTrait<T>::get();
    int heapIndex = HeapTypeTrait<T>::index(gcInfo->hasFinalizer());
    // FIXME: Currently only supports raw allocation on the
    // GeneralHeap. Hence we assume the header is a
    // FinalizedHeapObjectHeader.
    ASSERT(heapIndex == GeneralHeap || heapIndex == GeneralHeapNonFinalized);
    BaseHeap* heap = state->heap(heapIndex);
    Address address = static_cast<typename HeapTypeTrait<T>::HeapType*>(heap)->allocate(size, gcInfo);
    if (!previous) {
        // This is equivalent to malloc(size).
        return address;
    }
    FinalizedHeapObjectHeader* previousHeader = FinalizedHeapObjectHeader::fromPayload(previous);
    ASSERT(!previousHeader->hasFinalizer());
    ASSERT(previousHeader->gcInfo() == gcInfo);
    size_t copySize = previousHeader->payloadSize();
    if (copySize > size)
        copySize = size;
    memcpy(address, previous, copySize);
    return address;
}

class HeapAllocatorQuantizer {
public:
    template<typename T>
    static size_t quantizedSize(size_t count)
    {
        RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T));
        return HeapIndexTrait<CollectionBackingHeap>::HeapType::roundedAllocationSize(count * sizeof(T));
    }
    static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize;
};

// This is a static-only class used as a trait on collections to make them heap allocated.
// However see also HeapListHashSetAllocator.
class HeapAllocator {
public:
    typedef HeapAllocatorQuantizer Quantizer;
    typedef blink::Visitor Visitor;
    static const bool isGarbageCollected = true;

    template <typename Return, typename Metadata>
    static Return backingMalloc(size_t size)
    {
        return reinterpret_cast<Return>(Heap::allocate<Metadata, HeapIndexTrait<CollectionBackingHeap> >(size));
    }
    template <typename Return, typename Metadata>
    static Return zeroedBackingMalloc(size_t size)
    {
        return backingMalloc<Return, Metadata>(size);
    }
    template <typename Return, typename Metadata>
    static Return malloc(size_t size)
    {
        return reinterpret_cast<Return>(Heap::allocate<Metadata>(size));
    }
    PLATFORM_EXPORT static void backingFree(void* address);

    static void free(void* address) { }
    template<typename T>
    static void* newArray(size_t bytes)
    {
        ASSERT_NOT_REACHED();
        return 0;
    }

    static void deleteArray(void* ptr)
    {
        ASSERT_NOT_REACHED();
    }

    static bool isAllocationAllowed()
    {
        return ThreadState::current()->isAllocationAllowed();
    }

    static void markUsingGCInfo(Visitor* visitor, const void* buffer)
    {
        visitor->mark(buffer, FinalizedHeapObjectHeader::fromPayload(buffer)->traceCallback());
    }

    static void markNoTracing(Visitor* visitor, const void* t) { visitor->markNoTracing(t); }

    template<typename T, typename Traits>
    static void trace(Visitor* visitor, T& t)
    {
        CollectionBackingTraceTrait<WTF::ShouldBeTraced<Traits>::value, Traits::weakHandlingFlag, WTF::WeakPointersActWeak, T, Traits>::trace(visitor, t);
    }

    static void registerDelayedMarkNoTracing(Visitor* visitor, const void* object)
    {
        visitor->registerDelayedMarkNoTracing(object);
    }

    static void registerWeakMembers(Visitor* visitor, const void* closure, const void* object, WeakPointerCallback callback)
    {
        visitor->registerWeakMembers(closure, object, callback);
    }

    static void registerWeakTable(Visitor* visitor, const void* closure, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
    {
        visitor->registerWeakTable(closure, iterationCallback, iterationDoneCallback);
    }

#if ENABLE(ASSERT)
    static bool weakTableRegistered(Visitor* visitor, const void* closure)
    {
        return visitor->weakTableRegistered(closure);
    }
#endif

    template<typename T>
    struct ResultType {
        typedef T* Type;
    };

    // The WTF classes use Allocator::VectorBackingHelper in order to find a
    // class to template their backing allocation operation on. For off-heap
    // allocations the VectorBackingHelper is a dummy class, since the class is
    // not used during allocation of backing. For on-heap allocations this
    // typedef ensures that the allocation is done with the correct templated
    // instantiation of the allocation function. This ensures the correct GC
    // map is written when backing allocations take place.
    template<typename T, typename Traits>
    struct VectorBackingHelper {
        typedef HeapVectorBacking<T, Traits> Type;
    };

    // Like the VectorBackingHelper, but this type is used for HashSet and
    // HashMap, both of which are implemented using HashTable.
    template<typename Table>
    struct HashTableBackingHelper {
        typedef HeapHashTableBacking<Table> Type;
    };

    template<typename T>
    struct OtherType {
        typedef T* Type;
    };

    template<typename T>
    static T& getOther(T* other)
    {
        return *other;
    }

    static void enterNoAllocationScope()
    {
#if ENABLE(ASSERT)
        ThreadStateFor<AnyThread>::state()->enterNoAllocationScope();
#endif
    }

    static void leaveNoAllocationScope()
    {
#if ENABLE(ASSERT)
        ThreadStateFor<AnyThread>::state()->leaveNoAllocationScope();
#endif
    }

private:
    template<typename T, size_t u, typename V> friend class WTF::Vector;
    template<typename T, typename U, typename V, typename W> friend class WTF::HashSet;
    template<typename T, typename U, typename V, typename W, typename X, typename Y> friend class WTF::HashMap;
};

template<typename Value>
static void traceListHashSetValue(Visitor* visitor, Value& value)
{
    // We use the default hash traits for the value in the node, because
    // ListHashSet does not let you specify any specific ones.
    // We don't allow ListHashSet of WeakMember, so we set that one false
    // (there's an assert elsewhere), but we have to specify some value for the
    // strongify template argument, so we specify WTF::WeakPointersActWeak,
    // arbitrarily.
    CollectionBackingTraceTrait<WTF::ShouldBeTraced<WTF::HashTraits<Value> >::value, WTF::NoWeakHandlingInCollections, WTF::WeakPointersActWeak, Value, WTF::HashTraits<Value> >::trace(visitor, value);
}

// The inline capacity is just a dummy template argument to match the off-heap
// allocator.
// This inherits from the static-only HeapAllocator trait class, but we do
// declare pointers to instances. These pointers are always null, and no
// objects are instantiated.
template<typename ValueArg, size_t inlineCapacity>
struct HeapListHashSetAllocator : public HeapAllocator {
    typedef HeapAllocator TableAllocator;
    typedef WTF::ListHashSetNode<ValueArg, HeapListHashSetAllocator> Node;

    class AllocatorProvider {
    public:
        // For the heap allocation we don't need an actual allocator object, so we just
        // return null.
        HeapListHashSetAllocator* get() const { return 0; }

        // No allocator object is needed.
        void createAllocatorIfNeeded() { }

        // There is no allocator object in the HeapListHashSet (unlike in
        // the regular ListHashSet) so there is nothing to swap.
        void swap(AllocatorProvider& other) { }
    };

    void deallocate(void* dummy) { }

    // This is not a static method even though it could be, because it
    // needs to match the one that the (off-heap) ListHashSetAllocator
    // has. The 'this' pointer will always be null.
    void* allocateNode()
    {
        COMPILE_ASSERT(!WTF::IsWeak<ValueArg>::value, WeakPointersInAListHashSetWillJustResultInNullEntriesInTheSetThatsNotWhatYouWantConsiderUsingLinkedHashSetInstead);
        return malloc<void*, Node>(sizeof(Node));
    }

    static void traceValue(Visitor* visitor, Node* node)
    {
        traceListHashSetValue(visitor, node->m_value);
    }
};

// FIXME: These should just be template aliases:
//
// template<typename T, size_t inlineCapacity = 0>
// using HeapVector = Vector<T, inlineCapacity, HeapAllocator>;
//
// as soon as all the compilers we care about support that.
// MSVC supports it only in MSVC 2013.
template<
    typename KeyArg,
    typename MappedArg,
    typename HashArg = typename DefaultHash<KeyArg>::Hash,
    typename KeyTraitsArg = HashTraits<KeyArg>,
    typename MappedTraitsArg = HashTraits<MappedArg> >
class HeapHashMap : public HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg, HeapAllocator> { };

template<
    typename ValueArg,
    typename HashArg = typename DefaultHash<ValueArg>::Hash,
    typename TraitsArg = HashTraits<ValueArg> >
class HeapHashSet : public HashSet<ValueArg, HashArg, TraitsArg, HeapAllocator> { };

template<
    typename ValueArg,
    typename HashArg = typename DefaultHash<ValueArg>::Hash,
    typename TraitsArg = HashTraits<ValueArg> >
class HeapLinkedHashSet : public LinkedHashSet<ValueArg, HashArg, TraitsArg, HeapAllocator> { };

template<
    typename ValueArg,
    size_t inlineCapacity = 0, // The inlineCapacity is just a dummy to match ListHashSet (off-heap).
    typename HashArg = typename DefaultHash<ValueArg>::Hash>
class HeapListHashSet : public ListHashSet<ValueArg, inlineCapacity, HashArg, HeapListHashSetAllocator<ValueArg, inlineCapacity> > { };

template<
    typename Value,
    typename HashFunctions = typename DefaultHash<Value>::Hash,
    typename Traits = HashTraits<Value> >
class HeapHashCountedSet : public HashCountedSet<Value, HashFunctions, Traits, HeapAllocator> { };
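
// Illustrative usage (not in the original header): the heap collections are
// used like their WTF counterparts, hold Members, and are traced by the
// owning object; NodeRegistry and its fields are hypothetical:
//
//     class NodeRegistry : public GarbageCollected<NodeRegistry> {
//     public:
//         void trace(Visitor* visitor)
//         {
//             visitor->trace(m_byName);
//             visitor->trace(m_ordered);
//         }
//     private:
//         HeapHashMap<String, Member<Node> > m_byName;
//         HeapVector<Member<Node> > m_ordered;
//     };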

template<typename T, size_t inlineCapacity = 0>
class HeapVector : public Vector<T, inlineCapacity, HeapAllocator> {
public:
    HeapVector() { }

    explicit HeapVector(size_t size) : Vector<T, inlineCapacity, HeapAllocator>(size)
    {
    }

    HeapVector(size_t size, const T& val) : Vector<T, inlineCapacity, HeapAllocator>(size, val)
    {
    }

    template<size_t otherCapacity>
    HeapVector(const HeapVector<T, otherCapacity>& other)
        : Vector<T, inlineCapacity, HeapAllocator>(other)
    {
    }

    template<typename U>
    void append(const U& other)
    {
        Vector<T, inlineCapacity, HeapAllocator>::append(other);
    }

    template<typename U, size_t otherCapacity>
    void appendVector(const HeapVector<U, otherCapacity>& other)
    {
        const Vector<U, otherCapacity, HeapAllocator>& otherVector = other;
        Vector<T, inlineCapacity, HeapAllocator>::appendVector(otherVector);
    }
};

template<typename T, size_t inlineCapacity = 0>
class HeapDeque : public Deque<T, inlineCapacity, HeapAllocator> {
public:
    HeapDeque() { }

    explicit HeapDeque(size_t size) : Deque<T, inlineCapacity, HeapAllocator>(size)
    {
    }

    HeapDeque(size_t size, const T& val) : Deque<T, inlineCapacity, HeapAllocator>(size, val)
    {
    }

    // FIXME: Doesn't work if there is an inline buffer, due to crbug.com/360572
    HeapDeque<T, 0>& operator=(const HeapDeque& other)
    {
        HeapDeque<T> copy(other);
        swap(copy);
        return *this;
    }

    // FIXME: Doesn't work if there is an inline buffer, due to crbug.com/360572
    inline void swap(HeapDeque& other)
    {
        Deque<T, inlineCapacity, HeapAllocator>::swap(other);
    }

    template<size_t otherCapacity>
    HeapDeque(const HeapDeque<T, otherCapacity>& other)
        : Deque<T, inlineCapacity, HeapAllocator>(other)
    {
    }

    template<typename U>
    void append(const U& other)
    {
        Deque<T, inlineCapacity, HeapAllocator>::append(other);
    }
};


template<typename T, size_t i>
inline void swap(HeapVector<T, i>& a, HeapVector<T, i>& b) { a.swap(b); }
template<typename T, size_t i>
inline void swap(HeapDeque<T, i>& a, HeapDeque<T, i>& b) { a.swap(b); }
template<typename T, typename U, typename V>
inline void swap(HeapHashSet<T, U, V>& a, HeapHashSet<T, U, V>& b) { a.swap(b); }
template<typename T, typename U, typename V, typename W, typename X>
inline void swap(HeapHashMap<T, U, V, W, X>& a, HeapHashMap<T, U, V, W, X>& b) { a.swap(b); }
template<typename T, size_t i, typename U>
inline void swap(HeapListHashSet<T, i, U>& a, HeapListHashSet<T, i, U>& b) { a.swap(b); }
template<typename T, typename U, typename V>
inline void swap(HeapLinkedHashSet<T, U, V>& a, HeapLinkedHashSet<T, U, V>& b) { a.swap(b); }
template<typename T, typename U, typename V>
inline void swap(HeapHashCountedSet<T, U, V>& a, HeapHashCountedSet<T, U, V>& b) { a.swap(b); }

template<typename T>
struct ThreadingTrait<Member<T> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename T>
struct ThreadingTrait<WeakMember<T> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename Key, typename Value, typename T, typename U, typename V>
struct ThreadingTrait<HashMap<Key, Value, T, U, V, HeapAllocator> > {
    static const ThreadAffinity Affinity =
        (ThreadingTrait<Key>::Affinity == MainThreadOnly)
        && (ThreadingTrait<Value>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
};

template<typename First, typename Second>
struct ThreadingTrait<WTF::KeyValuePair<First, Second> > {
    static const ThreadAffinity Affinity =
        (ThreadingTrait<First>::Affinity == MainThreadOnly)
        && (ThreadingTrait<Second>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
};
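
// For example (A and B are hypothetical classes): if ThreadingTrait<A> is
// MainThreadOnly but ThreadingTrait<B> is AnyThread, then a
// HashMap<Member<A>, Member<B>, ..., HeapAllocator> gets AnyThread affinity;
// both sides must be MainThreadOnly for the whole map to be.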

template<typename T, typename U, typename V>
struct ThreadingTrait<HashSet<T, U, V, HeapAllocator> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename T, size_t inlineCapacity>
struct ThreadingTrait<Vector<T, inlineCapacity, HeapAllocator> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename T, typename Traits>
struct ThreadingTrait<HeapVectorBacking<T, Traits> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename T, size_t inlineCapacity>
struct ThreadingTrait<Deque<T, inlineCapacity, HeapAllocator> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename T, typename U, typename V>
struct ThreadingTrait<HashCountedSet<T, U, V, HeapAllocator> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename Table>
struct ThreadingTrait<HeapHashTableBacking<Table> > {
    typedef typename Table::KeyType Key;
    typedef typename Table::ValueType Value;
    static const ThreadAffinity Affinity =
        (ThreadingTrait<Key>::Affinity == MainThreadOnly)
        && (ThreadingTrait<Value>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
};

template<typename T, typename U, typename V, typename W, typename X>
struct ThreadingTrait<HeapHashMap<T, U, V, W, X> > : public ThreadingTrait<HashMap<T, U, V, W, X, HeapAllocator> > { };

template<typename T, typename U, typename V>
struct ThreadingTrait<HeapHashSet<T, U, V> > : public ThreadingTrait<HashSet<T, U, V, HeapAllocator> > { };

template<typename T, size_t inlineCapacity>
struct ThreadingTrait<HeapVector<T, inlineCapacity> > : public ThreadingTrait<Vector<T, inlineCapacity, HeapAllocator> > { };

template<typename T, size_t inlineCapacity>
struct ThreadingTrait<HeapDeque<T, inlineCapacity> > : public ThreadingTrait<Deque<T, inlineCapacity, HeapAllocator> > { };

template<typename T, typename U, typename V>
struct ThreadingTrait<HeapHashCountedSet<T, U, V> > : public ThreadingTrait<HashCountedSet<T, U, V, HeapAllocator> > { };

// The standard implementation of GCInfoTrait<T>::get() just returns a static
// from the class T, but we can't do that for HashMap, HashSet, Vector, etc.
// because they are in WTF and know nothing of GCInfos. Instead we have a
// specialization of GCInfoTrait for each of these collection classes here.
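//
// Each specialization follows the same shape (a sketch of the pattern used
// below, not a separate API): build one static GCInfo per instantiation and
// hand out its address:
//
//     static const GCInfo* get()
//     {
//         static const GCInfo info = { trace, finalize, needsFinalization, isPolymorphic };
//         return &info;
//     }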

template<typename Key, typename Value, typename T, typename U, typename V>
struct GCInfoTrait<HashMap<Key, Value, T, U, V, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef HashMap<Key, Value, T, U, V, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            0,
            false, // HashMap needs no finalizer.
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T, typename U, typename V>
struct GCInfoTrait<HashSet<T, U, V, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef HashSet<T, U, V, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            0,
            false, // HashSet needs no finalizer.
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T, typename U, typename V>
struct GCInfoTrait<LinkedHashSet<T, U, V, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef LinkedHashSet<T, U, V, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            LinkedHashSet<T, U, V, HeapAllocator>::finalize,
            true, // Needs finalization. The anchor needs to unlink itself from the chain.
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename ValueArg, size_t inlineCapacity, typename U>
struct GCInfoTrait<ListHashSet<ValueArg, inlineCapacity, U, HeapListHashSetAllocator<ValueArg, inlineCapacity> > > {
    static const GCInfo* get()
    {
        typedef WTF::ListHashSet<ValueArg, inlineCapacity, U, HeapListHashSetAllocator<ValueArg, inlineCapacity> > TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            0,
            false, // ListHashSet needs no finalization though its backing might.
            false, // no vtable.
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T, typename Allocator>
struct GCInfoTrait<WTF::ListHashSetNode<T, Allocator> > {
    static const GCInfo* get()
    {
        typedef WTF::ListHashSetNode<T, Allocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            TargetType::finalize,
            WTF::HashTraits<T>::needsDestruction, // The node needs destruction if its data does.
            false, // no vtable.
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T>
struct GCInfoTrait<Vector<T, 0, HeapAllocator> > {
    static const GCInfo* get()
    {
#if ENABLE(GC_PROFILING)
        typedef Vector<T, 0, HeapAllocator> TargetType;
#endif
        static const GCInfo info = {
            TraceTrait<Vector<T, 0, HeapAllocator> >::trace,
            0,
            false, // Vector needs no finalizer if it has no inline capacity.
            WTF::IsPolymorphic<Vector<T, 0, HeapAllocator> >::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T, size_t inlineCapacity>
struct FinalizerTrait<Vector<T, inlineCapacity, HeapAllocator> > : public FinalizerTraitImpl<Vector<T, inlineCapacity, HeapAllocator>, true> { };

template<typename T, size_t inlineCapacity>
struct GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef Vector<T, inlineCapacity, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            FinalizerTrait<TargetType>::finalize,
            // Finalizer is needed to destruct things stored in the inline capacity.
            inlineCapacity && VectorTraits<T>::needsDestruction,
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T>
struct GCInfoTrait<Deque<T, 0, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef Deque<T, 0, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            0,
            false, // Deque needs no finalizer if it has no inline capacity.
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
    static const GCInfo info;
};

template<typename T, typename U, typename V>
struct GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef HashCountedSet<T, U, V, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            0,
            false, // HashCountedSet is just a HashTable, and needs no finalizer.
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
    static const GCInfo info;
};

template<typename T, size_t inlineCapacity>
struct FinalizerTrait<Deque<T, inlineCapacity, HeapAllocator> > : public FinalizerTraitImpl<Deque<T, inlineCapacity, HeapAllocator>, true> { };

template<typename T, size_t inlineCapacity>
struct GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef Deque<T, inlineCapacity, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            FinalizerTrait<TargetType>::finalize,
            // Finalizer is needed to destruct things stored in the inline capacity.
            inlineCapacity && VectorTraits<T>::needsDestruction,
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
    static const GCInfo info;
};

template<typename T, typename Traits>
struct GCInfoTrait<HeapVectorBacking<T, Traits> > {
    static const GCInfo* get()
    {
        typedef HeapVectorBacking<T, Traits> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            FinalizerTrait<TargetType>::finalize,
            Traits::needsDestruction,
            false, // We don't support embedded objects in HeapVectors with vtables.
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename Table>
struct GCInfoTrait<HeapHashTableBacking<Table> > {
    static const GCInfo* get()
    {
        typedef HeapHashTableBacking<Table> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            HeapHashTableBacking<Table>::finalize,
            Table::ValueTraits::needsDestruction,
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

} // namespace blink

namespace WTF {

// Catch-all for types that have a way to trace that don't have special
// handling for weakness in collections. This means that if this type
// contains WeakMember fields, they will simply be zeroed, but the entry
// will not be removed from the collection. This always happens for
// things in vectors, which don't currently support special handling of
// weak elements.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, T, Traits> {
    static bool trace(blink::Visitor* visitor, T& t)
    {
        blink::TraceTrait<T>::trace(visitor, &t);
        return false;
    }
};
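
// Concretely (hypothetical element type): in a HeapVector whose elements
// embed a WeakMember field, a dead WeakMember is zeroed in place and the
// vector keeps its size; only collections with real weak handling, such as
// HeapHashSet<WeakMember<T> >, actually drop entries when referents die.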

// Catch-all for things that have HashTrait support for tracing with weakness.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
struct TraceInCollectionTrait<WeakHandlingInCollections, strongify, T, Traits> {
    static bool trace(blink::Visitor* visitor, T& t)
    {
        return Traits::traceInCollection(visitor, t, strongify);
    }
};

// Vector backing that needs marking. We don't support weak members in vectors.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, blink::HeapVectorBacking<T, Traits>, void> {
    static bool trace(blink::Visitor* visitor, void* self)
    {
        // The allocator can oversize the allocation a little, according to
        // the allocation granularity. The extra size is included in the
        // payloadSize call below, since there is nowhere to store the
        // originally requested size. This assert ensures that visiting the
        // last bit of memory can't cause trouble.
        COMPILE_ASSERT(!ShouldBeTraced<Traits>::value || sizeof(T) > blink::allocationGranularity || Traits::canInitializeWithMemset, HeapOverallocationCanCauseSpuriousVisits);

        T* array = reinterpret_cast<T*>(self);
        blink::FinalizedHeapObjectHeader* header = blink::FinalizedHeapObjectHeader::fromPayload(self);
        // Use the payload size as recorded by the heap to determine how many
        // elements to mark.
        size_t length = header->payloadSize() / sizeof(T);
        for (size_t i = 0; i < length; i++)
            blink::CollectionBackingTraceTrait<ShouldBeTraced<Traits>::value, Traits::weakHandlingFlag, WeakPointersActStrong, T, Traits>::trace(visitor, array[i]);
        return false;
    }
};
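
// Worked example of the assert above: with allocationGranularity == 8 and
// sizeof(T) == 4, a request for 3 elements (12 bytes) can be rounded up to a
// 16 byte payload, so length computes to 4 and a never-constructed fourth
// slot gets visited; that is only safe if T canInitializeWithMemset. When
// sizeof(T) > allocationGranularity, the padding can never hold a whole
// extra element, so no spurious visit is possible.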

// Almost all hash table backings are visited with this specialization.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename Table>
struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, blink::HeapHashTableBacking<Table>, void> {
    typedef typename Table::ValueType Value;
    typedef typename Table::ValueTraits Traits;
    static bool trace(blink::Visitor* visitor, void* self)
    {
        Value* array = reinterpret_cast<Value*>(self);
        blink::FinalizedHeapObjectHeader* header = blink::FinalizedHeapObjectHeader::fromPayload(self);
        size_t length = header->payloadSize() / sizeof(Value);
        for (size_t i = 0; i < length; i++) {
            if (!HashTableHelper<Value, typename Table::ExtractorType, typename Table::KeyTraitsType>::isEmptyOrDeletedBucket(array[i]))
                blink::CollectionBackingTraceTrait<ShouldBeTraced<Traits>::value, Traits::weakHandlingFlag, strongify, Value, Traits>::trace(visitor, array[i]);
        }
        return false;
    }
};

// This specialization of TraceInCollectionTrait is for the backing of
// HeapListHashSet. This is for the case that we find a reference to the
// backing from the stack. That probably means we have a GC while we are in a
// ListHashSet method since normal API use does not put pointers to the
// backing on the stack.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename NodeContents, size_t inlineCapacity, typename T, typename U, typename V, typename W, typename X, typename Y>
struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, blink::HeapHashTableBacking<HashTable<ListHashSetNode<NodeContents, blink::HeapListHashSetAllocator<T, inlineCapacity> >*, U, V, W, X, Y, blink::HeapAllocator> >, void> {
    typedef ListHashSetNode<NodeContents, blink::HeapListHashSetAllocator<T, inlineCapacity> > Node;
    typedef HashTable<Node*, U, V, W, X, Y, blink::HeapAllocator> Table;
    static bool trace(blink::Visitor* visitor, void* self)
    {
        Node** array = reinterpret_cast<Node**>(self);
        blink::FinalizedHeapObjectHeader* header = blink::FinalizedHeapObjectHeader::fromPayload(self);
        size_t length = header->payloadSize() / sizeof(Node*);
        for (size_t i = 0; i < length; i++) {
            if (!HashTableHelper<Node*, typename Table::ExtractorType, typename Table::KeyTraitsType>::isEmptyOrDeletedBucket(array[i])) {
                traceListHashSetValue(visitor, array[i]->m_value);
                // Just mark the node without tracing because we already traced
                // the contents, and there is no need to trace the next and
                // prev fields since iterating over the hash table backing will
                // find the whole chain.
                visitor->markNoTracing(array[i]);
            }
        }
        return false;
    }
};

// Key value pairs, as used in HashMap. To disambiguate template choice we have
// to have two versions, first the one with no special weak handling, then the
// one with weak handling.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename Key, typename Value, typename Traits>
struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, KeyValuePair<Key, Value>, Traits> {
    static bool trace(blink::Visitor* visitor, KeyValuePair<Key, Value>& self)
    {
        ASSERT(ShouldBeTraced<Traits>::value);
        blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::KeyTraits>::value, NoWeakHandlingInCollections, strongify, Key, typename Traits::KeyTraits>::trace(visitor, self.key);
        blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::ValueTraits>::value, NoWeakHandlingInCollections, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.value);
        return false;
    }
};

template<ShouldWeakPointersBeMarkedStrongly strongify, typename Key, typename Value, typename Traits>
struct TraceInCollectionTrait<WeakHandlingInCollections, strongify, KeyValuePair<Key, Value>, Traits> {
    static bool trace(blink::Visitor* visitor, KeyValuePair<Key, Value>& self)
    {
        // This is the core of the ephemeron-like functionality. If there is
        // weakness on the key side then we first check whether there are
        // dead weak pointers on that side, and if there are we don't mark the
        // value side (yet). Conversely if there is weakness on the value side
        // we check that first and don't mark the key side yet if we find dead
        // weak pointers.
        // Corner case: If there is weakness on both the key and value side,
        // and there are also strong pointers on both sides, then we could
        // unexpectedly leak. The scenario is that the weak pointer on the key
        // side is alive, which causes the strong pointer on the key side to be
        // marked. If that then results in the object pointed to by the weak
        // pointer on the value side being marked live, then the whole
        // key-value entry is leaked. To avoid unexpected leaking, we disallow
        // this case, but if you run into this assert, please reach out to Blink
        // reviewers, and we may relax it.
        const bool keyIsWeak = Traits::KeyTraits::weakHandlingFlag == WeakHandlingInCollections;
        const bool valueIsWeak = Traits::ValueTraits::weakHandlingFlag == WeakHandlingInCollections;
        const bool keyHasStrongRefs = ShouldBeTraced<typename Traits::KeyTraits>::value;
        const bool valueHasStrongRefs = ShouldBeTraced<typename Traits::ValueTraits>::value;
        COMPILE_ASSERT(!keyIsWeak || !valueIsWeak || !keyHasStrongRefs || !valueHasStrongRefs, ThisConfigurationWasDisallowedToAvoidUnexpectedLeaks);
        if ((valueIsWeak && !keyIsWeak) || (valueIsWeak && keyIsWeak && !valueHasStrongRefs)) {
            // Check value first.
            bool deadWeakObjectsFoundOnValueSide = blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::ValueTraits>::value, Traits::ValueTraits::weakHandlingFlag, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.value);
            if (deadWeakObjectsFoundOnValueSide)
                return true;
            return blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::KeyTraits>::value, Traits::KeyTraits::weakHandlingFlag, strongify, Key, typename Traits::KeyTraits>::trace(visitor, self.key);
        }
        // Check key first.
        bool deadWeakObjectsFoundOnKeySide = blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::KeyTraits>::value, Traits::KeyTraits::weakHandlingFlag, strongify, Key, typename Traits::KeyTraits>::trace(visitor, self.key);
        if (deadWeakObjectsFoundOnKeySide)
            return true;
        return blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::ValueTraits>::value, Traits::ValueTraits::weakHandlingFlag, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.value);
    }
};
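
// A hedged illustration (hypothetical types): a
// HeapHashMap<WeakMember<Key>, Member<Val> > gets ephemeron-like behavior
// from this trait. An entry's Val is only marked once its Key is found
// alive; if the Key dies, the entry is removed during weak processing, so
// the map by itself never keeps the Val alive.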

// Nodes used by LinkedHashSet. Again we need two versions to disambiguate
// the template.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename Value, typename Allocator, typename Traits>
struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, LinkedHashSetNode<Value, Allocator>, Traits> {
    static bool trace(blink::Visitor* visitor, LinkedHashSetNode<Value, Allocator>& self)
    {
        ASSERT(ShouldBeTraced<Traits>::value);
        blink::TraceTrait<Value>::trace(visitor, &self.m_value);
        return false;
    }
};

template<ShouldWeakPointersBeMarkedStrongly strongify, typename Value, typename Allocator, typename Traits>
struct TraceInCollectionTrait<WeakHandlingInCollections, strongify, LinkedHashSetNode<Value, Allocator>, Traits> {
    static bool trace(blink::Visitor* visitor, LinkedHashSetNode<Value, Allocator>& self)
    {
        return TraceInCollectionTrait<WeakHandlingInCollections, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.m_value);
    }
};

// ListHashSetNode pointers (a ListHashSet is implemented as a hash table of these pointers).
template<ShouldWeakPointersBeMarkedStrongly strongify, typename Value, size_t inlineCapacity, typename Traits>
struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, ListHashSetNode<Value, blink::HeapListHashSetAllocator<Value, inlineCapacity> >*, Traits> {
    typedef ListHashSetNode<Value, blink::HeapListHashSetAllocator<Value, inlineCapacity> > Node;
    static bool trace(blink::Visitor* visitor, Node* node)
    {
        traceListHashSetValue(visitor, node->m_value);
        // Just mark the node without tracing because we already traced the
        // contents, and there is no need to trace the next and prev fields
        // since iterating over the hash table backing will find the whole
        // chain.
        visitor->markNoTracing(node);
        return false;
    }
};

} // namespace WTF

namespace blink {

// CollectionBackingTraceTrait. Do nothing for things in collections that don't
// need tracing, or call TraceInCollectionTrait for those that do.

// Specialization for things that don't need marking and have no weak pointers.
// We do nothing, even if WTF::WeakPointersActStrong.
template<WTF::ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
struct CollectionBackingTraceTrait<false, WTF::NoWeakHandlingInCollections, strongify, T, Traits> {
    static bool trace(Visitor*, T&) { return false; }
};

// Specialization for things that either need marking or have weak pointers or
// both.
template<bool needsTracing, WTF::WeakHandlingFlag weakHandlingFlag, WTF::ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
struct CollectionBackingTraceTrait {
    static bool trace(Visitor* visitor, T& t)
    {
        Visitor::verifyGarbageCollectedIfMember(reinterpret_cast<T*>(0));
        return WTF::TraceInCollectionTrait<weakHandlingFlag, strongify, T, Traits>::trace(visitor, t);
    }
};

template<typename T> struct WeakHandlingHashTraits : WTF::SimpleClassHashTraits<T> {
    // We want to treat the object as a weak object in the sense that it can
    // disappear from hash sets and hash maps.
    static const WTF::WeakHandlingFlag weakHandlingFlag = WTF::WeakHandlingInCollections;
    // Normally whether or not an object needs tracing is inferred
    // automatically from the presence of the trace method, but we don't
    // necessarily have a trace method, and we may not need one because T
    // can perhaps only be allocated inside collections, never as independent
    // objects. Explicitly mark this as needing tracing and it will be traced
    // in collections using the traceInCollection method, which it must have.
    template<typename U = void> struct NeedsTracingLazily {
        static const bool value = true;
    };
    // The traceInCollection method traces differently depending on whether we
    // are strongifying the trace operation. We strongify the trace operation
    // when there are active iterators on the object. In this case all
    // WeakMembers are marked like strong members so that elements do not
    // suddenly disappear during iteration. Returns true if weak pointers to
    // dead objects were found: in this case any strong pointers were not yet
    // traced and the entry should be removed from the collection.
    static bool traceInCollection(Visitor* visitor, T& t, WTF::ShouldWeakPointersBeMarkedStrongly strongify)
    {
        return t.traceInCollection(visitor, strongify);
    }
};
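
// A minimal sketch (hypothetical type and body; Widget is a made-up
// GarbageCollected class) of what such a T can look like. It holds a weak
// reference and reports whether that reference is dead so the collection can
// drop the entry; the body assumes WeakMember's hash traits expose a
// matching traceInCollection, as the weak catch-all trait above requires of
// every weak-handling trait.
//
//     struct Entry {
//         bool traceInCollection(Visitor* visitor, WTF::ShouldWeakPointersBeMarkedStrongly strongify)
//         {
//             // Under WeakPointersActStrong this marks m_widget like a strong
//             // member; otherwise it reports whether the referent is dead.
//             return WTF::HashTraits<WeakMember<Widget> >::traceInCollection(visitor, m_widget, strongify);
//         }
//         WeakMember<Widget> m_widget;
//     };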

template<typename T, typename Traits>
struct TraceTrait<HeapVectorBacking<T, Traits> > {
    typedef HeapVectorBacking<T, Traits> Backing;
    static void trace(Visitor* visitor, void* self)
    {
        COMPILE_ASSERT(!WTF::IsWeak<T>::value, WeDontSupportWeaknessInHeapVectorsOrDeques);
        if (WTF::ShouldBeTraced<Traits>::value)
            WTF::TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, WTF::WeakPointersActWeak, HeapVectorBacking<T, Traits>, void>::trace(visitor, self);
    }
    static void mark(Visitor* visitor, const Backing* backing)
    {
        visitor->mark(backing, &trace);
    }
    static void checkGCInfo(Visitor* visitor, const Backing* backing)
    {
#if ENABLE(ASSERT)
        visitor->checkGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing>::get());
#endif
    }
};
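
// Note the COMPILE_ASSERT above: HeapVector<WeakMember<T> > and
// HeapDeque<WeakMember<T> > do not compile. A weak collection of single
// values should be a HeapHashSet<WeakMember<T> > (or a HeapLinkedHashSet
// when iteration order matters).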

// The trace trait for the heap hashtable backing is used when we find a
// direct pointer to the backing from the conservative stack scanner. This
// normally indicates that there is an ongoing iteration over the table, and so
// we disable weak processing of table entries. When the backing is found
// through the owning hash table we mark differently, in order to do weak
// processing.
template<typename Table>
struct TraceTrait<HeapHashTableBacking<Table> > {
    typedef HeapHashTableBacking<Table> Backing;
    typedef typename Table::ValueTraits Traits;
    static void trace(Visitor* visitor, void* self)
    {
        if (WTF::ShouldBeTraced<Traits>::value || Traits::weakHandlingFlag == WTF::WeakHandlingInCollections)
            WTF::TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, WTF::WeakPointersActStrong, Backing, void>::trace(visitor, self);
    }
    static void mark(Visitor* visitor, const Backing* backing)
    {
        if (WTF::ShouldBeTraced<Traits>::value || Traits::weakHandlingFlag == WTF::WeakHandlingInCollections)
            visitor->mark(backing, &trace);
        else
            visitor->markNoTracing(backing); // If we know the trace function will do nothing there is no need to call it.
    }
    static void checkGCInfo(Visitor* visitor, const Backing* backing)
    {
#if ENABLE(ASSERT)
        visitor->checkGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing>::get());
#endif
    }
};
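
// In other words: if a conservative GC strikes while code is iterating the
// table, the backing is found directly on the stack and its entries are
// visited with WeakPointersActStrong, so no entry disappears mid-iteration;
// weak processing only happens when marking reaches the backing through the
// owning collection.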

template<typename Table>
void HeapHashTableBacking<Table>::finalize(void* pointer)
{
    typedef typename Table::ValueType Value;
    ASSERT(Table::ValueTraits::needsDestruction);
    FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(pointer);
    // Use the payload size as recorded by the heap to determine how many
    // elements to finalize.
    size_t length = header->payloadSize() / sizeof(Value);
    Value* table = reinterpret_cast<Value*>(pointer);
    for (unsigned i = 0; i < length; i++) {
        if (!Table::isEmptyOrDeletedBucket(table[i]))
            table[i].~Value();
    }
}

template<typename T, typename U, typename V, typename W, typename X>
struct GCInfoTrait<HeapHashMap<T, U, V, W, X> > : public GCInfoTrait<HashMap<T, U, V, W, X, HeapAllocator> > { };
template<typename T, typename U, typename V>
struct GCInfoTrait<HeapHashSet<T, U, V> > : public GCInfoTrait<HashSet<T, U, V, HeapAllocator> > { };
template<typename T, typename U, typename V>
struct GCInfoTrait<HeapLinkedHashSet<T, U, V> > : public GCInfoTrait<LinkedHashSet<T, U, V, HeapAllocator> > { };
template<typename T, size_t inlineCapacity, typename U>
struct GCInfoTrait<HeapListHashSet<T, inlineCapacity, U> > : public GCInfoTrait<ListHashSet<T, inlineCapacity, U, HeapListHashSetAllocator<T, inlineCapacity> > > { };
template<typename T, size_t inlineCapacity>
struct GCInfoTrait<HeapVector<T, inlineCapacity> > : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > { };
template<typename T, size_t inlineCapacity>
struct GCInfoTrait<HeapDeque<T, inlineCapacity> > : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator> > { };
template<typename T, typename U, typename V>
struct GCInfoTrait<HeapHashCountedSet<T, U, V> > : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator> > { };

template<typename T>
struct IfWeakMember;

template<typename T>
struct IfWeakMember {
    template<typename U>
    static bool isDead(Visitor*, const U&) { return false; }
};

template<typename T>
struct IfWeakMember<WeakMember<T> > {
    static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); }
};
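
// Hedged usage sketch: IfWeakMember lets generic code ask "is this field a
// dead weak reference?" without knowing whether the field is weak at all.
// For any non-weak U the answer is statically false; for WeakMember<T> it
// consults the visitor's liveness information:
//
//     if (IfWeakMember<FieldType>::isDead(visitor, field))
//         ... // Drop the entry containing the field.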