/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef Heap_h
#define Heap_h

#include "platform/PlatformExport.h"
#include "platform/heap/AddressSanitizer.h"
#include "platform/heap/ThreadState.h"
#include "platform/heap/Visitor.h"
#include "public/platform/WebThread.h"
#include "wtf/Assertions.h"
#include "wtf/HashCountedSet.h"
#include "wtf/LinkedHashSet.h"
#include "wtf/ListHashSet.h"
#include "wtf/OwnPtr.h"
#include "wtf/PassRefPtr.h"
#include "wtf/ThreadSafeRefCounted.h"

namespace blink {
// FIXME: We temporarily disable parallel marking because the current
// implementation is slower than single-threaded marking. The reason
// for the slowness is that parallel marking pollutes CPU caches:
// marking threads write mark bits on on-heap objects that will be
// touched by the mutator thread later. We need to make the
// implementation more cache-aware (e.g., bitmap marking).
#define ENABLE_PARALLEL_MARKING 0
const size_t blinkPageSizeLog2 = 17;
const size_t blinkPageSize = 1 << blinkPageSizeLog2;
const size_t blinkPageOffsetMask = blinkPageSize - 1;
const size_t blinkPageBaseMask = ~blinkPageOffsetMask;
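
// Illustrative arithmetic (not part of the API): with blinkPageSizeLog2 == 17
// a Blink page is 128KB (0x20000 bytes), so for an address like 0x12345678:
//   0x12345678 & blinkPageOffsetMask == 0x05678    (offset within the page)
//   0x12345678 & blinkPageBaseMask   == 0x12340000 (base of the enclosing page)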

// We allocate pages at random addresses but in groups of
// blinkPagesPerRegion at a given random address. We group pages so that
// the heap does not spread too thinly over the address space, which
// would blow out the page tables and lead to bad performance.
const size_t blinkPagesPerRegion = 10;

// Double-precision floats are more efficient when 8-byte aligned, so we
// 8-byte align all allocations even on 32-bit systems.
const size_t allocationGranularity = 8;
const size_t allocationMask = allocationGranularity - 1;
const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity);
const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask);
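
// Illustrative arithmetic: one bit in the object-start bitmap covers one
// 8-byte granule. A 128KB page holds 131072 / 8 = 16384 granules, which need
// 16384 / 8 = 2048 bytes of bitmap; 2048 is already 8-byte aligned, so
// reservedForObjectBitMap == 2048.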
const size_t maxHeapObjectSizeLog2 = 27;
const size_t maxHeapObjectSize = 1 << maxHeapObjectSizeLog2;

const size_t markBitMask = 1;
const size_t freeListMask = 2;
// The dead bit is used for objects that have gone through a GC marking phase,
// but did not get swept before a new GC started. In that case we set the dead
// bit on objects that were not marked in the previous GC to ensure we are not
// tracing them via a conservatively found pointer. Tracing dead objects could
// lead to tracing of already finalized objects in another thread's heap, which
// is a use-after-free situation.
const size_t deadBitMask = 4;
// On free-list entries we reuse the dead bit to distinguish a normal free-list
// entry from one that has been promptly freed.
const size_t promptlyFreedMask = freeListMask | deadBitMask;
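
// To summarize (illustrative): since all encoded sizes are multiples of the
// 8-byte allocation granularity, the three low bits are free to carry state:
//   bit 0: markBitMask     - the object is marked
//   bit 1: freeListMask    - the header is a free-list entry
//   bit 2: deadBitMask     - the object is dead (with bit 1 set: promptly freed)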

#if ENABLE(GC_PROFILE_HEAP)
const size_t heapObjectGenerations = 8;
const size_t maxHeapObjectAge = heapObjectGenerations - 1;
const size_t heapObjectAgeMask = ~(maxHeapObjectSize - 1);
const size_t sizeMask = ~heapObjectAgeMask & ~static_cast<size_t>(7);
#else
const size_t sizeMask = ~static_cast<size_t>(7);
#endif

const uint8_t freelistZapValue = 42;
const uint8_t finalizedZapValue = 24;
// The orphaned zap value must be zero in the lowest bits to allow for using
// the mark bit when tracing.
const uint8_t orphanedZapValue = 240;

#if ENABLE_PARALLEL_MARKING
const int maxNumberOfMarkingThreads = 4;
#else
const int maxNumberOfMarkingThreads = 0;
#endif

const int numberOfPagesToConsiderForCoalescing = 100;

enum CallbackInvocationMode {

template<ThreadAffinity affinity> class ThreadLocalPersistents;
template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity > > class Persistent;

#if ENABLE(GC_PROFILE_HEAP)
class TracedValue;
#endif

PLATFORM_EXPORT size_t osPageSize();

// Blink heap pages are set up with a guard page before and after the
// payload.
inline size_t blinkPagePayloadSize()
{
    return blinkPageSize - 2 * osPageSize();
}

// Blink heap pages are aligned to the Blink heap page size.
// Therefore, the start of a Blink page can be obtained by
// rounding down to the Blink page size.
inline Address roundToBlinkPageStart(Address address)
{
    return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
}

inline Address roundToBlinkPageEnd(Address address)
{
    return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) & blinkPageBaseMask) + blinkPageSize;
}

// Compute the amount of padding we have to add to a header to make
// the size of the header plus the padding a multiple of 8 bytes.
template<typename Header>
inline size_t headerPadding()
{
    return (allocationGranularity - (sizeof(Header) % allocationGranularity)) % allocationGranularity;
}
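
// Illustrative: for a hypothetical header with sizeof(Header) == 12,
// headerPadding() yields (8 - 12 % 8) % 8 == 4, so header plus padding
// occupies 16 bytes; a header whose size is already a multiple of 8 needs
// no padding.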

// Masks an address down to the enclosing Blink page base address.
inline Address blinkPageAddress(Address address)
{
    return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
}

#if ENABLE(ASSERT)
// Sanity check for a page header address: the address of the page
// header should be OS page size away from being Blink page size
// aligned.
inline bool isPageHeaderAddress(Address address)
{
    return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize());
}
#endif

// Mask an address down to the enclosing Oilpan heap base page.
// All Oilpan heap pages are aligned at blinkPageBase plus an OS page size.
// FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps.
// This is only exported to enable tests in HeapTest.cpp.
PLATFORM_EXPORT inline BaseHeapPage* pageHeaderFromObject(const void* object)
{
    Address address = reinterpret_cast<Address>(const_cast<void*>(object));
    return reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + osPageSize());
}

// Large allocations are allocated as separate objects and linked in a
// list.
//
// In order to use the same memory allocation routines for everything
// allocated in the heap, large objects are considered heap pages
// containing only one object.
//
// The layout of a large heap object is as follows:
//
// | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload |
template<typename Header>
class LargeHeapObject : public BaseHeapPage {
public:
    LargeHeapObject(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) : BaseHeapPage(storage, gcInfo, state)
    {
        COMPILE_ASSERT(!(sizeof(LargeHeapObject<Header>) & allocationMask), large_heap_object_header_misaligned);
    }

    virtual void checkAndMarkPointer(Visitor*, Address) override;
    virtual bool isLargeObject() override { return true; }

#if ENABLE(GC_PROFILE_MARKING)
    virtual const GCInfo* findGCInfo(Address address)
    {
        if (!objectContains(address))
            return 0;
        return gcInfo();
    }
#endif

#if ENABLE(GC_PROFILE_HEAP)
    void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
#endif

    void link(LargeHeapObject<Header>** previousNext)
    {
        m_next = *previousNext;
        *previousNext = this;
    }

    void unlink(LargeHeapObject<Header>** previousNext)
    {
        *previousNext = m_next;
    }

    // The LargeHeapObject pseudo-page contains one actual object. Determine
    // whether the pointer is within that object.
    bool objectContains(Address object)
    {
        return (payload() <= object) && (object < address() + size());
    }

    // Returns true for any address that is on one of the pages that this
    // large object uses. That ensures that we can use a negative result to
    // populate the negative page cache.
    virtual bool contains(Address object) override
    {
        return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size());
    }

    LargeHeapObject<Header>* next()
    {
        return m_next;
    }

    size_t size()
    {
        return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
    }

    Address payload() { return heapObjectHeader()->payload(); }
    size_t payloadSize() { return heapObjectHeader()->payloadSize(); }

    Header* heapObjectHeader()
    {
        Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
        return reinterpret_cast<Header*>(headerAddress);
    }

    void getStatsForTesting(HeapStats&);

    virtual void markOrphaned()
    {
        // Zap the payload with a recognizable value to detect any incorrect
        // cross thread pointer usage.
        memset(payload(), orphanedZapValue, payloadSize());
        BaseHeapPage::markOrphaned();
    }

private:
    friend class ThreadHeap<Header>;

    LargeHeapObject<Header>* m_next;
};

// The BasicObjectHeader is the minimal object header. It is used when
// encountering heap space of size allocationGranularity to mark it as
// a freelist entry.
class PLATFORM_EXPORT BasicObjectHeader {
public:
    explicit BasicObjectHeader(size_t encodedSize)
        : m_size(encodedSize) { }

    static size_t freeListEncodedSize(size_t size) { return size | freeListMask; }

    bool isFree() { return m_size & freeListMask; }

    bool isPromptlyFreed() { return (m_size & promptlyFreedMask) == promptlyFreedMask; }

    void markPromptlyFreed() { m_size |= promptlyFreedMask; }

    size_t size() const { return m_size & sizeMask; }

#if ENABLE(GC_PROFILE_HEAP)
    size_t encodedSize() const { return m_size; }

    size_t age() const { return m_size >> maxHeapObjectSizeLog2; }

    void incAge()
    {
        size_t current = age();
        if (current < maxHeapObjectAge)
            m_size = ((current + 1) << maxHeapObjectSizeLog2) | (m_size & ~heapObjectAgeMask);
    }
#endif

protected:
    volatile unsigned m_size;
};

// Our heap object layout is layered with the HeapObjectHeader closest
// to the payload; this can be wrapped in a FinalizedObjectHeader if the
// object is on the GeneralHeap and not on a specific TypedHeap.
// Finally, if the object is a large object (> blinkPageSize / 2), it is
// wrapped with a LargeObjectHeader.
//
// Object memory layout:
// [ LargeObjectHeader | ] [ FinalizedObjectHeader | ] HeapObjectHeader | payload
// The [ ] notation denotes that the LargeObjectHeader and the FinalizedObjectHeader
// are independently optional.
class PLATFORM_EXPORT HeapObjectHeader : public BasicObjectHeader {
public:
    explicit HeapObjectHeader(size_t encodedSize)
        : BasicObjectHeader(encodedSize)
#if ENABLE(ASSERT)
        , m_magic(magic)
#endif
    { }

    HeapObjectHeader(size_t encodedSize, const GCInfo*)
        : BasicObjectHeader(encodedSize)
#if ENABLE(ASSERT)
        , m_magic(magic)
#endif
    { }

    inline void checkHeader() const;
    inline bool isMarked() const;

    inline void mark();
    inline void unmark();

    inline const GCInfo* gcInfo() { return 0; }

    inline Address payload();
    inline size_t payloadSize();
    inline Address payloadEnd();

    inline void setDeadMark();
    inline void clearDeadMark();
    inline bool hasDeadMark() const;

    // Zap magic number with a new magic number that means there was once an
    // object allocated here, but it was freed because nobody marked it during
    // GC.
    void zapMagic();

    static void finalize(const GCInfo*, Address, size_t);
    static HeapObjectHeader* fromPayload(const void*);

    static const intptr_t magic = 0xc0de247;
    static const intptr_t zappedMagic = 0xC0DEdead;
    // The zap value for vtables should be < 4K to ensure it cannot be
    // used for dispatch.
    static const intptr_t zappedVTable = 0xd0d;

private:
#if ENABLE(ASSERT)
    intptr_t m_magic;
#endif
};

const size_t objectHeaderSize = sizeof(HeapObjectHeader);

// Each object on the GeneralHeap needs to carry a pointer to its
// own GCInfo structure for tracing and potential finalization.
class PLATFORM_EXPORT FinalizedHeapObjectHeader : public HeapObjectHeader {
public:
    FinalizedHeapObjectHeader(size_t encodedSize, const GCInfo* gcInfo)
        : HeapObjectHeader(encodedSize)
        , m_gcInfo(gcInfo)
    { }

    inline Address payload();
    inline size_t payloadSize();

    const GCInfo* gcInfo() { return m_gcInfo; }

    TraceCallback traceCallback() { return m_gcInfo->m_trace; }

    void finalize();

    inline bool hasFinalizer() { return m_gcInfo->hasFinalizer(); }

    static FinalizedHeapObjectHeader* fromPayload(const void*);

    bool hasVTable() { return m_gcInfo->hasVTable(); }

private:
    const GCInfo* m_gcInfo;
};

const size_t finalizedHeaderSize = sizeof(FinalizedHeapObjectHeader);

class FreeListEntry : public HeapObjectHeader {
public:
    explicit FreeListEntry(size_t size)
        : HeapObjectHeader(freeListEncodedSize(size))
        , m_next(0)
    {
#if ENABLE(ASSERT) && !defined(ADDRESS_SANITIZER)
        // Zap free area with asterisks, aka 0x2a2a2a2a.
        // For ASan don't zap since we keep accounting in the freelist entry.
        for (size_t i = sizeof(*this); i < size; i++)
            reinterpret_cast<Address>(this)[i] = freelistZapValue;
        ASSERT(size >= objectHeaderSize);
        zapMagic();
#endif
    }

    Address address() { return reinterpret_cast<Address>(this); }

    void unlink(FreeListEntry** prevNext)
    {
        *prevNext = m_next;
        m_next = 0;
    }

    void link(FreeListEntry** prevNext)
    {
        m_next = *prevNext;
        *prevNext = this;
    }

    FreeListEntry* next() const { return m_next; }

    void append(FreeListEntry* next)
    {
        ASSERT(!m_next);
        m_next = next;
    }

#if defined(ADDRESS_SANITIZER)
    bool shouldAddToFreeList()
    {
        // Init if not already magic.
        if ((m_asanMagic & ~asanDeferMemoryReuseMask) != asanMagic) {
            m_asanMagic = asanMagic | asanDeferMemoryReuseCount;
            return false;
        }
        // Decrement if count part of asanMagic > 0.
        if (m_asanMagic & asanDeferMemoryReuseMask)
            m_asanMagic--;
        return !(m_asanMagic & asanDeferMemoryReuseMask);
    }
#endif

private:
    FreeListEntry* m_next;
#if defined(ADDRESS_SANITIZER)
    unsigned m_asanMagic;
#endif
};

// Representation of Blink heap pages.
//
// Pages are specialized on the type of header on the object they
// contain. If a heap page only contains a certain type of object, all
// of the objects will have the same GCInfo pointer and therefore that
// pointer can be stored in the HeapPage instead of in the header of
// each object. In that case objects have only a HeapObjectHeader and
// not a FinalizedHeapObjectHeader, saving a word per object.
template<typename Header>
class HeapPage : public BaseHeapPage {
public:
    HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*);

    void link(HeapPage**);
    static void unlink(ThreadHeap<Header>*, HeapPage*, HeapPage**);

    bool isEmpty();

    // Returns true for the whole blinkPageSize page that the page is on, even
    // for the header, and the unmapped guard page at the start. That ensures
    // the result can be used to populate the negative page cache.
    virtual bool contains(Address addr) override
    {
        Address blinkPageStart = roundToBlinkPageStart(address());
        ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size.
        return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize;
    }

    HeapPage* next() { return m_next; }

    Address payload()
    {
        return address() + sizeof(*this) + headerPadding<Header>();
    }

    static size_t payloadSize()
    {
        return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask;
    }

    Address end() { return payload() + payloadSize(); }

    void getStatsForTesting(HeapStats&);
    void clearLiveAndMarkDead();
    void sweep(HeapStats*, ThreadHeap<Header>*);
    void clearObjectStartBitMap();
    void finalize(Header*);
    virtual void checkAndMarkPointer(Visitor*, Address) override;
#if ENABLE(GC_PROFILE_MARKING)
    const GCInfo* findGCInfo(Address) override;
#endif
#if ENABLE(GC_PROFILE_HEAP)
    virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
#endif

#if defined(ADDRESS_SANITIZER)
    void poisonUnmarkedObjects();
#endif

    virtual void markOrphaned()
    {
        // Zap the payload with a recognizable value to detect any incorrect
        // cross thread pointer usage.
#if defined(ADDRESS_SANITIZER)
        // Don't use memset when running with ASan since this needs to zap
        // poisoned memory as well and the NO_SANITIZE_ADDRESS annotation
        // only works for code in this method and not for calls to memset.
        for (Address current = payload(); current < payload() + payloadSize(); ++current)
            *current = orphanedZapValue;
#else
        memset(payload(), orphanedZapValue, payloadSize());
#endif
        BaseHeapPage::markOrphaned();
    }

protected:
    Header* findHeaderFromAddress(Address);
    void populateObjectStartBitMap();
    bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; }
    TraceCallback traceCallback(Header*);
    bool hasVTable(Header*);

    intptr_t padding() const { return m_padding; }

    HeapPage<Header>* m_next;
    intptr_t m_padding; // Preserve 8-byte alignment on 32-bit systems.
    bool m_objectStartBitMapComputed;
    uint8_t m_objectStartBitMap[reservedForObjectBitMap];

    friend class ThreadHeap<Header>;
};

// A HeapDoesNotContainCache provides a fast way of taking an arbitrary
// pointer-sized word, and determining whether it cannot be interpreted
// as a pointer to an area that is managed by the garbage collected
// Blink heap. This is a cache of 'pages' that have previously been
// determined to be wholly outside of the heap. The size of these pages must be
// smaller than the allocation alignment of the heap pages. We determine
// off-heap-ness by rounding down the pointer to the nearest page and looking up
// the page in the cache. If there is a miss in the cache we can determine the
// status of the pointer precisely using the heap RegionTree.
//
// The HeapDoesNotContainCache is a negative cache, so it must be
// flushed when memory is added to the heap.
class HeapDoesNotContainCache {
public:
    HeapDoesNotContainCache()
        : m_entries(adoptArrayPtr(new Address[HeapDoesNotContainCache::numberOfEntries]))
        , m_hasEntries(false)
    {
        // Start by flushing the cache in a non-empty state to initialize all the cache entries.
        for (int i = 0; i < numberOfEntries; i++)
            m_entries[i] = 0;
    }

    void flush();
    bool isEmpty() { return !m_hasEntries; }

    // Perform a lookup in the cache.
    //
    // If lookup returns false, the argument address was not found in
    // the cache and it is unknown if the address is in the Blink
    // heap or not.
    //
    // If lookup returns true, the argument address was found in the
    // cache which means the address is not in the heap.
    PLATFORM_EXPORT bool lookup(Address);

    // Add an entry to the cache.
    PLATFORM_EXPORT void addEntry(Address);

private:
    static const int numberOfEntriesLog2 = 12;
    static const int numberOfEntries = 1 << numberOfEntriesLog2;

    static size_t hash(Address);

    WTF::OwnPtr<Address[]> m_entries;
    bool m_hasEntries;
};

template<typename DataType>
class PagePool {
protected:
    class PoolEntry {
    public:
        PoolEntry(DataType* data, PoolEntry* next)
            : data(data)
            , next(next)
        { }

        DataType* data;
        PoolEntry* next;
    };

    PoolEntry* m_pool[NumberOfHeaps];
};

// Once pages have been used for one type of thread heap they will never be
// reused for another type of thread heap. Instead of unmapping, we add the
// pages to a pool of pages to be reused later by a thread heap of the same
// type. This is done as a security feature to avoid type confusion. The
// heaps are type segregated by having separate thread heaps for different
// types of objects. Holding on to pages ensures that the same virtual address
// space cannot later be used for objects of a type other than the type this
// page contained to begin with.
class FreePagePool : public PagePool<PageMemory> {
public:
    ~FreePagePool();
    void addFreePage(int, PageMemory*);
    PageMemory* takeFreePage(int);

private:
    Mutex m_mutex[NumberOfHeaps];
};

class OrphanedPagePool : public PagePool<BaseHeapPage> {
public:
    ~OrphanedPagePool();
    void addOrphanedPage(int, BaseHeapPage*);
    void decommitOrphanedPages();
#if ENABLE(ASSERT)
    bool contains(void*);
#endif
private:
    void clearMemory(PageMemory*);
};

// Non-template super class used to pass a heap around to other classes.
class BaseHeap {
public:
    virtual ~BaseHeap() { }
    virtual void cleanupPages() = 0;

    // Find the page in this thread heap containing the given
    // address. Returns 0 if the address is not contained in any
    // page in this thread heap.
    virtual BaseHeapPage* heapPageFromAddress(Address) = 0;

#if ENABLE(GC_PROFILE_MARKING)
    virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0;
#endif

#if ENABLE(GC_PROFILE_HEAP)
    virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0;
#endif

    // Sweep this part of the Blink heap. This finalizes dead objects
    // and builds freelists for all the unused memory.
    virtual void sweep(HeapStats*) = 0;
    virtual void postSweepProcessing() = 0;

    virtual void clearFreeLists() = 0;
    virtual void clearLiveAndMarkDead() = 0;

    virtual void makeConsistentForSweeping() = 0;
#if ENABLE(ASSERT)
    virtual bool isConsistentForSweeping() = 0;
#endif
    virtual void getStatsForTesting(HeapStats&) = 0;

    virtual void updateRemainingAllocationSize() = 0;

    virtual void prepareHeapForTermination() = 0;

    virtual int normalPageCount() = 0;

    virtual PassOwnPtr<BaseHeap> split(int normalPages) = 0;
    virtual void merge(PassOwnPtr<BaseHeap> other) = 0;

    // Returns a bucket number for inserting a FreeListEntry of a
    // given size. All FreeListEntries in the given bucket, n, have
    // size >= 2^n.
    static int bucketIndexForSize(size_t);
};
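
// Illustrative: a 48-byte FreeListEntry would land in bucket 5, since
// 2^5 = 32 <= 48 < 64 = 2^6; every entry in bucket n is at least 2^n bytes.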

// Thread heaps represent a part of the per-thread Blink heap.
//
// Each Blink thread has a number of thread heaps: one general heap
// that contains any type of object and a number of heaps specialized
// for specific object types (such as Node).
//
// Each thread heap contains the functionality to allocate new objects
// (potentially adding new pages to the heap), to find and mark
// objects during conservative stack scanning and to sweep the set of
// pages after a GC.
template<typename Header>
class ThreadHeap : public BaseHeap {
public:
    ThreadHeap(ThreadState*, int);
    virtual ~ThreadHeap();
    virtual void cleanupPages() override;

    virtual BaseHeapPage* heapPageFromAddress(Address) override;
#if ENABLE(GC_PROFILE_MARKING)
    virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) override;
#endif
#if ENABLE(GC_PROFILE_HEAP)
    virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override;
#endif

    virtual void sweep(HeapStats*) override;
    virtual void postSweepProcessing() override;

    virtual void clearFreeLists() override;
    virtual void clearLiveAndMarkDead() override;

    virtual void makeConsistentForSweeping() override;
#if ENABLE(ASSERT)
    virtual bool isConsistentForSweeping() override;
#endif
    virtual void getStatsForTesting(HeapStats&) override;

    virtual void updateRemainingAllocationSize() override;

    ThreadState* threadState() { return m_threadState; }
    HeapStats& stats() { return m_threadState->stats(); }

    inline Address allocate(size_t, const GCInfo*);
    void addToFreeList(Address, size_t);
    inline static size_t roundedAllocationSize(size_t size)
    {
        return allocationSizeFromSize(size) - sizeof(Header);
    }

    virtual void prepareHeapForTermination() override;

    virtual int normalPageCount() override { return m_numberOfNormalPages; }

    virtual PassOwnPtr<BaseHeap> split(int numberOfNormalPages) override;
    virtual void merge(PassOwnPtr<BaseHeap> splitOffBase) override;

    void removePageFromHeap(HeapPage<Header>*);

    PLATFORM_EXPORT void promptlyFreeObject(Header*);

private:
    void addPageToHeap(const GCInfo*);
    PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*);
    static size_t allocationSizeFromSize(size_t);
    PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*);
    Address currentAllocationPoint() const { return m_currentAllocationPoint; }
    size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
    bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); }
    void setAllocationPoint(Address point, size_t size)
    {
        ASSERT(!point || heapPageFromAddress(point));
        ASSERT(size <= HeapPage<Header>::payloadSize());
        updateRemainingAllocationSize();
        m_currentAllocationPoint = point;
        m_lastRemainingAllocationSize = m_remainingAllocationSize = size;
    }
    void ensureCurrentAllocation(size_t, const GCInfo*);
    bool allocateFromFreeList(size_t);

    void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**);
    void allocatePage(const GCInfo*);

#if ENABLE(ASSERT)
    bool pagesToBeSweptContains(Address);
    bool pagesAllocatedDuringSweepingContains(Address);
#endif

    void sweepNormalPages(HeapStats*);
    void sweepLargePages(HeapStats*);
    bool coalesce(size_t);

    Address m_currentAllocationPoint;
    size_t m_remainingAllocationSize;
    size_t m_lastRemainingAllocationSize;

    HeapPage<Header>* m_firstPage;
    LargeHeapObject<Header>* m_firstLargeHeapObject;

    HeapPage<Header>* m_firstPageAllocatedDuringSweeping;
    HeapPage<Header>* m_lastPageAllocatedDuringSweeping;

    // Merge point for parallel sweep.
    HeapPage<Header>* m_mergePoint;

    int m_biggestFreeListIndex;

    ThreadState* m_threadState;

    // All FreeListEntries in the nth list have size >= 2^n.
    FreeListEntry* m_freeLists[blinkPageSizeLog2];
    FreeListEntry* m_lastFreeListEntries[blinkPageSizeLog2];

    // Index into the page pools. This is used to ensure that the pages of the
    // same type go into the correct page pool and thus avoid type confusion.
    int m_index;

    int m_numberOfNormalPages;

    // The promptly freed count contains the number of promptly freed objects
    // since the last sweep or since it was manually reset to delay coalescing.
    size_t m_promptlyFreedCount;
};

class PLATFORM_EXPORT Heap {
public:
    static void init();
    static void shutdown();
    static void doShutdown();

    static BaseHeapPage* contains(Address);
    static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); }
    static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); }
#if ENABLE(ASSERT)
    static bool containedInHeapOrOrphanedPage(void*);
#endif

    // Push a trace callback on the marking stack.
    static void pushTraceCallback(CallbackStack*, void* containerObject, TraceCallback);

    // Push a trace callback on the post-marking callback stack. These callbacks
    // are called after normal marking (including ephemeron iteration).
    static void pushPostMarkingCallback(void*, TraceCallback);

    // Add a weak pointer callback to the weak callback work list. General
    // object pointer callbacks are added to a thread local weak callback work
    // list and the callback is called on the thread that owns the object, with
    // the closure pointer as an argument. Most of the time, the closure and
    // the containerObject can be the same thing, but the containerObject is
    // constrained to be on the heap, since the heap is used to identify the
    // correct thread.
    static void pushWeakObjectPointerCallback(void* closure, void* containerObject, WeakPointerCallback);

    // Similar to the more general pushWeakObjectPointerCallback, but cell
    // pointer callbacks are added to a static callback work list and the weak
    // callback is performed on the thread performing garbage collection. This
    // is OK because cells are just cleared and no deallocation can happen.
    static void pushWeakCellPointerCallback(void** cell, WeakPointerCallback);

    // Pop the top of a marking stack and call the callback with the visitor
    // and the object. Returns false when there is nothing more to do.
    template<CallbackInvocationMode Mode> static bool popAndInvokeTraceCallback(CallbackStack*, Visitor*);

    // Remove an item from the post-marking callback stack and call
    // the callback with the visitor and the object pointer. Returns
    // false when there is nothing more to do.
    static bool popAndInvokePostMarkingCallback(Visitor*);

    // Remove an item from the weak callback work list and call the callback
    // with the visitor and the closure pointer. Returns false when there is
    // nothing more to do.
    static bool popAndInvokeWeakPointerCallback(Visitor*);

    // Register an ephemeron table for fixed-point iteration.
    static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback);
#if ENABLE(ASSERT)
    static bool weakTableRegistered(const void*);
#endif

    template<typename T, typename HeapTraits> static Address allocate(size_t);
    // FIXME: remove this once c++11 is allowed everywhere:
    template<typename T> static Address allocate(size_t);

    template<typename T> static Address reallocate(void* previous, size_t);

    static void collectGarbage(ThreadState::StackState, ThreadState::CauseOfGC = ThreadState::NormalGC);
    static void collectGarbageForTerminatingThread(ThreadState*);
    static void collectAllGarbage();
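
    // Illustrative usage (not a new API; the StackState values are defined on
    // ThreadState): a precise collection can be requested from a safe point as
    //   Heap::collectGarbage(ThreadState::NoHeapPointersOnStack);
    // whereas conservative collection passes ThreadState::HeapPointersOnStack.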
    static void processMarkingStackEntries(int*);
    static void processMarkingStackOnMultipleThreads();
    static void processMarkingStackInParallel();
    template<CallbackInvocationMode Mode> static void processMarkingStack();
    static void postMarkingProcessing();
    static void globalWeakProcessing();
    static void setForcePreciseGCForTesting();

    static void prepareForGC();

    // Conservatively checks whether an address is a pointer in any of the thread
    // heaps. If so marks the object pointed to as live.
    static Address checkAndMarkPointer(Visitor*, Address);

#if ENABLE(GC_PROFILE_MARKING)
    // Dump the path to specified object on the next GC. This method is to be invoked from GDB.
    static void dumpPathToObjectOnNextGC(void* p);

    // Forcibly find GCInfo of the object at Address.
    // This is slow and should only be used for debug purposes.
    // It involves finding the heap page and scanning the heap page for an object header.
    static const GCInfo* findGCInfo(Address);

    static String createBacktraceString();
#endif

    // Collect heap stats for all threads attached to the Blink
    // garbage collector. Should only be called during garbage
    // collection where threads are known to be at safe points.
    static void getStats(HeapStats*);

    static void getStatsForTesting(HeapStats*);

    static void getHeapSpaceSize(uint64_t*, uint64_t*);

    static void makeConsistentForSweeping();
#if ENABLE(ASSERT)
    static bool isConsistentForSweeping();
#endif
    static void flushHeapDoesNotContainCache();

    // Return true if the last GC found a pointer into a heap page
    // during conservative scanning.
    static bool lastGCWasConservative() { return s_lastGCWasConservative; }

    static FreePagePool* freePagePool() { return s_freePagePool; }
    static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; }

    // This look-up uses the region search tree and a negative contains cache to
    // provide an efficient mapping from arbitrary addresses to the containing
    // heap-page if one exists.
    static BaseHeapPage* lookup(Address);
    static void addPageMemoryRegion(PageMemoryRegion*);
    static void removePageMemoryRegion(PageMemoryRegion*);

private:
    // A RegionTree is a simple binary search tree of PageMemoryRegions sorted
    // by base addresses.
    class RegionTree {
    public:
        explicit RegionTree(PageMemoryRegion* region) : m_region(region), m_left(0), m_right(0) { }

        PageMemoryRegion* lookup(Address);
        static void add(RegionTree*, RegionTree**);
        static void remove(PageMemoryRegion*, RegionTree**);

    private:
        PageMemoryRegion* m_region;
        RegionTree* m_left;
        RegionTree* m_right;
    };

    static Visitor* s_markingVisitor;
    static Vector<OwnPtr<WebThread>>* s_markingThreads;
    static CallbackStack* s_markingStack;
    static CallbackStack* s_postMarkingCallbackStack;
    static CallbackStack* s_weakCallbackStack;
    static CallbackStack* s_ephemeronStack;
    static HeapDoesNotContainCache* s_heapDoesNotContainCache;
    static bool s_shutdownCalled;
    static bool s_lastGCWasConservative;
    static FreePagePool* s_freePagePool;
    static OrphanedPagePool* s_orphanedPagePool;
    static RegionTree* s_regionTree;

    friend class ThreadState;
};

// The NoAllocationScope class is used in debug mode to catch unwanted
// allocations. E.g. allocations during GC.
template<ThreadAffinity Affinity>
class NoAllocationScope {
public:
    NoAllocationScope() : m_active(true) { enter(); }

    explicit NoAllocationScope(bool active) : m_active(active) { enter(); }

    NoAllocationScope(const NoAllocationScope& other) : m_active(other.m_active) { enter(); }

    NoAllocationScope& operator=(const NoAllocationScope& other)
    {
        release();
        m_active = other.m_active;
        enter();
        return *this;
    }

    ~NoAllocationScope() { release(); }

    void release()
    {
        if (m_active) {
            ThreadStateFor<Affinity>::state()->leaveNoAllocationScope();
            m_active = false;
        }
    }

private:
    void enter() const
    {
        if (m_active)
            ThreadStateFor<Affinity>::state()->enterNoAllocationScope();
    }

    bool m_active;
};

// Base class for objects allocated in the Blink garbage-collected
// heap.
//
// Defines a 'new' operator that allocates the memory in the
// heap. 'delete' should not be called on objects that inherit from
// GarbageCollected.
//
// Instances of GarbageCollected will *NOT* get finalized. Their
// destructor will not be called. Therefore, only classes that have
// trivial destructors with no semantic meaning (including all their
// subclasses) should inherit from GarbageCollected. If there are
// non-trivial destructors in a given class or any of its subclasses,
// GarbageCollectedFinalized should be used, which guarantees that the
// destructor is called on an instance when the garbage collector
// determines that it is no longer reachable.
template<typename T>
class GarbageCollected {
    WTF_MAKE_NONCOPYABLE(GarbageCollected);

    // For now direct allocation of arrays on the heap is not allowed.
    void* operator new[](size_t size);
#if OS(WIN) && COMPILER(MSVC)
protected:
    // Due to some quirkiness in the MSVC compiler we have to provide
    // the delete[] operator in the GarbageCollected subclasses as it
    // is called when a class is exported in a DLL.
    void operator delete[](void* p)
    {
        ASSERT_NOT_REACHED();
    }
#else
    void operator delete[](void* p);
#endif
public:
    typedef T GarbageCollectedBase;

    void* operator new(size_t size)
    {
        return Heap::allocate<T>(size);
    }

    void operator delete(void* p)
    {
        ASSERT_NOT_REACHED();
    }

protected:
    GarbageCollected() { }
};
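
// A minimal usage sketch (illustrative; MyNode is hypothetical, and Member<>
// and the trace infrastructure are declared in the companion Oilpan headers):
//
//   class MyNode : public GarbageCollected<MyNode> {
//   public:
//       void trace(Visitor* visitor) { visitor->trace(m_next); }
//   private:
//       Member<MyNode> m_next;
//   };
//
//   MyNode* node = new MyNode; // routed through Heap::allocate<MyNode>.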

// Base class for objects allocated in the Blink garbage-collected
// heap.
//
// Defines a 'new' operator that allocates the memory in the
// heap. 'delete' should not be called on objects that inherit from
// GarbageCollected.
//
// Instances of GarbageCollectedFinalized will have their destructor
// called when the garbage collector determines that the object is no
// longer reachable.
template<typename T>
class GarbageCollectedFinalized : public GarbageCollected<T> {
    WTF_MAKE_NONCOPYABLE(GarbageCollectedFinalized);

protected:
    // finalizeGarbageCollectedObject is called when the object is
    // freed from the heap. By default finalization means calling the
    // destructor on the object. finalizeGarbageCollectedObject can be
    // overridden to support calling the destructor of a
    // subclass. This is useful for objects without vtables that
    // require explicit dispatching. The name is intentionally a bit
    // long to make name conflicts less likely.
    void finalizeGarbageCollectedObject()
    {
        static_cast<T*>(this)->~T();
    }

    GarbageCollectedFinalized() { }
    ~GarbageCollectedFinalized() { }

    template<typename U> friend struct HasFinalizer;
    template<typename U, bool> friend struct FinalizerTraitImpl;
};

// Base class for objects that are in the Blink garbage-collected heap
// and are still reference counted.
//
// This class should be used sparingly and only to gradually move
// objects from being reference counted to being managed by the Blink
// garbage collector.
//
// While the current reference counting keeps one of these objects
// alive it will have a Persistent handle to itself allocated so we
// will not reclaim the memory. When the reference count reaches 0 the
// persistent handle will be deleted. When the garbage collector
// determines that there are no other references to the object it will
// be reclaimed and the destructor of the reclaimed object will be
// called at that time.
template<typename T>
class RefCountedGarbageCollected : public GarbageCollectedFinalized<T> {
    WTF_MAKE_NONCOPYABLE(RefCountedGarbageCollected);

public:
    RefCountedGarbageCollected()
        : m_refCount(1)
    {
        makeKeepAlive();
    }

    // Implement method to increase reference count for use with
    // RefPtrs.
    //
    // In contrast to the normal WTF::RefCounted, the reference count
    // can reach 0 and increase again. This happens in the following
    // scenario:
    //
    // (1) The reference count becomes 0, but members, persistents, or
    //     on-stack pointers keep references to the object.
    //
    // (2) The pointer is assigned to a RefPtr again and the reference
    //     count becomes 1.
    //
    // In this case, we have to resurrect m_keepAlive.
    void ref()
    {
        if (UNLIKELY(!m_refCount)) {
            ASSERT(ThreadStateFor<ThreadingTrait<T>::Affinity>::state()->contains(reinterpret_cast<Address>(this)));
            makeKeepAlive();
        }
        ++m_refCount;
    }

    // Implement method to decrease reference count for use with
    // RefPtrs.
    //
    // In contrast to the normal WTF::RefCounted implementation, the
    // object itself is not deleted when the reference count reaches
    // 0. Instead, the keep-alive persistent handle is deallocated so
    // that the object can be reclaimed when the garbage collector
    // determines that there are no other references to the object.
    void deref()
    {
        ASSERT(m_refCount > 0);
        if (!--m_refCount) {
            delete m_keepAlive;
            m_keepAlive = 0;
        }
    }

    bool hasOneRef()
    {
        return m_refCount == 1;
    }

protected:
    ~RefCountedGarbageCollected() { }

private:
    void makeKeepAlive()
    {
        ASSERT(!m_keepAlive);
        m_keepAlive = new Persistent<T>(static_cast<T*>(this));
    }

    int m_refCount;
    Persistent<T>* m_keepAlive;
};

// Classes that contain heap references but aren't themselves heap
// allocated, have some extra macros available which allow their use
// to be restricted to cases where the garbage collector is able
// to discover their heap references.
//
// STACK_ALLOCATED(): Use if the object is only stack allocated. Heap objects
// should be in Members but you do not need the trace method as they are on
// the stack. (Down the line these might turn in to raw pointers, but for
// now Members indicate that we have thought about them and explicitly
// taken care of them.)
//
// DISALLOW_ALLOCATION(): Cannot be allocated with new operators but can
// be a part object. If it has Members you need a trace method and the
// containing object needs to call that trace method.
//
// ALLOW_ONLY_INLINE_ALLOCATION(): Allows only placement new operator.
// This disallows general allocation of this object but allows putting
// the object as a value object in collections. If these have Members you
// need to have a trace method. That trace method will be called
// automatically by the Heap collections.

#if COMPILER_SUPPORTS(CXX_DELETED_FUNCTIONS)
#define DISALLOW_ALLOCATION() \
    private: \
        void* operator new(size_t) = delete; \
        void* operator new(size_t, NotNullTag, void*) = delete; \
        void* operator new(size_t, void*) = delete;

#define ALLOW_ONLY_INLINE_ALLOCATION() \
    public: \
        void* operator new(size_t, NotNullTag, void* location) { return location; } \
        void* operator new(size_t, void* location) { return location; } \
    private: \
        void* operator new(size_t) = delete;

#define STATIC_ONLY(Type) \
    private: \
        Type() = delete;

#else

#define DISALLOW_ALLOCATION() \
    private: \
        void* operator new(size_t); \
        void* operator new(size_t, NotNullTag, void*); \
        void* operator new(size_t, void*)

#define ALLOW_ONLY_INLINE_ALLOCATION() \
    public: \
        void* operator new(size_t, NotNullTag, void* location) { return location; } \
        void* operator new(size_t, void* location) { return location; } \
    private: \
        void* operator new(size_t);

#define STATIC_ONLY(Type) \
    private: \
        Type();

#endif

// These macros insert annotations that the Blink GC plugin for clang uses for
// verification. STACK_ALLOCATED is used to declare that objects of this type
// are always stack allocated. GC_PLUGIN_IGNORE is used to make the plugin
// ignore a particular class or field when checking for proper usage. When using
// GC_PLUGIN_IGNORE a bug-number should be provided as an argument where the
// bug describes what needs to happen to remove the GC_PLUGIN_IGNORE again.
#if COMPILER(CLANG)
#define STACK_ALLOCATED() \
    private: \
        __attribute__((annotate("blink_stack_allocated"))) \
        void* operator new(size_t) = delete; \
        void* operator new(size_t, NotNullTag, void*) = delete; \
        void* operator new(size_t, void*) = delete;

#define GC_PLUGIN_IGNORE(bug) \
    __attribute__((annotate("blink_gc_plugin_ignore")))
#else
#define STACK_ALLOCATED() DISALLOW_ALLOCATION()
#define GC_PLUGIN_IGNORE(bug)
#endif
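
// Illustrative usage of the macros above (PaintScope is hypothetical):
//
//   class PaintScope {
//       STACK_ALLOCATED();
//   public:
//       Member<MyNode> m_node; // Found by conservative stack scanning.
//   };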

NO_SANITIZE_ADDRESS
void HeapObjectHeader::checkHeader() const
{
#if ENABLE(ASSERT)
    BaseHeapPage* page = pageHeaderFromObject(this);
    ASSERT(page->orphaned() || m_magic == magic);
#endif
}

Address HeapObjectHeader::payload()
{
    return reinterpret_cast<Address>(this) + objectHeaderSize;
}

size_t HeapObjectHeader::payloadSize()
{
    return size() - objectHeaderSize;
}

Address HeapObjectHeader::payloadEnd()
{
    return reinterpret_cast<Address>(this) + size();
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::mark()
{
    checkHeader();
    // The use of atomic ops guarantees that the reads and writes are
    // atomic and that no memory operation reorderings take place.
    // Multiple threads can still read the old value and all store the
    // new value. However, the new value will be the same for all of
    // the threads and the end result is therefore consistent.
    unsigned size = asanUnsafeAcquireLoad(&m_size);
    asanUnsafeReleaseStore(&m_size, size | markBitMask);
}

Address FinalizedHeapObjectHeader::payload()
{
    return reinterpret_cast<Address>(this) + finalizedHeaderSize;
}

size_t FinalizedHeapObjectHeader::payloadSize()
{
    return size() - finalizedHeaderSize;
}

template<typename Header>
size_t ThreadHeap<Header>::allocationSizeFromSize(size_t size)
{
    // Check the size before computing the actual allocation size. The
    // allocation size calculation can overflow for large sizes and
    // the check therefore has to happen before any calculation on the
    // size.
    RELEASE_ASSERT(size < maxHeapObjectSize);

    // Add space for header.
    size_t allocationSize = size + sizeof(Header);
    // Align size with allocation granularity.
    allocationSize = (allocationSize + allocationMask) & ~allocationMask;
    return allocationSize;
}
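
// Illustrative arithmetic: a request for 20 bytes with an 8-byte header
// becomes 20 + 8 = 28 bytes, which rounds up to (28 + 7) & ~7 == 32 bytes.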

template<typename Header>
Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo)
{
    size_t allocationSize = allocationSizeFromSize(size);
    if (LIKELY(allocationSize <= m_remainingAllocationSize)) {
        Address headerAddress = m_currentAllocationPoint;
        m_currentAllocationPoint += allocationSize;
        m_remainingAllocationSize -= allocationSize;
        Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo);
        Address result = headerAddress + sizeof(*header);
        ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));

        // Unpoison the memory used for the object (payload).
        ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(Header));
#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
        memset(result, 0, allocationSize - sizeof(Header));
#endif
        ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1));
        return result;
    }
    return outOfLineAllocate(size, gcInfo);
}

template<typename T, typename HeapTraits>
Address Heap::allocate(size_t size)
{
    ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
    ASSERT(state->isAllocationAllowed());
    const GCInfo* gcInfo = GCInfoTrait<T>::get();
    int heapIndex = HeapTraits::index(gcInfo->hasFinalizer(), size);
    BaseHeap* heap = state->heap(heapIndex);
    return static_cast<typename HeapTraits::HeapType*>(heap)->allocate(size, gcInfo);
}

template<typename T>
Address Heap::allocate(size_t size)
{
    return allocate<T, HeapTypeTrait<T> >(size);
}
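
// Illustrative: user code does not normally call Heap::allocate directly;
// the operator new defined on GarbageCollected routes through it, so
//   MyNode* node = new MyNode;
// ends up in ThreadHeap<Header>::allocate for the appropriate typed heap.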

template<typename T>
Address Heap::reallocate(void* previous, size_t size)
{
    if (!size) {
        // If the new size is 0 this is equivalent to either
        // free(previous) or malloc(0). In both cases we do
        // nothing and return 0.
        return 0;
    }
    ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
    ASSERT(state->isAllocationAllowed());
    const GCInfo* gcInfo = GCInfoTrait<T>::get();
    int heapIndex = HeapTypeTrait<T>::index(gcInfo->hasFinalizer(), size);
    // FIXME: Currently only supports raw allocation on the
    // GeneralHeap. Hence we assume the header is a
    // FinalizedHeapObjectHeader.
    ASSERT((General1Heap <= heapIndex && heapIndex <= General4Heap)
        || (General1HeapNonFinalized <= heapIndex && heapIndex <= General4HeapNonFinalized));
    BaseHeap* heap = state->heap(heapIndex);
    Address address = static_cast<typename HeapTypeTrait<T>::HeapType*>(heap)->allocate(size, gcInfo);
    if (!previous) {
        // This is equivalent to malloc(size).
        return address;
    }
    FinalizedHeapObjectHeader* previousHeader = FinalizedHeapObjectHeader::fromPayload(previous);
    ASSERT(!previousHeader->hasFinalizer());
    ASSERT(previousHeader->gcInfo() == gcInfo);
    size_t copySize = previousHeader->payloadSize();
    if (copySize > size)
        copySize = size;
    memcpy(address, previous, copySize);
    return address;
}

class HeapAllocatorQuantizer {
public:
    template<typename T>
    static size_t quantizedSize(size_t count)
    {
        RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T));
        return HeapIndexTrait<CollectionBackingHeap>::HeapType::roundedAllocationSize(count * sizeof(T));
    }
    static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize;
};

// This is a static-only class used as a trait on collections to make them heap allocated.
// However see also HeapListHashSetAllocator.
class HeapAllocator {
public:
    typedef HeapAllocatorQuantizer Quantizer;
    typedef blink::Visitor Visitor;
    static const bool isGarbageCollected = true;

    template <typename Return, typename Metadata>
    static Return backingMalloc(size_t size)
    {
        return reinterpret_cast<Return>(Heap::allocate<Metadata, HeapIndexTrait<CollectionBackingHeap> >(size));
    }
    template <typename Return, typename Metadata>
    static Return zeroedBackingMalloc(size_t size)
    {
        return backingMalloc<Return, Metadata>(size);
    }
    template <typename Return, typename Metadata>
    static Return malloc(size_t size)
    {
        return reinterpret_cast<Return>(Heap::allocate<Metadata>(size));
    }
    PLATFORM_EXPORT static void backingFree(void* address);

    static void free(void* address) { }
    template<typename T>
    static void* newArray(size_t bytes)
    {
        ASSERT_NOT_REACHED();
        return 0;
    }

    static void deleteArray(void* ptr)
    {
        ASSERT_NOT_REACHED();
    }

    static bool isAllocationAllowed()
    {
        return ThreadState::current()->isAllocationAllowed();
    }

    static void markUsingGCInfo(Visitor* visitor, const void* buffer)
    {
        visitor->mark(buffer, FinalizedHeapObjectHeader::fromPayload(buffer)->traceCallback());
    }

    static void markNoTracing(Visitor* visitor, const void* t) { visitor->markNoTracing(t); }

    template<typename T, typename Traits>
    static void trace(Visitor* visitor, T& t)
    {
        CollectionBackingTraceTrait<WTF::ShouldBeTraced<Traits>::value, Traits::weakHandlingFlag, WTF::WeakPointersActWeak, T, Traits>::trace(visitor, t);
    }

    static void registerDelayedMarkNoTracing(Visitor* visitor, const void* object)
    {
        visitor->registerDelayedMarkNoTracing(object);
    }

    static void registerWeakMembers(Visitor* visitor, const void* closure, const void* object, WeakPointerCallback callback)
    {
        visitor->registerWeakMembers(closure, object, callback);
    }

    static void registerWeakTable(Visitor* visitor, const void* closure, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
    {
        visitor->registerWeakTable(closure, iterationCallback, iterationDoneCallback);
    }

#if ENABLE(ASSERT)
    static bool weakTableRegistered(Visitor* visitor, const void* closure)
    {
        return visitor->weakTableRegistered(closure);
    }
#endif

    template<typename T>
    struct ResultType {
        typedef T* Type;
    };

    // The WTF classes use Allocator::VectorBackingHelper in order to find a
    // class to template their backing allocation operation on. For off-heap
    // allocations the VectorBackingHelper is a dummy class, since the class is
    // not used during allocation of backing. For on-heap allocations this
    // typedef ensures that the allocation is done with the correct templated
    // instantiation of the allocation function. This ensures the correct GC
    // map is written when backing allocations take place.
    template<typename T, typename Traits>
    struct VectorBackingHelper {
        typedef HeapVectorBacking<T, Traits> Type;
    };

    // Like the VectorBackingHelper, but this type is used for HashSet and
    // HashMap, both of which are implemented using HashTable.
    template<typename Table>
    struct HashTableBackingHelper {
        typedef HeapHashTableBacking<Table> Type;
    };

    template<typename T>
    struct OtherType {
        typedef T* Type;
    };

    template<typename T>
    static T& getOther(T* other)
    {
        return *other;
    }

    static void enterNoAllocationScope()
    {
#if ENABLE(ASSERT)
        ThreadStateFor<AnyThread>::state()->enterNoAllocationScope();
#endif
    }

    static void leaveNoAllocationScope()
    {
#if ENABLE(ASSERT)
        ThreadStateFor<AnyThread>::state()->leaveNoAllocationScope();
#endif
    }

private:
    template<typename T, size_t u, typename V> friend class WTF::Vector;
    template<typename T, typename U, typename V, typename W> friend class WTF::HashSet;
    template<typename T, typename U, typename V, typename W, typename X, typename Y> friend class WTF::HashMap;
};

template<typename Value>
static void traceListHashSetValue(Visitor* visitor, Value& value)
{
    // We use the default hash traits for the value in the node, because
    // ListHashSet does not let you specify any specific ones.
    // We don't allow ListHashSet of WeakMember, so we set that one false
    // (there's an assert elsewhere), but we have to specify some value for the
    // strongify template argument, so we specify WTF::WeakPointersActWeak,
    // arbitrarily.
    CollectionBackingTraceTrait<WTF::ShouldBeTraced<WTF::HashTraits<Value> >::value, WTF::NoWeakHandlingInCollections, WTF::WeakPointersActWeak, Value, WTF::HashTraits<Value> >::trace(visitor, value);
}

// The inline capacity is just a dummy template argument to match the off-heap
// allocator.
// This inherits from the static-only HeapAllocator trait class, but we do
// declare pointers to instances. These pointers are always null, and no
// objects are instantiated.
template<typename ValueArg, size_t inlineCapacity>
struct HeapListHashSetAllocator : public HeapAllocator {
    typedef HeapAllocator TableAllocator;
    typedef WTF::ListHashSetNode<ValueArg, HeapListHashSetAllocator> Node;

public:
    class AllocatorProvider {
    public:
        // For the heap allocation we don't need an actual allocator object, so we just
        // return null.
        HeapListHashSetAllocator* get() const { return 0; }

        // No allocator object is needed.
        void createAllocatorIfNeeded() { }

        // There is no allocator object in the HeapListHashSet (unlike in
        // the regular ListHashSet) so there is nothing to swap.
        void swap(AllocatorProvider& other) { }
    };

    void deallocate(void* dummy) { }

    // This is not a static method even though it could be, because it
    // needs to match the one that the (off-heap) ListHashSetAllocator
    // has. The 'this' pointer will always be null.
    void* allocateNode()
    {
        COMPILE_ASSERT(!WTF::IsWeak<ValueArg>::value, WeakPointersInAListHashSetWillJustResultInNullEntriesInTheSetThatsNotWhatYouWantConsiderUsingLinkedHashSetInstead);
        return malloc<void*, Node>(sizeof(Node));
    }

    static void traceValue(Visitor* visitor, Node* node)
    {
        traceListHashSetValue(visitor, node->m_value);
    }
};

// FIXME: These should just be template aliases:
//
// template<typename T, size_t inlineCapacity = 0>
// using HeapVector = Vector<T, inlineCapacity, HeapAllocator>;
//
// as soon as all the compilers we care about support that.
// MSVC supports it only in MSVC 2013.
template<
    typename KeyArg,
    typename MappedArg,
    typename HashArg = typename DefaultHash<KeyArg>::Hash,
    typename KeyTraitsArg = HashTraits<KeyArg>,
    typename MappedTraitsArg = HashTraits<MappedArg> >
class HeapHashMap : public HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg, HeapAllocator> { };

template<
    typename ValueArg,
    typename HashArg = typename DefaultHash<ValueArg>::Hash,
    typename TraitsArg = HashTraits<ValueArg> >
class HeapHashSet : public HashSet<ValueArg, HashArg, TraitsArg, HeapAllocator> { };

template<
    typename ValueArg,
    typename HashArg = typename DefaultHash<ValueArg>::Hash,
    typename TraitsArg = HashTraits<ValueArg> >
class HeapLinkedHashSet : public LinkedHashSet<ValueArg, HashArg, TraitsArg, HeapAllocator> { };

template<
    typename ValueArg,
    size_t inlineCapacity = 0, // The inlineCapacity is just a dummy to match ListHashSet (off-heap).
    typename HashArg = typename DefaultHash<ValueArg>::Hash>
class HeapListHashSet : public ListHashSet<ValueArg, inlineCapacity, HashArg, HeapListHashSetAllocator<ValueArg, inlineCapacity> > { };

template<
    typename Value,
    typename HashFunctions = typename DefaultHash<Value>::Hash,
    typename Traits = HashTraits<Value> >
class HeapHashCountedSet : public HashCountedSet<Value, HashFunctions, Traits, HeapAllocator> { };
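
// Illustrative usage (MyNode as in the sketch above): the heap collections
// pair with Member<> so that the garbage collector traces their contents:
//   HeapVector<Member<MyNode> > nodes;
//   HeapHashSet<Member<MyNode> > nodeSet;
//   HeapHashMap<int, Member<MyNode> > nodeById;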

template<typename T, size_t inlineCapacity = 0>
class HeapVector : public Vector<T, inlineCapacity, HeapAllocator> {
public:
    HeapVector() { }

    explicit HeapVector(size_t size) : Vector<T, inlineCapacity, HeapAllocator>(size)
    {
    }

    HeapVector(size_t size, const T& val) : Vector<T, inlineCapacity, HeapAllocator>(size, val)
    {
    }

    template<size_t otherCapacity>
    HeapVector(const HeapVector<T, otherCapacity>& other)
        : Vector<T, inlineCapacity, HeapAllocator>(other)
    {
    }

    template<typename U>
    void append(const U& other)
    {
        Vector<T, inlineCapacity, HeapAllocator>::append(other);
    }

    template<typename U, size_t otherCapacity>
    void appendVector(const HeapVector<U, otherCapacity>& other)
    {
        const Vector<U, otherCapacity, HeapAllocator>& otherVector = other;
        Vector<T, inlineCapacity, HeapAllocator>::appendVector(otherVector);
    }
};

template<typename T, size_t inlineCapacity = 0>
class HeapDeque : public Deque<T, inlineCapacity, HeapAllocator> {
public:
    HeapDeque() { }

    explicit HeapDeque(size_t size) : Deque<T, inlineCapacity, HeapAllocator>(size)
    {
    }

    HeapDeque(size_t size, const T& val) : Deque<T, inlineCapacity, HeapAllocator>(size, val)
    {
    }

    // FIXME: Doesn't work if there is an inline buffer, due to crbug.com/360572
    HeapDeque<T, 0>& operator=(const HeapDeque& other)
    {
        HeapDeque<T> copy(other);
        swap(copy);
        return *this;
    }

    // FIXME: Doesn't work if there is an inline buffer, due to crbug.com/360572
    inline void swap(HeapDeque& other)
    {
        Deque<T, inlineCapacity, HeapAllocator>::swap(other);
    }

    template<size_t otherCapacity>
    HeapDeque(const HeapDeque<T, otherCapacity>& other)
        : Deque<T, inlineCapacity, HeapAllocator>(other)
    {
    }

    template<typename U>
    void append(const U& other)
    {
        Deque<T, inlineCapacity, HeapAllocator>::append(other);
    }
};

template<typename T, size_t i>
inline void swap(HeapVector<T, i>& a, HeapVector<T, i>& b) { a.swap(b); }
template<typename T, size_t i>
inline void swap(HeapDeque<T, i>& a, HeapDeque<T, i>& b) { a.swap(b); }
template<typename T, typename U, typename V>
inline void swap(HeapHashSet<T, U, V>& a, HeapHashSet<T, U, V>& b) { a.swap(b); }
template<typename T, typename U, typename V, typename W, typename X>
inline void swap(HeapHashMap<T, U, V, W, X>& a, HeapHashMap<T, U, V, W, X>& b) { a.swap(b); }
template<typename T, size_t i, typename U>
inline void swap(HeapListHashSet<T, i, U>& a, HeapListHashSet<T, i, U>& b) { a.swap(b); }
template<typename T, typename U, typename V>
inline void swap(HeapLinkedHashSet<T, U, V>& a, HeapLinkedHashSet<T, U, V>& b) { a.swap(b); }
template<typename T, typename U, typename V>
inline void swap(HeapHashCountedSet<T, U, V>& a, HeapHashCountedSet<T, U, V>& b) { a.swap(b); }
template<typename T>
struct ThreadingTrait<Member<T> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename T>
struct ThreadingTrait<WeakMember<T> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename Key, typename Value, typename T, typename U, typename V>
struct ThreadingTrait<HashMap<Key, Value, T, U, V, HeapAllocator> > {
    static const ThreadAffinity Affinity =
        (ThreadingTrait<Key>::Affinity == MainThreadOnly)
        && (ThreadingTrait<Value>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
};

template<typename First, typename Second>
struct ThreadingTrait<WTF::KeyValuePair<First, Second> > {
    static const ThreadAffinity Affinity =
        (ThreadingTrait<First>::Affinity == MainThreadOnly)
        && (ThreadingTrait<Second>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
};

template<typename T, typename U, typename V>
struct ThreadingTrait<HashSet<T, U, V, HeapAllocator> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename T, size_t inlineCapacity>
struct ThreadingTrait<Vector<T, inlineCapacity, HeapAllocator> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename T, typename Traits>
struct ThreadingTrait<HeapVectorBacking<T, Traits> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename T, size_t inlineCapacity>
struct ThreadingTrait<Deque<T, inlineCapacity, HeapAllocator> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename T, typename U, typename V>
struct ThreadingTrait<HashCountedSet<T, U, V, HeapAllocator> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename Table>
struct ThreadingTrait<HeapHashTableBacking<Table> > {
    typedef typename Table::KeyType Key;
    typedef typename Table::ValueType Value;
    static const ThreadAffinity Affinity =
        (ThreadingTrait<Key>::Affinity == MainThreadOnly)
        && (ThreadingTrait<Value>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
};

template<typename T, typename U, typename V, typename W, typename X>
struct ThreadingTrait<HeapHashMap<T, U, V, W, X> > : public ThreadingTrait<HashMap<T, U, V, W, X, HeapAllocator> > { };

template<typename T, typename U, typename V>
struct ThreadingTrait<HeapHashSet<T, U, V> > : public ThreadingTrait<HashSet<T, U, V, HeapAllocator> > { };

template<typename T, size_t inlineCapacity>
struct ThreadingTrait<HeapVector<T, inlineCapacity> > : public ThreadingTrait<Vector<T, inlineCapacity, HeapAllocator> > { };

template<typename T, size_t inlineCapacity>
struct ThreadingTrait<HeapDeque<T, inlineCapacity> > : public ThreadingTrait<Deque<T, inlineCapacity, HeapAllocator> > { };

template<typename T, typename U, typename V>
struct ThreadingTrait<HeapHashCountedSet<T, U, V> > : public ThreadingTrait<HashCountedSet<T, U, V, HeapAllocator> > { };

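// Affinity composes through the traits above. For example (hypothetical
// GarbageCollected classes A and B): HeapHashMap<Member<A>, Member<B> > is
// MainThreadOnly only if both ThreadingTrait<A>::Affinity and
// ThreadingTrait<B>::Affinity are MainThreadOnly; if either is AnyThread the
// whole map widens to AnyThread. Single-parameter collections such as
// HeapVector<Member<A> > simply inherit ThreadingTrait<A>::Affinity.
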
// The standard implementation of GCInfoTrait<T>::get() just returns a static
// from the class T, but we can't do that for HashMap, HashSet, Vector, etc.
// because they are in WTF and know nothing of GCInfos. Instead we provide
// specializations of GCInfoTrait for these collection classes here.

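// For reference, the aggregate initializers in the specializations below
// fill in the GCInfo fields in this order: the trace callback, the
// finalization callback (0 when there is none), whether finalization is
// non-trivial, whether the type has a vtable, and, only under
// ENABLE(GC_PROFILING), a human-readable type name.
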
template<typename Key, typename Value, typename T, typename U, typename V>
struct GCInfoTrait<HashMap<Key, Value, T, U, V, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef HashMap<Key, Value, T, U, V, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            0,
            false, // HashMap needs no finalizer.
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T, typename U, typename V>
struct GCInfoTrait<HashSet<T, U, V, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef HashSet<T, U, V, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            0,
            false, // HashSet needs no finalizer.
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T, typename U, typename V>
struct GCInfoTrait<LinkedHashSet<T, U, V, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef LinkedHashSet<T, U, V, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            LinkedHashSet<T, U, V, HeapAllocator>::finalize,
            true, // Needs finalization. The anchor needs to unlink itself from the chain.
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename ValueArg, size_t inlineCapacity, typename U>
struct GCInfoTrait<ListHashSet<ValueArg, inlineCapacity, U, HeapListHashSetAllocator<ValueArg, inlineCapacity> > > {
    static const GCInfo* get()
    {
        typedef WTF::ListHashSet<ValueArg, inlineCapacity, U, HeapListHashSetAllocator<ValueArg, inlineCapacity> > TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            0,
            false, // ListHashSet needs no finalization though its backing might.
            false, // no vtable.
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T, typename Allocator>
struct GCInfoTrait<WTF::ListHashSetNode<T, Allocator> > {
    static const GCInfo* get()
    {
        typedef WTF::ListHashSetNode<T, Allocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            TargetType::finalize,
            WTF::HashTraits<T>::needsDestruction, // The node needs destruction if its data does.
            false, // no vtable.
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T>
struct GCInfoTrait<Vector<T, 0, HeapAllocator> > {
    static const GCInfo* get()
    {
#if ENABLE(GC_PROFILING)
        typedef Vector<T, 0, HeapAllocator> TargetType;
#endif
        static const GCInfo info = {
            TraceTrait<Vector<T, 0, HeapAllocator> >::trace,
            0,
            false, // Vector needs no finalizer if it has no inline capacity.
            WTF::IsPolymorphic<Vector<T, 0, HeapAllocator> >::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T, size_t inlineCapacity>
struct FinalizerTrait<Vector<T, inlineCapacity, HeapAllocator> > : public FinalizerTraitImpl<Vector<T, inlineCapacity, HeapAllocator>, true> { };

template<typename T, size_t inlineCapacity>
struct GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef Vector<T, inlineCapacity, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            FinalizerTrait<TargetType>::finalize,
            // Finalizer is needed to destruct things stored in the inline capacity.
            inlineCapacity && VectorTraits<T>::needsDestruction,
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T>
struct GCInfoTrait<Deque<T, 0, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef Deque<T, 0, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            0,
            false, // Deque needs no finalizer if it has no inline capacity.
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
    static const GCInfo info;
};

template<typename T, typename U, typename V>
struct GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef HashCountedSet<T, U, V, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            0,
            false, // HashCountedSet is just a HashTable, and needs no finalizer.
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
    static const GCInfo info;
};

template<typename T, size_t inlineCapacity>
struct FinalizerTrait<Deque<T, inlineCapacity, HeapAllocator> > : public FinalizerTraitImpl<Deque<T, inlineCapacity, HeapAllocator>, true> { };

template<typename T, size_t inlineCapacity>
struct GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef Deque<T, inlineCapacity, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            FinalizerTrait<TargetType>::finalize,
            // Finalizer is needed to destruct things stored in the inline capacity.
            inlineCapacity && VectorTraits<T>::needsDestruction,
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
    static const GCInfo info;
};

template<typename T, typename Traits>
struct GCInfoTrait<HeapVectorBacking<T, Traits> > {
    static const GCInfo* get()
    {
        typedef HeapVectorBacking<T, Traits> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            FinalizerTrait<TargetType>::finalize,
            Traits::needsDestruction,
            false, // We don't support embedded objects in HeapVectors with vtables.
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename Table>
struct GCInfoTrait<HeapHashTableBacking<Table> > {
    static const GCInfo* get()
    {
        typedef HeapHashTableBacking<Table> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            HeapHashTableBacking<Table>::finalize,
            Table::ValueTraits::needsDestruction,
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_PROFILING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

} // namespace blink

namespace WTF {

// Catch-all for types that have a way to trace that don't have special
// handling for weakness in collections. This means that if this type
// contains WeakMember fields, they will simply be zeroed, but the entry
// will not be removed from the collection. This always happens for
// things in vectors, which don't currently support special handling of
// weak elements.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, T, Traits> {
    static bool trace(blink::Visitor* visitor, T& t)
    {
        blink::TraceTrait<T>::trace(visitor, &t);
        return false;
    }
};

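// For example (hypothetical struct with a trace method):
//
//   struct Edge {
//       Member<Node> from;
//       WeakMember<Node> to;
//       void trace(blink::Visitor*);
//   };
//
// A HeapVector<Edge> is traced through this catch-all: every Edge entry is
// kept, and if the Node referenced by "to" dies, that WeakMember is zeroed
// in place rather than the Edge being removed from the vector.
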
// Catch-all for things that have HashTrait support for tracing with weakness.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
struct TraceInCollectionTrait<WeakHandlingInCollections, strongify, T, Traits> {
    static bool trace(blink::Visitor* visitor, T& t)
    {
        return Traits::traceInCollection(visitor, t, strongify);
    }
};

// Vector backing that needs marking. We don't support weak members in vectors.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, blink::HeapVectorBacking<T, Traits>, void> {
    static bool trace(blink::Visitor* visitor, void* self)
    {
        // The allocator can oversize the allocation a little, according to
        // the allocation granularity. The extra size is included in the
        // payloadSize call below, since there is nowhere to store the
        // originally allocated memory. This assert ensures that visiting the
        // last bit of memory can't cause trouble.
        COMPILE_ASSERT(!ShouldBeTraced<Traits>::value || sizeof(T) > blink::allocationGranularity || Traits::canInitializeWithMemset, HeapOverallocationCanCauseSpuriousVisits);

        T* array = reinterpret_cast<T*>(self);
        blink::FinalizedHeapObjectHeader* header = blink::FinalizedHeapObjectHeader::fromPayload(self);
        // Use the payload size as recorded by the heap to determine how many
        // elements to mark.
        size_t length = header->payloadSize() / sizeof(T);
        for (size_t i = 0; i < length; i++)
            blink::CollectionBackingTraceTrait<ShouldBeTraced<Traits>::value, Traits::weakHandlingFlag, WeakPointersActStrong, T, Traits>::trace(visitor, array[i]);
        return false;
    }
};

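// Worked example of the over-allocation hazard the COMPILE_ASSERT above
// guards against (illustrative numbers): with an 8 byte allocation
// granularity and sizeof(T) == 4, a request for 9 elements (36 bytes) can be
// rounded up to 40 bytes, so payloadSize() / sizeof(T) == 10 and one element
// slot that was never constructed gets visited. That is only safe when T can
// be initialized with memset (the spurious slot then reads as zeroed memory)
// or when sizeof(T) exceeds the granularity, in which case no whole spurious
// element fits in the padding.
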
// Almost all hash table backings are visited with this specialization.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename Table>
struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, blink::HeapHashTableBacking<Table>, void> {
    typedef typename Table::ValueType Value;
    typedef typename Table::ValueTraits Traits;
    static bool trace(blink::Visitor* visitor, void* self)
    {
        Value* array = reinterpret_cast<Value*>(self);
        blink::FinalizedHeapObjectHeader* header = blink::FinalizedHeapObjectHeader::fromPayload(self);
        size_t length = header->payloadSize() / sizeof(Value);
        for (size_t i = 0; i < length; i++) {
            if (!HashTableHelper<Value, typename Table::ExtractorType, typename Table::KeyTraitsType>::isEmptyOrDeletedBucket(array[i]))
                blink::CollectionBackingTraceTrait<ShouldBeTraced<Traits>::value, Traits::weakHandlingFlag, strongify, Value, Traits>::trace(visitor, array[i]);
        }
        return false;
    }
};

// This specialization of TraceInCollectionTrait is for the backing of
// HeapListHashSet. This is for the case that we find a reference to the
// backing from the stack. That probably means we have a GC while we are in a
// ListHashSet method since normal API use does not put pointers to the backing
// on the stack.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename NodeContents, size_t inlineCapacity, typename T, typename U, typename V, typename W, typename X, typename Y>
struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, blink::HeapHashTableBacking<HashTable<ListHashSetNode<NodeContents, blink::HeapListHashSetAllocator<T, inlineCapacity> >*, U, V, W, X, Y, blink::HeapAllocator> >, void> {
    typedef ListHashSetNode<NodeContents, blink::HeapListHashSetAllocator<T, inlineCapacity> > Node;
    typedef HashTable<Node*, U, V, W, X, Y, blink::HeapAllocator> Table;
    static bool trace(blink::Visitor* visitor, void* self)
    {
        Node** array = reinterpret_cast<Node**>(self);
        blink::FinalizedHeapObjectHeader* header = blink::FinalizedHeapObjectHeader::fromPayload(self);
        size_t length = header->payloadSize() / sizeof(Node*);
        for (size_t i = 0; i < length; i++) {
            if (!HashTableHelper<Node*, typename Table::ExtractorType, typename Table::KeyTraitsType>::isEmptyOrDeletedBucket(array[i])) {
                traceListHashSetValue(visitor, array[i]->m_value);
                // Just mark the node without tracing because we already traced
                // the contents, and there is no need to trace the next and
                // prev fields since iterating over the hash table backing will
                // find the whole chain.
                visitor->markNoTracing(array[i]);
            }
        }
        return false;
    }
};

// Key value pairs, as used in HashMap. To disambiguate template choice we have
// to have two versions, first the one with no special weak handling, then the
// one with weak handling.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename Key, typename Value, typename Traits>
struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, KeyValuePair<Key, Value>, Traits> {
    static bool trace(blink::Visitor* visitor, KeyValuePair<Key, Value>& self)
    {
        ASSERT(ShouldBeTraced<Traits>::value);
        blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::KeyTraits>::value, NoWeakHandlingInCollections, strongify, Key, typename Traits::KeyTraits>::trace(visitor, self.key);
        blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::ValueTraits>::value, NoWeakHandlingInCollections, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.value);
        return false;
    }
};

template<ShouldWeakPointersBeMarkedStrongly strongify, typename Key, typename Value, typename Traits>
struct TraceInCollectionTrait<WeakHandlingInCollections, strongify, KeyValuePair<Key, Value>, Traits> {
    static bool trace(blink::Visitor* visitor, KeyValuePair<Key, Value>& self)
    {
        // This is the core of the ephemeron-like functionality. If there is
        // weakness on the key side then we first check whether there are
        // dead weak pointers on that side, and if there are we don't mark the
        // value side (yet). Conversely if there is weakness on the value side
        // we check that first and don't mark the key side yet if we find dead
        // weak pointers.
        //
        // Corner case: If there is weakness on both the key and value side,
        // and there are also strong pointers on both sides, then we could
        // unexpectedly leak. The scenario is that the weak pointer on the key
        // side is alive, which causes the strong pointer on the key side to be
        // marked. If that then results in the object pointed to by the weak
        // pointer on the value side being marked live, then the whole
        // key-value entry is leaked. To avoid unexpected leaking, we disallow
        // this case, but if you run into this assert, please reach out to Blink
        // reviewers, and we may relax it.
        const bool keyIsWeak = Traits::KeyTraits::weakHandlingFlag == WeakHandlingInCollections;
        const bool valueIsWeak = Traits::ValueTraits::weakHandlingFlag == WeakHandlingInCollections;
        const bool keyHasStrongRefs = ShouldBeTraced<typename Traits::KeyTraits>::value;
        const bool valueHasStrongRefs = ShouldBeTraced<typename Traits::ValueTraits>::value;
        COMPILE_ASSERT(!keyIsWeak || !valueIsWeak || !keyHasStrongRefs || !valueHasStrongRefs, ThisConfigurationWasDisallowedToAvoidUnexpectedLeaks);
        if ((valueIsWeak && !keyIsWeak) || (valueIsWeak && keyIsWeak && !valueHasStrongRefs)) {
            // Check value first.
            bool deadWeakObjectsFoundOnValueSide = blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::ValueTraits>::value, Traits::ValueTraits::weakHandlingFlag, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.value);
            if (deadWeakObjectsFoundOnValueSide)
                return true;
            return blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::KeyTraits>::value, Traits::KeyTraits::weakHandlingFlag, strongify, Key, typename Traits::KeyTraits>::trace(visitor, self.key);
        }
        // Otherwise check the key first.
        bool deadWeakObjectsFoundOnKeySide = blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::KeyTraits>::value, Traits::KeyTraits::weakHandlingFlag, strongify, Key, typename Traits::KeyTraits>::trace(visitor, self.key);
        if (deadWeakObjectsFoundOnKeySide)
            return true;
        return blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::ValueTraits>::value, Traits::ValueTraits::weakHandlingFlag, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.value);
    }
};

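// For example (hypothetical GarbageCollected classes): in a
// HeapHashMap<WeakMember<Node>, Member<Data> >, keyIsWeak is true and
// valueIsWeak is false, so the key side is checked first. If the Node is not
// otherwise marked, the key-side trace reports dead weak pointers, the entry
// is flagged for removal, and the Data value is never marked through the
// map. The map alone therefore cannot keep a value alive once its key is
// dead, which is the ephemeron-like behavior described above.
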
// Nodes used by LinkedHashSet. Again we need two versions to disambiguate the
// template.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename Value, typename Allocator, typename Traits>
struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, LinkedHashSetNode<Value, Allocator>, Traits> {
    static bool trace(blink::Visitor* visitor, LinkedHashSetNode<Value, Allocator>& self)
    {
        ASSERT(ShouldBeTraced<Traits>::value);
        blink::TraceTrait<Value>::trace(visitor, &self.m_value);
        return false;
    }
};

template<ShouldWeakPointersBeMarkedStrongly strongify, typename Value, typename Allocator, typename Traits>
struct TraceInCollectionTrait<WeakHandlingInCollections, strongify, LinkedHashSetNode<Value, Allocator>, Traits> {
    static bool trace(blink::Visitor* visitor, LinkedHashSetNode<Value, Allocator>& self)
    {
        return TraceInCollectionTrait<WeakHandlingInCollections, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.m_value);
    }
};

// ListHashSetNode pointers (a ListHashSet is implemented as a hash table of these pointers).
template<ShouldWeakPointersBeMarkedStrongly strongify, typename Value, size_t inlineCapacity, typename Traits>
struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, ListHashSetNode<Value, blink::HeapListHashSetAllocator<Value, inlineCapacity> >*, Traits> {
    typedef ListHashSetNode<Value, blink::HeapListHashSetAllocator<Value, inlineCapacity> > Node;
    static bool trace(blink::Visitor* visitor, Node* node)
    {
        traceListHashSetValue(visitor, node->m_value);
        // Just mark the node without tracing because we already traced the
        // contents, and there is no need to trace the next and prev fields
        // since iterating over the hash table backing will find the whole
        // chain.
        visitor->markNoTracing(node);
        return false;
    }
};

} // namespace WTF

namespace blink {

// CollectionBackingTraceTrait. Do nothing for things in collections that don't
// need tracing, or call TraceInCollectionTrait for those that do.

// Specialization for things that don't need marking and have no weak pointers. We
// do nothing, even if WTF::WeakPointersActStrong.
template<WTF::ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
struct CollectionBackingTraceTrait<false, WTF::NoWeakHandlingInCollections, strongify, T, Traits> {
    static bool trace(Visitor*, T&) { return false; }
};

// The general case, for things that either need marking or have weak pointers
// or both.
template<bool needsTracing, WTF::WeakHandlingFlag weakHandlingFlag, WTF::ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
struct CollectionBackingTraceTrait {
    static bool trace(Visitor* visitor, T& t)
    {
        Visitor::verifyGarbageCollectedIfMember(reinterpret_cast<T*>(0));
        return WTF::TraceInCollectionTrait<weakHandlingFlag, strongify, T, Traits>::trace(visitor, t);
    }
};

template<typename T> struct WeakHandlingHashTraits : WTF::SimpleClassHashTraits<T> {
    // We want to treat the object as a weak object in the sense that it can
    // disappear from hash sets and hash maps.
    static const WTF::WeakHandlingFlag weakHandlingFlag = WTF::WeakHandlingInCollections;
    // Normally whether or not an object needs tracing is inferred
    // automatically from the presence of the trace method, but we don't
    // necessarily have a trace method, and we may not need one because T
    // can perhaps only be allocated inside collections, never as independent
    // objects. Explicitly mark this as needing tracing and it will be traced
    // in collections using the traceInCollection method, which it must have.
    template<typename U = void> struct NeedsTracingLazily {
        static const bool value = true;
    };
    // The traceInCollection method traces differently depending on whether we
    // are strongifying the trace operation. We strongify the trace operation
    // when there are active iterators on the object. In this case all
    // WeakMembers are marked like strong members so that elements do not
    // suddenly disappear during iteration. Returns true if weak pointers to
    // dead objects were found; in this case any strong pointers were not yet
    // traced and the entry should be removed from the collection.
    static bool traceInCollection(Visitor* visitor, T& t, WTF::ShouldWeakPointersBeMarkedStrongly strongify)
    {
        return t.traceInCollection(visitor, strongify);
    }
};

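// A rough sketch of the traceInCollection contract assumed above. The entry
// type "Observer" and its members are hypothetical, and the exact tracing
// calls depend on the member types:
//
//   bool Observer::traceInCollection(Visitor* visitor, WTF::ShouldWeakPointersBeMarkedStrongly strongify)
//   {
//       if (strongify == WTF::WeakPointersActWeak && !visitor->isAlive(m_weakTarget.get()))
//           return true;              // dead: caller removes this entry
//       visitor->trace(m_weakTarget); // marked like a strong member
//       visitor->trace(m_strongField);
//       return false;                 // nothing dead was found
//   }
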
template<typename T, typename Traits>
struct TraceTrait<HeapVectorBacking<T, Traits> > {
    typedef HeapVectorBacking<T, Traits> Backing;
    static void trace(Visitor* visitor, void* self)
    {
        COMPILE_ASSERT(!WTF::IsWeak<T>::value, WeDontSupportWeaknessInHeapVectorsOrDeques);
        if (WTF::ShouldBeTraced<Traits>::value)
            WTF::TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, WTF::WeakPointersActWeak, HeapVectorBacking<T, Traits>, void>::trace(visitor, self);
    }
    static void mark(Visitor* visitor, const Backing* backing)
    {
        visitor->mark(backing, &trace);
    }
    static void checkGCInfo(Visitor* visitor, const Backing* backing)
    {
#if ENABLE(ASSERT)
        visitor->checkGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing>::get());
#endif
    }
};

// The trace trait for the heap hashtable backing is used when we find a
// direct pointer to the backing from the conservative stack scanner. This
// normally indicates that there is an ongoing iteration over the table, and so
// we disable weak processing of table entries. When the backing is found
// through the owning hash table we mark differently, in order to do weak
// processing.
template<typename Table>
struct TraceTrait<HeapHashTableBacking<Table> > {
    typedef HeapHashTableBacking<Table> Backing;
    typedef typename Table::ValueTraits Traits;
    static void trace(Visitor* visitor, void* self)
    {
        if (WTF::ShouldBeTraced<Traits>::value || Traits::weakHandlingFlag == WTF::WeakHandlingInCollections)
            WTF::TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, WTF::WeakPointersActStrong, Backing, void>::trace(visitor, self);
    }
    static void mark(Visitor* visitor, const Backing* backing)
    {
        if (WTF::ShouldBeTraced<Traits>::value || Traits::weakHandlingFlag == WTF::WeakHandlingInCollections)
            visitor->mark(backing, &trace);
        else
            visitor->markNoTracing(backing); // If we know the trace function will do nothing there is no need to call it.
    }
    static void checkGCInfo(Visitor* visitor, const Backing* backing)
    {
#if ENABLE(ASSERT)
        visitor->checkGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing>::get());
#endif
    }
};

template<typename Table>
void HeapHashTableBacking<Table>::finalize(void* pointer)
{
    typedef typename Table::ValueType Value;
    ASSERT(Table::ValueTraits::needsDestruction);
    FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(pointer);
    // Use the payload size as recorded by the heap to determine how many
    // elements to finalize.
    size_t length = header->payloadSize() / sizeof(Value);
    Value* table = reinterpret_cast<Value*>(pointer);
    for (unsigned i = 0; i < length; i++) {
        if (!Table::isEmptyOrDeletedBucket(table[i]))
            table[i].~Value();
    }
}

template<typename T, typename U, typename V, typename W, typename X>
struct GCInfoTrait<HeapHashMap<T, U, V, W, X> > : public GCInfoTrait<HashMap<T, U, V, W, X, HeapAllocator> > { };
template<typename T, typename U, typename V>
struct GCInfoTrait<HeapHashSet<T, U, V> > : public GCInfoTrait<HashSet<T, U, V, HeapAllocator> > { };
template<typename T, typename U, typename V>
struct GCInfoTrait<HeapLinkedHashSet<T, U, V> > : public GCInfoTrait<LinkedHashSet<T, U, V, HeapAllocator> > { };
template<typename T, size_t inlineCapacity, typename U>
struct GCInfoTrait<HeapListHashSet<T, inlineCapacity, U> > : public GCInfoTrait<ListHashSet<T, inlineCapacity, U, HeapListHashSetAllocator<T, inlineCapacity> > > { };
template<typename T, size_t inlineCapacity>
struct GCInfoTrait<HeapVector<T, inlineCapacity> > : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > { };
template<typename T, size_t inlineCapacity>
struct GCInfoTrait<HeapDeque<T, inlineCapacity> > : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator> > { };
template<typename T, typename U, typename V>
struct GCInfoTrait<HeapHashCountedSet<T, U, V> > : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator> > { };

template<typename T>
struct IfWeakMember;

template<typename T>
struct IfWeakMember {
    template<typename U>
    static bool isDead(Visitor*, const U&) { return false; }
};

template<typename T>
struct IfWeakMember<WeakMember<T> > {
    static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); }
};

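// Illustrative use (hypothetical call site): weak processing can ask
// uniformly whether an entry holds a dead weak member, whatever its type:
//
//   if (IfWeakMember<typename Table::KeyType>::isDead(visitor, key))
//       // ... drop the entry ...
//
// For non-WeakMember types the primary template answers false, so only
// genuine WeakMember<T> fields can ever report death.
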
} // namespace blink