/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
34 #include "heap/HeapExport.h"
35 #include "heap/ThreadState.h"
36 #include "heap/Visitor.h"
38 #include "wtf/Assertions.h"
39 #include "wtf/OwnPtr.h"
// ASAN integration definitions
#if COMPILER(CLANG)
#define USE_ASAN (__has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__))
#else
#define USE_ASAN 0
#endif

#if USE_ASAN
extern "C" {
    // Marks memory region [addr, addr+size) as unaddressable.
    // This memory must be previously allocated by the user program. Accessing
    // addresses in this region from instrumented code is forbidden until
    // this region is unpoisoned. This function is not guaranteed to poison
    // the whole region - it may poison only a subregion of [addr, addr+size)
    // due to ASan alignment restrictions.
    // This method is NOT thread-safe in the sense that no two threads can
    // (un)poison memory in the same memory region simultaneously.
    void __asan_poison_memory_region(void const volatile*, size_t);
    // Marks memory region [addr, addr+size) as addressable.
    // This memory must be previously allocated by the user program. Accessing
    // addresses in this region is allowed until this region is poisoned again.
    // This function may unpoison a superregion of [addr, addr+size) due to
    // ASan alignment restrictions.
    // This method is NOT thread-safe in the sense that no two threads can
    // (un)poison memory in the same memory region simultaneously.
    void __asan_unpoison_memory_region(void const volatile*, size_t);
}

// User code should use the macros instead of calling the functions directly.
#define ASAN_POISON_MEMORY_REGION(addr, size) \
    __asan_poison_memory_region((addr), (size))
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
    __asan_unpoison_memory_region((addr), (size))
#define NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
const size_t asanMagic = 0xabefeed0;
const size_t asanDeferMemoryReuseCount = 2;
const size_t asanDeferMemoryReuseMask = 0x3;
#else
#define ASAN_POISON_MEMORY_REGION(addr, size) \
    ((void)(addr), (void)(size))
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
    ((void)(addr), (void)(size))
#define NO_SANITIZE_ADDRESS
#endif
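// Example (illustrative sketch, not part of the original header): a sweeper
// that wants freed payloads to trap on use-after-free under ASan could zap
// and then poison the region when it is retired:
//
//     void retirePayload(Address payload, size_t payloadSize)
//     {
//         memset(payload, freelistZapValue, payloadSize);
//         ASAN_POISON_MEMORY_REGION(payload, payloadSize);
//     }
//
// and would ASAN_UNPOISON_MEMORY_REGION the same range before handing the
// memory out again. When ASan is disabled both macros compile to no-ops.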
const size_t blinkPageSizeLog2 = 17;
const size_t blinkPageSize = 1 << blinkPageSizeLog2;
const size_t blinkPageOffsetMask = blinkPageSize - 1;
const size_t blinkPageBaseMask = ~blinkPageOffsetMask;
// Double precision floats are more efficient when 8-byte aligned, so we
// 8-byte align all allocations even on 32-bit platforms.
const size_t allocationGranularity = 8;
const size_t allocationMask = allocationGranularity - 1;
const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity);
const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask);
const size_t maxHeapObjectSize = 1 << 27;

const size_t markBitMask = 1;
const size_t freeListMask = 2;
const size_t debugBitMask = 4;
const size_t sizeMask = ~7;
const uint8_t freelistZapValue = 42;
const uint8_t finalizedZapValue = 24;
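// The three low-order bits of an object header's size word carry state and
// the remaining bits carry the size. An illustrative decoding (these helpers
// are not part of this header; the real accessors live on BasicObjectHeader
// and HeapObjectHeader below):
//
//     bool exampleIsMarked(size_t encoded) { return encoded & markBitMask; }
//     bool exampleIsFree(size_t encoded) { return encoded & freeListMask; }
//     size_t exampleSize(size_t encoded) { return encoded & sizeMask; }
//
// This works because allocationGranularity is 8, so encoded sizes always have
// their three low-order bits clear.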
class HeapStats;
class PageMemory;
template<typename T> class Persistent;
template<typename T> class Member;
template<typename T> class WeakMember;
template<typename Header> class ThreadHeap;
template<typename T, typename Traits> class HeapVectorBacking;
template<typename T, typename U, typename V, typename W, typename X> class HeapHashTableBacking;
template<bool needsTracing, bool isWeak, bool markWeakMembersStrongly, typename T, typename Traits>
struct CollectionBackingTraceTrait;

HEAP_EXPORT size_t osPageSize();
// Blink heap pages are set up with a guard page before and after the
// payload.
inline size_t blinkPagePayloadSize()
{
    return blinkPageSize - 2 * osPageSize();
}
// Blink heap pages are aligned to the Blink heap page size.
// Therefore, the start of a Blink page can be obtained by
// rounding down to the Blink page size.
inline Address roundToBlinkPageStart(Address address)
{
    return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
}
// Compute the amount of padding we have to add to a header to make
// the size of the header plus the padding a multiple of 8 bytes.
template<typename Header>
inline size_t headerPadding()
{
    return (allocationGranularity - (sizeof(Header) % allocationGranularity)) % allocationGranularity;
}
// Masks an address down to the enclosing Blink page base address.
inline Address blinkPageAddress(Address address)
{
    return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
}
#ifndef NDEBUG

// Sanity check for a page header address: the address of the page
// header should be one OS page size away from being Blink page size
// aligned.
inline bool isPageHeaderAddress(Address address)
{
    return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize());
}
#endif
// Masks an address down to the enclosing Oilpan heap page base address.
// All Oilpan heap pages are aligned at blinkPageBase plus an OS page size.
// FIXME: Remove HEAP_EXPORT once we get a proper public interface to our typed heaps.
// This is only exported to enable tests in HeapTest.cpp.
HEAP_EXPORT inline Address pageHeaderAddress(Address address)
{
    return blinkPageAddress(address) + osPageSize();
}
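// Worked example, assuming 4096-byte OS pages: blinkPageSize is 1 << 17
// (131072 bytes), so a Blink page spans [base, base + 131072). The first and
// last OS pages of that span are guard pages, which is why
// blinkPagePayloadSize() is 131072 - 2 * 4096, and the page header sits at
// base + 4096, the address that isPageHeaderAddress() checks for and
// pageHeaderAddress() computes.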
// Common header for heap pages.
class BaseHeapPage {
public:
    BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo)
        : m_storage(storage)
        , m_gcInfo(gcInfo)
    {
        ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
    }

    // Check if the given address could point to an object in this
    // heap page. If so, find the start of that object and mark it
    // using the given Visitor.
    //
    // Returns true if the object was found and marked, returns false
    // otherwise.
    //
    // This is used during conservative stack scanning to
    // conservatively mark all objects that could be referenced from
    // the stack.
    virtual bool checkAndMarkPointer(Visitor*, Address) = 0;

    Address address() { return reinterpret_cast<Address>(this); }
    PageMemory* storage() const { return m_storage; }
    const GCInfo* gcInfo() { return m_gcInfo; }

private:
    PageMemory* m_storage;
    const GCInfo* m_gcInfo;
};
// Large allocations are allocated as separate objects and linked in a
// list.
//
// In order to use the same memory allocation routines for everything
// allocated in the heap, large objects are considered heap pages
// containing only one object.
//
// The layout of a large heap object is as follows:
//
// | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload |
template<typename Header>
class LargeHeapObject : public BaseHeapPage {
public:
    LargeHeapObject(PageMemory* storage, const GCInfo* gcInfo) : BaseHeapPage(storage, gcInfo)
    {
        COMPILE_ASSERT(!(sizeof(LargeHeapObject<Header>) & allocationMask), large_heap_object_header_misaligned);
    }

    virtual bool checkAndMarkPointer(Visitor*, Address);

    void link(LargeHeapObject<Header>** previousNext)
    {
        m_next = *previousNext;
        *previousNext = this;
    }

    void unlink(LargeHeapObject<Header>** previousNext)
    {
        *previousNext = m_next;
    }

    bool contains(Address object)
    {
        return (address() <= object) && (object <= (address() + size()));
    }

    LargeHeapObject<Header>* next()
    {
        return m_next;
    }

    size_t size()
    {
        return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
    }

    Address payload() { return heapObjectHeader()->payload(); }
    size_t payloadSize() { return heapObjectHeader()->payloadSize(); }

    Header* heapObjectHeader()
    {
        Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
        return reinterpret_cast<Header*>(headerAddress);
    }

    bool isMarked();
    void unmark();
    void getStats(HeapStats&);
    void mark(Visitor*);
    void finalize();

private:
    friend class ThreadHeap<Header>;

    LargeHeapObject<Header>* m_next;
};
// The BasicObjectHeader is the minimal object header. It is used when
// encountering heap space of size allocationGranularity to mark it as
// a freelist entry.
class BasicObjectHeader {
public:
    NO_SANITIZE_ADDRESS
    explicit BasicObjectHeader(size_t encodedSize)
        : m_size(encodedSize) { }

    static size_t freeListEncodedSize(size_t size) { return size | freeListMask; }

    NO_SANITIZE_ADDRESS
    bool isFree() { return m_size & freeListMask; }

    NO_SANITIZE_ADDRESS
    size_t size() const { return m_size & sizeMask; }

protected:
    size_t m_size;
};
// Our heap object layout is layered with the HeapObjectHeader closest
// to the payload; this can be wrapped in a FinalizedObjectHeader if the
// object is on the GeneralHeap and not on a specific TypedHeap.
// Finally, if the object is a large object (> blinkPageSize/2) then it is
// wrapped with a LargeObjectHeader.
//
// Object memory layout:
// [ LargeObjectHeader | ] [ FinalizedObjectHeader | ] HeapObjectHeader | payload
// The [ ] notation denotes that the LargeObjectHeader and the FinalizedObjectHeader
// are independently optional.
class HeapObjectHeader : public BasicObjectHeader {
public:
    NO_SANITIZE_ADDRESS
    explicit HeapObjectHeader(size_t encodedSize)
        : BasicObjectHeader(encodedSize)
#ifndef NDEBUG
        , m_magic(magic)
#endif
    { }

    NO_SANITIZE_ADDRESS
    HeapObjectHeader(size_t encodedSize, const GCInfo*)
        : BasicObjectHeader(encodedSize)
#ifndef NDEBUG
        , m_magic(magic)
#endif
    { }

    inline void checkHeader() const;
    inline bool isMarked() const;

    inline void mark();
    inline void unmark();

    inline Address payload();
    inline size_t payloadSize();
    inline Address payloadEnd();

    inline void setDebugMark();
    inline void clearDebugMark();
    inline bool hasDebugMark() const;

    // Zap magic number with a new magic number that means there was once an
    // object allocated here, but it was freed because nobody marked it during
    // GC.
    void zapMagic();

    static void finalize(const GCInfo*, Address, size_t);
    HEAP_EXPORT static HeapObjectHeader* fromPayload(const void*);

    static const intptr_t magic = 0xc0de247;
    static const intptr_t zappedMagic = 0xC0DEdead;
    // The zap value for vtables should be < 4K to ensure it cannot be
    // used for dispatch.
    static const intptr_t zappedVTable = 0xd0d;

private:
#ifndef NDEBUG
    intptr_t m_magic;
#endif
};

const size_t objectHeaderSize = sizeof(HeapObjectHeader);
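// An illustrative round trip between a header and its payload (a sketch, not
// part of the original header): payload() adds objectHeaderSize to the header
// address, and fromPayload() subtracts it again, so for any live object:
//
//     HeapObjectHeader* header = HeapObjectHeader::fromPayload(payload);
//     ASSERT(header->payload() == payload);
//     ASSERT(header->payloadSize() == header->size() - objectHeaderSize);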
// Each object on the GeneralHeap needs to carry a pointer to its
// own GCInfo structure for tracing and potential finalization.
class FinalizedHeapObjectHeader : public HeapObjectHeader {
public:
    NO_SANITIZE_ADDRESS
    FinalizedHeapObjectHeader(size_t encodedSize, const GCInfo* gcInfo)
        : HeapObjectHeader(encodedSize)
        , m_gcInfo(gcInfo)
    { }

    inline Address payload();
    inline size_t payloadSize();

    NO_SANITIZE_ADDRESS
    const GCInfo* gcInfo() { return m_gcInfo; }

    NO_SANITIZE_ADDRESS
    const char* typeMarker() { return m_gcInfo->m_typeMarker; }

    NO_SANITIZE_ADDRESS
    TraceCallback traceCallback() { return m_gcInfo->m_trace; }

    void finalize();

    NO_SANITIZE_ADDRESS
    inline bool hasFinalizer() { return m_gcInfo->hasFinalizer(); }

    HEAP_EXPORT static FinalizedHeapObjectHeader* fromPayload(const void*);

private:
    const GCInfo* m_gcInfo;
};

const size_t finalizedHeaderSize = sizeof(FinalizedHeapObjectHeader);
class FreeListEntry : public HeapObjectHeader {
public:
    NO_SANITIZE_ADDRESS
    explicit FreeListEntry(size_t size)
        : HeapObjectHeader(freeListEncodedSize(size))
        , m_next(0)
    {
#if !defined(NDEBUG) && !USE_ASAN
        // Zap free area with asterisks, aka 0x2a2a2a2a.
        // For ASAN, don't zap, since we keep accounting in the freelist entry.
        for (size_t i = sizeof(*this); i < size; i++)
            reinterpret_cast<Address>(this)[i] = freelistZapValue;
        ASSERT(size >= objectHeaderSize);
        zapMagic();
#endif
    }

    Address address() { return reinterpret_cast<Address>(this); }

    NO_SANITIZE_ADDRESS
    void unlink(FreeListEntry** prevNext)
    {
        *prevNext = m_next;
        m_next = 0;
    }

    NO_SANITIZE_ADDRESS
    void link(FreeListEntry** prevNext)
    {
        m_next = *prevNext;
        *prevNext = this;
    }

#if USE_ASAN
    NO_SANITIZE_ADDRESS
    bool shouldAddToFreeList()
    {
        // Init if not already magic.
        if ((m_asanMagic & ~asanDeferMemoryReuseMask) != asanMagic) {
            m_asanMagic = asanMagic | asanDeferMemoryReuseCount;
            return false;
        }
        // Decrement if count part of asanMagic > 0.
        if (m_asanMagic & asanDeferMemoryReuseMask)
            m_asanMagic--;
        return !(m_asanMagic & asanDeferMemoryReuseMask);
    }
#endif

private:
    FreeListEntry* m_next;
#if USE_ASAN
    unsigned m_asanMagic;
#endif
};
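// Under ASan, shouldAddToFreeList() defers reuse of freed memory: the first
// call stamps m_asanMagic with asanMagic | asanDeferMemoryReuseCount and
// returns false, and each later call decrements the two-bit counter until it
// reaches zero. A sketch of the calling side (hypothetical, not from the
// original header):
//
//     if (entry->shouldAddToFreeList())
//         entry->link(freeListHead); // Deferral over; allow reuse now.
//     // Otherwise leave the region poisoned for another GC cycle.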
// Representation of Blink heap pages.
//
// Pages are specialized on the type of header on the object they
// contain. If a heap page only contains a certain type of object all
// of the objects will have the same GCInfo pointer and therefore that
// pointer can be stored in the HeapPage instead of in the header of
// each object. In that case objects have only a HeapObjectHeader and
// not a FinalizedHeapObjectHeader, saving a word per object.
template<typename Header>
class HeapPage : public BaseHeapPage {
public:
    HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*);

    void link(HeapPage**);
    static void unlink(HeapPage*, HeapPage**);

    bool contains(Address addr)
    {
        Address blinkPageStart = roundToBlinkPageStart(address());
        return blinkPageStart <= addr && (blinkPageStart + blinkPageSize) > addr;
    }

    HeapPage* next() { return m_next; }

    Address payload()
    {
        return address() + sizeof(*this) + headerPadding<Header>();
    }

    static size_t payloadSize()
    {
        return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask;
    }

    Address end() { return payload() + payloadSize(); }

    void getStats(HeapStats&);
    void clearMarks();
    void sweep();
    void clearObjectStartBitMap();
    void finalize(Header*);
    virtual bool checkAndMarkPointer(Visitor*, Address);
    ThreadHeap<Header>* heap() { return m_heap; }
#if USE_ASAN
    void poisonUnmarkedObjects();
#endif

protected:
    void populateObjectStartBitMap();
    bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; }
    TraceCallback traceCallback(Header*);

    HeapPage<Header>* m_next;
    ThreadHeap<Header>* m_heap;
    bool m_objectStartBitMapComputed;
    uint8_t m_objectStartBitMap[reservedForObjectBitMap];

    friend class ThreadHeap<Header>;
};
// A HeapContainsCache provides a fast way of taking an arbitrary
// pointer-sized word, and determining whether it can be interpreted
// as a pointer to an area that is managed by the garbage collected
// Blink heap. There is a cache of 'pages' that have previously been
// determined to be either wholly inside or wholly outside the
// heap. The size of these pages must be smaller than the allocation
// alignment of the heap pages. We determine on-heap-ness by rounding
// down the pointer to the nearest page and looking up the page in the
// cache. If there is a miss in the cache we ask the heap to determine
// the status of the pointer by iterating over all of the heap. The
// result is then cached in the two-way associative page cache.
//
// A HeapContainsCache is both a positive and negative
// cache. Therefore, it must be flushed both when new memory is added
// and when memory is removed from the Blink heap.
class HeapContainsCache {
public:
    HeapContainsCache();

    void flush();
    bool contains(Address);

    // Perform a lookup in the cache.
    //
    // If lookup returns false the argument address was not found in
    // the cache and it is unknown if the address is in the Blink
    // heap.
    //
    // If lookup returns true the argument address was found in the
    // cache. In that case, the address is in the heap if the base
    // heap page out parameter is different from 0 and is not in the
    // heap if the base heap page out parameter is 0.
    bool lookup(Address, BaseHeapPage**);

    // Add an entry to the cache. Use a 0 base heap page pointer to
    // add a negative entry.
    void addEntry(Address, BaseHeapPage*);

private:
    class Entry {
    public:
        Entry()
            : m_address(0)
            , m_containingPage(0)
        {
        }

        Entry(Address address, BaseHeapPage* containingPage)
            : m_address(address)
            , m_containingPage(containingPage)
        {
        }

        BaseHeapPage* containingPage() { return m_containingPage; }
        Address address() { return m_address; }

    private:
        Address m_address;
        BaseHeapPage* m_containingPage;
    };

    static const int numberOfEntriesLog2 = 12;
    static const int numberOfEntries = 1 << numberOfEntriesLog2;

    static size_t hash(Address);

    WTF::OwnPtr<HeapContainsCache::Entry[]> m_entries;

    friend class ThreadState;
};
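// A sketch of the intended lookup protocol (illustrative; the slow-path
// helper is hypothetical and the real caller lives in the heap
// implementation):
//
//     BaseHeapPage* page;
//     if (!containsCache->lookup(address, &page)) {
//         page = findPageBySlowPath(address); // May be 0.
//         containsCache->addEntry(address, page); // 0 stores a negative entry.
//     }
//     bool isOnHeap = !!page;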
// The CallbackStack contains all the visitor callbacks used to trace and mark
// objects. A specific CallbackStack instance contains at most bufferSize elements.
// If more space is needed a new CallbackStack instance is created and chained
// together with the former instance, i.e. a logical CallbackStack can be made of
// multiple chained CallbackStack object instances.
// There are two logical callback stacks: one containing all the marking callbacks and
// one containing the weak pointer callbacks.
class CallbackStack {
public:
    CallbackStack(CallbackStack** first)
        : m_limit(&(m_buffer[bufferSize]))
        , m_current(&(m_buffer[0]))
        , m_next(*first)
    {
        *first = this;
    }

    ~CallbackStack();

    void assertIsEmpty();

    class Item {
    public:
        Item() { }
        Item(void* object, VisitorCallback callback)
            : m_object(object)
            , m_callback(callback)
        { }

        void* object() { return m_object; }
        VisitorCallback callback() { return m_callback; }

    private:
        void* m_object;
        VisitorCallback m_callback;
    };

    static void init(CallbackStack** first);
    static void shutdown(CallbackStack** first);
    bool popAndInvokeCallback(CallbackStack** first, Visitor*);

    Item* allocateEntry(CallbackStack** first)
    {
        if (m_current < m_limit)
            return m_current++;
        return (new CallbackStack(first))->allocateEntry(first);
    }

private:
    static const size_t bufferSize = 8000;
    Item m_buffer[bufferSize];
    Item* m_limit;
    Item* m_current;
    CallbackStack* m_next;
};
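// During marking the two logical stacks are drained by loops of the following
// shape (an illustrative sketch; the real driver lives in the .cpp file and
// uses the Heap entry points declared further down):
//
//     while (Heap::popAndInvokeTraceCallback(visitor)) { }
//     while (Heap::popAndInvokeWeakPointerCallback(visitor)) { }
//
// Invoking a trace callback may push new callbacks, so the loops run until a
// pop finds the stack genuinely empty.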
// Non-template super class used to pass a heap around to other classes.
class BaseHeap {
public:
    virtual ~BaseHeap() { }

    // Find the page in this thread heap containing the given
    // address. Returns 0 if the address is not contained in any
    // page in this thread heap.
    virtual BaseHeapPage* heapPageFromAddress(Address) = 0;

    // Find the large object in this thread heap containing the given
    // address. Returns 0 if the address is not contained in any
    // page in this thread heap.
    virtual BaseHeapPage* largeHeapObjectFromAddress(Address) = 0;

    // Check if the given address could point to an object in this
    // heap. If so, find the start of that object and mark it using
    // the given Visitor.
    //
    // Returns true if the object was found and marked, returns false
    // otherwise.
    //
    // This is used during conservative stack scanning to
    // conservatively mark all objects that could be referenced from
    // the stack.
    virtual bool checkAndMarkLargeHeapObject(Visitor*, Address) = 0;

    // Sweep this part of the Blink heap. This finalizes dead objects
    // and builds freelists for all the unused memory.
    virtual void sweep() = 0;

    // Forcefully finalize all objects in this part of the Blink heap
    // (potentially with the exception of one object). This is used
    // during thread termination to make sure that all objects for the
    // dying thread are finalized.
    virtual void finalizeAll(const void* except = 0) = 0;
    virtual bool inFinalizeAll() = 0;

    virtual void clearFreeLists() = 0;
    virtual void clearMarks() = 0;
#ifndef NDEBUG
    virtual void getScannedStats(HeapStats&) = 0;
#endif

    virtual void makeConsistentForGC() = 0;
    virtual bool isConsistentForGC() = 0;

    // Returns a bucket number for inserting a FreeListEntry of a
    // given size. All FreeListEntries in the given bucket, n, have
    // size >= 2^n.
    static int bucketIndexForSize(size_t);
};
// Thread heaps represent a part of the per-thread Blink heap.
//
// Each Blink thread has a number of thread heaps: one general heap
// that contains any type of object and a number of heaps specialized
// for specific object types (such as Node).
//
// Each thread heap contains the functionality to allocate new objects
// (potentially adding new pages to the heap), to find and mark
// objects during conservative stack scanning and to sweep the set of
// pages after a GC.
template<typename Header>
class ThreadHeap : public BaseHeap {
public:
    ThreadHeap(ThreadState*);
    virtual ~ThreadHeap();

    virtual BaseHeapPage* heapPageFromAddress(Address);
    virtual BaseHeapPage* largeHeapObjectFromAddress(Address);
    virtual bool checkAndMarkLargeHeapObject(Visitor*, Address);
    virtual void sweep();
    virtual void finalizeAll(const void* except = 0);
    virtual bool inFinalizeAll() { return m_inFinalizeAll; }
    virtual void clearFreeLists();
    virtual void clearMarks();
#ifndef NDEBUG
    virtual void getScannedStats(HeapStats&);
#endif

    virtual void makeConsistentForGC();
    virtual bool isConsistentForGC();

    ThreadState* threadState() { return m_threadState; }
    HeapStats& stats() { return m_threadState->stats(); }
    HeapContainsCache* heapContainsCache() { return m_threadState->heapContainsCache(); }

    inline Address allocate(size_t, const GCInfo*);
    void addToFreeList(Address, size_t);
    void addPageToPool(HeapPage<Header>*);
    static inline size_t roundedAllocationSize(size_t size)
    {
        return allocationSizeFromSize(size) - sizeof(Header);
    }

private:
    // Once pages have been used for one thread heap they will never
    // be reused for another thread heap. Instead of unmapping, we add
    // the pages to a pool of pages to be reused later by this thread
    // heap. This is done as a security feature to avoid type
    // confusion. The heap is type segregated by having separate
    // thread heaps for various types of objects. Holding on to pages
    // ensures that the same virtual address space cannot be used for
    // objects of another type than the type contained in this thread
    // heap.
    class PagePoolEntry {
    public:
        PagePoolEntry(PageMemory* storage, PagePoolEntry* next)
            : m_storage(storage)
            , m_next(next)
        { }

        PageMemory* storage() { return m_storage; }
        PagePoolEntry* next() { return m_next; }

    private:
        PageMemory* m_storage;
        PagePoolEntry* m_next;
    };

    HEAP_EXPORT Address outOfLineAllocate(size_t, const GCInfo*);
    static size_t allocationSizeFromSize(size_t);
    void addPageToHeap(const GCInfo*);
    HEAP_EXPORT Address allocateLargeObject(size_t, const GCInfo*);
    Address currentAllocationPoint() const { return m_currentAllocationPoint; }
    size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
    bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); }
    void setAllocationPoint(Address point, size_t size)
    {
        ASSERT(!point || heapPageFromAddress(point));
        ASSERT(size <= HeapPage<Header>::payloadSize());
        m_currentAllocationPoint = point;
        m_remainingAllocationSize = size;
    }
    void ensureCurrentAllocation(size_t, const GCInfo*);
    bool allocateFromFreeList(size_t);

    void setFinalizeAll(bool finalizingAll) { m_inFinalizeAll = finalizingAll; }
    void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**);

    void allocatePage(const GCInfo*);
    PageMemory* takePageFromPool();
    void clearPagePool();

    Address m_currentAllocationPoint;
    size_t m_remainingAllocationSize;

    HeapPage<Header>* m_firstPage;
    LargeHeapObject<Header>* m_firstLargeHeapObject;

    int m_biggestFreeListIndex;
    bool m_inFinalizeAll;
    ThreadState* m_threadState;

    // All FreeListEntries in the nth list have size >= 2^n.
    FreeListEntry* m_freeLists[blinkPageSizeLog2];

    // List of pages that have been previously allocated, but are now
    // unused.
    PagePoolEntry* m_pagePool;
};
class HEAP_EXPORT Heap {
public:
    static void init();
    static void shutdown();

    static bool contains(Address);
    static bool contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); }
    static bool contains(const void* pointer) { return contains(const_cast<void*>(pointer)); }

    // Push a trace callback on the marking stack.
    static void pushTraceCallback(void* containerObject, TraceCallback);

    // Push a weak pointer callback on the weak callback stack.
    static void pushWeakPointerCallback(void* containerObject, WeakPointerCallback);

    // Pop the top of the marking stack and call the callback with the visitor
    // and the object. Returns false when there is nothing more to do.
    static bool popAndInvokeTraceCallback(Visitor*);

    // Pop the top of the weak callback stack and call the callback with the visitor
    // and the object. Returns false when there is nothing more to do.
    static bool popAndInvokeWeakPointerCallback(Visitor*);

    template<typename T> static Address allocate(size_t);
    template<typename T> static Address reallocate(void* previous, size_t);

    static void collectGarbage(ThreadState::StackState, GCType = Normal);

    static void prepareForGC();

    // Conservatively checks whether an address is a pointer in any of the thread
    // heaps. If so marks the object pointed to as live.
    static Address checkAndMarkPointer(Visitor*, Address);

    // Collect heap stats for all threads attached to the Blink
    // garbage collector. Should only be called during garbage
    // collection where threads are known to be at safe points.
    static void getStats(HeapStats*);

    static bool isConsistentForGC();
    static void makeConsistentForGC();

    static CallbackStack* s_markingStack;
    static CallbackStack* s_weakCallbackStack;
};
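// A sketch of triggering a collection from a thread whose stack may hold heap
// pointers (illustrative; StackState is declared in ThreadState.h):
//
//     Heap::collectGarbage(ThreadState::HeapPointersOnStack);
//
// getStats() and makeConsistentForGC() are only safe to call while all
// attached threads are stopped at safe points.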
// The NoAllocationScope class is used in debug mode to catch unwanted
// allocations, e.g. allocations during GC.
template<ThreadAffinity Affinity>
class NoAllocationScope {
public:
    NoAllocationScope() : m_active(true) { enter(); }

    explicit NoAllocationScope(bool active) : m_active(active) { enter(); }

    NoAllocationScope(const NoAllocationScope& other) : m_active(other.m_active) { enter(); }

    NoAllocationScope& operator=(const NoAllocationScope& other)
    {
        release();
        m_active = other.m_active;
        enter();
        return *this;
    }

    ~NoAllocationScope() { release(); }

    void release()
    {
        if (m_active) {
            ThreadStateFor<Affinity>::state()->leaveNoAllocationScope();
            m_active = false;
        }
    }

private:
    void enter()
    {
        if (m_active)
            ThreadStateFor<Affinity>::state()->enterNoAllocationScope();
    }

    bool m_active;
};
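// Example usage (illustrative): code that must not allocate from the managed
// heap, such as the marking phase itself, brackets the region with a scope
// object, which makes Heap::allocate assert in debug builds:
//
//     {
//         NoAllocationScope<AnyThread> scope;
//         // ... no heap allocation allowed here ...
//     }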
// Base class for objects allocated in the Blink garbage-collected
// heap.
//
// Defines a 'new' operator that allocates the memory in the
// heap. 'delete' should not be called on objects that inherit from
// GarbageCollected.
//
// Instances of GarbageCollected will *NOT* get finalized. Their
// destructor will not be called. Therefore, only classes that have
// trivial destructors with no semantic meaning (including all their
// subclasses) should inherit from GarbageCollected. If there are
// non-trivial destructors in a given class or any of its subclasses,
// GarbageCollectedFinalized should be used which guarantees that the
// destructor is called on an instance when the garbage collector
// determines that it is no longer reachable.
template<typename T>
class GarbageCollected {
    WTF_MAKE_NONCOPYABLE(GarbageCollected);

    // For now direct allocation of arrays on the heap is not allowed.
    void* operator new[](size_t size);
    void operator delete[](void* p);

public:
    void* operator new(size_t size)
    {
        return Heap::allocate<T>(size);
    }

    void operator delete(void* p)
    {
        ASSERT_NOT_REACHED();
    }

protected:
    GarbageCollected()
    {
        ASSERT(ThreadStateFor<ThreadingTrait<T>::Affinity>::state()->contains(reinterpret_cast<Address>(this)));
    }
    ~GarbageCollected() { }
};
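// Example of a garbage-collected leaf type (illustrative; ExampleNode is not
// part of the original header). The template argument must be the deriving
// class itself so operator new routes the allocation to the right typed heap:
//
//     class ExampleNode : public GarbageCollected<ExampleNode> {
//     public:
//         void trace(Visitor* visitor) { visitor->trace(m_next); }
//     private:
//         Member<ExampleNode> m_next;
//     };
//
//     ExampleNode* node = new ExampleNode; // Uses Heap::allocate<ExampleNode>.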
// Base class for objects allocated in the Blink garbage-collected
// heap.
//
// Defines a 'new' operator that allocates the memory in the
// heap. 'delete' should not be called on objects that inherit from
// GarbageCollectedFinalized.
//
// Instances of GarbageCollectedFinalized will have their destructor
// called when the garbage collector determines that the object is no
// longer reachable.
template<typename T>
class GarbageCollectedFinalized : public GarbageCollected<T> {
    WTF_MAKE_NONCOPYABLE(GarbageCollectedFinalized);

protected:
    // Finalize is called when the object is freed from the heap. By
    // default finalization means calling the destructor on the
    // object. Finalize can be overridden to support calling the
    // destructor of a subclass. This is useful for objects without
    // vtables that require explicit dispatching.
    void finalize()
    {
        static_cast<T*>(this)->~T();
    }

    GarbageCollectedFinalized() { }
    ~GarbageCollectedFinalized() { }

    template<typename U> friend struct HasFinalizer;
    template<typename U, bool> friend struct FinalizerTraitImpl;
};
// Base class for objects that are in the Blink garbage-collected heap
// and are still reference counted.
//
// This class should be used sparingly and only to gradually move
// objects from being reference counted to being managed by the Blink
// garbage collector.
//
// While the current reference counting keeps one of these objects
// alive it will have a Persistent handle to itself allocated so we
// will not reclaim the memory. When the reference count reaches 0 the
// persistent handle will be deleted. When the garbage collector
// determines that there are no other references to the object it will
// be reclaimed and the destructor of the reclaimed object will be
// called at that time.
template<typename T>
class RefCountedGarbageCollected : public GarbageCollectedFinalized<T> {
    WTF_MAKE_NONCOPYABLE(RefCountedGarbageCollected);

public:
    RefCountedGarbageCollected()
        : m_refCount(1)
    {
        m_keepAlive = new Persistent<T>(static_cast<T*>(this));
    }

    // Implement method to increase reference count for use with
    // RefPtrs.
    //
    // In contrast to the normal WTF::RefCounted, the reference count
    // can reach 0 and increase again. This happens in the following
    // scenario:
    //
    // (1) The reference count becomes 0, but members, persistents, or
    //     on-stack pointers keep references to the object.
    //
    // (2) The pointer is assigned to a RefPtr again and the reference
    //     count becomes 1.
    //
    // In this case, we have to resurrect m_keepAlive.
    void ref()
    {
        if (UNLIKELY(!m_refCount)) {
            ASSERT(!m_keepAlive);
            ASSERT(ThreadStateFor<ThreadingTrait<T>::Affinity>::state()->contains(reinterpret_cast<Address>(this)));
            m_keepAlive = new Persistent<T>(static_cast<T*>(this));
        }
        ++m_refCount;
    }

    // Implement method to decrease reference count for use with
    // RefPtrs.
    //
    // In contrast to the normal WTF::RefCounted implementation, the
    // object itself is not deleted when the reference count reaches
    // 0. Instead, the keep-alive persistent handle is deallocated so
    // that the object can be reclaimed when the garbage collector
    // determines that there are no other references to the object.
    void deref()
    {
        ASSERT(m_refCount > 0);
        if (!--m_refCount) {
            delete m_keepAlive;
            m_keepAlive = 0;
        }
    }

protected:
    ~RefCountedGarbageCollected() { }

private:
    int m_refCount;
    Persistent<T>* m_keepAlive;
};
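// A transitional type keeps its existing RefPtr-based interface while living
// on the managed heap (a sketch; ExampleResource is hypothetical):
//
//     class ExampleResource : public RefCountedGarbageCollected<ExampleResource> {
//     public:
//         void trace(Visitor*) { }
//     };
//
//     RefPtr<ExampleResource> resource = adoptRef(new ExampleResource);
//
// ref() and deref() only manage the self-referencing Persistent handle; the
// memory itself is always reclaimed by the garbage collector.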
NO_SANITIZE_ADDRESS
void HeapObjectHeader::checkHeader() const
{
    ASSERT(m_magic == magic);
}

Address HeapObjectHeader::payload()
{
    return reinterpret_cast<Address>(this) + objectHeaderSize;
}

size_t HeapObjectHeader::payloadSize()
{
    return size() - objectHeaderSize;
}

Address HeapObjectHeader::payloadEnd()
{
    return reinterpret_cast<Address>(this) + size();
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::mark()
{
    checkHeader();
    m_size |= markBitMask;
}

Address FinalizedHeapObjectHeader::payload()
{
    return reinterpret_cast<Address>(this) + finalizedHeaderSize;
}

size_t FinalizedHeapObjectHeader::payloadSize()
{
    return size() - finalizedHeaderSize;
}
template<typename Header>
size_t ThreadHeap<Header>::allocationSizeFromSize(size_t size)
{
    // Check the size before computing the actual allocation size. The
    // allocation size calculation can overflow for large sizes and
    // the check therefore has to happen before any calculation on the
    // size.
    RELEASE_ASSERT(size < maxHeapObjectSize);

    // Add space for header.
    size_t allocationSize = size + sizeof(Header);
    // Align size with allocation granularity.
    allocationSize = (allocationSize + allocationMask) & ~allocationMask;
    return allocationSize;
}
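// Worked example: with an 8-byte Header, a request for 20 bytes becomes
// 20 + 8 = 28, which (28 + 7) & ~7 rounds up to 32. roundedAllocationSize()
// reports 32 - 8 = 24 payload bytes back to the caller, so the 4 bytes of
// rounding slack end up inside the usable payload.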
template<typename Header>
Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo)
{
    size_t allocationSize = allocationSizeFromSize(size);
    bool isLargeObject = allocationSize > blinkPageSize / 2;
    if (isLargeObject)
        return allocateLargeObject(allocationSize, gcInfo);
    if (m_remainingAllocationSize < allocationSize)
        return outOfLineAllocate(size, gcInfo);
    Address headerAddress = m_currentAllocationPoint;
    m_currentAllocationPoint += allocationSize;
    m_remainingAllocationSize -= allocationSize;
    Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo);
    size_t payloadSize = allocationSize - sizeof(Header);
    stats().increaseObjectSpace(payloadSize);
    Address result = headerAddress + sizeof(*header);
    ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
    // Unpoison the memory used for the object (payload).
    ASAN_UNPOISON_MEMORY_REGION(result, payloadSize);
    memset(result, 0, payloadSize);
    ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1));
    return result;
}
// FIXME: Allocate objects that do not need finalization separately
// and use separate sweeping to not have to check for finalizers.
template<typename T>
Address Heap::allocate(size_t size)
{
    ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
    ASSERT(state->isAllocationAllowed());
    BaseHeap* heap = state->heap(HeapTrait<T>::index);
    Address addr =
        static_cast<typename HeapTrait<T>::HeapType*>(heap)->allocate(size, GCInfoTrait<T>::get());
    return addr;
}
// FIXME: Allocate objects that do not need finalization separately
// and use separate sweeping to not have to check for finalizers.
template<typename T>
Address Heap::reallocate(void* previous, size_t size)
{
    if (!size) {
        // If the new size is 0 this is equivalent to either
        // free(previous) or malloc(0). In both cases we do
        // nothing and return 0.
        return 0;
    }
    ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
    ASSERT(state->isAllocationAllowed());
    // FIXME: Currently only supports raw allocation on the
    // GeneralHeap. Hence we assume the header is a
    // FinalizedHeapObjectHeader.
    ASSERT(HeapTrait<T>::index == GeneralHeap);
    BaseHeap* heap = state->heap(HeapTrait<T>::index);
    Address address = static_cast<typename HeapTrait<T>::HeapType*>(heap)->allocate(size, GCInfoTrait<T>::get());
    if (!previous) {
        // This is equivalent to malloc(size).
        return address;
    }
    FinalizedHeapObjectHeader* previousHeader = FinalizedHeapObjectHeader::fromPayload(previous);
    ASSERT(!previousHeader->hasFinalizer());
    ASSERT(previousHeader->gcInfo() == GCInfoTrait<T>::get());
    size_t copySize = previousHeader->payloadSize();
    if (copySize > size)
        copySize = size;
    memcpy(address, previous, copySize);
    return address;
}
class HeapAllocatorQuantizer {
public:
    template<typename T>
    static size_t quantizedSize(size_t count)
    {
        RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T));
        return HeapTrait<T>::HeapType::roundedAllocationSize(count * sizeof(T));
    }
    static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize;
};
class HeapAllocator {
public:
    typedef HeapAllocatorQuantizer Quantizer;
    typedef WebCore::Visitor Visitor;
    static const bool isGarbageCollected = true;

    template<typename Return, typename Metadata>
    static Return backingMalloc(size_t size)
    {
        return malloc<Return, Metadata>(size);
    }
    template<typename Return, typename Metadata>
    static Return zeroedBackingMalloc(size_t size)
    {
        return malloc<Return, Metadata>(size);
    }
    template<typename Return, typename Metadata>
    static Return malloc(size_t size)
    {
        return reinterpret_cast<Return>(Heap::allocate<Metadata>(size));
    }
    static void backingFree(void* address) { }
    static void free(void* address) { }
    template<typename T>
    static void* newArray(size_t bytes)
    {
        ASSERT_NOT_REACHED();
        return 0;
    }

    static void deleteArray(void* ptr)
    {
        ASSERT_NOT_REACHED();
    }

    static void markUsingGCInfo(Visitor* visitor, const void* buffer)
    {
        visitor->mark(buffer, FinalizedHeapObjectHeader::fromPayload(buffer)->traceCallback());
    }

    static void markNoTracing(Visitor* visitor, const void* t)
    {
        visitor->mark(t);
    }

    template<typename T, typename Traits>
    static void mark(Visitor* visitor, T& t)
    {
        CollectionBackingTraceTrait<Traits::needsTracing, Traits::isWeak, false, T, Traits>::mark(visitor, t);
    }

    template<typename T>
    static bool hasDeadMember(Visitor*, const T&)
    {
        return false;
    }

    template<typename T>
    static bool hasDeadMember(Visitor* visitor, const Member<T>& t)
    {
        ASSERT(visitor->isAlive(t));
        return false;
    }

    template<typename T>
    static bool hasDeadMember(Visitor* visitor, const WeakMember<T>& t)
    {
        return !visitor->isAlive(t);
    }

    template<typename T, typename U>
    static bool hasDeadMember(Visitor* visitor, const WTF::KeyValuePair<T, U>& t)
    {
        return hasDeadMember(visitor, t.key) || hasDeadMember(visitor, t.value);
    }

    static void registerWeakMembers(Visitor* visitor, const void* object, WeakPointerCallback callback)
    {
        visitor->registerWeakMembers(object, callback);
    }

    template<typename T>
    struct ResultType {
        typedef T* Type;
    };

    // The WTF classes use Allocator::VectorBackingHelper in order to find a
    // class to template their backing allocation operation on. For off-heap
    // allocations the VectorBackingHelper is a dummy class, since the class is
    // not used during allocation of backing. For on-heap allocations this
    // typedef ensures that the allocation is done with the correct templated
    // instantiation of the allocation function. This ensures the correct GC
    // map is written when backing allocations take place.
    template<typename T, typename Traits>
    struct VectorBackingHelper {
        typedef HeapVectorBacking<T, Traits> Type;
    };

    // Like the VectorBackingHelper, but this type is used for HashSet and
    // HashMap, both of which are implemented using HashTable.
    template<typename T, typename U, typename V, typename W, typename X>
    struct HashTableBackingHelper {
        typedef HeapHashTableBacking<T, U, V, W, X> Type;
    };

    template<typename T>
    struct OtherType {
        typedef T* Type;
    };

    template<typename T>
    static T& getOther(T* other)
    {
        return *other;
    }

private:
    template<typename T, size_t u, typename V> friend class WTF::Vector;
    template<typename T, typename U, typename V, typename W> friend class WTF::HashSet;
    template<typename T, typename U, typename V, typename W, typename X, typename Y> friend class WTF::HashMap;
};
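// WTF collections are placed on the managed heap by instantiating them with
// this allocator as their final template argument (a sketch; ExampleNode is
// the hypothetical type from above, and Oilpan's HeapVector/HeapHashSet
// aliases from other headers are the usual spelling for this):
//
//     Vector<Member<ExampleNode>, 0, HeapAllocator> vector;
//
// The GCInfoTrait specializations below provide the GCInfo records for
// exactly these instantiations.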
// The standard implementation of GCInfoTrait<T>::get() just returns a static
// from the class T, but we can't do that for HashMap, HashSet and Vector
// because they are in WTF and know nothing of GCInfos. Instead we have a
// specialization of GCInfoTrait for these three classes here.

template<typename Key, typename Value, typename T, typename U, typename V>
struct GCInfoTrait<HashMap<Key, Value, T, U, V, HeapAllocator> > {
    static const GCInfo* get() { return &info; }
    static const GCInfo info;
};

template<typename Key, typename Value, typename T, typename U, typename V>
const GCInfo GCInfoTrait<HashMap<Key, Value, T, U, V, HeapAllocator> >::info = {
    getTypeMarker<HashMap<Key, Value, T, U, V, HeapAllocator> >(),
    TraceTrait<HashMap<Key, Value, T, U, V, HeapAllocator> >::trace,
    0,
    false, // HashMap needs no finalizer.
};

template<typename T, typename U, typename V>
struct GCInfoTrait<HashSet<T, U, V, HeapAllocator> > {
    static const GCInfo* get() { return &info; }
    static const GCInfo info;
};

template<typename T, typename U, typename V>
const GCInfo GCInfoTrait<HashSet<T, U, V, HeapAllocator> >::info = {
    getTypeMarker<HashSet<T, U, V, HeapAllocator> >(),
    TraceTrait<HashSet<T, U, V, HeapAllocator> >::trace,
    0,
    false, // HashSet needs no finalizer.
};

template<typename T>
struct GCInfoTrait<Vector<T, 0, HeapAllocator> > {
    static const GCInfo* get() { return &info; }
    static const GCInfo info;
};

template<typename T>
const GCInfo GCInfoTrait<Vector<T, 0, HeapAllocator> >::info = {
    getTypeMarker<Vector<T, 0, HeapAllocator> >(),
    TraceTrait<Vector<T, 0, HeapAllocator> >::trace,
    0,
    false, // Vector needs no finalizer if it has no inline capacity.
};

template<typename T, size_t inlineCapacity>
struct GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > {
    static const GCInfo* get() { return &info; }
    static const GCInfo info;
};

template<typename T, size_t inlineCapacity>
struct FinalizerTrait<Vector<T, inlineCapacity, HeapAllocator> > : public FinalizerTraitImpl<Vector<T, inlineCapacity, HeapAllocator>, true> { };

template<typename T, size_t inlineCapacity>
const GCInfo GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> >::info = {
    getTypeMarker<Vector<T, inlineCapacity, HeapAllocator> >(),
    TraceTrait<Vector<T, inlineCapacity, HeapAllocator> >::trace,
    FinalizerTrait<Vector<T, inlineCapacity, HeapAllocator> >::finalize,
    // Finalizer is needed to destruct things stored in the inline capacity.
    inlineCapacity && VectorTraits<T>::needsDestruction,
};

template<typename T, typename Traits>
struct GCInfoTrait<HeapVectorBacking<T, Traits> > {
    static const GCInfo* get() { return &info; }
    static const GCInfo info;
};

template<typename T, typename Traits>
const GCInfo GCInfoTrait<HeapVectorBacking<T, Traits> >::info = {
    getTypeMarker<HeapVectorBacking<T, Traits> >(),
    TraceTrait<HeapVectorBacking<T, Traits> >::trace,
    FinalizerTrait<HeapVectorBacking<T, Traits> >::finalize,
    Traits::needsDestruction,
};

template<typename T, typename U, typename V, typename W, typename X>
struct GCInfoTrait<HeapHashTableBacking<T, U, V, W, X> > {
    static const GCInfo* get() { return &info; }
    static const GCInfo info;
};

template<typename T, typename U, typename V, typename Traits, typename W>
const GCInfo GCInfoTrait<HeapHashTableBacking<T, U, V, Traits, W> >::info = {
    getTypeMarker<HeapHashTableBacking<T, U, V, Traits, W> >(),
    TraceTrait<HeapHashTableBacking<T, U, V, Traits, W> >::trace,
    FinalizerTrait<HeapHashTableBacking<T, U, V, Traits, W> >::finalize,
    Traits::needsDestruction,
};
template<bool markWeakMembersStrongly, typename T, typename Traits>
struct BaseVisitVectorBackingTrait {
    static void mark(WebCore::Visitor* visitor, void* self)
    {
        // The allocator can oversize the allocation a little, according to
        // the allocation granularity. The extra size is included in the
        // payloadSize call below, since there is nowhere to store the
        // originally allocated memory. This assert ensures that visiting the
        // last bit of memory can't cause trouble.
        COMPILE_ASSERT(!Traits::needsTracing || sizeof(T) > allocationGranularity || Traits::canInitializeWithMemset, HeapOverallocationCanCauseSpuriousVisits);

        T* array = reinterpret_cast<T*>(self);
        WebCore::FinalizedHeapObjectHeader* header = WebCore::FinalizedHeapObjectHeader::fromPayload(self);
        // Use the payload size as recorded by the heap to determine how many
        // elements to mark.
        size_t length = header->payloadSize() / sizeof(T);
        for (size_t i = 0; i < length; i++)
            CollectionBackingTraceTrait<Traits::needsTracing, Traits::isWeak, markWeakMembersStrongly, T, Traits>::mark(visitor, array[i]);
    }
};
template<bool markWeakMembersStrongly, typename Key, typename Value, typename Extractor, typename Traits, typename KeyTraits>
struct BaseVisitHashTableBackingTrait {
    static void mark(WebCore::Visitor* visitor, void* self)
    {
        Value* array = reinterpret_cast<Value*>(self);
        WebCore::FinalizedHeapObjectHeader* header = WebCore::FinalizedHeapObjectHeader::fromPayload(self);
        size_t length = header->payloadSize() / sizeof(Value);
        for (size_t i = 0; i < length; i++) {
            if (!WTF::HashTableHelper<Value, Extractor, KeyTraits>::isEmptyOrDeletedBucket(array[i]))
                CollectionBackingTraceTrait<Traits::needsTracing, Traits::isWeak, markWeakMembersStrongly, Value, Traits>::mark(visitor, array[i]);
        }
    }
};

template<bool markWeakMembersStrongly, typename Key, typename Value, typename Traits>
struct BaseVisitKeyValuePairTrait {
    static void mark(WebCore::Visitor* visitor, WTF::KeyValuePair<Key, Value>& self)
    {
        ASSERT(Traits::needsTracing || (Traits::isWeak && markWeakMembersStrongly));
        CollectionBackingTraceTrait<Traits::KeyTraits::needsTracing, Traits::KeyTraits::isWeak, markWeakMembersStrongly, Key, typename Traits::KeyTraits>::mark(visitor, self.key);
        CollectionBackingTraceTrait<Traits::ValueTraits::needsTracing, Traits::ValueTraits::isWeak, markWeakMembersStrongly, Value, typename Traits::ValueTraits>::mark(visitor, self.value);
    }
};
// FFX - Things that don't need marking and have no weak pointers.
template<bool markWeakMembersStrongly, typename T, typename U>
struct CollectionBackingTraceTrait<false, false, markWeakMembersStrongly, T, U> {
    static void mark(Visitor*, const T&) { }
    static void mark(Visitor*, const void*) { }
};

// FTF - Things that don't need marking. They have weak pointers, but we are
// not marking weak pointers in this object in this GC.
template<typename T, typename U>
struct CollectionBackingTraceTrait<false, true, false, T, U> {
    static void mark(Visitor*, const T&) { }
    static void mark(Visitor*, const void*) { }
};

// For each type that we understand we have the FTT case and the TXX case. The
// FTT case is where we would not normally need to mark it, but it has weak
// pointers, and we are marking them as strong. The TXX case is the regular
// case for things that need marking.

// FTT (vector)
template<typename T, typename Traits>
struct CollectionBackingTraceTrait<false, true, true, HeapVectorBacking<T, Traits>, void> : public BaseVisitVectorBackingTrait<true, T, Traits> {
};

// TXX (vector)
template<bool isWeak, bool markWeakMembersStrongly, typename T, typename Traits>
struct CollectionBackingTraceTrait<true, isWeak, markWeakMembersStrongly, HeapVectorBacking<T, Traits>, void> : public BaseVisitVectorBackingTrait<markWeakMembersStrongly, T, Traits> {
};

// FTT (hash table)
template<typename Key, typename Value, typename Extractor, typename Traits, typename KeyTraits>
struct CollectionBackingTraceTrait<false, true, true, HeapHashTableBacking<Key, Value, Extractor, Traits, KeyTraits>, void> : public BaseVisitHashTableBackingTrait<true, Key, Value, Extractor, Traits, KeyTraits> {
};

// TXX (hash table)
template<bool isWeak, bool markWeakMembersStrongly, typename Key, typename Value, typename Extractor, typename Traits, typename KeyTraits>
struct CollectionBackingTraceTrait<true, isWeak, markWeakMembersStrongly, HeapHashTableBacking<Key, Value, Extractor, Traits, KeyTraits>, void> : public BaseVisitHashTableBackingTrait<markWeakMembersStrongly, Key, Value, Extractor, Traits, KeyTraits> {
};

// FTT (key value pair)
template<typename Key, typename Value, typename Traits>
struct CollectionBackingTraceTrait<false, true, true, WTF::KeyValuePair<Key, Value>, Traits> : public BaseVisitKeyValuePairTrait<true, Key, Value, Traits> {
};

// TXX (key value pair)
template<bool isWeak, bool markWeakMembersStrongly, typename Key, typename Value, typename Traits>
struct CollectionBackingTraceTrait<true, isWeak, markWeakMembersStrongly, WTF::KeyValuePair<Key, Value>, Traits> : public BaseVisitKeyValuePairTrait<markWeakMembersStrongly, Key, Value, Traits> {
};
// TXX (member)
template<bool markWeakMembersStrongly, typename T, typename Traits>
struct CollectionBackingTraceTrait<true, false, markWeakMembersStrongly, Member<T>, Traits> {
    static void mark(WebCore::Visitor* visitor, Member<T> self)
    {
        visitor->mark(self.get());
    }
};

// FTT (weak member)
template<typename T, typename Traits>
struct CollectionBackingTraceTrait<false, true, true, WeakMember<T>, Traits> {
    static void mark(WebCore::Visitor* visitor, WeakMember<T> self)
    {
        // This can mark weak members as if they were strong. The reason we
        // need this is that we don't do weak processing unless we reach the
        // backing only through the hash table. Reaching it in any other way
        // makes it impossible to update the size and deleted slot count of the
        // table, and exposes us to weak processing during iteration issues.
        visitor->mark(self.get());
    }
};

// Catch-all for things that have a way to trace. For things that contain weak
// pointers they will generally be visited weakly even if
// markWeakMembersStrongly is true. This is what you want.
template<bool isWeak, bool markWeakMembersStrongly, typename T, typename Traits>
struct CollectionBackingTraceTrait<true, isWeak, markWeakMembersStrongly, T, Traits> {
    static void mark(WebCore::Visitor* visitor, T& t)
    {
        TraceTrait<T>::trace(visitor, reinterpret_cast<void*>(&t));
    }
};
template<typename T, typename Traits>
struct TraceTrait<HeapVectorBacking<T, Traits> > {
    typedef HeapVectorBacking<T, Traits> Backing;

    static void trace(WebCore::Visitor* visitor, void* self)
    {
        COMPILE_ASSERT(!Traits::isWeak, WeDontSupportWeaknessInHeapVectors);
        if (Traits::needsTracing)
            CollectionBackingTraceTrait<Traits::needsTracing, false, false, HeapVectorBacking<T, Traits>, void>::mark(visitor, self);
    }

    static void mark(Visitor* visitor, const Backing* backing)
    {
        visitor->mark(backing, &trace);
    }

    static void checkTypeMarker(Visitor* visitor, const Backing* backing)
    {
#ifndef NDEBUG
        visitor->checkTypeMarker(const_cast<Backing*>(backing), getTypeMarker<Backing>());
#endif
    }
};

// The trace trait for the heap hashtable backing is used when we find a
// direct pointer to the backing from the conservative stack scanner. This
// normally indicates that there is an ongoing iteration over the table, and so
// we disable weak processing of table entries. When the backing is found
// through the owning hash table we mark differently, in order to do weak
// processing.
template<typename Key, typename Value, typename Extractor, typename Traits, typename KeyTraits>
struct TraceTrait<HeapHashTableBacking<Key, Value, Extractor, Traits, KeyTraits> > {
    typedef HeapHashTableBacking<Key, Value, Extractor, Traits, KeyTraits> Backing;
    static const bool needsTracing = (Traits::needsTracing || Traits::isWeak);

    static void trace(WebCore::Visitor* visitor, void* self)
    {
        if (needsTracing)
            CollectionBackingTraceTrait<Traits::needsTracing, Traits::isWeak, true, HeapHashTableBacking<Key, Value, Extractor, Traits, KeyTraits>, void>::mark(visitor, self);
    }

    static void mark(Visitor* visitor, const Backing* backing)
    {
        if (needsTracing)
            visitor->mark(backing, &trace);
        else
            visitor->mark(backing, 0);
    }

    static void checkTypeMarker(Visitor* visitor, const Backing* backing)
    {
#ifndef NDEBUG
        visitor->checkTypeMarker(const_cast<Backing*>(backing), getTypeMarker<Backing>());
#endif
    }
};
template<typename T>
struct IfWeakMember;

template<typename T>
struct IfWeakMember {
    template<typename U>
    static bool isDead(Visitor*, const U&) { return false; }
};

template<typename T>
struct IfWeakMember<WeakMember<T> > {
    static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); }
};
template<typename K, typename V, typename HashFunctions, typename KeyTraits, typename ValueTraits>
void processWeakOffHeapHashMap(Visitor* visitor, void* self)
{
    typedef HashMap<K, V, WTF::DefaultAllocator, HashFunctions, KeyTraits, ValueTraits> Map;
    Map* map = reinterpret_cast<Map*>(self);
    // Collect up keys here because we can't modify a hash map while iterating
    // over it.
    Vector<K> deletionKeys;
    ASSERT(KeyTraits::isWeak || ValueTraits::isWeak);
    typedef typename Map::iterator Iterator;
    Iterator endIterator(map->end());
    for (Iterator it = map->begin(); it != endIterator; ++it) {
        if (IfWeakMember<K>::isDead(visitor, it->key))
            deletionKeys.append(it->key);
        else if (IfWeakMember<V>::isDead(visitor, it->value))
            deletionKeys.append(it->key);
    }
    size_t size = deletionKeys.size();
    if (size == map->size()) {
        // Every entry is dead; clear the whole map at once.
        map->clear();
        return;
    }
    for (size_t i = 0; i < size; i++)
        map->remove(deletionKeys[i]);
}
template<typename T, typename HashFunctions, typename Traits>
void processWeakOffHeapHashSet(Visitor* visitor, void* self)
{
    typedef HashSet<T, WTF::DefaultAllocator, HashFunctions, Traits> Set;
    Set* set = reinterpret_cast<Set*>(self);
    ASSERT(Traits::isWeak);
    // Collect up keys here because we can't modify a hash set while iterating
    // over it.
    Vector<T> deletionKeys;
    typedef typename Set::iterator Iterator;
    Iterator endIterator(set->end());
    for (Iterator it = set->begin(); it != endIterator; ++it) {
        if (IfWeakMember<T>::isDead(visitor, *it))
            deletionKeys.append(*it);
    }
    size_t size = deletionKeys.size();
    if (size == set->size()) {
        // Every entry is dead; clear the whole set at once.
        set->clear();
        return;
    }
    for (size_t i = 0; i < size; i++)
        set->remove(deletionKeys[i]);
}
// Clang does not export the symbols that we have explicitly asked it
// to export. This forces it to export all the methods from ThreadHeap.
template<> void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo*);
template<> void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo*);
extern template class HEAP_EXPORT ThreadHeap<FinalizedHeapObjectHeader>;
extern template class HEAP_EXPORT ThreadHeap<HeapObjectHeader>;

} // namespace WebCore

#endif // Heap_h