2 * Copyright (C) 2013 Google Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include "heap/AddressSanitizer.h"
35 #include "heap/HeapExport.h"
36 #include "heap/ThreadState.h"
37 #include "heap/Visitor.h"
39 #include "wtf/Assertions.h"
40 #include "wtf/OwnPtr.h"
41 #include "wtf/PassRefPtr.h"
// Blink heap pages are 2^17 = 128 KiB.
const size_t blinkPageSizeLog2 = 17;
const size_t blinkPageSize = 1 << blinkPageSizeLog2;
const size_t blinkPageOffsetMask = blinkPageSize - 1;
const size_t blinkPageBaseMask = ~blinkPageOffsetMask;
// Double precision floats are more efficient when 8 byte aligned, so we 8 byte
// align all allocations even on 32 bit.
const size_t allocationGranularity = 8;
const size_t allocationMask = allocationGranularity - 1;
// One bit per allocation-granularity unit on a page, rounded up to whole bytes.
const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity);
// Space reserved for the object-start bitmap, rounded up to the allocation granularity.
const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask);
// Largest allocation the heap will service: 128 MiB.
const size_t maxHeapObjectSize = 1 << 27;

// Object header size-field encoding: the low three bits carry flags
// (mark, free-list, debug); the remaining bits carry the object size.
const size_t markBitMask = 1;
const size_t freeListMask = 2;
const size_t debugBitMask = 4;
const size_t sizeMask = ~7;
// Byte values used to zap freed ("freelist") and finalized object memory.
const uint8_t freelistZapValue = 42;
const uint8_t finalizedZapValue = 24;
68 template<ThreadAffinity affinity> class ThreadLocalPersistents;
69 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity > > class Persistent;
71 HEAP_EXPORT size_t osPageSize();
73 // Blink heap pages are set up with a guard page before and after the
75 inline size_t blinkPagePayloadSize()
77 return blinkPageSize - 2 * osPageSize();
80 // Blink heap pages are aligned to the Blink heap page size.
81 // Therefore, the start of a Blink page can be obtained by
82 // rounding down to the Blink page size.
83 inline Address roundToBlinkPageStart(Address address)
85 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
88 // Compute the amount of padding we have to add to a header to make
89 // the size of the header plus the padding a multiple of 8 bytes.
90 template<typename Header>
91 inline size_t headerPadding()
93 return (allocationGranularity - (sizeof(Header) % allocationGranularity)) % allocationGranularity;
96 // Masks an address down to the enclosing blink page base address.
97 inline Address blinkPageAddress(Address address)
99 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
104 // Sanity check for a page header address: the address of the page
105 // header should be OS page size away from being Blink page size
107 inline bool isPageHeaderAddress(Address address)
109 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize());
113 // Mask an address down to the enclosing oilpan heap page base address.
114 // All oilpan heap pages are aligned at blinkPageBase plus an OS page size.
115 // FIXME: Remove HEAP_EXPORT once we get a proper public interface to our typed heaps.
116 // This is only exported to enable tests in HeapTest.cpp.
117 HEAP_EXPORT inline Address pageHeaderAddress(Address address)
119 return blinkPageAddress(address) + osPageSize();
122 // Common header for heap pages.
125 BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo)
129 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
132 // Check if the given address could point to an object in this
133 // heap page. If so, find the start of that object and mark it
134 // using the given Visitor.
136 // Returns true if the object was found and marked, returns false
139 // This is used during conservative stack scanning to
140 // conservatively mark all objects that could be referenced from
142 virtual bool checkAndMarkPointer(Visitor*, Address) = 0;
144 Address address() { return reinterpret_cast<Address>(this); }
145 PageMemory* storage() const { return m_storage; }
146 const GCInfo* gcInfo() { return m_gcInfo; }
149 PageMemory* m_storage;
150 const GCInfo* m_gcInfo;
153 // Large allocations are allocated as separate objects and linked in a
156 // In order to use the same memory allocation routines for everything
157 // allocated in the heap, large objects are considered heap pages
158 // containing only one object.
160 // The layout of a large heap object is as follows:
162 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload |
163 template<typename Header>
164 class LargeHeapObject : public BaseHeapPage {
166 LargeHeapObject(PageMemory* storage, const GCInfo* gcInfo) : BaseHeapPage(storage, gcInfo)
168 COMPILE_ASSERT(!(sizeof(LargeHeapObject<Header>) & allocationMask), large_heap_object_header_misaligned);
171 virtual bool checkAndMarkPointer(Visitor*, Address);
173 void link(LargeHeapObject<Header>** previousNext)
175 m_next = *previousNext;
176 *previousNext = this;
179 void unlink(LargeHeapObject<Header>** previousNext)
181 *previousNext = m_next;
184 bool contains(Address object)
186 return (address() <= object) && (object <= (address() + size()));
189 LargeHeapObject<Header>* next()
196 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
199 Address payload() { return heapObjectHeader()->payload(); }
200 size_t payloadSize() { return heapObjectHeader()->payloadSize(); }
202 Header* heapObjectHeader()
204 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
205 return reinterpret_cast<Header*>(headerAddress);
210 void getStats(HeapStats&);
216 friend class ThreadHeap<Header>;
218 LargeHeapObject<Header>* m_next;
221 // The BasicObjectHeader is the minimal object header. It is used when
222 // encountering heap space of size allocationGranularity to mark it as
223 // as freelist entry.
224 class BasicObjectHeader {
227 explicit BasicObjectHeader(size_t encodedSize)
228 : m_size(encodedSize) { }
230 static size_t freeListEncodedSize(size_t size) { return size | freeListMask; }
233 bool isFree() { return m_size & freeListMask; }
236 size_t size() const { return m_size & sizeMask; }
// NOTE(review): extraction artifact — the leading numerals on each line
// are the original file's line numbers, and several lines (braces,
// access specifiers, constructor bodies, and the debug-only m_magic
// member that checkHeader() compares against) are missing from this
// view. Kept byte-identical; reconstruct from the original file.
242 // Our heap object layout is layered with the HeapObjectHeader closest
243 // to the payload, this can be wrapped in a FinalizedObjectHeader if the
244 // object is on the GeneralHeap and not on a specific TypedHeap.
245 // Finally if the object is a large object (> blinkPageSize/2) then it is
246 // wrapped with a LargeObjectHeader.
248 // Object memory layout:
249 // [ LargeObjectHeader | ] [ FinalizedObjectHeader | ] HeapObjectHeader | payload
250 // The [ ] notation denotes that the LargeObjectHeader and the FinalizedObjectHeader
251 // are independently optional.
252 class HeapObjectHeader : public BasicObjectHeader {
255 explicit HeapObjectHeader(size_t encodedSize)
256 : BasicObjectHeader(encodedSize)
263 HeapObjectHeader(size_t encodedSize, const GCInfo*)
264 : BasicObjectHeader(encodedSize)
270 inline void checkHeader() const;
271 inline bool isMarked() const;
274 inline void unmark();
276 inline Address payload();
277 inline size_t payloadSize();
278 inline Address payloadEnd();
280 inline void setDebugMark();
281 inline void clearDebugMark();
282 inline bool hasDebugMark() const;
284 // Zap magic number with a new magic number that means there was once an
285 // object allocated here, but it was freed because nobody marked it during
289 static void finalize(const GCInfo*, Address, size_t);
290 HEAP_EXPORT static HeapObjectHeader* fromPayload(const void*);
292 static const intptr_t magic = 0xc0de247;
293 static const intptr_t zappedMagic = 0xC0DEdead;
294 // The zap value for vtables should be < 4K to ensure it cannot be
295 // used for dispatch.
296 static const intptr_t zappedVTable = 0xd0d;
304 const size_t objectHeaderSize = sizeof(HeapObjectHeader);
// NOTE(review): extraction artifact — leading numerals are original file
// line numbers; braces, access specifiers, and the constructor's
// m_gcInfo initialization are missing from this view. Kept byte-identical.
306 // Each object on the GeneralHeap needs to carry a pointer to its
307 // own GCInfo structure for tracing and potential finalization.
308 class FinalizedHeapObjectHeader : public HeapObjectHeader {
311 FinalizedHeapObjectHeader(size_t encodedSize, const GCInfo* gcInfo)
312 : HeapObjectHeader(encodedSize)
317 inline Address payload();
318 inline size_t payloadSize();
321 const GCInfo* gcInfo() { return m_gcInfo; }
324 const char* typeMarker() { return m_gcInfo->m_typeMarker; }
327 TraceCallback traceCallback() { return m_gcInfo->m_trace; }
332 inline bool hasFinalizer() { return m_gcInfo->hasFinalizer(); }
334 HEAP_EXPORT static FinalizedHeapObjectHeader* fromPayload(const void*);
337 const GCInfo* m_gcInfo;
340 const size_t finalizedHeaderSize = sizeof(FinalizedHeapObjectHeader);
// Free-list entry reusing the HeapObjectHeader layout; intrusively
// linked through m_next.
// NOTE(review): extraction artifact — leading numerals are original file
// line numbers; method bodies for unlink()/link(), #endif lines for the
// ADDRESS_SANITIZER regions, and the asanMagic constants are missing
// from this view. Kept byte-identical.
342 class FreeListEntry : public HeapObjectHeader {
345 explicit FreeListEntry(size_t size)
346 : HeapObjectHeader(freeListEncodedSize(size))
349 #if !defined(NDEBUG) && !ASAN
350 // Zap free area with asterisks, aka 0x2a2a2a2a.
351 // For ASAN don't zap since we keep accounting in the freelist entry.
352 for (size_t i = sizeof(*this); i < size; i++)
353 reinterpret_cast<Address>(this)[i] = freelistZapValue;
354 ASSERT(size >= objectHeaderSize);
359 Address address() { return reinterpret_cast<Address>(this); }
362 void unlink(FreeListEntry** prevNext)
369 void link(FreeListEntry** prevNext)
375 #if defined(ADDRESS_SANITIZER)
377 bool shouldAddToFreeList()
379 // Init if not already magic.
380 if ((m_asanMagic & ~asanDeferMemoryReuseMask) != asanMagic) {
381 m_asanMagic = asanMagic | asanDeferMemoryReuseCount;
384 // Decrement if count part of asanMagic > 0.
385 if (m_asanMagic & asanDeferMemoryReuseMask)
387 return !(m_asanMagic & asanDeferMemoryReuseMask);
392 FreeListEntry* m_next;
393 #if defined(ADDRESS_SANITIZER)
394 unsigned m_asanMagic;
// NOTE(review): extraction artifact — leading numerals are original file
// line numbers; braces, access specifiers, the payload() declaration
// line, and #endif lines are missing from this view. Kept byte-identical.
398 // Representation of Blink heap pages.
400 // Pages are specialized on the type of header on the object they
401 // contain. If a heap page only contains a certain type of object all
402 // of the objects will have the same GCInfo pointer and therefore that
403 // pointer can be stored in the HeapPage instead of in the header of
404 // each object. In that case objects have only a HeapObjectHeader and
405 // not a FinalizedHeapObjectHeader saving a word per object.
406 template<typename Header>
407 class HeapPage : public BaseHeapPage {
409 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*);
411 void link(HeapPage**);
412 static void unlink(HeapPage*, HeapPage**);
416 bool contains(Address addr)
418 Address blinkPageStart = roundToBlinkPageStart(address());
419 return blinkPageStart <= addr && (blinkPageStart + blinkPageSize) > addr;
422 HeapPage* next() { return m_next; }
426 return address() + sizeof(*this) + headerPadding<Header>();
429 static size_t payloadSize()
431 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask;
434 Address end() { return payload() + payloadSize(); }
436 void getStats(HeapStats&);
439 void clearObjectStartBitMap();
440 void finalize(Header*);
441 virtual bool checkAndMarkPointer(Visitor*, Address);
442 ThreadHeap<Header>* heap() { return m_heap; }
443 #if defined(ADDRESS_SANITIZER)
444 void poisonUnmarkedObjects();
448 void populateObjectStartBitMap();
449 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; }
450 TraceCallback traceCallback(Header*);
452 HeapPage<Header>* m_next;
453 ThreadHeap<Header>* m_heap;
454 bool m_objectStartBitMapComputed;
455 uint8_t m_objectStartBitMap[reservedForObjectBitMap];
457 friend class ThreadHeap<Header>;
// NOTE(review): extraction artifact — leading numerals are original file
// line numbers; constructor declarations, the Entry default constructor
// head, m_address member, and braces are missing from this view. Kept
// byte-identical.
460 // A HeapContainsCache provides a fast way of taking an arbitrary
461 // pointer-sized word, and determining whether it can be interpreted
462 // as a pointer to an area that is managed by the garbage collected
463 // Blink heap. There is a cache of 'pages' that have previously been
464 // determined to be either wholly inside or wholly outside the
465 // heap. The size of these pages must be smaller than the allocation
466 // alignment of the heap pages. We determine on-heap-ness by rounding
467 // down the pointer to the nearest page and looking up the page in the
468 // cache. If there is a miss in the cache we ask the heap to determine
469 // the status of the pointer by iterating over all of the heap. The
470 // result is then cached in the two-way associative page cache.
472 // A HeapContainsCache is both a positive and negative
473 // cache. Therefore, it must be flushed both when new memory is added
474 // and when memory is removed from the Blink heap.
475 class HeapContainsCache {
480 bool contains(Address);
482 // Perform a lookup in the cache.
484 // If lookup returns false the argument address was not found in
485 // the cache and it is unknown if the address is in the Blink
488 // If lookup returns true the argument address was found in the
489 // cache. In that case, the address is in the heap if the base
490 // heap page out parameter is different from 0 and is not in the
491 // heap if the base heap page out parameter is 0.
492 bool lookup(Address, BaseHeapPage**);
494 // Add an entry to the cache. Use a 0 base heap page pointer to
495 // add a negative entry.
496 void addEntry(Address, BaseHeapPage*);
503 , m_containingPage(0)
507 Entry(Address address, BaseHeapPage* containingPage)
509 , m_containingPage(containingPage)
513 BaseHeapPage* containingPage() { return m_containingPage; }
514 Address address() { return m_address; }
518 BaseHeapPage* m_containingPage;
521 static const int numberOfEntriesLog2 = 12;
522 static const int numberOfEntries = 1 << numberOfEntriesLog2;
524 static size_t hash(Address);
526 WTF::OwnPtr<HeapContainsCache::Entry[]> m_entries;
528 friend class ThreadState;
// NOTE(review): extraction artifact — leading numerals are original file
// line numbers; the Item default constructor, m_object member, several
// method declarations, and braces are missing from this view. Kept
// byte-identical.
531 // The CallbackStack contains all the visitor callbacks used to trace and mark
532 // objects. A specific CallbackStack instance contains at most bufferSize elements.
533 // If more space is needed a new CallbackStack instance is created and chained
534 // together with the former instance. I.e. a logical CallbackStack can be made of
535 // multiple chained CallbackStack object instances.
536 // There are two logical callback stacks. One containing all the marking callbacks and
537 // one containing the weak pointer callbacks.
538 class CallbackStack {
540 CallbackStack(CallbackStack** first)
541 : m_limit(&(m_buffer[bufferSize]))
542 , m_current(&(m_buffer[0]))
554 void assertIsEmpty();
559 Item(void* object, VisitorCallback callback)
561 , m_callback(callback)
564 void* object() { return m_object; }
565 VisitorCallback callback() { return m_callback; }
569 VisitorCallback m_callback;
572 static void init(CallbackStack** first);
573 static void shutdown(CallbackStack** first);
574 bool popAndInvokeCallback(CallbackStack** first, Visitor*);
576 Item* allocateEntry(CallbackStack** first)
578 if (m_current < m_limit)
580 return (new CallbackStack(first))->allocateEntry(first);
584 static const size_t bufferSize = 8000;
585 Item m_buffer[bufferSize];
588 CallbackStack* m_next;
// NOTE(review): extraction artifact — leading numerals are original file
// line numbers; the class head ("class BaseHeap {"), access specifiers,
// and some comment continuation lines are missing from this view. Kept
// byte-identical.
591 // Non-template super class used to pass a heap around to other classes.
594 virtual ~BaseHeap() { }
596 // Find the page in this thread heap containing the given
597 // address. Returns 0 if the address is not contained in any
598 // page in this thread heap.
599 virtual BaseHeapPage* heapPageFromAddress(Address) = 0;
601 // Find the large object in this thread heap containing the given
602 // address. Returns 0 if the address is not contained in any
603 // page in this thread heap.
604 virtual BaseHeapPage* largeHeapObjectFromAddress(Address) = 0;
606 // Check if the given address could point to an object in this
607 // heap. If so, find the start of that object and mark it using
608 // the given Visitor.
610 // Returns true if the object was found and marked, returns false
613 // This is used during conservative stack scanning to
614 // conservatively mark all objects that could be referenced from
616 virtual bool checkAndMarkLargeHeapObject(Visitor*, Address) = 0;
618 // Sweep this part of the Blink heap. This finalizes dead objects
619 // and builds freelists for all the unused memory.
620 virtual void sweep() = 0;
622 // Forcefully finalize all objects in this part of the Blink heap
623 // (potentially with the exception of one object). This is used
624 // during thread termination to make sure that all objects for the
625 // dying thread are finalized.
626 virtual void assertEmpty() = 0;
628 virtual void clearFreeLists() = 0;
629 virtual void clearMarks() = 0;
631 virtual void getScannedStats(HeapStats&) = 0;
634 virtual void makeConsistentForGC() = 0;
635 virtual bool isConsistentForGC() = 0;
637 // Returns a bucket number for inserting a FreeListEntry of a
638 // given size. All FreeListEntries in the given bucket, n, have
640 static int bucketIndexForSize(size_t);
// NOTE(review): extraction artifact — leading numerals are original file
// line numbers; access specifiers, braces, the PagePoolEntry
// constructor's init list, and several member/comment lines are missing
// from this view. Kept byte-identical.
643 // Thread heaps represent a part of the per-thread Blink heap.
645 // Each Blink thread has a number of thread heaps: one general heap
646 // that contains any type of object and a number of heaps specialized
647 // for specific object types (such as Node).
649 // Each thread heap contains the functionality to allocate new objects
650 // (potentially adding new pages to the heap), to find and mark
651 // objects during conservative stack scanning and to sweep the set of
653 template<typename Header>
654 class ThreadHeap : public BaseHeap {
656 ThreadHeap(ThreadState*);
657 virtual ~ThreadHeap();
659 virtual BaseHeapPage* heapPageFromAddress(Address);
660 virtual BaseHeapPage* largeHeapObjectFromAddress(Address);
661 virtual bool checkAndMarkLargeHeapObject(Visitor*, Address);
662 virtual void sweep();
663 virtual void assertEmpty();
664 virtual void clearFreeLists();
665 virtual void clearMarks();
667 virtual void getScannedStats(HeapStats&);
670 virtual void makeConsistentForGC();
671 virtual bool isConsistentForGC();
673 ThreadState* threadState() { return m_threadState; }
674 HeapStats& stats() { return m_threadState->stats(); }
675 HeapContainsCache* heapContainsCache() { return m_threadState->heapContainsCache(); }
677 inline Address allocate(size_t, const GCInfo*);
678 void addToFreeList(Address, size_t);
679 void addPageToPool(HeapPage<Header>*);
680 inline static size_t roundedAllocationSize(size_t size)
682 return allocationSizeFromSize(size) - sizeof(Header);
686 // Once pages have been used for one thread heap they will never
687 // be reused for another thread heap. Instead of unmapping, we add
688 // the pages to a pool of pages to be reused later by this thread
689 // heap. This is done as a security feature to avoid type
690 // confusion. The heap is type segregated by having separate
691 // thread heaps for various types of objects. Holding on to pages
692 // ensures that the same virtual address space cannot be used for
693 // objects of another type than the type contained in this thread
695 class PagePoolEntry {
697 PagePoolEntry(PageMemory* storage, PagePoolEntry* next)
702 PageMemory* storage() { return m_storage; }
703 PagePoolEntry* next() { return m_next; }
706 PageMemory* m_storage;
707 PagePoolEntry* m_next;
710 HEAP_EXPORT Address outOfLineAllocate(size_t, const GCInfo*);
711 static size_t allocationSizeFromSize(size_t);
712 void addPageToHeap(const GCInfo*);
713 HEAP_EXPORT Address allocateLargeObject(size_t, const GCInfo*);
714 Address currentAllocationPoint() const { return m_currentAllocationPoint; }
715 size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
716 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); }
717 void setAllocationPoint(Address point, size_t size)
719 ASSERT(!point || heapPageFromAddress(point));
720 ASSERT(size <= HeapPage<Header>::payloadSize());
721 m_currentAllocationPoint = point;
722 m_remainingAllocationSize = size;
724 void ensureCurrentAllocation(size_t, const GCInfo*);
725 bool allocateFromFreeList(size_t);
727 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**);
729 void allocatePage(const GCInfo*);
730 PageMemory* takePageFromPool();
731 void clearPagePool();
734 Address m_currentAllocationPoint;
735 size_t m_remainingAllocationSize;
737 HeapPage<Header>* m_firstPage;
738 LargeHeapObject<Header>* m_firstLargeHeapObject;
740 int m_biggestFreeListIndex;
741 ThreadState* m_threadState;
743 // All FreeListEntries in the nth list have size >= 2^n.
744 FreeListEntry* m_freeLists[blinkPageSizeLog2];
746 // List of pages that have been previously allocated, but are now
748 PagePoolEntry* m_pagePool;
// Static facade over all thread heaps: allocation entry points,
// callback stacks for marking/weak processing, and GC driving.
// NOTE(review): extraction artifact — leading numerals are original file
// line numbers; access specifiers, init(), and some declarations are
// missing from this view. Kept byte-identical.
751 class HEAP_EXPORT Heap {
759 static void shutdown();
761 static bool contains(Address);
762 static bool contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); }
763 static bool contains(const void* pointer) { return contains(const_cast<void*>(pointer)); }
765 // Push a trace callback on the marking stack.
766 static void pushTraceCallback(void* containerObject, TraceCallback);
768 // Push a weak pointer callback on the weak callback stack.
769 static void pushWeakPointerCallback(void* containerObject, WeakPointerCallback);
771 // Pop the top of the marking stack and call the callback with the visitor
772 // and the object. Returns false when there is nothing more to do.
773 static bool popAndInvokeTraceCallback(Visitor*);
775 // Pop the top of the weak callback stack and call the callback with the visitor
776 // and the object. Returns false when there is nothing more to do.
777 static bool popAndInvokeWeakPointerCallback(Visitor*);
779 template<typename T> static Address allocate(size_t);
780 template<typename T> static Address reallocate(void* previous, size_t);
782 static void collectGarbage(ThreadState::StackState, GCType = Normal);
784 static void prepareForGC();
786 // Conservatively checks whether an address is a pointer in any of the thread
787 // heaps. If so marks the object pointed to as live.
788 static Address checkAndMarkPointer(Visitor*, Address);
790 // Collect heap stats for all threads attached to the Blink
791 // garbage collector. Should only be called during garbage
792 // collection where threads are known to be at safe points.
793 static void getStats(HeapStats*);
795 static bool isConsistentForGC();
796 static void makeConsistentForGC();
798 static CallbackStack* s_markingStack;
799 static CallbackStack* s_weakCallbackStack;
// NOTE(review): extraction artifact — leading numerals are original file
// line numbers; braces, the m_active member declaration, and the
// enter()/release() method heads are missing from this view (only their
// bodies' ThreadState calls remain). Kept byte-identical.
802 // The NoAllocationScope class is used in debug mode to catch unwanted
803 // allocations. E.g. allocations during GC.
804 template<ThreadAffinity Affinity>
805 class NoAllocationScope {
807 NoAllocationScope() : m_active(true) { enter(); }
809 explicit NoAllocationScope(bool active) : m_active(active) { enter(); }
811 NoAllocationScope(const NoAllocationScope& other) : m_active(other.m_active) { enter(); }
813 NoAllocationScope& operator=(const NoAllocationScope& other)
816 m_active = other.m_active;
821 ~NoAllocationScope() { release(); }
826 ThreadStateFor<Affinity>::state()->leaveNoAllocationScope();
835 ThreadStateFor<Affinity>::state()->enterNoAllocationScope();
// NOTE(review): extraction artifact — leading numerals are original file
// line numbers; the template head ("template<typename T> class
// GarbageCollected"), access specifiers, and the constructor that the
// ASSERT on line 877 belongs to are missing from this view. Kept
// byte-identical.
841 // Base class for objects allocated in the Blink garbage-collected
844 // Defines a 'new' operator that allocates the memory in the
845 // heap. 'delete' should not be called on objects that inherit from
848 // Instances of GarbageCollected will *NOT* get finalized. Their
849 // destructor will not be called. Therefore, only classes that have
850 // trivial destructors with no semantic meaning (including all their
851 // subclasses) should inherit from GarbageCollected. If there are
852 // non-trival destructors in a given class or any of its subclasses,
853 // GarbageCollectedFinalized should be used which guarantees that the
854 // destructor is called on an instance when the garbage collector
855 // determines that it is no longer reachable.
857 class GarbageCollected {
858 WTF_MAKE_NONCOPYABLE(GarbageCollected);
860 // For now direct allocation of arrays on the heap is not allowed.
861 void* operator new[](size_t size);
862 void operator delete[](void* p);
864 void* operator new(size_t size)
866 return Heap::allocate<T>(size);
869 void operator delete(void* p)
871 ASSERT_NOT_REACHED();
877 ASSERT(ThreadStateFor<ThreadingTrait<T>::Affinity>::state()->contains(reinterpret_cast<Address>(this)));
879 ~GarbageCollected() { }
// NOTE(review): extraction artifact — leading numerals are original file
// line numbers; the template head, access specifiers, the finalize()
// method head above line 904, and braces are missing from this view.
// Kept byte-identical.
882 // Base class for objects allocated in the Blink garbage-collected
885 // Defines a 'new' operator that allocates the memory in the
886 // heap. 'delete' should not be called on objects that inherit from
889 // Instances of GarbageCollectedFinalized will have their destructor
890 // called when the garbage collector determines that the object is no
893 class GarbageCollectedFinalized : public GarbageCollected<T> {
894 WTF_MAKE_NONCOPYABLE(GarbageCollectedFinalized);
897 // Finalize is called when the object is freed from the heap. By
898 // default finalization means calling the destructor on the
899 // object. Finalize can be overridden to support calling the
900 // destructor of a subclass. This is useful for objects without
901 // vtables that require explicit dispatching.
904 static_cast<T*>(this)->~T();
907 GarbageCollectedFinalized() { }
908 ~GarbageCollectedFinalized() { }
910 template<typename U> friend struct HasFinalizer;
911 template<typename U, bool> friend struct FinalizerTraitImpl;
// NOTE(review): extraction artifact — leading numerals are original file
// line numbers; the template head, the ref()/deref()/hasOneRef() method
// heads, the m_refCount member declaration, and the increment/decrement
// statements are missing from this view. Kept byte-identical.
914 // Base class for objects that are in the Blink garbage-collected heap
915 // and are still reference counted.
917 // This class should be used sparingly and only to gradually move
918 // objects from being reference counted to being managed by the blink
919 // garbage collector.
921 // While the current reference counting keeps one of these objects
922 // alive it will have a Persistent handle to itself allocated so we
923 // will not reclaim the memory. When the reference count reaches 0 the
924 // persistent handle will be deleted. When the garbage collector
925 // determines that there are no other references to the object it will
926 // be reclaimed and the destructor of the reclaimed object will be
927 // called at that time.
929 class RefCountedGarbageCollected : public GarbageCollectedFinalized<T> {
930 WTF_MAKE_NONCOPYABLE(RefCountedGarbageCollected);
933 RefCountedGarbageCollected()
936 m_keepAlive = new Persistent<T>(static_cast<T*>(this));
939 // Implement method to increase reference count for use with
942 // In contrast to the normal WTF::RefCounted, the reference count
943 // can reach 0 and increase again. This happens in the following
946 // (1) The reference count becomes 0, but members, persistents, or
947 // on-stack pointers keep references to the object.
949 // (2) The pointer is assigned to a RefPtr again and the reference
952 // In this case, we have to resurrect m_keepAlive.
955 if (UNLIKELY(!m_refCount)) {
956 ASSERT(!m_keepAlive);
957 ASSERT(ThreadStateFor<ThreadingTrait<T>::Affinity>::state()->contains(reinterpret_cast<Address>(this)));
958 m_keepAlive = new Persistent<T>(static_cast<T*>(this));
963 // Implement method to decrease reference count for use with
966 // In contrast to the normal WTF::RefCounted implementation, the
967 // object itself is not deleted when the reference count reaches
968 // 0. Instead, the keep-alive persistent handle is deallocated so
969 // that the object can be reclaimed when the garbage collector
970 // determines that there are no other references to the object.
973 ASSERT(m_refCount > 0);
982 return m_refCount == 1;
986 ~RefCountedGarbageCollected() { }
990 Persistent<T>* m_keepAlive;
// NOTE(review): extraction artifact — the template head and the body
// after the ASSERT (presumably relinquishing the initial ref and
// returning ptr — confirm against the original file) are missing from
// this view. Kept byte-identical.
994 T* adoptRefCountedGarbageCollected(T* ptr)
996 ASSERT(ptr->hasOneRef());
// Macros forbidding (or restricting to placement) heap allocation of a
// class.
// NOTE(review): extraction artifact — leading numerals are original file
// line numbers; the "private:" continuation lines, line-continuation
// backslashes, and the #else/#endif of the COMPILER_SUPPORTS branch are
// missing from this view. Kept byte-identical.
1002 #if COMPILER_SUPPORTS(CXX_DELETED_FUNCTIONS)
1003 #define DISALLOW_ALLOCATION() \
1005 void* operator new(size_t) = delete;
1007 #define DISALLOW_ALLOCATION() \
1009 void* operator new(size_t);
1012 #define ALLOW_ONLY_INLINE_ALLOCATION() \
1014 void* operator new(size_t, NotNullTag, void* location) { return location; } \
1015 void* operator new(size_t, void* location) { return location; } \
1016 DISALLOW_ALLOCATION()
1019 void HeapObjectHeader::checkHeader() const
1021 ASSERT(m_magic == magic);
1024 Address HeapObjectHeader::payload()
1026 return reinterpret_cast<Address>(this) + objectHeaderSize;
1029 size_t HeapObjectHeader::payloadSize()
1031 return size() - objectHeaderSize;
1034 Address HeapObjectHeader::payloadEnd()
1036 return reinterpret_cast<Address>(this) + size();
1040 void HeapObjectHeader::mark()
1043 m_size |= markBitMask;
1046 Address FinalizedHeapObjectHeader::payload()
1048 return reinterpret_cast<Address>(this) + finalizedHeaderSize;
1051 size_t FinalizedHeapObjectHeader::payloadSize()
1053 return size() - finalizedHeaderSize;
1056 template<typename Header>
1057 size_t ThreadHeap<Header>::allocationSizeFromSize(size_t size)
1059 // Check the size before computing the actual allocation size. The
1060 // allocation size calculation can overflow for large sizes and
1061 // the check therefore has to happen before any calculation on the
1063 RELEASE_ASSERT(size < maxHeapObjectSize);
1065 // Add space for header.
1066 size_t allocationSize = size + sizeof(Header);
1067 // Align size with allocation granularity.
1068 allocationSize = (allocationSize + allocationMask) & ~allocationMask;
1069 return allocationSize;
1072 template<typename Header>
1073 Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo)
1075 size_t allocationSize = allocationSizeFromSize(size);
1076 bool isLargeObject = allocationSize > blinkPageSize / 2;
1078 return allocateLargeObject(allocationSize, gcInfo);
1079 if (m_remainingAllocationSize < allocationSize)
1080 return outOfLineAllocate(size, gcInfo);
1081 Address headerAddress = m_currentAllocationPoint;
1082 m_currentAllocationPoint += allocationSize;
1083 m_remainingAllocationSize -= allocationSize;
1084 Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo);
1085 size_t payloadSize = allocationSize - sizeof(Header);
1086 stats().increaseObjectSpace(payloadSize);
1087 Address result = headerAddress + sizeof(*header);
1088 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
1089 // Unpoison the memory used for the object (payload).
1090 ASAN_UNPOISON_MEMORY_REGION(result, payloadSize);
1091 memset(result, 0, payloadSize);
1092 ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1));
1096 // FIXME: Allocate objects that do not need finalization separately
1097 // and use separate sweeping to not have to check for finalizers.
1098 template<typename T>
1099 Address Heap::allocate(size_t size)
1101 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
1102 ASSERT(state->isAllocationAllowed());
1103 BaseHeap* heap = state->heap(HeapTrait<T>::index);
1105 static_cast<typename HeapTrait<T>::HeapType*>(heap)->allocate(size, GCInfoTrait<T>::get());
1109 // FIXME: Allocate objects that do not need finalization separately
1110 // and use separate sweeping to not have to check for finalizers.
1111 template<typename T>
1112 Address Heap::reallocate(void* previous, size_t size)
1115 // If the new size is 0 this is equivalent to either
1116 // free(previous) or malloc(0). In both cases we do
1117 // nothing and return 0.
1120 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
1121 ASSERT(state->isAllocationAllowed());
1122 // FIXME: Currently only supports raw allocation on the
1123 // GeneralHeap. Hence we assume the header is a
1124 // FinalizedHeapObjectHeader.
1125 ASSERT(HeapTrait<T>::index == GeneralHeap);
1126 BaseHeap* heap = state->heap(HeapTrait<T>::index);
1127 Address address = static_cast<typename HeapTrait<T>::HeapType*>(heap)->allocate(size, GCInfoTrait<T>::get());
1129 // This is equivalent to malloc(size).
1132 FinalizedHeapObjectHeader* previousHeader = FinalizedHeapObjectHeader::fromPayload(previous);
1133 ASSERT(!previousHeader->hasFinalizer());
1134 ASSERT(previousHeader->gcInfo() == GCInfoTrait<T>::get());
1135 size_t copySize = previousHeader->payloadSize();
1136 if (copySize > size)
1138 memcpy(address, previous, copySize);
1142 class HeapAllocatorQuantizer {
1144 template<typename T>
1145 static size_t quantizedSize(size_t count)
1147 RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T));
1148 return HeapTrait<T>::HeapType::roundedAllocationSize(count * sizeof(T));
1150 static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize;
1153 class HeapAllocator {
1155 typedef HeapAllocatorQuantizer Quantizer;
1156 typedef WebCore::Visitor Visitor;
1157 static const bool isGarbageCollected = true;
1159 template <typename Return, typename Metadata>
1160 static Return backingMalloc(size_t size)
1162 return malloc<Return, Metadata>(size);
1164 template <typename Return, typename Metadata>
1165 static Return zeroedBackingMalloc(size_t size)
1167 return malloc<Return, Metadata>(size);
1169 template <typename Return, typename Metadata>
1170 static Return malloc(size_t size)
1172 return reinterpret_cast<Return>(Heap::allocate<Metadata>(size));
1174 static void backingFree(void* address) { }
1175 static void free(void* address) { }
1176 template<typename T>
1177 static void* newArray(size_t bytes)
1179 ASSERT_NOT_REACHED();
1183 static void deleteArray(void* ptr)
1185 ASSERT_NOT_REACHED();
1188 static void markUsingGCInfo(Visitor* visitor, const void* buffer)
1190 visitor->mark(buffer, FinalizedHeapObjectHeader::fromPayload(buffer)->traceCallback());
1193 static void markNoTracing(Visitor* visitor, const void* t)
1198 template<typename T, typename Traits>
1199 static void trace(Visitor* visitor, T& t)
1201 CollectionBackingTraceTrait<Traits::needsTracing, Traits::isWeak, false, T, Traits>::mark(visitor, t);
1204 template<typename T>
1205 static bool hasDeadMember(Visitor*, const T&)
1210 template<typename T>
1211 static bool hasDeadMember(Visitor* visitor, const Member<T>& t)
1213 ASSERT(visitor->isAlive(t));
1217 template<typename T>
1218 static bool hasDeadMember(Visitor* visitor, const WeakMember<T>& t)
1220 return !visitor->isAlive(t);
1223 template<typename T, typename U>
1224 static bool hasDeadMember(Visitor* visitor, const WTF::KeyValuePair<T, U>& t)
1226 return hasDeadMember(visitor, t.key) || hasDeadMember(visitor, t.value);
1229 static void registerWeakMembers(Visitor* visitor, const void* object, WeakPointerCallback callback)
1231 visitor->registerWeakMembers(object, callback);
1234 template<typename T>
1239 // The WTF classes use Allocator::VectorBackingHelper in order to find a
1240 // class to template their backing allocation operation on. For off-heap
1241 // allocations the VectorBackingHelper is a dummy class, since the class is
1242 // not used during allocation of backing. For on-heap allocations this
1243 // typedef ensures that the allocation is done with the correct templated
1244 // instantiation of the allocation function. This ensures the correct GC
1245 // map is written when backing allocations take place.
1246 template<typename T, typename Traits>
1247 struct VectorBackingHelper {
1248 typedef HeapVectorBacking<T, Traits> Type;
1251 // Like the VectorBackingHelper, but this type is used for HashSet and
1252 // HashMap, both of which are implemented using HashTable.
1253 template<typename T, typename U, typename V, typename W, typename X>
1254 struct HashTableBackingHelper {
1255 typedef HeapHashTableBacking<T, U, V, W, X> Type;
1258 template<typename T>
1263 template<typename T>
1264 static T& getOther(T* other)
1270 template<typename T, size_t u, typename V> friend class WTF::Vector;
1271 template<typename T, typename U, typename V, typename W> friend class WTF::HashSet;
1272 template<typename T, typename U, typename V, typename W, typename X, typename Y> friend class WTF::HashMap;
1275 // FIXME: These should just be template aliases:
1277 // template<typename T, size_t inlineCapacity = 0>
1278 // using HeapVector = Vector<T, inlineCapacity, HeapAllocator>;
1280 // as soon as all the compilers we care about support that.
1281 // MSVC supports it only in MSVC 2013.
1285 typename HashArg = typename DefaultHash<KeyArg>::Hash,
1286 typename KeyTraitsArg = HashTraits<KeyArg>,
1287 typename MappedTraitsArg = HashTraits<MappedArg> >
1288 class HeapHashMap : public HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg, HeapAllocator> { };
1292 typename HashArg = typename DefaultHash<ValueArg>::Hash,
1293 typename TraitsArg = HashTraits<ValueArg> >
1294 class HeapHashSet : public HashSet<ValueArg, HashArg, TraitsArg, HeapAllocator> { };
1296 template<typename T, size_t inlineCapacity = 0>
1297 class HeapVector : public Vector<T, inlineCapacity, HeapAllocator> {
1301 explicit HeapVector(size_t size) : Vector<T, inlineCapacity, HeapAllocator>(size)
1305 template<size_t otherCapacity>
1306 HeapVector(const HeapVector<T, otherCapacity>& other)
1307 : Vector<T, inlineCapacity, HeapAllocator>(other)
1311 template<typename U>
1312 void append(const U& other)
1314 Vector<T, inlineCapacity, HeapAllocator>::append(other);
1317 template<typename U, size_t otherCapacity>
1318 void append(const HeapVector<U, otherCapacity>& other)
1320 const Vector<U, otherCapacity, HeapAllocator>& otherVector = other;
1321 Vector<T, inlineCapacity, HeapAllocator>::append(otherVector);
1325 template<typename T>
1326 struct ThreadingTrait<Member<T> > {
1327 static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
1330 template<typename T>
1331 struct ThreadingTrait<WeakMember<T> > {
1332 static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
1335 template<typename Key, typename Value, typename T, typename U, typename V>
1336 struct ThreadingTrait<HashMap<Key, Value, HeapAllocator, T, U, V> > {
1337 static const ThreadAffinity Affinity =
1338 (ThreadingTrait<Key>::Affinity == MainThreadOnly)
1339 && (ThreadingTrait<Value>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
1342 template<typename T, typename U, typename V>
1343 struct ThreadingTrait<HashSet<T, HeapAllocator, U, V> > {
1344 static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
1348 template<typename T, size_t inlineCapacity>
1349 struct ThreadingTrait<Vector<T, inlineCapacity, HeapAllocator> > {
1350 static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
1353 template<typename T, typename Traits>
1354 struct ThreadingTrait<HeapVectorBacking<T, Traits> > {
1355 static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
1358 template<typename Key, typename Value, typename Extractor, typename Traits, typename KeyTraits>
1359 struct ThreadingTrait<HeapHashTableBacking<Key, Value, Extractor, Traits, KeyTraits> > {
1360 static const ThreadAffinity Affinity =
1361 (ThreadingTrait<Key>::Affinity == MainThreadOnly)
1362 && (ThreadingTrait<Value>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
1365 template<typename Key, typename Value>
1366 struct ThreadingTrait<HeapHashMap<Key, Value> > : public ThreadingTrait<HashMap<Key, Value, HeapAllocator> > { };
1367 template<typename Value>
1368 struct ThreadingTrait<HeapHashSet<Value> > : public ThreadingTrait<HashSet<Value, HeapAllocator> > { };
1369 template<typename T, size_t inlineCapacity>
1370 struct ThreadingTrait<HeapVector<T, inlineCapacity> > : public ThreadingTrait<Vector<T, inlineCapacity, HeapAllocator> > { };
1372 // The standard implementation of GCInfoTrait<T>::get() just returns a static
1373 // from the class T, but we can't do that for HashMap, HashSet and Vector
1374 // because they are in WTF and know nothing of GCInfos. Instead we have a
1375 // specialization of GCInfoTrait for these three classes here.
1377 template<typename Key, typename Value, typename T, typename U, typename V>
1378 struct GCInfoTrait<HashMap<Key, Value, T, U, V, HeapAllocator> > {
1379 static const GCInfo* get() { return &info; }
1380 static const GCInfo info;
1383 template<typename Key, typename Value, typename T, typename U, typename V>
1384 const GCInfo GCInfoTrait<HashMap<Key, Value, T, U, V, HeapAllocator> >::info = {
1386 TraceTrait<HashMap<Key, Value, T, U, V, HeapAllocator> >::trace,
1388 false, // HashMap needs no finalizer.
1391 template<typename T, typename U, typename V>
1392 struct GCInfoTrait<HashSet<T, U, V, HeapAllocator> > {
1393 static const GCInfo* get() { return &info; }
1394 static const GCInfo info;
1397 template<typename T, typename U, typename V>
1398 const GCInfo GCInfoTrait<HashSet<T, U, V, HeapAllocator> >::info = {
1400 TraceTrait<HashSet<T, U, V, HeapAllocator> >::trace,
1402 false, // HashSet needs no finalizer.
1405 template<typename T>
1406 struct GCInfoTrait<Vector<T, 0, HeapAllocator> > {
1407 static const GCInfo* get() { return &info; }
1408 static const GCInfo info;
1411 template<typename T>
1412 const GCInfo GCInfoTrait<Vector<T, 0, HeapAllocator> >::info = {
1414 TraceTrait<Vector<T, 0, HeapAllocator> >::trace,
1416 false, // Vector needs no finalizer if it has no inline capacity.
1419 template<typename T, size_t inlineCapacity>
1420 struct GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > {
1421 static const GCInfo* get() { return &info; }
1422 static const GCInfo info;
1425 template<typename T, size_t inlineCapacity>
1426 struct FinalizerTrait<Vector<T, inlineCapacity, HeapAllocator> > : public FinalizerTraitImpl<Vector<T, inlineCapacity, HeapAllocator>, true> { };
1428 template<typename T, size_t inlineCapacity>
1429 const GCInfo GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> >::info = {
1431 TraceTrait<Vector<T, inlineCapacity, HeapAllocator> >::trace,
1432 FinalizerTrait<Vector<T, inlineCapacity, HeapAllocator> >::finalize,
1433 // Finalizer is needed to destruct things stored in the inline capacity.
1434 inlineCapacity && VectorTraits<T>::needsDestruction,
1437 template<typename T, typename Traits>
1438 struct GCInfoTrait<HeapVectorBacking<T, Traits> > {
1439 static const GCInfo* get() { return &info; }
1440 static const GCInfo info;
1443 template<typename T, typename Traits>
1444 const GCInfo GCInfoTrait<HeapVectorBacking<T, Traits> >::info = {
1446 TraceTrait<HeapVectorBacking<T, Traits> >::trace,
1447 FinalizerTrait<HeapVectorBacking<T, Traits> >::finalize,
1448 Traits::needsDestruction,
1451 template<typename T, typename U, typename V, typename W, typename X>
1452 struct GCInfoTrait<HeapHashTableBacking<T, U, V, W, X> > {
1453 static const GCInfo* get() { return &info; }
1454 static const GCInfo info;
1457 template<typename T, typename U, typename V, typename Traits, typename W>
1458 const GCInfo GCInfoTrait<HeapHashTableBacking<T, U, V, Traits, W> >::info = {
1460 TraceTrait<HeapHashTableBacking<T, U, V, Traits, W> >::trace,
1461 FinalizerTrait<HeapHashTableBacking<T, U, V, Traits, W> >::finalize,
1462 Traits::needsDestruction,
1465 template<bool markWeakMembersStrongly, typename T, typename Traits>
1466 struct BaseVisitVectorBackingTrait {
1467 static void mark(WebCore::Visitor* visitor, void* self)
1469 // The allocator can oversize the allocation a little, according to
1470 // the allocation granularity. The extra size is included in the
1471 // payloadSize call below, since there is nowhere to store the
1472 // originally allocated memory. This assert ensures that visiting the
1473 // last bit of memory can't cause trouble.
1474 COMPILE_ASSERT(!Traits::needsTracing || sizeof(T) > allocationGranularity || Traits::canInitializeWithMemset, HeapOverallocationCanCauseSpuriousVisits);
1476 T* array = reinterpret_cast<T*>(self);
1477 WebCore::FinalizedHeapObjectHeader* header = WebCore::FinalizedHeapObjectHeader::fromPayload(self);
1478 // Use the payload size as recorded by the heap to determine how many
1479 // elements to mark.
1480 size_t length = header->payloadSize() / sizeof(T);
1481 for (size_t i = 0; i < length; i++)
1482 CollectionBackingTraceTrait<Traits::needsTracing, Traits::isWeak, markWeakMembersStrongly, T, Traits>::mark(visitor, array[i]);
1486 template<bool markWeakMembersStrongly, typename Key, typename Value, typename Extractor, typename Traits, typename KeyTraits>
1487 struct BaseVisitHashTableBackingTrait {
1488 static void mark(WebCore::Visitor* visitor, void* self)
1490 Value* array = reinterpret_cast<Value*>(self);
1491 WebCore::FinalizedHeapObjectHeader* header = WebCore::FinalizedHeapObjectHeader::fromPayload(self);
1492 size_t length = header->payloadSize() / sizeof(Value);
1493 for (size_t i = 0; i < length; i++) {
1494 if (!WTF::HashTableHelper<Value, Extractor, KeyTraits>::isEmptyOrDeletedBucket(array[i]))
1495 CollectionBackingTraceTrait<Traits::needsTracing, Traits::isWeak, markWeakMembersStrongly, Value, Traits>::mark(visitor, array[i]);
1500 template<bool markWeakMembersStrongly, typename Key, typename Value, typename Traits>
1501 struct BaseVisitKeyValuePairTrait {
1502 static void mark(WebCore::Visitor* visitor, WTF::KeyValuePair<Key, Value>& self)
1504 ASSERT(Traits::needsTracing || (Traits::isWeak && markWeakMembersStrongly));
1505 CollectionBackingTraceTrait<Traits::KeyTraits::needsTracing, Traits::KeyTraits::isWeak, markWeakMembersStrongly, Key, typename Traits::KeyTraits>::mark(visitor, self.key);
1506 CollectionBackingTraceTrait<Traits::ValueTraits::needsTracing, Traits::ValueTraits::isWeak, markWeakMembersStrongly, Value, typename Traits::ValueTraits>::mark(visitor, self.value);
1510 // FFX - Things that don't need marking and have no weak pointers.
1511 template<bool markWeakMembersStrongly, typename T, typename U>
1512 struct CollectionBackingTraceTrait<false, false, markWeakMembersStrongly, T, U> {
1513 static void mark(Visitor*, const T&) { }
1514 static void mark(Visitor*, const void*) { }
1517 // FTF - Things that don't need marking. They have weak pointers, but we are
1518 // not marking weak pointers in this object in this GC.
1519 template<typename T, typename U>
1520 struct CollectionBackingTraceTrait<false, true, false, T, U> {
1521 static void mark(Visitor*, const T&) { }
1522 static void mark(Visitor*, const void*) { }
1525 // For each type that we understand we have the FTT case and the TXX case. The
1526 // FTT case is where we would not normally need to mark it, but it has weak
1527 // pointers, and we are marking them as strong. The TXX case is the regular
1528 // case for things that need marking.
1531 template<typename T, typename Traits>
1532 struct CollectionBackingTraceTrait<false, true, true, HeapVectorBacking<T, Traits>, void> : public BaseVisitVectorBackingTrait<true, T, Traits> {
1536 template<bool isWeak, bool markWeakMembersStrongly, typename T, typename Traits>
1537 struct CollectionBackingTraceTrait<true, isWeak, markWeakMembersStrongly, HeapVectorBacking<T, Traits>, void> : public BaseVisitVectorBackingTrait<markWeakMembersStrongly, T, Traits> {
1541 template<typename Key, typename Value, typename Extractor, typename Traits, typename KeyTraits>
1542 struct CollectionBackingTraceTrait<false, true, true, HeapHashTableBacking<Key, Value, Extractor, Traits, KeyTraits>, void> : public BaseVisitHashTableBackingTrait<true, Key, Value, Extractor, Traits, KeyTraits> {
1546 template<bool isWeak, bool markWeakMembersStrongly, typename Key, typename Value, typename Extractor, typename Traits, typename KeyTraits>
1547 struct CollectionBackingTraceTrait<true, isWeak, markWeakMembersStrongly, HeapHashTableBacking<Key, Value, Extractor, Traits, KeyTraits>, void> : public BaseVisitHashTableBackingTrait<markWeakMembersStrongly, Key, Value, Extractor, Traits, KeyTraits> {
1550 // FTT (key value pair)
1551 template<typename Key, typename Value, typename Traits>
1552 struct CollectionBackingTraceTrait<false, true, true, WTF::KeyValuePair<Key, Value>, Traits> : public BaseVisitKeyValuePairTrait<true, Key, Value, Traits> {
1555 // TXX (key value pair)
1556 template<bool isWeak, bool markWeakMembersStrongly, typename Key, typename Value, typename Traits>
1557 struct CollectionBackingTraceTrait<true, isWeak, markWeakMembersStrongly, WTF::KeyValuePair<Key, Value>, Traits> : public BaseVisitKeyValuePairTrait<markWeakMembersStrongly, Key, Value, Traits> {
1562 template<bool markWeakMembersStrongly, typename T, typename Traits>
1563 struct CollectionBackingTraceTrait<true, false, markWeakMembersStrongly, Member<T>, Traits> {
1564 static void mark(WebCore::Visitor* visitor, Member<T> self)
1566 visitor->mark(self.get());
1570 // FTT (weak member)
1571 template<typename T, typename Traits>
1572 struct CollectionBackingTraceTrait<false, true, true, WeakMember<T>, Traits> {
1573 static void mark(WebCore::Visitor* visitor, WeakMember<T> self)
1575 // This can mark weak members as if they were strong. The reason we
1576 // need this is that we don't do weak processing unless we reach the
1577 // backing only through the hash table. Reaching it in any other way
1578 // makes it impossible to update the size and deleted slot count of the
1579 // table, and exposes us to weak processing during iteration issues.
1580 visitor->mark(self.get());
1584 // Catch-all for things that have a way to trace. For things that contain weak
1585 // pointers they will generally be visited weakly even if
1586 // markWeakMembersStrongly is true. This is what you want.
1587 template<bool isWeak, bool markWeakMembersStrongly, typename T, typename Traits>
1588 struct CollectionBackingTraceTrait<true, isWeak, markWeakMembersStrongly, T, Traits> {
1589 static void mark(WebCore::Visitor* visitor, T& t)
1591 TraceTrait<T>::trace(visitor, reinterpret_cast<void*>(&t));
1595 template<typename T, typename Traits>
1596 struct TraceTrait<HeapVectorBacking<T, Traits> > {
1597 typedef HeapVectorBacking<T, Traits> Backing;
1598 static void trace(WebCore::Visitor* visitor, void* self)
1600 COMPILE_ASSERT(!Traits::isWeak, WeDontSupportWeaknessInHeapVectors);
1601 if (Traits::needsTracing)
1602 CollectionBackingTraceTrait<Traits::needsTracing, false, false, HeapVectorBacking<T, Traits>, void>::mark(visitor, self);
1604 static void mark(Visitor* visitor, const Backing* backing)
1606 visitor->mark(backing, &trace);
1608 static void checkTypeMarker(Visitor* visitor, const Backing* backing)
1611 visitor->checkTypeMarker(const_cast<Backing*>(backing), getTypeMarker<Backing>());
1616 // The trace trait for the heap hashtable backing is used when we find a
1617 // direct pointer to the backing from the conservative stack scanner. This
1618 // normally indicates that there is an ongoing iteration over the table, and so
1619 // we disable weak processing of table entries. When the backing is found
1620 // through the owning hash table we mark differently, in order to do weak
1622 template<typename Key, typename Value, typename Extractor, typename Traits, typename KeyTraits>
1623 struct TraceTrait<HeapHashTableBacking<Key, Value, Extractor, Traits, KeyTraits> > {
1624 typedef HeapHashTableBacking<Key, Value, Extractor, Traits, KeyTraits> Backing;
1625 static const bool needsTracing = (Traits::needsTracing || Traits::isWeak);
1626 static void trace(WebCore::Visitor* visitor, void* self)
1629 CollectionBackingTraceTrait<Traits::needsTracing, Traits::isWeak, true, HeapHashTableBacking<Key, Value, Extractor, Traits, KeyTraits>, void>::mark(visitor, self);
1631 static void mark(Visitor* visitor, const Backing* backing)
1634 visitor->mark(backing, &trace);
1636 visitor->mark(backing, 0);
1638 static void checkTypeMarker(Visitor* visitor, const Backing* backing)
1641 visitor->checkTypeMarker(const_cast<Backing*>(backing), getTypeMarker<Backing>());
1646 template<typename T, typename U, typename V, typename W, typename X>
1647 struct GCInfoTrait<HeapHashMap<T, U, V, W, X> > : public GCInfoTrait<HashMap<T, U, V, W, X, HeapAllocator> > { };
1648 template<typename T, typename U, typename V>
1649 struct GCInfoTrait<HeapHashSet<T, U, V> > : public GCInfoTrait<HashSet<T, U, V, HeapAllocator> > { };
1650 template<typename T, size_t inlineCapacity>
1651 struct GCInfoTrait<HeapVector<T, inlineCapacity> > : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > { };
1653 template<typename T>
1654 struct IfWeakMember;
1656 template<typename T>
1657 struct IfWeakMember {
1658 template<typename U>
1659 static bool isDead(Visitor*, const U&) { return false; }
1662 template<typename T>
1663 struct IfWeakMember<WeakMember<T> > {
1664 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); }
1668 // Clang does not export the symbols that we have explicitly asked it
1669 // to export. This forces it to export all the methods from ThreadHeap.
1670 template<> void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo*);
1671 template<> void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo*);
1672 extern template class HEAP_EXPORT ThreadHeap<FinalizedHeapObjectHeader>;
1673 extern template class HEAP_EXPORT ThreadHeap<HeapObjectHeader>;