2 * Copyright (C) 2013 Google Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include "platform/heap/Heap.h"
34 #include "platform/ScriptForbiddenScope.h"
35 #include "platform/Task.h"
36 #include "platform/TraceEvent.h"
37 #include "platform/heap/CallbackStack.h"
38 #include "platform/heap/ThreadState.h"
39 #include "public/platform/Platform.h"
40 #include "wtf/AddressSpaceRandomization.h"
41 #include "wtf/Assertions.h"
42 #include "wtf/LeakAnnotations.h"
43 #include "wtf/PassOwnPtr.h"
44 #if ENABLE(GC_PROFILE_MARKING)
45 #include "wtf/HashMap.h"
46 #include "wtf/HashSet.h"
47 #include "wtf/text/StringBuilder.h"
48 #include "wtf/text/StringHash.h"
52 #if ENABLE(GC_PROFILE_HEAP)
53 #include "platform/TracedValue.h"
65 #if ENABLE(GC_PROFILE_MARKING)
66 static String classOf(const void* object)
68 const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)));
70 return gcInfo->m_className;
76 static bool vTableInitialized(void* objectPointer)
78 return !!(*reinterpret_cast<Address*>(objectPointer));
82 static bool IsPowerOf2(size_t power)
84 return !((power - 1) & power);
88 static Address roundToBlinkPageBoundary(void* base)
90 return reinterpret_cast<Address>((reinterpret_cast<uintptr_t>(base) + blinkPageOffsetMask) & blinkPageBaseMask);
93 static size_t roundToOsPageSize(size_t size)
95 return (size + osPageSize() - 1) & ~(osPageSize() - 1);
101 static const size_t pageSize = getpagesize();
103 static size_t pageSize = 0;
106 GetSystemInfo(&info);
107 pageSize = info.dwPageSize;
108 ASSERT(IsPowerOf2(pageSize));
116 MemoryRegion(Address base, size_t size)
123 bool contains(Address addr) const
125 return m_base <= addr && addr < (m_base + m_size);
128 bool contains(const MemoryRegion& other) const
130 return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
136 int err = munmap(m_base, m_size);
137 RELEASE_ASSERT(!err);
139 bool success = VirtualFree(m_base, 0, MEM_RELEASE);
140 RELEASE_ASSERT(success);
144 WARN_UNUSED_RETURN bool commit()
147 return !mprotect(m_base, m_size, PROT_READ | PROT_WRITE);
149 void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE);
157 int err = mprotect(m_base, m_size, PROT_NONE);
158 RELEASE_ASSERT(!err);
159 // FIXME: Consider using MADV_FREE on MacOS.
160 madvise(m_base, m_size, MADV_DONTNEED);
162 bool success = VirtualFree(m_base, m_size, MEM_DECOMMIT);
163 RELEASE_ASSERT(success);
167 Address base() const { return m_base; }
168 size_t size() const { return m_size; }
175 // A PageMemoryRegion represents a chunk of reserved virtual address
176 // space containing a number of blink heap pages. On Windows, reserved
177 // virtual address space can only be given back to the system as a
178 // whole. The PageMemoryRegion allows us to do that by keeping track
179 // of the number of pages using it in order to be able to release all
180 // of the virtual address space when there are no more pages using it.
181 class PageMemoryRegion : public MemoryRegion {
188 void pageDeleted(Address page)
190 markPageUnused(page);
192 Heap::removePageMemoryRegion(this);
197 void markPageUsed(Address page)
199 ASSERT(!m_inUse[index(page)]);
200 m_inUse[index(page)] = true;
203 void markPageUnused(Address page)
205 m_inUse[index(page)] = false;
208 static PageMemoryRegion* allocateLargePage(size_t size)
210 return allocate(size, 1);
213 static PageMemoryRegion* allocateNormalPages()
215 return allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion);
218 BaseHeapPage* pageFromAddress(Address address)
220 ASSERT(contains(address));
221 if (!m_inUse[index(address)])
224 return pageHeaderFromObject(base());
225 return pageHeaderFromObject(address);
229 PageMemoryRegion(Address base, size_t size, unsigned numPages)
230 : MemoryRegion(base, size)
231 , m_isLargePage(numPages == 1)
232 , m_numPages(numPages)
234 for (size_t i = 0; i < blinkPagesPerRegion; ++i)
238 unsigned index(Address address)
240 ASSERT(contains(address));
243 size_t offset = blinkPageAddress(address) - base();
244 ASSERT(offset % blinkPageSize == 0);
245 return offset / blinkPageSize;
248 static PageMemoryRegion* allocate(size_t size, unsigned numPages)
250 // Compute a random blink page aligned address for the page memory
251 // region and attempt to get the memory there.
252 Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase());
253 Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress);
256 Address base = static_cast<Address>(mmap(alignedRandomAddress, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0));
257 RELEASE_ASSERT(base != MAP_FAILED);
258 if (base == roundToBlinkPageBoundary(base))
259 return new PageMemoryRegion(base, size, numPages);
261 // We failed to get a blink page aligned chunk of
262 // memory. Unmap the chunk that we got and fall back to
263 // overallocating and selecting an aligned sub part of what
265 int error = munmap(base, size);
266 RELEASE_ASSERT(!error);
267 size_t allocationSize = size + blinkPageSize;
268 base = static_cast<Address>(mmap(alignedRandomAddress, allocationSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0));
269 RELEASE_ASSERT(base != MAP_FAILED);
271 Address end = base + allocationSize;
272 Address alignedBase = roundToBlinkPageBoundary(base);
273 Address regionEnd = alignedBase + size;
275 // If the allocated memory was not blink page aligned release
276 // the memory before the aligned address.
277 if (alignedBase != base)
278 MemoryRegion(base, alignedBase - base).release();
280 // Free the additional memory at the end of the page if any.
282 MemoryRegion(regionEnd, end - regionEnd).release();
284 return new PageMemoryRegion(alignedBase, size, numPages);
286 Address base = static_cast<Address>(VirtualAlloc(alignedRandomAddress, size, MEM_RESERVE, PAGE_NOACCESS));
288 ASSERT(base == alignedRandomAddress);
289 return new PageMemoryRegion(base, size, numPages);
292 // We failed to get the random aligned address that we asked
293 // for. Fall back to overallocating. On Windows it is
294 // impossible to partially release a region of memory
295 // allocated by VirtualAlloc. To avoid wasting virtual address
296 // space we attempt to release a large region of memory
297 // returned as a whole and then allocate an aligned region
298 // inside this larger region.
299 size_t allocationSize = size + blinkPageSize;
300 for (int attempt = 0; attempt < 3; attempt++) {
301 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
302 RELEASE_ASSERT(base);
303 VirtualFree(base, 0, MEM_RELEASE);
305 Address alignedBase = roundToBlinkPageBoundary(base);
306 base = static_cast<Address>(VirtualAlloc(alignedBase, size, MEM_RESERVE, PAGE_NOACCESS));
308 ASSERT(base == alignedBase);
309 return new PageMemoryRegion(alignedBase, size, numPages);
313 // We failed to avoid wasting virtual address space after
315 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
316 RELEASE_ASSERT(base);
318 // FIXME: If base is by accident blink page size aligned
319 // here then we can create two pages out of reserved
321 Address alignedBase = roundToBlinkPageBoundary(base);
323 return new PageMemoryRegion(alignedBase, size, numPages);
328 bool m_inUse[blinkPagesPerRegion];
332 // Representation of the memory used for a Blink heap page.
334 // The representation keeps track of two memory regions:
336 // 1. The virtual memory reserved from the system in order to be able
337 // to free all the virtual memory reserved. Multiple PageMemory
338 // instances can share the same reserved memory region and
339 // therefore notify the reserved memory region on destruction so
340 // that the system memory can be given back when all PageMemory
341 // instances for that memory are gone.
343 // 2. The writable memory (a sub-region of the reserved virtual
344 // memory region) that is used for the actual heap page payload.
346 // Guard pages are created before and after the writable memory.
351 __lsan_unregister_root_region(m_writable.base(), m_writable.size());
352 m_reserved->pageDeleted(writableStart());
355 WARN_UNUSED_RETURN bool commit()
357 m_reserved->markPageUsed(writableStart());
358 return m_writable.commit();
363 m_reserved->markPageUnused(writableStart());
364 m_writable.decommit();
367 void markUnused() { m_reserved->markPageUnused(writableStart()); }
369 PageMemoryRegion* region() { return m_reserved; }
371 Address writableStart() { return m_writable.base(); }
373 static PageMemory* setupPageMemoryInRegion(PageMemoryRegion* region, size_t pageOffset, size_t payloadSize)
375 // Setup the payload one OS page into the page memory. The
376 // first os page is the guard page.
377 Address payloadAddress = region->base() + pageOffset + osPageSize();
378 return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize));
381 // Allocate a virtual address space for one blink page with the
384 // [ guard os page | ... payload ... | guard os page ]
385 // ^---{ aligned to blink page size }
387 static PageMemory* allocate(size_t payloadSize)
389 ASSERT(payloadSize > 0);
391 // Virtual memory allocation routines operate in OS page sizes.
392 // Round up the requested size to nearest os page size.
393 payloadSize = roundToOsPageSize(payloadSize);
395 // Overallocate by 2 times OS page size to have space for a
396 // guard page at the beginning and end of blink heap page.
397 size_t allocationSize = payloadSize + 2 * osPageSize();
398 PageMemoryRegion* pageMemoryRegion = PageMemoryRegion::allocateLargePage(allocationSize);
399 PageMemory* storage = setupPageMemoryInRegion(pageMemoryRegion, 0, payloadSize);
400 RELEASE_ASSERT(storage->commit());
405 PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable)
406 : m_reserved(reserved)
407 , m_writable(writable)
409 ASSERT(reserved->contains(writable));
411 // Register the writable area of the memory as part of the LSan root set.
412 // Only the writable area is mapped and can contain C++ objects. Those
413 // C++ objects can contain pointers to objects outside of the heap and
414 // should therefore be part of the LSan root set.
415 __lsan_register_root_region(m_writable.base(), m_writable.size());
419 PageMemoryRegion* m_reserved;
420 MemoryRegion m_writable;
425 explicit GCScope(ThreadState::StackState stackState)
426 : m_state(ThreadState::current())
427 , m_safePointScope(stackState)
428 , m_parkedAllThreads(false)
430 TRACE_EVENT0("blink_gc", "Heap::GCScope");
431 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
432 if (m_state->isMainThread())
433 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting");
435 m_state->checkThread();
437 // FIXME: in an unlikely coincidence that two threads decide
438 // to collect garbage at the same time, avoid doing two GCs in
440 RELEASE_ASSERT(!m_state->isInGC());
441 RELEASE_ASSERT(!m_state->isSweepInProgress());
442 if (LIKELY(ThreadState::stopThreads())) {
443 m_parkedAllThreads = true;
446 if (m_state->isMainThread())
447 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
450 bool allThreadsParked() { return m_parkedAllThreads; }
454 // Only cleanup if we parked all threads in which case the GC happened
455 // and we need to resume the other threads.
456 if (LIKELY(m_parkedAllThreads)) {
458 ASSERT(!m_state->isInGC());
459 ThreadState::resumeThreads();
464 ThreadState* m_state;
465 ThreadState::SafePointScope m_safePointScope;
466 bool m_parkedAllThreads; // False if we fail to park all threads
470 bool HeapObjectHeader::isMarked() const
473 unsigned size = asanUnsafeAcquireLoad(&m_size);
474 return size & markBitMask;
478 void HeapObjectHeader::unmark()
481 m_size &= ~markBitMask;
485 bool HeapObjectHeader::hasDeadMark() const
488 return m_size & deadBitMask;
492 void HeapObjectHeader::clearDeadMark()
495 m_size &= ~deadBitMask;
499 void HeapObjectHeader::setDeadMark()
503 m_size |= deadBitMask;
508 void HeapObjectHeader::zapMagic()
510 m_magic = zappedMagic;
514 HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload)
516 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
517 HeapObjectHeader* header =
518 reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize);
522 void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize)
525 if (gcInfo->hasFinalizer()) {
526 gcInfo->m_finalize(object);
529 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
530 // In Debug builds, memory is zapped when it's freed, and the zapped memory is
531 // zeroed out when the memory is reused. Memory is also zapped when using Leak
532 // Sanitizer because the heap is used as a root region for LSan and therefore
533 // pointers in unreachable memory could hide leaks.
534 for (size_t i = 0; i < objectSize; i++)
535 object[i] = finalizedZapValue;
537 // Zap the primary vTable entry (secondary vTable entries are not zapped).
538 if (gcInfo->hasVTable()) {
539 *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
542 // In Release builds, the entire object is zeroed out when it is added to the free list.
543 // This happens right after sweeping the page and before the thread commences execution.
547 void FinalizedHeapObjectHeader::finalize()
549 HeapObjectHeader::finalize(m_gcInfo, payload(), payloadSize());
552 template<typename Header>
553 void LargeHeapObject<Header>::unmark()
555 return heapObjectHeader()->unmark();
558 template<typename Header>
559 bool LargeHeapObject<Header>::isMarked()
561 return heapObjectHeader()->isMarked();
564 template<typename Header>
565 void LargeHeapObject<Header>::setDeadMark()
567 heapObjectHeader()->setDeadMark();
570 template<typename Header>
571 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
573 ASSERT(contains(address));
574 if (!objectContains(address) || heapObjectHeader()->hasDeadMark())
576 #if ENABLE(GC_PROFILE_MARKING)
577 visitor->setHostInfo(&address, "stack");
583 static bool isUninitializedMemory(void* objectPointer, size_t objectSize)
585 // Scan through the object's fields and check that they are all zero.
586 Address* objectFields = reinterpret_cast<Address*>(objectPointer);
587 for (size_t i = 0; i < objectSize / sizeof(Address); ++i) {
588 if (objectFields[i] != 0)
596 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
598 if (heapObjectHeader()->hasVTable() && !vTableInitialized(payload())) {
599 FinalizedHeapObjectHeader* header = heapObjectHeader();
600 visitor->markNoTracing(header);
601 ASSERT(isUninitializedMemory(header->payload(), header->payloadSize()));
603 visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback());
608 void LargeHeapObject<HeapObjectHeader>::mark(Visitor* visitor)
611 if (gcInfo()->hasVTable() && !vTableInitialized(payload())) {
612 HeapObjectHeader* header = heapObjectHeader();
613 visitor->markNoTracing(header);
614 ASSERT(isUninitializedMemory(header->payload(), header->payloadSize()));
616 visitor->mark(heapObjectHeader(), gcInfo()->m_trace);
621 void LargeHeapObject<FinalizedHeapObjectHeader>::finalize()
623 heapObjectHeader()->finalize();
627 void LargeHeapObject<HeapObjectHeader>::finalize()
630 HeapObjectHeader::finalize(gcInfo(), payload(), payloadSize());
633 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload)
635 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
636 FinalizedHeapObjectHeader* header =
637 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
641 template<typename Header>
642 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
643 : m_currentAllocationPoint(0)
644 , m_remainingAllocationSize(0)
645 , m_lastRemainingAllocationSize(0)
647 , m_firstLargeHeapObject(0)
648 , m_firstPageAllocatedDuringSweeping(0)
649 , m_lastPageAllocatedDuringSweeping(0)
651 , m_biggestFreeListIndex(0)
652 , m_threadState(state)
654 , m_numberOfNormalPages(0)
655 , m_promptlyFreedCount(0)
660 template<typename Header>
661 ThreadHeap<Header>::~ThreadHeap()
663 ASSERT(!m_firstPage);
664 ASSERT(!m_firstLargeHeapObject);
667 template<typename Header>
668 void ThreadHeap<Header>::cleanupPages()
672 // Add the ThreadHeap's pages to the orphanedPagePool.
673 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next)
674 Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
677 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next)
678 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject);
679 m_firstLargeHeapObject = 0;
682 template<typename Header>
683 void ThreadHeap<Header>::updateRemainingAllocationSize()
685 if (m_lastRemainingAllocationSize > remainingAllocationSize()) {
686 stats().increaseObjectSpace(m_lastRemainingAllocationSize - remainingAllocationSize());
687 m_lastRemainingAllocationSize = remainingAllocationSize();
689 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize());
692 template<typename Header>
693 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
695 size_t allocationSize = allocationSizeFromSize(size);
696 ASSERT(allocationSize > remainingAllocationSize());
697 if (allocationSize > blinkPageSize / 2)
698 return allocateLargeObject(allocationSize, gcInfo);
700 updateRemainingAllocationSize();
701 if (threadState()->shouldGC()) {
702 if (threadState()->shouldForceConservativeGC())
703 Heap::collectGarbage(ThreadState::HeapPointersOnStack);
705 threadState()->setGCRequested();
707 if (remainingAllocationSize() > 0) {
708 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
709 setAllocationPoint(0, 0);
711 ensureCurrentAllocation(allocationSize, gcInfo);
712 return allocate(size, gcInfo);
715 template<typename Header>
716 bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
718 size_t bucketSize = 1 << m_biggestFreeListIndex;
719 int i = m_biggestFreeListIndex;
720 for (; i > 0; i--, bucketSize >>= 1) {
721 if (bucketSize < minSize)
723 FreeListEntry* entry = m_freeLists[i];
725 m_biggestFreeListIndex = i;
726 entry->unlink(&m_freeLists[i]);
727 setAllocationPoint(entry->address(), entry->size());
728 ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize);
732 m_biggestFreeListIndex = i;
736 template<typename Header>
737 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
739 ASSERT(minSize >= allocationGranularity);
740 if (allocateFromFreeList(minSize))
742 if (coalesce(minSize) && allocateFromFreeList(minSize))
744 addPageToHeap(gcInfo);
745 bool success = allocateFromFreeList(minSize);
746 RELEASE_ASSERT(success);
749 template<typename Header>
750 BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address)
752 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
753 if (page->contains(address))
756 for (HeapPage<Header>* page = m_firstPageAllocatedDuringSweeping; page; page = page->next()) {
757 if (page->contains(address))
760 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
761 // Check that large pages are blinkPageSize aligned (modulo the
762 // osPageSize for the guard page).
763 ASSERT(reinterpret_cast<Address>(current) - osPageSize() == roundToBlinkPageStart(reinterpret_cast<Address>(current)));
764 if (current->contains(address))
770 #if ENABLE(GC_PROFILE_MARKING)
771 template<typename Header>
772 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeHeapObject(Address address)
774 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
775 if (current->contains(address))
776 return current->gcInfo();
782 #if ENABLE(GC_PROFILE_HEAP)
783 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0
784 template<typename Header>
785 void ThreadHeap<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
787 size_t previousPageCount = info->pageCount;
789 json->beginArray("pages");
790 for (HeapPage<Header>* page = m_firstPage; page; page = page->next(), ++info->pageCount) {
791 // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots.
792 if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) {
794 json->pushInteger(reinterpret_cast<intptr_t>(page));
795 page->snapshot(json, info);
798 page->snapshot(0, info);
803 json->beginArray("largeObjects");
804 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
805 json->beginDictionary();
806 current->snapshot(json, info);
807 json->endDictionary();
811 json->setInteger("pageCount", info->pageCount - previousPageCount);
815 template<typename Header>
816 void ThreadHeap<Header>::addToFreeList(Address address, size_t size)
818 ASSERT(heapPageFromAddress(address));
819 ASSERT(heapPageFromAddress(address + size - 1));
820 ASSERT(size < blinkPagePayloadSize());
821 // The free list entries are only pointer aligned (but when we allocate
822 // from them we are 8 byte aligned due to the header size).
823 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask));
824 ASSERT(!(size & allocationMask));
825 ASAN_POISON_MEMORY_REGION(address, size);
826 FreeListEntry* entry;
827 if (size < sizeof(*entry)) {
828 // Create a dummy header with only a size and freelist bit set.
829 ASSERT(size >= sizeof(BasicObjectHeader));
830 // Free list encode the size to mark the lost memory as freelist memory.
831 new (NotNull, address) BasicObjectHeader(BasicObjectHeader::freeListEncodedSize(size));
832 // This memory gets lost. Sweeping can reclaim it.
835 entry = new (NotNull, address) FreeListEntry(size);
836 #if defined(ADDRESS_SANITIZER)
837 // For ASan we don't add the entry to the free lists until the asanDeferMemoryReuseCount
838 // reaches zero. However we always add entire pages to ensure that adding a new page will
839 // increase the allocation space.
840 if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList())
843 int index = bucketIndexForSize(size);
844 entry->link(&m_freeLists[index]);
845 if (!m_lastFreeListEntries[index])
846 m_lastFreeListEntries[index] = entry;
847 if (index > m_biggestFreeListIndex)
848 m_biggestFreeListIndex = index;
851 template<typename Header>
852 void ThreadHeap<Header>::promptlyFreeObject(Header* header)
854 ASSERT(!m_threadState->isSweepInProgress());
855 header->checkHeader();
856 Address address = reinterpret_cast<Address>(header);
857 Address payload = header->payload();
858 size_t size = header->size();
859 size_t payloadSize = header->payloadSize();
860 BaseHeapPage* page = pageHeaderFromObject(address);
862 ASSERT(page == heapPageFromAddress(address));
865 ThreadState::NoSweepScope scope(m_threadState);
866 HeapObjectHeader::finalize(header->gcInfo(), payload, payloadSize);
867 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
868 memset(payload, 0, payloadSize);
870 header->markPromptlyFreed();
873 page->addToPromptlyFreedSize(size);
874 m_promptlyFreedCount++;
877 template<typename Header>
878 bool ThreadHeap<Header>::coalesce(size_t minSize)
880 if (m_threadState->isSweepInProgress())
883 if (m_promptlyFreedCount < 256)
886 // The smallest bucket able to satisfy an allocation request for minSize is
887 // the bucket where all free-list entries are guarantied to be larger than
888 // minSize. That bucket is one larger than the bucket minSize would go into.
889 size_t neededBucketIndex = bucketIndexForSize(minSize) + 1;
890 size_t neededFreeEntrySize = 1 << neededBucketIndex;
891 size_t neededPromptlyFreedSize = neededFreeEntrySize * 3;
892 size_t foundFreeEntrySize = 0;
894 // Bailout early on large requests because it is unlikely we will find a free-list entry.
895 if (neededPromptlyFreedSize >= blinkPageSize)
898 TRACE_EVENT_BEGIN2("blink_gc", "ThreadHeap::coalesce" , "requestedSize", (unsigned)minSize , "neededSize", (unsigned)neededFreeEntrySize);
900 // Search for a coalescing candidate.
901 ASSERT(!ownsNonEmptyAllocationArea());
902 size_t pageCount = 0;
903 HeapPage<Header>* page = m_firstPage;
905 // Only consider one of the first 'n' pages. A "younger" page is more likely to have freed backings.
906 if (++pageCount > numberOfPagesToConsiderForCoalescing) {
910 // Only coalesce pages with "sufficient" promptly freed space.
911 if (page->promptlyFreedSize() >= neededPromptlyFreedSize) {
917 // If we found a likely candidate, fully coalesce all its promptly-freed entries.
919 page->clearObjectStartBitMap();
920 page->resetPromptlyFreedSize();
921 size_t freedCount = 0;
922 Address startOfGap = page->payload();
923 for (Address headerAddress = startOfGap; headerAddress < page->end(); ) {
924 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
925 ASSERT(basicHeader->size() > 0);
926 ASSERT(basicHeader->size() < blinkPagePayloadSize());
928 if (basicHeader->isPromptlyFreed()) {
929 stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->size());
930 size_t size = basicHeader->size();
931 ASSERT(size >= sizeof(Header));
932 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
933 memset(headerAddress, 0, sizeof(Header));
936 headerAddress += size;
940 if (startOfGap != headerAddress) {
941 size_t size = headerAddress - startOfGap;
942 addToFreeList(startOfGap, size);
943 if (size > foundFreeEntrySize)
944 foundFreeEntrySize = size;
947 headerAddress += basicHeader->size();
948 startOfGap = headerAddress;
951 if (startOfGap != page->end()) {
952 size_t size = page->end() - startOfGap;
953 addToFreeList(startOfGap, size);
954 if (size > foundFreeEntrySize)
955 foundFreeEntrySize = size;
958 // Check before subtracting because freedCount might not be balanced with freed entries.
959 if (freedCount < m_promptlyFreedCount)
960 m_promptlyFreedCount -= freedCount;
962 m_promptlyFreedCount = 0;
965 TRACE_EVENT_END1("blink_gc", "ThreadHeap::coalesce", "foundFreeEntrySize", (unsigned)foundFreeEntrySize);
967 if (foundFreeEntrySize < neededFreeEntrySize) {
968 // If coalescing failed, reset the freed count to delay coalescing again.
969 m_promptlyFreedCount = 0;
976 template<typename Header>
977 Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInfo)
979 // Caller already added space for object header and rounded up to allocation alignment
980 ASSERT(!(size & allocationMask));
982 size_t allocationSize = sizeof(LargeHeapObject<Header>) + size;
984 // Ensure that there is enough space for alignment. If the header
985 // is not a multiple of 8 bytes we will allocate an extra
986 // headerPadding<Header> bytes to ensure it 8 byte aligned.
987 allocationSize += headerPadding<Header>();
989 // If ASan is supported we add allocationGranularity bytes to the allocated space and
990 // poison that to detect overflows
991 #if defined(ADDRESS_SANITIZER)
992 allocationSize += allocationGranularity;
995 updateRemainingAllocationSize();
996 if (m_threadState->shouldGC())
997 m_threadState->setGCRequested();
998 m_threadState->shouldFlushHeapDoesNotContainCache();
999 PageMemory* pageMemory = PageMemory::allocate(allocationSize);
1000 m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region());
1001 Address largeObjectAddress = pageMemory->writableStart();
1002 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
1003 memset(headerAddress, 0, size);
1004 Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
1005 Address result = headerAddress + sizeof(*header);
1006 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
1007 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState());
1009 // Poison the object header and allocationGranularity bytes after the object
1010 ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
1011 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
1012 largeObject->link(&m_firstLargeHeapObject);
1013 stats().increaseAllocatedSpace(largeObject->size());
1014 stats().increaseObjectSpace(largeObject->size());
1018 template<typename Header>
1019 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
1021 object->unlink(previousNext);
1024 // Unpoison the object header and allocationGranularity bytes after the
1025 // object before freeing.
1026 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
1027 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
1029 if (object->terminating()) {
1030 ASSERT(ThreadState::current()->isTerminating());
1031 // The thread is shutting down so this object is being removed as part
1032 // of a thread local GC. In that case the object could be traced in the
1033 // next global GC either due to a dead object being traced via a
1034 // conservative pointer or due to a programming error where an object
1035 // in another thread heap keeps a dangling pointer to this object.
1036 // To guard against this we put the large object memory in the
1037 // orphanedPagePool to ensure it is still reachable. After the next global
1038 // GC it can be released assuming no rogue/dangling pointers refer to
1040 // NOTE: large objects are not moved to the free page pool as it is
1041 // unlikely they can be reused due to their individual sizes.
1042 Heap::orphanedPagePool()->addOrphanedPage(m_index, object);
1044 ASSERT(!ThreadState::current()->isTerminating());
1045 PageMemory* memory = object->storage();
1046 object->~LargeHeapObject<Header>();
1051 template<typename DataType>
1052 PagePool<DataType>::PagePool()
1054 for (int i = 0; i < NumberOfHeaps; ++i) {
1059 FreePagePool::~FreePagePool()
1061 for (int index = 0; index < NumberOfHeaps; ++index) {
1062 while (PoolEntry* entry = m_pool[index]) {
1063 m_pool[index] = entry->next;
1064 PageMemory* memory = entry->data;
1072 void FreePagePool::addFreePage(int index, PageMemory* memory)
1074 // When adding a page to the pool we decommit it to ensure it is unused
1075 // while in the pool. This also allows the physical memory, backing the
1076 // page, to be given back to the OS.
1078 MutexLocker locker(m_mutex[index]);
1079 PoolEntry* entry = new PoolEntry(memory, m_pool[index]);
1080 m_pool[index] = entry;
1083 PageMemory* FreePagePool::takeFreePage(int index)
1085 MutexLocker locker(m_mutex[index]);
1086 while (PoolEntry* entry = m_pool[index]) {
1087 m_pool[index] = entry->next;
1088 PageMemory* memory = entry->data;
1091 if (memory->commit())
1094 // We got some memory, but failed to commit it, try again.
1100 BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state)
1101 : m_storage(storage)
1103 , m_threadState(state)
1104 , m_terminating(false)
1105 , m_tracedAfterOrphaned(false)
1107 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
1110 void BaseHeapPage::markOrphaned()
1114 m_terminating = false;
1115 m_tracedAfterOrphaned = false;
1116 // Since we zap the page payload for orphaned pages we need to mark it as
1117 // unused so a conservative pointer won't interpret the object headers.
1118 storage()->markUnused();
1121 OrphanedPagePool::~OrphanedPagePool()
1123 for (int index = 0; index < NumberOfHeaps; ++index) {
1124 while (PoolEntry* entry = m_pool[index]) {
1125 m_pool[index] = entry->next;
1126 BaseHeapPage* page = entry->data;
1128 PageMemory* memory = page->storage();
1130 page->~BaseHeapPage();
1136 void OrphanedPagePool::addOrphanedPage(int index, BaseHeapPage* page)
1138 page->markOrphaned();
1139 PoolEntry* entry = new PoolEntry(page, m_pool[index]);
1140 m_pool[index] = entry;
1144 void OrphanedPagePool::decommitOrphanedPages()
1147 // No locking needed as all threads are at safepoints at this point in time.
1148 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1149 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
1150 ASSERT((*it)->isAtSafePoint());
1153 for (int index = 0; index < NumberOfHeaps; ++index) {
1154 PoolEntry* entry = m_pool[index];
1155 PoolEntry** prevNext = &m_pool[index];
1157 BaseHeapPage* page = entry->data;
1158 if (page->tracedAfterOrphaned()) {
1159 // If the orphaned page was traced in the last GC it is not
1160 // decommited. We only decommit a page, ie. put it in the
1161 // memory pool, when the page has no objects pointing to it.
1162 // We remark the page as orphaned to clear the tracedAfterOrphaned
1163 // flag and any object trace bits that were set during tracing.
1164 page->markOrphaned();
1165 prevNext = &entry->next;
1166 entry = entry->next;
1170 // Page was not traced. Check if we should reuse the memory or just
1171 // free it. Large object memory is not reused, but freed, normal
1172 // blink heap pages are reused.
1173 // NOTE: We call the destructor before freeing or adding to the
1175 PageMemory* memory = page->storage();
1176 if (page->isLargeObject()) {
1177 page->~BaseHeapPage();
1180 page->~BaseHeapPage();
1181 // Clear out the page's memory before adding it to the free page
1182 // pool to ensure it is zero filled when being reused.
1183 clearMemory(memory);
1184 Heap::freePagePool()->addFreePage(index, memory);
1187 PoolEntry* deadEntry = entry;
1188 entry = entry->next;
1196 void OrphanedPagePool::clearMemory(PageMemory* memory)
1198 #if defined(ADDRESS_SANITIZER)
1199 // Don't use memset when running with ASan since this needs to zap
1200 // poisoned memory as well and the NO_SANITIZE_ADDRESS annotation
1201 // only works for code in this method and not for calls to memset.
1202 Address base = memory->writableStart();
1203 for (Address current = base; current < base + blinkPagePayloadSize(); ++current)
1206 memset(memory->writableStart(), 0, blinkPagePayloadSize());
1211 bool OrphanedPagePool::contains(void* object)
1213 for (int index = 0; index < NumberOfHeaps; ++index) {
1214 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) {
1215 BaseHeapPage* page = entry->data;
1216 if (page->contains(reinterpret_cast<Address>(object)))
1225 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
1227 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on
1228 // the heap should be unused (ie. 0).
1233 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
1235 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap
1236 // since it is the same for all objects
1238 allocatePage(gcInfo);
1241 template <typename Header>
1242 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page)
1244 MutexLocker locker(m_threadState->sweepMutex());
1245 if (page->terminating()) {
1246 // The thread is shutting down so this page is being removed as part
1247 // of a thread local GC. In that case the page could be accessed in the
1248 // next global GC either due to a dead object being traced via a
1249 // conservative pointer or due to a programming error where an object
1250 // in another thread heap keeps a dangling pointer to this object.
1251 // To guard against this we put the page in the orphanedPagePool to
1252 // ensure it is still reachable. After the next global GC it can be
1253 // decommitted and moved to the page pool assuming no rogue/dangling
1254 // pointers refer to it.
1255 Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
1257 PageMemory* memory = page->storage();
1258 page->~HeapPage<Header>();
1259 Heap::freePagePool()->addFreePage(m_index, memory);
1263 template<typename Header>
1264 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
1266 m_threadState->shouldFlushHeapDoesNotContainCache();
1267 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index);
1268 // We continue allocating page memory until we succeed in committing one.
1269 while (!pageMemory) {
1270 // Allocate a memory region for blinkPagesPerRegion pages that
1271 // will each have the following layout.
1273 // [ guard os page | ... payload ... | guard os page ]
1274 // ^---{ aligned to blink page size }
1275 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages();
1276 m_threadState->allocatedRegionsSinceLastGC().append(region);
1278 // Setup the PageMemory object for each of the pages in the
1281 for (size_t i = 0; i < blinkPagesPerRegion; i++) {
1282 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize());
1283 // Take the first possible page ensuring that this thread actually
1284 // gets a page and add the rest to the page pool.
1286 if (memory->commit())
1287 pageMemory = memory;
1291 Heap::freePagePool()->addFreePage(m_index, memory);
1293 offset += blinkPageSize;
1296 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
1297 // Use a separate list for pages allocated during sweeping to make
1298 // sure that we do not accidentally sweep objects that have been
1299 // allocated during sweeping.
1300 if (m_threadState->isSweepInProgress()) {
1301 if (!m_lastPageAllocatedDuringSweeping)
1302 m_lastPageAllocatedDuringSweeping = page;
1303 page->link(&m_firstPageAllocatedDuringSweeping);
1305 page->link(&m_firstPage);
1307 ++m_numberOfNormalPages;
1308 addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
1312 template<typename Header>
1313 bool ThreadHeap<Header>::pagesToBeSweptContains(Address address)
1315 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
1316 if (page->contains(address))
1322 template<typename Header>
1323 bool ThreadHeap<Header>::pagesAllocatedDuringSweepingContains(Address address)
1325 for (HeapPage<Header>* page = m_firstPageAllocatedDuringSweeping; page; page = page->next()) {
1326 if (page->contains(address))
1333 template<typename Header>
1334 void ThreadHeap<Header>::getStatsForTesting(HeapStats& stats)
1336 ASSERT(!m_firstPageAllocatedDuringSweeping);
1337 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1338 page->getStatsForTesting(stats);
1339 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
1340 current->getStatsForTesting(stats);
1343 template<typename Header>
1344 void ThreadHeap<Header>::sweepNormalPages(HeapStats* stats)
1346 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepNormalPages");
1347 HeapPage<Header>* page = m_firstPage;
1348 HeapPage<Header>** previousNext = &m_firstPage;
1349 HeapPage<Header>* previous = 0;
1351 page->resetPromptlyFreedSize();
1352 if (page->isEmpty()) {
1353 HeapPage<Header>* unused = page;
1354 if (unused == m_mergePoint)
1355 m_mergePoint = previous;
1356 page = page->next();
1357 HeapPage<Header>::unlink(this, unused, previousNext);
1358 --m_numberOfNormalPages;
1360 page->sweep(stats, this);
1361 previousNext = &page->m_next;
1363 page = page->next();
1368 template<typename Header>
1369 void ThreadHeap<Header>::sweepLargePages(HeapStats* stats)
1371 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages");
1372 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
1373 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
1374 if (current->isMarked()) {
1375 stats->increaseAllocatedSpace(current->size());
1376 stats->increaseObjectSpace(current->size());
1378 previousNext = ¤t->m_next;
1379 current = current->next();
1381 LargeHeapObject<Header>* next = current->next();
1382 freeLargeObject(current, previousNext);
1389 // STRICT_ASAN_FINALIZATION_CHECKING turns on poisoning of all objects during
1390 // sweeping to catch cases where dead objects touch each other. This is not
1391 // turned on by default because it also triggers for cases that are safe.
1392 // Examples of such safe cases are context life cycle observers and timers
1393 // embedded in garbage collected objects.
1394 #define STRICT_ASAN_FINALIZATION_CHECKING 0
1396 template<typename Header>
1397 void ThreadHeap<Header>::sweep(HeapStats* stats)
1399 ASSERT(isConsistentForSweeping());
1400 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING
1401 // When using ASan do a pre-sweep where all unmarked objects are
1402 // poisoned before calling their finalizer methods. This can catch
1403 // the case where the finalizer of an object tries to modify
1404 // another object as part of finalization.
1405 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1406 page->poisonUnmarkedObjects();
1408 sweepNormalPages(stats);
1409 sweepLargePages(stats);
1412 template<typename Header>
1413 void ThreadHeap<Header>::postSweepProcessing()
1415 // If pages have been allocated during sweeping, link them into
1416 // the list of pages.
1417 if (m_firstPageAllocatedDuringSweeping) {
1418 m_lastPageAllocatedDuringSweeping->m_next = m_firstPage;
1419 m_firstPage = m_firstPageAllocatedDuringSweeping;
1420 m_lastPageAllocatedDuringSweeping = 0;
1421 m_firstPageAllocatedDuringSweeping = 0;
1426 template<typename Header>
1427 bool ThreadHeap<Header>::isConsistentForSweeping()
1429 // A thread heap is consistent for sweeping if none of the pages to
1430 // be swept contain a freelist block or the current allocation
1432 for (size_t i = 0; i < blinkPageSizeLog2; i++) {
1433 for (FreeListEntry* freeListEntry = m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) {
1434 if (pagesToBeSweptContains(freeListEntry->address())) {
1437 ASSERT(pagesAllocatedDuringSweepingContains(freeListEntry->address()));
1440 if (ownsNonEmptyAllocationArea()) {
1441 ASSERT(pagesToBeSweptContains(currentAllocationPoint())
1442 || pagesAllocatedDuringSweepingContains(currentAllocationPoint()));
1443 return !pagesToBeSweptContains(currentAllocationPoint());
1449 template<typename Header>
1450 void ThreadHeap<Header>::makeConsistentForSweeping()
1452 if (ownsNonEmptyAllocationArea())
1453 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
1454 setAllocationPoint(0, 0);
1458 template<typename Header>
1459 void ThreadHeap<Header>::clearLiveAndMarkDead()
1461 ASSERT(isConsistentForSweeping());
1462 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1463 page->clearLiveAndMarkDead();
1464 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
1465 if (current->isMarked())
1468 current->setDeadMark();
1472 template<typename Header>
1473 void ThreadHeap<Header>::clearFreeLists()
1475 m_promptlyFreedCount = 0;
1476 for (size_t i = 0; i < blinkPageSizeLog2; i++) {
1478 m_lastFreeListEntries[i] = 0;
1482 int BaseHeap::bucketIndexForSize(size_t size)
1493 template<typename Header>
1494 HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo)
1495 : BaseHeapPage(storage, gcInfo, heap->threadState())
1498 COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
1499 m_objectStartBitMapComputed = false;
1500 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
1501 heap->stats().increaseAllocatedSpace(blinkPageSize);
1504 template<typename Header>
1505 void HeapPage<Header>::link(HeapPage** prevNext)
1511 template<typename Header>
1512 void HeapPage<Header>::unlink(ThreadHeap<Header>* heap, HeapPage* unused, HeapPage** prevNext)
1514 *prevNext = unused->m_next;
1515 heap->removePageFromHeap(unused);
1518 template<typename Header>
1519 void HeapPage<Header>::getStatsForTesting(HeapStats& stats)
1521 stats.increaseAllocatedSpace(blinkPageSize);
1522 Address headerAddress = payload();
1523 ASSERT(headerAddress != end());
1525 Header* header = reinterpret_cast<Header*>(headerAddress);
1526 if (!header->isFree()) {
1527 stats.increaseObjectSpace(header->payloadSize());
1529 ASSERT(header->size() < blinkPagePayloadSize());
1530 headerAddress += header->size();
1531 ASSERT(headerAddress <= end());
1532 } while (headerAddress < end());
1535 template<typename Header>
1536 bool HeapPage<Header>::isEmpty()
1538 BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
1539 return header->isFree() && (header->size() == payloadSize());
1542 template<typename Header>
1543 void HeapPage<Header>::sweep(HeapStats* stats, ThreadHeap<Header>* heap)
1545 clearObjectStartBitMap();
1546 stats->increaseAllocatedSpace(blinkPageSize);
1547 Address startOfGap = payload();
1548 for (Address headerAddress = startOfGap; headerAddress < end(); ) {
1549 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
1550 ASSERT(basicHeader->size() > 0);
1551 ASSERT(basicHeader->size() < blinkPagePayloadSize());
1553 if (basicHeader->isFree()) {
1554 size_t size = basicHeader->size();
1555 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
1556 // Zero the memory in the free list header to maintain the
1557 // invariant that memory on the free list is zero filled.
1558 // The rest of the memory is already on the free list and is
1559 // therefore already zero filled.
1560 if (size < sizeof(FreeListEntry))
1561 memset(headerAddress, 0, size);
1563 memset(headerAddress, 0, sizeof(FreeListEntry));
1565 headerAddress += size;
1568 // At this point we know this is a valid object of type Header
1569 Header* header = static_cast<Header*>(basicHeader);
1571 if (!header->isMarked()) {
1572 // For ASan we unpoison the specific object when calling the finalizer and
1573 // poison it again when done to allow the object's own finalizer to operate
1574 // on the object, but not have other finalizers be allowed to access it.
1575 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
1577 size_t size = header->size();
1578 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
1579 // This memory will be added to the freelist. Maintain the invariant
1580 // that memory on the freelist is zero filled.
1581 memset(headerAddress, 0, size);
1583 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1584 headerAddress += size;
1588 if (startOfGap != headerAddress)
1589 heap->addToFreeList(startOfGap, headerAddress - startOfGap);
1591 headerAddress += header->size();
1592 stats->increaseObjectSpace(header->size());
1593 startOfGap = headerAddress;
1595 if (startOfGap != end())
1596 heap->addToFreeList(startOfGap, end() - startOfGap);
1599 template<typename Header>
1600 void HeapPage<Header>::clearLiveAndMarkDead()
1602 for (Address headerAddress = payload(); headerAddress < end();) {
1603 Header* header = reinterpret_cast<Header*>(headerAddress);
1604 ASSERT(header->size() < blinkPagePayloadSize());
1605 // Check if a free list entry first since we cannot call
1606 // isMarked on a free list entry.
1607 if (header->isFree()) {
1608 headerAddress += header->size();
1611 if (header->isMarked())
1614 header->setDeadMark();
1615 headerAddress += header->size();
1619 template<typename Header>
1620 void HeapPage<Header>::populateObjectStartBitMap()
1622 memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
1623 Address start = payload();
1624 for (Address headerAddress = start; headerAddress < end();) {
1625 Header* header = reinterpret_cast<Header*>(headerAddress);
1626 size_t objectOffset = headerAddress - start;
1627 ASSERT(!(objectOffset & allocationMask));
1628 size_t objectStartNumber = objectOffset / allocationGranularity;
1629 size_t mapIndex = objectStartNumber / 8;
1630 ASSERT(mapIndex < objectStartBitMapSize);
1631 m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7));
1632 headerAddress += header->size();
1633 ASSERT(headerAddress <= end());
1635 m_objectStartBitMapComputed = true;
1638 template<typename Header>
1639 void HeapPage<Header>::clearObjectStartBitMap()
1641 m_objectStartBitMapComputed = false;
1644 static int numberOfLeadingZeroes(uint8_t byte)
1662 template<typename Header>
1663 Header* HeapPage<Header>::findHeaderFromAddress(Address address)
1665 if (address < payload())
1667 if (!isObjectStartBitMapComputed())
1668 populateObjectStartBitMap();
1669 size_t objectOffset = address - payload();
1670 size_t objectStartNumber = objectOffset / allocationGranularity;
1671 size_t mapIndex = objectStartNumber / 8;
1672 ASSERT(mapIndex < objectStartBitMapSize);
1673 size_t bit = objectStartNumber & 7;
1674 uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1);
1676 ASSERT(mapIndex > 0);
1677 byte = m_objectStartBitMap[--mapIndex];
1679 int leadingZeroes = numberOfLeadingZeroes(byte);
1680 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
1681 objectOffset = objectStartNumber * allocationGranularity;
1682 Address objectAddress = objectOffset + payload();
1683 Header* header = reinterpret_cast<Header*>(objectAddress);
1684 if (header->isFree())
1689 template<typename Header>
1690 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
1692 ASSERT(contains(address));
1693 Header* header = findHeaderFromAddress(address);
1694 if (!header || header->hasDeadMark())
1697 #if ENABLE(GC_PROFILE_MARKING)
1698 visitor->setHostInfo(&address, "stack");
1700 if (hasVTable(header) && !vTableInitialized(header->payload())) {
1701 visitor->markNoTracing(header);
1702 ASSERT(isUninitializedMemory(header->payload(), header->payloadSize()));
1704 visitor->mark(header, traceCallback(header));
1708 #if ENABLE(GC_PROFILE_MARKING)
1709 template<typename Header>
1710 const GCInfo* HeapPage<Header>::findGCInfo(Address address)
1712 if (address < payload())
1715 if (gcInfo()) // for non FinalizedObjectHeader
1718 Header* header = findHeaderFromAddress(address);
1722 return header->gcInfo();
1726 #if ENABLE(GC_PROFILE_HEAP)
1727 template<typename Header>
1728 void HeapPage<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
1731 for (Address addr = payload(); addr < end(); addr += header->size()) {
1732 header = reinterpret_cast<Header*>(addr);
1734 json->pushInteger(header->encodedSize());
1735 if (header->isFree()) {
1736 info->freeSize += header->size();
1740 const GCInfo* gcinfo = header->gcInfo() ? header->gcInfo() : gcInfo();
1741 size_t tag = info->getClassTag(gcinfo);
1742 size_t age = header->age();
1744 json->pushInteger(tag);
1745 if (header->isMarked()) {
1746 info->liveCount[tag] += 1;
1747 info->liveSize[tag] += header->size();
1748 // Count objects that are live when promoted to the final generation.
1749 if (age == maxHeapObjectAge - 1)
1750 info->generations[tag][maxHeapObjectAge] += 1;
1753 info->deadCount[tag] += 1;
1754 info->deadSize[tag] += header->size();
1755 // Count objects that are dead before the final generation.
1756 if (age < maxHeapObjectAge)
1757 info->generations[tag][age] += 1;
1763 #if defined(ADDRESS_SANITIZER)
1764 template<typename Header>
1765 void HeapPage<Header>::poisonUnmarkedObjects()
1767 for (Address headerAddress = payload(); headerAddress < end(); ) {
1768 Header* header = reinterpret_cast<Header*>(headerAddress);
1769 ASSERT(header->size() < blinkPagePayloadSize());
1771 if (!header->isFree() && !header->isMarked())
1772 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1773 headerAddress += header->size();
1779 inline void HeapPage<FinalizedHeapObjectHeader>::finalize(FinalizedHeapObjectHeader* header)
1785 inline void HeapPage<HeapObjectHeader>::finalize(HeapObjectHeader* header)
1788 HeapObjectHeader::finalize(gcInfo(), header->payload(), header->payloadSize());
1792 inline TraceCallback HeapPage<HeapObjectHeader>::traceCallback(HeapObjectHeader* header)
1795 return gcInfo()->m_trace;
1799 inline TraceCallback HeapPage<FinalizedHeapObjectHeader>::traceCallback(FinalizedHeapObjectHeader* header)
1801 return header->traceCallback();
1805 inline bool HeapPage<HeapObjectHeader>::hasVTable(HeapObjectHeader* header)
1808 return gcInfo()->hasVTable();
1812 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header)
1814 return header->hasVTable();
1817 template<typename Header>
1818 void LargeHeapObject<Header>::getStatsForTesting(HeapStats& stats)
1820 stats.increaseAllocatedSpace(size());
1821 stats.increaseObjectSpace(payloadSize());
1824 #if ENABLE(GC_PROFILE_HEAP)
1825 template<typename Header>
1826 void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
1828 Header* header = heapObjectHeader();
1829 size_t tag = info->getClassTag(header->gcInfo());
1830 size_t age = header->age();
1832 info->liveCount[tag] += 1;
1833 info->liveSize[tag] += header->size();
1834 // Count objects that are live when promoted to the final generation.
1835 if (age == maxHeapObjectAge - 1)
1836 info->generations[tag][maxHeapObjectAge] += 1;
1839 info->deadCount[tag] += 1;
1840 info->deadSize[tag] += header->size();
1841 // Count objects that are dead before the final generation.
1842 if (age < maxHeapObjectAge)
1843 info->generations[tag][age] += 1;
1847 json->setInteger("class", tag);
1848 json->setInteger("size", header->size());
1849 json->setInteger("isMarked", isMarked());
1854 void HeapDoesNotContainCache::flush()
1856 ASSERT(ThreadState::isAnyThreadInGC());
1859 for (int i = 0; i < numberOfEntries; i++)
1861 m_hasEntries = false;
1865 size_t HeapDoesNotContainCache::hash(Address address)
1867 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2);
1868 value ^= value >> numberOfEntriesLog2;
1869 value ^= value >> (numberOfEntriesLog2 * 2);
1870 value &= numberOfEntries - 1;
1871 return value & ~1; // Returns only even number.
1874 bool HeapDoesNotContainCache::lookup(Address address)
1876 ASSERT(ThreadState::isAnyThreadInGC());
1878 size_t index = hash(address);
1879 ASSERT(!(index & 1));
1880 Address cachePage = roundToBlinkPageStart(address);
1881 if (m_entries[index] == cachePage)
1882 return m_entries[index];
1883 if (m_entries[index + 1] == cachePage)
1884 return m_entries[index + 1];
1888 void HeapDoesNotContainCache::addEntry(Address address)
1890 ASSERT(ThreadState::isAnyThreadInGC());
1892 m_hasEntries = true;
1893 size_t index = hash(address);
1894 ASSERT(!(index & 1));
1895 Address cachePage = roundToBlinkPageStart(address);
1896 m_entries[index + 1] = m_entries[index];
1897 m_entries[index] = cachePage;
1900 void Heap::flushHeapDoesNotContainCache()
1902 s_heapDoesNotContainCache->flush();
1905 // The marking mutex is used to ensure sequential access to data
1906 // structures during marking. The marking mutex needs to be acquired
1907 // during marking when elements are taken from the global marking
1908 // stack or when elements are added to the global ephemeron,
1909 // post-marking, and weak processing stacks. In debug mode the mutex
1910 // also needs to be acquired when asserts use the heap contains
1912 static Mutex& markingMutex()
1914 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
1918 static ThreadCondition& markingCondition()
1920 AtomicallyInitializedStatic(ThreadCondition&, condition = *new ThreadCondition);
1924 static void markNoTracingCallback(Visitor* visitor, void* object)
1926 visitor->markNoTracing(object);
1929 class MarkingVisitor : public Visitor {
1931 #if ENABLE(GC_PROFILE_MARKING)
1932 typedef HashSet<uintptr_t> LiveObjectSet;
1933 typedef HashMap<String, LiveObjectSet> LiveObjectMap;
1934 typedef HashMap<uintptr_t, std::pair<uintptr_t, String> > ObjectGraph;
1937 MarkingVisitor(CallbackStack* markingStack) : m_markingStack(markingStack)
1941 inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback)
1946 // Check that we are not marking objects that are outside
1947 // the heap by calling Heap::contains. However, we cannot
1948 // call Heap::contains when outside a GC, and we do call mark
1949 // when doing weakness for ephemerons. Hence we only check
1950 // when called within a GC.
1951 #if ENABLE_PARALLEL_MARKING
1952 MutexLocker locker(markingMutex());
1954 ASSERT(!ThreadState::isAnyThreadInGC() || Heap::containedInHeapOrOrphanedPage(header));
1957 ASSERT(objectPointer);
1958 if (header->isMarked())
1961 #if ENABLE(GC_PROFILE_MARKING)
1962 MutexLocker locker(objectGraphMutex());
1963 String className(classOf(objectPointer));
1965 LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet());
1966 result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer));
1968 ObjectGraph::AddResult result = objectGraph().add(reinterpret_cast<uintptr_t>(objectPointer), std::make_pair(reinterpret_cast<uintptr_t>(m_hostObject), m_hostName));
1969 ASSERT(result.isNewEntry);
1970 // fprintf(stderr, "%s[%p] -> %s[%p]\n", m_hostName.ascii().data(), m_hostObject, className.ascii().data(), objectPointer);
1973 Heap::pushTraceCallback(m_markingStack, const_cast<void*>(objectPointer), callback);
1976 virtual void mark(HeapObjectHeader* header, TraceCallback callback) override
1978 // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1979 // versions to correctly find the payload.
1980 visitHeader(header, header->payload(), callback);
1983 virtual void mark(FinalizedHeapObjectHeader* header, TraceCallback callback) override
1985 // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1986 // versions to correctly find the payload.
1987 visitHeader(header, header->payload(), callback);
1990 virtual void mark(const void* objectPointer, TraceCallback callback) override
1994 FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(objectPointer);
1995 visitHeader(header, header->payload(), callback);
1998 virtual void registerDelayedMarkNoTracing(const void* object) override
2000 Heap::pushPostMarkingCallback(const_cast<void*>(object), markNoTracingCallback);
2003 virtual void registerWeakMembers(const void* closure, const void* containingObject, WeakPointerCallback callback) override
2005 Heap::pushWeakObjectPointerCallback(const_cast<void*>(closure), const_cast<void*>(containingObject), callback);
2008 virtual void registerWeakTable(const void* closure, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
2010 Heap::registerWeakTable(const_cast<void*>(closure), iterationCallback, iterationDoneCallback);
2014 virtual bool weakTableRegistered(const void* closure)
2016 return Heap::weakTableRegistered(closure);
2020 virtual bool isMarked(const void* objectPointer) override
2022 return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked();
2025 // This macro defines the necessary visitor methods for typed heaps
2026 #define DEFINE_VISITOR_METHODS(Type) \
2027 virtual void mark(const Type* objectPointer, TraceCallback callback) override \
2029 if (!objectPointer) \
2031 HeapObjectHeader* header = \
2032 HeapObjectHeader::fromPayload(objectPointer); \
2033 visitHeader(header, header->payload(), callback); \
2035 virtual bool isMarked(const Type* objectPointer) override \
2037 return HeapObjectHeader::fromPayload(objectPointer)->isMarked(); \
2040 FOR_EACH_TYPED_HEAP(DEFINE_VISITOR_METHODS)
2041 #undef DEFINE_VISITOR_METHODS
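// For illustration (not part of the original source): for a hypothetical
// typed-heap entry |Node| in FOR_EACH_TYPED_HEAP, DEFINE_VISITOR_METHODS(Node)
// expands to the following pair of overrides, mirroring the generic
// mark()/isMarked() above but using the non-finalized HeapObjectHeader:
//
//   virtual void mark(const Node* objectPointer, TraceCallback callback) override
//   {
//       if (!objectPointer)
//           return;
//       HeapObjectHeader* header = HeapObjectHeader::fromPayload(objectPointer);
//       visitHeader(header, header->payload(), callback);
//   }
//   virtual bool isMarked(const Node* objectPointer) override
//   {
//       return HeapObjectHeader::fromPayload(objectPointer)->isMarked();
//   }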
2043 #if ENABLE(GC_PROFILE_MARKING)
2046 fprintf(stderr, "\n---------- AFTER MARKING -------------------\n");
2047 for (LiveObjectMap::iterator it = currentlyLive().begin(), end = currentlyLive().end(); it != end; ++it) {
2048 fprintf(stderr, "%s %u", it->key.ascii().data(), it->value.size());
2050 if (it->key == "blink::Document")
2051 reportStillAlive(it->value, previouslyLive().get(it->key));
2053 fprintf(stderr, "\n");
2056 previouslyLive().swap(currentlyLive());
2057 currentlyLive().clear();
2059 for (HashSet<uintptr_t>::iterator it = objectsToFindPath().begin(), end = objectsToFindPath().end(); it != end; ++it) {
2060 dumpPathToObjectFromObjectGraph(objectGraph(), *it);
2064 static void reportStillAlive(LiveObjectSet current, LiveObjectSet previous)
2068 fprintf(stderr, " [previously %u]", previous.size());
2069 for (LiveObjectSet::iterator it = current.begin(), end = current.end(); it != end; ++it) {
2070 if (previous.find(*it) == previous.end())
2078 fprintf(stderr, " {survived 2GCs %d: ", count);
2079 for (LiveObjectSet::iterator it = current.begin(), end = current.end(); it != end; ++it) {
2080 if (previous.find(*it) == previous.end())
2082 fprintf(stderr, "%ld", *it);
2084 fprintf(stderr, ", ");
2087 fprintf(stderr, "}");
2090 static void dumpPathToObjectFromObjectGraph(const ObjectGraph& graph, uintptr_t target)
2092 ObjectGraph::const_iterator it = graph.find(target);
2093 if (it == graph.end())
2095 fprintf(stderr, "Path to %lx of %s\n", target, classOf(reinterpret_cast<const void*>(target)).ascii().data());
2096 while (it != graph.end()) {
2097 fprintf(stderr, "<- %lx of %s\n", it->value.first, it->value.second.utf8().data());
2098 it = graph.find(it->value.first);
2100 fprintf(stderr, "\n");
2103 static void dumpPathToObjectOnNextGC(void* p)
2105 objectsToFindPath().add(reinterpret_cast<uintptr_t>(p));
2108 static Mutex& objectGraphMutex()
2110 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
2114 static LiveObjectMap& previouslyLive()
2116 DEFINE_STATIC_LOCAL(LiveObjectMap, map, ());
2120 static LiveObjectMap& currentlyLive()
2122 DEFINE_STATIC_LOCAL(LiveObjectMap, map, ());
2126 static ObjectGraph& objectGraph()
2128 DEFINE_STATIC_LOCAL(ObjectGraph, graph, ());
2132 static HashSet<uintptr_t>& objectsToFindPath()
2134 DEFINE_STATIC_LOCAL(HashSet<uintptr_t>, set, ());
2140 virtual void registerWeakCell(void** cell, WeakPointerCallback callback) override
2142 Heap::pushWeakCellPointerCallback(cell, callback);
2146 CallbackStack* m_markingStack;
2151 ThreadState::init();
2152 s_markingStack = new CallbackStack();
2153 s_postMarkingCallbackStack = new CallbackStack();
2154 s_weakCallbackStack = new CallbackStack();
2155 s_ephemeronStack = new CallbackStack();
2156 s_heapDoesNotContainCache = new HeapDoesNotContainCache();
2157 s_markingVisitor = new MarkingVisitor(s_markingStack);
2158 s_freePagePool = new FreePagePool();
2159 s_orphanedPagePool = new OrphanedPagePool();
2160 s_markingThreads = new Vector<OwnPtr<WebThread>>();
2161 if (Platform::current()) {
2162 int processors = Platform::current()->numberOfProcessors();
2163 int numberOfMarkingThreads = std::min(processors, maxNumberOfMarkingThreads);
2164 for (int i = 0; i < numberOfMarkingThreads; i++)
2165 s_markingThreads->append(adoptPtr(Platform::current()->createThread("Blink GC Marking Thread")));
2169 void Heap::shutdown()
2171 s_shutdownCalled = true;
2172 ThreadState::shutdownHeapIfNecessary();
2175 void Heap::doShutdown()
2177 // We don't want to call doShutdown() twice.
2178 if (!s_markingVisitor)
2181 ASSERT(!ThreadState::isAnyThreadInGC());
2182 ASSERT(!ThreadState::attachedThreads().size());
2183 delete s_markingThreads;
2184 s_markingThreads = 0;
2185 delete s_markingVisitor;
2186 s_markingVisitor = 0;
2187 delete s_heapDoesNotContainCache;
2188 s_heapDoesNotContainCache = 0;
2189 delete s_freePagePool;
2191 delete s_orphanedPagePool;
2192 s_orphanedPagePool = 0;
2193 delete s_weakCallbackStack;
2194 s_weakCallbackStack = 0;
2195 delete s_postMarkingCallbackStack;
2196 s_postMarkingCallbackStack = 0;
2197 delete s_markingStack;
2199 delete s_ephemeronStack;
2200 s_ephemeronStack = 0;
2201 delete s_regionTree;
2203 ThreadState::shutdown();
2206 BaseHeapPage* Heap::contains(Address address)
2208 ASSERT(ThreadState::isAnyThreadInGC());
2209 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2210 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2211 BaseHeapPage* page = (*it)->contains(address);
2219 bool Heap::containedInHeapOrOrphanedPage(void* object)
2221 return contains(object) || orphanedPagePool()->contains(object);
2225 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
2227 ASSERT(ThreadState::isAnyThreadInGC());
2230 if (s_heapDoesNotContainCache->lookup(address))
2234 if (BaseHeapPage* page = lookup(address)) {
2235 ASSERT(page->contains(address));
2236 ASSERT(!s_heapDoesNotContainCache->lookup(address));
2237 page->checkAndMarkPointer(visitor, address);
2238 // FIXME: We only need to set the conservative flag if checkAndMarkPointer actually marked the pointer.
2239 s_lastGCWasConservative = true;
2244 s_heapDoesNotContainCache->addEntry(address);
2246 if (!s_heapDoesNotContainCache->lookup(address))
2247 s_heapDoesNotContainCache->addEntry(address);
2252 #if ENABLE(GC_PROFILE_MARKING)
2253 const GCInfo* Heap::findGCInfo(Address address)
2255 return ThreadState::findGCInfoFromAllThreads(address);
2259 #if ENABLE(GC_PROFILE_MARKING)
2260 void Heap::dumpPathToObjectOnNextGC(void* p)
2262 static_cast<MarkingVisitor*>(s_markingVisitor)->dumpPathToObjectOnNextGC(p);
2265 String Heap::createBacktraceString()
2267 int framesToShow = 3;
2268 int stackFrameSize = 16;
2269 ASSERT(stackFrameSize >= framesToShow);
2270 typedef void* FramePointer;
2271 FramePointer* stackFrame = static_cast<FramePointer*>(alloca(sizeof(FramePointer) * stackFrameSize));
2272 WTFGetBacktrace(stackFrame, &stackFrameSize);
2274 StringBuilder builder;
2275 builder.append("Persistent");
2276 bool didAppendFirstName = false;
2277 // Skip frames before/including "blink::Persistent".
2278 bool didSeePersistent = false;
2279 for (int i = 0; i < stackFrameSize && framesToShow > 0; ++i) {
2280 FrameToNameScope frameToName(stackFrame[i]);
2281 if (!frameToName.nullableName())
2283 if (strstr(frameToName.nullableName(), "blink::Persistent")) {
2284 didSeePersistent = true;
2287 if (!didSeePersistent)
2289 if (!didAppendFirstName) {
2290 didAppendFirstName = true;
2291 builder.append(" ... Backtrace:");
2293 builder.append("\n\t");
2294 builder.append(frameToName.nullableName());
2297 return builder.toString().replace("blink::", "");
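// The resulting string looks roughly like the following (illustrative only;
// actual frame names depend on the build and symbol availability):
//
//   Persistent ... Backtrace:
//       SomeOwner::SomeOwner()
//       SomeFactory::create()
//       SomeCaller::run()
//
// with all "blink::" prefixes stripped by the replace() above.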
2301 void Heap::pushTraceCallback(CallbackStack* stack, void* object, TraceCallback callback)
2305 #if ENABLE_PARALLEL_MARKING
2306 MutexLocker locker(markingMutex());
2308 ASSERT(Heap::containedInHeapOrOrphanedPage(object));
2311 CallbackStack::Item* slot = stack->allocateEntry();
2312 *slot = CallbackStack::Item(object, callback);
2315 template<CallbackInvocationMode Mode>
2316 bool Heap::popAndInvokeTraceCallback(CallbackStack* stack, Visitor* visitor)
2318 CallbackStack::Item* item = stack->pop();
2321 // If the object being traced is located on a page which is dead, don't
2322 // trace it. This can happen when a conservative GC kept a dead object
2323 // alive which pointed to a (now gone) object on the cleaned up page.
2324 // Also, if doing a thread local GC, don't trace objects that are located
2325 // on other threads' heaps, i.e., pages where the terminating flag is not set.
2326 BaseHeapPage* heapPage = pageHeaderFromObject(item->object());
2327 if (Mode == GlobalMarking && heapPage->orphaned()) {
2328 // When doing a global GC we should only get a trace callback to an orphaned
2329 // page if the GC is conservative. If it is not conservative, there is
2330 // a bug in the code: we have a dangling pointer to a page
2331 // on the dead thread.
2332 RELEASE_ASSERT(Heap::lastGCWasConservative());
2333 heapPage->setTracedAfterOrphaned();
2336 if (Mode == ThreadLocalMarking && (heapPage->orphaned() || !heapPage->terminating()))
2339 #if ENABLE(GC_PROFILE_MARKING)
2340 visitor->setHostInfo(item->object(), classOf(item->object()));
2342 item->call(visitor);
2346 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback)
2348 #if ENABLE_PARALLEL_MARKING
2349 MutexLocker locker(markingMutex());
2351 ASSERT(!Heap::orphanedPagePool()->contains(object));
2352 CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry();
2353 *slot = CallbackStack::Item(object, callback);
2356 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor)
2358 if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) {
2359 item->call(visitor);
2365 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback)
2367 #if ENABLE_PARALLEL_MARKING
2368 MutexLocker locker(markingMutex());
2370 ASSERT(!Heap::orphanedPagePool()->contains(cell));
2371 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry();
2372 *slot = CallbackStack::Item(cell, callback);
2375 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointerCallback callback)
2377 #if ENABLE_PARALLEL_MARKING
2378 MutexLocker locker(markingMutex());
2380 ASSERT(Heap::contains(object));
2381 BaseHeapPage* heapPageForObject = pageHeaderFromObject(object);
2382 ASSERT(!heapPageForObject->orphaned());
2383 ASSERT(Heap::contains(object) == heapPageForObject);
2384 ThreadState* state = heapPageForObject->threadState();
2385 state->pushWeakObjectPointerCallback(closure, callback);
2388 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
2390 // For weak processing we should never reach orphaned pages since orphaned
2391 // pages are not traced and thus objects on those pages are never
2392 // registered as objects on orphaned pages. We cannot assert this here since
2393 // we might have an off-heap collection. We assert it in
2394 // Heap::pushWeakObjectPointerCallback.
2395 if (CallbackStack::Item* item = s_weakCallbackStack->pop()) {
2396 item->call(visitor);
2402 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
2405 #if ENABLE_PARALLEL_MARKING
2406 MutexLocker locker(markingMutex());
2408 // Check that the ephemeron table being pushed onto the stack is not on an
2409 // orphaned page.
2410 ASSERT(!Heap::orphanedPagePool()->contains(table));
2411 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry();
2412 *slot = CallbackStack::Item(table, iterationCallback);
2415 // Register a post-marking callback to tell the tables that
2416 // ephemeron iteration is complete.
2417 pushPostMarkingCallback(table, iterationDoneCallback);
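// Illustrative sketch (not part of the original source): an ephemeron-style
// collection registers itself from its trace method. The two callbacks are
// EphemeronCallback function pointers; the names below are hypothetical:
//
//   void HypotheticalWeakMap::trace(Visitor* visitor)
//   {
//       visitor->registerWeakTable(this, iterateEphemeronsCallback,
//           ephemeronIterationDoneCallback);
//   }
//
// The iteration callback marks values whose keys are already marked; the
// fixed-point loops in processMarkingStack rerun it until no new objects
// become reachable, and the done callback runs once post-marking.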
2421 bool Heap::weakTableRegistered(const void* table)
2423 #if ENABLE_PARALLEL_MARKING
2424 MutexLocker locker(markingMutex());
2426 ASSERT(s_ephemeronStack);
2427 return s_ephemeronStack->hasCallbackForObject(table);
2431 void Heap::prepareForGC()
2433 ASSERT(ThreadState::isAnyThreadInGC());
2434 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2435 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
2436 (*it)->prepareForGC();
2439 void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::CauseOfGC cause)
2441 ThreadState* state = ThreadState::current();
2442 state->clearGCRequested();
2444 GCScope gcScope(stackState);
2445 // Check if we successfully parked the other threads. If not we bail out of the GC.
2446 if (!gcScope.allThreadsParked()) {
2447 ThreadState::current()->setGCRequested();
2451 if (state->isMainThread())
2452 ScriptForbiddenScope::enter();
2454 s_lastGCWasConservative = false;
2456 TRACE_EVENT2("blink_gc", "Heap::collectGarbage",
2457 "precise", stackState == ThreadState::NoHeapPointersOnStack,
2458 "forced", cause == ThreadState::ForcedGC);
2459 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC");
2460 double timeStamp = WTF::currentTimeMS();
2461 #if ENABLE(GC_PROFILE_MARKING)
2462 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear();
2465 // Disallow allocation during garbage collection (but not
2466 // during the finalization that happens when the gcScope is
2467 // torn down).
2468 NoAllocationScope<AnyThread> noAllocationScope;
2472 // 1. trace persistent roots.
2473 ThreadState::visitPersistentRoots(s_markingVisitor);
2475 // 2. trace objects reachable from the persistent roots including ephemerons.
2476 #if ENABLE_PARALLEL_MARKING
2477 processMarkingStackInParallel();
2479 processMarkingStack<GlobalMarking>();
2482 // 3. trace objects reachable from the stack. We do this independently of the
2483 // given stackState since other threads might have a different stack state.
2484 ThreadState::visitStackRoots(s_markingVisitor);
2486 // 4. trace objects reachable from the stack "roots" including ephemerons.
2487 // Only do the processing if we found a pointer to an object on one of the
2488 // thread stacks.
2489 if (lastGCWasConservative()) {
2490 #if ENABLE_PARALLEL_MARKING
2491 processMarkingStackInParallel();
2493 processMarkingStack<GlobalMarking>();
2497 postMarkingProcessing();
2498 globalWeakProcessing();
2500 // After a global marking we know that any orphaned page that was not reached
2501 // cannot be reached in a subsequent GC. This is because each thread has either
2502 // swept its heap or done a "poor man's sweep" in prepareForGC, which marks as
2503 // dead any objects that died in the previous GC but were not yet swept. In this
2504 // GC's marking we check that no object marked as dead is traced, e.g. via a
2505 // conservatively found pointer or a programming error with an object containing
2506 // a dangling pointer.
2507 orphanedPagePool()->decommitOrphanedPages();
2509 #if ENABLE(GC_PROFILE_MARKING)
2510 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
2513 if (Platform::current()) {
2514 uint64_t objectSpaceSize;
2515 uint64_t allocatedSpaceSize;
2516 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
2517 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
2518 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
2519 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
2522 if (state->isMainThread())
2523 ScriptForbiddenScope::exit();
2526 void Heap::collectGarbageForTerminatingThread(ThreadState* state)
2528 // We explicitly do not enter a safepoint while doing thread-specific
2529 // garbage collection, since we don't want to allow a global GC at the
2530 // same time as a thread-local GC.
2533 NoAllocationScope<AnyThread> noAllocationScope;
2536 state->prepareForGC();
2538 // 1. trace the thread local persistent roots. For thread local GCs we
2539 // don't trace the stack (i.e. no conservative scanning) since this is
2540 // only called during thread shutdown, where there should be no objects
2541 // on the stack.
2542 // We also assume that orphaned pages have no objects reachable from
2543 // persistent handles on other threads or CrossThreadPersistents. The
2544 // only cases where this could happen are a subsequent conservative
2545 // global GC finding a "pointer" on the stack, or a programming
2546 // error where an object has a dangling cross-thread pointer to an
2547 // object on this heap.
2548 state->visitPersistents(s_markingVisitor);
2550 // 2. trace objects reachable from the thread's persistent roots
2551 // including ephemerons.
2552 processMarkingStack<ThreadLocalMarking>();
2554 postMarkingProcessing();
2555 globalWeakProcessing();
2559 state->performPendingSweep();
2562 void Heap::processMarkingStackEntries(int* runningMarkingThreads)
2564 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackEntries");
2565 CallbackStack stack;
2566 MarkingVisitor visitor(&stack);
2568 MutexLocker locker(markingMutex());
2569 stack.takeBlockFrom(s_markingStack);
2571 while (!stack.isEmpty()) {
2572 while (popAndInvokeTraceCallback<GlobalMarking>(&stack, &visitor)) { }
2574 MutexLocker locker(markingMutex());
2575 stack.takeBlockFrom(s_markingStack);
2579 MutexLocker locker(markingMutex());
2580 if (!--(*runningMarkingThreads))
2581 markingCondition().signal();
2585 void Heap::processMarkingStackOnMultipleThreads()
2587 int runningMarkingThreads = s_markingThreads->size() + 1;
2589 for (size_t i = 0; i < s_markingThreads->size(); ++i)
2590 s_markingThreads->at(i)->postTask(new Task(WTF::bind(Heap::processMarkingStackEntries, &runningMarkingThreads)));
2592 processMarkingStackEntries(&runningMarkingThreads);
2594 // Wait for the other threads to finish their part of marking.
2595 MutexLocker locker(markingMutex());
2596 while (runningMarkingThreads)
2597 markingCondition().wait(markingMutex());
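// Note on the handshake above: runningMarkingThreads starts at the number of
// helper threads plus one for the current thread. Each thread, including this
// one, runs processMarkingStackEntries, which decrements the counter under
// markingMutex() once the shared stack is drained and signals
// markingCondition() when the count reaches zero, releasing the wait above.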
2600 void Heap::processMarkingStackInParallel()
2602 static const size_t sizeOfStackForParallelMarking = 2 * CallbackStack::blockSize;
2603 // Ephemeron fixed-point loop, run on the garbage-collecting thread.
2605 // Iteratively mark all objects that are reachable from the objects
2606 // currently pushed onto the marking stack. Do so in parallel if there
2607 // are multiple blocks on the global marking stack.
2608 if (s_markingStack->sizeExceeds(sizeOfStackForParallelMarking)) {
2609 processMarkingStackOnMultipleThreads();
2611 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded");
2612 while (popAndInvokeTraceCallback<GlobalMarking>(s_markingStack, s_markingVisitor)) { }
2616 // Mark any strong pointers that have now become reachable in ephemeron
2617 // maps.
2618 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack");
2619 s_ephemeronStack->invokeEphemeronCallbacks(s_markingVisitor);
2622 // Rerun loop if ephemeron processing queued more objects for tracing.
2623 } while (!s_markingStack->isEmpty());
2626 template<CallbackInvocationMode Mode>
2627 void Heap::processMarkingStack()
2629 // Ephemeron fixed-point loop.
2632 // Iteratively mark all objects that are reachable from the objects
2633 // currently pushed onto the marking stack. If Mode is ThreadLocalMarking
2634 // don't continue tracing if the trace hits an object on another thread's
2635 // heap.
2636 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded");
2637 while (popAndInvokeTraceCallback<Mode>(s_markingStack, s_markingVisitor)) { }
2641 // Mark any strong pointers that have now become reachable in ephemeron
2642 // maps.
2643 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack");
2644 s_ephemeronStack->invokeEphemeronCallbacks(s_markingVisitor);
2647 // Rerun loop if ephemeron processing queued more objects for tracing.
2648 } while (!s_markingStack->isEmpty());
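// Both the parallel and single-threaded variants above implement the same
// ephemeron fixed point; schematically:
//
//   do {
//       drain the marking stack;   // may register more ephemeron callbacks
//       run ephemeron callbacks;   // may push more objects to mark
//   } while (the marking stack is non-empty);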
2651 void Heap::postMarkingProcessing()
2653 TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing");
2654 // Call post-marking callbacks including:
2655 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup
2656 // (specifically to clear the queued bits for weak hash tables), and
2657 // 2. the markNoTracing callbacks on collection backings to mark them
2658 // if they are only reachable from their front objects.
2659 while (popAndInvokePostMarkingCallback(s_markingVisitor)) { }
2661 s_ephemeronStack->clear();
2663 // Post-marking callbacks should not trace any objects and
2664 // therefore the marking stack should be empty after the
2665 // post-marking callbacks.
2666 ASSERT(s_markingStack->isEmpty());
2669 void Heap::globalWeakProcessing()
2671 TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing");
2672 // Call weak callbacks on objects that may now be pointing to dead
2673 // objects.
2674 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { }
2676 // It is not permitted to trace pointers of live objects in the weak
2677 // callback phase, so the marking stack should still be empty here.
2678 ASSERT(s_markingStack->isEmpty());
2681 void Heap::collectAllGarbage()
2683 // FIXME: oilpan: we should perform a single GC and everything
2684 // should die. Unfortunately this is not the case for all objects
2685 // because the hierarchy was not completely moved to the heap and
2686 // some heap-allocated objects own objects that contain persistents
2687 // pointing to other heap-allocated objects.
2688 for (int i = 0; i < 5; i++)
2689 collectGarbage(ThreadState::NoHeapPointersOnStack, ThreadState::ForcedGC);
2692 void Heap::setForcePreciseGCForTesting()
2694 ThreadState::current()->setForcePreciseGCForTesting(true);
2697 template<typename Header>
2698 void ThreadHeap<Header>::prepareHeapForTermination()
2700 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
2701 page->setTerminating();
2703 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
2704 current->setTerminating();
2708 template<typename Header>
2709 PassOwnPtr<BaseHeap> ThreadHeap<Header>::split(int numberOfNormalPages)
2711 // Create a new split-off thread heap containing
2712 // |numberOfNormalPages| of the pages of this ThreadHeap for
2713 // parallel sweeping. The split-off thread heap will be merged
2714 // with this heap at the end of sweeping and the temporary
2715 // ThreadHeap object will be deallocated after the merge.
2716 ASSERT(numberOfNormalPages > 0);
2717 OwnPtr<ThreadHeap<Header>> splitOff = adoptPtr(new ThreadHeap(m_threadState, m_index));
2718 HeapPage<Header>* splitPoint = m_firstPage;
2719 for (int i = 1; i < numberOfNormalPages; i++)
2720 splitPoint = splitPoint->next();
2721 splitOff->m_firstPage = m_firstPage;
2722 m_firstPage = splitPoint->m_next;
2723 splitOff->m_mergePoint = splitPoint;
2724 splitOff->m_numberOfNormalPages = numberOfNormalPages;
2725 m_numberOfNormalPages -= numberOfNormalPages;
2726 splitPoint->m_next = 0;
2727 return splitOff.release();
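// Illustrative sketch (not part of the original source) of how split() and
// merge() pair up around parallel sweeping; the sweeping step itself is
// hypothetical here:
//
//   OwnPtr<BaseHeap> splitOff = heap->split(pagesPerSweeperTask);
//   // ... sweep the split-off pages on a sweeper thread ...
//   heap->merge(splitOff.release()); // relink the pages and free lists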
2730 template<typename Header>
2731 void ThreadHeap<Header>::merge(PassOwnPtr<BaseHeap> splitOffBase)
2733 ThreadHeap<Header>* splitOff = static_cast<ThreadHeap<Header>*>(splitOffBase.get());
2734 // If the mergePoint is zero, all split-off pages became empty in
2735 // this round and we don't have to merge. There are no pages and
2736 // nothing on the freelists.
2737 ASSERT(splitOff->m_mergePoint || splitOff->m_numberOfNormalPages == 0);
2738 if (splitOff->m_mergePoint) {
2739 // Link the split off pages into the beginning of the list again.
2740 splitOff->m_mergePoint->m_next = m_firstPage;
2741 m_firstPage = splitOff->m_firstPage;
2742 m_numberOfNormalPages += splitOff->m_numberOfNormalPages;
2743 splitOff->m_firstPage = 0;
2744 // Merge free lists.
2745 for (size_t i = 0; i < blinkPageSizeLog2; i++) {
2746 if (!m_freeLists[i]) {
2747 m_freeLists[i] = splitOff->m_freeLists[i];
2748 } else if (splitOff->m_freeLists[i]) {
2749 m_lastFreeListEntries[i]->append(splitOff->m_freeLists[i]);
2750 m_lastFreeListEntries[i] = splitOff->m_lastFreeListEntries[i];
2756 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceSize)
2758 *objectSpaceSize = 0;
2759 *allocatedSpaceSize = 0;
2760 ASSERT(ThreadState::isAnyThreadInGC());
2761 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2762 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
2763 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2764 *objectSpaceSize += (*it)->stats().totalObjectSpace();
2765 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace();
2769 void Heap::getStats(HeapStats* stats)
2772 ASSERT(ThreadState::isAnyThreadInGC());
2773 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2774 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
2775 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2777 (*it)->getStats(temp);
2782 void Heap::getStatsForTesting(HeapStats* stats)
2785 ASSERT(ThreadState::isAnyThreadInGC());
2786 makeConsistentForSweeping();
2787 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2788 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
2789 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2791 (*it)->getStatsForTesting(temp);
2797 bool Heap::isConsistentForSweeping()
2799 ASSERT(ThreadState::isAnyThreadInGC());
2800 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2801 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2802 if (!(*it)->isConsistentForSweeping())
2809 void Heap::makeConsistentForSweeping()
2811 ASSERT(ThreadState::isAnyThreadInGC());
2812 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2813 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
2814 (*it)->makeConsistentForSweeping();
2817 void HeapAllocator::backingFree(void* address)
2819 if (!address || ThreadState::isAnyThreadInGC())
2822 ThreadState* state = ThreadState::current();
2823 if (state->isSweepInProgress())
2826 // Don't promptly free large objects because their page is never reused
2827 // and don't free backings allocated on other threads.
2828 BaseHeapPage* page = pageHeaderFromObject(address);
2829 if (page->isLargeObject() || page->threadState() != state)
2832 typedef HeapIndexTrait<CollectionBackingHeap> HeapTraits;
2833 typedef HeapTraits::HeapType HeapType;
2834 typedef HeapTraits::HeaderType HeaderType;
2836 HeaderType* header = HeaderType::fromPayload(address);
2837 header->checkHeader();
2839 const GCInfo* gcInfo = header->gcInfo();
2840 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer(), header->payloadSize());
2841 HeapType* heap = static_cast<HeapType*>(state->heap(heapIndex));
2842 heap->promptlyFreeObject(header);
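// Note (descriptive, not in the original source): backingFree is the
// "prompt free" fast path for collection backing stores, e.g. when a
// HeapVector discards its old buffer after reallocating. As the checks above
// show, it only runs outside a GC and outside sweeping, on the allocating
// thread, and never for large objects; in all other cases the memory is
// simply left for the next sweep.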
2845 BaseHeapPage* Heap::lookup(Address address)
2847 ASSERT(ThreadState::isAnyThreadInGC());
2850 if (PageMemoryRegion* region = s_regionTree->lookup(address))
2851 return region->pageFromAddress(address);
2855 static Mutex& regionTreeMutex()
2857 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
2861 void Heap::removePageMemoryRegion(PageMemoryRegion* region)
2863 // Deletion of large objects (and thus their regions) can happen concurrently
2864 // on sweeper threads. Removal can also happen during thread shutdown, but
2865 // that case is safe. Regardless, we make all removals mutually exclusive.
2866 MutexLocker locker(regionTreeMutex());
2867 RegionTree::remove(region, &s_regionTree);
2870 void Heap::addPageMemoryRegion(PageMemoryRegion* region)
2872 ASSERT(ThreadState::isAnyThreadInGC());
2873 RegionTree::add(new RegionTree(region), &s_regionTree);
2876 PageMemoryRegion* Heap::RegionTree::lookup(Address address)
2878 RegionTree* current = s_regionTree;
2880 Address base = current->m_region->base();
2881 if (address < base) {
2882 current = current->m_left;
2885 if (address >= base + current->m_region->size()) {
2886 current = current->m_right;
2889 ASSERT(current->m_region->contains(address));
2890 return current->m_region;
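// Note (descriptive, not in the original source): the region tree is a
// plain, unbalanced binary search tree keyed on each PageMemoryRegion's
// base address. Regions never overlap, so the [base, base + size) checks
// above uniquely determine the branch to follow. remove() below does not
// perform classic BST deletion; it unlinks the node and re-adds its two
// subtrees, which keeps the code simple at the cost of extra insertions.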
2895 void Heap::RegionTree::add(RegionTree* newTree, RegionTree** context)
2898 Address base = newTree->m_region->base();
2899 for (RegionTree* current = *context; current; current = *context) {
2900 ASSERT(!current->m_region->contains(base));
2901 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
2906 void Heap::RegionTree::remove(PageMemoryRegion* region, RegionTree** context)
2910 Address base = region->base();
2911 RegionTree* current = *context;
2912 for ( ; current; current = *context) {
2913 if (region == current->m_region)
2915 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
2918 // Shutdown via detachMainThread might not have populated the region tree.
2923 if (current->m_left) {
2924 add(current->m_left, context);
2925 current->m_left = 0;
2927 if (current->m_right) {
2928 add(current->m_right, context);
2929 current->m_right = 0;
2934 // Force template instantiations for the types that we need.
2935 template class HeapPage<FinalizedHeapObjectHeader>;
2936 template class HeapPage<HeapObjectHeader>;
2937 template class ThreadHeap<FinalizedHeapObjectHeader>;
2938 template class ThreadHeap<HeapObjectHeader>;
2940 Visitor* Heap::s_markingVisitor;
2941 Vector<OwnPtr<WebThread>>* Heap::s_markingThreads;
2942 CallbackStack* Heap::s_markingStack;
2943 CallbackStack* Heap::s_postMarkingCallbackStack;
2944 CallbackStack* Heap::s_weakCallbackStack;
2945 CallbackStack* Heap::s_ephemeronStack;
2946 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
2947 bool Heap::s_shutdownCalled = false;
2948 bool Heap::s_lastGCWasConservative = false;
2949 FreePagePool* Heap::s_freePagePool;
2950 OrphanedPagePool* Heap::s_orphanedPagePool;
2951 Heap::RegionTree* Heap::s_regionTree = 0;
2953 } // namespace blink