2 * Copyright (C) 2013 Google Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include "platform/heap/Heap.h"
34 #include "platform/ScriptForbiddenScope.h"
35 #include "platform/Task.h"
36 #include "platform/TraceEvent.h"
37 #include "platform/heap/CallbackStack.h"
38 #include "platform/heap/ThreadState.h"
39 #include "public/platform/Platform.h"
40 #include "wtf/AddressSpaceRandomization.h"
41 #include "wtf/Assertions.h"
42 #include "wtf/LeakAnnotations.h"
43 #include "wtf/PassOwnPtr.h"
44 #if ENABLE(GC_PROFILE_MARKING)
45 #include "wtf/HashMap.h"
46 #include "wtf/HashSet.h"
47 #include "wtf/text/StringBuilder.h"
48 #include "wtf/text/StringHash.h"
52 #if ENABLE(GC_PROFILE_HEAP)
53 #include "platform/TracedValue.h"
65 #if ENABLE(GC_PROFILE_MARKING)
66 static String classOf(const void* object)
68 const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)));
70 return gcInfo->m_className;
76 static bool vTableInitialized(void* objectPointer)
78 return !!(*reinterpret_cast<Address*>(objectPointer));
// Returns true when |power| is a power of two.
// The original expression !((power - 1) & power) also returned true for 0
// (0 - 1 wraps to all-ones, and all-ones & 0 == 0); zero is not a power of
// two, so reject it explicitly. Visible callers only assert on real page
// sizes, so this is a pure correctness tightening.
static bool IsPowerOf2(size_t power)
{
    return power && !((power - 1) & power);
}
88 static Address roundToBlinkPageBoundary(void* base)
90 return reinterpret_cast<Address>((reinterpret_cast<uintptr_t>(base) + blinkPageOffsetMask) & blinkPageBaseMask);
93 static size_t roundToOsPageSize(size_t size)
95 return (size + osPageSize() - 1) & ~(osPageSize() - 1);
// OS page size lookup: POSIX initializes once via getpagesize(); Windows
// queries GetSystemInfo lazily. NOTE(review): the enclosing function and
// #if OS(...) scaffolding are elided in this chunk — confirm against the
// full file.
101 static const size_t pageSize = getpagesize();
103 static size_t pageSize = 0;
106 GetSystemInfo(&info);
107 pageSize = info.dwPageSize;
// Page-mask arithmetic elsewhere (roundToOsPageSize) requires a
// power-of-two page size.
108 ASSERT(IsPowerOf2(pageSize));
// MemoryRegion: a [base, base + size) span of virtual address space with
// containment checks and OS-specific release (munmap on POSIX, VirtualFree
// on Windows). NOTE(review): interior lines are elided in this chunk.
116 MemoryRegion(Address base, size_t size)
// Half-open containment test for a single address.
123 bool contains(Address addr) const
125 return m_base <= addr && addr < (m_base + m_size);
// A region contains another when both its first and last bytes do.
129 bool contains(const MemoryRegion& other) const
131 return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
// release(): give the whole reservation back to the OS; failure is fatal.
137 int err = munmap(m_base, m_size);
138 RELEASE_ASSERT(!err);
140 bool success = VirtualFree(m_base, 0, MEM_RELEASE);
141 RELEASE_ASSERT(success);
// commit(): make the region readable/writable (mprotect/VirtualAlloc);
// decommit(): drop access and hint the OS it may reclaim physical pages.
// NOTE(review): branch/return lines are elided in this chunk.
145 WARN_UNUSED_RETURN bool commit()
147 ASSERT(Heap::heapDoesNotContainCacheIsEmpty());
149 int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE);
// Undo any earlier MADV_DONTNEED hint when recommitting.
151 madvise(m_base, m_size, MADV_NORMAL);
156 void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE);
// decommit(): PROT_NONE so stray accesses fault, then let the kernel
// discard the backing pages.
164 int err = mprotect(m_base, m_size, PROT_NONE);
165 RELEASE_ASSERT(!err);
166 // FIXME: Consider using MADV_FREE on MacOS.
167 madvise(m_base, m_size, MADV_DONTNEED);
169 bool success = VirtualFree(m_base, m_size, MEM_DECOMMIT);
170 RELEASE_ASSERT(success);
182 // A PageMemoryRegion represents a chunk of reserved virtual address
183 // space containing a number of blink heap pages. On Windows, reserved
184 // virtual address space can only be given back to the system as a
185 // whole. The PageMemoryRegion allows us to do that by keeping track
186 // of the number of pages using it in order to be able to release all
187 // of the virtual address space when there are no more pages using it.
// PageMemoryRegion: a reserved chunk of address space holding several Blink
// heap pages; refcounts pages so the whole reservation can be released at
// once (required on Windows). NOTE(review): many interior lines (braces,
// if/else structure, loop tails) are elided in this chunk.
188 class PageMemoryRegion : public MemoryRegion {
// allocate(): reserve |size| bytes, Blink-page aligned, at a randomized
// address; falls back to over-allocate-and-trim when alignment misses.
201 static PageMemoryRegion* allocate(size_t size, unsigned numPages)
203 ASSERT(Heap::heapDoesNotContainCacheIsEmpty());
205 // Compute a random blink page aligned address for the page memory
206 // region and attempt to get the memory there.
207 Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase());
208 Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress);
// POSIX path: mmap is only a hint, so verify the alignment we got.
211 Address base = static_cast<Address>(mmap(alignedRandomAddress, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0));
212 RELEASE_ASSERT(base != MAP_FAILED);
213 if (base == roundToBlinkPageBoundary(base))
214 return new PageMemoryRegion(base, size, numPages);
216 // We failed to get a blink page aligned chunk of
217 // memory. Unmap the chunk that we got and fall back to
218 // overallocating and selecting an aligned sub part of what
220 int error = munmap(base, size);
221 RELEASE_ASSERT(!error);
222 size_t allocationSize = size + blinkPageSize;
223 base = static_cast<Address>(mmap(alignedRandomAddress, allocationSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0));
224 RELEASE_ASSERT(base != MAP_FAILED);
226 Address end = base + allocationSize;
227 Address alignedBase = roundToBlinkPageBoundary(base);
228 Address regionEnd = alignedBase + size;
230 // If the allocated memory was not blink page aligned release
231 // the memory before the aligned address.
232 if (alignedBase != base)
233 MemoryRegion(base, alignedBase - base).release();
235 // Free the additional memory at the end of the page if any.
237 MemoryRegion(regionEnd, end - regionEnd).release();
239 return new PageMemoryRegion(alignedBase, size, numPages);
// Windows path: VirtualAlloc at the requested aligned address.
241 Address base = static_cast<Address>(VirtualAlloc(alignedRandomAddress, size, MEM_RESERVE, PAGE_NOACCESS));
243 ASSERT(base == alignedRandomAddress);
244 return new PageMemoryRegion(base, size, numPages);
247 // We failed to get the random aligned address that we asked
248 // for. Fall back to overallocating. On Windows it is
249 // impossible to partially release a region of memory
250 // allocated by VirtualAlloc. To avoid wasting virtual address
251 // space we attempt to release a large region of memory
252 // returned as a whole and then allocate an aligned region
253 // inside this larger region.
254 size_t allocationSize = size + blinkPageSize;
255 for (int attempt = 0; attempt < 3; attempt++) {
256 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
257 RELEASE_ASSERT(base);
// Release the probe reservation, then race to re-reserve just the
// aligned sub-range (another thread may grab it in between — hence
// the retry loop).
258 VirtualFree(base, 0, MEM_RELEASE);
260 Address alignedBase = roundToBlinkPageBoundary(base);
261 base = static_cast<Address>(VirtualAlloc(alignedBase, size, MEM_RESERVE, PAGE_NOACCESS));
263 ASSERT(base == alignedBase);
264 return new PageMemoryRegion(alignedBase, size, numPages);
268 // We failed to avoid wasting virtual address space after
// Last resort: keep the oversized reservation and use an aligned
// sub-range inside it.
270 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
271 RELEASE_ASSERT(base);
273 // FIXME: If base is by accident blink page size aligned
274 // here then we can create two pages out of reserved
276 Address alignedBase = roundToBlinkPageBoundary(base);
278 return new PageMemoryRegion(alignedBase, size, numPages);
// Private constructor: records the page count used for release tracking.
283 PageMemoryRegion(Address base, size_t size, unsigned numPages)
284 : MemoryRegion(base, size)
285 , m_numPages(numPages)
292 // Representation of the memory used for a Blink heap page.
294 // The representation keeps track of two memory regions:
296 // 1. The virtual memory reserved from the system in order to be able
297 // to free all the virtual memory reserved. Multiple PageMemory
298 // instances can share the same reserved memory region and
299 // therefore notify the reserved memory region on destruction so
300 // that the system memory can be given back when all PageMemory
301 // instances for that memory are gone.
303 // 2. The writable memory (a sub-region of the reserved virtual
304 // memory region) that is used for the actual heap page payload.
306 // Guard pages are created before and after the writable memory.
// PageMemory (class header elided in this chunk): pairs the reserved
// region with the writable payload sub-region. Destructor unregisters the
// LSan root region and drops the reservation's page refcount.
311 __lsan_unregister_root_region(m_writable.base(), m_writable.size());
312 m_reserved->pageRemoved();
// Commit/decommit apply only to the writable payload, never the guards.
315 bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); }
316 void decommit() { m_writable.decommit(); }
318 Address writableStart() { return m_writable.base(); }
// Carve one page's payload out of a shared region; the first OS page at
// each page offset is left as a guard.
320 static PageMemory* setupPageMemoryInRegion(PageMemoryRegion* region, size_t pageOffset, size_t payloadSize)
322 // Setup the payload one OS page into the page memory. The
323 // first os page is the guard page.
324 Address payloadAddress = region->base() + pageOffset + osPageSize();
325 return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize));
328 // Allocate a virtual address space for one blink page with the
331 // [ guard os page | ... payload ... | guard os page ]
332 // ^---{ aligned to blink page size }
// One-off allocation (used for large objects): its own region, committed
// immediately. NOTE(review): the return statement is elided in this chunk.
334 static PageMemory* allocate(size_t payloadSize)
336 ASSERT(payloadSize > 0);
338 // Virtual memory allocation routines operate in OS page sizes.
339 // Round up the requested size to nearest os page size.
340 payloadSize = roundToOsPageSize(payloadSize);
342 // Overallocate by 2 times OS page size to have space for a
343 // guard page at the beginning and end of blink heap page.
344 size_t allocationSize = payloadSize + 2 * osPageSize();
345 PageMemoryRegion* pageMemoryRegion = PageMemoryRegion::allocate(allocationSize, 1);
346 PageMemory* storage = setupPageMemoryInRegion(pageMemoryRegion, 0, payloadSize);
347 RELEASE_ASSERT(storage->commit());
// Constructor: writable must be fully inside the reservation.
352 PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable)
353 : m_reserved(reserved)
354 , m_writable(writable)
356 ASSERT(reserved->contains(writable));
358 // Register the writable area of the memory as part of the LSan root set.
359 // Only the writable area is mapped and can contain C++ objects. Those
360 // C++ objects can contain pointers to objects outside of the heap and
361 // should therefore be part of the LSan root set.
362 __lsan_register_root_region(m_writable.base(), m_writable.size());
366 PageMemoryRegion* m_reserved;
367 MemoryRegion m_writable;
// GCScope (class header elided in this chunk): RAII helper that parks all
// threads at safepoints for a GC and resumes them on destruction. Callers
// must check allThreadsParked() before proceeding with the GC.
372 explicit GCScope(ThreadState::StackState stackState)
373 : m_state(ThreadState::current())
374 , m_safePointScope(stackState)
375 , m_parkedAllThreads(false)
377 TRACE_EVENT0("blink_gc", "Heap::GCScope");
378 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
379 if (m_state->isMainThread())
380 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting");
382 m_state->checkThread();
384 // FIXME: in an unlikely coincidence that two threads decide
385 // to collect garbage at the same time, avoid doing two GCs in
// A GC must not be started while another GC or sweep is running.
387 RELEASE_ASSERT(!m_state->isInGC());
388 RELEASE_ASSERT(!m_state->isSweepInProgress());
389 if (LIKELY(ThreadState::stopThreads())) {
390 m_parkedAllThreads = true;
393 if (m_state->isMainThread())
394 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
397 bool allThreadsParked() { return m_parkedAllThreads; }
// Destructor body:
401 // Only cleanup if we parked all threads in which case the GC happened
402 // and we need to resume the other threads.
403 if (LIKELY(m_parkedAllThreads)) {
405 ASSERT(!m_state->isInGC());
406 ThreadState::resumeThreads();
411 ThreadState* m_state;
412 ThreadState::SafePointScope m_safePointScope;
413 bool m_parkedAllThreads; // False if we fail to park all threads
// HeapObjectHeader accessors: the size field doubles as a flag container —
// markBitMask (live during GC) and deadBitMask (unreachable, pending sweep)
// are stored alongside the size. NOTE(review): interior lines (e.g. any
// checkHeader() calls) are elided in this chunk.
417 bool HeapObjectHeader::isMarked() const
// Acquire-load so concurrent readers under ASan/TSan see a consistent value.
420 unsigned size = asanUnsafeAcquireLoad(&m_size);
421 return size & markBitMask;
425 void HeapObjectHeader::unmark()
428 m_size &= ~markBitMask;
432 bool HeapObjectHeader::hasDeadMark() const
435 return m_size & deadBitMask;
439 void HeapObjectHeader::clearDeadMark()
442 m_size &= ~deadBitMask;
446 void HeapObjectHeader::setDeadMark()
450 m_size |= deadBitMask;
// Overwrite the magic so use-after-free of the header is detectable.
455 void HeapObjectHeader::zapMagic()
457 m_magic = zappedMagic;
// The header is laid out immediately before the payload.
461 HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload)
463 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
464 HeapObjectHeader* header =
465 reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize);
// Run the object's registered finalizer (if any), then — in debug and
// sanitizer builds — zap the freed memory so dangling pointers fault fast
// and LSan does not chase stale pointers in unreachable memory.
469 void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize)
472 if (gcInfo->hasFinalizer()) {
473 gcInfo->m_finalize(object);
476 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
477 // In Debug builds, memory is zapped when it's freed, and the zapped memory is
478 // zeroed out when the memory is reused. Memory is also zapped when using Leak
479 // Sanitizer because the heap is used as a root region for LSan and therefore
480 // pointers in unreachable memory could hide leaks.
481 for (size_t i = 0; i < objectSize; i++)
482 object[i] = finalizedZapValue;
484 // Zap the primary vTable entry (secondary vTable entries are not zapped).
485 if (gcInfo->hasVTable()) {
486 *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
489 // In Release builds, the entire object is zeroed out when it is added to the free list.
490 // This happens right after sweeping the page and before the thread commences execution.
// A finalized header carries its own GCInfo pointer, so it can delegate to
// the static HeapObjectHeader::finalize with its stored metadata.
494 void FinalizedHeapObjectHeader::finalize()
496 HeapObjectHeader::finalize(m_gcInfo, payload(), payloadSize());
// LargeHeapObject<Header>: forwarding wrappers onto the embedded object
// header, plus conservative-pointer marking used during stack scanning.
499 template<typename Header>
500 void LargeHeapObject<Header>::unmark()
502 return heapObjectHeader()->unmark();
505 template<typename Header>
506 bool LargeHeapObject<Header>::isMarked()
508 return heapObjectHeader()->isMarked();
511 template<typename Header>
512 void LargeHeapObject<Header>::setDeadMark()
514 heapObjectHeader()->setDeadMark();
517 template<typename Header>
518 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
520 ASSERT(contains(address));
// Ignore pointers that miss the payload or hit an object already declared
// dead. NOTE(review): the marking tail of this method is elided here.
521 if (!objectContains(address) || heapObjectHeader()->hasDeadMark())
523 #if ENABLE(GC_PROFILE_MARKING)
524 visitor->setHostInfo(&address, "stack");
// Debug helper: confirm an object's memory is entirely zero. Used to
// sanity-check objects whose vtable is not yet written (mid-construction).
// NOTE(review): the return statements are elided in this chunk.
530 static bool isUninitializedMemory(void* objectPointer, size_t objectSize)
532 // Scan through the object's fields and check that they are all zero.
533 Address* objectFields = reinterpret_cast<Address*>(objectPointer);
534 for (size_t i = 0; i < objectSize / sizeof(Address); ++i) {
535 if (objectFields[i] != 0)
// mark() specializations: an object whose vtable pointer is still null is
// mid-construction — mark it without tracing (its fields must all still be
// zero); otherwise mark and trace via the header's trace callback.
543 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
545 if (heapObjectHeader()->hasVTable() && !vTableInitialized(payload())) {
546 FinalizedHeapObjectHeader* header = heapObjectHeader();
547 visitor->markNoTracing(header);
548 ASSERT(isUninitializedMemory(header->payload(), header->payloadSize()));
550 visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback());
// Plain-header variant: GCInfo comes from the page, not the object header.
555 void LargeHeapObject<HeapObjectHeader>::mark(Visitor* visitor)
558 if (gcInfo()->hasVTable() && !vTableInitialized(payload())) {
559 HeapObjectHeader* header = heapObjectHeader();
560 visitor->markNoTracing(header);
561 ASSERT(isUninitializedMemory(header->payload(), header->payloadSize()));
563 visitor->mark(heapObjectHeader(), gcInfo()->m_trace);
// finalize() specializations: the finalized header knows its own GCInfo;
// the plain-header variant fetches it from the page-level gcInfo().
568 void LargeHeapObject<FinalizedHeapObjectHeader>::finalize()
570 heapObjectHeader()->finalize();
574 void LargeHeapObject<HeapObjectHeader>::finalize()
577 HeapObjectHeader::finalize(gcInfo(), payload(), payloadSize());
// The finalized header precedes the payload; step back by its (larger) size.
580 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload)
582 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
583 FinalizedHeapObjectHeader* header =
584 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
// ThreadHeap: a per-thread, per-header-type heap. The constructor zeroes
// the bump-allocation cursor, page/large-object lists, and free-list
// bookkeeping. NOTE(review): some initializer lines are elided here.
588 template<typename Header>
589 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
590 : m_currentAllocationPoint(0)
591 , m_remainingAllocationSize(0)
593 , m_firstLargeHeapObject(0)
594 , m_firstPageAllocatedDuringSweeping(0)
595 , m_lastPageAllocatedDuringSweeping(0)
597 , m_biggestFreeListIndex(0)
598 , m_threadState(state)
600 , m_numberOfNormalPages(0)
601 , m_promptlyFreedCount(0)
606 template<typename Header>
607 ThreadHeap<Header>::~ThreadHeap()
// All pages must already have been handed off (see cleanupPages()).
609 ASSERT(!m_firstPage);
610 ASSERT(!m_firstLargeHeapObject);
// At thread shutdown, move every page and large object into the global
// orphaned-page pool so dangling cross-thread pointers remain mapped until
// a later global GC proves them dead.
613 template<typename Header>
614 void ThreadHeap<Header>::cleanupPages()
617 flushHeapContainsCache();
619 // Add the ThreadHeap's pages to the orphanedPagePool.
620 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next)
621 Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
624 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next)
625 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject);
626 m_firstLargeHeapObject = 0;
// Slow allocation path: possibly trigger (conservative) or request a GC,
// refill the bump-allocation area, then retry the inline fast path.
629 template<typename Header>
630 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
632 size_t allocationSize = allocationSizeFromSize(size);
633 if (threadState()->shouldGC()) {
634 if (threadState()->shouldForceConservativeGC())
635 Heap::collectGarbage(ThreadState::HeapPointersOnStack);
637 threadState()->setGCRequested();
// After this, the current allocation area is guaranteed to fit the request.
639 ensureCurrentAllocation(allocationSize, gcInfo);
640 return allocate(size, gcInfo);
// Take the first free-list entry large enough for minSize, scanning the
// power-of-two buckets from the largest downward; m_biggestFreeListIndex is
// lowered as empty buckets are passed. NOTE(review): some branch/return
// lines are elided in this chunk.
643 template<typename Header>
644 bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
646 size_t bucketSize = 1 << m_biggestFreeListIndex;
647 int i = m_biggestFreeListIndex;
648 for (; i > 0; i--, bucketSize >>= 1) {
// Every entry in bucket i is at least bucketSize; stop once too small.
649 if (bucketSize < minSize)
651 FreeListEntry* entry = m_freeLists[i];
653 m_biggestFreeListIndex = i;
654 entry->unlink(&m_freeLists[i]);
// The taken entry becomes the new bump-allocation area.
655 setAllocationPoint(entry->address(), entry->size());
656 ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize);
660 m_biggestFreeListIndex = i;
// Guarantee the bump-allocation area can satisfy minSize: return any
// leftover space to the free list, then try free list -> coalesce ->
// brand-new page, in that order.
664 template<typename Header>
665 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
667 ASSERT(minSize >= allocationGranularity);
668 if (remainingAllocationSize() >= minSize)
671 if (remainingAllocationSize() > 0) {
672 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
673 setAllocationPoint(0, 0);
675 if (allocateFromFreeList(minSize))
677 if (coalesce(minSize) && allocateFromFreeList(minSize))
// A freshly added page must always be able to satisfy the request.
679 addPageToHeap(gcInfo);
680 bool success = allocateFromFreeList(minSize);
681 RELEASE_ASSERT(success);
// Linear search of this heap's normal pages (including pages allocated
// while a sweep was in progress) and large objects for the one containing
// |address|. NOTE(review): the return statements are elided in this chunk.
684 template<typename Header>
685 BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address)
687 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
688 if (page->contains(address))
691 for (HeapPage<Header>* page = m_firstPageAllocatedDuringSweeping; page; page = page->next()) {
692 if (page->contains(address))
695 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
696 // Check that large pages are blinkPageSize aligned (modulo the
697 // osPageSize for the guard page).
698 ASSERT(reinterpret_cast<Address>(current) - osPageSize() == roundToBlinkPageStart(reinterpret_cast<Address>(current)));
699 if (current->contains(address))
// Profiling helper (GC_PROFILE_MARKING): find the GCInfo for an address in
// one of this heap's large objects. NOTE(review): the not-found return is
// elided in this chunk.
705 #if ENABLE(GC_PROFILE_MARKING)
706 template<typename Header>
707 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeHeapObject(Address address)
709 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
710 if (current->contains(address))
711 return current->gcInfo();
// Heap snapshot for tracing (GC_PROFILE_HEAP): emit per-page and per-large-
// object stats as JSON. Only the first threshold-many pages get a detailed
// dump; the rest contribute aggregate counts only.
717 #if ENABLE(GC_PROFILE_HEAP)
718 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0
719 template<typename Header>
720 void ThreadHeap<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
722 size_t previousPageCount = info->pageCount;
724 json->beginArray("pages");
725 for (HeapPage<Header>* page = m_firstPage; page; page = page->next(), ++info->pageCount) {
726 // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots.
727 if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) {
729 json->pushInteger(reinterpret_cast<intptr_t>(page));
730 page->snapshot(json, info);
// Over-threshold pages still update the aggregate info (null json).
733 page->snapshot(0, info);
738 json->beginArray("largeObjects");
739 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
740 json->beginDictionary();
741 current->snapshot(json, info);
742 json->endDictionary();
746 json->setInteger("pageCount", info->pageCount - previousPageCount);
// Insert [address, address + size) into the appropriate power-of-two free
// list bucket. Spans smaller than a FreeListEntry get a dummy header only
// and are reclaimed by the next sweep.
750 template<typename Header>
751 void ThreadHeap<Header>::addToFreeList(Address address, size_t size)
753 ASSERT(heapPageFromAddress(address));
754 ASSERT(heapPageFromAddress(address + size - 1));
755 ASSERT(size < blinkPagePayloadSize());
756 // The free list entries are only pointer aligned (but when we allocate
757 // from them we are 8 byte aligned due to the header size).
758 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask));
759 ASSERT(!(size & allocationMask));
// Free memory is poisoned so use-after-free is caught under ASan.
760 ASAN_POISON_MEMORY_REGION(address, size);
761 FreeListEntry* entry;
762 if (size < sizeof(*entry)) {
763 // Create a dummy header with only a size and freelist bit set.
764 ASSERT(size >= sizeof(BasicObjectHeader));
765 // Free list encode the size to mark the lost memory as freelist memory.
766 new (NotNull, address) BasicObjectHeader(BasicObjectHeader::freeListEncodedSize(size));
767 // This memory gets lost. Sweeping can reclaim it.
770 entry = new (NotNull, address) FreeListEntry(size);
771 #if defined(ADDRESS_SANITIZER)
772 // For ASan we don't add the entry to the free lists until the asanDeferMemoryReuseCount
773 // reaches zero. However we always add entire pages to ensure that adding a new page will
774 // increase the allocation space.
775 if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList())
778 int index = bucketIndexForSize(size);
779 entry->link(&m_freeLists[index]);
780 if (!m_lastFreeListEntries[index])
781 m_lastFreeListEntries[index] = entry;
782 if (index > m_biggestFreeListIndex)
783 m_biggestFreeListIndex = index;
// Eagerly finalize an object freed by the program outside of a GC sweep
// (e.g. an explicitly released backing store); the space is only merged
// back into the free lists later, by coalesce().
786 template<typename Header>
787 void ThreadHeap<Header>::promptlyFreeObject(Header* header)
789 ASSERT(!m_threadState->isSweepInProgress());
790 header->checkHeader();
791 Address address = reinterpret_cast<Address>(header);
792 Address payload = header->payload();
793 size_t size = header->size();
794 size_t payloadSize = header->payloadSize();
795 BaseHeapPage* page = pageHeaderFromObject(address);
797 ASSERT(page == heapPageFromAddress(address));
800 ThreadState::NoSweepScope scope(m_threadState);
801 HeapObjectHeader::finalize(header->gcInfo(), payload, payloadSize);
802 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
// Release builds zero freed payloads here; debug/sanitizer builds zap
// inside finalize() instead.
803 memset(payload, 0, payloadSize);
805 header->markPromptlyFreed();
// Track freed volume per page so coalesce() can pick good candidates.
808 page->addToPromptlyFreedSize(size);
809 m_promptlyFreedCount++;
// After enough prompt frees (>= 256), scan the first few pages and merge
// runs of promptly-freed objects into larger free-list entries, so an
// allocation of minSize can be satisfied without growing the heap.
// NOTE(review): several branch/return/loop-structure lines are elided in
// this chunk.
812 template<typename Header>
813 bool ThreadHeap<Header>::coalesce(size_t minSize)
815 if (m_threadState->isSweepInProgress())
818 if (m_promptlyFreedCount < 256)
821 // The smallest bucket able to satisfy an allocation request for minSize is
822 // the bucket where all free-list entries are guarantied to be larger than
823 // minSize. That bucket is one larger than the bucket minSize would go into.
824 size_t neededBucketIndex = bucketIndexForSize(minSize) + 1;
825 size_t neededFreeEntrySize = 1 << neededBucketIndex;
826 size_t neededPromptlyFreedSize = neededFreeEntrySize * 3;
827 size_t foundFreeEntrySize = 0;
829 // Bailout early on large requests because it is unlikely we will find a free-list entry.
830 if (neededPromptlyFreedSize >= blinkPageSize)
833 TRACE_EVENT_BEGIN2("blink_gc", "ThreadHeap::coalesce" , "requestedSize", (unsigned)minSize , "neededSize", (unsigned)neededFreeEntrySize);
835 // Search for a coalescing candidate.
836 ASSERT(!ownsNonEmptyAllocationArea());
837 size_t pageCount = 0;
838 HeapPage<Header>* page = m_firstPage;
840 // Only consider one of the first 'n' pages. A "younger" page is more likely to have freed backings.
841 if (++pageCount > numberOfPagesToConsiderForCoalescing) {
845 // Only coalesce pages with "sufficient" promptly freed space.
846 if (page->promptlyFreedSize() >= neededPromptlyFreedSize) {
852 // If we found a likely candidate, fully coalesce all its promptly-freed entries.
854 page->clearObjectStartBitMap();
855 page->resetPromptlyFreedSize();
856 size_t freedCount = 0;
857 Address startOfGap = page->payload();
// Walk the page header-by-header, accumulating contiguous freed spans.
858 for (Address headerAddress = startOfGap; headerAddress < page->end(); ) {
859 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
860 ASSERT(basicHeader->size() > 0);
861 ASSERT(basicHeader->size() < blinkPagePayloadSize());
863 if (basicHeader->isPromptlyFreed()) {
864 stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->payloadSize());
865 size_t size = basicHeader->size();
866 ASSERT(size >= sizeof(Header));
867 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
868 memset(headerAddress, 0, sizeof(Header));
871 headerAddress += size;
// A live object ends the current gap: flush it to the free list.
875 if (startOfGap != headerAddress) {
876 size_t size = headerAddress - startOfGap;
877 addToFreeList(startOfGap, size);
878 if (size > foundFreeEntrySize)
879 foundFreeEntrySize = size;
882 headerAddress += basicHeader->size();
883 startOfGap = headerAddress;
// Flush the trailing gap, if the page ends in freed space.
886 if (startOfGap != page->end()) {
887 size_t size = page->end() - startOfGap;
888 addToFreeList(startOfGap, size);
889 if (size > foundFreeEntrySize)
890 foundFreeEntrySize = size;
893 // Check before subtracting because freedCount might not be balanced with freed entries.
894 if (freedCount < m_promptlyFreedCount)
895 m_promptlyFreedCount -= freedCount;
897 m_promptlyFreedCount = 0;
900 TRACE_EVENT_END1("blink_gc", "ThreadHeap::coalesce", "foundFreeEntrySize", (unsigned)foundFreeEntrySize);
902 if (foundFreeEntrySize < neededFreeEntrySize) {
903 // If coalescing failed, reset the freed count to delay coalescing again.
904 m_promptlyFreedCount = 0;
// Allocate a dedicated PageMemory for an object too large for a normal
// heap page. Layout: [LargeHeapObject | padding | Header | payload], with
// guard pages supplied by PageMemory::allocate.
911 template<typename Header>
912 Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInfo)
914 // Caller already added space for object header and rounded up to allocation alignment
915 ASSERT(!(size & allocationMask));
917 size_t allocationSize = sizeof(LargeHeapObject<Header>) + size;
919 // Ensure that there is enough space for alignment. If the header
920 // is not a multiple of 8 bytes we will allocate an extra
921 // headerPadding<Header> bytes to ensure it 8 byte aligned.
922 allocationSize += headerPadding<Header>();
924 // If ASan is supported we add allocationGranularity bytes to the allocated space and
925 // poison that to detect overflows
926 #if defined(ADDRESS_SANITIZER)
927 allocationSize += allocationGranularity;
// Large allocations only request a GC; they never trigger one inline.
929 if (threadState()->shouldGC())
930 threadState()->setGCRequested();
931 Heap::flushHeapDoesNotContainCache();
932 PageMemory* pageMemory = PageMemory::allocate(allocationSize);
933 Address largeObjectAddress = pageMemory->writableStart();
934 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
// Zero the object area before constructing the header in place.
935 memset(headerAddress, 0, size);
936 Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
937 Address result = headerAddress + sizeof(*header);
938 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
939 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState());
941 // Poison the object header and allocationGranularity bytes after the object
942 ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
943 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
944 largeObject->link(&m_firstLargeHeapObject);
945 stats().increaseAllocatedSpace(largeObject->size());
946 stats().increaseObjectSpace(largeObject->payloadSize());
// Unlink a large object and release its storage. Objects freed while their
// owning thread is terminating are parked in the orphaned-page pool instead
// of being freed outright, so stale cross-thread pointers stay mapped.
950 template<typename Header>
951 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
953 flushHeapContainsCache();
954 object->unlink(previousNext);
957 // Unpoison the object header and allocationGranularity bytes after the
958 // object before freeing.
959 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
960 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
962 if (object->terminating()) {
963 ASSERT(ThreadState::current()->isTerminating());
964 // The thread is shutting down so this object is being removed as part
965 // of a thread local GC. In that case the object could be traced in the
966 // next global GC either due to a dead object being traced via a
967 // conservative pointer or due to a programming error where an object
968 // in another thread heap keeps a dangling pointer to this object.
969 // To guard against this we put the large object memory in the
970 // orphanedPagePool to ensure it is still reachable. After the next global
971 // GC it can be released assuming no rogue/dangling pointers refer to
973 // NOTE: large objects are not moved to the free page pool as it is
974 // unlikely they can be reused due to their individual sizes.
975 Heap::orphanedPagePool()->addOrphanedPage(m_index, object);
// Normal path: destroy the page header, then free the backing memory.
977 ASSERT(!ThreadState::current()->isTerminating());
978 PageMemory* memory = object->storage();
979 object->~LargeHeapObject<Header>();
// PagePool base constructor: start every per-heap pool list empty. The
// FreePagePool destructor drains each list and releases the pooled
// PageMemory. NOTE(review): loop bodies are partially elided in this chunk.
984 template<typename DataType>
985 PagePool<DataType>::PagePool()
987 for (int i = 0; i < NumberOfHeaps; ++i) {
992 FreePagePool::~FreePagePool()
994 for (int index = 0; index < NumberOfHeaps; ++index) {
995 while (PoolEntry* entry = m_pool[index]) {
996 m_pool[index] = entry->next;
997 PageMemory* memory = entry->data;
// Push a page onto the per-heap free pool; access is guarded by a per-index
// mutex since pools are shared across threads.
1005 void FreePagePool::addFreePage(int index, PageMemory* memory)
1007 // When adding a page to the pool we decommit it to ensure it is unused
1008 // while in the pool. This also allows the physical memory, backing the
1009 // page, to be given back to the OS.
1011 MutexLocker locker(m_mutex[index]);
1012 PoolEntry* entry = new PoolEntry(memory, m_pool[index]);
1013 m_pool[index] = entry;
// Pop pooled pages until one can be successfully recommitted; a commit
// failure skips that entry and tries the next. NOTE(review): return and
// cleanup lines are elided in this chunk.
1016 PageMemory* FreePagePool::takeFreePage(int index)
1018 MutexLocker locker(m_mutex[index]);
1019 while (PoolEntry* entry = m_pool[index]) {
1020 m_pool[index] = entry->next;
1021 PageMemory* memory = entry->data;
1024 if (memory->commit())
1027 // We got some memory, but failed to commit it, try again.
// OrphanedPagePool: pages from terminated threads, kept mapped until a
// global GC proves nothing points into them. The destructor tears down any
// remaining entries. NOTE(review): cleanup lines are elided in this chunk.
1033 OrphanedPagePool::~OrphanedPagePool()
1035 for (int index = 0; index < NumberOfHeaps; ++index) {
1036 while (PoolEntry* entry = m_pool[index]) {
1037 m_pool[index] = entry->next;
1038 BaseHeapPage* page = entry->data;
1040 PageMemory* memory = page->storage();
1042 page->~BaseHeapPage();
// Adding marks the page orphaned, which also clears its trace bits.
1048 void OrphanedPagePool::addOrphanedPage(int index, BaseHeapPage* page)
1050 page->markOrphaned();
1051 PoolEntry* entry = new PoolEntry(page, m_pool[index]);
1052 m_pool[index] = entry;
// At a global safepoint, free or recycle orphaned pages that were NOT
// traced in the last GC; pages that were traced stay parked (and are
// re-orphaned to clear their trace bits) until a future GC clears them.
1056 void OrphanedPagePool::decommitOrphanedPages()
1059 // No locking needed as all threads are at safepoints at this point in time.
1060 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1061 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
1062 ASSERT((*it)->isAtSafePoint());
1065 for (int index = 0; index < NumberOfHeaps; ++index) {
1066 PoolEntry* entry = m_pool[index];
1067 PoolEntry** prevNext = &m_pool[index];
1069 BaseHeapPage* page = entry->data;
1070 if (page->tracedAfterOrphaned()) {
1071 // If the orphaned page was traced in the last GC it is not
1072 // decommited. We only decommit a page, ie. put it in the
1073 // memory pool, when the page has no objects pointing to it.
1074 // We remark the page as orphaned to clear the tracedAfterOrphaned
1075 // flag and any object trace bits that were set during tracing.
1076 page->markOrphaned();
1077 prevNext = &entry->next;
1078 entry = entry->next;
1082 // Page was not traced. Check if we should reuse the memory or just
1083 // free it. Large object memory is not reused, but freed, normal
1084 // blink heap pages are reused.
1085 // NOTE: We call the destructor before freeing or adding to the
1087 PageMemory* memory = page->storage();
1088 if (page->isLargeObject()) {
1089 page->~BaseHeapPage();
1092 page->~BaseHeapPage();
1093 // Clear out the page's memory before adding it to the free page
1094 // pool to ensure it is zero filled when being reused.
1095 clearMemory(memory);
1096 Heap::freePagePool()->addFreePage(index, memory);
// Unlink and delete the pool entry for the page we just disposed of.
1099 PoolEntry* deadEntry = entry;
1100 entry = entry->next;
// Zero-fills the page's writable payload. Under ASan this is done with a
// byte-at-a-time loop (annotated elsewhere) so poisoned bytes get zapped too;
// otherwise a plain memset suffices.
1108 void OrphanedPagePool::clearMemory(PageMemory* memory)
1110 #if defined(ADDRESS_SANITIZER)
1111 // Don't use memset when running with ASan since this needs to zap
1112 // poisoned memory as well and the NO_SANITIZE_ADDRESS annotation
1113 // only works for code in this method and not for calls to memset.
1114 Address base = memory->writableStart();
1115 for (Address current = base; current < base + blinkPagePayloadSize(); ++current)
1118 memset(memory->writableStart(), 0, blinkPagePayloadSize());
// Linear scan over all pooled orphaned pages in every heap; returns whether
// any page's address range contains |object|.
1123 bool OrphanedPagePool::contains(void* object)
1125 for (int index = 0; index < NumberOfHeaps; ++index) {
1126 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) {
1127 BaseHeapPage* page = entry->data;
1128 if (page->contains(reinterpret_cast<Address>(object)))
// Specialization for finalized headers: each object carries its own GCInfo,
// so no per-page GCInfo is stored (it stays 0).
1137 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
1139 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on
1140 // the heap should be unused (ie. 0).
// Specialization for plain headers: all objects on the page share one GCInfo,
// so it is passed down to the page allocation.
1145 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
1147 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap
1148 // since it is the same for all objects
1150 allocatePage(gcInfo);
// Removes |page| from this heap under the sweep mutex. A page belonging to a
// terminating thread is parked in the OrphanedPagePool (it may still be
// reached by a later conservative/global GC); otherwise the page is destroyed
// and its memory returned to the FreePagePool.
1153 template <typename Header>
1154 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page)
1156 MutexLocker locker(m_threadState->sweepMutex());
1157 flushHeapContainsCache();
1158 if (page->terminating()) {
1159 // The thread is shutting down so this page is being removed as part
1160 // of a thread local GC. In that case the page could be accessed in the
1161 // next global GC either due to a dead object being traced via a
1162 // conservative pointer or due to a programming error where an object
1163 // in another thread heap keeps a dangling pointer to this object.
1164 // To guard against this we put the page in the orphanedPagePool to
1165 // ensure it is still reachable. After the next global GC it can be
1166 // decommitted and moved to the page pool assuming no rogue/dangling
1167 // pointers refer to it.
1168 Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
1170 PageMemory* memory = page->storage();
1171 page->~HeapPage<Header>();
1172 Heap::freePagePool()->addFreePage(m_index, memory);
// Acquires a PageMemory (from the global FreePagePool, allocating a fresh
// guarded region if the pool is empty), placement-constructs a HeapPage in
// it, links the page into the appropriate list, and seeds the free list with
// the page's whole payload.
1176 template<typename Header>
1177 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
1179 Heap::flushHeapDoesNotContainCache();
1180 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index);
1181 // We continue allocating page memory until we succeed in getting one.
1182 // Since the FreePagePool is global other threads could use all the
1183 // newly allocated page memory before this thread calls takeFreePage.
1184 while (!pageMemory) {
1185 // Allocate a memory region for blinkPagesPerRegion pages that
1186 // will each have the following layout.
1188 // [ guard os page | ... payload ... | guard os page ]
1189 // ^---{ aligned to blink page size }
1190 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion);
1191 // Setup the PageMemory object for each of the pages in the
1194 for (size_t i = 0; i < blinkPagesPerRegion; i++) {
1195 Heap::freePagePool()->addFreePage(m_index, PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize()));
1196 offset += blinkPageSize;
1198 pageMemory = Heap::freePagePool()->takeFreePage(m_index);
1200 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
1201 // Use a separate list for pages allocated during sweeping to make
1202 // sure that we do not accidentally sweep objects that have been
1203 // allocated during sweeping.
1204 if (m_threadState->isSweepInProgress()) {
1205 if (!m_lastPageAllocatedDuringSweeping)
1206 m_lastPageAllocatedDuringSweeping = page;
1207 page->link(&m_firstPageAllocatedDuringSweeping);
1209 page->link(&m_firstPage);
1211 ++m_numberOfNormalPages;
1212 addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
// Linear scan of the main page list (the pages a sweep will visit) for a
// page containing |address|.
1216 template<typename Header>
1217 bool ThreadHeap<Header>::pagesToBeSweptContains(Address address)
1219 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
1220 if (page->contains(address))
// Linear scan of the pages allocated while a sweep was in progress (these
// are kept on a separate list so the sweep does not visit them).
1226 template<typename Header>
1227 bool ThreadHeap<Header>::pagesAllocatedDuringSweepingContains(Address address)
1229 for (HeapPage<Header>* page = m_firstPageAllocatedDuringSweeping; page; page = page->next()) {
1230 if (page->contains(address))
// Accumulates stats from every normal page and large object in this heap.
// Only valid outside sweeping (asserts the during-sweep list is empty).
1236 template<typename Header>
1237 void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats)
1239 ASSERT(!m_firstPageAllocatedDuringSweeping);
1240 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1241 page->getStats(scannedStats);
1242 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
1243 current->getStats(scannedStats);
// Sweeps all normal pages: completely-empty pages are unlinked and removed
// from the heap (with m_mergePoint fixed up if it pointed at the removed
// page); all other pages are swept in place and kept on the list.
1247 template<typename Header>
1248 void ThreadHeap<Header>::sweepNormalPages(HeapStats* stats)
1250 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepNormalPages");
1251 HeapPage<Header>* page = m_firstPage;
1252 HeapPage<Header>** previousNext = &m_firstPage;
1253 HeapPage<Header>* previous = 0;
1255 page->resetPromptlyFreedSize();
1256 if (page->isEmpty()) {
1257 HeapPage<Header>* unused = page;
1258 if (unused == m_mergePoint)
1259 m_mergePoint = previous;
1260 page = page->next();
1261 HeapPage<Header>::unlink(this, unused, previousNext);
1262 --m_numberOfNormalPages;
1264 page->sweep(stats, this);
1265 previousNext = &page->m_next;
1267 page = page->next();
// Sweeps the large-object list: marked objects are kept (and counted into
// |stats|); unmarked ones are unlinked via |previousNext| and freed.
// FIX(review): line 1282 contained the mojibake token "¤t" — the HTML
// entity "&curren;" rendered over the original "&current" — which made the
// statement "previousNext = ¤t->m_next;" invalid C++. Restored to
// take the address of current's m_next link.
1272 template<typename Header>
1273 void ThreadHeap<Header>::sweepLargePages(HeapStats* stats)
1275 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages");
1276 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
1277 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
1278 if (current->isMarked()) {
1279 stats->increaseAllocatedSpace(current->size());
1280 stats->increaseObjectSpace(current->payloadSize());
1282 previousNext = &current->m_next;
1283 current = current->next();
1285 LargeHeapObject<Header>* next = current->next();
1286 freeLargeObject(current, previousNext);
1293 // STRICT_ASAN_FINALIZATION_CHECKING turns on poisoning of all objects during
1294 // sweeping to catch cases where dead objects touch each other. This is not
1295 // turned on by default because it also triggers for cases that are safe.
1296 // Examples of such safe cases are context life cycle observers and timers
1297 // embedded in garbage collected objects.
1298 #define STRICT_ASAN_FINALIZATION_CHECKING 0
// Entry point for sweeping this heap: optionally pre-poisons all unmarked
// objects under ASan (strict mode only) to catch cross-finalizer access,
// then sweeps normal pages and large objects.
1300 template<typename Header>
1301 void ThreadHeap<Header>::sweep(HeapStats* stats)
1303 ASSERT(isConsistentForSweeping());
1304 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING
1305 // When using ASan do a pre-sweep where all unmarked objects are
1306 // poisoned before calling their finalizer methods. This can catch
1307 // the case where the finalizer of an object tries to modify
1308 // another object as part of finalization.
1309 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1310 page->poisonUnmarkedObjects();
1312 sweepNormalPages(stats);
1313 sweepLargePages(stats);
// After a sweep completes, splices the pages that were allocated during
// sweeping onto the front of the main page list and resets both
// during-sweep list pointers.
1316 template<typename Header>
1317 void ThreadHeap<Header>::postSweepProcessing()
1319 // If pages have been allocated during sweeping, link them into
1320 // the list of pages.
1321 if (m_firstPageAllocatedDuringSweeping) {
1322 m_lastPageAllocatedDuringSweeping->m_next = m_firstPage;
1323 m_firstPage = m_firstPageAllocatedDuringSweeping;
1324 m_lastPageAllocatedDuringSweeping = 0;
1325 m_firstPageAllocatedDuringSweeping = 0;
// Debug-oriented invariant check: no free-list entry and no current
// allocation point may live on a page that is about to be swept (they must
// be on pages allocated during sweeping, if anywhere).
1330 template<typename Header>
1331 bool ThreadHeap<Header>::isConsistentForSweeping()
1333 // A thread heap is consistent for sweeping if none of the pages to
1334 // be swept contain a freelist block or the current allocation
1336 for (size_t i = 0; i < blinkPageSizeLog2; i++) {
1337 for (FreeListEntry* freeListEntry = m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) {
1338 if (pagesToBeSweptContains(freeListEntry->address())) {
1341 ASSERT(pagesAllocatedDuringSweepingContains(freeListEntry->address()));
1344 if (ownsNonEmptyAllocationArea()) {
1345 ASSERT(pagesToBeSweptContains(currentAllocationPoint())
1346 || pagesAllocatedDuringSweepingContains(currentAllocationPoint()));
1347 return !pagesToBeSweptContains(currentAllocationPoint());
// Returns any outstanding allocation area to the free list and clears the
// allocation point, establishing the invariant checked by
// isConsistentForSweeping().
1353 template<typename Header>
1354 void ThreadHeap<Header>::makeConsistentForSweeping()
1356 if (ownsNonEmptyAllocationArea())
1357 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
1358 setAllocationPoint(0, 0);
// Delegates to each normal page; for large objects, unmarked ones get the
// dead mark set (marked ones are handled in the elided branch).
1362 template<typename Header>
1363 void ThreadHeap<Header>::clearLiveAndMarkDead()
1365 ASSERT(isConsistentForSweeping());
1366 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1367 page->clearLiveAndMarkDead();
1368 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
1369 if (current->isMarked())
1372 current->setDeadMark();
// Resets the promptly-freed counter and clears every free-list bucket
// (the per-bucket head reset is elided from this listing).
1376 template<typename Header>
1377 void ThreadHeap<Header>::clearFreeLists()
1379 m_promptlyFreedCount = 0;
1380 for (size_t i = 0; i < blinkPageSizeLog2; i++) {
1382 m_lastFreeListEntries[i] = 0;
// Maps an allocation size to its free-list bucket index (body elided from
// this listing).
1386 int BaseHeap::bucketIndexForSize(size_t size)
// Page constructor: verifies at compile time that the header struct keeps
// the payload allocation-aligned, starts with the object-start bitmap not
// yet computed, and accounts a full blink page to the heap's stats.
1397 template<typename Header>
1398 HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo)
1399 : BaseHeapPage(storage, gcInfo, heap->threadState())
1402 COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
1403 m_objectStartBitMapComputed = false;
1404 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
1405 heap->stats().increaseAllocatedSpace(blinkPageSize);
// Inserts this page at the head position referenced by |prevNext|
// (body elided from this listing).
1408 template<typename Header>
1409 void HeapPage<Header>::link(HeapPage** prevNext)
// Splices |unused| out of the list via |prevNext| and hands it back to the
// heap, which destroys it and recycles or orphans its memory.
1415 template<typename Header>
1416 void HeapPage<Header>::unlink(ThreadHeap<Header>* heap, HeapPage* unused, HeapPage** prevNext)
1418 *prevNext = unused->m_next;
1419 heap->removePageFromHeap(unused);
// Walks the page header-by-header, counting payload bytes of non-free
// objects into |stats|; the whole blink page is counted as allocated space.
1422 template<typename Header>
1423 void HeapPage<Header>::getStats(HeapStats& stats)
1425 stats.increaseAllocatedSpace(blinkPageSize);
1426 Address headerAddress = payload();
1427 ASSERT(headerAddress != end());
1429 Header* header = reinterpret_cast<Header*>(headerAddress);
1430 if (!header->isFree())
1431 stats.increaseObjectSpace(header->payloadSize());
1432 ASSERT(header->size() < blinkPagePayloadSize());
1433 headerAddress += header->size();
1434 ASSERT(headerAddress <= end());
1435 } while (headerAddress < end());
// A page is empty when its very first header is a free block spanning the
// entire payload.
1438 template<typename Header>
1439 bool HeapPage<Header>::isEmpty()
1441 BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
1442 return header->isFree() && (header->size() == payloadSize());
// Sweeps one page: coalesces runs of free blocks and unmarked (finalized)
// objects into gaps that are re-added to the free list, keeps marked
// objects, and maintains the zero-fill invariant for freed memory (skipped
// under ASSERT/LSan/ASan builds, where poisoning/diagnostics need the bytes).
1445 template<typename Header>
1446 void HeapPage<Header>::sweep(HeapStats* stats, ThreadHeap<Header>* heap)
1448 clearObjectStartBitMap();
1449 stats->increaseAllocatedSpace(blinkPageSize);
1450 Address startOfGap = payload();
1451 for (Address headerAddress = startOfGap; headerAddress < end(); ) {
1452 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
1453 ASSERT(basicHeader->size() > 0);
1454 ASSERT(basicHeader->size() < blinkPagePayloadSize());
1456 if (basicHeader->isFree()) {
1457 size_t size = basicHeader->size();
1458 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
1459 // Zero the memory in the free list header to maintain the
1460 // invariant that memory on the free list is zero filled.
1461 // The rest of the memory is already on the free list and is
1462 // therefore already zero filled.
1463 if (size < sizeof(FreeListEntry))
1464 memset(headerAddress, 0, size)
1466 memset(headerAddress, 0, sizeof(FreeListEntry));
1468 headerAddress += size;
1471 // At this point we know this is a valid object of type Header
1472 Header* header = static_cast<Header*>(basicHeader);
1474 if (!header->isMarked()) {
1475 // For ASan we unpoison the specific object when calling the finalizer and
1476 // poison it again when done to allow the object's own finalizer to operate
1477 // on the object, but not have other finalizers be allowed to access it.
1478 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
1480 size_t size = header->size();
1481 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
1482 // This memory will be added to the freelist. Maintain the invariant
1483 // that memory on the freelist is zero filled.
1484 memset(headerAddress, 0, size);
1486 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1487 headerAddress += size;
1491 if (startOfGap != headerAddress)
1492 heap->addToFreeList(startOfGap, headerAddress - startOfGap);
1494 headerAddress += header->size();
1495 stats->increaseObjectSpace(header->payloadSize());
1496 startOfGap = headerAddress;
1498 if (startOfGap != end())
1499 heap->addToFreeList(startOfGap, end() - startOfGap);
// Walks every header on the page: free entries are skipped (isMarked must
// not be called on them), marked objects are handled in the elided branch,
// and everything else gets the dead mark set.
1502 template<typename Header>
1503 void HeapPage<Header>::clearLiveAndMarkDead()
1505 for (Address headerAddress = payload(); headerAddress < end();) {
1506 Header* header = reinterpret_cast<Header*>(headerAddress);
1507 ASSERT(header->size() < blinkPagePayloadSize());
1508 // Check if a free list entry first since we cannot call
1509 // isMarked on a free list entry.
1510 if (header->isFree()) {
1511 headerAddress += header->size();
1514 if (header->isMarked())
1517 header->setDeadMark();
1518 headerAddress += header->size();
// Rebuilds the object-start bitmap: one bit per allocation granule, set at
// the granule where each object header begins. Used by
// findHeaderFromAddress for conservative (interior) pointer lookup.
1522 template<typename Header>
1523 void HeapPage<Header>::populateObjectStartBitMap()
1525 memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
1526 Address start = payload();
1527 for (Address headerAddress = start; headerAddress < end();) {
1528 Header* header = reinterpret_cast<Header*>(headerAddress);
1529 size_t objectOffset = headerAddress - start;
1530 ASSERT(!(objectOffset & allocationMask));
1531 size_t objectStartNumber = objectOffset / allocationGranularity;
1532 size_t mapIndex = objectStartNumber / 8;
1533 ASSERT(mapIndex < objectStartBitMapSize);
1534 m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7));
1535 headerAddress += header->size();
1536 ASSERT(headerAddress <= end());
1538 m_objectStartBitMapComputed = true;
// Invalidates the object-start bitmap; it is lazily recomputed on the next
// findHeaderFromAddress call.
1541 template<typename Header>
1542 void HeapPage<Header>::clearObjectStartBitMap()
1544 m_objectStartBitMapComputed = false;
// Counts leading zero bits in a byte; helper for locating the highest set
// bit in the object-start bitmap (body elided from this listing).
1547 static int numberOfLeadingZeroes(uint8_t byte)
// Maps a (possibly interior) address to the header of the object containing
// it: masks the bitmap to bits at or below the address's granule, scans
// backwards for the nearest set bit, and rejects free blocks.
1565 template<typename Header>
1566 Header* HeapPage<Header>::findHeaderFromAddress(Address address)
1568 if (address < payload())
1570 if (!isObjectStartBitMapComputed())
1571 populateObjectStartBitMap();
1572 size_t objectOffset = address - payload();
1573 size_t objectStartNumber = objectOffset / allocationGranularity;
1574 size_t mapIndex = objectStartNumber / 8;
1575 ASSERT(mapIndex < objectStartBitMapSize);
1576 size_t bit = objectStartNumber & 7;
1577 uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1);
1579 ASSERT(mapIndex > 0);
1580 byte = m_objectStartBitMap[--mapIndex];
1582 int leadingZeroes = numberOfLeadingZeroes(byte);
1583 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
1584 objectOffset = objectStartNumber * allocationGranularity;
1585 Address objectAddress = objectOffset + payload();
1586 Header* header = reinterpret_cast<Header*>(objectAddress);
1587 if (header->isFree())
// Conservative marking entry for a single address on this page: resolves it
// to a header, skips dead objects, and marks it — without tracing when the
// object's vtable is not yet initialized (object is mid-construction).
1592 template<typename Header>
1593 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
1595 ASSERT(contains(address));
1596 Header* header = findHeaderFromAddress(address);
1597 if (!header || header->hasDeadMark())
1600 #if ENABLE(GC_PROFILE_MARKING)
1601 visitor->setHostInfo(&address, "stack");
1603 if (hasVTable(header) && !vTableInitialized(header->payload())) {
1604 visitor->markNoTracing(header);
1605 ASSERT(isUninitializedMemory(header->payload(), header->payloadSize()));
1607 visitor->mark(header, traceCallback(header));
// Profiling-only: resolves an address to the GCInfo of its object, using the
// per-page GCInfo when present (non-finalized heaps) or the header's own.
1611 #if ENABLE(GC_PROFILE_MARKING)
1612 template<typename Header>
1613 const GCInfo* HeapPage<Header>::findGCInfo(Address address)
1615 if (address < payload())
1618 if (gcInfo()) // for non FinalizedObjectHeader
1621 Header* header = findHeaderFromAddress(address);
1625 return header->gcInfo();
// Heap-profiling snapshot: walks every header on the page, emitting its
// encoded size to |json| and bucketing live/dead counts, sizes, and
// generation counters per class tag into |info|.
1629 #if ENABLE(GC_PROFILE_HEAP)
1630 template<typename Header>
1631 void HeapPage<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
1634 for (Address addr = payload(); addr < end(); addr += header->size()) {
1635 header = reinterpret_cast<Header*>(addr);
1637 json->pushInteger(header->encodedSize());
1638 if (header->isFree()) {
1639 info->freeSize += header->size();
1643 const GCInfo* gcinfo = header->gcInfo() ? header->gcInfo() : gcInfo();
1644 size_t tag = info->getClassTag(gcinfo);
1645 size_t age = header->age();
1647 json->pushInteger(tag);
1648 if (header->isMarked()) {
1649 info->liveCount[tag] += 1;
1650 info->liveSize[tag] += header->size();
1651 // Count objects that are live when promoted to the final generation.
1652 if (age == maxHeapObjectAge - 1)
1653 info->generations[tag][maxHeapObjectAge] += 1;
1656 info->deadCount[tag] += 1;
1657 info->deadSize[tag] += header->size();
1658 // Count objects that are dead before the final generation.
1659 if (age < maxHeapObjectAge)
1660 info->generations[tag][age] += 1;
// ASan-only pre-sweep pass: poisons the payload of every object that is
// neither free nor marked, so finalizers touching other dead objects fault.
1666 #if defined(ADDRESS_SANITIZER)
1667 template<typename Header>
1668 void HeapPage<Header>::poisonUnmarkedObjects()
1670 for (Address headerAddress = payload(); headerAddress < end(); ) {
1671 Header* header = reinterpret_cast<Header*>(headerAddress);
1672 ASSERT(header->size() < blinkPagePayloadSize());
1674 if (!header->isFree() && !header->isMarked())
1675 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1676 headerAddress += header->size();
// Finalized-header pages: the header carries its own finalizer info
// (body elided from this listing).
1682 inline void HeapPage<FinalizedHeapObjectHeader>::finalize(FinalizedHeapObjectHeader* header)
// Plain-header pages: the finalizer comes from the page's shared GCInfo.
1688 inline void HeapPage<HeapObjectHeader>::finalize(HeapObjectHeader* header)
1691 HeapObjectHeader::finalize(gcInfo(), header->payload(), header->payloadSize());
// Plain-header pages: trace callback comes from the page's shared GCInfo.
1695 inline TraceCallback HeapPage<HeapObjectHeader>::traceCallback(HeapObjectHeader* header)
1698 return gcInfo()->m_trace;
// Finalized-header pages: each header knows its own trace callback.
1702 inline TraceCallback HeapPage<FinalizedHeapObjectHeader>::traceCallback(FinalizedHeapObjectHeader* header)
1704 return header->traceCallback();
// Plain-header pages: vtable presence is recorded in the shared GCInfo.
1708 inline bool HeapPage<HeapObjectHeader>::hasVTable(HeapObjectHeader* header)
1711 return gcInfo()->hasVTable();
// Finalized-header pages: each header records vtable presence itself.
1715 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header)
1717 return header->hasVTable();
// A large object contributes its full size as allocated space and its
// payload as object space.
1720 template<typename Header>
1721 void LargeHeapObject<Header>::getStats(HeapStats& stats)
1723 stats.increaseAllocatedSpace(size());
1724 stats.increaseObjectSpace(payloadSize());
// Heap-profiling snapshot for a single large object: buckets it as
// live/dead with generation accounting, then emits class/size/mark state.
1727 #if ENABLE(GC_PROFILE_HEAP)
1728 template<typename Header>
1729 void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
1731 Header* header = heapObjectHeader();
1732 size_t tag = info->getClassTag(header->gcInfo());
1733 size_t age = header->age();
1735 info->liveCount[tag] += 1;
1736 info->liveSize[tag] += header->size();
1737 // Count objects that are live when promoted to the final generation.
1738 if (age == maxHeapObjectAge - 1)
1739 info->generations[tag][maxHeapObjectAge] += 1;
1742 info->deadCount[tag] += 1;
1743 info->deadSize[tag] += header->size();
1744 // Count objects that are dead before the final generation.
1745 if (age < maxHeapObjectAge)
1746 info->generations[tag][age] += 1;
1750 json->setInteger("class", tag);
1751 json->setInteger("size", header->size());
1752 json->setInteger("isMarked", isMarked());
// Resets every cache slot to a default-constructed Entry and clears the
// has-entries flag.
1757 template<typename Entry>
1758 void HeapExtentCache<Entry>::flush()
1761 for (int i = 0; i < numberOfEntries; i++)
1762 m_entries[i] = Entry();
1763 m_hasEntries = false;
// Hashes the page-granular address into an even slot index; each even/odd
// pair of slots forms a 2-way set (see lookup/addEntry).
1767 template<typename Entry>
1768 size_t HeapExtentCache<Entry>::hash(Address address)
1770 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2);
1771 value ^= value >> numberOfEntriesLog2;
1772 value ^= value >> (numberOfEntriesLog2 * 2);
1773 value &= numberOfEntries - 1;
1774 return value & ~1; // Returns only even number.
// Probes the 2-way set for the page containing |address|; checks the
// primary (even) slot then its companion (odd) slot.
1777 template<typename Entry>
1778 typename Entry::LookupResult HeapExtentCache<Entry>::lookup(Address address)
1780 size_t index = hash(address);
1781 ASSERT(!(index & 1));
1782 Address cachePage = roundToBlinkPageStart(address);
1783 if (m_entries[index].address() == cachePage)
1784 return m_entries[index].result();
1785 if (m_entries[index + 1].address() == cachePage)
1786 return m_entries[index + 1].result();
// Inserts into the 2-way set with LRU-like behavior: the previous primary
// entry is demoted to the odd slot and the new entry takes the even slot.
1790 template<typename Entry>
1791 void HeapExtentCache<Entry>::addEntry(Address address, typename Entry::LookupResult entry)
1793 m_hasEntries = true;
1794 size_t index = hash(address);
1795 ASSERT(!(index & 1));
1796 Address cachePage = roundToBlinkPageStart(address);
1797 m_entries[index + 1] = m_entries[index];
1798 m_entries[index] = Entry(cachePage, entry);
1801 // These should not be needed, but it seems impossible to persuade clang to
1802 // instantiate the template functions and export them from a shared library, so
1803 // we add these in the non-templated subclass, which does not have that issue.
// Non-template forwarding shim (see the explanation above about clang
// template instantiation across shared-library boundaries).
1804 void HeapContainsCache::addEntry(Address address, BaseHeapPage* page)
1806 HeapExtentCache<PositiveEntry>::addEntry(address, page);
// Non-template forwarding shim for the templated base-class lookup.
1809 BaseHeapPage* HeapContainsCache::lookup(Address address)
1811 return HeapExtentCache<PositiveEntry>::lookup(address);
// Empties the global negative ("heap does not contain") lookup cache.
1814 void Heap::flushHeapDoesNotContainCache()
1816 s_heapDoesNotContainCache->flush();
1819 // The marking mutex is used to ensure sequential access to data
1820 // structures during marking. The marking mutex needs to be acquired
1821 // during marking when elements are taken from the global marking
1822 // stack or when elements are added to the global ephemeron,
1823 // post-marking, and weak processing stacks. In debug mode the mutex
1824 // also needs to be acquired when asserts use the heap contains
// Lazily, thread-safely constructed process-wide mutex (see the usage
// comment above); intentionally leaked.
1826 static Mutex& markingMutex()
1828 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
// Companion condition variable to markingMutex(); same lazy-init,
// intentionally-leaked pattern.
1832 static ThreadCondition& markingCondition()
1834 AtomicallyInitializedStatic(ThreadCondition&, condition = *new ThreadCondition);
// Post-marking callback adapter: marks |object| without scheduling a trace.
1838 static void markNoTracingCallback(Visitor* visitor, void* object)
1840 visitor->markNoTracing(object);
// Visitor used during the marking phase. The common path for every mark()
// overload is visitHeader(): check the mark bit and, if unmarked, push a
// trace callback for the object onto the marking stack. The
// GC_PROFILE_MARKING sections additionally record live-object sets and an
// object graph for diagnostics.
1843 class MarkingVisitor : public Visitor {
1845 #if ENABLE(GC_PROFILE_MARKING)
1846 typedef HashSet<uintptr_t> LiveObjectSet;
1847 typedef HashMap<String, LiveObjectSet> LiveObjectMap;
1848 typedef HashMap<uintptr_t, std::pair<uintptr_t, String> > ObjectGraph;
1851 MarkingVisitor(CallbackStack* markingStack) : m_markingStack(markingStack)
// Core marking step shared by all mark() overloads: skip already-marked
// objects, then defer tracing by pushing (object, callback) on the stack.
1855 inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback)
1860 // Check that we are not marking objects that are outside
1861 // the heap by calling Heap::contains. However we cannot
1862 // call Heap::contains when outside a GC and we call mark
1863 // when doing weakness for ephemerons. Hence we only check
1864 // when called within.
1865 MutexLocker locker(markingMutex());
1866 ASSERT(!ThreadState::isAnyThreadInGC() || Heap::containedInHeapOrOrphanedPage(header));
1869 ASSERT(objectPointer);
1870 if (header->isMarked())
1873 #if ENABLE(GC_PROFILE_MARKING)
1874 MutexLocker locker(objectGraphMutex());
1875 String className(classOf(objectPointer));
1877 LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet());
1878 result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer));
1880 ObjectGraph::AddResult result = objectGraph().add(reinterpret_cast<uintptr_t>(objectPointer), std::make_pair(reinterpret_cast<uintptr_t>(m_hostObject), m_hostName));
1881 ASSERT(result.isNewEntry);
1882 // fprintf(stderr, "%s[%p] -> %s[%p]\n", m_hostName.ascii().data(), m_hostObject, className.ascii().data(), objectPointer);
1885 Heap::pushTraceCallback(m_markingStack, const_cast<void*>(objectPointer), callback);
1888 virtual void mark(HeapObjectHeader* header, TraceCallback callback) OVERRIDE
1890 // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1891 // version to correctly find the payload.
1892 visitHeader(header, header->payload(), callback);
1895 virtual void mark(FinalizedHeapObjectHeader* header, TraceCallback callback) OVERRIDE
1897 // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1898 // version to correctly find the payload.
1899 visitHeader(header, header->payload(), callback);
// Untyped overload: the payload pointer is mapped back to its
// FinalizedHeapObjectHeader before visiting.
1902 virtual void mark(const void* objectPointer, TraceCallback callback) OVERRIDE
1906 FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(objectPointer);
1907 visitHeader(header, header->payload(), callback);
1910 virtual void registerDelayedMarkNoTracing(const void* object) OVERRIDE
1912 Heap::pushPostMarkingCallback(const_cast<void*>(object), markNoTracingCallback);
1915 virtual void registerWeakMembers(const void* closure, const void* containingObject, WeakPointerCallback callback) OVERRIDE
1917 Heap::pushWeakObjectPointerCallback(const_cast<void*>(closure), const_cast<void*>(containingObject), callback);
1920 virtual void registerWeakTable(const void* closure, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
1922 Heap::registerWeakTable(const_cast<void*>(closure), iterationCallback, iterationDoneCallback);
1926 virtual bool weakTableRegistered(const void* closure)
1928 return Heap::weakTableRegistered(closure);
1932 virtual bool isMarked(const void* objectPointer) OVERRIDE
1934 return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked();
1937 // This macro defines the necessary visitor methods for typed heaps
1938 #define DEFINE_VISITOR_METHODS(Type) \
1939 virtual void mark(const Type* objectPointer, TraceCallback callback) OVERRIDE \
1941 if (!objectPointer) \
1943 HeapObjectHeader* header = \
1944 HeapObjectHeader::fromPayload(objectPointer); \
1945 visitHeader(header, header->payload(), callback); \
1947 virtual bool isMarked(const Type* objectPointer) OVERRIDE \
1949 return HeapObjectHeader::fromPayload(objectPointer)->isMarked(); \
1952 FOR_EACH_TYPED_HEAP(DEFINE_VISITOR_METHODS)
1953 #undef DEFINE_VISITOR_METHODS
// Everything below (until registerWeakCell) is GC_PROFILE_MARKING-only
// diagnostics: live-object reporting and object-graph path dumping.
1955 #if ENABLE(GC_PROFILE_MARKING)
1958 fprintf(stderr, "\n---------- AFTER MARKING -------------------\n");
1959 for (LiveObjectMap::iterator it = currentlyLive().begin(), end = currentlyLive().end(); it != end; ++it) {
1960 fprintf(stderr, "%s %u", it->key.ascii().data(), it->value.size());
1962 if (it->key == "blink::Document")
1963 reportStillAlive(it->value, previouslyLive().get(it->key));
1965 fprintf(stderr, "\n");
1968 previouslyLive().swap(currentlyLive());
1969 currentlyLive().clear();
1971 for (HashSet<uintptr_t>::iterator it = objectsToFindPath().begin(), end = objectsToFindPath().end(); it != end; ++it) {
1972 dumpPathToObjectFromObjectGraph(objectGraph(), *it);
// Prints which objects from |previous| survived into |current| (two GCs).
1976 static void reportStillAlive(LiveObjectSet current, LiveObjectSet previous)
1980 fprintf(stderr, " [previously %u]", previous.size());
1981 for (LiveObjectSet::iterator it = current.begin(), end = current.end(); it != end; ++it) {
1982 if (previous.find(*it) == previous.end())
1990 fprintf(stderr, " {survived 2GCs %d: ", count);
1991 for (LiveObjectSet::iterator it = current.begin(), end = current.end(); it != end; ++it) {
1992 if (previous.find(*it) == previous.end())
1994 fprintf(stderr, "%ld", *it);
1996 fprintf(stderr, ", ");
1999 fprintf(stderr, "}");
// Follows host links in the recorded object graph from |target| back
// towards its retaining roots, printing each hop.
2002 static void dumpPathToObjectFromObjectGraph(const ObjectGraph& graph, uintptr_t target)
2004 ObjectGraph::const_iterator it = graph.find(target);
2005 if (it == graph.end())
2007 fprintf(stderr, "Path to %lx of %s\n", target, classOf(reinterpret_cast<const void*>(target)).ascii().data());
2008 while (it != graph.end()) {
2009 fprintf(stderr, "<- %lx of %s\n", it->value.first, it->value.second.utf8().data());
2010 it = graph.find(it->value.first);
2012 fprintf(stderr, "\n");
2015 static void dumpPathToObjectOnNextGC(void* p)
2017 objectsToFindPath().add(reinterpret_cast<uintptr_t>(p));
2020 static Mutex& objectGraphMutex()
2022 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
2026 static LiveObjectMap& previouslyLive()
2028 DEFINE_STATIC_LOCAL(LiveObjectMap, map, ());
2032 static LiveObjectMap& currentlyLive()
2034 DEFINE_STATIC_LOCAL(LiveObjectMap, map, ());
2038 static ObjectGraph& objectGraph()
2040 DEFINE_STATIC_LOCAL(ObjectGraph, graph, ());
2044 static HashSet<uintptr_t>& objectsToFindPath()
2046 DEFINE_STATIC_LOCAL(HashSet<uintptr_t>, set, ());
2052 virtual void registerWeakCell(void** cell, WeakPointerCallback callback) OVERRIDE
2054 Heap::pushWeakCellPointerCallback(cell, callback);
// The shared marking stack this visitor pushes deferred trace work onto.
2058 CallbackStack* m_markingStack;
// Body of Heap::init() (signature line elided from this listing): creates
// the process-wide GC singletons — callback stacks, lookup cache, marking
// visitor, page pools — and spawns the parallel marking threads when a
// Platform is available.
2063 ThreadState::init();
2064 s_markingStack = new CallbackStack();
2065 s_postMarkingCallbackStack = new CallbackStack();
2066 s_weakCallbackStack = new CallbackStack();
2067 s_ephemeronStack = new CallbackStack();
2068 s_heapDoesNotContainCache = new HeapDoesNotContainCache();
2069 s_markingVisitor = new MarkingVisitor(s_markingStack);
2070 s_freePagePool = new FreePagePool();
2071 s_orphanedPagePool = new OrphanedPagePool();
2072 s_markingThreads = new Vector<OwnPtr<blink::WebThread> >();
2073 if (blink::Platform::current()) {
2074 // FIXME: We should let the amount of threads scale with the
2075 // amount of processors in the system instead of hardcoding
2077 for (int i = 0; i < numberOfMarkingThreads; i++)
2078 s_markingThreads->append(adoptPtr(blink::Platform::current()->createThread("Blink Heap Marker Thread")));
// Records that shutdown was requested and lets ThreadState tear the heap
// down once it is safe to do so (actual teardown is in doShutdown()).
2082 void Heap::shutdown()
2084 s_shutdownCalled = true;
2085 ThreadState::shutdownHeapIfNecessary();
// Destroys all global GC singletons in reverse-ish creation order and
// nulls the pointers; guarded so a second call is a no-op (s_markingVisitor
// doubles as the "already shut down" flag).
2088 void Heap::doShutdown()
2090 // We don't want to call doShutdown() twice.
2091 if (!s_markingVisitor)
2094 ASSERT(!ThreadState::isAnyThreadInGC());
2095 ASSERT(!ThreadState::attachedThreads().size());
2096 delete s_markingThreads;
2097 s_markingThreads = 0;
2098 delete s_markingVisitor;
2099 s_markingVisitor = 0;
2100 delete s_heapDoesNotContainCache;
2101 s_heapDoesNotContainCache = 0;
2102 delete s_freePagePool;
2104 delete s_orphanedPagePool;
2105 s_orphanedPagePool = 0;
2106 delete s_weakCallbackStack;
2107 s_weakCallbackStack = 0;
2108 delete s_postMarkingCallbackStack;
2109 s_postMarkingCallbackStack = 0;
2110 delete s_markingStack;
2112 delete s_ephemeronStack;
2113 s_ephemeronStack = 0;
2114 ThreadState::shutdown();
// GC-time only: asks every attached thread's heap whether it owns the page
// containing |address|; returns the owning page if found.
2117 BaseHeapPage* Heap::contains(Address address)
2119 ASSERT(ThreadState::isAnyThreadInGC());
2120 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2121 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2122 BaseHeapPage* page = (*it)->contains(address);
// True if |object| lives in any attached thread's heap or on an orphaned
// page that has not yet been decommitted.
2130 bool Heap::containedInHeapOrOrphanedPage(void* object)
2132 return contains(object) || orphanedPagePool()->contains(object);
// Conservative-scanning entry point: consults the negative cache first,
// then asks each attached thread to resolve-and-mark the address. Addresses
// not owned by any thread are added to the negative cache so subsequent
// probes short-circuit.
2136 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
2138 ASSERT(ThreadState::isAnyThreadInGC());
2141 if (s_heapDoesNotContainCache->lookup(address))
2145 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2146 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2147 if ((*it)->checkAndMarkPointer(visitor, address)) {
2148 // Pointer was in a page of that thread. If it actually pointed
2149 // into an object then that object was found and marked.
2150 ASSERT(!s_heapDoesNotContainCache->lookup(address));
2151 s_lastGCWasConservative = true;
2157 s_heapDoesNotContainCache->addEntry(address, true);
2159 if (!s_heapDoesNotContainCache->lookup(address))
2160 s_heapDoesNotContainCache->addEntry(address, true);
2165 #if ENABLE(GC_PROFILE_MARKING)
// Profiling-only helper: resolves the GCInfo describing the object at
// |address| by querying every attached thread.
2166 const GCInfo* Heap::findGCInfo(Address address)
2168 return ThreadState::findGCInfoFromAllThreads(address);
2172 #if ENABLE(GC_PROFILE_MARKING)
// Profiling-only: asks the marking visitor to record the retaining path to
// |p| during the next garbage collection.
2173 void Heap::dumpPathToObjectOnNextGC(void* p)
2175 static_cast<MarkingVisitor*>(s_markingVisitor)->dumpPathToObjectOnNextGC(p);
// Profiling-only: builds a short human-readable backtrace string for a
// Persistent allocation site. Shows up to |framesToShow| frames that come
// after the innermost "blink::Persistent" frame and strips the "blink::"
// namespace prefix for brevity.
2178 String Heap::createBacktraceString()
2180 int framesToShow = 3;
2181 int stackFrameSize = 16;
2182 ASSERT(stackFrameSize >= framesToShow);
2183 typedef void* FramePointer;
// alloca: the frame array only needs to live for the duration of this call.
2184 FramePointer* stackFrame = static_cast<FramePointer*>(alloca(sizeof(FramePointer) * stackFrameSize));
2185 WTFGetBacktrace(stackFrame, &stackFrameSize);
2187 StringBuilder builder;
2188 builder.append("Persistent");
2189 bool didAppendFirstName = false;
2190 // Skip frames before/including "blink::Persistent".
2191 bool didSeePersistent = false;
2192 for (int i = 0; i < stackFrameSize && framesToShow > 0; ++i) {
2193 FrameToNameScope frameToName(stackFrame[i]);
2194 if (!frameToName.nullableName())
// NOTE(review): a "continue;" is elided here in this excerpt.
2196 if (strstr(frameToName.nullableName(), "blink::Persistent")) {
2197 didSeePersistent = true;
// NOTE(review): "continue;" and a closing brace are elided here.
2200 if (!didSeePersistent)
// NOTE(review): "continue;" elided here.
2202 if (!didAppendFirstName) {
2203 didAppendFirstName = true;
2204 builder.append(" ... Backtrace:");
2206 builder.append("\n\t");
2207 builder.append(frameToName.nullableName());
// NOTE(review): the "--framesToShow;" decrement (original line ~2208)
// appears elided here — confirm against the full file.
2210 return builder.toString().replace("blink::", "");
// Pushes an (object, callback) marking entry onto |stack|. The marking mutex
// serializes pushes because marker threads run in parallel.
2214 void Heap::pushTraceCallback(CallbackStack* stack, void* object, TraceCallback callback)
2218 MutexLocker locker(markingMutex());
2219 ASSERT(Heap::containedInHeapOrOrphanedPage(object));
2222 CallbackStack::Item* slot = stack->allocateEntry();
2223 *slot = CallbackStack::Item(object, callback);
// Pops one entry from |stack| and runs its trace callback, unless the
// object's page must be skipped for this marking mode (orphaned page in a
// global GC, or a non-terminating/orphaned page in a thread-local GC).
// NOTE(review): the empty-stack "if (!item) return false;" guard and the
// "return true;" statements are elided in this excerpt.
2226 template<CallbackInvocationMode Mode>
2227 bool Heap::popAndInvokeTraceCallback(CallbackStack* stack, Visitor* visitor)
2229 CallbackStack::Item* item = stack->pop();
2232 // If the object being traced is located on a page which is dead don't
2233 // trace it. This can happen when a conservative GC kept a dead object
2234 // alive which pointed to a (now gone) object on the cleaned up page.
2235 // Also, if doing a thread local GC, don't trace objects that are located
2236 // on other thread's heaps, ie, pages where the terminating flag is not set.
2237 BaseHeapPage* heapPage = pageHeaderFromObject(item->object());
2238 if (Mode == GlobalMarking && heapPage->orphaned()) {
2239 // When doing a global GC we should only get a trace callback to an orphaned
2240 // page if the GC is conservative. If it is not conservative there is
2241 // a bug in the code where we have a dangling pointer to a page
2242 // on the dead thread.
2243 RELEASE_ASSERT(Heap::lastGCWasConservative());
2244 heapPage->setTracedAfterOrphaned();
2247 if (Mode == ThreadLocalMarking && (heapPage->orphaned() || !heapPage->terminating()))
2250 #if ENABLE(GC_PROFILE_MARKING)
// Profiling: remember which object hosts the pointers we are about to trace.
2251 visitor->setHostInfo(item->object(), classOf(item->object()));
2253 item->call(visitor);
// Queues |callback| to run on |object| after marking completes (e.g. weak
// table cleanup). Serialized by the marking mutex.
2257 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback)
2259 MutexLocker locker(markingMutex());
2260 ASSERT(!Heap::orphanedPagePool()->contains(object));
2261 CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry();
2262 *slot = CallbackStack::Item(object, callback);
// Pops and runs one post-marking callback; returns whether one was run
// (the return statements are elided in this excerpt).
2265 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor)
2267 if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) {
2268 item->call(visitor);
// Queues a weak callback for the cell at |cell|; it runs during global weak
// processing after marking. Serialized by the marking mutex.
2274 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback)
2276 MutexLocker locker(markingMutex());
2277 ASSERT(!Heap::orphanedPagePool()->contains(cell));
2278 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry();
2279 *slot = CallbackStack::Item(cell, callback);
// Registers a weak callback for |object| on the ThreadState that owns the
// object's page, so weak processing happens on the owning thread. Asserts
// that the object is on a live (non-orphaned) heap page.
2282 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointerCallback callback)
2284 MutexLocker locker(markingMutex());
2285 ASSERT(Heap::contains(object));
2286 BaseHeapPage* heapPageForObject = pageHeaderFromObject(object);
2287 ASSERT(!heapPageForObject->orphaned());
2288 ASSERT(Heap::contains(object) == heapPageForObject);
2289 ThreadState* state = heapPageForObject->threadState();
2290 state->pushWeakObjectPointerCallback(closure, callback);
// Pops and runs one queued weak-pointer callback; returns whether one was
// run (the return statements are elided in this excerpt).
2293 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
2295 // For weak processing we should never reach orphaned pages since orphaned
2296 // pages are not traced and thus objects on those pages are never be
2297 // registered as objects on orphaned pages. We cannot assert this here since
2298 // we might have an off-heap collection. We assert it in
2299 // Heap::pushWeakObjectPointerCallback.
2300 if (CallbackStack::Item* item = s_weakCallbackStack->pop()) {
2301 item->call(visitor);
// Registers an ephemeron (weak hash) table: |iterationCallback| goes on the
// ephemeron stack so it is re-run on each marking fixed-point iteration, and
// |iterationDoneCallback| is queued as a post-marking cleanup callback.
2307 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
2310 MutexLocker locker(markingMutex());
2311 // Check that the ephemeron table being pushed onto the stack is not on an
2313 ASSERT(!Heap::orphanedPagePool()->contains(table));
2314 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry();
2315 *slot = CallbackStack::Item(table, iterationCallback);
2318 // Register a post-marking callback to tell the tables that
2319 // ephemeron iteration is complete.
2320 pushPostMarkingCallback(table, iterationDoneCallback);
// Debug helper: whether |table| currently has an entry on the ephemeron
// stack.
2324 bool Heap::weakTableRegistered(const void* table)
2326 MutexLocker locker(markingMutex());
2327 ASSERT(s_ephemeronStack);
2328 return s_ephemeronStack->hasCallbackForObject(table);
// Asks every attached thread to prepare its heap for the upcoming GC.
2332 void Heap::prepareForGC()
2334 ASSERT(ThreadState::isAnyThreadInGC());
2335 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2336 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
2337 (*it)->prepareForGC();
// Performs a global stop-the-world garbage collection across all attached
// threads. |stackState| says whether the calling thread's stack may hold
// heap pointers (conservative scan); |cause| is recorded in trace events.
// If the other threads cannot be parked the GC is abandoned and re-requested.
2340 void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::CauseOfGC cause)
2342 ThreadState* state = ThreadState::current();
2343 state->clearGCRequested();
2345 GCScope gcScope(stackState);
2346 // Check if we successfully parked the other threads. If not we bail out of the GC.
2347 if (!gcScope.allThreadsParked()) {
2348 ThreadState::current()->setGCRequested();
// NOTE(review): "return;" and a closing brace are elided in this excerpt.
2352 if (state->isMainThread())
// Script must not run while the heap is in mid-GC state.
2353 ScriptForbiddenScope::enter();
2355 s_lastGCWasConservative = false;
2357 TRACE_EVENT2("blink_gc", "Heap::collectGarbage",
2358 "precise", stackState == ThreadState::NoHeapPointersOnStack,
2359 "forced", cause == ThreadState::ForcedGC);
2360 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC");
2361 double timeStamp = WTF::currentTimeMS();
2362 #if ENABLE(GC_PROFILE_MARKING)
2363 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear();
2366 // Disallow allocation during garbage collection (but not
2367 // during the finalization that happens when the gcScope is
2369 NoAllocationScope<AnyThread> noAllocationScope;
// NOTE(review): a call preparing all heaps (likely prepareForGC(), original
// line ~2371) appears elided here — confirm against the full file.
2373 // 1. trace persistent roots.
2374 ThreadState::visitPersistentRoots(s_markingVisitor);
2376 // 2. trace objects reachable from the persistent roots including ephemerons.
2377 processMarkingStackInParallel();
2379 // 3. trace objects reachable from the stack. We do this independent of the
2380 // given stackState since other threads might have a different stack state.
2381 ThreadState::visitStackRoots(s_markingVisitor);
2383 // 4. trace objects reachable from the stack "roots" including ephemerons.
2384 // Only do the processing if we found a pointer to an object on one of the
2386 if (lastGCWasConservative())
2387 processMarkingStackInParallel();
2389 postMarkingProcessing();
2390 globalWeakProcessing();
2392 // After a global marking we know that any orphaned page that was not reached
2393 // cannot be reached in a subsequent GC. This is due to a thread either having
2394 // swept its heap or having done a "poor mans sweep" in prepareForGC which marks
2395 // objects that are dead, but not swept in the previous GC as dead. In this GC's
2396 // marking we check that any object marked as dead is not traced. E.g. via a
2397 // conservatively found pointer or a programming error with an object containing
2398 // a dangling pointer.
2399 orphanedPagePool()->decommitOrphanedPages();
2401 #if ENABLE(GC_PROFILE_MARKING)
2402 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
// Report GC duration and heap-size UMA histograms.
2405 if (blink::Platform::current()) {
2406 uint64_t objectSpaceSize;
2407 uint64_t allocatedSpaceSize;
2408 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
2409 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
2410 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
2411 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
2414 if (state->isMainThread())
2415 ScriptForbiddenScope::exit();
// Thread-local GC run while |state|'s thread is shutting down: traces only
// that thread's persistent roots (no conservative stack scan), processes the
// marking stack in ThreadLocalMarking mode, then sweeps the thread's heap.
2418 void Heap::collectGarbageForTerminatingThread(ThreadState* state)
2420 // We explicitly do not enter a safepoint while doing thread specific
2421 // garbage collection since we don't want to allow a global GC at the
2422 // same time as a thread local GC.
2425 NoAllocationScope<AnyThread> noAllocationScope;
2428 state->prepareForGC();
2430 // 1. trace the thread local persistent roots. For thread local GCs we
2431 // don't trace the stack (ie. no conservative scanning) since this is
2432 // only called during thread shutdown where there should be no objects
2434 // We also assume that orphaned pages have no objects reachable from
2435 // persistent handles on other threads or CrossThreadPersistents. The
2436 // only cases where this could happen is if a subsequent conservative
2437 // global GC finds a "pointer" on the stack or due to a programming
2438 // error where an object has a dangling cross-thread pointer to an
2439 // object on this heap.
2440 state->visitPersistents(s_markingVisitor);
2442 // 2. trace objects reachable from the thread's persistent roots
2443 // including ephemerons.
2444 processMarkingStack<ThreadLocalMarking>();
2446 postMarkingProcessing();
2447 globalWeakProcessing();
2451 state->performPendingSweep();
// Worker body for parallel marking: repeatedly steals a block of entries
// from the global marking stack into a private stack and drains it with a
// thread-local visitor. On exhaustion, decrements |runningMarkingThreads|
// and signals the coordinating thread when the count reaches zero.
// NOTE(review): the nested-scope braces around the interior MutexLockers
// (original lines 2459/2465/2470) are elided in this excerpt — each locker
// guards only the block hand-off, not the marking itself.
2454 void Heap::processMarkingStackEntries(int* runningMarkingThreads)
2456 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackEntries");
2457 CallbackStack stack;
2458 MarkingVisitor visitor(&stack);
2460 MutexLocker locker(markingMutex());
2461 stack.takeBlockFrom(s_markingStack);
2463 while (!stack.isEmpty()) {
2464 while (popAndInvokeTraceCallback<GlobalMarking>(&stack, &visitor)) { }
2466 MutexLocker locker(markingMutex());
2467 stack.takeBlockFrom(s_markingStack);
2471 MutexLocker locker(markingMutex());
2472 if (!--(*runningMarkingThreads))
2473 markingCondition().signal();
// Fans marking work out to the pre-created marker threads plus the current
// thread, then blocks until every participant has drained the global stack.
2477 void Heap::processMarkingStackOnMultipleThreads()
2479 int runningMarkingThreads = s_markingThreads->size() + 1;
2481 for (size_t i = 0; i < s_markingThreads->size(); ++i)
2482 s_markingThreads->at(i)->postTask(new Task(WTF::bind(Heap::processMarkingStackEntries, &runningMarkingThreads)));
// This thread participates in marking as well.
2484 processMarkingStackEntries(&runningMarkingThreads);
2486 // Wait for the other threads to finish their part of marking.
2487 MutexLocker locker(markingMutex());
2488 while (runningMarkingThreads)
2489 markingCondition().wait(markingMutex());
// Ephemeron-aware fixed-point marking loop for a global GC. Marking runs on
// multiple threads only when the global stack holds enough blocks to make
// splitting worthwhile; otherwise it is drained on this thread. The loop
// reruns whenever ephemeron callbacks queue new objects.
2492 void Heap::processMarkingStackInParallel()
2494 static const size_t sizeOfStackForParallelMarking = 2 * CallbackStack::blockSize;
2495 // Ephemeron fixed point loop run on the garbage collecting thread.
// NOTE(review): the "do {" opening the fixed-point loop is elided in this
// excerpt.
2497 // Iteratively mark all objects that are reachable from the objects
2498 // currently pushed onto the marking stack. Do so in parallel if there
2499 // are multiple blocks on the global marking stack.
2500 if (s_markingStack->sizeExceeds(sizeOfStackForParallelMarking)) {
2501 processMarkingStackOnMultipleThreads();
// NOTE(review): the "} else {" line is elided in this excerpt.
2503 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded");
2504 while (popAndInvokeTraceCallback<GlobalMarking>(s_markingStack, s_markingVisitor)) { }
2507 // Mark any strong pointers that have now become reachable in ephemeron
2509 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack");
2510 s_ephemeronStack->invokeEphemeronCallbacks(s_markingVisitor);
2512 // Rerun loop if ephemeron processing queued more objects for tracing.
2513 } while (!s_markingStack->isEmpty());
// Single-threaded ephemeron fixed-point marking loop. Mode selects whether
// objects on other threads' pages are skipped (ThreadLocalMarking); see
// popAndInvokeTraceCallback. The "do {" opening the loop is elided in this
// excerpt.
2516 template<CallbackInvocationMode Mode>
2517 void Heap::processMarkingStack()
2519 // Ephemeron fixed point loop.
2521 // Iteratively mark all objects that are reachable from the objects
2522 // currently pushed onto the marking stack. If Mode is ThreadLocalMarking
2523 // don't continue tracing if the trace hits an object on another thread's
2525 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded");
2526 while (popAndInvokeTraceCallback<Mode>(s_markingStack, s_markingVisitor)) { }
2528 // Mark any strong pointers that have now become reachable in ephemeron
2530 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack");
2531 s_ephemeronStack->invokeEphemeronCallbacks(s_markingVisitor);
2533 // Rerun loop if ephemeron processing queued more objects for tracing.
2534 } while (!s_markingStack->isEmpty());
// Runs all queued post-marking callbacks, then discards the ephemeron
// stack (its tables were already iterated to a fixed point during marking).
2537 void Heap::postMarkingProcessing()
2539 TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing");
2540 // Call post-marking callbacks including:
2541 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup
2542 // (specifically to clear the queued bits for weak hash tables), and
2543 // 2. the markNoTracing callbacks on collection backings to mark them
2544 // if they are only reachable from their front objects.
2545 while (popAndInvokePostMarkingCallback(s_markingVisitor)) { }
2547 s_ephemeronStack->clear();
2549 // Post-marking callbacks should not trace any objects and
2550 // therefore the marking stack should be empty after the
2551 // post-marking callbacks.
2552 ASSERT(s_markingStack->isEmpty());
// Runs all queued weak-pointer callbacks after marking so references to
// unmarked objects can be cleared.
2555 void Heap::globalWeakProcessing()
2557 TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing");
2558 // Call weak callbacks on objects that may now be pointing to dead
2560 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { }
2562 // It is not permitted to trace pointers of live objects in the weak
2563 // callback phase, so the marking stack should still be empty here.
2564 ASSERT(s_markingStack->isEmpty());
// Runs five precise forced GCs back-to-back so chains of heap objects that
// own Persistent handles get collected; see the FIXME for why one pass is
// not enough.
2567 void Heap::collectAllGarbage()
2569 // FIXME: oilpan: we should perform a single GC and everything
2570 // should die. Unfortunately it is not the case for all objects
2571 // because the hierarchy was not completely moved to the heap and
2572 // some heap allocated objects own objects that contain persistents
2573 // pointing to other heap allocated objects.
2574 for (int i = 0; i < 5; i++)
2575 collectGarbage(ThreadState::NoHeapPointersOnStack, ThreadState::ForcedGC);
// Test hook: force the current thread's next GC to be precise (no
// conservative stack scan).
2578 void Heap::setForcePreciseGCForTesting()
2580 ThreadState::current()->setForcePreciseGCForTesting(true);
// Marks every normal page and large object of this heap as terminating so a
// subsequent thread-local GC can identify pages owned by the dying thread.
2583 template<typename Header>
2584 void ThreadHeap<Header>::prepareHeapForTermination()
2586 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
2587 page->setTerminating();
2589 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
2590 current->setTerminating();
// Detaches the first |numberOfNormalPages| pages of this heap into a
// temporary ThreadHeap used for parallel sweeping; merge() re-links them
// afterwards. NOTE(review): the "return splitOff;" tail (original line
// ~2613) is elided in this excerpt.
2594 template<typename Header>
2595 BaseHeap* ThreadHeap<Header>::split(int numberOfNormalPages)
2597 // Create a new split off thread heap containing
2598 // |numberOfNormalPages| of the pages of this ThreadHeap for
2599 // parallel sweeping. The split off thread heap will be merged
2600 // with this heap at the end of sweeping and the temporary
2601 // ThreadHeap object will be deallocated after the merge.
2602 ASSERT(numberOfNormalPages > 0);
2603 ThreadHeap<Header>* splitOff = new ThreadHeap(m_threadState, m_index);
2604 HeapPage<Header>* splitPoint = m_firstPage;
// Walk to the last page that moves to the split-off heap.
2605 for (int i = 1; i < numberOfNormalPages; i++)
2606 splitPoint = splitPoint->next();
2607 splitOff->m_firstPage = m_firstPage;
2608 m_firstPage = splitPoint->m_next;
2609 splitOff->m_mergePoint = splitPoint;
2610 splitOff->m_numberOfNormalPages = numberOfNormalPages;
2611 m_numberOfNormalPages -= numberOfNormalPages;
2612 splitPoint->m_next = 0;
// Re-links the pages of a heap produced by split() back into this heap,
// concatenates the per-size free lists, then deletes the temporary heap.
2616 template<typename Header>
2617 void ThreadHeap<Header>::merge(BaseHeap* splitOffBase)
2619 ThreadHeap<Header>* splitOff = static_cast<ThreadHeap<Header>*>(splitOffBase);
2620 // If the mergePoint is zero all split off pages became empty in
2621 // this round and we don't have to merge. There are no pages and
2622 // nothing on the freelists.
2623 ASSERT(splitOff->m_mergePoint || splitOff->m_numberOfNormalPages == 0);
2624 if (splitOff->m_mergePoint) {
2625 // Link the split off pages into the beginning of the list again.
2626 splitOff->m_mergePoint->m_next = m_firstPage;
2627 m_firstPage = splitOff->m_firstPage;
2628 m_numberOfNormalPages += splitOff->m_numberOfNormalPages;
2629 splitOff->m_firstPage = 0;
2630 // Merge free lists.
2631 for (size_t i = 0; i < blinkPageSizeLog2; i++) {
2632 if (!m_freeLists[i]) {
2633 m_freeLists[i] = splitOff->m_freeLists[i];
2634 } else if (splitOff->m_freeLists[i]) {
// Append the split-off list to our tail and adopt its tail pointer.
2635 m_lastFreeListEntries[i]->append(splitOff->m_freeLists[i]);
2636 m_lastFreeListEntries[i] = splitOff->m_lastFreeListEntries[i];
2640 delete splitOffBase;
// Sums object space and allocated space across all attached threads' heaps
// into the two out-parameters.
2643 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceSize)
2645 *objectSpaceSize = 0;
2646 *allocatedSpaceSize = 0;
2647 ASSERT(ThreadState::isAnyThreadInGC());
2648 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2649 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
2650 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2651 *objectSpaceSize += (*it)->stats().totalObjectSpace();
2652 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace();
// Accumulates per-thread heap statistics into |stats|.
// NOTE(review): the declaration of the local used as |temp| (and the line
// folding it into |stats|, original lines ~2663/2665) are elided in this
// excerpt — confirm against the full file.
2656 void Heap::getStats(HeapStats* stats)
2659 ASSERT(ThreadState::isAnyThreadInGC());
2660 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2661 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
2662 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2664 (*it)->getStats(temp);
// True only if every attached thread's heap is in a sweep-consistent state
// (the return statements are elided in this excerpt).
2670 bool Heap::isConsistentForSweeping()
2672 ASSERT(ThreadState::isAnyThreadInGC());
2673 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2674 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2675 if (!(*it)->isConsistentForSweeping())
// Forces every attached thread's heap into a sweep-consistent state.
2682 void Heap::makeConsistentForSweeping()
2684 ASSERT(ThreadState::isAnyThreadInGC());
2685 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2686 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
2687 (*it)->makeConsistentForSweeping();
// Promptly frees a collection backing store so its space can be reused
// before the next GC. Bails out (the early "return;" statements are elided
// in this excerpt) when freeing is unsafe: null address, during GC, during
// a sweep, for large objects, or for backings owned by another thread.
2690 void HeapAllocator::backingFree(void* address)
2692 if (!address || ThreadState::isAnyThreadInGC())
2695 ThreadState* state = ThreadState::current();
2696 if (state->isSweepInProgress())
2699 // Don't promptly free large objects because their page is never reused
2700 // and don't free backings allocated on other threads.
2701 BaseHeapPage* page = pageHeaderFromObject(address);
2702 if (page->isLargeObject() || page->threadState() != state)
2705 typedef HeapIndexTrait<CollectionBackingHeap> HeapTraits;
2706 typedef HeapTraits::HeapType HeapType;
2707 typedef HeapTraits::HeaderType HeaderType;
2709 HeaderType* header = HeaderType::fromPayload(address);
2710 header->checkHeader();
// Route the free to the heap that allocated this backing (finalized vs
// non-finalized collection-backing heap).
2712 const GCInfo* gcInfo = header->gcInfo();
2713 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer());
2714 HeapType* heap = static_cast<HeapType*>(state->heap(heapIndex));
2715 heap->promptlyFreeObject(header);
2718 // Force template instantiations for the types that we need.
// Explicit instantiation emits these template definitions in this
// translation unit so other TUs can link against them without seeing the
// method bodies.
2719 template class HeapPage<FinalizedHeapObjectHeader>;
2720 template class HeapPage<HeapObjectHeader>;
2721 template class ThreadHeap<FinalizedHeapObjectHeader>;
2722 template class ThreadHeap<HeapObjectHeader>;
// Definitions of Heap's static members. The pointers are presumably set up
// by Heap::init() (not fully visible in this excerpt — TODO confirm); the
// two flags default to false until a shutdown request / conservative GC
// occurs.
2724 Visitor* Heap::s_markingVisitor;
2725 Vector<OwnPtr<blink::WebThread> >* Heap::s_markingThreads;
2726 CallbackStack* Heap::s_markingStack;
2727 CallbackStack* Heap::s_postMarkingCallbackStack;
2728 CallbackStack* Heap::s_weakCallbackStack;
2729 CallbackStack* Heap::s_ephemeronStack;
2730 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
2731 bool Heap::s_shutdownCalled = false;
2732 bool Heap::s_lastGCWasConservative = false;
2733 FreePagePool* Heap::s_freePagePool;
2734 OrphanedPagePool* Heap::s_orphanedPagePool;