2 * Copyright (C) 2013 Google Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include "platform/heap/Heap.h"
34 #include "platform/TraceEvent.h"
35 #include "platform/heap/ThreadState.h"
36 #include "wtf/Assertions.h"
37 #include "wtf/LeakAnnotations.h"
38 #include "wtf/PassOwnPtr.h"
39 #if ENABLE(GC_TRACING)
40 #include "wtf/HashMap.h"
41 #include "wtf/HashSet.h"
42 #include "wtf/text/StringBuilder.h"
43 #include "wtf/text/StringHash.h"
57 #if ENABLE(GC_TRACING)
58 static String classOf(const void* object)
60 const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)));
62 return gcInfo->m_className;
68 static bool vTableInitialized(void* objectPointer)
70 return !!(*reinterpret_cast<Address*>(objectPointer));
74 static bool IsPowerOf2(size_t power)
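// A power of two has exactly one bit set; subtracting one clears that bit
// and sets all lower bits, so the two values share no bits. Note that this
// expression also accepts zero.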
76 return !((power - 1) & power);
80 static Address roundToBlinkPageBoundary(void* base)
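// Round the address up to the nearest blink page boundary by adding the
// page offset mask and then masking away the offset bits.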
82 return reinterpret_cast<Address>((reinterpret_cast<uintptr_t>(base) + blinkPageOffsetMask) & blinkPageBaseMask);
85 static size_t roundToOsPageSize(size_t size)
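// Round the size up to a whole number of OS pages. This relies on the OS
// page size being a power of two.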
87 return (size + osPageSize() - 1) & ~(osPageSize() - 1);
93 static const size_t pageSize = getpagesize();
95 static size_t pageSize = 0;
99 pageSize = info.dwPageSize;
100 ASSERT(IsPowerOf2(pageSize));
108 MemoryRegion(Address base, size_t size) : m_base(base), m_size(size) { ASSERT(size > 0); }
110 bool contains(Address addr) const
112 return m_base <= addr && addr < (m_base + m_size);
116 bool contains(const MemoryRegion& other) const
118 return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
124 int err = munmap(m_base, m_size);
125 RELEASE_ASSERT(!err);
127 bool success = VirtualFree(m_base, 0, MEM_RELEASE);
128 RELEASE_ASSERT(success);
132 WARN_UNUSED_RETURN bool commit()
134 ASSERT(Heap::heapDoesNotContainCacheIsEmpty());
136 int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE);
138 madvise(m_base, m_size, MADV_NORMAL);
143 void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE);
151 int err = mprotect(m_base, m_size, PROT_NONE);
152 RELEASE_ASSERT(!err);
153 // FIXME: Consider using MADV_FREE on MacOS.
154 madvise(m_base, m_size, MADV_DONTNEED);
156 bool success = VirtualFree(m_base, m_size, MEM_DECOMMIT);
157 RELEASE_ASSERT(success);
161 Address base() const { return m_base; }
168 // Representation of the memory used for a Blink heap page.
170 // The representation keeps track of two memory regions:
// 1. The virtual memory reserved from the system in order to be able
173 // to free all the virtual memory reserved on destruction.
175 // 2. The writable memory (a sub-region of the reserved virtual
176 // memory region) that is used for the actual heap page payload.
178 // Guard pages are created before and after the writable memory.
181 ~PageMemory() { m_reserved.release(); }
183 bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); }
184 void decommit() { m_writable.decommit(); }
186 Address writableStart() { return m_writable.base(); }
188 // Allocate a virtual address space for the blink page with the
191 // [ guard os page | ... payload ... | guard os page ]
192 // ^---{ aligned to blink page size }
194 static PageMemory* allocate(size_t payloadSize)
196 ASSERT(payloadSize > 0);
198 // Virtual memory allocation routines operate in OS page sizes.
// Round up the requested size to the nearest OS page size.
200 payloadSize = roundToOsPageSize(payloadSize);
202 // Overallocate by blinkPageSize and 2 times OS page size to
203 // ensure a chunk of memory which is blinkPageSize aligned and
204 // has a system page before and after to use for guarding. We
205 // unmap the excess memory before returning.
206 size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize;
208 ASSERT(Heap::heapDoesNotContainCacheIsEmpty());
210 Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0));
211 RELEASE_ASSERT(base != MAP_FAILED);
213 Address end = base + allocationSize;
214 Address alignedBase = roundToBlinkPageBoundary(base);
215 Address payloadBase = alignedBase + osPageSize();
216 Address payloadEnd = payloadBase + payloadSize;
217 Address blinkPageEnd = payloadEnd + osPageSize();
// If the allocated memory was not blink page aligned, release
// the memory before the aligned address.
221 if (alignedBase != base)
222 MemoryRegion(base, alignedBase - base).release();
// Create guard pages by decommitting an OS page before and
225 // after the payload.
226 MemoryRegion(alignedBase, osPageSize()).decommit();
227 MemoryRegion(payloadEnd, osPageSize()).decommit();
229 // Free the additional memory at the end of the page if any.
230 if (blinkPageEnd < end)
231 MemoryRegion(blinkPageEnd, end - blinkPageEnd).release();
233 return new PageMemory(MemoryRegion(alignedBase, blinkPageEnd - alignedBase), MemoryRegion(payloadBase, payloadSize));
236 Address alignedBase = 0;
238 // On Windows it is impossible to partially release a region
239 // of memory allocated by VirtualAlloc. To avoid wasting
240 // virtual address space we attempt to release a large region
241 // of memory returned as a whole and then allocate an aligned
242 // region inside this larger region.
243 for (int attempt = 0; attempt < 3; attempt++) {
244 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
245 RELEASE_ASSERT(base);
246 VirtualFree(base, 0, MEM_RELEASE);
248 alignedBase = roundToBlinkPageBoundary(base);
249 base = static_cast<Address>(VirtualAlloc(alignedBase, payloadSize + 2 * osPageSize(), MEM_RESERVE, PAGE_NOACCESS));
251 RELEASE_ASSERT(base == alignedBase);
252 allocationSize = payloadSize + 2 * osPageSize();
// We failed to avoid wasting virtual address space after several attempts.
260 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
261 RELEASE_ASSERT(base);
263 // FIXME: If base is by accident blink page size aligned
// here then we can create two pages out of the reserved space.
266 alignedBase = roundToBlinkPageBoundary(base);
269 Address payloadBase = alignedBase + osPageSize();
270 PageMemory* storage = new PageMemory(MemoryRegion(base, allocationSize), MemoryRegion(payloadBase, payloadSize));
271 bool res = storage->commit();
278 PageMemory(const MemoryRegion& reserved, const MemoryRegion& writable)
279 : m_reserved(reserved)
280 , m_writable(writable)
282 // This annotation is for letting the LeakSanitizer ignore PageMemory objects.
284 // - The LeakSanitizer runs before the shutdown sequence and reports unreachable memory blocks.
285 // - The LeakSanitizer only recognizes memory blocks allocated through malloc/new,
286 // and we need special handling for mapped regions.
287 // - The PageMemory object is only referenced by a HeapPage<Header> object, which is
288 // located inside the mapped region, which is not released until the shutdown sequence.
290 // Given the above, we need to explicitly annotate that the LeakSanitizer should ignore
291 // PageMemory objects.
292 WTF_ANNOTATE_LEAKING_OBJECT_PTR(this);
294 ASSERT(reserved.contains(writable));
297 MemoryRegion m_reserved;
298 MemoryRegion m_writable;
303 explicit GCScope(ThreadState::StackState stackState)
304 : m_state(ThreadState::current())
305 , m_safePointScope(stackState)
306 , m_parkedAllThreads(false)
308 TRACE_EVENT0("Blink", "Heap::GCScope");
309 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
310 if (m_state->isMainThread())
311 TRACE_EVENT_SET_SAMPLING_STATE("Blink", "BlinkGCWaiting");
313 m_state->checkThread();
// FIXME: in the unlikely coincidence that two threads decide
// to collect garbage at the same time, avoid doing two GCs in a row.
318 RELEASE_ASSERT(!m_state->isInGC());
319 RELEASE_ASSERT(!m_state->isSweepInProgress());
320 if (LIKELY(ThreadState::stopThreads())) {
321 m_parkedAllThreads = true;
324 if (m_state->isMainThread())
325 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
328 bool allThreadsParked() { return m_parkedAllThreads; }
// Only clean up if we parked all threads, in which case the GC happened
333 // and we need to resume the other threads.
334 if (LIKELY(m_parkedAllThreads)) {
336 ASSERT(!m_state->isInGC());
337 ThreadState::resumeThreads();
342 ThreadState* m_state;
343 ThreadState::SafePointScope m_safePointScope;
344 bool m_parkedAllThreads; // False if we fail to park all threads
348 bool HeapObjectHeader::isMarked() const
351 return m_size & markBitMask;
355 void HeapObjectHeader::unmark()
358 m_size &= ~markBitMask;
362 bool HeapObjectHeader::hasDebugMark() const
365 return m_size & debugBitMask;
369 void HeapObjectHeader::clearDebugMark()
372 m_size &= ~debugBitMask;
376 void HeapObjectHeader::setDebugMark()
379 m_size |= debugBitMask;
384 void HeapObjectHeader::zapMagic()
386 m_magic = zappedMagic;
390 HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload)
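// The header is located immediately before the payload, so step back by the
// object header size from the payload address.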
392 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
393 HeapObjectHeader* header =
394 reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize);
398 void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize)
401 if (gcInfo->hasFinalizer()) {
402 gcInfo->m_finalize(object);
405 for (size_t i = 0; i < objectSize; i++)
406 object[i] = finalizedZapValue;
408 // Zap the primary vTable entry (secondary vTable entries are not zapped)
409 *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
413 void FinalizedHeapObjectHeader::finalize()
415 HeapObjectHeader::finalize(m_gcInfo, payload(), payloadSize());
418 template<typename Header>
419 void LargeHeapObject<Header>::unmark()
421 return heapObjectHeader()->unmark();
424 template<typename Header>
425 bool LargeHeapObject<Header>::isMarked()
427 return heapObjectHeader()->isMarked();
430 template<typename Header>
431 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
433 ASSERT(contains(address));
434 if (!objectContains(address))
436 #if ENABLE(GC_TRACING)
437 visitor->setHostInfo(&address, "stack");
443 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
445 if (heapObjectHeader()->hasVTable() && !vTableInitialized(payload()))
446 visitor->markConservatively(heapObjectHeader());
448 visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback());
452 void LargeHeapObject<HeapObjectHeader>::mark(Visitor* visitor)
455 if (gcInfo()->hasVTable() && !vTableInitialized(payload()))
456 visitor->markConservatively(heapObjectHeader());
458 visitor->mark(heapObjectHeader(), gcInfo()->m_trace);
462 void LargeHeapObject<FinalizedHeapObjectHeader>::finalize()
464 heapObjectHeader()->finalize();
468 void LargeHeapObject<HeapObjectHeader>::finalize()
471 HeapObjectHeader::finalize(gcInfo(), payload(), payloadSize());
474 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload)
476 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
477 FinalizedHeapObjectHeader* header =
478 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
482 template<typename Header>
483 ThreadHeap<Header>::ThreadHeap(ThreadState* state)
484 : m_currentAllocationPoint(0)
485 , m_remainingAllocationSize(0)
487 , m_firstLargeHeapObject(0)
488 , m_biggestFreeListIndex(0)
489 , m_threadState(state)
495 template<typename Header>
496 ThreadHeap<Header>::~ThreadHeap()
499 if (!ThreadState::current()->isMainThread())
504 template<typename Header>
505 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
507 size_t allocationSize = allocationSizeFromSize(size);
508 if (threadState()->shouldGC()) {
509 if (threadState()->shouldForceConservativeGC())
510 Heap::collectGarbage(ThreadState::HeapPointersOnStack);
512 threadState()->setGCRequested();
514 ensureCurrentAllocation(allocationSize, gcInfo);
515 return allocate(size, gcInfo);
518 template<typename Header>
519 bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
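// Search downwards from the biggest free list bucket. A bucket at index i
// only guarantees entries of at least (1 << i) bytes, so give up once the
// bucket size drops below the requested size.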
521 size_t bucketSize = 1 << m_biggestFreeListIndex;
522 int i = m_biggestFreeListIndex;
523 for (; i > 0; i--, bucketSize >>= 1) {
524 if (bucketSize < minSize)
526 FreeListEntry* entry = m_freeLists[i];
528 m_biggestFreeListIndex = i;
529 entry->unlink(&m_freeLists[i]);
530 setAllocationPoint(entry->address(), entry->size());
531 ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize);
535 m_biggestFreeListIndex = i;
539 template<typename Header>
540 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
542 ASSERT(minSize >= allocationGranularity);
543 if (remainingAllocationSize() >= minSize)
546 if (remainingAllocationSize() > 0)
547 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
548 if (allocateFromFreeList(minSize))
550 addPageToHeap(gcInfo);
551 bool success = allocateFromFreeList(minSize);
552 RELEASE_ASSERT(success);
555 template<typename Header>
556 BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address)
558 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
559 if (page->contains(address))
562 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
563 // Check that large pages are blinkPageSize aligned (modulo the
564 // osPageSize for the guard page).
565 ASSERT(reinterpret_cast<Address>(current) - osPageSize() == roundToBlinkPageStart(reinterpret_cast<Address>(current)));
566 if (current->contains(address))
572 #if ENABLE(GC_TRACING)
573 template<typename Header>
574 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeHeapObject(Address address)
576 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
577 if (current->contains(address))
578 return current->gcInfo();
584 template<typename Header>
585 void ThreadHeap<Header>::addToFreeList(Address address, size_t size)
587 ASSERT(heapPageFromAddress(address));
588 ASSERT(heapPageFromAddress(address + size - 1));
589 ASSERT(size < blinkPagePayloadSize());
590 // The free list entries are only pointer aligned (but when we allocate
591 // from them we are 8 byte aligned due to the header size).
592 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask));
593 ASSERT(!(size & allocationMask));
594 ASAN_POISON_MEMORY_REGION(address, size);
595 FreeListEntry* entry;
596 if (size < sizeof(*entry)) {
597 // Create a dummy header with only a size and freelist bit set.
598 ASSERT(size >= sizeof(BasicObjectHeader));
// Freelist-encode the size to mark the lost memory as freelist memory.
600 new (NotNull, address) BasicObjectHeader(BasicObjectHeader::freeListEncodedSize(size));
601 // This memory gets lost. Sweeping can reclaim it.
604 entry = new (NotNull, address) FreeListEntry(size);
605 #if defined(ADDRESS_SANITIZER)
606 // For ASAN we don't add the entry to the free lists until the asanDeferMemoryReuseCount
607 // reaches zero. However we always add entire pages to ensure that adding a new page will
608 // increase the allocation space.
609 if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList())
612 int index = bucketIndexForSize(size);
613 entry->link(&m_freeLists[index]);
614 if (index > m_biggestFreeListIndex)
615 m_biggestFreeListIndex = index;
618 template<typename Header>
619 Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInfo)
// The caller already added space for the object header and rounded up to allocation alignment.
622 ASSERT(!(size & allocationMask));
624 size_t allocationSize = sizeof(LargeHeapObject<Header>) + size;
626 // Ensure that there is enough space for alignment. If the header
627 // is not a multiple of 8 bytes we will allocate an extra
// headerPadding<Header> bytes to ensure it is 8 byte aligned.
629 allocationSize += headerPadding<Header>();
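// The resulting layout of a large object allocation is:
// [ LargeHeapObject<Header> | headerPadding | Header | payload ]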
631 // If ASAN is supported we add allocationGranularity bytes to the allocated space and
632 // poison that to detect overflows
633 #if defined(ADDRESS_SANITIZER)
634 allocationSize += allocationGranularity;
636 if (threadState()->shouldGC())
637 threadState()->setGCRequested();
638 Heap::flushHeapDoesNotContainCache();
639 PageMemory* pageMemory = PageMemory::allocate(allocationSize);
640 Address largeObjectAddress = pageMemory->writableStart();
641 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
642 memset(headerAddress, 0, size);
643 Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
644 Address result = headerAddress + sizeof(*header);
645 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
646 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState());
648 // Poison the object header and allocationGranularity bytes after the object
649 ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
650 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
651 largeObject->link(&m_firstLargeHeapObject);
652 stats().increaseAllocatedSpace(largeObject->size());
653 stats().increaseObjectSpace(largeObject->payloadSize());
657 template<typename Header>
658 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
660 flushHeapContainsCache();
661 object->unlink(previousNext);
664 // Unpoison the object header and allocationGranularity bytes after the
665 // object before freeing.
666 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
667 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
668 delete object->storage();
672 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
674 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on
// the heap should be unused (i.e. 0).
680 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
682 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap
// since it is the same for all objects.
685 allocatePage(gcInfo);
688 template<typename Header>
689 void ThreadHeap<Header>::clearPagePool()
691 while (takePageFromPool()) { }
694 template<typename Header>
695 PageMemory* ThreadHeap<Header>::takePageFromPool()
697 Heap::flushHeapDoesNotContainCache();
698 while (PagePoolEntry* entry = m_pagePool) {
699 m_pagePool = entry->next();
700 PageMemory* storage = entry->storage();
703 if (storage->commit())
706 // Failed to commit pooled storage. Release it.
713 template<typename Header>
714 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused)
716 flushHeapContainsCache();
717 PageMemory* storage = unused->storage();
718 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool);
723 template<typename Header>
724 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
726 Heap::flushHeapDoesNotContainCache();
727 PageMemory* pageMemory = takePageFromPool();
729 pageMemory = PageMemory::allocate(blinkPagePayloadSize());
730 RELEASE_ASSERT(pageMemory);
732 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
733 // FIXME: Oilpan: Linking new pages into the front of the list is
734 // crucial when performing allocations during finalization because
735 // it ensures that those pages are not swept in the current GC
736 // round. We should create a separate page list for that to
737 // separate out the pages allocated during finalization clearly
738 // from the pages currently being swept.
739 page->link(&m_firstPage);
740 addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
744 template<typename Header>
745 void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats)
747 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
748 page->getStats(scannedStats);
749 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
750 current->getStats(scannedStats);
754 // STRICT_ASAN_FINALIZATION_CHECKING turns on poisoning of all objects during
// sweeping to catch cases where dead objects touch each other. This is not
756 // turned on by default because it also triggers for cases that are safe.
757 // Examples of such safe cases are context life cycle observers and timers
758 // embedded in garbage collected objects.
759 #define STRICT_ASAN_FINALIZATION_CHECKING 0
761 template<typename Header>
762 void ThreadHeap<Header>::sweep()
764 ASSERT(isConsistentForGC());
765 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING
766 // When using ASAN do a pre-sweep where all unmarked objects are poisoned before
// calling their finalizer methods. This can catch the cases where one object's
// finalizer tries to modify another object as part of finalization.
769 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
770 page->poisonUnmarkedObjects();
772 HeapPage<Header>* page = m_firstPage;
773 HeapPage<Header>** previous = &m_firstPage;
774 bool pagesRemoved = false;
776 if (page->isEmpty()) {
777 flushHeapContainsCache();
778 HeapPage<Header>* unused = page;
780 HeapPage<Header>::unlink(unused, previous);
784 previous = &page->m_next;
789 flushHeapContainsCache();
791 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
792 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
793 if (current->isMarked()) {
794 stats().increaseAllocatedSpace(current->size());
795 stats().increaseObjectSpace(current->payloadSize());
previousNext = &current->m_next;
798 current = current->next();
800 LargeHeapObject<Header>* next = current->next();
801 freeLargeObject(current, previousNext);
807 template<typename Header>
808 void ThreadHeap<Header>::assertEmpty()
810 // No allocations are permitted. The thread is exiting.
811 NoAllocationScope<AnyThread> noAllocation;
812 makeConsistentForGC();
813 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
814 Address end = page->end();
815 Address headerAddress;
816 for (headerAddress = page->payload(); headerAddress < end; ) {
817 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
818 ASSERT(basicHeader->size() < blinkPagePayloadSize());
// A live object here is potentially a dangling pointer from some root.
// Treat it as a critical bug in both release and debug mode.
821 RELEASE_ASSERT(basicHeader->isFree());
822 headerAddress += basicHeader->size();
824 ASSERT(headerAddress == end);
825 addToFreeList(page->payload(), end - page->payload());
828 RELEASE_ASSERT(!m_firstLargeHeapObject);
831 template<typename Header>
832 bool ThreadHeap<Header>::isConsistentForGC()
834 for (size_t i = 0; i < blinkPageSizeLog2; i++) {
838 return !ownsNonEmptyAllocationArea();
841 template<typename Header>
842 void ThreadHeap<Header>::makeConsistentForGC()
844 if (ownsNonEmptyAllocationArea())
845 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
846 setAllocationPoint(0, 0);
850 template<typename Header>
851 void ThreadHeap<Header>::clearMarks()
853 ASSERT(isConsistentForGC());
854 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
856 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
860 template<typename Header>
861 void ThreadHeap<Header>::deletePages()
863 flushHeapContainsCache();
864 // Add all pages in the pool to the heap's list of pages before deleting
867 for (HeapPage<Header>* page = m_firstPage; page; ) {
868 HeapPage<Header>* dead = page;
870 PageMemory* storage = dead->storage();
876 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
877 LargeHeapObject<Header>* dead = current;
878 current = current->next();
879 PageMemory* storage = dead->storage();
880 dead->~LargeHeapObject();
883 m_firstLargeHeapObject = 0;
886 template<typename Header>
887 void ThreadHeap<Header>::clearFreeLists()
889 for (size_t i = 0; i < blinkPageSizeLog2; i++)
893 int BaseHeap::bucketIndexForSize(size_t size)
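// Sketch of the bucket computation, assuming the power-of-two bucket sizes
// used by allocateFromFreeList: the index is the position of the highest set
// bit of the size, i.e. floor(log2(size)).
ASSERT(size > 0);
int index = -1;
while (size) {
    size >>= 1;
    index++;
}
return index;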
904 template<typename Header>
905 HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo)
906 : BaseHeapPage(storage, gcInfo, heap->threadState())
910 COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
911 m_objectStartBitMapComputed = false;
912 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
913 heap->stats().increaseAllocatedSpace(blinkPageSize);
916 template<typename Header>
917 void HeapPage<Header>::link(HeapPage** prevNext)
923 template<typename Header>
924 void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext)
926 *prevNext = unused->m_next;
927 unused->heap()->addPageToPool(unused);
930 template<typename Header>
931 void HeapPage<Header>::getStats(HeapStats& stats)
933 stats.increaseAllocatedSpace(blinkPageSize);
934 Address headerAddress = payload();
935 ASSERT(headerAddress != end());
937 Header* header = reinterpret_cast<Header*>(headerAddress);
938 if (!header->isFree())
939 stats.increaseObjectSpace(header->payloadSize());
940 ASSERT(header->size() < blinkPagePayloadSize());
941 headerAddress += header->size();
942 ASSERT(headerAddress <= end());
943 } while (headerAddress < end());
946 template<typename Header>
947 bool HeapPage<Header>::isEmpty()
949 BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
950 return header->isFree() && (header->size() == payloadSize());
953 template<typename Header>
954 void HeapPage<Header>::sweep()
956 clearObjectStartBitMap();
957 heap()->stats().increaseAllocatedSpace(blinkPageSize);
958 Address startOfGap = payload();
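// startOfGap tracks the beginning of the current run of free or finalized
// memory; the whole run is added to the free list as a single entry when a
// marked (surviving) object is reached.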
959 for (Address headerAddress = startOfGap; headerAddress < end(); ) {
960 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
961 ASSERT(basicHeader->size() < blinkPagePayloadSize());
963 if (basicHeader->isFree()) {
964 headerAddress += basicHeader->size();
967 // At this point we know this is a valid object of type Header
968 Header* header = static_cast<Header*>(basicHeader);
970 if (!header->isMarked()) {
971 // For ASAN we unpoison the specific object when calling the finalizer and
972 // poison it again when done to allow the object's own finalizer to operate
// on the object, while not allowing other finalizers to access it.
974 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
976 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
977 headerAddress += header->size();
981 if (startOfGap != headerAddress)
982 heap()->addToFreeList(startOfGap, headerAddress - startOfGap);
984 headerAddress += header->size();
985 heap()->stats().increaseObjectSpace(header->payloadSize());
986 startOfGap = headerAddress;
988 if (startOfGap != end())
989 heap()->addToFreeList(startOfGap, end() - startOfGap);
992 template<typename Header>
993 void HeapPage<Header>::clearMarks()
995 for (Address headerAddress = payload(); headerAddress < end();) {
996 Header* header = reinterpret_cast<Header*>(headerAddress);
997 ASSERT(header->size() < blinkPagePayloadSize());
998 if (!header->isFree())
1000 headerAddress += header->size();
1004 template<typename Header>
1005 void HeapPage<Header>::populateObjectStartBitMap()
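// The bitmap holds one bit per allocation granule; a set bit marks the
// granule at which an object header starts.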
1007 memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
1008 Address start = payload();
1009 for (Address headerAddress = start; headerAddress < end();) {
1010 Header* header = reinterpret_cast<Header*>(headerAddress);
1011 size_t objectOffset = headerAddress - start;
1012 ASSERT(!(objectOffset & allocationMask));
1013 size_t objectStartNumber = objectOffset / allocationGranularity;
1014 size_t mapIndex = objectStartNumber / 8;
1015 ASSERT(mapIndex < objectStartBitMapSize);
1016 m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7));
1017 headerAddress += header->size();
1018 ASSERT(headerAddress <= end());
1020 m_objectStartBitMapComputed = true;
1023 template<typename Header>
1024 void HeapPage<Header>::clearObjectStartBitMap()
1026 m_objectStartBitMapComputed = false;
1029 static int numberOfLeadingZeroes(uint8_t byte)
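// Sketch: count how many of the high bits of the byte are zero, scanning
// down from bit 7. The caller passes a non-zero byte, but handle zero
// defensively.
if (!byte)
    return 8;
int result = 0;
while (!(byte & 0x80)) {
    byte <<= 1;
    result++;
}
return result;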
1047 template<typename Header>
1048 Header* HeapPage<Header>::findHeaderFromAddress(Address address)
1050 if (address < payload())
1052 if (!isObjectStartBitMapComputed())
1053 populateObjectStartBitMap();
1054 size_t objectOffset = address - payload();
1055 size_t objectStartNumber = objectOffset / allocationGranularity;
1056 size_t mapIndex = objectStartNumber / 8;
1057 ASSERT(mapIndex < objectStartBitMapSize);
1058 size_t bit = objectStartNumber & 7;
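// Mask off the start bits above the queried granule. If no object start
// remains in this byte, scan backwards through earlier bytes of the bitmap
// to find the nearest preceding object start.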
1059 uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1);
1061 ASSERT(mapIndex > 0);
1062 byte = m_objectStartBitMap[--mapIndex];
1064 int leadingZeroes = numberOfLeadingZeroes(byte);
1065 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
1066 objectOffset = objectStartNumber * allocationGranularity;
1067 Address objectAddress = objectOffset + payload();
1068 Header* header = reinterpret_cast<Header*>(objectAddress);
1069 if (header->isFree())
1074 template<typename Header>
1075 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
1077 ASSERT(contains(address));
1078 Header* header = findHeaderFromAddress(address);
1082 #if ENABLE(GC_TRACING)
1083 visitor->setHostInfo(&address, "stack");
1085 if (hasVTable(header) && !vTableInitialized(header->payload()))
1086 visitor->markConservatively(header);
1088 visitor->mark(header, traceCallback(header));
1091 #if ENABLE(GC_TRACING)
1092 template<typename Header>
1093 const GCInfo* HeapPage<Header>::findGCInfo(Address address)
1095 if (address < payload())
1098 if (gcInfo()) // for non FinalizedObjectHeader
1101 Header* header = findHeaderFromAddress(address);
1105 return header->gcInfo();
1109 #if defined(ADDRESS_SANITIZER)
1110 template<typename Header>
1111 void HeapPage<Header>::poisonUnmarkedObjects()
1113 for (Address headerAddress = payload(); headerAddress < end(); ) {
1114 Header* header = reinterpret_cast<Header*>(headerAddress);
1115 ASSERT(header->size() < blinkPagePayloadSize());
1117 if (!header->isFree() && !header->isMarked())
1118 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1119 headerAddress += header->size();
1125 inline void HeapPage<FinalizedHeapObjectHeader>::finalize(FinalizedHeapObjectHeader* header)
1131 inline void HeapPage<HeapObjectHeader>::finalize(HeapObjectHeader* header)
1134 HeapObjectHeader::finalize(gcInfo(), header->payload(), header->payloadSize());
1138 inline TraceCallback HeapPage<HeapObjectHeader>::traceCallback(HeapObjectHeader* header)
1141 return gcInfo()->m_trace;
1145 inline TraceCallback HeapPage<FinalizedHeapObjectHeader>::traceCallback(FinalizedHeapObjectHeader* header)
1147 return header->traceCallback();
1151 inline bool HeapPage<HeapObjectHeader>::hasVTable(HeapObjectHeader* header)
1154 return gcInfo()->hasVTable();
1158 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header)
1160 return header->hasVTable();
1163 template<typename Header>
1164 void LargeHeapObject<Header>::getStats(HeapStats& stats)
1166 stats.increaseAllocatedSpace(size());
1167 stats.increaseObjectSpace(payloadSize());
1170 template<typename Entry>
1171 void HeapExtentCache<Entry>::flush()
1174 for (int i = 0; i < numberOfEntries; i++)
1175 m_entries[i] = Entry();
1176 m_hasEntries = false;
1180 template<typename Entry>
1181 size_t HeapExtentCache<Entry>::hash(Address address)
1183 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2);
1184 value ^= value >> numberOfEntriesLog2;
1185 value ^= value >> (numberOfEntriesLog2 * 2);
1186 value &= numberOfEntries - 1;
1187 return value & ~1; // Returns only even number.
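// Returning an even index lets each slot pair (index, index + 1) act as a
// two-entry set: lookup checks both slots and addEntry moves the previous
// entry into the odd slot.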
1190 template<typename Entry>
1191 typename Entry::LookupResult HeapExtentCache<Entry>::lookup(Address address)
1193 size_t index = hash(address);
1194 ASSERT(!(index & 1));
1195 Address cachePage = roundToBlinkPageStart(address);
1196 if (m_entries[index].address() == cachePage)
1197 return m_entries[index].result();
1198 if (m_entries[index + 1].address() == cachePage)
1199 return m_entries[index + 1].result();
1203 template<typename Entry>
1204 void HeapExtentCache<Entry>::addEntry(Address address, typename Entry::LookupResult entry)
1206 m_hasEntries = true;
1207 size_t index = hash(address);
1208 ASSERT(!(index & 1));
1209 Address cachePage = roundToBlinkPageStart(address);
1210 m_entries[index + 1] = m_entries[index];
1211 m_entries[index] = Entry(cachePage, entry);
1214 // These should not be needed, but it seems impossible to persuade clang to
1215 // instantiate the template functions and export them from a shared library, so
1216 // we add these in the non-templated subclass, which does not have that issue.
1217 void HeapContainsCache::addEntry(Address address, BaseHeapPage* page)
1219 HeapExtentCache<PositiveEntry>::addEntry(address, page);
1222 BaseHeapPage* HeapContainsCache::lookup(Address address)
1224 return HeapExtentCache<PositiveEntry>::lookup(address);
1227 void Heap::flushHeapDoesNotContainCache()
1229 s_heapDoesNotContainCache->flush();
1232 void CallbackStack::init(CallbackStack** first)
// The stacks are chained, so we start by setting this to null as a terminator.
1236 *first = new CallbackStack(first);
1239 void CallbackStack::shutdown(CallbackStack** first)
1241 CallbackStack* next;
1242 for (CallbackStack* current = *first; current; current = next) {
1243 next = current->m_next;
1249 CallbackStack::~CallbackStack()
1256 void CallbackStack::clearUnused()
1258 ASSERT(m_current == &(m_buffer[0]));
1259 for (size_t i = 0; i < bufferSize; i++)
1260 m_buffer[i] = Item(0, 0);
1263 void CallbackStack::assertIsEmpty()
1265 ASSERT(m_current == &(m_buffer[0]));
1269 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor)
1271 if (m_current == &(m_buffer[0])) {
1278 CallbackStack* nextStack = m_next;
1281 return nextStack->popAndInvokeCallback(first, visitor);
1283 Item* item = --m_current;
1285 VisitorCallback callback = item->callback();
1286 #if ENABLE(GC_TRACING)
1287 if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndInvokeCallback
1288 visitor->setHostInfo(item->object(), classOf(item->object()));
1290 callback(visitor, item->object());
1295 class MarkingVisitor : public Visitor {
1297 #if ENABLE(GC_TRACING)
1298 typedef HashSet<uintptr_t> LiveObjectSet;
1299 typedef HashMap<String, LiveObjectSet> LiveObjectMap;
1300 typedef HashMap<uintptr_t, std::pair<uintptr_t, String> > ObjectGraph;
1303 inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback)
1306 ASSERT(objectPointer);
1307 if (header->isMarked())
1310 #if ENABLE(GC_TRACING)
1311 String className(classOf(objectPointer));
1313 LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet());
1314 result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer));
1316 ObjectGraph::AddResult result = objectGraph().add(reinterpret_cast<uintptr_t>(objectPointer), std::make_pair(reinterpret_cast<uintptr_t>(m_hostObject), m_hostName));
1317 ASSERT(result.isNewEntry);
1318 // printf("%s[%p] -> %s[%p]\n", m_hostName.ascii().data(), m_hostObject, className.ascii().data(), objectPointer);
1321 Heap::pushTraceCallback(const_cast<void*>(objectPointer), callback);
1324 virtual void mark(HeapObjectHeader* header, TraceCallback callback) OVERRIDE
1326 // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1327 // version to correctly find the payload.
1328 visitHeader(header, header->payload(), callback);
1331 virtual void mark(FinalizedHeapObjectHeader* header, TraceCallback callback) OVERRIDE
1333 // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1334 // version to correctly find the payload.
1335 visitHeader(header, header->payload(), callback);
1338 virtual void mark(const void* objectPointer, TraceCallback callback) OVERRIDE
1342 FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(objectPointer);
1343 visitHeader(header, header->payload(), callback);
1347 inline void visitConservatively(HeapObjectHeader* header, void* objectPointer, size_t objectSize)
1350 ASSERT(objectPointer);
1351 if (header->isMarked())
1355 // Scan through the object's fields and visit them conservatively.
1356 Address* objectFields = reinterpret_cast<Address*>(objectPointer);
1357 for (size_t i = 0; i < objectSize / sizeof(Address); ++i)
1358 Heap::checkAndMarkPointer(this, objectFields[i]);
1361 virtual void markConservatively(HeapObjectHeader* header)
1363 // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1364 // version to correctly find the payload.
1365 visitConservatively(header, header->payload(), header->payloadSize());
1368 virtual void markConservatively(FinalizedHeapObjectHeader* header)
1370 // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1371 // version to correctly find the payload.
1372 visitConservatively(header, header->payload(), header->payloadSize());
1375 virtual void registerWeakMembers(const void* closure, const void* containingObject, WeakPointerCallback callback) OVERRIDE
1377 Heap::pushWeakObjectPointerCallback(const_cast<void*>(closure), const_cast<void*>(containingObject), callback);
1380 virtual bool isMarked(const void* objectPointer) OVERRIDE
1382 return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked();
1385 // This macro defines the necessary visitor methods for typed heaps
1386 #define DEFINE_VISITOR_METHODS(Type) \
1387 virtual void mark(const Type* objectPointer, TraceCallback callback) OVERRIDE \
1389 if (!objectPointer) \
1391 HeapObjectHeader* header = \
1392 HeapObjectHeader::fromPayload(objectPointer); \
1393 visitHeader(header, header->payload(), callback); \
1395 virtual bool isMarked(const Type* objectPointer) OVERRIDE \
1397 return HeapObjectHeader::fromPayload(objectPointer)->isMarked(); \
1400 FOR_EACH_TYPED_HEAP(DEFINE_VISITOR_METHODS)
1401 #undef DEFINE_VISITOR_METHODS
1403 #if ENABLE(GC_TRACING)
1406 printf("\n---------- AFTER MARKING -------------------\n");
1407 for (LiveObjectMap::iterator it = currentlyLive().begin(), end = currentlyLive().end(); it != end; ++it) {
1408 printf("%s %u", it->key.ascii().data(), it->value.size());
1410 if (it->key == "WebCore::Document")
1411 reportStillAlive(it->value, previouslyLive().get(it->key));
1416 previouslyLive().swap(currentlyLive());
1417 currentlyLive().clear();
1419 for (HashSet<uintptr_t>::iterator it = objectsToFindPath().begin(), end = objectsToFindPath().end(); it != end; ++it) {
1420 dumpPathToObjectFromObjectGraph(objectGraph(), *it);
1424 static void reportStillAlive(LiveObjectSet current, LiveObjectSet previous)
1428 printf(" [previously %u]", previous.size());
1429 for (LiveObjectSet::iterator it = current.begin(), end = current.end(); it != end; ++it) {
1430 if (previous.find(*it) == previous.end())
1438 printf(" {survived 2GCs %d: ", count);
1439 for (LiveObjectSet::iterator it = current.begin(), end = current.end(); it != end; ++it) {
1440 if (previous.find(*it) == previous.end())
1450 static void dumpPathToObjectFromObjectGraph(const ObjectGraph& graph, uintptr_t target)
1452 printf("Path to %lx of %s\n", target, classOf(reinterpret_cast<const void*>(target)).ascii().data());
1453 ObjectGraph::const_iterator it = graph.find(target);
1454 while (it != graph.end()) {
1455 printf("<- %lx of %s\n", it->value.first, it->value.second.ascii().data());
1456 it = graph.find(it->value.first);
1461 static void dumpPathToObjectOnNextGC(void* p)
1463 objectsToFindPath().add(reinterpret_cast<uintptr_t>(p));
1466 static LiveObjectMap& previouslyLive()
1468 DEFINE_STATIC_LOCAL(LiveObjectMap, map, ());
1472 static LiveObjectMap& currentlyLive()
1474 DEFINE_STATIC_LOCAL(LiveObjectMap, map, ());
1478 static ObjectGraph& objectGraph()
1480 DEFINE_STATIC_LOCAL(ObjectGraph, graph, ());
1484 static HashSet<uintptr_t>& objectsToFindPath()
1486 DEFINE_STATIC_LOCAL(HashSet<uintptr_t>, set, ());
1492 virtual void registerWeakCell(void** cell, WeakPointerCallback callback) OVERRIDE
1494 Heap::pushWeakCellPointerCallback(cell, callback);
1500 ThreadState::init();
1501 CallbackStack::init(&s_markingStack);
1502 CallbackStack::init(&s_weakCallbackStack);
1503 s_heapDoesNotContainCache = new HeapDoesNotContainCache();
1504 s_markingVisitor = new MarkingVisitor();
1507 void Heap::shutdown()
1509 s_shutdownCalled = true;
1510 ThreadState::shutdownHeapIfNecessary();
1513 void Heap::doShutdown()
1515 // We don't want to call doShutdown() twice.
1516 if (!s_markingVisitor)
1519 ASSERT(!ThreadState::isAnyThreadInGC());
1520 ASSERT(!ThreadState::attachedThreads().size());
1521 delete s_markingVisitor;
1522 s_markingVisitor = 0;
1523 delete s_heapDoesNotContainCache;
1524 s_heapDoesNotContainCache = 0;
1525 CallbackStack::shutdown(&s_weakCallbackStack);
1526 CallbackStack::shutdown(&s_markingStack);
1527 ThreadState::shutdown();
1530 BaseHeapPage* Heap::contains(Address address)
1532 ASSERT(ThreadState::isAnyThreadInGC());
1533 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1534 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1535 BaseHeapPage* page = (*it)->contains(address);
1542 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
1544 ASSERT(ThreadState::isAnyThreadInGC());
1547 if (s_heapDoesNotContainCache->lookup(address))
1551 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1552 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1553 if ((*it)->checkAndMarkPointer(visitor, address)) {
1554 // Pointer was in a page of that thread. If it actually pointed
1555 // into an object then that object was found and marked.
1556 ASSERT(!s_heapDoesNotContainCache->lookup(address));
1562 s_heapDoesNotContainCache->addEntry(address, true);
1564 if (!s_heapDoesNotContainCache->lookup(address))
1565 s_heapDoesNotContainCache->addEntry(address, true);
1570 #if ENABLE(GC_TRACING)
1571 const GCInfo* Heap::findGCInfo(Address address)
1573 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1574 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1575 if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) {
1582 void Heap::dumpPathToObjectOnNextGC(void* p)
1584 static_cast<MarkingVisitor*>(s_markingVisitor)->dumpPathToObjectOnNextGC(p);
1588 void Heap::pushTraceCallback(void* object, TraceCallback callback)
1590 ASSERT(Heap::contains(object));
1591 CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack);
1592 *slot = CallbackStack::Item(object, callback);
1595 bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
1597 return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor);
1600 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback)
1602 ASSERT(Heap::contains(cell));
1603 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallbackStack);
1604 *slot = CallbackStack::Item(cell, callback);
1607 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointerCallback callback)
1609 ASSERT(Heap::contains(object));
1610 BaseHeapPage* heapPageForObject = reinterpret_cast<BaseHeapPage*>(pageHeaderAddress(reinterpret_cast<Address>(object)));
1611 ASSERT(Heap::contains(object) == heapPageForObject);
1612 ThreadState* state = heapPageForObject->threadState();
1613 state->pushWeakObjectPointerCallback(closure, callback);
1616 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
1618 return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visitor);
1621 void Heap::prepareForGC()
1623 ASSERT(ThreadState::isAnyThreadInGC());
1624 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1625 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
1626 (*it)->prepareForGC();
1629 void Heap::collectGarbage(ThreadState::StackState stackState)
1631 ThreadState* state = ThreadState::current();
1632 state->clearGCRequested();
1634 GCScope gcScope(stackState);
1635 // Check if we successfully parked the other threads. If not we bail out of the GC.
1636 if (!gcScope.allThreadsParked()) {
1637 ThreadState::current()->setGCRequested();
1640 TRACE_EVENT0("Blink", "Heap::collectGarbage");
1641 TRACE_EVENT_SCOPED_SAMPLING_STATE("Blink", "BlinkGC");
1642 #if ENABLE(GC_TRACING)
1643 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear();
1646 // Disallow allocation during garbage collection (but not
// during the finalization that happens when the gcScope is torn down).
1649 NoAllocationScope<AnyThread> noAllocationScope;
1653 ThreadState::visitRoots(s_markingVisitor);
1654 // Recursively mark all objects that are reachable from the roots.
1655 while (popAndInvokeTraceCallback(s_markingVisitor)) { }
// Call weak callbacks on objects that may now be pointing to dead objects.
1659 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { }
1661 // It is not permitted to trace pointers of live objects in the weak
1662 // callback phase, so the marking stack should still be empty here.
1663 s_markingStack->assertIsEmpty();
1665 #if ENABLE(GC_TRACING)
1666 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
1670 void Heap::collectAllGarbage()
1672 // FIXME: oilpan: we should perform a single GC and everything
// should die. Unfortunately this is not the case for all objects
1674 // because the hierarchy was not completely moved to the heap and
1675 // some heap allocated objects own objects that contain persistents
1676 // pointing to other heap allocated objects.
1677 for (int i = 0; i < 5; i++)
1678 collectGarbage(ThreadState::NoHeapPointersOnStack);
1681 void Heap::setForcePreciseGCForTesting()
1683 ThreadState::current()->setForcePreciseGCForTesting(true);
1686 void Heap::getStats(HeapStats* stats)
1689 ASSERT(ThreadState::isAnyThreadInGC());
1690 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1691 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
1692 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1694 (*it)->getStats(temp);
1699 bool Heap::isConsistentForGC()
1701 ASSERT(ThreadState::isAnyThreadInGC());
1702 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1703 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1704 if (!(*it)->isConsistentForGC())
1710 void Heap::makeConsistentForGC()
1712 ASSERT(ThreadState::isAnyThreadInGC());
1713 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1714 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
1715 (*it)->makeConsistentForGC();
1718 // Force template instantiations for the types that we need.
1719 template class HeapPage<FinalizedHeapObjectHeader>;
1720 template class HeapPage<HeapObjectHeader>;
1721 template class ThreadHeap<FinalizedHeapObjectHeader>;
1722 template class ThreadHeap<HeapObjectHeader>;
1724 Visitor* Heap::s_markingVisitor;
1725 CallbackStack* Heap::s_markingStack;
1726 CallbackStack* Heap::s_weakCallbackStack;
1727 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
1728 bool Heap::s_shutdownCalled = false;