 * Copyright (C) 2013 Google Inc. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "heap/Heap.h"
#include "heap/ThreadState.h"
#include "wtf/PassOwnPtr.h"
static bool IsPowerOf2(size_t power)
return !((power - 1) & power);
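// For example, for power == 8 the check is (8 - 1) & 8 == 0b0111 & 0b1000 == 0,
// so the function returns true; for power == 12 it is 0b1011 & 0b1100 == 0b1000,
// so it returns false. Note that 0 also passes the check, so callers are
// expected to pass a non-zero value.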
static Address roundToBlinkPageBoundary(void* base)
return reinterpret_cast<Address>((reinterpret_cast<uintptr_t>(base) + blinkPageOffsetMask) & blinkPageBaseMask);
static size_t roundToOsPageSize(size_t size)
return (size + osPageSize() - 1) & ~(osPageSize() - 1);
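// Both helpers rely on the respective sizes being powers of two, which makes
// the "add (size - 1), then mask" idiom a branch-free round-up. For example,
// with a typical 4 KiB OS page, roundToOsPageSize(10000) yields
// (10000 + 4095) & ~4095 == 12288, i.e. three OS pages.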
static const size_t pageSize = getpagesize();
static size_t pageSize = 0;
pageSize = info.dwPageSize;
ASSERT(IsPowerOf2(pageSize));
MemoryRegion(Address base, size_t size) : m_base(base), m_size(size) { ASSERT(size > 0); }
bool contains(Address addr) const
return m_base <= addr && addr < (m_base + m_size);
bool contains(const MemoryRegion& other) const
return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
int err = munmap(m_base, m_size);
bool success = VirtualFree(m_base, 0, MEM_RELEASE);
RELEASE_ASSERT(success);
WARN_UNUSED_RETURN bool commit()
int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE);
madvise(m_base, m_size, MADV_NORMAL);
void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE);
int err = mprotect(m_base, m_size, PROT_NONE);
RELEASE_ASSERT(!err);
// FIXME: Consider using MADV_FREE on MacOS.
madvise(m_base, m_size, MADV_DONTNEED);
bool success = VirtualFree(m_base, m_size, MEM_DECOMMIT);
RELEASE_ASSERT(success);
Address base() const { return m_base; }
// Representation of the memory used for a Blink heap page.
//
// The representation keeps track of two memory regions:
//
// 1. The virtual memory reserved from the system in order to be able
//    to free all the virtual memory reserved on destruction.
//
// 2. The writable memory (a sub-region of the reserved virtual
//    memory region) that is used for the actual heap page payload.
//
// Guard pages are created before and after the writable memory.
~PageMemory() { m_reserved.release(); }
bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); }
void decommit() { m_writable.decommit(); }
Address writableStart() { return m_writable.base(); }
// Allocate a virtual address space for the blink page with the
// following layout:
//
//    [ guard os page | ... payload ... | guard os page ]
//    ^---{ aligned to blink page size }
static PageMemory* allocate(size_t payloadSize)
ASSERT(payloadSize > 0);
// Virtual memory allocation routines operate in OS page sizes.
// Round up the requested size to the nearest OS page size.
payloadSize = roundToOsPageSize(payloadSize);
// Overallocate by blinkPageSize and 2 times OS page size to
// ensure a chunk of memory which is blinkPageSize aligned and
// has a system page before and after to use for guarding. We
// unmap the excess memory before returning.
size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize;
Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0));
RELEASE_ASSERT(base != MAP_FAILED);
Address end = base + allocationSize;
Address alignedBase = roundToBlinkPageBoundary(base);
Address payloadBase = alignedBase + osPageSize();
Address payloadEnd = payloadBase + payloadSize;
Address blinkPageEnd = payloadEnd + osPageSize();
// If the allocated memory was not blink page aligned, release
// the memory before the aligned address.
if (alignedBase != base)
MemoryRegion(base, alignedBase - base).release();
// Create guard pages by decommitting an OS page before and
// after the payload.
MemoryRegion(alignedBase, osPageSize()).decommit();
MemoryRegion(payloadEnd, osPageSize()).decommit();
// Free the additional memory at the end of the page if any.
if (blinkPageEnd < end)
MemoryRegion(blinkPageEnd, end - blinkPageEnd).release();
return new PageMemory(MemoryRegion(alignedBase, blinkPageEnd - alignedBase), MemoryRegion(payloadBase, payloadSize));
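// In other words, out of the over-sized mmap only the range
// [alignedBase, blinkPageEnd) survives: one decommitted guard OS page, the
// committed payload, and another decommitted guard OS page. The slack below
// alignedBase and above blinkPageEnd is returned to the system, so (assuming
// a typical 4 KiB OS page) the reservation kept per heap page is payloadSize
// plus two guard pages, and stray accesses just past either end of the
// payload fault immediately.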
Address alignedBase = 0;
// On Windows it is impossible to partially release a region
// of memory allocated by VirtualAlloc. To avoid wasting
// virtual address space we reserve a larger region than needed,
// release it as a whole, and then try to reserve an aligned
// region inside the address range it covered.
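// Note that this release-then-re-reserve dance is inherently racy: another
// thread or allocation can grab the released range before the second
// VirtualAlloc below runs, which is why the aligned reservation is retried a
// few times before falling back to a plain, unaligned reservation.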
for (int attempt = 0; attempt < 3; attempt++) {
base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
RELEASE_ASSERT(base);
VirtualFree(base, 0, MEM_RELEASE);
alignedBase = roundToBlinkPageBoundary(base);
base = static_cast<Address>(VirtualAlloc(alignedBase, payloadSize + 2 * osPageSize(), MEM_RESERVE, PAGE_NOACCESS));
RELEASE_ASSERT(base == alignedBase);
allocationSize = payloadSize + 2 * osPageSize();
// We failed to avoid wasting virtual address space after
// several attempts.
base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
RELEASE_ASSERT(base);
// FIXME: If base is by accident blink page size aligned
// here then we can create two pages out of the reserved
// space.
alignedBase = roundToBlinkPageBoundary(base);
Address payloadBase = alignedBase + osPageSize();
PageMemory* storage = new PageMemory(MemoryRegion(base, allocationSize), MemoryRegion(payloadBase, payloadSize));
bool res = storage->commit();
PageMemory(const MemoryRegion& reserved, const MemoryRegion& writable)
: m_reserved(reserved)
, m_writable(writable)
ASSERT(reserved.contains(writable));
MemoryRegion m_reserved;
MemoryRegion m_writable;
explicit GCScope(ThreadState::StackState stackState)
: m_state(ThreadState::current())
, m_safePointScope(stackState)
m_state->checkThread();
// FIXME: In the unlikely event that two threads decide to
// collect garbage at the same time, avoid doing two GCs in
// a row.
ASSERT(!m_state->isInGC());
ThreadState::stopThreads();
ASSERT(!m_state->isInGC());
ThreadState::resumeThreads();
ThreadState* m_state;
ThreadState::SafePointScope m_safePointScope;
bool HeapObjectHeader::isMarked() const
return m_size & markBitMask;
void HeapObjectHeader::unmark()
m_size &= ~markBitMask;
bool HeapObjectHeader::hasDebugMark() const
return m_size & debugBitMask;
void HeapObjectHeader::clearDebugMark()
m_size &= ~debugBitMask;
void HeapObjectHeader::setDebugMark()
m_size |= debugBitMask;
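// The flag accessors above treat m_size as a small bit-field: because object
// sizes are always a multiple of the allocation granularity, the low-order
// bits of m_size are otherwise zero and are used for per-object flags such as
// the mark bit and the debug bit (markBitMask, debugBitMask, and the
// free-list bit defined alongside them), while the size accessors mask those
// bits away.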
void HeapObjectHeader::zapMagic()
m_magic = zappedMagic;
HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload)
Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize);
void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize)
if (gcInfo->hasFinalizer()) {
gcInfo->m_finalize(object);
for (size_t i = 0; i < objectSize; i++)
object[i] = finalizedZapValue;
// Zap the primary vTable entry (secondary vTable entries are not zapped).
*(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
void FinalizedHeapObjectHeader::finalize()
HeapObjectHeader::finalize(m_gcInfo, payload(), payloadSize());
template<typename Header>
void LargeHeapObject<Header>::unmark()
return heapObjectHeader()->unmark();
template<typename Header>
bool LargeHeapObject<Header>::isMarked()
return heapObjectHeader()->isMarked();
template<typename Header>
bool LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
if (contains(address)) {
void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback());
void LargeHeapObject<HeapObjectHeader>::mark(Visitor* visitor)
visitor->mark(heapObjectHeader(), gcInfo()->m_trace);
void LargeHeapObject<FinalizedHeapObjectHeader>::finalize()
heapObjectHeader()->finalize();
void LargeHeapObject<HeapObjectHeader>::finalize()
HeapObjectHeader::finalize(gcInfo(), payload(), payloadSize());
FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload)
Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
FinalizedHeapObjectHeader* header = reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
template<typename Header>
ThreadHeap<Header>::ThreadHeap(ThreadState* state)
: m_currentAllocationPoint(0)
, m_remainingAllocationSize(0)
, m_firstLargeHeapObject(0)
, m_biggestFreeListIndex(0)
, m_inFinalizeAll(false)
, m_threadState(state)
template<typename Header>
ThreadHeap<Header>::~ThreadHeap()
// FIXME: at the moment we can't finalize all objects owned by the
// main thread eagerly because there are tangled destruction order
// dependencies there.
if (!ThreadState::isMainThread())
template<typename Header>
Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
size_t allocationSize = allocationSizeFromSize(size);
if (threadState()->shouldGC()) {
if (threadState()->shouldForceConservativeGC())
Heap::collectGarbage(ThreadState::HeapPointersOnStack);
threadState()->setGCRequested();
ensureCurrentAllocation(allocationSize, gcInfo);
return allocate(size, gcInfo);
template<typename Header>
bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
size_t bucketSize = 1 << m_biggestFreeListIndex;
int i = m_biggestFreeListIndex;
for (; i > 0; i--, bucketSize >>= 1) {
if (bucketSize < minSize)
FreeListEntry* entry = m_freeLists[i];
m_biggestFreeListIndex = i;
entry->unlink(&m_freeLists[i]);
setAllocationPoint(entry->address(), entry->size());
ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize);
m_biggestFreeListIndex = i;
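// The scan above starts at the largest non-empty bucket and walks downwards,
// giving up as soon as bucketSize (== 1 << i) drops below minSize. Bucket i
// is expected to hold only entries of at least (1 << i) bytes (see
// bucketIndexForSize), so any entry taken from a surviving bucket is large
// enough to become the new allocation area; for example, a 48-byte request
// can be served from bucket 6 (entries of 64 bytes and up) but never from
// bucket 5.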
template<typename Header>
void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
ASSERT(minSize >= allocationGranularity);
if (remainingAllocationSize() >= minSize)
if (remainingAllocationSize() > 0)
addToFreeList(currentAllocationPoint(), remainingAllocationSize());
if (allocateFromFreeList(minSize))
addPageToHeap(gcInfo);
bool success = allocateFromFreeList(minSize);
RELEASE_ASSERT(success);
template<typename Header>
BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address)
for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
if (page->contains(address))
template<typename Header>
BaseHeapPage* ThreadHeap<Header>::largeHeapObjectFromAddress(Address address)
for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
if (current->contains(address))
template<typename Header>
bool ThreadHeap<Header>::checkAndMarkLargeHeapObject(Visitor* visitor, Address address)
for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
if (current->checkAndMarkPointer(visitor, address))
template<typename Header>
void ThreadHeap<Header>::addToFreeList(Address address, size_t size)
ASSERT(heapPageFromAddress(address));
ASSERT(heapPageFromAddress(address + size - 1));
ASSERT(size < blinkPagePayloadSize());
// The free list entries are only pointer aligned (but when we allocate
// from them we are 8 byte aligned due to the header size).
ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask));
ASSERT(!(size & allocationMask));
ASAN_POISON_MEMORY_REGION(address, size);
FreeListEntry* entry;
if (size < sizeof(*entry)) {
// Create a dummy header with only a size and the freelist bit set.
ASSERT(size >= sizeof(BasicObjectHeader));
// Freelist-encode the size to mark the lost memory as freelist memory.
new (NotNull, address) BasicObjectHeader(BasicObjectHeader::freeListEncodedSize(size));
// This memory gets lost. Sweeping can reclaim it.
entry = new (NotNull, address) FreeListEntry(size);
// For ASAN we don't add the entry to the free lists until the asanDeferMemoryReuseCount
// reaches zero. However, we always add entire pages to ensure that adding a new page will
// increase the allocation space.
if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList())
int index = bucketIndexForSize(size);
entry->link(&m_freeLists[index]);
if (index > m_biggestFreeListIndex)
m_biggestFreeListIndex = index;
template<typename Header>
Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInfo)
// Caller already added space for the object header and rounded up to allocation alignment.
ASSERT(!(size & allocationMask));
size_t allocationSize = sizeof(LargeHeapObject<Header>) + size;
// Ensure that there is enough space for alignment. If the header
// is not a multiple of 8 bytes we will allocate an extra
// headerPadding<Header> bytes to ensure it is 8 byte aligned.
allocationSize += headerPadding<Header>();
// If ASAN is supported we add allocationGranularity bytes to the allocated space and
// poison that to detect overflows.
allocationSize += allocationGranularity;
if (threadState()->shouldGC())
threadState()->setGCRequested();
PageMemory* pageMemory = PageMemory::allocate(allocationSize);
Address largeObjectAddress = pageMemory->writableStart();
Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
memset(headerAddress, 0, size);
Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
Address result = headerAddress + sizeof(*header);
ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo);
// Poison the object header and allocationGranularity bytes after the object.
ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
largeObject->link(&m_firstLargeHeapObject);
stats().increaseAllocatedSpace(largeObject->size());
stats().increaseObjectSpace(largeObject->payloadSize());
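// The resulting allocation is laid out as
//
//   [ LargeHeapObject<Header> | headerPadding<Header>() | Header | payload ]
//
// inside a single PageMemory reservation, with the payload address handed
// back to the caller being headerAddress + sizeof(Header) (checked to be
// allocation aligned above). Under ASAN an extra allocationGranularity-sized
// region past the object is poisoned as a redzone to catch overflows.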
template<typename Header>
void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
object->unlink(previousNext);
// Unpoison the object header and allocationGranularity bytes after the
// object before freeing.
ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
delete object->storage();
void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
// When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on
// the heap should be unused (i.e. 0).
void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
// When adding a page to the ThreadHeap using HeapObjectHeaders, store the GCInfo on the heap
// since it is the same for all objects.
allocatePage(gcInfo);
template<typename Header>
void ThreadHeap<Header>::clearPagePool()
while (takePageFromPool()) { }
template<typename Header>
PageMemory* ThreadHeap<Header>::takePageFromPool()
while (PagePoolEntry* entry = m_pagePool) {
m_pagePool = entry->next();
PageMemory* storage = entry->storage();
if (storage->commit())
// Failed to commit pooled storage. Release it.
template<typename Header>
void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused)
PageMemory* storage = unused->storage();
PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool);
template<typename Header>
void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
heapContainsCache()->flush();
PageMemory* pageMemory = takePageFromPool();
pageMemory = PageMemory::allocate(blinkPagePayloadSize());
RELEASE_ASSERT(pageMemory);
HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
page->link(&m_firstPage);
addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
template<typename Header>
void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats)
for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
page->getStats(scannedStats);
for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
current->getStats(scannedStats);
template<typename Header>
void ThreadHeap<Header>::sweep()
ASSERT(isConsistentForGC());
// When using ASAN do a pre-sweep where all unmarked objects are poisoned before
// calling their finalizer methods. This can catch the cases where one object's
// finalizer tries to modify another object as part of finalization.
for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
page->poisonUnmarkedObjects();
HeapPage<Header>* page = m_firstPage;
HeapPage<Header>** previous = &m_firstPage;
bool pagesRemoved = false;
if (page->isEmpty()) {
HeapPage<Header>* unused = page;
HeapPage<Header>::unlink(unused, previous);
previous = &page->m_next;
heapContainsCache()->flush();
LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
if (current->isMarked()) {
stats().increaseAllocatedSpace(current->size());
stats().increaseObjectSpace(current->payloadSize());
previousNext = &current->m_next;
current = current->next();
LargeHeapObject<Header>* next = current->next();
freeLargeObject(current, previousNext);
template<typename Header>
void ThreadHeap<Header>::finalizeAll(const void* except)
setFinalizeAll(true);
// No nested GCs are permitted. The thread is exiting.
NoAllocationScope<AnyThread> noAllocation;
makeConsistentForGC();
for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
Address startOfGap = page->payload();
Address end = page->end();
Address headerAddress;
for (headerAddress = page->payload(); headerAddress < end; ) {
BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
ASSERT(basicHeader->size() < blinkPagePayloadSize());
if (basicHeader->isFree()) {
headerAddress += basicHeader->size();
// At this point we know this is a valid object of type Header.
Header* header = static_cast<Header*>(basicHeader);
if (header->payload() == except) {
if (startOfGap != headerAddress)
addToFreeList(startOfGap, headerAddress - startOfGap);
headerAddress += header->size();
startOfGap = headerAddress;
page->finalize(header);
headerAddress += header->size();
ASSERT(headerAddress == end);
if (startOfGap != end)
addToFreeList(startOfGap, end - startOfGap);
LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
LargeHeapObject<Header>* next = current->next();
if (current->heapObjectHeader()->payload() != except)
freeLargeObject(current, previousNext);
previousNext = &current->m_next;
setFinalizeAll(false);
template<typename Header>
bool ThreadHeap<Header>::isConsistentForGC()
for (size_t i = 0; i < blinkPageSizeLog2; i++) {
return !ownsNonEmptyAllocationArea();
template<typename Header>
void ThreadHeap<Header>::makeConsistentForGC()
if (ownsNonEmptyAllocationArea())
addToFreeList(currentAllocationPoint(), remainingAllocationSize());
setAllocationPoint(0, 0);
template<typename Header>
void ThreadHeap<Header>::clearMarks()
ASSERT(isConsistentForGC());
for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
template<typename Header>
void ThreadHeap<Header>::deletePages()
heapContainsCache()->flush();
// Add all pages in the pool to the heap's list of pages before deleting
for (HeapPage<Header>* page = m_firstPage; page; ) {
HeapPage<Header>* dead = page;
PageMemory* storage = dead->storage();
for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
LargeHeapObject<Header>* dead = current;
current = current->next();
PageMemory* storage = dead->storage();
dead->~LargeHeapObject();
m_firstLargeHeapObject = 0;
template<typename Header>
void ThreadHeap<Header>::clearFreeLists()
for (size_t i = 0; i < blinkPageSizeLog2; i++)
int BaseHeap::bucketIndexForSize(size_t size)
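// This is expected to map a size to the index of the largest power of two not
// exceeding it (e.g. 48 -> 5, 64 -> 6), so that allocateFromFreeList can use
// (1 << index) as a lower bound on the sizes stored in a bucket. A
// straightforward implementation is a shift-count loop along the lines of:
//
//     int index = -1;
//     while (size) { size >>= 1; index++; }
//     return index;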
template<typename Header>
HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo)
: BaseHeapPage(storage, gcInfo)
COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
m_objectStartBitMapComputed = false;
ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
heap->stats().increaseAllocatedSpace(blinkPageSize);
template<typename Header>
void HeapPage<Header>::link(HeapPage** prevNext)
template<typename Header>
void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext)
*prevNext = unused->m_next;
unused->heap()->addPageToPool(unused);
template<typename Header>
void HeapPage<Header>::getStats(HeapStats& stats)
stats.increaseAllocatedSpace(blinkPageSize);
Address headerAddress = payload();
ASSERT(headerAddress != end());
Header* header = reinterpret_cast<Header*>(headerAddress);
if (!header->isFree())
stats.increaseObjectSpace(header->payloadSize());
ASSERT(header->size() < blinkPagePayloadSize());
headerAddress += header->size();
ASSERT(headerAddress <= end());
} while (headerAddress < end());
template<typename Header>
bool HeapPage<Header>::isEmpty()
BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
return header->isFree() && (header->size() == payloadSize());
template<typename Header>
void HeapPage<Header>::sweep()
clearObjectStartBitMap();
heap()->stats().increaseAllocatedSpace(blinkPageSize);
Address startOfGap = payload();
for (Address headerAddress = startOfGap; headerAddress < end(); ) {
BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
ASSERT(basicHeader->size() < blinkPagePayloadSize());
if (basicHeader->isFree()) {
headerAddress += basicHeader->size();
// At this point we know this is a valid object of type Header.
Header* header = static_cast<Header*>(basicHeader);
if (!header->isMarked()) {
// For ASAN we unpoison the specific object when calling the finalizer and
// poison it again when done, so that the object's own finalizer can operate
// on the object while other finalizers are not allowed to access it.
ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
headerAddress += header->size();
if (startOfGap != headerAddress)
heap()->addToFreeList(startOfGap, headerAddress - startOfGap);
headerAddress += header->size();
heap()->stats().increaseObjectSpace(header->payloadSize());
startOfGap = headerAddress;
if (startOfGap != end())
heap()->addToFreeList(startOfGap, end() - startOfGap);
template<typename Header>
void HeapPage<Header>::clearMarks()
for (Address headerAddress = payload(); headerAddress < end();) {
Header* header = reinterpret_cast<Header*>(headerAddress);
ASSERT(header->size() < blinkPagePayloadSize());
if (!header->isFree())
headerAddress += header->size();
template<typename Header>
void HeapPage<Header>::populateObjectStartBitMap()
memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
Address start = payload();
for (Address headerAddress = start; headerAddress < end();) {
Header* header = reinterpret_cast<Header*>(headerAddress);
size_t objectOffset = headerAddress - start;
ASSERT(!(objectOffset & allocationMask));
size_t objectStartNumber = objectOffset / allocationGranularity;
size_t mapIndex = objectStartNumber / 8;
ASSERT(mapIndex < objectStartBitMapSize);
m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7));
headerAddress += header->size();
ASSERT(headerAddress <= end());
m_objectStartBitMapComputed = true;
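// The bitmap records one bit per allocationGranularity-sized slot of the
// payload: an object whose header lies at payload offset N sets bit
// (N / allocationGranularity) % 8 of byte (N / allocationGranularity) / 8.
// With the usual 8-byte allocation granularity, for example, a header at
// payload offset 120 is objectStartNumber 15, i.e. bit 7 of bitmap byte 1.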
template<typename Header>
void HeapPage<Header>::clearObjectStartBitMap()
m_objectStartBitMapComputed = false;
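// numberOfLeadingZeroes returns how many of the most significant bits of the
// byte are zero (0 for any byte with the top bit set, 8 for a zero byte);
// checkAndMarkPointer below uses it to locate the highest object-start bit at
// or below the bit position corresponding to a candidate interior pointer.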
static int numberOfLeadingZeroes(uint8_t byte)
template<typename Header>
bool HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address addr)
if (addr < payload())
if (!isObjectStartBitMapComputed())
populateObjectStartBitMap();
size_t objectOffset = addr - payload();
size_t objectStartNumber = objectOffset / allocationGranularity;
size_t mapIndex = objectStartNumber / 8;
ASSERT(mapIndex < objectStartBitMapSize);
size_t bit = objectStartNumber & 7;
uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1);
ASSERT(mapIndex > 0);
byte = m_objectStartBitMap[--mapIndex];
int leadingZeroes = numberOfLeadingZeroes(byte);
objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
objectOffset = objectStartNumber * allocationGranularity;
Address objectAddress = objectOffset + payload();
Header* header = reinterpret_cast<Header*>(objectAddress);
if (header->isFree())
visitor->mark(header, traceCallback(header));
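// In other words: the candidate address is mapped to its slot number, the
// object-start bitmap is scanned backwards (first within the masked byte,
// then through earlier bytes) for the closest preceding object start, and the
// header found there is passed to the visitor unless it is a free-list entry.
// A conservative pointer anywhere into an object therefore keeps the whole
// object alive.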
template<typename Header>
void HeapPage<Header>::poisonUnmarkedObjects()
for (Address headerAddress = payload(); headerAddress < end(); ) {
Header* header = reinterpret_cast<Header*>(headerAddress);
ASSERT(header->size() < blinkPagePayloadSize());
if (!header->isFree() && !header->isMarked())
ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
headerAddress += header->size();
inline void HeapPage<FinalizedHeapObjectHeader>::finalize(FinalizedHeapObjectHeader* header)
inline void HeapPage<HeapObjectHeader>::finalize(HeapObjectHeader* header)
HeapObjectHeader::finalize(gcInfo(), header->payload(), header->payloadSize());
inline TraceCallback HeapPage<HeapObjectHeader>::traceCallback(HeapObjectHeader* header)
return gcInfo()->m_trace;
inline TraceCallback HeapPage<FinalizedHeapObjectHeader>::traceCallback(FinalizedHeapObjectHeader* header)
return header->traceCallback();
template<typename Header>
void LargeHeapObject<Header>::getStats(HeapStats& stats)
stats.increaseAllocatedSpace(size());
stats.increaseObjectSpace(payloadSize());
HeapContainsCache::HeapContainsCache()
: m_entries(adoptArrayPtr(new Entry[HeapContainsCache::numberOfEntries]))
void HeapContainsCache::flush()
for (int i = 0; i < numberOfEntries; i++)
m_entries[i] = Entry();
size_t HeapContainsCache::hash(Address address)
size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2);
value ^= value >> numberOfEntriesLog2;
value ^= value >> (numberOfEntriesLog2 * 2);
value &= numberOfEntries - 1;
return value & ~1; // Returns only even numbers.
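// Returning only even indices makes each slot pair (index, index + 1) behave
// like a two-way set-associative cache entry: lookup probes both slots, and
// addEntry shifts the previous occupant into the odd slot before writing the
// new mapping into the even one, so the most recently added page wins.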
bool HeapContainsCache::lookup(Address address, BaseHeapPage** page)
size_t index = hash(address);
ASSERT(!(index & 1));
Address cachePage = roundToBlinkPageStart(address);
if (m_entries[index].address() == cachePage) {
*page = m_entries[index].containingPage();
if (m_entries[index + 1].address() == cachePage) {
*page = m_entries[index + 1].containingPage();
void HeapContainsCache::addEntry(Address address, BaseHeapPage* page)
size_t index = hash(address);
ASSERT(!(index & 1));
Address cachePage = roundToBlinkPageStart(address);
m_entries[index + 1] = m_entries[index];
m_entries[index] = Entry(cachePage, page);
void CallbackStack::init(CallbackStack** first)
// The stacks are chained, so we start by setting this to null as a terminator.
*first = new CallbackStack(first);
void CallbackStack::shutdown(CallbackStack** first)
CallbackStack* next;
for (CallbackStack* current = *first; current; current = next) {
next = current->m_next;
CallbackStack::~CallbackStack()
void CallbackStack::clearUnused()
ASSERT(m_current == &(m_buffer[0]));
for (size_t i = 0; i < bufferSize; i++)
m_buffer[i] = Item(0, 0);
void CallbackStack::assertIsEmpty()
ASSERT(m_current == &(m_buffer[0]));
bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor)
if (m_current == &(m_buffer[0])) {
CallbackStack* nextStack = m_next;
return nextStack->popAndInvokeCallback(first, visitor);
Item* item = --m_current;
VisitorCallback callback = item->callback();
callback(visitor, item->object());
class MarkingVisitor : public Visitor {
inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback)
ASSERT(objectPointer);
if (header->isMarked())
Heap::pushTraceCallback(const_cast<void*>(objectPointer), callback);
virtual void mark(HeapObjectHeader* header, TraceCallback callback) OVERRIDE
// We need both the HeapObjectHeader and FinalizedHeapObjectHeader
// versions to correctly find the payload.
visitHeader(header, header->payload(), callback);
virtual void mark(FinalizedHeapObjectHeader* header, TraceCallback callback) OVERRIDE
// We need both the HeapObjectHeader and FinalizedHeapObjectHeader
// versions to correctly find the payload.
visitHeader(header, header->payload(), callback);
virtual void mark(const void* objectPointer, TraceCallback callback) OVERRIDE
FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(objectPointer);
visitHeader(header, header->payload(), callback);
virtual void registerWeakMembers(const void* containingObject, WeakPointerCallback callback) OVERRIDE
Heap::pushWeakPointerCallback(const_cast<void*>(containingObject), callback);
virtual bool isMarked(const void* objectPointer) OVERRIDE
return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked();
// This macro defines the necessary visitor methods for typed heaps.
#define DEFINE_VISITOR_METHODS(Type) \
virtual void mark(const Type* objectPointer, TraceCallback callback) OVERRIDE \
if (!objectPointer) \
HeapObjectHeader* header = HeapObjectHeader::fromPayload(objectPointer); \
visitHeader(header, header->payload(), callback); \
virtual bool isMarked(const Type* objectPointer) OVERRIDE \
return HeapObjectHeader::fromPayload(objectPointer)->isMarked(); \
FOR_EACH_TYPED_HEAP(DEFINE_VISITOR_METHODS)
#undef DEFINE_VISITOR_METHODS
ThreadState::init();
CallbackStack::init(&s_markingStack);
CallbackStack::init(&s_weakCallbackStack);
void Heap::shutdown()
ThreadState::shutdown();
CallbackStack::shutdown(&s_markingStack);
CallbackStack::shutdown(&s_weakCallbackStack);
bool Heap::contains(Address address)
ASSERT(ThreadState::isAnyThreadInGC());
ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
if ((*it)->contains(address))
Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
ASSERT(ThreadState::isAnyThreadInGC());
ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
if ((*it)->checkAndMarkPointer(visitor, address)) {
// Pointer found and marked.
void Heap::pushTraceCallback(void* object, TraceCallback callback)
ASSERT(Heap::contains(object));
CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack);
*slot = CallbackStack::Item(object, callback);
bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor);
void Heap::pushWeakPointerCallback(void* object, WeakPointerCallback callback)
ASSERT(Heap::contains(object));
CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallbackStack);
*slot = CallbackStack::Item(object, callback);
bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visitor);
void Heap::prepareForGC()
ASSERT(ThreadState::isAnyThreadInGC());
ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
(*it)->prepareForGC();
void Heap::collectGarbage(ThreadState::StackState stackState, GCType gcType)
ThreadState::current()->clearGCRequested();
GCScope gcScope(stackState);
// Disallow allocation during garbage collection.
NoAllocationScope<AnyThread> noAllocationScope;
MarkingVisitor marker;
ThreadState::visitRoots(&marker);
// Recursively mark all objects that are reachable from the roots.
while (popAndInvokeTraceCallback(&marker)) { }
// Call weak callbacks on objects that may now be pointing to dead objects.
while (popAndInvokeWeakPointerCallback(&marker)) { }
// It is not permitted to trace pointers of live objects in the weak
// callback phase, so the marking stack should still be empty here.
s_markingStack->assertIsEmpty();
void Heap::getStats(HeapStats* stats)
ASSERT(ThreadState::isAnyThreadInGC());
ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
(*it)->getStats(temp);
bool Heap::isConsistentForGC()
ASSERT(ThreadState::isAnyThreadInGC());
ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
return (*it)->isConsistentForGC();
void Heap::makeConsistentForGC()
ASSERT(ThreadState::isAnyThreadInGC());
ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
(*it)->makeConsistentForGC();
// Force template instantiations for the types that we need.
template class HeapPage<FinalizedHeapObjectHeader>;
template class HeapPage<HeapObjectHeader>;
template class ThreadHeap<FinalizedHeapObjectHeader>;
template class ThreadHeap<HeapObjectHeader>;
CallbackStack* Heap::s_markingStack;
CallbackStack* Heap::s_weakCallbackStack;