/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "heap/Heap.h"

#include "heap/ThreadState.h"

#include "wtf/PassOwnPtr.h"

#if OS(POSIX)
#include <sys/mman.h>
#include <unistd.h>
#else
#include <windows.h>
#endif

namespace WebCore {
#if OS(WIN)
static bool IsPowerOf2(size_t power)
{
    return !((power - 1) & power);
}
#endif
static Address roundToBlinkPageBoundary(void* base)
{
    return reinterpret_cast<Address>((reinterpret_cast<uintptr_t>(base) + blinkPageOffsetMask) & blinkPageBaseMask);
}

static size_t roundToOsPageSize(size_t size)
{
    return (size + osPageSize() - 1) & ~(osPageSize() - 1);
}
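
// Worked example (illustrative values, assuming a 4 KiB OS page and a
// 128 KiB blink page): roundToOsPageSize(5000) yields 8192, and
// roundToBlinkPageBoundary rounds a base address of 0x20001000 up to the
// next 128 KiB boundary, 0x20020000.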
size_t osPageSize()
{
#if OS(POSIX)
    static const size_t pageSize = getpagesize();
#else
    static size_t pageSize = 0;
    if (!pageSize) {
        SYSTEM_INFO info;
        GetSystemInfo(&info);
        pageSize = info.dwPageSize;
        ASSERT(IsPowerOf2(pageSize));
    }
#endif
    return pageSize;
}
class MemoryRegion {
public:
    MemoryRegion(Address base, size_t size) : m_base(base), m_size(size) { ASSERT(size > 0); }

    bool contains(Address addr) const
    {
        return m_base <= addr && addr < (m_base + m_size);
    }

    bool contains(const MemoryRegion& other) const
    {
        return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
    }

    void release()
    {
#if OS(POSIX)
        int err = munmap(m_base, m_size);
        RELEASE_ASSERT(!err);
#else
        bool success = VirtualFree(m_base, 0, MEM_RELEASE);
        RELEASE_ASSERT(success);
#endif
    }

    WARN_UNUSED_RETURN bool commit()
    {
#if OS(POSIX)
        int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE);
        if (!err)
            madvise(m_base, m_size, MADV_NORMAL);
        return !err;
#else
        void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE);
        return !!result;
#endif
    }

    void decommit()
    {
#if OS(POSIX)
        int err = mprotect(m_base, m_size, PROT_NONE);
        RELEASE_ASSERT(!err);
        // FIXME: Consider using MADV_FREE on MacOS.
        madvise(m_base, m_size, MADV_DONTNEED);
#else
        bool success = VirtualFree(m_base, m_size, MEM_DECOMMIT);
        RELEASE_ASSERT(success);
#endif
    }

    Address base() const { return m_base; }

private:
    Address m_base;
    size_t m_size;
};
// Representation of the memory used for a Blink heap page.
//
// The representation keeps track of two memory regions:
//
// 1. The virtual memory reserved from the system in order to be able
//    to free all the virtual memory reserved on destruction.
//
// 2. The writable memory (a sub-region of the reserved virtual
//    memory region) that is used for the actual heap page payload.
//
// Guard pages are created before and after the writable memory.
class PageMemory {
public:
    ~PageMemory() { m_reserved.release(); }

    bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); }
    void decommit() { m_writable.decommit(); }

    Address writableStart() { return m_writable.base(); }

    // Allocate a virtual address space for the blink page with the
    // following layout:
    //
    //    [ guard os page | ... payload ... | guard os page ]
    //    ^---{ aligned to blink page size }
    //
    static PageMemory* allocate(size_t payloadSize)
    {
        ASSERT(payloadSize > 0);

        // Virtual memory allocation routines operate in OS page sizes.
        // Round up the requested size to the nearest OS page size.
        payloadSize = roundToOsPageSize(payloadSize);

        // Overallocate by blinkPageSize and 2 times OS page size to
        // ensure a chunk of memory which is blinkPageSize aligned and
        // has a system page before and after to use for guarding. We
        // unmap the excess memory before returning.
        size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize;

#if OS(POSIX)
        Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0));
        RELEASE_ASSERT(base != MAP_FAILED);

        Address end = base + allocationSize;
        Address alignedBase = roundToBlinkPageBoundary(base);
        Address payloadBase = alignedBase + osPageSize();
        Address payloadEnd = payloadBase + payloadSize;
        Address blinkPageEnd = payloadEnd + osPageSize();

        // If the allocated memory was not blink page aligned, release
        // the memory before the aligned address.
        if (alignedBase != base)
            MemoryRegion(base, alignedBase - base).release();

        // Create guard pages by decommitting an OS page before and
        // after the payload.
        MemoryRegion(alignedBase, osPageSize()).decommit();
        MemoryRegion(payloadEnd, osPageSize()).decommit();

        // Free the additional memory at the end of the page if any.
        if (blinkPageEnd < end)
            MemoryRegion(blinkPageEnd, end - blinkPageEnd).release();

        return new PageMemory(MemoryRegion(alignedBase, blinkPageEnd - alignedBase), MemoryRegion(payloadBase, payloadSize));
#else
        Address base = 0;
        Address alignedBase = 0;

        // On Windows it is impossible to partially release a region
        // of memory allocated by VirtualAlloc. To avoid wasting
        // virtual address space we attempt to release a large region
        // of memory returned as a whole and then allocate an aligned
        // region inside this larger region.
        for (int attempt = 0; attempt < 3; attempt++) {
            base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
            RELEASE_ASSERT(base);
            VirtualFree(base, 0, MEM_RELEASE);

            alignedBase = roundToBlinkPageBoundary(base);
            base = static_cast<Address>(VirtualAlloc(alignedBase, payloadSize + 2 * osPageSize(), MEM_RESERVE, PAGE_NOACCESS));
            if (base) {
                RELEASE_ASSERT(base == alignedBase);
                allocationSize = payloadSize + 2 * osPageSize();
                break;
            }
        }

        if (!base) {
            // We failed to avoid wasting virtual address space after
            // several attempts.
            base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
            RELEASE_ASSERT(base);

            // FIXME: If base is by accident blink page size aligned
            // here then we can create two pages out of the reserved
            // space. Do this.
            alignedBase = roundToBlinkPageBoundary(base);
        }

        Address payloadBase = alignedBase + osPageSize();
        PageMemory* storage = new PageMemory(MemoryRegion(base, allocationSize), MemoryRegion(payloadBase, payloadSize));
        bool res = storage->commit();
        RELEASE_ASSERT(res);
        return storage;
#endif
    }
private:
    PageMemory(const MemoryRegion& reserved, const MemoryRegion& writable)
        : m_reserved(reserved)
        , m_writable(writable)
    {
        ASSERT(reserved.contains(writable));
    }

    MemoryRegion m_reserved;
    MemoryRegion m_writable;
};
class GCScope {
public:
    explicit GCScope(ThreadState::StackState stackState)
        : m_state(ThreadState::current())
        , m_safePointScope(stackState)
    {
        m_state->checkThread();

        // FIXME: In the unlikely coincidence that two threads decide
        // to collect garbage at the same time, avoid doing two GCs in
        // a row.
        ASSERT(!m_state->isInGC());
        ThreadState::stopThreads();
        m_state->enterGC();
    }

    ~GCScope()
    {
        m_state->leaveGC();
        ASSERT(!m_state->isInGC());
        ThreadState::resumeThreads();
    }

private:
    ThreadState* m_state;
    ThreadState::SafePointScope m_safePointScope;
};
NO_SANITIZE_ADDRESS
bool HeapObjectHeader::isMarked() const
{
    checkHeader();
    return m_size & markBitMask;
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::unmark()
{
    checkHeader();
    m_size &= ~markBitMask;
}

NO_SANITIZE_ADDRESS
bool HeapObjectHeader::hasDebugMark() const
{
    checkHeader();
    return m_size & debugBitMask;
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::clearDebugMark()
{
    checkHeader();
    m_size &= ~debugBitMask;
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::setDebugMark()
{
    checkHeader();
    m_size |= debugBitMask;
}
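
// A minimal sketch of the encoding used above (illustrative, not part of
// the header API): object sizes are always multiples of
// allocationGranularity (8 bytes), so the low bits of m_size are free to
// carry flags.
//
//   size_t encoded = actualSize | markBitMask;               // marked object
//   bool marked = encoded & markBitMask;                     // true
//   size_t actual = encoded & ~(markBitMask | debugBitMask); // actualSize
//
// The freelist bit written by BasicObjectHeader::freeListEncodedSize()
// further down works the same way.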
#ifndef NDEBUG
void HeapObjectHeader::zapMagic()
{
    m_magic = zappedMagic;
}
#endif
HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload)
{
    Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
    HeapObjectHeader* header =
        reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize);
    return header;
}
void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize)
{
    ASSERT(gcInfo);
    if (gcInfo->hasFinalizer()) {
        gcInfo->m_finalize(object);
    }
#ifndef NDEBUG
    for (size_t i = 0; i < objectSize; i++)
        object[i] = finalizedZapValue;
#endif
    // Zap the primary vTable entry (secondary vTable entries are not zapped).
    *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
}
void FinalizedHeapObjectHeader::finalize()
{
    HeapObjectHeader::finalize(m_gcInfo, payload(), payloadSize());
}
template<typename Header>
void LargeHeapObject<Header>::unmark()
{
    return heapObjectHeader()->unmark();
}

template<typename Header>
bool LargeHeapObject<Header>::isMarked()
{
    return heapObjectHeader()->isMarked();
}

template<typename Header>
bool LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
{
    if (contains(address)) {
        mark(visitor);
        return true;
    }
    return false;
}

template<>
void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
{
    visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback());
}

template<>
void LargeHeapObject<HeapObjectHeader>::mark(Visitor* visitor)
{
    ASSERT(gcInfo());
    visitor->mark(heapObjectHeader(), gcInfo()->m_trace);
}

template<>
void LargeHeapObject<FinalizedHeapObjectHeader>::finalize()
{
    heapObjectHeader()->finalize();
}

template<>
void LargeHeapObject<HeapObjectHeader>::finalize()
{
    ASSERT(gcInfo());
    HeapObjectHeader::finalize(gcInfo(), payload(), payloadSize());
}
FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload)
{
    Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
    FinalizedHeapObjectHeader* header =
        reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
    return header;
}
template<typename Header>
ThreadHeap<Header>::ThreadHeap(ThreadState* state)
    : m_currentAllocationPoint(0)
    , m_remainingAllocationSize(0)
    , m_firstPage(0)
    , m_firstLargeHeapObject(0)
    , m_biggestFreeListIndex(0)
    , m_threadState(state)
    , m_pagePool(0)
{
    clearFreeLists();
}
template<typename Header>
ThreadHeap<Header>::~ThreadHeap()
{
    clearFreeLists();
    if (!ThreadState::isMainThread())
        assertEmpty();
    deletePages();
}
template<typename Header>
Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
{
    size_t allocationSize = allocationSizeFromSize(size);
    if (threadState()->shouldGC()) {
        if (threadState()->shouldForceConservativeGC())
            Heap::collectGarbage(ThreadState::HeapPointersOnStack);
        else
            threadState()->setGCRequested();
    }
    ensureCurrentAllocation(allocationSize, gcInfo);
    return allocate(size, gcInfo);
}
template<typename Header>
bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
{
    size_t bucketSize = 1 << m_biggestFreeListIndex;
    int i = m_biggestFreeListIndex;
    for (; i > 0; i--, bucketSize >>= 1) {
        if (bucketSize < minSize)
            break;
        FreeListEntry* entry = m_freeLists[i];
        if (entry) {
            m_biggestFreeListIndex = i;
            entry->unlink(&m_freeLists[i]);
            setAllocationPoint(entry->address(), entry->size());
            ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize);
            return true;
        }
    }
    m_biggestFreeListIndex = i;
    return false;
}
template<typename Header>
void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
{
    ASSERT(minSize >= allocationGranularity);
    if (remainingAllocationSize() >= minSize)
        return;

    if (remainingAllocationSize() > 0)
        addToFreeList(currentAllocationPoint(), remainingAllocationSize());
    if (allocateFromFreeList(minSize))
        return;
    addPageToHeap(gcInfo);
    bool success = allocateFromFreeList(minSize);
    RELEASE_ASSERT(success);
}
template<typename Header>
BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address)
{
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
        if (page->contains(address))
            return page;
    }
    return 0;
}

template<typename Header>
BaseHeapPage* ThreadHeap<Header>::largeHeapObjectFromAddress(Address address)
{
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
        if (current->contains(address))
            return current;
    }
    return 0;
}

template<typename Header>
bool ThreadHeap<Header>::checkAndMarkLargeHeapObject(Visitor* visitor, Address address)
{
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
        if (current->checkAndMarkPointer(visitor, address))
            return true;
    }
    return false;
}
template<typename Header>
void ThreadHeap<Header>::addToFreeList(Address address, size_t size)
{
    ASSERT(heapPageFromAddress(address));
    ASSERT(heapPageFromAddress(address + size - 1));
    ASSERT(size < blinkPagePayloadSize());
    // The free list entries are only pointer aligned (but when we allocate
    // from them we are 8 byte aligned due to the header size).
    ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask));
    ASSERT(!(size & allocationMask));
    ASAN_POISON_MEMORY_REGION(address, size);
    FreeListEntry* entry;
    if (size < sizeof(*entry)) {
        // Create a dummy header with only a size and freelist bit set.
        ASSERT(size >= sizeof(BasicObjectHeader));
        // Free-list encode the size to mark the lost memory as freelist memory.
        new (NotNull, address) BasicObjectHeader(BasicObjectHeader::freeListEncodedSize(size));
        // This memory gets lost. Sweeping can reclaim it.
        return;
    }
    entry = new (NotNull, address) FreeListEntry(size);
#if defined(ADDRESS_SANITIZER)
    // For ASAN we don't add the entry to the free lists until the asanDeferMemoryReuseCount
    // reaches zero. However we always add entire pages to ensure that adding a new page will
    // increase the allocation space.
    if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList())
        return;
#endif
    int index = bucketIndexForSize(size);
    entry->link(&m_freeLists[index]);
    if (index > m_biggestFreeListIndex)
        m_biggestFreeListIndex = index;
}
template<typename Header>
Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInfo)
{
    // The caller already added space for the object header and rounded up to
    // allocation alignment.
    ASSERT(!(size & allocationMask));

    size_t allocationSize = sizeof(LargeHeapObject<Header>) + size;

    // Ensure that there is enough space for alignment. If the header
    // is not a multiple of 8 bytes we will allocate an extra
    // headerPadding<Header> bytes to ensure it is 8 byte aligned.
    allocationSize += headerPadding<Header>();

    // If ASAN is supported we add allocationGranularity bytes to the allocated
    // space and poison that to detect overflows.
#if defined(ADDRESS_SANITIZER)
    allocationSize += allocationGranularity;
#endif
    if (threadState()->shouldGC())
        threadState()->setGCRequested();
    PageMemory* pageMemory = PageMemory::allocate(allocationSize);
    Address largeObjectAddress = pageMemory->writableStart();
    Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
    memset(headerAddress, 0, size);
    Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
    Address result = headerAddress + sizeof(*header);
    ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
    LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo);

    // Poison the object header and allocationGranularity bytes after the object.
    ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
    ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
    largeObject->link(&m_firstLargeHeapObject);
    stats().increaseAllocatedSpace(largeObject->size());
    stats().increaseObjectSpace(largeObject->payloadSize());
    return result;
}
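
// Illustrative memory layout of the block built above (not to scale;
// 'size' already includes space for the object header itself):
//
//   pageMemory->writableStart()
//   v
//   [ LargeHeapObject<Header> | headerPadding | Header | payload ... ]
//                                                      ^
//                                                      result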
template<typename Header>
void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
{
    object->unlink(previousNext);
    object->finalize();

    // Unpoison the object header and allocationGranularity bytes after the
    // object before freeing.
    ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
    ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
    delete object->storage();
}
template<>
void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
{
    // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders
    // the GCInfo on the heap should be unused (i.e. 0).
    allocatePage(0);
}

template<>
void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
{
    // When adding a page to the ThreadHeap using HeapObjectHeaders store the
    // GCInfo on the heap since it is the same for all objects on the page.
    ASSERT(gcInfo);
    allocatePage(gcInfo);
}
template<typename Header>
void ThreadHeap<Header>::clearPagePool()
{
    while (takePageFromPool()) { }
}

template<typename Header>
PageMemory* ThreadHeap<Header>::takePageFromPool()
{
    while (PagePoolEntry* entry = m_pagePool) {
        m_pagePool = entry->next();
        PageMemory* storage = entry->storage();
        delete entry;

        if (storage->commit())
            return storage;

        // Failed to commit pooled storage. Release it.
        delete storage;
    }
    return 0;
}
template<typename Header>
void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused)
{
    PageMemory* storage = unused->storage();
    PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool);
    m_pagePool = entry;
    storage->decommit();
}
template<typename Header>
void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
{
    heapContainsCache()->flush();
    PageMemory* pageMemory = takePageFromPool();
    if (!pageMemory) {
        pageMemory = PageMemory::allocate(blinkPagePayloadSize());
        RELEASE_ASSERT(pageMemory);
    }
    HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
    page->link(&m_firstPage);
    addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
}
template<typename Header>
void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats)
{
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
        page->getStats(scannedStats);
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
        current->getStats(scannedStats);
}
template<typename Header>
void ThreadHeap<Header>::sweep()
{
    ASSERT(isConsistentForGC());
#if defined(ADDRESS_SANITIZER)
    // When using ASAN do a pre-sweep where all unmarked objects are poisoned before
    // calling their finalizer methods. This can catch the cases where one object's
    // finalizer tries to modify another object as part of finalization.
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
        page->poisonUnmarkedObjects();
#endif
    HeapPage<Header>* page = m_firstPage;
    HeapPage<Header>** previous = &m_firstPage;
    bool pagesRemoved = false;
    while (page) {
        if (page->isEmpty()) {
            HeapPage<Header>* unused = page;
            page = page->next();
            HeapPage<Header>::unlink(unused, previous);
            pagesRemoved = true;
        } else {
            page->sweep();
            previous = &page->m_next;
            page = page->next();
        }
    }
    if (pagesRemoved)
        heapContainsCache()->flush();

    LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
        if (current->isMarked()) {
            stats().increaseAllocatedSpace(current->size());
            stats().increaseObjectSpace(current->payloadSize());
            current->unmark();
            previousNext = &current->m_next;
            current = current->next();
        } else {
            LargeHeapObject<Header>* next = current->next();
            freeLargeObject(current, previousNext);
            current = next;
        }
    }
}
template<typename Header>
void ThreadHeap<Header>::assertEmpty()
{
    // No nested GCs are permitted. The thread is exiting.
    NoAllocationScope<AnyThread> noAllocation;
    makeConsistentForGC();
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
        Address end = page->end();
        Address headerAddress;
        for (headerAddress = page->payload(); headerAddress < end; ) {
            BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
            ASSERT(basicHeader->size() < blinkPagePayloadSize());
            // A live object is potentially a dangling pointer from some root.
            // Treat it as a critical bug both in release and debug mode.
            RELEASE_ASSERT(basicHeader->isFree());
            headerAddress += basicHeader->size();
        }
        ASSERT(headerAddress == end);
        addToFreeList(page->payload(), end - page->payload());
    }

    RELEASE_ASSERT(!m_firstLargeHeapObject);
}
template<typename Header>
bool ThreadHeap<Header>::isConsistentForGC()
{
    for (size_t i = 0; i < blinkPageSizeLog2; i++) {
        if (m_freeLists[i])
            return false;
    }
    return !ownsNonEmptyAllocationArea();
}
template<typename Header>
void ThreadHeap<Header>::makeConsistentForGC()
{
    if (ownsNonEmptyAllocationArea())
        addToFreeList(currentAllocationPoint(), remainingAllocationSize());
    setAllocationPoint(0, 0);
    clearFreeLists();
}
template<typename Header>
void ThreadHeap<Header>::clearMarks()
{
    ASSERT(isConsistentForGC());
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
        page->clearMarks();
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
        current->unmark();
}
template<typename Header>
void ThreadHeap<Header>::deletePages()
{
    heapContainsCache()->flush();
    // Add all pages in the pool to the heap's list of pages before deleting
    // them.
    clearPagePool();

    for (HeapPage<Header>* page = m_firstPage; page; ) {
        HeapPage<Header>* dead = page;
        page = page->next();
        PageMemory* storage = dead->storage();
        dead->~HeapPage();
        delete storage;
    }
    m_firstPage = 0;

    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
        LargeHeapObject<Header>* dead = current;
        current = current->next();
        PageMemory* storage = dead->storage();
        dead->~LargeHeapObject();
        delete storage;
    }
    m_firstLargeHeapObject = 0;
}
template<typename Header>
void ThreadHeap<Header>::clearFreeLists()
{
    for (size_t i = 0; i < blinkPageSizeLog2; i++)
        m_freeLists[i] = 0;
}
int BaseHeap::bucketIndexForSize(size_t size)
{
    ASSERT(size > 0);
    int index = -1;
    while (size) {
        size >>= 1;
        index++;
    }
    return index;
}
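
// Example: bucketIndexForSize(48) == 5, so a 48-byte entry is linked into
// bucket 5 along with all free entries of size 32..63. Since
// allocateFromFreeList() only takes from buckets where (1 << index) is at
// least minSize, every entry it finds there is guaranteed to be big enough.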
template<typename Header>
HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo)
    : BaseHeapPage(storage, gcInfo)
    , m_next(0)
    , m_heap(heap)
{
    COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
    m_objectStartBitMapComputed = false;
    ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
    heap->stats().increaseAllocatedSpace(blinkPageSize);
}
template<typename Header>
void HeapPage<Header>::link(HeapPage** prevNext)
{
    m_next = *prevNext;
    *prevNext = this;
}

template<typename Header>
void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext)
{
    *prevNext = unused->m_next;
    unused->heap()->addPageToPool(unused);
}
template<typename Header>
void HeapPage<Header>::getStats(HeapStats& stats)
{
    stats.increaseAllocatedSpace(blinkPageSize);
    Address headerAddress = payload();
    ASSERT(headerAddress != end());
    do {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        if (!header->isFree())
            stats.increaseObjectSpace(header->payloadSize());
        ASSERT(header->size() < blinkPagePayloadSize());
        headerAddress += header->size();
        ASSERT(headerAddress <= end());
    } while (headerAddress < end());
}
template<typename Header>
bool HeapPage<Header>::isEmpty()
{
    BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
    return header->isFree() && (header->size() == payloadSize());
}
template<typename Header>
void HeapPage<Header>::sweep()
{
    clearObjectStartBitMap();
    heap()->stats().increaseAllocatedSpace(blinkPageSize);
    Address startOfGap = payload();
    for (Address headerAddress = startOfGap; headerAddress < end(); ) {
        BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
        ASSERT(basicHeader->size() < blinkPagePayloadSize());

        if (basicHeader->isFree()) {
            headerAddress += basicHeader->size();
            continue;
        }
        // At this point we know this is a valid object of type Header.
        Header* header = static_cast<Header*>(basicHeader);

        if (!header->isMarked()) {
            // For ASAN we unpoison the specific object when calling the finalizer and
            // poison it again when done to allow the object's own finalizer to operate
            // on the object, but not have other finalizers be allowed to access it.
            ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
            finalize(header);
            ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
            headerAddress += header->size();
            continue;
        }

        if (startOfGap != headerAddress)
            heap()->addToFreeList(startOfGap, headerAddress - startOfGap);
        header->unmark();
        headerAddress += header->size();
        heap()->stats().increaseObjectSpace(header->payloadSize());
        startOfGap = headerAddress;
    }
    if (startOfGap != end())
        heap()->addToFreeList(startOfGap, end() - startOfGap);
}
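
// Sweep sketch (illustrative): for a page laid out as
//
//   [ live A | dead B | dead C | live D ]
//
// the loop above finalizes B and C as it passes them and only emits a free
// list entry when it reaches D, so the whole B..C range becomes a single
// coalesced free entry instead of two separate ones.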
template<typename Header>
void HeapPage<Header>::clearMarks()
{
    for (Address headerAddress = payload(); headerAddress < end();) {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        ASSERT(header->size() < blinkPagePayloadSize());
        if (!header->isFree())
            header->unmark();
        headerAddress += header->size();
    }
}
template<typename Header>
void HeapPage<Header>::populateObjectStartBitMap()
{
    memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
    Address start = payload();
    for (Address headerAddress = start; headerAddress < end();) {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        size_t objectOffset = headerAddress - start;
        ASSERT(!(objectOffset & allocationMask));
        size_t objectStartNumber = objectOffset / allocationGranularity;
        size_t mapIndex = objectStartNumber / 8;
        ASSERT(mapIndex < objectStartBitMapSize);
        m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7));
        headerAddress += header->size();
        ASSERT(headerAddress <= end());
    }
    m_objectStartBitMapComputed = true;
}
template<typename Header>
void HeapPage<Header>::clearObjectStartBitMap()
{
    m_objectStartBitMapComputed = false;
}
static int numberOfLeadingZeroes(uint8_t byte)
{
    if (!byte)
        return 8;
    int result = 0;
    if (byte <= 0x0F) {
        result += 4;
        byte = byte << 4;
    }
    if (byte <= 0x3F) {
        result += 2;
        byte = byte << 2;
    }
    if (byte <= 0x7F)
        result++;
    return result;
}
template<typename Header>
bool HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address addr)
{
    if (addr < payload())
        return false;
    if (!isObjectStartBitMapComputed())
        populateObjectStartBitMap();
    size_t objectOffset = addr - payload();
    size_t objectStartNumber = objectOffset / allocationGranularity;
    size_t mapIndex = objectStartNumber / 8;
    ASSERT(mapIndex < objectStartBitMapSize);
    size_t bit = objectStartNumber & 7;
    uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1);
    while (!byte) {
        ASSERT(mapIndex > 0);
        byte = m_objectStartBitMap[--mapIndex];
    }
    int leadingZeroes = numberOfLeadingZeroes(byte);
    objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
    objectOffset = objectStartNumber * allocationGranularity;
    Address objectAddress = objectOffset + payload();
    Header* header = reinterpret_cast<Header*>(objectAddress);
    if (header->isFree())
        return false;

    visitor->mark(header, traceCallback(header));
    return true;
}
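
// Worked example (illustrative, assuming allocationGranularity == 8): an
// interior pointer at payload offset 70 maps to objectStartNumber 8, which
// is bit 0 of bitmap byte 1. If an object starts at payload offset 64 that
// bit is set, numberOfLeadingZeroes() returns 7, and the search resolves to
// objectStartNumber (1 * 8) + 7 - 7 == 8, i.e. the header at offset 64.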
#if defined(ADDRESS_SANITIZER)
template<typename Header>
void HeapPage<Header>::poisonUnmarkedObjects()
{
    for (Address headerAddress = payload(); headerAddress < end(); ) {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        ASSERT(header->size() < blinkPagePayloadSize());

        if (!header->isFree() && !header->isMarked())
            ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
        headerAddress += header->size();
    }
}
#endif
template<>
inline void HeapPage<FinalizedHeapObjectHeader>::finalize(FinalizedHeapObjectHeader* header)
{
    header->finalize();
}

template<>
inline void HeapPage<HeapObjectHeader>::finalize(HeapObjectHeader* header)
{
    ASSERT(gcInfo());
    HeapObjectHeader::finalize(gcInfo(), header->payload(), header->payloadSize());
}

template<>
inline TraceCallback HeapPage<HeapObjectHeader>::traceCallback(HeapObjectHeader* header)
{
    ASSERT(gcInfo());
    return gcInfo()->m_trace;
}

template<>
inline TraceCallback HeapPage<FinalizedHeapObjectHeader>::traceCallback(FinalizedHeapObjectHeader* header)
{
    return header->traceCallback();
}
template<typename Header>
void LargeHeapObject<Header>::getStats(HeapStats& stats)
{
    stats.increaseAllocatedSpace(size());
    stats.increaseObjectSpace(payloadSize());
}
HeapContainsCache::HeapContainsCache()
    : m_entries(adoptArrayPtr(new Entry[HeapContainsCache::numberOfEntries]))
{
}

void HeapContainsCache::flush()
{
    for (int i = 0; i < numberOfEntries; i++)
        m_entries[i] = Entry();
}
size_t HeapContainsCache::hash(Address address)
{
    size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2);
    value ^= value >> numberOfEntriesLog2;
    value ^= value >> (numberOfEntriesLog2 * 2);
    value &= numberOfEntries - 1;
    return value & ~1; // Returns only even indices.
}
bool HeapContainsCache::lookup(Address address, BaseHeapPage** page)
{
    ASSERT(page);
    size_t index = hash(address);
    ASSERT(!(index & 1));
    Address cachePage = roundToBlinkPageStart(address);
    if (m_entries[index].address() == cachePage) {
        *page = m_entries[index].containingPage();
        return true;
    }
    if (m_entries[index + 1].address() == cachePage) {
        *page = m_entries[index + 1].containingPage();
        return true;
    }
    *page = 0;
    return false;
}
void HeapContainsCache::addEntry(Address address, BaseHeapPage* page)
{
    size_t index = hash(address);
    ASSERT(!(index & 1));
    Address cachePage = roundToBlinkPageStart(address);
    m_entries[index + 1] = m_entries[index];
    m_entries[index] = Entry(cachePage, page);
}
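
// Cache behaviour sketch: each hash bucket is a pair of slots. addEntry()
// always writes the new mapping into the even slot and demotes the previous
// occupant to the odd slot, so a bucket remembers the two most recently
// added blink pages and lookup() probes both before reporting a miss.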
void CallbackStack::init(CallbackStack** first)
{
    // The stacks are chained, so we start by setting this to null as terminator.
    *first = 0;
    *first = new CallbackStack(first);
}
void CallbackStack::shutdown(CallbackStack** first)
{
    CallbackStack* next;
    for (CallbackStack* current = *first; current; current = next) {
        next = current->m_next;
        delete current;
    }
    *first = 0;
}
CallbackStack::~CallbackStack()
{
#ifndef NDEBUG
    clearUnused();
#endif
}
void CallbackStack::clearUnused()
{
    ASSERT(m_current == &(m_buffer[0]));
    for (size_t i = 0; i < bufferSize; i++)
        m_buffer[i] = Item(0, 0);
}

void CallbackStack::assertIsEmpty()
{
    ASSERT(m_current == &(m_buffer[0]));
    ASSERT(!m_next);
}
bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor)
{
    if (m_current == &(m_buffer[0])) {
        if (!m_next) {
#ifndef NDEBUG
            clearUnused();
#endif
            return false;
        }
        CallbackStack* nextStack = m_next;
        *first = nextStack;
        delete this;
        return nextStack->popAndInvokeCallback(first, visitor);
    }
    Item* item = --m_current;

    VisitorCallback callback = item->callback();
    callback(visitor, item->object());

    return true;
}
class MarkingVisitor : public Visitor {
public:
    inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback)
    {
        ASSERT(header);
        ASSERT(objectPointer);
        if (header->isMarked())
            return;
        header->mark();
        if (callback)
            Heap::pushTraceCallback(const_cast<void*>(objectPointer), callback);
    }

    virtual void mark(HeapObjectHeader* header, TraceCallback callback) OVERRIDE
    {
        // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
        // versions to correctly find the payload.
        visitHeader(header, header->payload(), callback);
    }

    virtual void mark(FinalizedHeapObjectHeader* header, TraceCallback callback) OVERRIDE
    {
        // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
        // versions to correctly find the payload.
        visitHeader(header, header->payload(), callback);
    }

    virtual void mark(const void* objectPointer, TraceCallback callback) OVERRIDE
    {
        if (!objectPointer)
            return;
        FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(objectPointer);
        visitHeader(header, header->payload(), callback);
    }

    virtual void registerWeakMembers(const void* containingObject, WeakPointerCallback callback) OVERRIDE
    {
        Heap::pushWeakPointerCallback(const_cast<void*>(containingObject), callback);
    }

    virtual bool isMarked(const void* objectPointer) OVERRIDE
    {
        return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked();
    }
    // This macro defines the necessary visitor methods for typed heaps.
#define DEFINE_VISITOR_METHODS(Type)                                              \
    virtual void mark(const Type* objectPointer, TraceCallback callback) OVERRIDE \
    {                                                                             \
        if (!objectPointer)                                                       \
            return;                                                               \
        HeapObjectHeader* header =                                                \
            HeapObjectHeader::fromPayload(objectPointer);                         \
        visitHeader(header, header->payload(), callback);                         \
    }                                                                             \
    virtual bool isMarked(const Type* objectPointer) OVERRIDE                     \
    {                                                                             \
        return HeapObjectHeader::fromPayload(objectPointer)->isMarked();          \
    }

    FOR_EACH_TYPED_HEAP(DEFINE_VISITOR_METHODS)
#undef DEFINE_VISITOR_METHODS
};
void Heap::init()
{
    ThreadState::init();
    CallbackStack::init(&s_markingStack);
    CallbackStack::init(&s_weakCallbackStack);
}
void Heap::shutdown()
{
    ThreadState::shutdown();
    CallbackStack::shutdown(&s_markingStack);
    CallbackStack::shutdown(&s_weakCallbackStack);
}
bool Heap::contains(Address address)
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        if ((*it)->contains(address))
            return true;
    }
    return false;
}
Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        if ((*it)->checkAndMarkPointer(visitor, address)) {
            // Pointer found and marked.
            return address;
        }
    }
    return 0;
}
void Heap::pushTraceCallback(void* object, TraceCallback callback)
{
    ASSERT(Heap::contains(object));
    CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack);
    *slot = CallbackStack::Item(object, callback);
}

bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
{
    return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor);
}

void Heap::pushWeakPointerCallback(void* object, WeakPointerCallback callback)
{
    ASSERT(Heap::contains(object));
    CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallbackStack);
    *slot = CallbackStack::Item(object, callback);
}

bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
{
    return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visitor);
}
void Heap::prepareForGC()
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
        (*it)->prepareForGC();
}
void Heap::collectGarbage(ThreadState::StackState stackState, GCType gcType)
{
    ThreadState::current()->clearGCRequested();
    GCScope gcScope(stackState);

    // Disallow allocation during garbage collection.
    NoAllocationScope<AnyThread> noAllocationScope;
    prepareForGC();

    MarkingVisitor marker;

    ThreadState::visitRoots(&marker);
    // Recursively mark all objects that are reachable from the roots.
    while (popAndInvokeTraceCallback(&marker)) { }

    // Call weak callbacks on objects that may now be pointing to dead
    // objects.
    while (popAndInvokeWeakPointerCallback(&marker)) { }

    // It is not permitted to trace pointers of live objects in the weak
    // callback phase, so the marking stack should still be empty here.
    s_markingStack->assertIsEmpty();
}
void Heap::getStats(HeapStats* stats)
{
    stats->clear();
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
    for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        HeapStats temp;
        (*it)->getStats(temp);
        stats->add(&temp);
    }
}
bool Heap::isConsistentForGC()
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        if (!(*it)->isConsistentForGC())
            return false;
    }
    return true;
}
void Heap::makeConsistentForGC()
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
        (*it)->makeConsistentForGC();
}
// Force template instantiations for the types that we need.
template class HeapPage<FinalizedHeapObjectHeader>;
template class HeapPage<HeapObjectHeader>;
template class ThreadHeap<FinalizedHeapObjectHeader>;
template class ThreadHeap<HeapObjectHeader>;

CallbackStack* Heap::s_markingStack;
CallbackStack* Heap::s_weakCallbackStack;

} // namespace WebCore