/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "heap/Heap.h"

#include "heap/ThreadState.h"

#include "wtf/PassOwnPtr.h"

#if OS(POSIX)
#include <sys/mman.h>
#include <unistd.h>
#else
#include <windows.h>
#endif

namespace WebCore {
#if OS(WIN)
static bool IsPowerOf2(size_t power)
{
    return !((power - 1) & power);
}
#endif
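// A quick worked example of the check above: for 0x1000 (a power of two),
// (0xFFF & 0x1000) == 0, so the function returns true; for 0x1800,
// (0x17FF & 0x1800) == 0x1000, so it returns false. Zero also passes the
// check, but callers only hand in non-zero page sizes.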
static Address roundToBlinkPageBoundary(void* base)
{
    return reinterpret_cast<Address>((reinterpret_cast<uintptr_t>(base) + blinkPageOffsetMask) & blinkPageBaseMask);
}

static size_t roundToOsPageSize(size_t size)
{
    return (size + osPageSize() - 1) & ~(osPageSize() - 1);
}
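// Example (assuming 4 KiB OS pages): roundToOsPageSize(1) == 4096 and
// roundToOsPageSize(4096) == 4096, i.e. sizes are rounded up to the next
// multiple of the OS page size and already-aligned sizes are left unchanged.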
size_t osPageSize()
{
#if OS(POSIX)
    static const size_t pageSize = getpagesize();
#else
    static size_t pageSize = 0;
    if (!pageSize) {
        SYSTEM_INFO info;
        GetSystemInfo(&info);
        pageSize = info.dwPageSize;
        ASSERT(IsPowerOf2(pageSize));
    }
#endif
    return pageSize;
}
class MemoryRegion {
public:
    MemoryRegion(Address base, size_t size) : m_base(base), m_size(size) { ASSERT(size > 0); }

    bool contains(Address addr) const
    {
        return m_base <= addr && addr < (m_base + m_size);
    }

    bool contains(const MemoryRegion& other) const
    {
        return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
    }

    void release()
    {
#if OS(POSIX)
        int err = munmap(m_base, m_size);
        RELEASE_ASSERT(!err);
#else
        bool success = VirtualFree(m_base, 0, MEM_RELEASE);
        RELEASE_ASSERT(success);
#endif
    }

    WARN_UNUSED_RETURN bool commit()
    {
#if OS(POSIX)
        int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE);
        if (!err)
            madvise(m_base, m_size, MADV_NORMAL);
        return !err;
#else
        void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE);
        return !!result;
#endif
    }

    void decommit()
    {
#if OS(POSIX)
        int err = mprotect(m_base, m_size, PROT_NONE);
        RELEASE_ASSERT(!err);
        // FIXME: Consider using MADV_FREE on MacOS.
        madvise(m_base, m_size, MADV_DONTNEED);
#else
        bool success = VirtualFree(m_base, m_size, MEM_DECOMMIT);
        RELEASE_ASSERT(success);
#endif
    }

    Address base() const { return m_base; }

private:
    Address m_base;
    size_t m_size;
};
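// Note that decommit() hands the physical pages back to the OS (PROT_NONE plus
// MADV_DONTNEED on POSIX, MEM_DECOMMIT on Windows) while keeping the virtual
// address range reserved, so a pooled page can later be commit()ed again at
// the same address.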
// Representation of the memory used for a Blink heap page.
//
// The representation keeps track of two memory regions:
//
// 1. The virtual memory reserved from the system in order to be able
//    to free all the virtual memory reserved on destruction.
//
// 2. The writable memory (a sub-region of the reserved virtual
//    memory region) that is used for the actual heap page payload.
//
// Guard pages are created before and after the writable memory.
class PageMemory {
public:
    ~PageMemory() { m_reserved.release(); }
    bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); }
    void decommit() { m_writable.decommit(); }

    Address writableStart() { return m_writable.base(); }
    // Allocate a virtual address space for the blink page with the
    // following layout:
    //
    //    [ guard os page | ... payload ... | guard os page ]
    //    ^---{ aligned to blink page size }
    static PageMemory* allocate(size_t payloadSize)
    {
        ASSERT(payloadSize > 0);

        // Virtual memory allocation routines operate in OS page sizes.
        // Round up the requested size to the nearest OS page size.
        payloadSize = roundToOsPageSize(payloadSize);

        // Overallocate by blinkPageSize and 2 times OS page size to
        // ensure a chunk of memory which is blinkPageSize aligned and
        // has a system page before and after to use for guarding. We
        // unmap the excess memory before returning.
        size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize;

#if OS(POSIX)
        Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0));
        RELEASE_ASSERT(base != MAP_FAILED);

        Address end = base + allocationSize;
        Address alignedBase = roundToBlinkPageBoundary(base);
        Address payloadBase = alignedBase + osPageSize();
        Address payloadEnd = payloadBase + payloadSize;
        Address blinkPageEnd = payloadEnd + osPageSize();

        // If the allocated memory was not blink page aligned, release
        // the memory before the aligned address.
        if (alignedBase != base)
            MemoryRegion(base, alignedBase - base).release();

        // Create guard pages by decommitting an OS page before and
        // after the payload.
        MemoryRegion(alignedBase, osPageSize()).decommit();
        MemoryRegion(payloadEnd, osPageSize()).decommit();

        // Free the additional memory at the end of the page if any.
        if (blinkPageEnd < end)
            MemoryRegion(blinkPageEnd, end - blinkPageEnd).release();

        return new PageMemory(MemoryRegion(alignedBase, blinkPageEnd - alignedBase), MemoryRegion(payloadBase, payloadSize));
#else
        Address base = 0;
        Address alignedBase = 0;

        // On Windows it is impossible to partially release a region
        // of memory allocated by VirtualAlloc. To avoid wasting
        // virtual address space we attempt to release a large region
        // of memory returned as a whole and then allocate an aligned
        // region inside this larger region.
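        // Between releasing the oversized reservation and re-reserving at the
        // aligned address, another thread may allocate part of that range, so
        // the aligned VirtualAlloc below can fail; hence the bounded number of
        // attempts.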
        for (int attempt = 0; attempt < 3; attempt++) {
            base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
            RELEASE_ASSERT(base);
            VirtualFree(base, 0, MEM_RELEASE);

            alignedBase = roundToBlinkPageBoundary(base);
            base = static_cast<Address>(VirtualAlloc(alignedBase, payloadSize + 2 * osPageSize(), MEM_RESERVE, PAGE_NOACCESS));
            if (base) {
                RELEASE_ASSERT(base == alignedBase);
                allocationSize = payloadSize + 2 * osPageSize();
                break;
            }
        }

        if (!base) {
            // We failed to avoid wasting virtual address space after
            // several attempts.
            base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
            RELEASE_ASSERT(base);

            // FIXME: If base is by accident blink page size aligned
            // here then we can create two pages out of reserved
            // space. Do this.
            alignedBase = roundToBlinkPageBoundary(base);
        }

        Address payloadBase = alignedBase + osPageSize();
        PageMemory* storage = new PageMemory(MemoryRegion(base, allocationSize), MemoryRegion(payloadBase, payloadSize));
        bool res = storage->commit();
        RELEASE_ASSERT(res);
        return storage;
#endif
    }
private:
    PageMemory(const MemoryRegion& reserved, const MemoryRegion& writable)
        : m_reserved(reserved)
        , m_writable(writable)
    {
        ASSERT(reserved.contains(writable));
    }

    MemoryRegion m_reserved;
    MemoryRegion m_writable;
};
class GCScope {
public:
    explicit GCScope(ThreadState::StackState stackState)
        : m_state(ThreadState::current())
        , m_safePointScope(stackState)
    {
        m_state->checkThread();

        // FIXME: in the unlikely coincidence that two threads decide
        // to collect garbage at the same time, avoid doing two GCs in
        // a row.
        RELEASE_ASSERT(!m_state->isInGC());
        RELEASE_ASSERT(!m_state->isSweepInProgress());
        ThreadState::stopThreads();
        m_state->enterGC();
    }

    ~GCScope()
    {
        m_state->leaveGC();
        ASSERT(!m_state->isInGC());
        ThreadState::resumeThreads();
    }

private:
    ThreadState* m_state;
    ThreadState::SafePointScope m_safePointScope;
};
NO_SANITIZE_ADDRESS
bool HeapObjectHeader::isMarked() const
{
    checkHeader();
    return m_size & markBitMask;
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::unmark()
{
    checkHeader();
    m_size &= ~markBitMask;
}

NO_SANITIZE_ADDRESS
bool HeapObjectHeader::hasDebugMark() const
{
    checkHeader();
    return m_size & debugBitMask;
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::clearDebugMark()
{
    checkHeader();
    m_size &= ~debugBitMask;
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::setDebugMark()
{
    checkHeader();
    m_size |= debugBitMask;
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::zapMagic()
{
    m_magic = zappedMagic;
}
HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload)
{
    Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
    HeapObjectHeader* header =
        reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize);
    return header;
}
void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize)
{
    ASSERT(gcInfo);
    if (gcInfo->hasFinalizer()) {
        gcInfo->m_finalize(object);
    }
#ifndef NDEBUG
    for (size_t i = 0; i < objectSize; i++)
        object[i] = finalizedZapValue;
#endif
    // Zap the primary vTable entry (secondary vTable entries are not zapped).
    *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
}
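// Zapping fills the dead object's memory with recognizable garbage values so
// that a stale pointer into it is likely to crash immediately instead of
// silently reading freed data.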
void FinalizedHeapObjectHeader::finalize()
{
    HeapObjectHeader::finalize(m_gcInfo, payload(), payloadSize());
}
template<typename Header>
void LargeHeapObject<Header>::unmark()
{
    return heapObjectHeader()->unmark();
}

template<typename Header>
bool LargeHeapObject<Header>::isMarked()
{
    return heapObjectHeader()->isMarked();
}

template<typename Header>
bool LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
{
    if (contains(address)) {
        mark(visitor);
        return true;
    }
    return false;
}
template<>
void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
{
    visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback());
}

template<>
void LargeHeapObject<HeapObjectHeader>::mark(Visitor* visitor)
{
    ASSERT(gcInfo());
    visitor->mark(heapObjectHeader(), gcInfo()->m_trace);
}

template<>
void LargeHeapObject<FinalizedHeapObjectHeader>::finalize()
{
    heapObjectHeader()->finalize();
}

template<>
void LargeHeapObject<HeapObjectHeader>::finalize()
{
    ASSERT(gcInfo());
    HeapObjectHeader::finalize(gcInfo(), payload(), payloadSize());
}
FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload)
{
    Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
    FinalizedHeapObjectHeader* header =
        reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
    return header;
}
template<typename Header>
ThreadHeap<Header>::ThreadHeap(ThreadState* state)
    : m_currentAllocationPoint(0)
    , m_remainingAllocationSize(0)
    , m_firstPage(0)
    , m_firstLargeHeapObject(0)
    , m_biggestFreeListIndex(0)
    , m_threadState(state)
    , m_pagePool(0)
{
    clearFreeLists();
}
template<typename Header>
ThreadHeap<Header>::~ThreadHeap()
{
    clearFreeLists();
    if (!ThreadState::isMainThread())
        assertEmpty();
    deletePages();
}
template<typename Header>
Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
{
    size_t allocationSize = allocationSizeFromSize(size);
    if (threadState()->shouldGC()) {
        if (threadState()->shouldForceConservativeGC())
            Heap::collectGarbage(ThreadState::HeapPointersOnStack);
        else
            threadState()->setGCRequested();
    }
    ensureCurrentAllocation(allocationSize, gcInfo);
    return allocate(size, gcInfo);
}
template<typename Header>
bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
{
    size_t bucketSize = 1 << m_biggestFreeListIndex;
    int i = m_biggestFreeListIndex;
    for (; i > 0; i--, bucketSize >>= 1) {
        if (bucketSize < minSize)
            break;
        FreeListEntry* entry = m_freeLists[i];
        if (entry) {
            m_biggestFreeListIndex = i;
            entry->unlink(&m_freeLists[i]);
            setAllocationPoint(entry->address(), entry->size());
            ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize);
            return true;
        }
    }
    m_biggestFreeListIndex = i;
    return false;
}
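// The free lists are segregated by size class: bucket i holds entries whose
// size is in [2^i, 2^(i+1)). Scanning from m_biggestFreeListIndex downward and
// stopping once bucketSize < minSize guarantees that any entry taken from a
// visited bucket is large enough for the requested allocation.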
template<typename Header>
void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
{
    ASSERT(minSize >= allocationGranularity);
    if (remainingAllocationSize() >= minSize)
        return;

    if (remainingAllocationSize() > 0)
        addToFreeList(currentAllocationPoint(), remainingAllocationSize());
    if (allocateFromFreeList(minSize))
        return;
    addPageToHeap(gcInfo);
    bool success = allocateFromFreeList(minSize);
    RELEASE_ASSERT(success);
}
template<typename Header>
BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address)
{
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
        if (page->contains(address))
            return page;
    }
    return 0;
}

template<typename Header>
BaseHeapPage* ThreadHeap<Header>::largeHeapObjectFromAddress(Address address)
{
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
        if (current->contains(address))
            return current;
    }
    return 0;
}
template<typename Header>
bool ThreadHeap<Header>::checkAndMarkLargeHeapObject(Visitor* visitor, Address address)
{
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
        if (current->checkAndMarkPointer(visitor, address))
            return true;
    }
    return false;
}
template<typename Header>
void ThreadHeap<Header>::addToFreeList(Address address, size_t size)
{
    ASSERT(heapPageFromAddress(address));
    ASSERT(heapPageFromAddress(address + size - 1));
    ASSERT(size < blinkPagePayloadSize());
    // The free list entries are only pointer aligned (but when we allocate
    // from them we are 8 byte aligned due to the header size).
    ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask));
    ASSERT(!(size & allocationMask));
    ASAN_POISON_MEMORY_REGION(address, size);
    FreeListEntry* entry;
    if (size < sizeof(*entry)) {
        // Create a dummy header with only a size and the freelist bit set.
        ASSERT(size >= sizeof(BasicObjectHeader));
        // Free-list-encode the size to mark the lost memory as freelist memory.
        new (NotNull, address) BasicObjectHeader(BasicObjectHeader::freeListEncodedSize(size));
        // This memory gets lost. Sweeping can reclaim it.
        return;
    }
    entry = new (NotNull, address) FreeListEntry(size);
#if defined(ADDRESS_SANITIZER)
    // For ASAN we don't add the entry to the free lists until the asanDeferMemoryReuseCount
    // reaches zero. However we always add entire pages to ensure that adding a new page will
    // increase the allocation space.
    if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList())
        return;
#endif
    int index = bucketIndexForSize(size);
    entry->link(&m_freeLists[index]);
    if (index > m_biggestFreeListIndex)
        m_biggestFreeListIndex = index;
}
template<typename Header>
Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInfo)
{
    // Caller already added space for the object header and rounded up to
    // allocation alignment.
    ASSERT(!(size & allocationMask));

    size_t allocationSize = sizeof(LargeHeapObject<Header>) + size;

    // Ensure that there is enough space for alignment. If the header
    // is not a multiple of 8 bytes we will allocate an extra
    // headerPadding<Header> bytes to ensure it is 8 byte aligned.
    allocationSize += headerPadding<Header>();

    // If ASAN is supported we add allocationGranularity bytes to the allocated space and
    // poison that to detect overflows.
#if defined(ADDRESS_SANITIZER)
    allocationSize += allocationGranularity;
#endif
    if (threadState()->shouldGC())
        threadState()->setGCRequested();
    PageMemory* pageMemory = PageMemory::allocate(allocationSize);
    Address largeObjectAddress = pageMemory->writableStart();
    Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
    memset(headerAddress, 0, size);
    Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
    Address result = headerAddress + sizeof(*header);
    ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
    LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState());

    // Poison the object header and allocationGranularity bytes after the object.
    ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
    ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
    largeObject->link(&m_firstLargeHeapObject);
    stats().increaseAllocatedSpace(largeObject->size());
    stats().increaseObjectSpace(largeObject->payloadSize());
    return result;
}
template<typename Header>
void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
{
    object->unlink(previousNext);
    object->finalize();

    // Unpoison the object header and allocationGranularity bytes after the
    // object before freeing.
    ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
    ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
    delete object->storage();
}
template<>
void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
{
    // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on
    // the heap should be unused (i.e. 0).
    allocatePage(0);
}

template<>
void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
{
    // When adding a page to the ThreadHeap using HeapObjectHeaders, store the GCInfo on the heap
    // since it is the same for all objects.
    ASSERT(gcInfo);
    allocatePage(gcInfo);
}
template<typename Header>
void ThreadHeap<Header>::clearPagePool()
{
    while (takePageFromPool()) { }
}
template<typename Header>
PageMemory* ThreadHeap<Header>::takePageFromPool()
{
    while (PagePoolEntry* entry = m_pagePool) {
        m_pagePool = entry->next();
        PageMemory* storage = entry->storage();
        delete entry;

        if (storage->commit())
            return storage;

        // Failed to commit pooled storage. Release it.
        delete storage;
    }
    return 0;
}
template<typename Header>
void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused)
{
    PageMemory* storage = unused->storage();
    PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool);
    m_pagePool = entry;
    storage->decommit();
}
template<typename Header>
void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
{
    heapContainsCache()->flush();
    PageMemory* pageMemory = takePageFromPool();
    if (!pageMemory) {
        pageMemory = PageMemory::allocate(blinkPagePayloadSize());
        RELEASE_ASSERT(pageMemory);
    }
    HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
    // FIXME: Oilpan: Linking new pages into the front of the list is
    // crucial when performing allocations during finalization because
    // it ensures that those pages are not swept in the current GC
    // round. We should create a separate page list for that to
    // separate out the pages allocated during finalization clearly
    // from the pages currently being swept.
    page->link(&m_firstPage);
    addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
}
template<typename Header>
void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats)
{
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
        page->getStats(scannedStats);
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
        current->getStats(scannedStats);
}
template<typename Header>
void ThreadHeap<Header>::sweep()
{
    ASSERT(isConsistentForGC());
#if defined(ADDRESS_SANITIZER)
    // When using ASAN do a pre-sweep where all unmarked objects are poisoned before
    // calling their finalizer methods. This can catch the cases where one object's
    // finalizer tries to modify another object as part of finalization.
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
        page->poisonUnmarkedObjects();
#endif

    HeapPage<Header>* page = m_firstPage;
    HeapPage<Header>** previous = &m_firstPage;
    bool pagesRemoved = false;
    while (page) {
        if (page->isEmpty()) {
            HeapPage<Header>* unused = page;
            page = page->next();
            HeapPage<Header>::unlink(unused, previous);
            pagesRemoved = true;
        } else {
            page->sweep();
            previous = &page->m_next;
            page = page->next();
        }
    }
    if (pagesRemoved)
        heapContainsCache()->flush();

    LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
        if (current->isMarked()) {
            stats().increaseAllocatedSpace(current->size());
            stats().increaseObjectSpace(current->payloadSize());
            current->unmark();
            previousNext = &current->m_next;
            current = current->next();
        } else {
            LargeHeapObject<Header>* next = current->next();
            freeLargeObject(current, previousNext);
            current = next;
        }
    }
}
template<typename Header>
void ThreadHeap<Header>::assertEmpty()
{
    // No allocations are permitted. The thread is exiting.
    NoAllocationScope<AnyThread> noAllocation;
    makeConsistentForGC();
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
        Address end = page->end();
        Address headerAddress;
        for (headerAddress = page->payload(); headerAddress < end; ) {
            BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
            ASSERT(basicHeader->size() < blinkPagePayloadSize());
            // A live object is potentially a dangling pointer from some root.
            // Treat it as a critical bug in both release and debug mode.
            RELEASE_ASSERT(basicHeader->isFree());
            headerAddress += basicHeader->size();
        }
        ASSERT(headerAddress == end);
        addToFreeList(page->payload(), end - page->payload());
    }

    RELEASE_ASSERT(!m_firstLargeHeapObject);
}
template<typename Header>
bool ThreadHeap<Header>::isConsistentForGC()
{
    for (size_t i = 0; i < blinkPageSizeLog2; i++) {
        if (m_freeLists[i])
            return false;
    }
    return !ownsNonEmptyAllocationArea();
}
template<typename Header>
void ThreadHeap<Header>::makeConsistentForGC()
{
    if (ownsNonEmptyAllocationArea())
        addToFreeList(currentAllocationPoint(), remainingAllocationSize());
    setAllocationPoint(0, 0);
    clearFreeLists();
}
template<typename Header>
void ThreadHeap<Header>::clearMarks()
{
    ASSERT(isConsistentForGC());
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
        page->clearMarks();
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
        current->unmark();
}
template<typename Header>
void ThreadHeap<Header>::deletePages()
{
    heapContainsCache()->flush();
    // Add all pages in the pool to the heap's list of pages before deleting.
    clearPagePool();

    for (HeapPage<Header>* page = m_firstPage; page; ) {
        HeapPage<Header>* dead = page;
        page = page->next();
        PageMemory* storage = dead->storage();
        dead->~HeapPage();
        delete storage;
    }
    m_firstPage = 0;

    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
        LargeHeapObject<Header>* dead = current;
        current = current->next();
        PageMemory* storage = dead->storage();
        dead->~LargeHeapObject();
        delete storage;
    }
    m_firstLargeHeapObject = 0;
}
template<typename Header>
void ThreadHeap<Header>::clearFreeLists()
{
    for (size_t i = 0; i < blinkPageSizeLog2; i++)
        m_freeLists[i] = 0;
}
int BaseHeap::bucketIndexForSize(size_t size)
{
    ASSERT(size > 0);
    int index = -1;
    while (size) {
        size >>= 1;
        index++;
    }
    return index;
}
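// The bucket index is floor(log2(size)), e.g. bucketIndexForSize(96) == 6 and
// bucketIndexForSize(128) == 7, so bucket i collects free list entries whose
// size lies in [2^i, 2^(i+1)).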
template<typename Header>
HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo)
    : BaseHeapPage(storage, gcInfo, heap->threadState())
    , m_next(0)
    , m_heap(heap)
{
    COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
    m_objectStartBitMapComputed = false;
    ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
    heap->stats().increaseAllocatedSpace(blinkPageSize);
}
template<typename Header>
void HeapPage<Header>::link(HeapPage** prevNext)
{
    m_next = *prevNext;
    *prevNext = this;
}

template<typename Header>
void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext)
{
    *prevNext = unused->m_next;
    unused->heap()->addPageToPool(unused);
}
template<typename Header>
void HeapPage<Header>::getStats(HeapStats& stats)
{
    stats.increaseAllocatedSpace(blinkPageSize);
    Address headerAddress = payload();
    ASSERT(headerAddress != end());
    do {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        if (!header->isFree())
            stats.increaseObjectSpace(header->payloadSize());
        ASSERT(header->size() < blinkPagePayloadSize());
        headerAddress += header->size();
        ASSERT(headerAddress <= end());
    } while (headerAddress < end());
}
template<typename Header>
bool HeapPage<Header>::isEmpty()
{
    BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
    return header->isFree() && (header->size() == payloadSize());
}
template<typename Header>
void HeapPage<Header>::sweep()
{
    clearObjectStartBitMap();
    heap()->stats().increaseAllocatedSpace(blinkPageSize);
    Address startOfGap = payload();
    for (Address headerAddress = startOfGap; headerAddress < end(); ) {
        BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
        ASSERT(basicHeader->size() < blinkPagePayloadSize());

        if (basicHeader->isFree()) {
            headerAddress += basicHeader->size();
            continue;
        }
        // At this point we know this is a valid object of type Header.
        Header* header = static_cast<Header*>(basicHeader);

        if (!header->isMarked()) {
            // For ASAN we unpoison the specific object when calling the finalizer and
            // poison it again when done to allow the object's own finalizer to operate
            // on the object, but not have other finalizers be allowed to access it.
            ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
            finalize(header);
            ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
            headerAddress += header->size();
            continue;
        }

        if (startOfGap != headerAddress)
            heap()->addToFreeList(startOfGap, headerAddress - startOfGap);
        header->unmark();
        headerAddress += header->size();
        heap()->stats().increaseObjectSpace(header->payloadSize());
        startOfGap = headerAddress;
    }
    if (startOfGap != end())
        heap()->addToFreeList(startOfGap, end() - startOfGap);
}
template<typename Header>
void HeapPage<Header>::clearMarks()
{
    for (Address headerAddress = payload(); headerAddress < end();) {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        ASSERT(header->size() < blinkPagePayloadSize());
        if (!header->isFree())
            header->unmark();
        headerAddress += header->size();
    }
}
template<typename Header>
void HeapPage<Header>::populateObjectStartBitMap()
{
    memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
    Address start = payload();
    for (Address headerAddress = start; headerAddress < end();) {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        size_t objectOffset = headerAddress - start;
        ASSERT(!(objectOffset & allocationMask));
        size_t objectStartNumber = objectOffset / allocationGranularity;
        size_t mapIndex = objectStartNumber / 8;
        ASSERT(mapIndex < objectStartBitMapSize);
        m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7));
        headerAddress += header->size();
        ASSERT(headerAddress <= end());
    }
    m_objectStartBitMapComputed = true;
}
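// Example (assuming allocationGranularity is 8 bytes): an object whose header
// starts at payload offset 120 has objectStartNumber 15, so bit 7 of
// m_objectStartBitMap[1] is set. checkAndMarkPointer() below walks this bitmap
// backwards from an interior pointer to find the start of the enclosing object.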
template<typename Header>
void HeapPage<Header>::clearObjectStartBitMap()
{
    m_objectStartBitMapComputed = false;
}
// Count the leading zero bits in a byte (a zero byte yields 8).
static int numberOfLeadingZeroes(uint8_t byte)
{
    if (!byte)
        return 8;
    int result = 0;
    if (byte <= 0x0F) {
        result += 4;
        byte = byte << 4;
    }
    if (byte <= 0x3F) {
        result += 2;
        byte = byte << 2;
    }
    if (byte <= 0x7F)
        result++;
    return result;
}
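// For instance, numberOfLeadingZeroes(0x10) == 3, so the most significant set
// bit of a bitmap byte 0x10 is bit 7 - 3 == 4; checkAndMarkPointer() uses this
// to map a bitmap byte back to the highest object start at or below an address.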
template<typename Header>
bool HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address addr)
{
    if (addr < payload())
        return false;
    if (!isObjectStartBitMapComputed())
        populateObjectStartBitMap();
    size_t objectOffset = addr - payload();
    size_t objectStartNumber = objectOffset / allocationGranularity;
    size_t mapIndex = objectStartNumber / 8;
    ASSERT(mapIndex < objectStartBitMapSize);
    size_t bit = objectStartNumber & 7;
    uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1);
    while (!byte) {
        ASSERT(mapIndex > 0);
        byte = m_objectStartBitMap[--mapIndex];
    }
    int leadingZeroes = numberOfLeadingZeroes(byte);
    objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
    objectOffset = objectStartNumber * allocationGranularity;
    Address objectAddress = objectOffset + payload();
    Header* header = reinterpret_cast<Header*>(objectAddress);
    if (header->isFree())
        return false;

    visitor->mark(header, traceCallback(header));
    return true;
}
#if defined(ADDRESS_SANITIZER)
template<typename Header>
void HeapPage<Header>::poisonUnmarkedObjects()
{
    for (Address headerAddress = payload(); headerAddress < end(); ) {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        ASSERT(header->size() < blinkPagePayloadSize());

        if (!header->isFree() && !header->isMarked())
            ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
        headerAddress += header->size();
    }
}
#endif
template<>
inline void HeapPage<FinalizedHeapObjectHeader>::finalize(FinalizedHeapObjectHeader* header)
{
    header->finalize();
}

template<>
inline void HeapPage<HeapObjectHeader>::finalize(HeapObjectHeader* header)
{
    ASSERT(gcInfo());
    HeapObjectHeader::finalize(gcInfo(), header->payload(), header->payloadSize());
}

template<>
inline TraceCallback HeapPage<HeapObjectHeader>::traceCallback(HeapObjectHeader* header)
{
    ASSERT(gcInfo());
    return gcInfo()->m_trace;
}

template<>
inline TraceCallback HeapPage<FinalizedHeapObjectHeader>::traceCallback(FinalizedHeapObjectHeader* header)
{
    return header->traceCallback();
}
template<typename Header>
void LargeHeapObject<Header>::getStats(HeapStats& stats)
{
    stats.increaseAllocatedSpace(size());
    stats.increaseObjectSpace(payloadSize());
}
HeapContainsCache::HeapContainsCache()
    : m_entries(adoptArrayPtr(new Entry[HeapContainsCache::numberOfEntries]))
{
}

void HeapContainsCache::flush()
{
    for (int i = 0; i < numberOfEntries; i++)
        m_entries[i] = Entry();
}
size_t HeapContainsCache::hash(Address address)
{
    size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2);
    value ^= value >> numberOfEntriesLog2;
    value ^= value >> (numberOfEntriesLog2 * 2);
    value &= numberOfEntries - 1;
    return value & ~1; // Returns only even indices.
}
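// Each even index and the following odd index form a two-entry set: lookup()
// probes both slots and addEntry() moves the previous occupant of the even
// slot into the odd slot, giving a small two-way set-associative cache of
// which heap page contains a given address.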
bool HeapContainsCache::lookup(Address address, BaseHeapPage** page)
{
    ASSERT(page);
    size_t index = hash(address);
    ASSERT(!(index & 1));
    Address cachePage = roundToBlinkPageStart(address);
    if (m_entries[index].address() == cachePage) {
        *page = m_entries[index].containingPage();
        return true;
    }
    if (m_entries[index + 1].address() == cachePage) {
        *page = m_entries[index + 1].containingPage();
        return true;
    }
    *page = 0;
    return false;
}
void HeapContainsCache::addEntry(Address address, BaseHeapPage* page)
{
    size_t index = hash(address);
    ASSERT(!(index & 1));
    Address cachePage = roundToBlinkPageStart(address);
    m_entries[index + 1] = m_entries[index];
    m_entries[index] = Entry(cachePage, page);
}
void CallbackStack::init(CallbackStack** first)
{
    // The stacks are chained, so we start by setting this to null as the terminator.
    *first = 0;
    *first = new CallbackStack(first);
}
void CallbackStack::shutdown(CallbackStack** first)
{
    CallbackStack* next;
    for (CallbackStack* current = *first; current; current = next) {
        next = current->m_next;
        delete current;
    }
    *first = 0;
}
CallbackStack::~CallbackStack()
{
#ifndef NDEBUG
    clearUnused();
#endif
}
void CallbackStack::clearUnused()
{
    ASSERT(m_current == &(m_buffer[0]));
    for (size_t i = 0; i < bufferSize; i++)
        m_buffer[i] = Item(0, 0);
}
void CallbackStack::assertIsEmpty()
{
    ASSERT(m_current == &(m_buffer[0]));
}
bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor)
{
    if (m_current == &(m_buffer[0])) {
        if (!m_next) {
#ifndef NDEBUG
            clearUnused();
#endif
            return false;
        }
        CallbackStack* nextStack = m_next;
        *first = nextStack;
        delete this;
        return nextStack->popAndInvokeCallback(first, visitor);
    }
    Item* item = --m_current;

    VisitorCallback callback = item->callback();
    callback(visitor, item->object());

    return true;
}
class MarkingVisitor : public Visitor {
public:
    inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback)
    {
        ASSERT(header);
        ASSERT(objectPointer);
        if (header->isMarked())
            return;
        header->mark();
        Heap::pushTraceCallback(const_cast<void*>(objectPointer), callback);
    }
    virtual void mark(HeapObjectHeader* header, TraceCallback callback) OVERRIDE
    {
        // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
        // versions to correctly find the payload.
        visitHeader(header, header->payload(), callback);
    }

    virtual void mark(FinalizedHeapObjectHeader* header, TraceCallback callback) OVERRIDE
    {
        // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
        // versions to correctly find the payload.
        visitHeader(header, header->payload(), callback);
    }

    virtual void mark(const void* objectPointer, TraceCallback callback) OVERRIDE
    {
        if (!objectPointer)
            return;
        FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(objectPointer);
        visitHeader(header, header->payload(), callback);
    }
    virtual void registerWeakMembers(const void* closure, const void* containingObject, WeakPointerCallback callback) OVERRIDE
    {
        Heap::pushWeakObjectPointerCallback(const_cast<void*>(closure), const_cast<void*>(containingObject), callback);
    }

    virtual bool isMarked(const void* objectPointer) OVERRIDE
    {
        return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked();
    }
    // This macro defines the necessary visitor methods for typed heaps.
#define DEFINE_VISITOR_METHODS(Type)                                              \
    virtual void mark(const Type* objectPointer, TraceCallback callback) OVERRIDE \
    {                                                                             \
        if (!objectPointer)                                                       \
            return;                                                               \
        HeapObjectHeader* header =                                                \
            HeapObjectHeader::fromPayload(objectPointer);                         \
        visitHeader(header, header->payload(), callback);                         \
    }                                                                             \
    virtual bool isMarked(const Type* objectPointer) OVERRIDE                     \
    {                                                                             \
        return HeapObjectHeader::fromPayload(objectPointer)->isMarked();          \
    }

    FOR_EACH_TYPED_HEAP(DEFINE_VISITOR_METHODS)

#undef DEFINE_VISITOR_METHODS
    virtual void registerWeakCell(void** cell, WeakPointerCallback callback) OVERRIDE
    {
        Heap::pushWeakCellPointerCallback(cell, callback);
    }
};
void Heap::init()
{
    ThreadState::init();
    CallbackStack::init(&s_markingStack);
    CallbackStack::init(&s_weakCallbackStack);
    s_markingVisitor = new MarkingVisitor();
}
void Heap::shutdown()
{
    delete s_markingVisitor;
    CallbackStack::shutdown(&s_weakCallbackStack);
    CallbackStack::shutdown(&s_markingStack);
    ThreadState::shutdown();
}
BaseHeapPage* Heap::contains(Address address)
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        BaseHeapPage* page = (*it)->contains(address);
        if (page)
            return page;
    }
    return 0;
}
Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        if ((*it)->checkAndMarkPointer(visitor, address)) {
            // Pointer found and marked.
            return address;
        }
    }
    return 0;
}
void Heap::pushTraceCallback(void* object, TraceCallback callback)
{
    ASSERT(Heap::contains(object));
    CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack);
    *slot = CallbackStack::Item(object, callback);
}
bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
{
    return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor);
}
void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback)
{
    ASSERT(Heap::contains(cell));
    CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallbackStack);
    *slot = CallbackStack::Item(cell, callback);
}
void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointerCallback callback)
{
    ASSERT(Heap::contains(object));
    BaseHeapPage* heapPageForObject = reinterpret_cast<BaseHeapPage*>(pageHeaderAddress(reinterpret_cast<Address>(object)));
    ASSERT(Heap::contains(object) == heapPageForObject);
    ThreadState* state = heapPageForObject->threadState();
    state->pushWeakObjectPointerCallback(closure, callback);
}
bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
{
    return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visitor);
}
void Heap::prepareForGC()
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
        (*it)->prepareForGC();
}
void Heap::collectGarbage(ThreadState::StackState stackState, GCType gcType)
{
    ThreadState::current()->clearGCRequested();
    GCScope gcScope(stackState);

    // Disallow allocation during garbage collection (but not
    // during the finalization that happens when the gcScope is
    // torn down).
    NoAllocationScope<AnyThread> noAllocationScope;

    prepareForGC();

    ThreadState::visitRoots(s_markingVisitor);
    // Recursively mark all objects that are reachable from the roots.
    while (popAndInvokeTraceCallback(s_markingVisitor)) { }

    // Call weak callbacks on objects that may now be pointing to dead
    // objects.
    while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { }

    // It is not permitted to trace pointers of live objects in the weak
    // callback phase, so the marking stack should still be empty here.
    s_markingStack->assertIsEmpty();
}
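// Sweeping and finalization do not happen during marking; as the comment above
// notes, finalization runs later, when the gcScope above is torn down.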
void Heap::collectAllGarbage(ThreadState::StackState stackState, GCType gcType)
{
    // FIXME: oilpan: we should perform a single GC and everything
    // should die. Unfortunately it is not the case for all objects
    // because the hierarchy was not completely moved to the heap and
    // some heap allocated objects own objects that contain persistents
    // pointing to other heap allocated objects.
    for (int i = 0; i < 5; i++)
        collectGarbage(stackState, gcType);
}
void Heap::getStats(HeapStats* stats)
{
    stats->clear();
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
    for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        HeapStats temp;
        (*it)->getStats(temp);
        stats->add(&temp);
    }
}
bool Heap::isConsistentForGC()
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        if (!(*it)->isConsistentForGC())
            return false;
    }
    return true;
}
void Heap::makeConsistentForGC()
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
        (*it)->makeConsistentForGC();
}
// Force template instantiations for the types that we need.
template class HeapPage<FinalizedHeapObjectHeader>;
template class HeapPage<HeapObjectHeader>;
template class ThreadHeap<FinalizedHeapObjectHeader>;
template class ThreadHeap<HeapObjectHeader>;

Visitor* Heap::s_markingVisitor;
CallbackStack* Heap::s_markingStack;
CallbackStack* Heap::s_weakCallbackStack;

} // namespace WebCore