1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "liveobjectlist-inl.h"
31 #include "macro-assembler.h"
32 #include "mark-compact.h"
39 // ----------------------------------------------------------------------------
42 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
43 // You can't actually iterate over the anchor page. It is not a real page,
44 // just an anchor for the doubly linked page list. Initialize as if we have
45 // reached the end of the anchor page, then the first iteration will move on to the first real page.
55 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
56 HeapObjectCallback size_func) {
57 // You can't actually iterate over the anchor page. It is not a real page,
58 // just an anchor for the doubly linked page list. Initialize the current
59 // address and end as NULL, then the first iteration will move on to the first page.
69 HeapObjectIterator::HeapObjectIterator(Page* page,
70 HeapObjectCallback size_func) {
71 Space* owner = page->owner();
72 ASSERT(owner == HEAP->old_pointer_space() ||
73 owner == HEAP->old_data_space() ||
74 owner == HEAP->map_space() ||
75 owner == HEAP->cell_space() ||
76 owner == HEAP->code_space());
77 Initialize(reinterpret_cast<PagedSpace*>(owner),
82 ASSERT(page->WasSweptPrecisely());
86 void HeapObjectIterator::Initialize(PagedSpace* space,
87 Address cur, Address end,
88 HeapObjectIterator::PageMode mode,
89 HeapObjectCallback size_f) {
90 // Check that we actually can iterate this space.
91 ASSERT(!space->was_swept_conservatively());
101 // We have hit the end of the current page's object area and should advance
102 // to the next page.
103 bool HeapObjectIterator::AdvanceToNextPage() {
104 ASSERT(cur_addr_ == cur_end_);
105 if (page_mode_ == kOnePageOnly) return false;
107 if (cur_addr_ == NULL) {
108 cur_page = space_->anchor();
110 cur_page = Page::FromAddress(cur_addr_ - 1);
111 ASSERT(cur_addr_ == cur_page->area_end());
113 cur_page = cur_page->next_page();
114 if (cur_page == space_->anchor()) return false;
115 cur_addr_ = cur_page->area_start();
116 cur_end_ = cur_page->area_end();
117 ASSERT(cur_page->WasSweptPrecisely());
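// A minimal sketch of the anchor-as-sentinel pattern relied on above, using a
// hypothetical ExampleNode type rather than V8's Page class: the anchor closes
// the circular doubly linked list, so a walk terminates when it wraps back
// around to the anchor, and an empty list is simply the anchor linked to
// itself.
//
//   struct ExampleNode { ExampleNode* next; };
//
//   int CountExampleNodes(ExampleNode* anchor) {
//     int count = 0;
//     for (ExampleNode* n = anchor->next; n != anchor; n = n->next) count++;
//     return count;
//   }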
122 // -----------------------------------------------------------------------------
126 CodeRange::CodeRange(Isolate* isolate)
131 current_allocation_block_index_(0) {
135 bool CodeRange::SetUp(const size_t requested) {
136 ASSERT(code_range_ == NULL);
138 code_range_ = new VirtualMemory(requested);
139 CHECK(code_range_ != NULL);
140 if (!code_range_->IsReserved()) {
146 // We are sure that we have mapped a block of requested addresses.
147 ASSERT(code_range_->size() == requested);
148 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
149 Address base = reinterpret_cast<Address>(code_range_->address());
150 Address aligned_base =
151 RoundUp(reinterpret_cast<Address>(code_range_->address()),
152 MemoryChunk::kAlignment);
153 size_t size = code_range_->size() - (aligned_base - base);
154 allocation_list_.Add(FreeBlock(aligned_base, size));
155 current_allocation_block_index_ = 0;
160 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
161 const FreeBlock* right) {
162 // The entire point of CodeRange is that the difference between two
163 // addresses in the range can be represented as a signed 32-bit int,
164 // so the cast is semantically correct.
165 return static_cast<int>(left->start - right->start);
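// A small sketch of why the cast above is safe (offsets are hypothetical, not
// V8 constants): both addresses lie inside one reserved code range of well
// under 2GB, so their difference always fits in a signed 32-bit int.
//
//   Address a = range_base + 400 * MB;       // hypothetical block at +400MB
//   Address b = range_base + 100 * MB;       // hypothetical block at +100MB
//   int diff = static_cast<int>(a - b);      // 300MB, well below 2^31 - 1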
169 void CodeRange::GetNextAllocationBlock(size_t requested) {
170 for (current_allocation_block_index_++;
171 current_allocation_block_index_ < allocation_list_.length();
172 current_allocation_block_index_++) {
173 if (requested <= allocation_list_[current_allocation_block_index_].size) {
174 return; // Found a large enough allocation block.
178 // Sort and merge the free blocks on the free list and the allocation list.
179 free_list_.AddAll(allocation_list_);
180 allocation_list_.Clear();
181 free_list_.Sort(&CompareFreeBlockAddress);
182 for (int i = 0; i < free_list_.length();) {
183 FreeBlock merged = free_list_[i];
185 // Add adjacent free blocks to the current merged block.
186 while (i < free_list_.length() &&
187 free_list_[i].start == merged.start + merged.size) {
188 merged.size += free_list_[i].size;
191 if (merged.size > 0) {
192 allocation_list_.Add(merged);
197 for (current_allocation_block_index_ = 0;
198 current_allocation_block_index_ < allocation_list_.length();
199 current_allocation_block_index_++) {
200 if (requested <= allocation_list_[current_allocation_block_index_].size) {
201 return; // Found a large enough allocation block.
205 // Code range is full or too fragmented.
206 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
211 Address CodeRange::AllocateRawMemory(const size_t requested,
213 ASSERT(current_allocation_block_index_ < allocation_list_.length());
214 if (requested > allocation_list_[current_allocation_block_index_].size) {
215 // Find an allocation block large enough. This function call may
216 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
217 GetNextAllocationBlock(requested);
219 // Commit the requested memory at the start of the current allocation block.
220 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
221 FreeBlock current = allocation_list_[current_allocation_block_index_];
222 if (aligned_requested >= (current.size - Page::kPageSize)) {
223 // Don't leave a small free block; it is useless for a large object or chunk.
224 *allocated = current.size;
226 *allocated = aligned_requested;
228 ASSERT(*allocated <= current.size);
229 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
230 if (!MemoryAllocator::CommitCodePage(code_range_,
236 allocation_list_[current_allocation_block_index_].start += *allocated;
237 allocation_list_[current_allocation_block_index_].size -= *allocated;
238 if (*allocated == current.size) {
239 GetNextAllocationBlock(0); // This block is used up, get the next one.
241 return current.start;
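// A short worked example of the "don't leave a small free block" rule above,
// assuming 1MB pages and hypothetical sizes: with a 5MB current block and a
// 4.5MB aligned request, the leftover would be only 0.5MB (less than one
// page), so the whole 5MB block is handed out (*allocated == 5MB) instead of
// leaving an unusable tail on the allocation list.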
245 void CodeRange::FreeRawMemory(Address address, size_t length) {
246 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
247 free_list_.Add(FreeBlock(address, length));
248 code_range_->Uncommit(address, length);
252 void CodeRange::TearDown() {
253 delete code_range_; // Frees all memory in the virtual memory range.
256 allocation_list_.Free();
260 // -----------------------------------------------------------------------------
264 MemoryAllocator::MemoryAllocator(Isolate* isolate)
267 capacity_executable_(0),
269 size_executable_(0) {
273 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
274 capacity_ = RoundUp(capacity, Page::kPageSize);
275 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
276 ASSERT_GE(capacity_, capacity_executable_);
279 size_executable_ = 0;
285 void MemoryAllocator::TearDown() {
286 // Check that spaces were torn down before MemoryAllocator.
288 // TODO(gc) this will be true again when we fix FreeMemory.
289 // ASSERT(size_executable_ == 0);
291 capacity_executable_ = 0;
295 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
296 Executability executable) {
297 // TODO(gc) make code_range part of memory allocator?
298 ASSERT(reservation->IsReserved());
299 size_t size = reservation->size();
300 ASSERT(size_ >= size);
303 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
305 if (executable == EXECUTABLE) {
306 ASSERT(size_executable_ >= size);
307 size_executable_ -= size;
309 // Code which is part of the code-range does not have its own VirtualMemory.
310 ASSERT(!isolate_->code_range()->contains(
311 static_cast<Address>(reservation->address())));
312 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
313 reservation->Release();
317 void MemoryAllocator::FreeMemory(Address base,
319 Executability executable) {
320 // TODO(gc) make code_range part of memory allocator?
321 ASSERT(size_ >= size);
324 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
326 if (executable == EXECUTABLE) {
327 ASSERT(size_executable_ >= size);
328 size_executable_ -= size;
330 if (isolate_->code_range()->contains(static_cast<Address>(base))) {
331 ASSERT(executable == EXECUTABLE);
332 isolate_->code_range()->FreeRawMemory(base, size);
334 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
335 bool result = VirtualMemory::ReleaseRegion(base, size);
342 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
344 VirtualMemory* controller) {
345 VirtualMemory reservation(size, alignment);
347 if (!reservation.IsReserved()) return NULL;
348 size_ += reservation.size();
349 Address base = RoundUp(static_cast<Address>(reservation.address()),
351 controller->TakeControl(&reservation);
356 Address MemoryAllocator::AllocateAlignedMemory(size_t size,
358 Executability executable,
359 VirtualMemory* controller) {
360 VirtualMemory reservation;
361 Address base = ReserveAlignedMemory(size, alignment, &reservation);
362 if (base == NULL) return NULL;
364 if (executable == EXECUTABLE) {
365 if (!CommitCodePage(&reservation, base, size)) {
369 if (!reservation.Commit(base, size, false)) {
375 // Failed to commit the body. Release the mapping and any partially
376 // committed regions inside it.
377 reservation.Release();
381 controller->TakeControl(&reservation);
386 void Page::InitializeAsAnchor(PagedSpace* owner) {
393 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
395 SemiSpace* semi_space) {
396 Address area_start = start + NewSpacePage::kObjectStartOffset;
397 Address area_end = start + Page::kPageSize;
399 MemoryChunk* chunk = MemoryChunk::Initialize(heap,
406 chunk->set_next_chunk(NULL);
407 chunk->set_prev_chunk(NULL);
408 chunk->initialize_scan_on_scavenge(true);
409 bool in_to_space = (semi_space->id() != kFromSpace);
410 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
411 : MemoryChunk::IN_FROM_SPACE);
412 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
413 : MemoryChunk::IN_TO_SPACE));
414 NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
415 heap->incremental_marking()->SetNewSpacePageFlags(page);
420 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
421 set_owner(semi_space);
422 set_next_chunk(this);
423 set_prev_chunk(this);
424 // The flags mark this invalid page as not being in new-space.
425 // All real new-space pages will be in new-space.
430 MemoryChunk* MemoryChunk::Initialize(Heap* heap,
435 Executability executable,
437 MemoryChunk* chunk = FromAddress(base);
439 ASSERT(base == chunk->address());
443 chunk->area_start_ = area_start;
444 chunk->area_end_ = area_end;
446 chunk->set_owner(owner);
447 chunk->InitializeReservedMemory();
448 chunk->slots_buffer_ = NULL;
449 chunk->skip_list_ = NULL;
450 chunk->ResetLiveBytes();
451 Bitmap::Clear(chunk);
452 chunk->initialize_scan_on_scavenge(false);
453 chunk->SetFlag(WAS_SWEPT_PRECISELY);
455 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
456 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
458 if (executable == EXECUTABLE) {
459 chunk->SetFlag(IS_EXECUTABLE);
462 if (owner == heap->old_data_space()) {
463 chunk->SetFlag(CONTAINS_ONLY_DATA);
470 void MemoryChunk::InsertAfter(MemoryChunk* other) {
471 next_chunk_ = other->next_chunk_;
473 other->next_chunk_->prev_chunk_ = this;
474 other->next_chunk_ = this;
478 void MemoryChunk::Unlink() {
479 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
480 heap_->decrement_scan_on_scavenge_pages();
481 ClearFlag(SCAN_ON_SCAVENGE);
483 next_chunk_->prev_chunk_ = prev_chunk_;
484 prev_chunk_->next_chunk_ = next_chunk_;
490 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
491 Executability executable,
494 Heap* heap = isolate_->heap();
496 VirtualMemory reservation;
497 Address area_start = NULL;
498 Address area_end = NULL;
499 if (executable == EXECUTABLE) {
500 chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
501 OS::CommitPageSize()) + CodePageGuardSize();
503 // Check executable memory limit.
504 if (size_executable_ + chunk_size > capacity_executable_) {
506 StringEvent("MemoryAllocator::AllocateRawMemory",
507 "V8 Executable Allocation capacity exceeded"));
511 // Allocate executable memory either from code range or from the
513 if (isolate_->code_range()->exists()) {
514 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
515 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
516 MemoryChunk::kAlignment));
517 if (base == NULL) return NULL;
519 // Update executable memory size.
520 size_executable_ += chunk_size;
522 base = AllocateAlignedMemory(chunk_size,
523 MemoryChunk::kAlignment,
526 if (base == NULL) return NULL;
527 // Update executable memory size.
528 size_executable_ += reservation.size();
532 ZapBlock(base, CodePageGuardStartOffset());
533 ZapBlock(base + CodePageAreaStartOffset(), body_size);
535 area_start = base + CodePageAreaStartOffset();
536 area_end = area_start + body_size;
538 chunk_size = MemoryChunk::kObjectStartOffset + body_size;
539 base = AllocateAlignedMemory(chunk_size,
540 MemoryChunk::kAlignment,
544 if (base == NULL) return NULL;
547 ZapBlock(base, chunk_size);
550 area_start = base + Page::kObjectStartOffset;
551 area_end = base + chunk_size;
554 isolate_->counters()->memory_allocated()->
555 Increment(static_cast<int>(chunk_size));
557 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
559 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
560 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
563 MemoryChunk* result = MemoryChunk::Initialize(heap,
570 result->set_reserved_memory(&reservation);
575 Page* MemoryAllocator::AllocatePage(intptr_t size,
577 Executability executable) {
578 MemoryChunk* chunk = AllocateChunk(size, executable, owner);
580 if (chunk == NULL) return NULL;
582 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
586 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
588 Executability executable) {
589 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
590 if (chunk == NULL) return NULL;
591 return LargePage::Initialize(isolate_->heap(), chunk);
595 void MemoryAllocator::Free(MemoryChunk* chunk) {
596 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
597 if (chunk->owner() != NULL) {
599 static_cast<ObjectSpace>(1 << chunk->owner()->identity());
600 PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
603 isolate_->heap()->RememberUnmappedPage(
604 reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
606 delete chunk->slots_buffer();
607 delete chunk->skip_list();
609 VirtualMemory* reservation = chunk->reserved_memory();
610 if (reservation->IsReserved()) {
611 FreeMemory(reservation, chunk->executable());
613 FreeMemory(chunk->address(),
615 chunk->executable());
620 bool MemoryAllocator::CommitBlock(Address start,
622 Executability executable) {
623 if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
625 ZapBlock(start, size);
627 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
632 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
633 if (!VirtualMemory::UncommitRegion(start, size)) return false;
634 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
639 void MemoryAllocator::ZapBlock(Address start, size_t size) {
640 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
641 Memory::Address_at(start + s) = kZapValue;
646 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
647 AllocationAction action,
649 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
650 MemoryAllocationCallbackRegistration registration =
651 memory_allocation_callbacks_[i];
652 if ((registration.space & space) == space &&
653 (registration.action & action) == action)
654 registration.callback(space, action, static_cast<int>(size));
659 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
660 MemoryAllocationCallback callback) {
661 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
662 if (memory_allocation_callbacks_[i].callback == callback) return true;
668 void MemoryAllocator::AddMemoryAllocationCallback(
669 MemoryAllocationCallback callback,
671 AllocationAction action) {
672 ASSERT(callback != NULL);
673 MemoryAllocationCallbackRegistration registration(callback, space, action);
674 ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
675 return memory_allocation_callbacks_.Add(registration);
679 void MemoryAllocator::RemoveMemoryAllocationCallback(
680 MemoryAllocationCallback callback) {
681 ASSERT(callback != NULL);
682 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
683 if (memory_allocation_callbacks_[i].callback == callback) {
684 memory_allocation_callbacks_.Remove(i);
693 void MemoryAllocator::ReportStatistics() {
694 float pct = static_cast<float>(capacity_ - size_) / capacity_;
695 PrintF(" capacity: %" V8_PTR_PREFIX "d"
696 ", used: %" V8_PTR_PREFIX "d"
697 ", available: %%%d\n\n",
698 capacity_, size_, static_cast<int>(pct*100));
703 int MemoryAllocator::CodePageGuardStartOffset() {
704 // We are guarding code pages: the first OS page after the header
705 // will be protected as non-writable.
706 return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
710 int MemoryAllocator::CodePageGuardSize() {
711 return static_cast<int>(OS::CommitPageSize());
715 int MemoryAllocator::CodePageAreaStartOffset() {
716 // We are guarding code pages: the first OS page after the header
717 // will be protected as non-writable.
718 return CodePageGuardStartOffset() + CodePageGuardSize();
722 int MemoryAllocator::CodePageAreaEndOffset() {
723 // We are guarding code pages: the last OS page will be protected as non-writable.
725 return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
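// A sketch of the executable page layout that the helpers above describe,
// assuming a hypothetical 4KB OS commit page size:
//
//   |<- header ->|<- guard ->|<-------- body -------->|<- guard ->|
//   0            GuardStart  AreaStart                AreaEnd     kPageSize
//
//   CodePageGuardStartOffset() == RoundUp(Page::kObjectStartOffset, 4KB)
//   CodePageGuardSize()        == 4KB
//   CodePageAreaStartOffset()  == CodePageGuardStartOffset() + 4KB
//   CodePageAreaEndOffset()    == Page::kPageSize - 4KB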
729 bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
732 // Commit page header (not executable).
733 if (!vm->Commit(start,
734 CodePageGuardStartOffset(),
739 // Create guard page after the header.
740 if (!vm->Guard(start + CodePageGuardStartOffset())) {
744 // Commit page body (executable).
745 size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
746 if (!vm->Commit(start + CodePageAreaStartOffset(),
752 // Create guard page after the allocatable area.
753 if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
761 // -----------------------------------------------------------------------------
762 // MemoryChunk implementation
764 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
765 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
766 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
767 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
769 chunk->IncrementLiveBytes(by);
772 // -----------------------------------------------------------------------------
773 // PagedSpace implementation
775 PagedSpace::PagedSpace(Heap* heap,
776 intptr_t max_capacity,
778 Executability executable)
779 : Space(heap, id, executable),
781 was_swept_conservatively_(false),
782 first_unswept_page_(Page::FromAddress(NULL)),
783 unswept_free_bytes_(0) {
784 if (id == CODE_SPACE) {
785 area_size_ = heap->isolate()->memory_allocator()->
788 area_size_ = Page::kPageSize - Page::kObjectStartOffset;
790 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
792 accounting_stats_.Clear();
794 allocation_info_.top = NULL;
795 allocation_info_.limit = NULL;
797 anchor_.InitializeAsAnchor(this);
801 bool PagedSpace::SetUp() {
806 bool PagedSpace::HasBeenSetUp() {
811 void PagedSpace::TearDown() {
812 PageIterator iterator(this);
813 while (iterator.has_next()) {
814 heap()->isolate()->memory_allocator()->Free(iterator.next());
816 anchor_.set_next_page(&anchor_);
817 anchor_.set_prev_page(&anchor_);
818 accounting_stats_.Clear();
822 MaybeObject* PagedSpace::FindObject(Address addr) {
823 // Note: this function can only be called on precisely swept spaces.
824 ASSERT(!heap()->mark_compact_collector()->in_use());
826 if (!Contains(addr)) return Failure::Exception();
828 Page* p = Page::FromAddress(addr);
829 HeapObjectIterator it(p, NULL);
830 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
831 Address cur = obj->address();
832 Address next = cur + obj->Size();
833 if ((cur <= addr) && (addr < next)) return obj;
837 return Failure::Exception();
840 bool PagedSpace::CanExpand() {
841 ASSERT(max_capacity_ % AreaSize() == 0);
843 if (Capacity() == max_capacity_) return false;
845 ASSERT(Capacity() < max_capacity_);
847 // Are we going to exceed capacity for this space?
848 if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
853 bool PagedSpace::Expand() {
854 if (!CanExpand()) return false;
856 intptr_t size = AreaSize();
858 if (anchor_.next_page() == &anchor_) {
859 size = SizeOfFirstPage();
862 Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
863 size, this, executable());
864 if (p == NULL) return false;
866 ASSERT(Capacity() <= max_capacity_);
868 p->InsertAfter(anchor_.prev_page());
874 intptr_t PagedSpace::SizeOfFirstPage() {
876 switch (identity()) {
877 case OLD_POINTER_SPACE:
878 size = 64 * kPointerSize * KB;
890 if (kPointerSize == 8) {
891 // On x64 we allocate code pages in a special way (from the reserved
892 // 2GB area). That part of the code is not yet upgraded to handle lazy commits.
902 return Min(size, AreaSize());
906 int PagedSpace::CountTotalPages() {
907 PageIterator it(this);
909 while (it.has_next()) {
917 void PagedSpace::ReleasePage(Page* page) {
918 ASSERT(page->LiveBytes() == 0);
919 ASSERT(AreaSize() == page->area_size());
921 // Adjust list of unswept pages if the page is the head of the list.
922 if (first_unswept_page_ == page) {
923 first_unswept_page_ = page->next_page();
924 if (first_unswept_page_ == anchor()) {
925 first_unswept_page_ = Page::FromAddress(NULL);
929 if (page->WasSwept()) {
930 intptr_t size = free_list_.EvictFreeListItems(page);
931 accounting_stats_.AllocateBytes(size);
932 ASSERT_EQ(AreaSize(), static_cast<int>(size));
934 DecreaseUnsweptFreeBytes(page);
937 if (Page::FromAllocationTop(allocation_info_.top) == page) {
938 allocation_info_.top = allocation_info_.limit = NULL;
942 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
943 heap()->isolate()->memory_allocator()->Free(page);
945 heap()->QueueMemoryChunkForFree(page);
948 ASSERT(Capacity() > 0);
949 accounting_stats_.ShrinkSpace(AreaSize());
953 void PagedSpace::ReleaseAllUnusedPages() {
954 PageIterator it(this);
955 while (it.has_next()) {
956 Page* page = it.next();
957 if (!page->WasSwept()) {
958 if (page->LiveBytes() == 0) ReleasePage(page);
960 HeapObject* obj = HeapObject::FromAddress(page->area_start());
961 if (obj->IsFreeSpace() &&
962 FreeSpace::cast(obj)->size() == AreaSize()) {
963 // Sometimes we allocate memory from free list but don't
964 // immediately initialize it (e.g. see PagedSpace::ReserveSpace
965 // called from Heap::ReserveSpace that can cause GC before
966 // reserved space is actually initialized).
967 // Thus we can't simply assume that obj represents a valid
968 // node still owned by a free list.
969 // Instead we should verify that the page is fully covered
970 // by free list items.
971 FreeList::SizeStats sizes;
972 free_list_.CountFreeListItems(page, &sizes);
973 if (sizes.Total() == AreaSize()) {
979 heap()->FreeQueuedChunks();
984 void PagedSpace::Print() { }
989 void PagedSpace::Verify(ObjectVisitor* visitor) {
990 // We can only iterate over the pages if they were swept precisely.
991 if (was_swept_conservatively_) return;
993 bool allocation_pointer_found_in_space =
994 (allocation_info_.top == allocation_info_.limit);
995 PageIterator page_iterator(this);
996 while (page_iterator.has_next()) {
997 Page* page = page_iterator.next();
998 ASSERT(page->owner() == this);
999 if (page == Page::FromAllocationTop(allocation_info_.top)) {
1000 allocation_pointer_found_in_space = true;
1002 ASSERT(page->WasSweptPrecisely());
1003 HeapObjectIterator it(page, NULL);
1004 Address end_of_previous_object = page->area_start();
1005 Address top = page->area_end();
1007 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1008 ASSERT(end_of_previous_object <= object->address());
1010 // The first word should be a map, and we expect all map pointers to be in the heap's map space.
1012 Map* map = object->map();
1013 ASSERT(map->IsMap());
1014 ASSERT(heap()->map_space()->Contains(map));
1016 // Perform space-specific object verification.
1017 VerifyObject(object);
1019 // The object itself should look OK.
1022 // All the interior pointers should be contained in the heap.
1023 int size = object->Size();
1024 object->IterateBody(map->instance_type(), size, visitor);
1025 if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1029 ASSERT(object->address() + size <= top);
1030 end_of_previous_object = object->address() + size;
1032 ASSERT_LE(black_size, page->LiveBytes());
1034 ASSERT(allocation_pointer_found_in_space);
1039 // -----------------------------------------------------------------------------
1040 // NewSpace implementation
1043 bool NewSpace::SetUp(int reserved_semispace_capacity,
1044 int maximum_semispace_capacity) {
1045 // Set up new space based on the preallocated memory block defined by
1046 // start and size. The provided space is divided into two semi-spaces.
1047 // To support fast containment testing in the new space, the size of
1048 // this chunk must be a power of two and it must be aligned to its size.
1049 int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1051 size_t size = 2 * reserved_semispace_capacity;
1053 heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
1054 size, size, &reservation_);
1055 if (base == NULL) return false;
1058 chunk_size_ = static_cast<uintptr_t>(size);
1059 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1061 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
1062 ASSERT(IsPowerOf2(maximum_semispace_capacity));
1064 // Allocate and set up the histogram arrays if necessary.
1065 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1066 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1068 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
1069 promoted_histogram_[name].set_name(#name);
1070 INSTANCE_TYPE_LIST(SET_NAME)
1073 ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1074 ASSERT(static_cast<intptr_t>(chunk_size_) >=
1075 2 * heap()->ReservedSemiSpaceSize());
1076 ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1078 to_space_.SetUp(chunk_base_,
1079 initial_semispace_capacity,
1080 maximum_semispace_capacity);
1081 from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1082 initial_semispace_capacity,
1083 maximum_semispace_capacity);
1084 if (!to_space_.Commit()) {
1087 ASSERT(!from_space_.is_committed()); // No need to use memory yet.
1089 start_ = chunk_base_;
1090 address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1091 object_mask_ = address_mask_ | kHeapObjectTagMask;
1092 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1094 ResetAllocationInfo();
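// A minimal sketch of the mask-based containment test that the power-of-two,
// size-aligned reservation above makes possible (the 2MB size here is
// hypothetical, and addr/chunk_base are placeholders): an address is inside
// the new space exactly when masking away the low bits yields the chunk base.
//
//   const uintptr_t kExampleSize = 2 * MB;   // must be a power of two
//   uintptr_t mask = ~(kExampleSize - 1);    // same shape as address_mask_
//   bool contains = (reinterpret_cast<uintptr_t>(addr) & mask) ==
//                   reinterpret_cast<uintptr_t>(chunk_base);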
1100 void NewSpace::TearDown() {
1101 if (allocated_histogram_) {
1102 DeleteArray(allocated_histogram_);
1103 allocated_histogram_ = NULL;
1105 if (promoted_histogram_) {
1106 DeleteArray(promoted_histogram_);
1107 promoted_histogram_ = NULL;
1111 allocation_info_.top = NULL;
1112 allocation_info_.limit = NULL;
1114 to_space_.TearDown();
1115 from_space_.TearDown();
1117 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1119 ASSERT(reservation_.IsReserved());
1120 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1127 void NewSpace::Flip() {
1128 SemiSpace::Swap(&from_space_, &to_space_);
1132 void NewSpace::Grow() {
1133 // Double the semispace size but only up to maximum capacity.
1134 ASSERT(Capacity() < MaximumCapacity());
1135 int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
1136 if (to_space_.GrowTo(new_capacity)) {
1137 // Only grow from space if we managed to grow to-space.
1138 if (!from_space_.GrowTo(new_capacity)) {
1139 // If we managed to grow to-space but couldn't grow from-space,
1140 // attempt to shrink to-space.
1141 if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1142 // We are in an inconsistent state because we could not
1143 // commit/uncommit memory from new space.
1144 V8::FatalProcessOutOfMemory("Failed to grow new space.");
1148 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1152 void NewSpace::Shrink() {
1153 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
1154 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1155 if (rounded_new_capacity < Capacity() &&
1156 to_space_.ShrinkTo(rounded_new_capacity)) {
1157 // Only shrink from-space if we managed to shrink to-space.
1158 from_space_.Reset();
1159 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1160 // If we managed to shrink to-space but couldn't shrink from
1161 // space, attempt to grow to-space again.
1162 if (!to_space_.GrowTo(from_space_.Capacity())) {
1163 // We are in an inconsistent state because we could not
1164 // commit/uncommit memory from new space.
1165 V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1169 allocation_info_.limit = to_space_.page_high();
1170 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1174 void NewSpace::UpdateAllocationInfo() {
1175 allocation_info_.top = to_space_.page_low();
1176 allocation_info_.limit = to_space_.page_high();
1178 // Lower limit during incremental marking.
1179 if (heap()->incremental_marking()->IsMarking() &&
1180 inline_allocation_limit_step() != 0) {
1182 allocation_info_.top + inline_allocation_limit_step();
1183 allocation_info_.limit = Min(new_limit, allocation_info_.limit);
1185 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
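// A small worked example of the limit lowering above (all numbers
// hypothetical): with top at the start of a page, a page high of 512KB and an
// inline allocation limit step of 64KB, the limit becomes
// min(top + 64KB, top + 512KB) = top + 64KB. Bump-pointer allocation therefore
// falls into SlowAllocateRaw after at most 64KB, giving incremental marking a
// chance to do a step before the limit is raised again.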
1189 void NewSpace::ResetAllocationInfo() {
1191 UpdateAllocationInfo();
1193 // Clear all mark-bits in the to-space.
1194 NewSpacePageIterator it(&to_space_);
1195 while (it.has_next()) {
1196 Bitmap::Clear(it.next());
1201 bool NewSpace::AddFreshPage() {
1202 Address top = allocation_info_.top;
1203 if (NewSpacePage::IsAtStart(top)) {
1204 // The current page is already empty. Don't try to make another.
1206 // We should only get here if someone asks to allocate more
1207 // than what can be stored in a single page.
1208 // TODO(gc): Change the limit on new-space allocation to prevent this
1209 // from happening (all such allocations should go directly to LOSpace).
1212 if (!to_space_.AdvancePage()) {
1213 // Failed to get a new page in to-space.
1217 // Clear remainder of current page.
1218 Address limit = NewSpacePage::FromLimit(top)->area_end();
1219 if (heap()->gc_state() == Heap::SCAVENGE) {
1220 heap()->promotion_queue()->SetNewLimit(limit);
1221 heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
1224 int remaining_in_page = static_cast<int>(limit - top);
1225 heap()->CreateFillerObjectAt(top, remaining_in_page);
1227 UpdateAllocationInfo();
1233 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
1234 Address old_top = allocation_info_.top;
1235 Address new_top = old_top + size_in_bytes;
1236 Address high = to_space_.page_high();
1237 if (allocation_info_.limit < high) {
1238 // Incremental marking has lowered the limit to get a
1239 // chance to do a step.
1240 allocation_info_.limit = Min(
1241 allocation_info_.limit + inline_allocation_limit_step_,
1243 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1244 heap()->incremental_marking()->Step(
1245 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1246 top_on_previous_step_ = new_top;
1247 return AllocateRaw(size_in_bytes);
1248 } else if (AddFreshPage()) {
1249 // Switched to new page. Try allocating again.
1250 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1251 heap()->incremental_marking()->Step(
1252 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1253 top_on_previous_step_ = to_space_.page_low();
1254 return AllocateRaw(size_in_bytes);
1256 return Failure::RetryAfterGC();
1262 // We do not use the SemiSpaceIterator because verification doesn't assume
1263 // that it works (it depends on the invariants we are checking).
1264 void NewSpace::Verify() {
1265 // The allocation pointer should be in the space or at the very end.
1266 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1268 // There should be objects packed in from the low address up to the
1269 // allocation pointer.
1270 Address current = to_space_.first_page()->area_start();
1271 CHECK_EQ(current, to_space_.space_start());
1273 while (current != top()) {
1274 if (!NewSpacePage::IsAtEnd(current)) {
1275 // The allocation pointer should not be in the middle of an object.
1276 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1279 HeapObject* object = HeapObject::FromAddress(current);
1281 // The first word should be a map, and we expect all map pointers to be in the heap's map space.
1283 Map* map = object->map();
1284 CHECK(map->IsMap());
1285 CHECK(heap()->map_space()->Contains(map));
1287 // The object should not be code or a map.
1288 CHECK(!object->IsMap());
1289 CHECK(!object->IsCode());
1291 // The object itself should look OK.
1294 // All the interior pointers should be contained in the heap.
1295 VerifyPointersVisitor visitor;
1296 int size = object->Size();
1297 object->IterateBody(map->instance_type(), size, &visitor);
1301 // At end of page, switch to next page.
1302 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1303 // Next page should be valid.
1304 CHECK(!page->is_anchor());
1305 current = page->area_start();
1309 // Check semi-spaces.
1310 ASSERT_EQ(from_space_.id(), kFromSpace);
1311 ASSERT_EQ(to_space_.id(), kToSpace);
1312 from_space_.Verify();
1317 // -----------------------------------------------------------------------------
1318 // SemiSpace implementation
1320 void SemiSpace::SetUp(Address start,
1321 int initial_capacity,
1322 int maximum_capacity) {
1323 // Creates a space in the young generation. The constructor does not
1324 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1325 // memory of size 'capacity' when set up, and does not grow or shrink
1326 // otherwise. In the mark-compact collector, the memory region of the from
1327 // space is used as the marking stack. It requires contiguous memory addresses.
1329 ASSERT(maximum_capacity >= Page::kPageSize);
1330 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1331 capacity_ = initial_capacity;
1332 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1335 address_mask_ = ~(maximum_capacity - 1);
1336 object_mask_ = address_mask_ | kHeapObjectTagMask;
1337 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1342 void SemiSpace::TearDown() {
1348 bool SemiSpace::Commit() {
1349 ASSERT(!is_committed());
1350 int pages = capacity_ / Page::kPageSize;
1351 Address end = start_ + maximum_capacity_;
1352 Address start = end - pages * Page::kPageSize;
1353 if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
1359 NewSpacePage* page = anchor();
1360 for (int i = 1; i <= pages; i++) {
1361 NewSpacePage* new_page =
1362 NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
1363 new_page->InsertAfter(page);
1373 bool SemiSpace::Uncommit() {
1374 ASSERT(is_committed());
1375 Address start = start_ + maximum_capacity_ - capacity_;
1376 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
1379 anchor()->set_next_page(anchor());
1380 anchor()->set_prev_page(anchor());
1387 bool SemiSpace::GrowTo(int new_capacity) {
1388 if (!is_committed()) {
1389 if (!Commit()) return false;
1391 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1392 ASSERT(new_capacity <= maximum_capacity_);
1393 ASSERT(new_capacity > capacity_);
1394 int pages_before = capacity_ / Page::kPageSize;
1395 int pages_after = new_capacity / Page::kPageSize;
1397 Address end = start_ + maximum_capacity_;
1398 Address start = end - new_capacity;
1399 size_t delta = new_capacity - capacity_;
1401 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1402 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1403 start, delta, executable())) {
1406 capacity_ = new_capacity;
1407 NewSpacePage* last_page = anchor()->prev_page();
1408 ASSERT(last_page != anchor());
1409 for (int i = pages_before + 1; i <= pages_after; i++) {
1410 Address page_address = end - i * Page::kPageSize;
1411 NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
1414 new_page->InsertAfter(last_page);
1415 Bitmap::Clear(new_page);
1416 // Duplicate the flags that were set on the old page.
1417 new_page->SetFlags(last_page->GetFlags(),
1418 NewSpacePage::kCopyOnFlipFlagsMask);
1419 last_page = new_page;
1425 bool SemiSpace::ShrinkTo(int new_capacity) {
1426 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1427 ASSERT(new_capacity >= initial_capacity_);
1428 ASSERT(new_capacity < capacity_);
1429 if (is_committed()) {
1430 // Semispaces grow backwards from the end of their allocated capacity,
1431 // so we find the before and after start addresses relative to the
1432 // end of the space.
1433 Address space_end = start_ + maximum_capacity_;
1434 Address old_start = space_end - capacity_;
1435 size_t delta = capacity_ - new_capacity;
1436 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1438 MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1439 if (!allocator->UncommitBlock(old_start, delta)) {
1443 int pages_after = new_capacity / Page::kPageSize;
1444 NewSpacePage* new_last_page =
1445 NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
1446 new_last_page->set_next_page(anchor());
1447 anchor()->set_prev_page(new_last_page);
1448 ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
1451 capacity_ = new_capacity;
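// A short worked example of the grow-backwards layout used above (sizes are
// hypothetical): with maximum_capacity_ = 8MB and capacity_ = 2MB, the
// committed pages occupy [start_ + 6MB, start_ + 8MB). Shrinking to 1MB gives
// delta = 1MB and old_start = start_ + 6MB, so that 1MB is uncommitted and
// [start_ + 7MB, start_ + 8MB) stays committed.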
1457 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1458 anchor_.set_owner(this);
1459 // Fix up back-pointers to the anchor. The anchor's address changes when we swap.
1461 anchor_.prev_page()->set_next_page(&anchor_);
1462 anchor_.next_page()->set_prev_page(&anchor_);
1464 bool becomes_to_space = (id_ == kFromSpace);
1465 id_ = becomes_to_space ? kToSpace : kFromSpace;
1466 NewSpacePage* page = anchor_.next_page();
1467 while (page != &anchor_) {
1468 page->set_owner(this);
1469 page->SetFlags(flags, mask);
1470 if (becomes_to_space) {
1471 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1472 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1473 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1474 page->ResetLiveBytes();
1476 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1477 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1479 ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1480 ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1481 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1482 page = page->next_page();
1487 void SemiSpace::Reset() {
1488 ASSERT(anchor_.next_page() != &anchor_);
1489 current_page_ = anchor_.next_page();
1493 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1494 // We won't be swapping semispaces without data in them.
1495 ASSERT(from->anchor_.next_page() != &from->anchor_);
1496 ASSERT(to->anchor_.next_page() != &to->anchor_);
1499 SemiSpace tmp = *from;
1503 // Fix up back-pointers to the page list anchor now that its address has changed.
1505 // Swap to/from-space bits on pages.
1506 // Copy GC flags from old active space (from-space) to new (to-space).
1507 intptr_t flags = from->current_page()->GetFlags();
1508 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1510 from->FlipPages(0, 0);
1514 void SemiSpace::set_age_mark(Address mark) {
1515 ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
1517 // Mark all pages up to the one containing mark.
1518 NewSpacePageIterator it(space_start(), mark);
1519 while (it.has_next()) {
1520 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1526 void SemiSpace::Print() { }
1529 void SemiSpace::Verify() {
1530 bool is_from_space = (id_ == kFromSpace);
1531 NewSpacePage* page = anchor_.next_page();
1532 CHECK(anchor_.semi_space() == this);
1533 while (page != &anchor_) {
1534 CHECK(page->semi_space() == this);
1535 CHECK(page->InNewSpace());
1536 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1537 : MemoryChunk::IN_TO_SPACE));
1538 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1539 : MemoryChunk::IN_FROM_SPACE));
1540 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1541 if (!is_from_space) {
1542 // The pointers-from-here-are-interesting flag isn't updated dynamically
1543 // on from-space pages, so it might be out of sync with the marking state.
1544 if (page->heap()->incremental_marking()->IsMarking()) {
1545 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1547 CHECK(!page->IsFlagSet(
1548 MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1550 // TODO(gc): Check that the live_bytes_count_ field matches the
1551 // black marking on the page (if we make it match in new-space).
1553 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1554 CHECK(page->prev_page()->next_page() == page);
1555 page = page->next_page();
1560 void SemiSpace::AssertValidRange(Address start, Address end) {
1561 // Both addresses must belong to the same semi-space.
1562 NewSpacePage* page = NewSpacePage::FromLimit(start);
1563 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1564 SemiSpace* space = page->semi_space();
1565 CHECK_EQ(space, end_page->semi_space());
1566 // Start address is before end address, either on same page,
1567 // or end address is on a later page in the linked list of
1568 // semi-space pages.
1569 if (page == end_page) {
1570 CHECK(start <= end);
1572 while (page != end_page) {
1573 page = page->next_page();
1574 CHECK_NE(page, space->anchor());
1581 // -----------------------------------------------------------------------------
1582 // SemiSpaceIterator implementation.
1583 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1584 Initialize(space->bottom(), space->top(), NULL);
1588 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1589 HeapObjectCallback size_func) {
1590 Initialize(space->bottom(), space->top(), size_func);
1594 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1595 Initialize(start, space->top(), NULL);
1599 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1600 Initialize(from, to, NULL);
1604 void SemiSpaceIterator::Initialize(Address start,
1606 HeapObjectCallback size_func) {
1607 SemiSpace::AssertValidRange(start, end);
1610 size_func_ = size_func;
1615 // heap_histograms is shared; always clear it before using it.
1616 static void ClearHistograms() {
1617 Isolate* isolate = Isolate::Current();
1618 // We reset the name each time, though it hasn't changed.
1619 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1620 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1621 #undef DEF_TYPE_NAME
1623 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1624 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1625 #undef CLEAR_HISTOGRAM
1627 isolate->js_spill_information()->Clear();
1631 static void ClearCodeKindStatistics() {
1632 Isolate* isolate = Isolate::Current();
1633 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1634 isolate->code_kind_statistics()[i] = 0;
1639 static void ReportCodeKindStatistics() {
1640 Isolate* isolate = Isolate::Current();
1641 const char* table[Code::NUMBER_OF_KINDS] = { NULL };
1643 #define CASE(name) \
1644 case Code::name: table[Code::name] = #name; \
1647 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1648 switch (static_cast<Code::Kind>(i)) {
1650 CASE(OPTIMIZED_FUNCTION);
1654 CASE(KEYED_LOAD_IC);
1656 CASE(KEYED_STORE_IC);
1658 CASE(KEYED_CALL_IC);
1662 CASE(TO_BOOLEAN_IC);
1668 PrintF("\n Code kind histograms: \n");
1669 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1670 if (isolate->code_kind_statistics()[i] > 0) {
1671 PrintF(" %-20s: %10d bytes\n", table[i],
1672 isolate->code_kind_statistics()[i]);
1679 static int CollectHistogramInfo(HeapObject* obj) {
1680 Isolate* isolate = Isolate::Current();
1681 InstanceType type = obj->map()->instance_type();
1682 ASSERT(0 <= type && type <= LAST_TYPE);
1683 ASSERT(isolate->heap_histograms()[type].name() != NULL);
1684 isolate->heap_histograms()[type].increment_number(1);
1685 isolate->heap_histograms()[type].increment_bytes(obj->Size());
1687 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1688 JSObject::cast(obj)->IncrementSpillStatistics(
1689 isolate->js_spill_information());
1696 static void ReportHistogram(bool print_spill) {
1697 Isolate* isolate = Isolate::Current();
1698 PrintF("\n Object Histogram:\n");
1699 for (int i = 0; i <= LAST_TYPE; i++) {
1700 if (isolate->heap_histograms()[i].number() > 0) {
1701 PrintF(" %-34s%10d (%10d bytes)\n",
1702 isolate->heap_histograms()[i].name(),
1703 isolate->heap_histograms()[i].number(),
1704 isolate->heap_histograms()[i].bytes());
1709 // Summarize string types.
1710 int string_number = 0;
1711 int string_bytes = 0;
1712 #define INCREMENT(type, size, name, camel_name) \
1713 string_number += isolate->heap_histograms()[type].number(); \
1714 string_bytes += isolate->heap_histograms()[type].bytes();
1715 STRING_TYPE_LIST(INCREMENT)
1717 if (string_number > 0) {
1718 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1722 if (FLAG_collect_heap_spill_statistics && print_spill) {
1723 isolate->js_spill_information()->Print();
1729 // Support for statistics gathering for --heap-stats and --log-gc.
1730 void NewSpace::ClearHistograms() {
1731 for (int i = 0; i <= LAST_TYPE; i++) {
1732 allocated_histogram_[i].clear();
1733 promoted_histogram_[i].clear();
1737 // Because the copying collector does not touch garbage objects, we iterate
1738 // the new space before a collection to get a histogram of allocated objects.
1739 // This only happens when --log-gc flag is set.
1740 void NewSpace::CollectStatistics() {
1742 SemiSpaceIterator it(this);
1743 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1744 RecordAllocation(obj);
1748 static void DoReportStatistics(Isolate* isolate,
1749 HistogramInfo* info, const char* description) {
1750 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1751 // Lump all the string types together.
1752 int string_number = 0;
1753 int string_bytes = 0;
1754 #define INCREMENT(type, size, name, camel_name) \
1755 string_number += info[type].number(); \
1756 string_bytes += info[type].bytes();
1757 STRING_TYPE_LIST(INCREMENT)
1759 if (string_number > 0) {
1761 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1764 // Then do the other types.
1765 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1766 if (info[i].number() > 0) {
1768 HeapSampleItemEvent(info[i].name(), info[i].number(),
1772 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1776 void NewSpace::ReportStatistics() {
1778 if (FLAG_heap_stats) {
1779 float pct = static_cast<float>(Available()) / Capacity();
1780 PrintF(" capacity: %" V8_PTR_PREFIX "d"
1781 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
1782 Capacity(), Available(), static_cast<int>(pct*100));
1783 PrintF("\n Object Histogram:\n");
1784 for (int i = 0; i <= LAST_TYPE; i++) {
1785 if (allocated_histogram_[i].number() > 0) {
1786 PrintF(" %-34s%10d (%10d bytes)\n",
1787 allocated_histogram_[i].name(),
1788 allocated_histogram_[i].number(),
1789 allocated_histogram_[i].bytes());
1797 Isolate* isolate = ISOLATE;
1798 DoReportStatistics(isolate, allocated_histogram_, "allocated");
1799 DoReportStatistics(isolate, promoted_histogram_, "promoted");
1804 void NewSpace::RecordAllocation(HeapObject* obj) {
1805 InstanceType type = obj->map()->instance_type();
1806 ASSERT(0 <= type && type <= LAST_TYPE);
1807 allocated_histogram_[type].increment_number(1);
1808 allocated_histogram_[type].increment_bytes(obj->Size());
1812 void NewSpace::RecordPromotion(HeapObject* obj) {
1813 InstanceType type = obj->map()->instance_type();
1814 ASSERT(0 <= type && type <= LAST_TYPE);
1815 promoted_histogram_[type].increment_number(1);
1816 promoted_histogram_[type].increment_bytes(obj->Size());
1819 // -----------------------------------------------------------------------------
1820 // Free lists for old object spaces implementation
1822 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
1823 ASSERT(size_in_bytes > 0);
1824 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1826 // We write a map and possibly size information to the block. If the block
1827 // is big enough to be a FreeSpace with at least one extra word (the next
1828 // pointer), we set its map to be the free space map and its size to an
1829 // appropriate array length for the desired size from HeapObject::Size().
1830 // If the block is too small (e.g. one or two words) to hold both a size
1831 // field and a next pointer, we give it a filler map that gives it the correct size.
1833 if (size_in_bytes > FreeSpace::kHeaderSize) {
1834 set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
1835 // Can't use FreeSpace::cast because it fails during deserialization.
1836 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1837 this_as_free_space->set_size(size_in_bytes);
1838 } else if (size_in_bytes == kPointerSize) {
1839 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
1840 } else if (size_in_bytes == 2 * kPointerSize) {
1841 set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
1845 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1846 // deserialization because the free space map is not done yet.
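// A small worked example of the encoding above, assuming 32-bit pointers and
// the two-word FreeSpace header: a 4-byte block gets the one-pointer filler
// map, an 8-byte block the two-pointer filler map, and a 12-byte or larger
// block gets the free space map plus an explicit size field, so
// HeapObject::Size() recovers size_in_bytes in every case.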
1850 FreeListNode* FreeListNode::next() {
1851 ASSERT(IsFreeListNode(this));
1852 if (map() == HEAP->raw_unchecked_free_space_map()) {
1853 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1854 return reinterpret_cast<FreeListNode*>(
1855 Memory::Address_at(address() + kNextOffset));
1857 return reinterpret_cast<FreeListNode*>(
1858 Memory::Address_at(address() + kPointerSize));
1863 FreeListNode** FreeListNode::next_address() {
1864 ASSERT(IsFreeListNode(this));
1865 if (map() == HEAP->raw_unchecked_free_space_map()) {
1866 ASSERT(Size() >= kNextOffset + kPointerSize);
1867 return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
1869 return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
1874 void FreeListNode::set_next(FreeListNode* next) {
1875 ASSERT(IsFreeListNode(this));
1876 // While we are booting the VM the free space map will actually be null. So
1877 // we have to make sure that we don't try to use it for anything at that stage.
1879 if (map() == HEAP->raw_unchecked_free_space_map()) {
1880 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1881 Memory::Address_at(address() + kNextOffset) =
1882 reinterpret_cast<Address>(next);
1884 Memory::Address_at(address() + kPointerSize) =
1885 reinterpret_cast<Address>(next);
1890 FreeList::FreeList(PagedSpace* owner)
1891 : owner_(owner), heap_(owner->heap()) {
1896 void FreeList::Reset() {
1899 medium_list_ = NULL;
1905 int FreeList::Free(Address start, int size_in_bytes) {
1906 if (size_in_bytes == 0) return 0;
1907 FreeListNode* node = FreeListNode::FromAddress(start);
1908 node->set_size(heap_, size_in_bytes);
1910 // Early return to drop too-small blocks on the floor.
1911 if (size_in_bytes < kSmallListMin) return size_in_bytes;
1913 // Insert other blocks at the head of a free list of the appropriate magnitude.
1915 if (size_in_bytes <= kSmallListMax) {
1916 node->set_next(small_list_);
1918 } else if (size_in_bytes <= kMediumListMax) {
1919 node->set_next(medium_list_);
1920 medium_list_ = node;
1921 } else if (size_in_bytes <= kLargeListMax) {
1922 node->set_next(large_list_);
1925 node->set_next(huge_list_);
1928 available_ += size_in_bytes;
1929 ASSERT(IsVeryLong() || available_ == SumFreeLists());
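// A worked example of the size-class dispatch above (the thresholds here are
// illustrative; the real kSmallListMax/kMediumListMax/kLargeListMax constants
// are defined in spaces.h): with limits of 256 bytes, 2KB and 16KB, a 48-byte
// block goes on the small list, a 1KB block on the medium list, a 10KB block
// on the large list, and anything larger on the huge list. Blocks smaller
// than kSmallListMin are simply left in place and not linked into any list.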
1934 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
1935 FreeListNode* node = *list;
1937 if (node == NULL) return NULL;
1939 while (node != NULL &&
1940 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
1941 available_ -= node->Size();
1942 node = node->next();
1946 *node_size = node->Size();
1947 *list = node->next();
1956 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
1957 FreeListNode* node = NULL;
1959 if (size_in_bytes <= kSmallAllocationMax) {
1960 node = PickNodeFromList(&small_list_, node_size);
1961 if (node != NULL) return node;
1964 if (size_in_bytes <= kMediumAllocationMax) {
1965 node = PickNodeFromList(&medium_list_, node_size);
1966 if (node != NULL) return node;
1969 if (size_in_bytes <= kLargeAllocationMax) {
1970 node = PickNodeFromList(&large_list_, node_size);
1971 if (node != NULL) return node;
1974 for (FreeListNode** cur = &huge_list_;
1976 cur = (*cur)->next_address()) {
1977 FreeListNode* cur_node = *cur;
1978 while (cur_node != NULL &&
1979 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
1980 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
1981 cur_node = cur_node->next();
1985 if (cur_node == NULL) break;
1987 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
1988 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
1989 int size = cur_as_free_space->Size();
1990 if (size >= size_in_bytes) {
1991 // Large enough node found. Unlink it from the list.
1994 *cur = node->next();
2003 // Allocation on the old space free list. If it succeeds then a new linear
2004 // allocation space has been set up with the top and limit of the space. If
2005 // the allocation fails then NULL is returned, and the caller can perform a GC
2006 // or allocate a new page before retrying.
2007 HeapObject* FreeList::Allocate(int size_in_bytes) {
2008 ASSERT(0 < size_in_bytes);
2009 ASSERT(size_in_bytes <= kMaxBlockSize);
2010 ASSERT(IsAligned(size_in_bytes, kPointerSize));
2011 // Don't free list allocate if there is linear space available.
2012 ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
2014 int new_node_size = 0;
2015 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2016 if (new_node == NULL) return NULL;
2018 available_ -= new_node_size;
2019 ASSERT(IsVeryLong() || available_ == SumFreeLists());
2021 int bytes_left = new_node_size - size_in_bytes;
2022 ASSERT(bytes_left >= 0);
2024 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2025 // Mark the old linear allocation area with a free space map so it can be
2026 // skipped when scanning the heap. This also puts it back in the free list
2027 // if it is big enough.
2028 owner_->Free(owner_->top(), old_linear_size);
2031 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2032 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
2036 owner_->heap()->incremental_marking()->OldSpaceStep(
2037 size_in_bytes - old_linear_size);
2039 // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
2042 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2044 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2046 // Memory in the linear allocation area is counted as allocated. We may free
2047 // a little of this again immediately - see below.
2048 owner_->Allocate(new_node_size);
2050 if (bytes_left > kThreshold &&
2051 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2052 FLAG_incremental_marking_steps) {
2053 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2054 // We don't want to give too large linear areas to the allocator while
2055 // incremental marking is going on, because we won't check again whether
2056 // we want to do another increment until the linear area is used up.
2057 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2058 new_node_size - size_in_bytes - linear_size);
2059 owner_->SetTop(new_node->address() + size_in_bytes,
2060 new_node->address() + size_in_bytes + linear_size);
2061 } else if (bytes_left > 0) {
2062 // Normally we give the rest of the node to the allocator as its new
2063 // linear allocation area.
2064 owner_->SetTop(new_node->address() + size_in_bytes,
2065 new_node->address() + new_node_size);
  } else {
    // TODO(gc) Try not freeing linear allocation region when bytes_left
    // is zero.
    owner_->SetTop(NULL, NULL);
  }

  return new_node;
}
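
// A minimal caller-side sketch of the contract documented above (illustrative
// only; PagedSpace::SlowAllocateRaw further down is the real caller): on a
// NULL result the caller is expected to sweep, expand or collect and retry,
// e.g.
//
//   HeapObject* result = free_list_.Allocate(size_in_bytes);
//   if (result == NULL) {
//     // ... perform a GC or add a fresh page to the space ...
//     result = free_list_.Allocate(size_in_bytes);
//   }
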
static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
  intptr_t sum = 0;
  while (n != NULL) {
    if (Page::FromAddress(n->address()) == p) {
      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
      sum += free_space->Size();
    }
    n = n->next();
  }
  return sum;
}
void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
  sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
  if (sizes->huge_size_ < p->area_size()) {
    sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
    sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
    sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
  } else {
    sizes->small_size_ = 0;
    sizes->medium_size_ = 0;
    sizes->large_size_ = 0;
  }
}
static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
  intptr_t sum = 0;
  while (*n != NULL) {
    if (Page::FromAddress((*n)->address()) == p) {
      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
      sum += free_space->Size();
      *n = (*n)->next();
    } else {
      n = (*n)->next_address();
    }
  }
  return sum;
}
intptr_t FreeList::EvictFreeListItems(Page* p) {
  intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);

  if (sum < p->area_size()) {
    sum += EvictFreeListItemsInList(&small_list_, p) +
           EvictFreeListItemsInList(&medium_list_, p) +
           EvictFreeListItemsInList(&large_list_, p);
  }

  available_ -= static_cast<int>(sum);

  return sum;
}
intptr_t FreeList::SumFreeList(FreeListNode* cur) {
  intptr_t sum = 0;
  while (cur != NULL) {
    ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
    sum += cur_as_free_space->Size();
    cur = cur->next();
  }
  return sum;
}
2146 static const int kVeryLongFreeList = 500;
int FreeList::FreeListLength(FreeListNode* cur) {
  int length = 0;
  while (cur != NULL) {
    length++;
    cur = cur->next();
    if (length == kVeryLongFreeList) return length;
  }
  return length;
}
2160 bool FreeList::IsVeryLong() {
2161 if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
2162 if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
2163 if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
  if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
  return false;
}
2169 // This can take a very long time because it is linear in the number of entries
2170 // on the free list, so it should not be called if FreeListLength returns
2171 // kVeryLongFreeList.
2172 intptr_t FreeList::SumFreeLists() {
2173 intptr_t sum = SumFreeList(small_list_);
2174 sum += SumFreeList(medium_list_);
2175 sum += SumFreeList(large_list_);
  sum += SumFreeList(huge_list_);
  return sum;
}
2182 // -----------------------------------------------------------------------------
2183 // OldSpace implementation
2185 bool NewSpace::ReserveSpace(int bytes) {
2186 // We can't reliably unpack a partial snapshot that needs more new space
2187 // space than the minimum NewSpace size. The limit can be set lower than
2188 // the end of new space either because there is more space on the next page
2189 // or because we have lowered the limit in order to get periodic incremental
2190 // marking. The most reliable way to ensure that there is linear space is
2191 // to do the allocation, then rewind the limit.
2192 ASSERT(bytes <= InitialCapacity());
2193 MaybeObject* maybe = AllocateRaw(bytes);
2194 Object* object = NULL;
2195 if (!maybe->ToObject(&object)) return false;
2196 HeapObject* allocation = HeapObject::cast(object);
2197 Address top = allocation_info_.top;
2198 if ((top - bytes) == allocation->address()) {
    allocation_info_.top = allocation->address();
    return true;
  }
  // There may be a borderline case here where the allocation succeeded, but
  // the limit and top have moved on to a new page.  In that case we try again.
  return ReserveSpace(bytes);
}
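
// The allocate-then-rewind trick above, spelled out (illustrative only): if
// AllocateRaw hands back the block that ends exactly at the current top, the
// reservation is made permanent by moving top back over it:
//
//   Before:  ... used ... | reserved bytes | top
//   After:   ... used ... | top   (the reserved bytes stay as linear space)
//
// Otherwise top has crossed onto a new page and the request is retried there.
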
2208 void PagedSpace::PrepareForMarkCompact() {
2209 // We don't have a linear allocation area while sweeping. It will be restored
2210 // on the first allocation after the sweep.
2211 // Mark the old linear allocation area with a free space map so it can be
2212 // skipped when scanning the heap.
2213 int old_linear_size = static_cast<int>(limit() - top());
  Free(top(), old_linear_size);
  SetTop(NULL, NULL);
2217 // Stop lazy sweeping and clear marking bits for unswept pages.
2218 if (first_unswept_page_ != NULL) {
    Page* p = first_unswept_page_;
    do {
2221 // Do not use ShouldBeSweptLazily predicate here.
2222 // New evacuation candidates were selected but they still have
2223 // to be swept before collection starts.
      if (!p->WasSwept()) {
        Bitmap::Clear(p);
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
                 reinterpret_cast<intptr_t>(p));
        }
      }
      p = p->next_page();
    } while (p != anchor());
  }
  first_unswept_page_ = Page::FromAddress(NULL);
  unswept_free_bytes_ = 0;

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}
2242 bool PagedSpace::ReserveSpace(int size_in_bytes) {
2243 ASSERT(size_in_bytes <= AreaSize());
2244 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
2245 Address current_top = allocation_info_.top;
2246 Address new_top = current_top + size_in_bytes;
2247 if (new_top <= allocation_info_.limit) return true;
2249 HeapObject* new_area = free_list_.Allocate(size_in_bytes);
2250 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
2251 if (new_area == NULL) return false;
2253 int old_linear_size = static_cast<int>(limit() - top());
  // Mark the old linear allocation area with a free space map so it can be
2255 // skipped when scanning the heap. This also puts it back in the free list
2256 // if it is big enough.
2257 Free(top(), old_linear_size);
2259 SetTop(new_area->address(), new_area->address() + size_in_bytes);
  Allocate(size_in_bytes);
  return true;
}
2265 // You have to call this last, since the implementation from PagedSpace
2266 // doesn't know that memory was 'promised' to large object space.
2267 bool LargeObjectSpace::ReserveSpace(int bytes) {
2268 return heap()->OldGenerationCapacityAvailable() >= bytes &&
2269 (!heap()->incremental_marking()->IsStopped() ||
          heap()->OldGenerationSpaceAvailable() >= bytes);
}
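
// Why this must be the last ReserveSpace call (sketch of the intended call
// pattern; the space variable names are illustrative): the paged spaces back
// their reservations with real pages, which the capacity check above then
// observes, whereas this check records nothing, so a later
// PagedSpace::ReserveSpace would not know about memory already 'promised' to
// large objects.  A caller reserving for several spaces therefore does
// something like
//
//   if (!old_pointer_space->ReserveSpace(a)) return false;
//   if (!old_data_space->ReserveSpace(b)) return false;
//   // ... other paged spaces ...
//   return lo_space->ReserveSpace(c);  // large object space last
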
2274 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
2275 if (IsSweepingComplete()) return true;
2277 intptr_t freed_bytes = 0;
  Page* p = first_unswept_page_;
  do {
2280 Page* next_page = p->next_page();
2281 if (ShouldBeSweptLazily(p)) {
2282 if (FLAG_gc_verbose) {
2283 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
               reinterpret_cast<intptr_t>(p));
      }
      DecreaseUnsweptFreeBytes(p);
      freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
    }
    p = next_page;
  } while (p != anchor() && freed_bytes < bytes_to_sweep);
  if (p == anchor()) {
    first_unswept_page_ = Page::FromAddress(NULL);
  } else {
    first_unswept_page_ = p;
  }
2298 heap()->FreeQueuedChunks();
  return IsSweepingComplete();
}
2304 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2305 if (allocation_info_.top >= allocation_info_.limit) return;
2307 if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
    // Create filler object to keep page iterable if it was iterable.
    int remaining =
        static_cast<int>(allocation_info_.limit - allocation_info_.top);
2311 heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
2313 allocation_info_.top = NULL;
    allocation_info_.limit = NULL;
  }
}
2319 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2320 // Allocation in this space has failed.
2322 // If there are unswept pages advance lazy sweeper then sweep one page before
2323 // allocating a new page.
2324 if (first_unswept_page_->is_valid()) {
2325 AdvanceSweeper(size_in_bytes);
2327 // Retry the free list allocation.
2328 HeapObject* object = free_list_.Allocate(size_in_bytes);
    if (object != NULL) return object;
  }
  // Free list allocation failed and there is no next page.  Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return NULL;
  }
  // Try to expand the space and allocate in the new next page.
  if (Expand()) {
    return free_list_.Allocate(size_in_bytes);
  }
  // Last ditch, sweep all the remaining pages to try to find space.  This may
  // cause a pause.
2347 if (!IsSweepingComplete()) {
2348 AdvanceSweeper(kMaxInt);
2350 // Retry the free list allocation.
2351 HeapObject* object = free_list_.Allocate(size_in_bytes);
    if (object != NULL) return object;
  }

  // Finally, fail.
  return NULL;
}
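
// The slow path above is a fixed fallback ladder (summary only, not extra
// V8 code):
//
//   1. advance the lazy sweeper by roughly size_in_bytes and retry the free
//      list;
//   2. give up if the old-generation limit says a collection is due;
//   3. expand the space with a fresh page and allocate there;
//   4. sweep everything that is left and retry once more;
//   5. return NULL so the caller triggers a garbage collection.
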
2361 void PagedSpace::ReportCodeStatistics() {
2362 Isolate* isolate = Isolate::Current();
2363 CommentStatistic* comments_statistics =
2364 isolate->paged_space_comments_statistics();
2365 ReportCodeKindStatistics();
  PrintF("Code comment statistics (\" [ comment-txt : size/ "
         "count (average)\"):\n");
  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    if (cs.size > 0) {
      PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
             cs.size / cs.count);
    }
  }
  PrintF("\n");
}
2379 void PagedSpace::ResetCodeStatistics() {
2380 Isolate* isolate = Isolate::Current();
2381 CommentStatistic* comments_statistics =
2382 isolate->paged_space_comments_statistics();
2383 ClearCodeKindStatistics();
2384 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    comments_statistics[i].Clear();
  }
  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
  comments_statistics[CommentStatistic::kMaxComments].size = 0;
  comments_statistics[CommentStatistic::kMaxComments].count = 0;
}
2393 // Adds comment to 'comment_statistics' table. Performance OK as long as
2394 // 'kMaxComments' is small
2395 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2396 CommentStatistic* comments_statistics =
2397 isolate->paged_space_comments_statistics();
2398 // Do not count empty comments
2399 if (delta <= 0) return;
2400 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2401 // Search for a free or matching entry in 'comments_statistics': 'cs'
2402 // points to result.
2403 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2404 if (comments_statistics[i].comment == NULL) {
2405 cs = &comments_statistics[i];
      cs->comment = comment;
      break;
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
      break;
    }
  }
  // Update entry for 'comment'
  cs->size += delta;
  cs->count += 1;
}
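
// How the table above behaves (illustrative walk-through, not extra V8
// code): with, say, kMaxComments == 64, the first 64 distinct comment
// strings each get their own slot; any further string falls through to the
// shared "Unknown" slot set up in ResetCodeStatistics().  A call such as
// EnterComment(isolate, "CallIC", 12) either creates
// {comment: "CallIC", size: 12, count: 1} or bumps an existing slot to
// {size += 12, count += 1}.
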
// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']').  RelocIterator 'it' must point to a comment reloc info.
2421 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2422 ASSERT(!it->done());
2423 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2424 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2425 if (tmp[0] != '[') {
    // Not a nested comment; skip
    return;
  }
2430 // Search for end of nested comment or a new nested comment
2431 const char* const comment_txt =
2432 reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, and therefore exit
    // from the loop.
    ASSERT(!it->done());
2440 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2441 const char* const txt =
2442 reinterpret_cast<const char*>(it->rinfo()->data());
2443 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested comment
      // A new nested comment: recurse to attribute its code to it.
      CollectCommentStatistics(isolate, it);
      // Skip code that was covered with previous comment
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(isolate, comment_txt, flat_delta);
}
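
// Example of the comment nesting this walks (illustrative labels only): for
// code emitted with the reloc comments
//
//   "[ LoadIC"                <- outer start, this function is entered
//   "[ inlined smi check"     <- nested start, handled by the recursive call
//   "]"                       <- closes the nested comment
//   "]"                       <- closes "[ LoadIC"
//
// the bytes between the outer markers, minus the bytes already attributed to
// the nested comment, are recorded for "[ LoadIC" via EnterComment().
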
// Collects code size statistics:
// - by code kind
// - by code comment
2459 void PagedSpace::CollectCodeStatistics() {
2460 Isolate* isolate = heap()->isolate();
2461 HeapObjectIterator obj_it(this);
2462 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2463 if (obj->IsCode()) {
2464 Code* code = Code::cast(obj);
2465 isolate->code_kind_statistics()[code->kind()] += code->Size();
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
2469 while (!it.done()) {
2470 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2471 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2472 CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }
      ASSERT(code->instruction_start() <= prev_pc &&
2479 prev_pc <= code->instruction_end());
2480 delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);
    }
  }
}
2487 void PagedSpace::ReportStatistics() {
2488 int pct = static_cast<int>(Available() * 100 / Capacity());
2489 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2490 ", waste: %" V8_PTR_PREFIX "d"
2491 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2492 Capacity(), Waste(), Available(), pct);
2494 if (was_swept_conservatively_) return;
2496 HeapObjectIterator obj_it(this);
2497 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2498 CollectHistogramInfo(obj);
  ReportHistogram(true);
}
2503 // -----------------------------------------------------------------------------
2504 // FixedSpace implementation
2506 void FixedSpace::PrepareForMarkCompact() {
2507 // Call prepare of the super class.
2508 PagedSpace::PrepareForMarkCompact();
  // During a non-compacting collection, everything below the linear
  // allocation pointer except wasted top-of-page blocks is considered
  // allocated and we will rediscover available bytes during the
  // collection.
2514 accounting_stats_.AllocateBytes(free_list_.available());
  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}
2521 // -----------------------------------------------------------------------------
2522 // MapSpace implementation
2525 void MapSpace::VerifyObject(HeapObject* object) {
2526 // The object should be a map or a free-list node.
  ASSERT(object->IsMap() || object->IsFreeSpace());
}
2532 // -----------------------------------------------------------------------------
2533 // GlobalPropertyCellSpace implementation
2536 void CellSpace::VerifyObject(HeapObject* object) {
2537 // The object should be a global object property cell or a free-list node.
2538 ASSERT(object->IsJSGlobalPropertyCell() ||
         object->map() == heap()->two_pointer_filler_map());
}
2544 // -----------------------------------------------------------------------------
2545 // LargeObjectIterator
2547 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_page_;
  size_func_ = NULL;
}
2553 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2554 HeapObjectCallback size_func) {
2555 current_ = space->first_page_;
  size_func_ = size_func;
}
2560 HeapObject* LargeObjectIterator::Next() {
2561 if (current_ == NULL) return NULL;
2563 HeapObject* object = current_->GetObject();
  current_ = current_->next_page();
  return object;
}
// -----------------------------------------------------------------------------
// LargeObjectSpace
2571 static bool ComparePointers(void* key1, void* key2) {
  return key1 == key2;
}
2576 LargeObjectSpace::LargeObjectSpace(Heap* heap,
                                   intptr_t max_capacity,
                                   AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      max_capacity_(max_capacity),
      first_page_(NULL),
      size_(0),
      page_count_(0),
      objects_size_(0),
      chunk_map_(ComparePointers, 1024) {}
bool LargeObjectSpace::SetUp() {
  first_page_ = NULL;
  size_ = 0;
  page_count_ = 0;
  objects_size_ = 0;
  chunk_map_.Clear();
  return true;
}
2598 void LargeObjectSpace::TearDown() {
2599 while (first_page_ != NULL) {
2600 LargePage* page = first_page_;
2601 first_page_ = first_page_->next_page();
2602 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2604 ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2605 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2606 space, kAllocationActionFree, page->size());
    heap()->isolate()->memory_allocator()->Free(page);
  }
  SetUp();
}
2613 MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
2614 Executability executable) {
2615 // Check if we want to force a GC before growing the old space further.
2616 // If so, fail the allocation.
2617 if (!heap()->always_allocate() &&
2618 heap()->OldGenerationAllocationLimitReached()) {
2619 return Failure::RetryAfterGC(identity());
2622 if (Size() + object_size > max_capacity_) {
    return Failure::RetryAfterGC(identity());
  }
2626 LargePage* page = heap()->isolate()->memory_allocator()->
2627 AllocateLargePage(object_size, this, executable);
2628 if (page == NULL) return Failure::RetryAfterGC(identity());
2629 ASSERT(page->area_size() >= object_size);
2631 size_ += static_cast<int>(page->size());
  objects_size_ += object_size;
  page_count_++;
  page->set_next_page(first_page_);
  first_page_ = page;
2637 // Register all MemoryChunk::kAlignment-aligned chunks covered by
2638 // this large page in the chunk map.
2639 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2640 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2641 for (uintptr_t key = base; key <= limit; key++) {
2642 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                            static_cast<uint32_t>(key),
                                            true);
2645 ASSERT(entry != NULL);
    entry->value = page;
  }
2649 HeapObject* object = page->GetObject();
  // Make the object consistent so the heap can be verified in OldSpaceStep.
2653 reinterpret_cast<Object**>(object->address())[0] =
2654 heap()->fixed_array_map();
2655 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
  heap()->incremental_marking()->OldSpaceStep(object_size);
  return object;
}
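
// Worked example of the chunk_map_ keying used above and in FindPage()
// (illustrative numbers only): with a 1 MB MemoryChunk::kAlignment, say, a
// large page at address 0x40000000 of total size 0x280000 gives
//   base  = 0x40000000 / 0x100000 = 0x400
//   limit = 0x400 + (0x280000 - 1) / 0x100000 = 0x402
// so keys 0x400, 0x401 and 0x402 all map to this page, and any interior
// address can be resolved with a single hash lookup.
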
2664 MaybeObject* LargeObjectSpace::FindObject(Address a) {
  LargePage* page = FindPage(a);
  if (page != NULL) {
    return page->GetObject();
  }
  return Failure::Exception();
}
2673 LargePage* LargeObjectSpace::FindPage(Address a) {
2674 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
2675 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                        static_cast<uint32_t>(key),
                                        false);
  if (e != NULL) {
2679 ASSERT(e->value != NULL);
2680 LargePage* page = reinterpret_cast<LargePage*>(e->value);
2681 ASSERT(page->is_valid());
    if (page->Contains(a)) {
      return page;
    }
  }
  return NULL;
}
2690 void LargeObjectSpace::FreeUnmarkedObjects() {
2691 LargePage* previous = NULL;
2692 LargePage* current = first_page_;
2693 while (current != NULL) {
2694 HeapObject* object = current->GetObject();
    // Can this large page contain pointers to non-trivial objects?  No other
    // pointer object is this big.
2697 bool is_pointer_object = object->IsFixedArray();
2698 MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) {
      mark_bit.Clear();
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
      previous = current;
      current = current->next_page();
    } else {
2705 LargePage* page = current;
2706 // Cut the chunk out from the chunk list.
2707 current = current->next_page();
2708 if (previous == NULL) {
        first_page_ = current;
      } else {
        previous->set_next_page(current);
      }
2715 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
2716 object, heap()->isolate());
2717 size_ -= static_cast<int>(page->size());
      objects_size_ -= object->Size();
      page_count_--;
2721 // Remove entries belonging to this page.
2722 // Use variable alignment to help pass length check (<= 80 characters)
2723 // of single line in tools/presubmit.py.
2724 const intptr_t alignment = MemoryChunk::kAlignment;
2725 uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
2726 uintptr_t limit = base + (page->size()-1)/alignment;
2727 for (uintptr_t key = base; key <= limit; key++) {
2728 chunk_map_.Remove(reinterpret_cast<void*>(key),
                          static_cast<uint32_t>(key));
      }
2732 if (is_pointer_object) {
        heap()->QueueMemoryChunkForFree(page);
      } else {
        heap()->isolate()->memory_allocator()->Free(page);
      }
    }
  }
  heap()->FreeQueuedChunks();
}
2743 bool LargeObjectSpace::Contains(HeapObject* object) {
2744 Address address = object->address();
2745 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
2747 bool owned = (chunk->owner() == this);
  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());

  return owned;
}
2756 // We do not assume that the large object iterator works, because it depends
2757 // on the invariants we are checking during verification.
2758 void LargeObjectSpace::Verify() {
2759 for (LargePage* chunk = first_page_;
       chunk != NULL;
       chunk = chunk->next_page()) {
2762 // Each chunk contains an object that starts at the large object page's
2763 // object area start.
2764 HeapObject* object = chunk->GetObject();
2765 Page* page = Page::FromAddress(object->address());
2766 ASSERT(object->address() == page->area_start());
    // The first word should be a map, and we expect all map pointers to be
    // in map space.
2770 Map* map = object->map();
2771 ASSERT(map->IsMap());
2772 ASSERT(heap()->map_space()->Contains(map));
2774 // We have only code, sequential strings, external strings
2775 // (sequential strings that have been morphed into external
2776 // strings), fixed arrays, and byte arrays in large object space.
2777 ASSERT(object->IsCode() || object->IsSeqString() ||
2778 object->IsExternalString() || object->IsFixedArray() ||
2779 object->IsFixedDoubleArray() || object->IsByteArray());
    // The object itself should look OK.
    object->Verify();
2784 // Byte arrays and strings don't have interior pointers.
2785 if (object->IsCode()) {
2786 VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
2790 } else if (object->IsFixedArray()) {
2791 FixedArray* array = FixedArray::cast(object);
2792 for (int j = 0; j < array->length(); j++) {
2793 Object* element = array->get(j);
2794 if (element->IsHeapObject()) {
2795 HeapObject* element_object = HeapObject::cast(element);
2796 ASSERT(heap()->Contains(element_object));
          ASSERT(element_object->map()->IsMap());
        }
      }
    }
  }
}
2805 void LargeObjectSpace::Print() {
2806 LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    obj->Print();
  }
}
2813 void LargeObjectSpace::ReportStatistics() {
2814 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
2815 int num_objects = 0;
2817 LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }
2823 PrintF(" number of objects %d, "
2824 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(false);
}
2829 void LargeObjectSpace::CollectCodeStatistics() {
2830 Isolate* isolate = heap()->isolate();
2831 LargeObjectIterator obj_it(this);
2832 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2833 if (obj->IsCode()) {
2834 Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}
2841 void Page::Print() {
2842 // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n",
         this->address(),
         AllocationSpaceName(this->owner()->identity()));
2846 printf(" --------------------------------------\n");
2847 HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
2848 unsigned mark_size = 0;
2849 for (HeapObject* object = objects.Next();
       object != NULL;
       object = objects.Next()) {
2852 bool is_marked = Marking::MarkBitFrom(object).Get();
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
    }
    object->ShortPrint();
    PrintF("\n");
  }
2860 printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}
2866 } } // namespace v8::internal