// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen.h"
#include "src/heap/mark-compact.h"
#include "src/macro-assembler.h"
#include "src/snapshot/snapshot.h"

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the double linked page list. Initialize as if we have
  // reached the end of the anchor page, then the first iteration will move on
  // to the first page.
  Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
}

HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the double linked page list. Initialize the current
  // address and end as NULL, then the first iteration will move on
  // to the first page.
  Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
}

HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Space* owner = page->owner();
  DCHECK(owner == page->heap()->old_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
             page->area_end(), kOnePageOnly, size_func);
  DCHECK(page->WasSwept() || page->SweepingCompleted());
}

void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
                                    HeapObjectIterator::PageMode mode,
                                    HeapObjectCallback size_f) {
  space_ = space;
  cur_addr_ = cur;
  cur_end_ = end;
  page_mode_ = mode;
  size_func_ = size_f;
}

// We have hit the end of the current page and should advance to the next
// block of objects.
bool HeapObjectIterator::AdvanceToNextPage() {
  DCHECK(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
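    // cur_addr_ equals the previous page's area_end(), which can coincide
    // with the first byte of the next memory chunk, so step back one byte
    // before masking the address down to its page.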
    cur_page = Page::FromAddress(cur_addr_ - 1);
    DCHECK(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
  return true;
}

// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0),
      emergency_block_() {}

bool CodeRange::SetUp(size_t requested) {
  DCHECK(code_range_ == NULL);

  if (requested == 0) {
    // When a target requires the code range feature, we put all code objects
    // in a kMaximalCodeRangeSize range of virtual address space, so that
    // they can call each other with near calls.
    if (kRequiresCodeRange) {
      requested = kMaximalCodeRangeSize;
    } else {
      return true;
    }
  }

  if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
#ifdef V8_TARGET_ARCH_MIPS64
  // To use pseudo-relative jumps such as j/jal instructions which have 28-bit
  // encoded immediates, the addresses have to be in a 256 MB aligned region.
  code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize);
#else
  code_range_ = new base::VirtualMemory(requested);
#endif
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  DCHECK(code_range_->size() == requested);
  Address base = reinterpret_cast<Address>(code_range_->address());

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space.
  if (kReservedCodeRangePages) {
    if (!code_range_->Commit(
            base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
      delete code_range_;
      code_range_ = NULL;
      return false;
    }
    base += kReservedCodeRangePages * base::OS::CommitPageSize();
  }
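  // Align the usable area to MemoryChunk::kAlignment and exclude the slack
  // from the free list. Illustrative arithmetic, assuming a 1 MB chunk
  // alignment: if base is 0x40080000, aligned_base becomes 0x40100000, and
  // the 0x80000 bytes of slack (plus any reserved head pages) are subtracted
  // from the allocatable size.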
  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base) -
                kReservedCodeRangePages * base::OS::CommitPageSize();
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;

  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  ReserveEmergencyBlock();
  return true;
}

int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}

bool CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
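  // Coalesce address-adjacent blocks. For example (illustrative), sorted
  // blocks [0x1000, +0x1000) and [0x2000, +0x3000) merge into a single block
  // [0x1000, +0x4000), which then goes back on the allocation list.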
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }
  current_allocation_block_index_ = 0;
  // Code range is full or too fragmented.
  return false;
}

Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  DCHECK(commit_size <= requested_size);
  FreeBlock current;
  if (!ReserveBlock(requested_size, &current)) {
    *allocated = 0;
    return NULL;
  }
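  // ReserveBlock may hand back a block slightly larger than requested_size
  // (it avoids leaving a remainder smaller than a page), so report the
  // actual block size to the caller.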
  *allocated = current.size;
  DCHECK(*allocated <= current.size);
  DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!isolate_->memory_allocator()->CommitExecutableMemory(
          code_range_, current.start, commit_size, *allocated)) {
    *allocated = 0;
    ReleaseBlock(&current);
    return NULL;
  }
  return current.start;
}

bool CodeRange::CommitRawMemory(Address start, size_t length) {
  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
}

bool CodeRange::UncommitRawMemory(Address start, size_t length) {
  return code_range_->Uncommit(start, length);
}

void CodeRange::FreeRawMemory(Address address, size_t length) {
  DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}

void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}

bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
  DCHECK(allocation_list_.length() == 0 ||
         current_allocation_block_index_ < allocation_list_.length());
  if (allocation_list_.length() == 0 ||
      requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.
    if (!GetNextAllocationBlock(requested_size)) return false;
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  *block = allocation_list_[current_allocation_block_index_];
  // Don't leave a small free block, useless for a large object or chunk.
  if (aligned_requested < (block->size - Page::kPageSize)) {
    block->size = aligned_requested;
  }
  DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
  allocation_list_[current_allocation_block_index_].start += block->size;
  allocation_list_[current_allocation_block_index_].size -= block->size;
  return true;
}

void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }

void CodeRange::ReserveEmergencyBlock() {
  const size_t requested_size = MemoryAllocator::CodePageAreaSize();
  if (emergency_block_.size == 0) {
    ReserveBlock(requested_size, &emergency_block_);
  } else {
    DCHECK(emergency_block_.size >= requested_size);
  }
}

void CodeRange::ReleaseEmergencyBlock() {
  if (emergency_block_.size != 0) {
    ReleaseBlock(&emergency_block_);
    emergency_block_.size = 0;
  }
}

// -----------------------------------------------------------------------------
// MemoryAllocator


MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}

bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  DCHECK_GE(capacity_, capacity_executable_);

  size_ = 0;
  size_executable_ = 0;

  return true;
}

void MemoryAllocator::TearDown() {
  // Check that spaces were torn down before MemoryAllocator.
  DCHECK(size_ == 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // DCHECK(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}

bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                   Executability executable) {
  if (!base::VirtualMemory::CommitRegion(base, size,
                                         executable == EXECUTABLE)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}

void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  DCHECK(reservation->IsReserved());
  size_t size = reservation->size();
  DCHECK(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    DCHECK(size_executable_ >= size);
    size_executable_ -= size;
  }
  // Code which is part of the code-range does not have its own VirtualMemory.
  DCHECK(isolate_->code_range() == NULL ||
         !isolate_->code_range()->contains(
             static_cast<Address>(reservation->address())));
  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
         !isolate_->code_range()->valid() || size <= Page::kPageSize);

  reservation->Release();
}

void MemoryAllocator::FreeMemory(Address base, size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  DCHECK(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    DCHECK(size_executable_ >= size);
    size_executable_ -= size;
  }
  if (isolate_->code_range() != NULL &&
      isolate_->code_range()->contains(static_cast<Address>(base))) {
    DCHECK(executable == EXECUTABLE);
    isolate_->code_range()->FreeRawMemory(base, size);
  } else {
    DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
           !isolate_->code_range()->valid());
    bool result = base::VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    DCHECK(result);
  }
}

Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                              base::VirtualMemory* controller) {
  base::VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_ += reservation.size();
  Address base =
      RoundUp(static_cast<Address>(reservation.address()), alignment);
  controller->TakeControl(&reservation);
  return base;
}

Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, base::VirtualMemory* controller) {
  DCHECK(commit_size <= reserve_size);
  base::VirtualMemory reservation;
  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
      base = NULL;
    }
  } else {
    if (reservation.Commit(base, commit_size, false)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = NULL;
    }
  }

  if (base == NULL) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    return NULL;
  }

  controller->TakeControl(&reservation);
  return base;
}

void Page::InitializeAsAnchor(PagedSpace* owner) {
  set_owner(owner);
  set_prev_page(this);
  set_next_page(this);
}

NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
                                       SemiSpace* semi_space) {
  Address area_start = start + NewSpacePage::kObjectStartOffset;
  Address area_end = start + Page::kPageSize;

  MemoryChunk* chunk =
      MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
                              area_end, NOT_EXECUTABLE, semi_space);
  chunk->set_next_chunk(NULL);
  chunk->set_prev_chunk(NULL);
  chunk->initialize_scan_on_scavenge(true);
  bool in_to_space = (semi_space->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  return page;
}

void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  set_owner(semi_space);
  set_next_chunk(this);
  set_prev_chunk(this);
  // The cleared flags mark this invalid page as not being in new-space.
  // All real new-space pages will be in new-space.
  SetFlags(0, ~0);
}

MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     Address area_start, Address area_end,
                                     Executability executable, Space* owner) {
  MemoryChunk* chunk = FromAddress(base);

  DCHECK(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->slots_buffer_ = NULL;
  chunk->skip_list_ = NULL;
  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_ = static_cast<int>(area_start - base);
  chunk->set_parallel_sweeping(SWEEPING_DONE);
  chunk->available_in_small_free_list_ = 0;
  chunk->available_in_medium_free_list_ = 0;
  chunk->available_in_large_free_list_ = 0;
  chunk->available_in_huge_free_list_ = 0;
  chunk->non_available_small_blocks_ = 0;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->initialize_scan_on_scavenge(false);
  chunk->SetFlag(WAS_SWEPT);

  DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  return chunk;
}

// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size =
      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size =
      RoundUp(header_size + requested, base::OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  base::OS::CommitPageSize());
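
  // Illustrative arithmetic for a non-executable chunk (guard_size == 0),
  // assuming 4 KB OS commit pages and a 1 KB header: growing a 16 KB area to
  // requested == 32 KB gives committed_size == 20 KB and commit_size ==
  // 36 KB, so the 16 KB delta after the already-committed region is
  // committed below.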
  if (commit_size > committed_size) {
    // Commit size should be less than or equal to the reserved size.
    DCHECK(commit_size <= size() - 2 * guard_size);
    // Append the committed area.
    Address start = address() + committed_size + guard_size;
    size_t length = commit_size - committed_size;
    if (reservation_.IsReserved()) {
      Executability executable =
          IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
      if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
                                                               executable)) {
        return false;
      }
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->CommitRawMemory(start, length)) return false;
    }

    if (Heap::ShouldZapGarbage()) {
      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
    }
  } else if (commit_size < committed_size) {
    DCHECK(commit_size > 0);
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
    Address start = address() + committed_size + guard_size - length;
    if (reservation_.IsReserved()) {
      if (!reservation_.Uncommit(start, length)) return false;
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->UncommitRawMemory(start, length)) return false;
    }
  }

  area_end_ = area_start_ + requested;
  return true;
}

void MemoryChunk::InsertAfter(MemoryChunk* other) {
  MemoryChunk* other_next = other->next_chunk();

  set_next_chunk(other_next);
  set_prev_chunk(other);
  other_next->set_prev_chunk(this);
  other->set_next_chunk(this);
}

void MemoryChunk::Unlink() {
  MemoryChunk* next_element = next_chunk();
  MemoryChunk* prev_element = prev_chunk();
  next_element->set_prev_chunk(prev_element);
  prev_element->set_next_chunk(next_element);
  set_prev_chunk(NULL);
  set_next_chunk(NULL);
}

MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  DCHECK(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  base::VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // | Committed but not used     |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |          Header            |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // | Committed but not used     |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //
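
  // Sizing sketch for the executable case (illustrative, assuming 4 KB OS
  // commit pages): with reserve_area_size == 40 KB, chunk_size is
  // RoundUp(CodePageAreaStartOffset() + 40 KB, 4 KB) plus one trailing guard
  // page, so the reservation covers the header, both guards, and the area.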
  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                         base::OS::CommitPageSize()) +
                 CodePageGuardSize();

    // Check executable memory limit.
    if (size_executable_ + chunk_size > capacity_executable_) {
      LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Size of header (not executable) plus area (executable).
    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                 base::OS::CommitPageSize());
    // Allocate executable memory either from code range or from the OS.
#ifdef V8_TARGET_ARCH_MIPS64
    // Use code range only for large object space on mips64 to keep address
    // range within 256-MB memory region.
    if (isolate_->code_range() != NULL && isolate_->code_range()->valid() &&
        reserve_area_size > CodePageAreaSize()) {
#else
    if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
#endif
      base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
                                                       &chunk_size);
      DCHECK(IsAligned(reinterpret_cast<intptr_t>(base),
                       MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_ += chunk_size;
      // Update executable memory size.
      size_executable_ += chunk_size;
    } else {
      base = AllocateAlignedMemory(chunk_size, commit_size,
                                   MemoryChunk::kAlignment, executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_ += reservation.size();
    }

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                         base::OS::CommitPageSize());
    size_t commit_size =
        RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
                base::OS::CommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, &reservation);

    if (base == NULL) return NULL;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->Increment(
      static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  MemoryChunk* result = MemoryChunk::Initialize(
      heap, base, chunk_size, area_start, area_end, executable, owner);
  result->set_reserved_memory(&reservation);
  return result;
}

void Page::ResetFreeListStatistics() {
  non_available_small_blocks_ = 0;
  available_in_small_free_list_ = 0;
  available_in_medium_free_list_ = 0;
  available_in_large_free_list_ = 0;
  available_in_huge_free_list_ = 0;
}

Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
                                    Executability executable) {
  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);

  if (chunk == NULL) return NULL;

  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}

LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                              Space* owner,
                                              Executability executable) {
  MemoryChunk* chunk =
      AllocateChunk(object_size, object_size, executable, owner);
  if (chunk == NULL) return NULL;
  return LargePage::Initialize(isolate_->heap(), chunk);
}

void MemoryAllocator::Free(MemoryChunk* chunk) {
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());

  delete chunk->slots_buffer();
  delete chunk->skip_list();

  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    FreeMemory(reservation, chunk->executable());
  } else {
    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
  }
}

bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                  Executability executable) {
  if (!CommitMemory(start, size, executable)) return false;

  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size);
  }

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}

bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}

void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}

void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}

bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}

void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback, ObjectSpace space,
    AllocationAction action) {
  DCHECK(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}

void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  DCHECK(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}

void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX
         "d"
         ", used: %" V8_PTR_PREFIX
         "d"
         ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct * 100));
}

int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
}

int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(base::OS::CommitPageSize());
}

int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}

int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
}

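
// Worked offsets (illustrative, assuming 4 KB OS commit pages and
// Page::kObjectStartOffset <= 4 KB): CodePageGuardStartOffset() == 4 KB and
// CodePageGuardSize() == 4 KB, so the executable area starts at offset 8 KB
// and ends one OS page before the end of the page, leaving non-accessible
// guard pages on both sides of the code area.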
bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
                                             Address start, size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  Address header = start;
  size_t header_size = CodePageGuardStartOffset();
  if (vm->Commit(header, header_size, false)) {
    // Create guard page after the header.
    if (vm->Guard(start + CodePageGuardStartOffset())) {
      // Commit page body (executable).
      Address body = start + CodePageAreaStartOffset();
      size_t body_size = commit_size - CodePageGuardStartOffset();
      if (vm->Commit(body, body_size, true)) {
        // Create guard page before the end.
        if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
          UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
                                                commit_size -
                                                CodePageGuardStartOffset());
          return true;
        }
        vm->Uncommit(body, body_size);
      }
    }
    vm->Uncommit(header, header_size);
  }
  return false;
}

// -----------------------------------------------------------------------------
// MemoryChunk implementation

void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
  }
  chunk->IncrementLiveBytes(by);
}

// -----------------------------------------------------------------------------
// PagedSpace implementation

STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
              ObjectSpace::kObjectSpaceNewSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
              ObjectSpace::kObjectSpaceOldSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
              ObjectSpace::kObjectSpaceCodeSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
              ObjectSpace::kObjectSpaceMapSpace);

PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace space,
                       Executability executable)
    : Space(heap, space, executable),
      free_list_(this),
      unswept_free_bytes_(0),
      end_of_unswept_pages_(NULL),
      emergency_memory_(NULL) {
  area_size_ = MemoryAllocator::PageAreaSize(space);
  max_capacity_ =
      (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
  accounting_stats_.Clear();

  allocation_info_.set_top(NULL);
  allocation_info_.set_limit(NULL);

  anchor_.InitializeAsAnchor(this);
}

bool PagedSpace::SetUp() { return true; }


bool PagedSpace::HasBeenSetUp() { return true; }

void PagedSpace::TearDown() {
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->isolate()->memory_allocator()->Free(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}

size_t PagedSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  PageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}

bool PagedSpace::ContainsSafe(Address addr) {
  Page* p = Page::FromAddress(addr);
  PageIterator iterator(this);
  while (iterator.has_next()) {
    if (iterator.next() == p) return true;
  }
  return false;
}

Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on iterable spaces.
  DCHECK(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p, NULL);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Smi::FromInt(0);
}

bool PagedSpace::CanExpand() {
  DCHECK(max_capacity_ % AreaSize() == 0);
  DCHECK(heap()->mark_compact_collector()->is_compacting() ||
         Capacity() <= heap()->MaxOldGenerationSize());
  DCHECK(heap()->CommittedOldGenerationMemory() <=
         heap()->MaxOldGenerationSize() +
             PagedSpace::MaxEmergencyMemoryAllocated());

  // Are we going to exceed capacity for this space?
  if (!heap()->CanExpandOldGeneration(Page::kPageSize)) return false;

  return true;
}

bool PagedSpace::Expand() {
  if (!CanExpand()) return false;

  intptr_t size = AreaSize();

  if (anchor_.next_page() == &anchor_) {
    size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
  }

  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
                                                                executable());
  if (p == NULL) return false;

  // Pages created during bootstrapping may contain immortal immovable objects.
  if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();

  DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
  DCHECK(heap()->CommittedOldGenerationMemory() <=
         heap()->MaxOldGenerationSize() +
             PagedSpace::MaxEmergencyMemoryAllocated());

  p->InsertAfter(anchor_.prev_page());

  return true;
}

int PagedSpace::CountTotalPages() {
  PageIterator it(this);
  int count = 0;
  while (it.has_next()) {
    it.next();
    count++;
  }
  return count;
}

void PagedSpace::ResetFreeListStatistics() {
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    page->ResetFreeListStatistics();
  }
}

void PagedSpace::IncreaseCapacity(int size) {
  accounting_stats_.ExpandSpace(size);
}

void PagedSpace::ReleasePage(Page* page) {
  DCHECK(page->LiveBytes() == 0);
  DCHECK(AreaSize() == page->area_size());

  if (page->WasSwept()) {
    intptr_t size = free_list_.EvictFreeListItems(page);
    accounting_stats_.AllocateBytes(size);
    DCHECK_EQ(AreaSize(), static_cast<int>(size));
  } else {
    DecreaseUnsweptFreeBytes(page);
  }

  if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
    heap()->decrement_scan_on_scavenge_pages();
    page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  DCHECK(!free_list_.ContainsPageFreeListItems(page));

  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
    allocation_info_.set_top(NULL);
    allocation_info_.set_limit(NULL);
  }

  // If page is still in a list, unlink it from that list.
  if (page->next_chunk() != NULL) {
    DCHECK(page->prev_chunk() != NULL);
    page->Unlink();
  }

  heap()->QueueMemoryChunkForFree(page);

  DCHECK(Capacity() > 0);
  accounting_stats_.ShrinkSpace(AreaSize());
}

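
// Emergency memory is a spare chunk set aside per old-generation space so
// that compaction can still make progress when a space is otherwise out of
// memory. The accounting below is a sketch of that idea: every space except
// new space and large object space keeps roughly one page of emergency
// memory, created, consumed, and freed by the functions that follow.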
intptr_t PagedSpace::MaxEmergencyMemoryAllocated() {
  // New space and large object space.
  static const int spaces_without_emergency_memory = 2;
  static const int spaces_with_emergency_memory =
      LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory;
  return Page::kPageSize * spaces_with_emergency_memory;
}

void PagedSpace::CreateEmergencyMemory() {
  if (identity() == CODE_SPACE) {
    // Make the emergency block available to the allocator.
    CodeRange* code_range = heap()->isolate()->code_range();
    if (code_range != NULL && code_range->valid()) {
      code_range->ReleaseEmergencyBlock();
    }
    DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
  }
  emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
      AreaSize(), AreaSize(), executable(), this);
}

void PagedSpace::FreeEmergencyMemory() {
  Page* page = static_cast<Page*>(emergency_memory_);
  DCHECK(page->LiveBytes() == 0);
  DCHECK(AreaSize() == page->area_size());
  DCHECK(!free_list_.ContainsPageFreeListItems(page));
  heap()->isolate()->memory_allocator()->Free(page);
  emergency_memory_ = NULL;
}

void PagedSpace::UseEmergencyMemory() {
  // Page::Initialize makes the chunk into a real page and adds it to the
  // accounting for this space. Unlike PagedSpace::Expand, we don't check
  // CanExpand first, so we can go over the limits a little here. That's OK,
  // because we are in the process of compacting which will free up at least as
  // much memory as it allocates.
  Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
  page->InsertAfter(anchor_.prev_page());
  emergency_memory_ = NULL;
}

#ifdef DEBUG
void PagedSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationTop(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->WasSwept());
    HeapObjectIterator it(page, NULL);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->ObjectVerify();

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    CHECK_LE(black_size, page->LiveBytes());
  }
  CHECK(allocation_pointer_found_in_space);
}
#endif  // VERIFY_HEAP

// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::SetUp(int reserved_semispace_capacity,
                     int maximum_semispace_capacity) {
  // Set up new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();

  int target_semispace_capacity = heap()->TargetSemiSpaceSize();

  size_t size = 2 * reserved_semispace_capacity;
  Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
      size, size, &reservation_);
  if (base == NULL) return false;

  chunk_base_ = base;
  chunk_size_ = static_cast<uintptr_t>(size);
  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));

  DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
  DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));

  // Allocate and set up the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name)                        \
  allocated_histogram_[name].set_name(#name); \
  promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
  DCHECK(static_cast<intptr_t>(chunk_size_) >=
         2 * heap()->ReservedSemiSpaceSize());
  DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));

  to_space_.SetUp(chunk_base_, initial_semispace_capacity,
                  target_semispace_capacity, maximum_semispace_capacity);
  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
                    initial_semispace_capacity, target_semispace_capacity,
                    maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }
  DCHECK(!from_space_.is_committed());  // No need to use memory yet.

  start_ = chunk_base_;
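  // Containment-test sketch: with a power-of-two, size-aligned reservation,
  // an address is in new space iff (addr & address_mask_) == start_. E.g.
  // (illustrative) for start_ == 0x20000000 and a 2 * 16 MB reservation,
  // address_mask_ == ~0x01ffffff, so masking any address inside the chunk
  // yields 0x20000000.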
  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;

  ResetAllocationInfo();

  return true;
}

void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  start_ = NULL;
  allocation_info_.set_top(NULL);
  allocation_info_.set_limit(NULL);

  to_space_.TearDown();
  from_space_.TearDown();

  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));

  DCHECK(reservation_.IsReserved());
  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
                                                    NOT_EXECUTABLE);
  chunk_base_ = NULL;
  chunk_size_ = 0;
}

void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }

void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  DCHECK(TotalCapacity() < MaximumCapacity());
  int new_capacity =
      Min(MaximumCapacity(),
          FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        CHECK(false);
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

bool NewSpace::GrowOnePage() {
  if (TotalCapacity() == MaximumCapacity()) return false;
  int new_capacity = static_cast<int>(TotalCapacity()) + Page::kPageSize;
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space and the from space
    // is actually committed.
    if (from_space_.is_committed()) {
      if (!from_space_.GrowTo(new_capacity)) {
        // If we managed to grow to-space but couldn't grow from-space,
        // attempt to shrink to-space.
        if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
          // We are in an inconsistent state because we could not
          // commit/uncommit memory from new space.
          CHECK(false);
        }
        return false;
      }
    } else {
      if (!from_space_.SetTotalCapacity(new_capacity)) {
        // Can't really happen, but better safe than sorry.
        CHECK(false);
      }
    }
    DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    return true;
  }
  return false;
}

void NewSpace::Shrink() {
  int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < TotalCapacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from
      // space, attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.TotalCapacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        CHECK(false);
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

void NewSpace::UpdateAllocationInfo() {
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  allocation_info_.set_top(to_space_.page_low());
  allocation_info_.set_limit(to_space_.page_high());
  UpdateInlineAllocationLimit(0);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

void NewSpace::ResetAllocationInfo() {
  to_space_.Reset();
  UpdateAllocationInfo();

  // Clear all mark-bits in the to-space.
  NewSpacePageIterator it(&to_space_);
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}

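
// The inline allocation limit decides how far the bump pointer may advance
// before taking the slow path. The function below covers three regimes:
// inline allocation disabled (the limit hugs the next allocation), no
// incremental-marking step requested (the limit is the page end), and
// stepping enabled (the limit is lowered so allocation re-enters the runtime
// every inline_allocation_limit_step_ bytes).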
void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
  if (heap()->inline_allocation_disabled()) {
    // Lowest limit when linear allocation was disabled.
    Address high = to_space_.page_high();
    Address new_top = allocation_info_.top() + size_in_bytes;
    allocation_info_.set_limit(Min(new_top, high));
  } else if (inline_allocation_limit_step() == 0) {
    // Normal limit is the end of the current page.
    allocation_info_.set_limit(to_space_.page_high());
  } else {
    // Lower limit during incremental marking.
    Address high = to_space_.page_high();
    Address new_top = allocation_info_.top() + size_in_bytes;
    Address new_limit = new_top + inline_allocation_limit_step_;
    allocation_info_.set_limit(Min(new_limit, high));
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

bool NewSpace::AddFreshPage() {
  Address top = allocation_info_.top();
  if (NewSpacePage::IsAtStart(top)) {
    // The current page is already empty. Don't try to make another.

    // We should only get here if someone asks to allocate more
    // than what can be stored in a single page.
    // TODO(gc): Change the limit on new-space allocation to prevent this
    // from happening (all such allocations should go directly to LOSpace).
    return false;
  }
  if (!to_space_.AdvancePage()) {
    // Check if we reached the target capacity yet. If not, try to commit a
    // page and continue.
    if ((to_space_.TotalCapacity() < to_space_.TargetCapacity()) &&
        GrowOnePage()) {
      if (!to_space_.AdvancePage()) {
        // It doesn't make sense that we managed to commit a page, but can't
        // use it.
        CHECK(false);
      }
    } else {
      // Failed to get a new page in to-space.
      return false;
    }
  }

  // Clear remainder of current page.
  Address limit = NewSpacePage::FromLimit(top)->area_end();
  if (heap()->gc_state() == Heap::SCAVENGE) {
    heap()->promotion_queue()->SetNewLimit(limit);
  }

  int remaining_in_page = static_cast<int>(limit - top);
  heap()->CreateFillerObjectAt(top, remaining_in_page);

  UpdateAllocationInfo();

  return true;
}

AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
                                           AllocationAlignment alignment) {
  Address old_top = allocation_info_.top();
  Address high = to_space_.page_high();
  if (allocation_info_.limit() < high) {
    int alignment_size = Heap::GetFillToAlign(old_top, alignment);
    int aligned_size_in_bytes = size_in_bytes + alignment_size;

    // Either the limit has been lowered because linear allocation was disabled
    // or because incremental marking wants to get a chance to do a step. Set
    // the new limit accordingly.
    Address new_top = old_top + aligned_size_in_bytes;
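    // top_on_previous_step_ records the top pointer after the last
    // incremental-marking step, so the difference below is the number of
    // bytes allocated inline since then; it is fed to the marker as its
    // progress budget.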
    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(bytes_allocated,
                                        IncrementalMarking::GC_VIA_STACK_GUARD);
    UpdateInlineAllocationLimit(aligned_size_in_bytes);
    top_on_previous_step_ = new_top;
    if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
    return AllocateRawAligned(size_in_bytes, alignment);
  } else if (AddFreshPage()) {
    // Switched to new page. Try allocating again.
    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(bytes_allocated,
                                        IncrementalMarking::GC_VIA_STACK_GUARD);
    top_on_previous_step_ = to_space_.page_low();
    if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
    return AllocateRawAligned(size_in_bytes, alignment);
  } else {
    return AllocationResult::Retry();
  }
}

#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.first_page()->area_start();
  CHECK_EQ(current, to_space_.space_start());

  while (current != top()) {
    if (!NewSpacePage::IsAtEnd(current)) {
      // The allocation pointer should not be in the middle of an object.
      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
            current < top());

      HeapObject* object = HeapObject::FromAddress(current);

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // The object should not be code or a map.
      CHECK(!object->IsMap());
      CHECK(!object->IsCode());

      // The object itself should look OK.
      object->ObjectVerify();

      // All the interior pointers should be contained in the heap.
      VerifyPointersVisitor visitor;
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, &visitor);

      current += size;
    } else {
      // At end of page, switch to next page.
      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
      // Next page should be valid.
      CHECK(!page->is_anchor());
      current = page->area_start();
    }
  }

  // Check semi-spaces.
  CHECK_EQ(from_space_.id(), kFromSpace);
  CHECK_EQ(to_space_.id(), kToSpace);
  from_space_.Verify();
  to_space_.Verify();
}
#endif  // VERIFY_HEAP

// -----------------------------------------------------------------------------
// SemiSpace implementation

void SemiSpace::SetUp(Address start, int initial_capacity, int target_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise. In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  DCHECK(maximum_capacity >= Page::kPageSize);
  DCHECK(initial_capacity <= target_capacity);
  DCHECK(target_capacity <= maximum_capacity);
  initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
  total_capacity_ = initial_capacity;
  target_capacity_ = RoundDown(target_capacity, Page::kPageSize);
  maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
  maximum_committed_ = 0;
  committed_ = false;
  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_ + NewSpacePage::kObjectStartOffset;
}

void SemiSpace::TearDown() {
  start_ = NULL;
  total_capacity_ = 0;
}

bool SemiSpace::Commit() {
  DCHECK(!is_committed());
  int pages = total_capacity_ / Page::kPageSize;
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
          start_, total_capacity_, executable())) {
    return false;
  }

  NewSpacePage* current = anchor();
  for (int i = 0; i < pages; i++) {
    NewSpacePage* new_page =
        NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
    new_page->InsertAfter(current);
    current = new_page;
  }

  SetCapacity(total_capacity_);
  committed_ = true;
  Reset();
  return true;
}

bool SemiSpace::Uncommit() {
  DCHECK(is_committed());
  Address start = start_ + maximum_total_capacity_ - total_capacity_;
  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
                                                            total_capacity_)) {
    return false;
  }
  anchor()->set_next_page(anchor());
  anchor()->set_prev_page(anchor());

  committed_ = false;
  return true;
}

size_t SemiSpace::CommittedPhysicalMemory() {
  if (!is_committed()) return 0;
  size_t size = 0;
  NewSpacePageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}

bool SemiSpace::GrowTo(int new_capacity) {
  if (!is_committed()) {
    if (!Commit()) return false;
  }
  DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
  DCHECK(new_capacity <= maximum_total_capacity_);
  DCHECK(new_capacity > total_capacity_);
  int pages_before = total_capacity_ / Page::kPageSize;
  int pages_after = new_capacity / Page::kPageSize;

  size_t delta = new_capacity - total_capacity_;

  DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
          start_ + total_capacity_, delta, executable())) {
    return false;
  }
  SetCapacity(new_capacity);
  NewSpacePage* last_page = anchor()->prev_page();
  DCHECK(last_page != anchor());
  for (int i = pages_before; i < pages_after; i++) {
    Address page_address = start_ + i * Page::kPageSize;
    NewSpacePage* new_page =
        NewSpacePage::Initialize(heap(), page_address, this);
    new_page->InsertAfter(last_page);
    Bitmap::Clear(new_page);
    // Duplicate the flags that were set on the old page.
    new_page->SetFlags(last_page->GetFlags(),
                       NewSpacePage::kCopyOnFlipFlagsMask);
    last_page = new_page;
  }
  return true;
}

bool SemiSpace::ShrinkTo(int new_capacity) {
  DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
  DCHECK(new_capacity >= initial_total_capacity_);
  DCHECK(new_capacity < total_capacity_);
  if (is_committed()) {
    size_t delta = total_capacity_ - new_capacity;
    DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));

    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
    if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
      return false;
    }

    int pages_after = new_capacity / Page::kPageSize;
    NewSpacePage* new_last_page =
        NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
    new_last_page->set_next_page(anchor());
    anchor()->set_prev_page(new_last_page);
    DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
  }

  SetCapacity(new_capacity);

  return true;
}

bool SemiSpace::SetTotalCapacity(int new_capacity) {
  CHECK(!is_committed());
  if (new_capacity >= initial_total_capacity_ &&
      new_capacity <= maximum_total_capacity_) {
    total_capacity_ = new_capacity;
    return true;
  }
  return false;
}

void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
  anchor_.set_owner(this);
  // Fixup back-pointers to anchor. Address of anchor changes
  // when we swap.
  anchor_.prev_page()->set_next_page(&anchor_);
  anchor_.next_page()->set_prev_page(&anchor_);

  bool becomes_to_space = (id_ == kFromSpace);
  id_ = becomes_to_space ? kToSpace : kFromSpace;
  NewSpacePage* page = anchor_.next_page();
  while (page != &anchor_) {
    page->set_owner(this);
    page->SetFlags(flags, mask);
    if (becomes_to_space) {
      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
      page->SetFlag(MemoryChunk::IN_TO_SPACE);
      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
      page->ResetLiveBytes();
    } else {
      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
    }
    DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
    DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
    page = page->next_page();
  }
}

void SemiSpace::Reset() {
  DCHECK(anchor_.next_page() != &anchor_);
  current_page_ = anchor_.next_page();
}

void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
  // We won't be swapping semispaces without data in them.
  DCHECK(from->anchor_.next_page() != &from->anchor_);
  DCHECK(to->anchor_.next_page() != &to->anchor_);
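
  // Swapping by value copies everything, including each space's embedded
  // anchor page, so the page lists still point at the anchors' old
  // addresses. FlipPages below re-links the lists to the relocated anchors
  // and rewrites the to/from-space flags on every page.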
  SemiSpace tmp = *from;
  *from = *to;
  *to = tmp;

  // Fixup back-pointers to the page list anchor now that its address
  // has changed.
  // Swap to/from-space bits on pages.
  // Copy GC flags from old active space (from-space) to new (to-space).
  intptr_t flags = from->current_page()->GetFlags();
  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);

  from->FlipPages(0, 0);
}

void SemiSpace::SetCapacity(int new_capacity) {
  total_capacity_ = new_capacity;
  if (total_capacity_ > maximum_committed_) {
    maximum_committed_ = total_capacity_;
  }
}

void SemiSpace::set_age_mark(Address mark) {
  DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
  age_mark_ = mark;
  // Mark all pages up to the one containing mark.
  NewSpacePageIterator it(space_start(), mark);
  while (it.has_next()) {
    it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
  }
}

#ifdef DEBUG
void SemiSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void SemiSpace::Verify() {
  bool is_from_space = (id_ == kFromSpace);
  NewSpacePage* page = anchor_.next_page();
  CHECK(anchor_.semi_space() == this);
  while (page != &anchor_) {
    CHECK(page->semi_space() == this);
    CHECK(page->InNewSpace());
    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
                                         : MemoryChunk::IN_FROM_SPACE));
    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
    if (!is_from_space) {
      // The pointers-from-here-are-interesting flag isn't updated dynamically
      // on from-space pages, so it might be out of sync with the marking state.
      if (page->heap()->incremental_marking()->IsMarking()) {
        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      } else {
        CHECK(
            !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      }
      // TODO(gc): Check that the live_bytes_count_ field matches the
      // black marking on the page (if we make it match in new-space).
    }
    CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
    CHECK(page->prev_page()->next_page() == page);
    page = page->next_page();
  }
}
#endif  // VERIFY_HEAP

#ifdef DEBUG
void SemiSpace::AssertValidRange(Address start, Address end) {
  // Addresses belong to same semi-space.
  NewSpacePage* page = NewSpacePage::FromLimit(start);
  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
  SemiSpace* space = page->semi_space();
  CHECK_EQ(space, end_page->semi_space());
  // Start address is before end address, either on same page,
  // or end address is on a later page in the linked list of
  // semi-space pages.
  if (page == end_page) {
    CHECK(start <= end);
  } else {
    while (page != end_page) {
      page = page->next_page();
      CHECK_NE(page, space->anchor());
    }
  }
}
#endif

// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.

SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
  Initialize(from, to, NULL);
}


void SemiSpaceIterator::Initialize(Address start, Address end,
                                   HeapObjectCallback size_func) {
  SemiSpace::AssertValidRange(start, end);
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}

// heap_histograms is shared, always clear it before using it.
static void ClearHistograms(Isolate* isolate) {
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  isolate->js_spill_information()->Clear();
}
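
// To make the macro plumbing above concrete: for one entry of
// INSTANCE_TYPE_LIST, say JS_OBJECT_TYPE, DEF_TYPE_NAME expands to
//
//   isolate->heap_histograms()[JS_OBJECT_TYPE].set_name("JS_OBJECT_TYPE");
//
// and CLEAR_HISTOGRAM expands to
//
//   isolate->heap_histograms()[JS_OBJECT_TYPE].clear();
//
// so the list macro stamps out one such statement per instance type.
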
static void ClearCodeKindStatistics(int* code_kind_statistics) {
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    code_kind_statistics[i] = 0;
  }
}
static void ReportCodeKindStatistics(int* code_kind_statistics) {
  PrintF("\n Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (code_kind_statistics[i] > 0) {
      PrintF(" %-20s: %10d bytes\n",
             Code::Kind2String(static_cast<Code::Kind>(i)),
             code_kind_statistics[i]);
    }
  }
}
static int CollectHistogramInfo(HeapObject* obj) {
  Isolate* isolate = obj->GetIsolate();
  InstanceType type = obj->map()->instance_type();
  DCHECK(0 <= type && type <= LAST_TYPE);
  DCHECK(isolate->heap_histograms()[type].name() != NULL);
  isolate->heap_histograms()[type].increment_number(1);
  isolate->heap_histograms()[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)
        ->IncrementSpillStatistics(isolate->js_spill_information());
  }

  return obj->Size();
}
static void ReportHistogram(Isolate* isolate, bool print_spill) {
  PrintF("\n Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (isolate->heap_histograms()[i].number() > 0) {
      PrintF(" %-34s%10d (%10d bytes)\n",
             isolate->heap_histograms()[i].name(),
             isolate->heap_histograms()[i].number(),
             isolate->heap_histograms()[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)               \
  string_number += isolate->heap_histograms()[type].number(); \
  string_bytes += isolate->heap_histograms()[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    isolate->js_spill_information()->Print();
  }
}
// Support for statistics gathering for --heap-stats and --log-gc.
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}
// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens when the --log-gc flag is set.
void NewSpace::CollectStatistics() {
  ClearHistograms();
  SemiSpaceIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
    RecordAllocation(obj);
}
static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
                               const char* description) {
  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name) \
  string_number += info[type].number();         \
  string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(isolate,
        HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
                                       info[i].bytes()));
    }
  }
  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
}
void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / TotalCapacity();
    PrintF(" capacity: %" V8_PTR_PREFIX "d, available: %" V8_PTR_PREFIX
           "d, %%%d\n",
           TotalCapacity(), Available(), static_cast<int>(pct * 100));
    PrintF("\n Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

  if (FLAG_log_gc) {
    Isolate* isolate = heap()->isolate();
    DoReportStatistics(isolate, allocated_histogram_, "allocated");
    DoReportStatistics(isolate, promoted_histogram_, "promoted");
  }
}
void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  DCHECK(0 <= type && type <= LAST_TYPE);
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}
void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  DCHECK(0 <= type && type <= LAST_TYPE);
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}
size_t NewSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = to_space_.CommittedPhysicalMemory();
  if (from_space_.is_committed()) {
    size += from_space_.CommittedPhysicalMemory();
  }
  return size;
}
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
  intptr_t free_bytes = 0;
  if (category->top() != NULL) {
    // This is safe (not going to deadlock) since Concatenate operations
    // are never performed on the same free lists at the same time in
    // reverse order.
    base::LockGuard<base::Mutex> target_lock_guard(mutex());
    base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
    DCHECK(category->end_ != NULL);
    free_bytes = category->available();
    if (end_ == NULL) {
      end_ = category->end();
    } else {
      category->end()->set_next(top());
    }
    set_top(category->top());
    base::NoBarrier_Store(&top_, category->top_);
    available_ += category->available();
    category->Reset();
  }
  return free_bytes;
}
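
// To illustrate the splice above: if this category holds A -> B (end_ == B)
// and the source category holds C -> D (category->end() == D), Concatenate
// leaves this category holding C -> D -> A -> B: the source's end node is
// pointed at the old top, then the source's top is adopted as the new top.
// The source is then Reset() so no node is reachable from two lists at once.
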
void FreeListCategory::Reset() {
  set_top(NULL);
  set_end(NULL);
  set_available(0);
}
intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
  int sum = 0;
  FreeSpace* t = top();
  FreeSpace** n = &t;
  while (*n != NULL) {
    if (Page::FromAddress((*n)->address()) == p) {
      FreeSpace* free_space = *n;
      sum += free_space->Size();
      *n = (*n)->next();
    } else {
      n = (*n)->next_address();
    }
  }
  set_top(t);
  if (top() == NULL) {
    set_end(NULL);
  }
  available_ -= sum;
  return sum;
}
bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
  FreeSpace* node = top();
  while (node != NULL) {
    if (Page::FromAddress(node->address()) == p) return true;
    node = node->next();
  }
  return false;
}
FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
  FreeSpace* node = top();

  if (node == NULL) return NULL;

  while (node != NULL &&
         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
    available_ -= node->Size();
    node = node->next();
  }

  if (node != NULL) {
    set_top(node->next());
    *node_size = node->Size();
    available_ -= *node_size;
  } else {
    set_top(NULL);
  }

  if (top() == NULL) {
    set_end(NULL);
  }

  return node;
}
FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
                                              int* node_size) {
  FreeSpace* node = PickNodeFromList(node_size);
  if (node != NULL && *node_size < size_in_bytes) {
    Free(node, *node_size);
    *node_size = 0;
    return NULL;
  }
  return node;
}
void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
  DCHECK_LE(FreeList::kSmallListMin, size_in_bytes);
  free_space->set_next(top());
  set_top(free_space);
  if (end_ == NULL) {
    end_ = free_space;
  }
  available_ += size_in_bytes;
}
void FreeListCategory::RepairFreeList(Heap* heap) {
  FreeSpace* n = top();
  while (n != NULL) {
    Map** map_location = reinterpret_cast<Map**>(n->address());
    if (*map_location == NULL) {
      *map_location = heap->free_space_map();
    } else {
      DCHECK(*map_location == heap->free_space_map());
    }
    n = n->next();
  }
}
FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
  Reset();
}
intptr_t FreeList::Concatenate(FreeList* free_list) {
  intptr_t free_bytes = 0;
  free_bytes += small_list_.Concatenate(free_list->small_list());
  free_bytes += medium_list_.Concatenate(free_list->medium_list());
  free_bytes += large_list_.Concatenate(free_list->large_list());
  free_bytes += huge_list_.Concatenate(free_list->huge_list());
  return free_bytes;
}
void FreeList::Reset() {
  small_list_.Reset();
  medium_list_.Reset();
  large_list_.Reset();
  huge_list_.Reset();
}
int FreeList::Free(Address start, int size_in_bytes) {
  if (size_in_bytes == 0) return 0;

  heap_->CreateFillerObjectAt(start, size_in_bytes);

  Page* page = Page::FromAddress(start);

  // Early return to drop too-small blocks on the floor.
  if (size_in_bytes < kSmallListMin) {
    page->add_non_available_small_blocks(size_in_bytes);
    return size_in_bytes;
  }

  FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
  // Insert other blocks at the head of a free list of the appropriate
  // magnitude.
  if (size_in_bytes <= kSmallListMax) {
    small_list_.Free(free_space, size_in_bytes);
    page->add_available_in_small_free_list(size_in_bytes);
  } else if (size_in_bytes <= kMediumListMax) {
    medium_list_.Free(free_space, size_in_bytes);
    page->add_available_in_medium_free_list(size_in_bytes);
  } else if (size_in_bytes <= kLargeListMax) {
    large_list_.Free(free_space, size_in_bytes);
    page->add_available_in_large_free_list(size_in_bytes);
  } else {
    huge_list_.Free(free_space, size_in_bytes);
    page->add_available_in_huge_free_list(size_in_bytes);
  }

  DCHECK(IsVeryLong() || available() == SumFreeLists());
  return 0;
}
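
// The dispatch above is a plain ascending size-class lookup.  As a summary
// (the concrete byte boundaries, kSmallListMin and the k*ListMax constants,
// live in spaces.h):
//
//   size_in_bytes <  kSmallListMin   -> dropped, tracked only per page
//   size_in_bytes <= kSmallListMax   -> small_list_
//   size_in_bytes <= kMediumListMax  -> medium_list_
//   size_in_bytes <= kLargeListMax   -> large_list_
//   otherwise                        -> huge_list_
//
// Each branch also credits the matching per-page counter so that page-level
// accounting stays in sync with the global free list.
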
FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
  FreeSpace* node = NULL;
  Page* page = NULL;

  if (size_in_bytes <= kSmallAllocationMax) {
    node = small_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      DCHECK(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_small_free_list(-(*node_size));
      DCHECK(IsVeryLong() || available() == SumFreeLists());
      return node;
    }
  }

  if (size_in_bytes <= kMediumAllocationMax) {
    node = medium_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      DCHECK(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_medium_free_list(-(*node_size));
      DCHECK(IsVeryLong() || available() == SumFreeLists());
      return node;
    }
  }

  if (size_in_bytes <= kLargeAllocationMax) {
    node = large_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      DCHECK(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_large_free_list(-(*node_size));
      DCHECK(IsVeryLong() || available() == SumFreeLists());
      return node;
    }
  }

  int huge_list_available = huge_list_.available();
  FreeSpace* top_node = huge_list_.top();
  for (FreeSpace** cur = &top_node; *cur != NULL;
       cur = (*cur)->next_address()) {
    FreeSpace* cur_node = *cur;
    while (cur_node != NULL &&
           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
      int size = cur_node->Size();
      huge_list_available -= size;
      page = Page::FromAddress(cur_node->address());
      page->add_available_in_huge_free_list(-size);
      cur_node = cur_node->next();
    }

    *cur = cur_node;
    if (cur_node == NULL) {
      huge_list_.set_end(NULL);
      break;
    }

    int size = cur_node->Size();
    if (size >= size_in_bytes) {
      // Large enough node found.  Unlink it from the list.
      node = *cur;
      *cur = node->next();
      *node_size = size;
      huge_list_available -= size;
      page = Page::FromAddress(node->address());
      page->add_available_in_huge_free_list(-size);
      break;
    }
  }

  huge_list_.set_top(top_node);
  if (huge_list_.top() == NULL) {
    huge_list_.set_end(NULL);
  }
  huge_list_.set_available(huge_list_available);

  if (node != NULL) {
    DCHECK(IsVeryLong() || available() == SumFreeLists());
    return node;
  }

  if (size_in_bytes <= kSmallListMax) {
    node = small_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      DCHECK(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_small_free_list(-(*node_size));
    }
  } else if (size_in_bytes <= kMediumListMax) {
    node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      DCHECK(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_medium_free_list(-(*node_size));
    }
  } else if (size_in_bytes <= kLargeListMax) {
    node = large_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      DCHECK(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_large_free_list(-(*node_size));
    }
  }

  DCHECK(IsVeryLong() || available() == SumFreeLists());
  return node;
}
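
// Note the two-pass structure above.  The first pass consults a category
// only when every node in it is guaranteed to satisfy the request:
// size_in_bytes is compared against the k*AllocationMax bounds, which are no
// larger than the smallest node the consulted list can hold, so a hit can
// take the head node without inspecting it.  Only the huge list is walked
// node by node.  The second pass retries the small/medium/large lists with
// the exact request size via PickNodeFromList(size_in_bytes, node_size),
// which may fail and put an undersized head node back on the list.
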
// Allocation on the old space free list.  If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space.  If
// the allocation fails then NULL is returned, and the caller can perform a GC
// or allocate a new page before retrying.
HeapObject* FreeList::Allocate(int size_in_bytes) {
  DCHECK(0 < size_in_bytes);
  DCHECK(size_in_bytes <= kMaxBlockSize);
  DCHECK(IsAligned(size_in_bytes, kPointerSize));
  // Don't free list allocate if there is linear space available.
  DCHECK(owner_->limit() - owner_->top() < size_in_bytes);

  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.  This also puts it back in the free list
  // if it is big enough.
  owner_->Free(owner_->top(), old_linear_size);

  owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
                                                      old_linear_size);

  int new_node_size = 0;
  FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
  if (new_node == NULL) {
    owner_->SetTopAndLimit(NULL, NULL);
    return NULL;
  }

  int bytes_left = new_node_size - size_in_bytes;
  DCHECK(bytes_left >= 0);

#ifdef DEBUG
  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
    reinterpret_cast<Object**>(new_node->address())[i] =
        Smi::FromInt(kCodeZapValue);
  }
#endif

  // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  const int kThreshold = IncrementalMarking::kAllocatedThreshold;

  // Memory in the linear allocation area is counted as allocated.  We may
  // free a little of this again immediately - see below.
  owner_->Allocate(new_node_size);

  if (owner_->heap()->inline_allocation_disabled()) {
    // Keep the linear allocation area empty if requested to do so, just
    // return the area back to the free list instead.
    owner_->Free(new_node->address() + size_in_bytes, bytes_left);
    DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
  } else if (bytes_left > kThreshold &&
             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
             FLAG_incremental_marking_steps) {
    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
    // We don't want to give too large linear areas to the allocator while
    // incremental marking is going on, because we won't check again whether
    // we want to do another increment until the linear area is used up.
    owner_->Free(new_node->address() + size_in_bytes + linear_size,
                 new_node_size - size_in_bytes - linear_size);
    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                           new_node->address() + size_in_bytes + linear_size);
  } else if (bytes_left > 0) {
    // Normally we give the rest of the node to the allocator as its new
    // linear allocation area.
    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                           new_node->address() + new_node_size);
  } else {
    // TODO(gc) Try not freeing linear allocation region when bytes_left
    // is zero.
    owner_->SetTopAndLimit(NULL, NULL);
  }

  return new_node;
}
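
// A worked example of the branches above (illustrative numbers): suppose
// size_in_bytes is 64 and FindNodeFor returns a 256-byte node, so bytes_left
// is 192.
//  - With inline allocation disabled, the trailing 192 bytes go straight
//    back to the free list and the linear allocation area stays empty.
//  - If incremental marking were incomplete and bytes_left exceeded
//    kThreshold, only a kThreshold-sized linear area (rounded down to object
//    alignment) would be kept and the rest freed, so the marker is consulted
//    again soon.
//  - Otherwise the whole tail becomes the new linear allocation area:
//    top = new_node + 64, limit = new_node + 256.
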
intptr_t FreeList::EvictFreeListItems(Page* p) {
  intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
  p->set_available_in_huge_free_list(0);

  if (sum < p->area_size()) {
    sum += small_list_.EvictFreeListItemsInList(p) +
           medium_list_.EvictFreeListItemsInList(p) +
           large_list_.EvictFreeListItemsInList(p);
    p->set_available_in_small_free_list(0);
    p->set_available_in_medium_free_list(0);
    p->set_available_in_large_free_list(0);
  }

  return sum;
}
bool FreeList::ContainsPageFreeListItems(Page* p) {
  return huge_list_.ContainsPageFreeListItemsInList(p) ||
         small_list_.ContainsPageFreeListItemsInList(p) ||
         medium_list_.ContainsPageFreeListItemsInList(p) ||
         large_list_.ContainsPageFreeListItemsInList(p);
}
void FreeList::RepairLists(Heap* heap) {
  small_list_.RepairFreeList(heap);
  medium_list_.RepairFreeList(heap);
  large_list_.RepairFreeList(heap);
  huge_list_.RepairFreeList(heap);
}
intptr_t FreeListCategory::SumFreeList() {
  intptr_t sum = 0;
  FreeSpace* cur = top();
  while (cur != NULL) {
    DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
    sum += cur->nobarrier_size();
    cur = cur->next();
  }
  return sum;
}
static const int kVeryLongFreeList = 500;
int FreeListCategory::FreeListLength() {
  int length = 0;
  FreeSpace* cur = top();
  while (cur != NULL) {
    length++;
    cur = cur->next();
    if (length == kVeryLongFreeList) return length;
  }
  return length;
}
bool FreeList::IsVeryLong() {
  if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
  if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
  if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
  if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
  return false;
}
// This can take a very long time because it is linear in the number of
// entries on the free list, so it should not be called if FreeListLength
// returns kVeryLongFreeList.
intptr_t FreeList::SumFreeLists() {
  intptr_t sum = small_list_.SumFreeList();
  sum += medium_list_.SumFreeList();
  sum += large_list_.SumFreeList();
  sum += huge_list_.SumFreeList();
  return sum;
}
// -----------------------------------------------------------------------------
// OldSpace implementation
void PagedSpace::PrepareForMarkCompact() {
  // We don't have a linear allocation area while sweeping.  It will be
  // restored on the first allocation after the sweep.
  EmptyAllocationInfo();

  // This counter will be increased for pages which will be swept by the
  // sweeper threads.
  unswept_free_bytes_ = 0;

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}
intptr_t PagedSpace::SizeOfObjects() {
  DCHECK(!FLAG_concurrent_sweeping ||
         heap()->mark_compact_collector()->sweeping_in_progress() ||
         (unswept_free_bytes_ == 0));
  return Size() - unswept_free_bytes_ - (limit() - top());
}
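
// For example (illustrative numbers): if Size() is 4 MB, 1 MB of that sits
// in not-yet-swept free blocks (unswept_free_bytes_), and the linear
// allocation area still has 16 KB between top() and limit(), then
// SizeOfObjects() reports 4 MB - 1 MB - 16 KB: committed space minus memory
// that is known not to hold live objects.
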
// After we have booted, we have created a map which represents free space
// on the heap.  If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally NULL), so we need to
// fix them.
void PagedSpace::RepairFreeListsAfterDeserialization() {
  free_list_.RepairLists(heap());
  // Each page may have a small free space that is not tracked by a free list.
  // Update the maps for those free space objects.
  PageIterator iterator(this);
  while (iterator.has_next()) {
    Page* page = iterator.next();
    int size = static_cast<int>(page->non_available_small_blocks());
    if (size == 0) continue;
    Address address = page->OffsetToAddress(Page::kPageSize - size);
    heap()->CreateFillerObjectAt(address, size);
  }
}
void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
  if (allocation_info_.top() >= allocation_info_.limit()) return;

  if (Page::FromAllocationTop(allocation_info_.top())
          ->IsEvacuationCandidate()) {
    // Create a filler object to keep the page iterable if it was iterable.
    int remaining =
        static_cast<int>(allocation_info_.limit() - allocation_info_.top());
    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);

    allocation_info_.set_top(NULL);
    allocation_info_.set_limit(NULL);
  }
}
HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
    int size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    // Wait for the sweeper threads here and complete the sweeping phase.
    collector->EnsureSweepingCompleted();

    // After waiting for the sweeper threads, there may be new free-list
    // entries.
    return free_list_.Allocate(size_in_bytes);
  }
  return NULL;
}
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
  // Allocation in this space has failed.

  MarkCompactCollector* collector = heap()->mark_compact_collector();
  // Sweeping is still in progress.
  if (collector->sweeping_in_progress()) {
    // First try to refill the free list; concurrent sweeper threads
    // may have freed some objects in the meantime.
    collector->RefillFreeList(this);

    // Retry the free list allocation.
    HeapObject* object = free_list_.Allocate(size_in_bytes);
    if (object != NULL) return object;

    // If sweeping is still in progress, try to sweep pages on the main
    // thread.
    int free_chunk = collector->SweepInParallel(this, size_in_bytes);
    collector->RefillFreeList(this);
    if (free_chunk >= size_in_bytes) {
      HeapObject* object = free_list_.Allocate(size_in_bytes);
      // We should be able to allocate an object here since we just freed that
      // much memory.
      DCHECK(object != NULL);
      if (object != NULL) return object;
    }
  }

  // Free list allocation failed and there is no next page.  Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    // If sweeper threads are active, wait for them at that point and steal
    // elements from their free lists.
    HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
    return object;
  }

  // Try to expand the space and allocate in the new next page.
  if (Expand()) {
    DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
    return free_list_.Allocate(size_in_bytes);
  }

  // If sweeper threads are active, wait for them at that point and steal
  // elements from their free lists.  Allocation may still fail here, which
  // would indicate that there is not enough memory for the given allocation.
  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
}
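
// In summary, the slow path above escalates as follows (a sketch of the
// control flow, not an API contract):
//   1. refill the free list from the concurrent sweepers and retry;
//   2. sweep pages on the main thread (SweepInParallel) and retry;
//   3. if the old-generation limit has been reached, wait for sweeping to
//      complete and retry once more, leaving the GC decision to the caller;
//   4. otherwise expand the space with a fresh page and allocate there;
//   5. as a final fallback, wait for sweeping to complete and retry.
// A NULL result tells the caller to perform a GC or report out-of-memory.
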
void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ReportCodeKindStatistics(isolate->code_kind_statistics());
  PrintF(
      "Code comment statistics (\" [ comment-txt : size/ "
      "count (average)\"):\n");
  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    if (cs.size > 0) {
      PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
             cs.size / cs.count);
    }
  }
}
void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ClearCodeKindStatistics(isolate->code_kind_statistics());
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    comments_statistics[i].Clear();
  }
  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
  comments_statistics[CommentStatistic::kMaxComments].size = 0;
  comments_statistics[CommentStatistic::kMaxComments].count = 0;
}
// Adds a comment to the 'comments_statistics' table.  Performance is OK as
// long as 'kMaxComments' is small.
static void EnterComment(Isolate* isolate, const char* comment, int delta) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  // Do not count empty comments.
  if (delta <= 0) return;
  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
  // Search for a free or matching entry in 'comments_statistics': 'cs'
  // points to the result.
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    if (comments_statistics[i].comment == NULL) {
      cs = &comments_statistics[i];
      cs->comment = comment;
      break;
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
      break;
    }
  }
  // Update the entry for 'comment'.
  cs->size += delta;
  cs->count += 1;
}
// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']').  RelocIterator 'it' must point to a comment reloc info.
static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  DCHECK(!it->done());
  DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip.
    return;
  }

  // Search for the end of the nested comment or a new nested comment.
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, and therefore exit
    // the loop below.
    DCHECK(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested comment.
      // A new nested comment starts here; recurse.
      CollectCommentStatistics(isolate, it);
      // Skip the code that was covered by the nested comment.
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(isolate, comment_txt, flat_delta);
}
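
// As an illustration, the reloc stream for a code object annotated with
// nested comments might carry, in address order:
//
//   "[ inlined call"      <- outer scope; this function is entered here
//   "[ argument setup"    <- inner scope; handled by the recursive call
//   "]"                   <- closes "argument setup"
//   "]"                   <- closes "inlined call"
//
// flat_delta accumulates only the instruction bytes not covered by an inner
// comment, so every byte of code is attributed to exactly one comment.
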
// Collects code size statistics:
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      DCHECK(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);
    }
  }
}
void PagedSpace::ReportStatistics() {
  int pct = static_cast<int>(Available() * 100 / Capacity());
  PrintF(" capacity: %" V8_PTR_PREFIX "d, waste: %" V8_PTR_PREFIX
         "d, available: %" V8_PTR_PREFIX "d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  if (heap()->mark_compact_collector()->sweeping_in_progress()) {
    heap()->mark_compact_collector()->EnsureSweepingCompleted();
  }
  ClearHistograms(heap()->isolate());
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
    CollectHistogramInfo(obj);
  ReportHistogram(heap()->isolate(), true);
}
// -----------------------------------------------------------------------------
// MapSpace implementation
// TODO(mvstanton): this is weird... the compiler can't make a vtable unless
// there is at least one non-inlined virtual function.  I would prefer to hide
// the VerifyObject definition behind VERIFY_HEAP.

void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
// -----------------------------------------------------------------------------
// LargeObjectIterator

LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_page_;
  size_func_ = NULL;
}
LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
                                         HeapObjectCallback size_func) {
  current_ = space->first_page_;
  size_func_ = size_func;
}
HeapObject* LargeObjectIterator::Next() {
  if (current_ == NULL) return NULL;

  HeapObject* object = current_->GetObject();
  current_ = current_->next_page();
  return object;
}
// -----------------------------------------------------------------------------
// LargeObjectSpace

static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }


LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
                                   AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      max_capacity_(max_capacity),
      first_page_(NULL),
      size_(0),
      page_count_(0),
      objects_size_(0),
      chunk_map_(ComparePointers, 1024) {}
bool LargeObjectSpace::SetUp() {
  first_page_ = NULL;
  size_ = 0;
  maximum_committed_ = 0;
  page_count_ = 0;
  objects_size_ = 0;
  chunk_map_.Clear();
  return true;
}
void LargeObjectSpace::TearDown() {
  while (first_page_ != NULL) {
    LargePage* page = first_page_;
    first_page_ = first_page_->next_page();
    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));

    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
        space, kAllocationActionFree, page->size());
    heap()->isolate()->memory_allocator()->Free(page);
  }
  SetUp();
}
AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
                                               Executability executable) {
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return AllocationResult::Retry(identity());
  }

  if (!CanAllocateSize(object_size)) return AllocationResult::Retry(identity());

  LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
      object_size, this, executable);
  if (page == NULL) return AllocationResult::Retry(identity());
  DCHECK(page->area_size() >= object_size);

  size_ += static_cast<int>(page->size());
  objects_size_ += object_size;
  page_count_++;
  page->set_next_page(first_page_);
  first_page_ = page;

  if (size_ > maximum_committed_) {
    maximum_committed_ = size_;
  }

  // Register all MemoryChunk::kAlignment-aligned chunks covered by
  // this large page in the chunk map.
  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
  for (uintptr_t key = base; key <= limit; key++) {
    HashMap::Entry* entry = chunk_map_.LookupOrInsert(
        reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
    DCHECK(entry != NULL);
    entry->value = page;
  }

  HeapObject* object = page->GetObject();

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);

  if (Heap::ShouldZapGarbage()) {
    // Make the object consistent so the heap can be verified in OldSpaceStep.
    // We only need to do this in debug builds or if verify_heap is on.
    reinterpret_cast<Object**>(object->address())[0] =
        heap()->fixed_array_map();
    reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
  }

  heap()->incremental_marking()->OldSpaceStep(object_size);
  return object;
}
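
// To make the chunk-map registration above concrete (illustrative numbers,
// assuming MemoryChunk::kAlignment is 1 MB): a large page at 0x42300000 of
// size 3 MB yields base == 0x423 and limit == 0x425, so the keys 0x423,
// 0x424 and 0x425 all map to this page.  FindPage later recovers the page
// from any interior address by the same divide-by-alignment key computation.
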
size_t LargeObjectSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  size_t size = 0;
  LargePage* current = first_page_;
  while (current != NULL) {
    size += current->CommittedPhysicalMemory();
    current = current->next_page();
  }
  return size;
}
Object* LargeObjectSpace::FindObject(Address a) {
  LargePage* page = FindPage(a);
  if (page != NULL) {
    return page->GetObject();
  }
  return Smi::FromInt(0);  // Signaling not found.
}
LargePage* LargeObjectSpace::FindPage(Address a) {
  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                        static_cast<uint32_t>(key));
  if (e != NULL) {
    DCHECK(e->value != NULL);
    LargePage* page = reinterpret_cast<LargePage*>(e->value);
    DCHECK(LargePage::IsValid(page));
    if (page->Contains(a)) {
      return page;
    }
  }
  return NULL;
}
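
// Illustrative use of the pair above: FindObject signals "not found" with
// Smi::FromInt(0) rather than NULL, so callers test the result with
// IsHeapObject() instead of a NULL check, e.g.
//
//   Object* result = lo_space->FindObject(addr);
//   if (result->IsHeapObject()) { /* addr points into a large object */ }
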
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* previous = NULL;
  LargePage* current = first_page_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    // Can this large page contain pointers to non-trivial objects?  No other
    // pointer object is this big.
    bool is_pointer_object = object->IsFixedArray();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (Marking::IsBlackOrGrey(mark_bit)) {
      Marking::BlackToWhite(mark_bit);
      Page::FromAddress(object->address())->ResetProgressBar();
      Page::FromAddress(object->address())->ResetLiveBytes();
      previous = current;
      current = current->next_page();
    } else {
      LargePage* page = current;
      // Cut the chunk out from the chunk list.
      current = current->next_page();
      if (previous == NULL) {
        first_page_ = current;
      } else {
        previous->set_next_page(current);
      }

      // Free the chunk.
      heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
                                                             heap()->isolate());
      size_ -= static_cast<int>(page->size());
      objects_size_ -= object->Size();
      page_count_--;

      // Remove entries belonging to this page.
      // Use variable alignment to help pass length check (<= 80 characters)
      // of single line in tools/presubmit.py.
      const intptr_t alignment = MemoryChunk::kAlignment;
      uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
      uintptr_t limit = base + (page->size() - 1) / alignment;
      for (uintptr_t key = base; key <= limit; key++) {
        chunk_map_.Remove(reinterpret_cast<void*>(key),
                          static_cast<uint32_t>(key));
      }

      if (is_pointer_object) {
        heap()->QueueMemoryChunkForFree(page);
      } else {
        heap()->isolate()->memory_allocator()->Free(page);
      }
    }
  }
  heap()->FreeQueuedChunks();
}
bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);

  bool owned = (chunk->owner() == this);

  SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());

  return owned;
}
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargePage* chunk = first_page_; chunk != NULL;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    CHECK(object->address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in the map space.
    Map* map = object->map();
    CHECK(map->IsMap());
    CHECK(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, byte arrays, and constant pool arrays in the
    // large object space.
    CHECK(object->IsCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsFixedArray() ||
          object->IsFixedDoubleArray() || object->IsByteArray());

    // The object itself should look OK.
    object->ObjectVerify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          CHECK(heap()->Contains(element_object));
          CHECK(element_object->map()->IsMap());
        }
      }
    }
  }
}
void LargeObjectSpace::Print() {
  OFStream os(stdout);
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    obj->Print(os);
  }
}
void LargeObjectSpace::ReportStatistics() {
  PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms(heap()->isolate());
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF(
      " number of objects %d, "
      "size of objects %" V8_PTR_PREFIX "d\n",
      num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}
void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}
void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n", this->address(),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next(); object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::IsBlackOrGrey(Marking::MarkBitFrom(object));
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}
}  // namespace internal
}  // namespace v8