1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/base/bits.h"
8 #include "src/base/platform/platform.h"
9 #include "src/full-codegen.h"
10 #include "src/heap/mark-compact.h"
11 #include "src/macro-assembler.h"
13 #include "src/snapshot.h"
// ----------------------------------------------------------------------------
// HeapObjectIterator
22 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
23 // You can't actually iterate over the anchor page. It is not a real page,
24 // just an anchor for the double linked page list. Initialize as if we have
// reached the end of the anchor page, then the first iteration will move on
// to the first actual page.
27 Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
31 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
32 HeapObjectCallback size_func) {
33 // You can't actually iterate over the anchor page. It is not a real page,
34 // just an anchor for the double linked page list. Initialize the current
// address and end as NULL, then the first iteration will move on to the
// first actual page.
37 Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
41 HeapObjectIterator::HeapObjectIterator(Page* page,
42 HeapObjectCallback size_func) {
43 Space* owner = page->owner();
44 DCHECK(owner == page->heap()->old_pointer_space() ||
45 owner == page->heap()->old_data_space() ||
46 owner == page->heap()->map_space() ||
47 owner == page->heap()->cell_space() ||
48 owner == page->heap()->property_cell_space() ||
49 owner == page->heap()->code_space());
50 Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
51 page->area_end(), kOnePageOnly, size_func);
52 DCHECK(page->WasSwept() || page->SweepingCompleted());
56 void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
57 HeapObjectIterator::PageMode mode,
58 HeapObjectCallback size_f) {
// We have hit the end of the current page and should advance to the next
// block of objects, which starts on the next page.
69 bool HeapObjectIterator::AdvanceToNextPage() {
70 DCHECK(cur_addr_ == cur_end_);
71 if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    DCHECK(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
80 if (cur_page == space_->anchor()) return false;
81 cur_addr_ = cur_page->area_start();
82 cur_end_ = cur_page->area_end();
83 DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
// -----------------------------------------------------------------------------
// CodeRange
CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {}
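// Reserves a contiguous range of virtual address space for code objects so
// that they can call each other with near calls (see kRequiresCodeRange);
// returns false if the reservation cannot be made.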
101 bool CodeRange::SetUp(size_t requested) {
102 DCHECK(code_range_ == NULL);
104 if (requested == 0) {
105 // When a target requires the code range feature, we put all code objects
106 // in a kMaximalCodeRangeSize range of virtual address space, so that
107 // they can call each other with near calls.
108 if (kRequiresCodeRange) {
109 requested = kMaximalCodeRangeSize;
115 if (requested <= kMinimumCodeRangeSize) {
116 requested = kMinimumCodeRangeSize;
119 DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
120 code_range_ = new base::VirtualMemory(requested);
121 CHECK(code_range_ != NULL);
122 if (!code_range_->IsReserved()) {
128 // We are sure that we have mapped a block of requested addresses.
129 DCHECK(code_range_->size() == requested);
130 Address base = reinterpret_cast<Address>(code_range_->address());
132 // On some platforms, specifically Win64, we need to reserve some pages at
133 // the beginning of an executable space.
134 if (kReservedCodeRangePages) {
135 if (!code_range_->Commit(
136 base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
141 base += kReservedCodeRangePages * base::OS::CommitPageSize();
143 Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
144 size_t size = code_range_->size() - (aligned_base - base) -
145 kReservedCodeRangePages * base::OS::CommitPageSize();
146 allocation_list_.Add(FreeBlock(aligned_base, size));
147 current_allocation_block_index_ = 0;
149 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
150 ReserveEmergencyBlock();
155 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
156 const FreeBlock* right) {
157 // The entire point of CodeRange is that the difference between two
158 // addresses in the range can be represented as a signed 32-bit int,
159 // so the cast is semantically correct.
160 return static_cast<int>(left->start - right->start);
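// Finds an allocation block with at least |requested| bytes. If the blocks
// after the current one are too small, the free list and allocation list are
// merged, sorted by address, adjacent blocks are coalesced, and the search
// restarts from the beginning. Returns false if the code range is full or too
// fragmented.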
164 bool CodeRange::GetNextAllocationBlock(size_t requested) {
165 for (current_allocation_block_index_++;
166 current_allocation_block_index_ < allocation_list_.length();
167 current_allocation_block_index_++) {
168 if (requested <= allocation_list_[current_allocation_block_index_].size) {
169 return true; // Found a large enough allocation block.
173 // Sort and merge the free blocks on the free list and the allocation list.
174 free_list_.AddAll(allocation_list_);
175 allocation_list_.Clear();
176 free_list_.Sort(&CompareFreeBlockAddress);
177 for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
187 allocation_list_.Add(merged);
192 for (current_allocation_block_index_ = 0;
193 current_allocation_block_index_ < allocation_list_.length();
194 current_allocation_block_index_++) {
195 if (requested <= allocation_list_[current_allocation_block_index_].size) {
196 return true; // Found a large enough allocation block.
199 current_allocation_block_index_ = 0;
200 // Code range is full or too fragmented.
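// Reserves a block of at least |requested_size| bytes from the code range,
// commits its first |commit_size| bytes and returns its start address;
// |allocated| receives the size that was actually reserved. On commit failure
// the block is released again.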
205 Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  DCHECK(commit_size <= requested_size);
  FreeBlock current;
  if (!ReserveBlock(requested_size, &current)) {
214 *allocated = current.size;
215 DCHECK(*allocated <= current.size);
216 DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
217 if (!isolate_->memory_allocator()->CommitExecutableMemory(
218 code_range_, current.start, commit_size, *allocated)) {
    ReleaseBlock(&current);
223 return current.start;
227 bool CodeRange::CommitRawMemory(Address start, size_t length) {
228 return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
232 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
233 return code_range_->Uncommit(start, length);
237 void CodeRange::FreeRawMemory(Address address, size_t length) {
238 DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
239 free_list_.Add(FreeBlock(address, length));
240 code_range_->Uncommit(address, length);
244 void CodeRange::TearDown() {
245 delete code_range_; // Frees all memory in the virtual memory range.
248 allocation_list_.Free();
252 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
253 DCHECK(allocation_list_.length() == 0 ||
254 current_allocation_block_index_ < allocation_list_.length());
255 if (allocation_list_.length() == 0 ||
256 requested_size > allocation_list_[current_allocation_block_index_].size) {
257 // Find an allocation block large enough.
258 if (!GetNextAllocationBlock(requested_size)) return false;
260 // Commit the requested memory at the start of the current allocation block.
261 size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
262 *block = allocation_list_[current_allocation_block_index_];
263 // Don't leave a small free block, useless for a large object or chunk.
264 if (aligned_requested < (block->size - Page::kPageSize)) {
265 block->size = aligned_requested;
267 DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
268 allocation_list_[current_allocation_block_index_].start += block->size;
269 allocation_list_[current_allocation_block_index_].size -= block->size;
274 void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
277 void CodeRange::ReserveEmergencyBlock() {
278 const size_t requested_size = MemoryAllocator::CodePageAreaSize();
279 if (emergency_block_.size == 0) {
280 ReserveBlock(requested_size, &emergency_block_);
282 DCHECK(emergency_block_.size >= requested_size);
287 void CodeRange::ReleaseEmergencyBlock() {
288 if (emergency_block_.size != 0) {
289 ReleaseBlock(&emergency_block_);
290 emergency_block_.size = 0;
// -----------------------------------------------------------------------------
// MemoryAllocator
299 MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0),
305 lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
306 highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
309 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
310 capacity_ = RoundUp(capacity, Page::kPageSize);
311 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
312 DCHECK_GE(capacity_, capacity_executable_);
315 size_executable_ = 0;
321 void MemoryAllocator::TearDown() {
322 // Check that spaces were torn down before MemoryAllocator.
324 // TODO(gc) this will be true again when we fix FreeMemory.
325 // DCHECK(size_executable_ == 0);
327 capacity_executable_ = 0;
331 bool MemoryAllocator::CommitMemory(Address base, size_t size,
332 Executability executable) {
333 if (!base::VirtualMemory::CommitRegion(base, size,
                                         executable == EXECUTABLE)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}
342 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
343 Executability executable) {
344 // TODO(gc) make code_range part of memory allocator?
345 DCHECK(reservation->IsReserved());
346 size_t size = reservation->size();
347 DCHECK(size_ >= size);
350 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
352 if (executable == EXECUTABLE) {
353 DCHECK(size_executable_ >= size);
354 size_executable_ -= size;
356 // Code which is part of the code-range does not have its own VirtualMemory.
357 DCHECK(isolate_->code_range() == NULL ||
358 !isolate_->code_range()->contains(
359 static_cast<Address>(reservation->address())));
360 DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
361 !isolate_->code_range()->valid());
362 reservation->Release();
366 void MemoryAllocator::FreeMemory(Address base, size_t size,
367 Executability executable) {
368 // TODO(gc) make code_range part of memory allocator?
369 DCHECK(size_ >= size);
372 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
374 if (executable == EXECUTABLE) {
375 DCHECK(size_executable_ >= size);
376 size_executable_ -= size;
378 if (isolate_->code_range() != NULL &&
379 isolate_->code_range()->contains(static_cast<Address>(base))) {
380 DCHECK(executable == EXECUTABLE);
381 isolate_->code_range()->FreeRawMemory(base, size);
383 DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
384 !isolate_->code_range()->valid());
385 bool result = base::VirtualMemory::ReleaseRegion(base, size);
392 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
393 base::VirtualMemory* controller) {
394 base::VirtualMemory reservation(size, alignment);
396 if (!reservation.IsReserved()) return NULL;
397 size_ += reservation.size();
  Address base =
      RoundUp(static_cast<Address>(reservation.address()), alignment);
  controller->TakeControl(&reservation);
  return base;
}
405 Address MemoryAllocator::AllocateAlignedMemory(
406 size_t reserve_size, size_t commit_size, size_t alignment,
407 Executability executable, base::VirtualMemory* controller) {
408 DCHECK(commit_size <= reserve_size);
409 base::VirtualMemory reservation;
410 Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
411 if (base == NULL) return NULL;
413 if (executable == EXECUTABLE) {
414 if (!CommitExecutableMemory(&reservation, base, commit_size,
419 if (reservation.Commit(base, commit_size, false)) {
420 UpdateAllocatedSpaceLimits(base, base + commit_size);
427 // Failed to commit the body. Release the mapping and any partially
  // committed regions inside it.
429 reservation.Release();
433 controller->TakeControl(&reservation);
438 void Page::InitializeAsAnchor(PagedSpace* owner) {
445 NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
446 SemiSpace* semi_space) {
447 Address area_start = start + NewSpacePage::kObjectStartOffset;
448 Address area_end = start + Page::kPageSize;
  MemoryChunk* chunk =
      MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
452 area_end, NOT_EXECUTABLE, semi_space);
453 chunk->set_next_chunk(NULL);
454 chunk->set_prev_chunk(NULL);
455 chunk->initialize_scan_on_scavenge(true);
456 bool in_to_space = (semi_space->id() != kFromSpace);
457 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
458 : MemoryChunk::IN_FROM_SPACE);
459 DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
460 : MemoryChunk::IN_TO_SPACE));
461 NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
462 heap->incremental_marking()->SetNewSpacePageFlags(page);
467 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
468 set_owner(semi_space);
469 set_next_chunk(this);
470 set_prev_chunk(this);
  // Flags mark this invalid page as not being in new-space.
472 // All real new-space pages will be in new-space.
477 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
478 Address area_start, Address area_end,
479 Executability executable, Space* owner) {
480 MemoryChunk* chunk = FromAddress(base);
482 DCHECK(base == chunk->address());
486 chunk->area_start_ = area_start;
487 chunk->area_end_ = area_end;
489 chunk->set_owner(owner);
490 chunk->InitializeReservedMemory();
491 chunk->slots_buffer_ = NULL;
492 chunk->skip_list_ = NULL;
493 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
494 chunk->progress_bar_ = 0;
495 chunk->high_water_mark_ = static_cast<int>(area_start - base);
496 chunk->set_parallel_sweeping(SWEEPING_DONE);
497 chunk->available_in_small_free_list_ = 0;
498 chunk->available_in_medium_free_list_ = 0;
499 chunk->available_in_large_free_list_ = 0;
500 chunk->available_in_huge_free_list_ = 0;
501 chunk->non_available_small_blocks_ = 0;
502 chunk->ResetLiveBytes();
503 Bitmap::Clear(chunk);
504 chunk->initialize_scan_on_scavenge(false);
505 chunk->SetFlag(WAS_SWEPT);
507 DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
508 DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
510 if (executable == EXECUTABLE) {
511 chunk->SetFlag(IS_EXECUTABLE);
514 if (owner == heap->old_data_space()) {
515 chunk->SetFlag(CONTAINS_ONLY_DATA);
522 // Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size =
      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size =
      RoundUp(header_size + requested, base::OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  base::OS::CommitPageSize());
532 if (commit_size > committed_size) {
    // Commit size should be less than or equal to the reserved size.
534 DCHECK(commit_size <= size() - 2 * guard_size);
535 // Append the committed area.
536 Address start = address() + committed_size + guard_size;
537 size_t length = commit_size - committed_size;
538 if (reservation_.IsReserved()) {
539 Executability executable =
540 IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
541 if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
546 CodeRange* code_range = heap_->isolate()->code_range();
547 DCHECK(code_range != NULL && code_range->valid() &&
548 IsFlagSet(IS_EXECUTABLE));
549 if (!code_range->CommitRawMemory(start, length)) return false;
552 if (Heap::ShouldZapGarbage()) {
553 heap_->isolate()->memory_allocator()->ZapBlock(start, length);
555 } else if (commit_size < committed_size) {
556 DCHECK(commit_size > 0);
557 // Shrink the committed area.
558 size_t length = committed_size - commit_size;
559 Address start = address() + committed_size + guard_size - length;
560 if (reservation_.IsReserved()) {
561 if (!reservation_.Uncommit(start, length)) return false;
563 CodeRange* code_range = heap_->isolate()->code_range();
564 DCHECK(code_range != NULL && code_range->valid() &&
565 IsFlagSet(IS_EXECUTABLE));
566 if (!code_range->UncommitRawMemory(start, length)) return false;
570 area_end_ = area_start_ + requested;
575 void MemoryChunk::InsertAfter(MemoryChunk* other) {
576 MemoryChunk* other_next = other->next_chunk();
578 set_next_chunk(other_next);
579 set_prev_chunk(other);
580 other_next->set_prev_chunk(this);
581 other->set_next_chunk(this);
585 void MemoryChunk::Unlink() {
586 MemoryChunk* next_element = next_chunk();
587 MemoryChunk* prev_element = prev_chunk();
588 next_element->set_prev_chunk(prev_element);
589 prev_element->set_next_chunk(next_element);
590 set_prev_chunk(NULL);
591 set_next_chunk(NULL);
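// Allocates a MemoryChunk whose area reserves |reserve_area_size| bytes and
// commits the first |commit_area_size| bytes. Executable chunks are taken
// from the code range when one is available and are surrounded by guard
// pages; the resulting layouts are shown in the diagrams below.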
595 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
596 intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  DCHECK(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  base::VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //
638 if (executable == EXECUTABLE) {
639 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
640 base::OS::CommitPageSize()) +
643 // Check executable memory limit.
644 if (size_executable_ + chunk_size > capacity_executable_) {
645 LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
646 "V8 Executable Allocation capacity exceeded"));
650 // Size of header (not executable) plus area (executable).
651 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
652 base::OS::CommitPageSize());
653 // Allocate executable memory either from code range or from the
655 if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
      base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
                                                       &chunk_size);
      DCHECK(
          IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
660 if (base == NULL) return NULL;
662 // Update executable memory size.
663 size_executable_ += chunk_size;
665 base = AllocateAlignedMemory(chunk_size, commit_size,
                                   MemoryChunk::kAlignment, executable,
                                   &reservation);
      if (base == NULL) return NULL;
669 // Update executable memory size.
670 size_executable_ += reservation.size();
673 if (Heap::ShouldZapGarbage()) {
674 ZapBlock(base, CodePageGuardStartOffset());
675 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
678 area_start = base + CodePageAreaStartOffset();
679 area_end = area_start + commit_area_size;
681 chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
682 base::OS::CommitPageSize());
    size_t commit_size =
        RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
                base::OS::CommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, &reservation);
690 if (base == NULL) return NULL;
692 if (Heap::ShouldZapGarbage()) {
693 ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
696 area_start = base + Page::kObjectStartOffset;
697 area_end = area_start + commit_area_size;
700 // Use chunk_size for statistics and callbacks because we assume that they
701 // treat reserved but not-yet committed memory regions of chunks as allocated.
702 isolate_->counters()->memory_allocated()->Increment(
703 static_cast<int>(chunk_size));
705 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
707 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
708 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
711 MemoryChunk* result = MemoryChunk::Initialize(
712 heap, base, chunk_size, area_start, area_end, executable, owner);
713 result->set_reserved_memory(&reservation);
718 void Page::ResetFreeListStatistics() {
719 non_available_small_blocks_ = 0;
720 available_in_small_free_list_ = 0;
721 available_in_medium_free_list_ = 0;
722 available_in_large_free_list_ = 0;
723 available_in_huge_free_list_ = 0;
727 Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
728 Executability executable) {
729 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
731 if (chunk == NULL) return NULL;
733 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
737 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                              Space* owner,
                                              Executability executable) {
  MemoryChunk* chunk =
      AllocateChunk(object_size, object_size, executable, owner);
742 if (chunk == NULL) return NULL;
743 return LargePage::Initialize(isolate_->heap(), chunk);
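// Returns a chunk's memory to the OS (or to the code range for executable
// chunks): notifies allocation callbacks, remembers the unmapped page in the
// heap, and deletes the chunk's slots buffer and skip list.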
747 void MemoryAllocator::Free(MemoryChunk* chunk) {
748 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
749 if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
752 PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
755 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
756 chunk->IsEvacuationCandidate());
758 delete chunk->slots_buffer();
759 delete chunk->skip_list();
761 base::VirtualMemory* reservation = chunk->reserved_memory();
762 if (reservation->IsReserved()) {
763 FreeMemory(reservation, chunk->executable());
765 FreeMemory(chunk->address(), chunk->size(), chunk->executable());
770 bool MemoryAllocator::CommitBlock(Address start, size_t size,
771 Executability executable) {
772 if (!CommitMemory(start, size, executable)) return false;
774 if (Heap::ShouldZapGarbage()) {
775 ZapBlock(start, size);
778 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
783 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
784 if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
785 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
790 void MemoryAllocator::ZapBlock(Address start, size_t size) {
791 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
792 Memory::Address_at(start + s) = kZapValue;
797 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
798 AllocationAction action,
800 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
801 MemoryAllocationCallbackRegistration registration =
802 memory_allocation_callbacks_[i];
803 if ((registration.space & space) == space &&
804 (registration.action & action) == action)
805 registration.callback(space, action, static_cast<int>(size));
810 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
811 MemoryAllocationCallback callback) {
812 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
813 if (memory_allocation_callbacks_[i].callback == callback) return true;
819 void MemoryAllocator::AddMemoryAllocationCallback(
820 MemoryAllocationCallback callback, ObjectSpace space,
821 AllocationAction action) {
822 DCHECK(callback != NULL);
823 MemoryAllocationCallbackRegistration registration(callback, space, action);
824 DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
825 return memory_allocation_callbacks_.Add(registration);
829 void MemoryAllocator::RemoveMemoryAllocationCallback(
830 MemoryAllocationCallback callback) {
831 DCHECK(callback != NULL);
832 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
833 if (memory_allocation_callbacks_[i].callback == callback) {
834 memory_allocation_callbacks_.Remove(i);
843 void MemoryAllocator::ReportStatistics() {
844 float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX
         "d"
         ", used: %" V8_PTR_PREFIX
         "d"
         ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct * 100));
855 int MemoryAllocator::CodePageGuardStartOffset() {
856 // We are guarding code pages: the first OS page after the header
857 // will be protected as non-writable.
858 return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
862 int MemoryAllocator::CodePageGuardSize() {
863 return static_cast<int>(base::OS::CommitPageSize());
867 int MemoryAllocator::CodePageAreaStartOffset() {
868 // We are guarding code pages: the first OS page after the header
869 // will be protected as non-writable.
870 return CodePageGuardStartOffset() + CodePageGuardSize();
874 int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
877 return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
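// Commits a code page in four steps: the non-executable header, a guard page
// right after the header, the executable body, and a guard page at the end of
// the reservation.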
881 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
882 Address start, size_t commit_size,
883 size_t reserved_size) {
884 // Commit page header (not executable).
885 if (!vm->Commit(start, CodePageGuardStartOffset(), false)) {
889 // Create guard page after the header.
890 if (!vm->Guard(start + CodePageGuardStartOffset())) {
894 // Commit page body (executable).
895 if (!vm->Commit(start + CodePageAreaStartOffset(),
896 commit_size - CodePageGuardStartOffset(), true)) {
900 // Create guard page before the end.
901 if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
  UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
                                        commit_size -
                                        CodePageGuardStartOffset());
  return true;
}
912 // -----------------------------------------------------------------------------
913 // MemoryChunk implementation
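// Live-byte accounting entry point used from the mutator: for pages in paged
// spaces that have not been swept yet, the owning space's unswept-free-bytes
// counter is adjusted by the same delta.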
915 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
916 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
917 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
918 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
920 chunk->IncrementLiveBytes(by);
924 // -----------------------------------------------------------------------------
925 // PagedSpace implementation
927 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
928 ObjectSpace::kObjectSpaceNewSpace);
929 STATIC_ASSERT(static_cast<ObjectSpace>(1
930 << AllocationSpace::OLD_POINTER_SPACE) ==
931 ObjectSpace::kObjectSpaceOldPointerSpace);
932 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_DATA_SPACE) ==
933 ObjectSpace::kObjectSpaceOldDataSpace);
934 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
935 ObjectSpace::kObjectSpaceCodeSpace);
936 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CELL_SPACE) ==
937 ObjectSpace::kObjectSpaceCellSpace);
STATIC_ASSERT(
    static_cast<ObjectSpace>(1 << AllocationSpace::PROPERTY_CELL_SPACE) ==
    ObjectSpace::kObjectSpacePropertyCellSpace);
941 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
942 ObjectSpace::kObjectSpaceMapSpace);
945 PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace space,
946 Executability executable)
947 : Space(heap, space, executable),
949 unswept_free_bytes_(0),
950 end_of_unswept_pages_(NULL),
951 emergency_memory_(NULL) {
952 area_size_ = MemoryAllocator::PageAreaSize(space);
  max_capacity_ =
      (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
955 accounting_stats_.Clear();
957 allocation_info_.set_top(NULL);
958 allocation_info_.set_limit(NULL);
960 anchor_.InitializeAsAnchor(this);
964 bool PagedSpace::SetUp() { return true; }
967 bool PagedSpace::HasBeenSetUp() { return true; }
970 void PagedSpace::TearDown() {
971 PageIterator iterator(this);
972 while (iterator.has_next()) {
973 heap()->isolate()->memory_allocator()->Free(iterator.next());
975 anchor_.set_next_page(&anchor_);
976 anchor_.set_prev_page(&anchor_);
977 accounting_stats_.Clear();
981 size_t PagedSpace::CommittedPhysicalMemory() {
982 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
983 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  PageIterator it(this);
986 while (it.has_next()) {
987 size += it.next()->CommittedPhysicalMemory();
993 bool PagedSpace::ContainsSafe(Address addr) {
994 Page* p = Page::FromAddress(addr);
995 PageIterator iterator(this);
996 while (iterator.has_next()) {
997 if (iterator.next() == p) return true;
1003 Object* PagedSpace::FindObject(Address addr) {
1004 // Note: this function can only be called on iterable spaces.
1005 DCHECK(!heap()->mark_compact_collector()->in_use());
1007 if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found.
1009 Page* p = Page::FromAddress(addr);
1010 HeapObjectIterator it(p, NULL);
1011 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
1012 Address cur = obj->address();
1013 Address next = cur + obj->Size();
1014 if ((cur <= addr) && (addr < next)) return obj;
1018 return Smi::FromInt(0);
1022 bool PagedSpace::CanExpand() {
1023 DCHECK(max_capacity_ % AreaSize() == 0);
1025 if (Capacity() == max_capacity_) return false;
1027 DCHECK(Capacity() < max_capacity_);
1029 // Are we going to exceed capacity for this space?
1030 if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
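// Adds one more page to the space. The very first page may be smaller than a
// regular page (Snapshot::SizeOfFirstPage), and pages created before
// deserialization completes are marked as never-evacuate.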
1036 bool PagedSpace::Expand() {
1037 if (!CanExpand()) return false;
1039 intptr_t size = AreaSize();
1041 if (anchor_.next_page() == &anchor_) {
1042 size = Snapshot::SizeOfFirstPage(identity());
  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
                                                                executable());
  if (p == NULL) return false;
1049 // Pages created during bootstrapping may contain immortal immovable objects.
1050 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
1052 DCHECK(Capacity() <= max_capacity_);
1054 p->InsertAfter(anchor_.prev_page());
1060 int PagedSpace::CountTotalPages() {
1061 PageIterator it(this);
1063 while (it.has_next()) {
1071 void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
1072 sizes->huge_size_ = page->available_in_huge_free_list();
1073 sizes->small_size_ = page->available_in_small_free_list();
1074 sizes->medium_size_ = page->available_in_medium_free_list();
1075 sizes->large_size_ = page->available_in_large_free_list();
1079 void PagedSpace::ResetFreeListStatistics() {
1080 PageIterator page_iterator(this);
1081 while (page_iterator.has_next()) {
1082 Page* page = page_iterator.next();
1083 page->ResetFreeListStatistics();
1088 void PagedSpace::IncreaseCapacity(int size) {
1089 accounting_stats_.ExpandSpace(size);
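// Removes an empty page from the space: evicts its free-list entries (or
// adjusts the unswept free bytes if the page was not swept), resets the
// allocation top if it pointed into this page, unlinks the page, and either
// frees it immediately or queues it in the heap for later freeing.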
1093 void PagedSpace::ReleasePage(Page* page) {
1094 DCHECK(page->LiveBytes() == 0);
1095 DCHECK(AreaSize() == page->area_size());
1097 if (page->WasSwept()) {
1098 intptr_t size = free_list_.EvictFreeListItems(page);
1099 accounting_stats_.AllocateBytes(size);
1100 DCHECK_EQ(AreaSize(), static_cast<int>(size));
1102 DecreaseUnsweptFreeBytes(page);
1105 if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
1106 heap()->decrement_scan_on_scavenge_pages();
1107 page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
1110 DCHECK(!free_list_.ContainsPageFreeListItems(page));
1112 if (Page::FromAllocationTop(allocation_info_.top()) == page) {
1113 allocation_info_.set_top(NULL);
1114 allocation_info_.set_limit(NULL);
1117 // If page is still in a list, unlink it from that list.
1118 if (page->next_chunk() != NULL) {
    DCHECK(page->prev_chunk() != NULL);
    page->Unlink();
  }
1123 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
1124 heap()->isolate()->memory_allocator()->Free(page);
1126 heap()->QueueMemoryChunkForFree(page);
1129 DCHECK(Capacity() > 0);
1130 accounting_stats_.ShrinkSpace(AreaSize());
1134 void PagedSpace::CreateEmergencyMemory() {
1135 if (identity() == CODE_SPACE) {
1136 // Make the emergency block available to the allocator.
1137 CodeRange* code_range = heap()->isolate()->code_range();
1138 if (code_range != NULL && code_range->valid()) {
1139 code_range->ReleaseEmergencyBlock();
1141 DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
1143 emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
1144 AreaSize(), AreaSize(), executable(), this);
1148 void PagedSpace::FreeEmergencyMemory() {
1149 Page* page = static_cast<Page*>(emergency_memory_);
1150 DCHECK(page->LiveBytes() == 0);
1151 DCHECK(AreaSize() == page->area_size());
1152 DCHECK(!free_list_.ContainsPageFreeListItems(page));
1153 heap()->isolate()->memory_allocator()->Free(page);
1154 emergency_memory_ = NULL;
1158 void PagedSpace::UseEmergencyMemory() {
1159 Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
1160 page->InsertAfter(anchor_.prev_page());
1161 emergency_memory_ = NULL;
1166 void PagedSpace::Print() {}
1170 void PagedSpace::Verify(ObjectVisitor* visitor) {
1171 bool allocation_pointer_found_in_space =
1172 (allocation_info_.top() == allocation_info_.limit());
1173 PageIterator page_iterator(this);
1174 while (page_iterator.has_next()) {
1175 Page* page = page_iterator.next();
1176 CHECK(page->owner() == this);
1177 if (page == Page::FromAllocationTop(allocation_info_.top())) {
1178 allocation_pointer_found_in_space = true;
1180 CHECK(page->WasSwept());
1181 HeapObjectIterator it(page, NULL);
1182 Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
1185 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1186 CHECK(end_of_previous_object <= object->address());
      // The first word should be a map, and we expect all map pointers to
      // be in map space.
1190 Map* map = object->map();
1191 CHECK(map->IsMap());
1192 CHECK(heap()->map_space()->Contains(map));
1194 // Perform space-specific object verification.
1195 VerifyObject(object);
1197 // The object itself should look OK.
1198 object->ObjectVerify();
1200 // All the interior pointers should be contained in the heap.
1201 int size = object->Size();
1202 object->IterateBody(map->instance_type(), size, visitor);
      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      CHECK(object->address() + size <= top);
1208 end_of_previous_object = object->address() + size;
1210 CHECK_LE(black_size, page->LiveBytes());
1212 CHECK(allocation_pointer_found_in_space);
1214 #endif // VERIFY_HEAP
1216 // -----------------------------------------------------------------------------
1217 // NewSpace implementation
1220 bool NewSpace::SetUp(int reserved_semispace_capacity,
1221 int maximum_semispace_capacity) {
1222 // Set up new space based on the preallocated memory block defined by
1223 // start and size. The provided space is divided into two semi-spaces.
1224 // To support fast containment testing in the new space, the size of
1225 // this chunk must be a power of two and it must be aligned to its size.
1226 int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1228 int target_semispace_capacity = heap()->TargetSemiSpaceSize();
1230 size_t size = 2 * reserved_semispace_capacity;
1231 Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
1232 size, size, &reservation_);
1233 if (base == NULL) return false;
  chunk_base_ = base;
  chunk_size_ = static_cast<uintptr_t>(size);
1237 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1239 DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
1240 DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
1242 // Allocate and set up the histogram arrays if necessary.
1243 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1244 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1246 #define SET_NAME(name) \
1247 allocated_histogram_[name].set_name(#name); \
1248 promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
1252 DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1253 DCHECK(static_cast<intptr_t>(chunk_size_) >=
1254 2 * heap()->ReservedSemiSpaceSize());
1255 DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1257 to_space_.SetUp(chunk_base_, initial_semispace_capacity,
1258 target_semispace_capacity, maximum_semispace_capacity);
1259 from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1260 initial_semispace_capacity, target_semispace_capacity,
1261 maximum_semispace_capacity);
1262 if (!to_space_.Commit()) {
1265 DCHECK(!from_space_.is_committed()); // No need to use memory yet.
1267 start_ = chunk_base_;
1268 address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1269 object_mask_ = address_mask_ | kHeapObjectTagMask;
1270 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1272 ResetAllocationInfo();
1278 void NewSpace::TearDown() {
1279 if (allocated_histogram_) {
1280 DeleteArray(allocated_histogram_);
1281 allocated_histogram_ = NULL;
1283 if (promoted_histogram_) {
1284 DeleteArray(promoted_histogram_);
1285 promoted_histogram_ = NULL;
1289 allocation_info_.set_top(NULL);
1290 allocation_info_.set_limit(NULL);
1292 to_space_.TearDown();
1293 from_space_.TearDown();
1295 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1297 DCHECK(reservation_.IsReserved());
  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
                                                    NOT_EXECUTABLE);
}
1305 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
1308 void NewSpace::Grow() {
1309 // Double the semispace size but only up to maximum capacity.
1310 DCHECK(TotalCapacity() < MaximumCapacity());
  int new_capacity =
      Min(MaximumCapacity(),
1313 FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
1314 if (to_space_.GrowTo(new_capacity)) {
1315 // Only grow from space if we managed to grow to-space.
1316 if (!from_space_.GrowTo(new_capacity)) {
1317 // If we managed to grow to-space but couldn't grow from-space,
1318 // attempt to shrink to-space.
1319 if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
1320 // We are in an inconsistent state because we could not
1321 // commit/uncommit memory from new space.
1326 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
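// Grows the new space by a single page. From-space is only grown if it is
// already committed; otherwise only its recorded total capacity is updated so
// it stays in sync with to-space.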
1330 bool NewSpace::GrowOnePage() {
1331 if (TotalCapacity() == MaximumCapacity()) return false;
1332 int new_capacity = static_cast<int>(TotalCapacity()) + Page::kPageSize;
1333 if (to_space_.GrowTo(new_capacity)) {
1334 // Only grow from space if we managed to grow to-space and the from space
1335 // is actually committed.
1336 if (from_space_.is_committed()) {
1337 if (!from_space_.GrowTo(new_capacity)) {
1338 // If we managed to grow to-space but couldn't grow from-space,
1339 // attempt to shrink to-space.
1340 if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
1341 // We are in an inconsistent state because we could not
1342 // commit/uncommit memory from new space.
1348 if (!from_space_.SetTotalCapacity(new_capacity)) {
1349 // Can't really happen, but better safe than sorry.
1353 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1360 void NewSpace::Shrink() {
1361 int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
1362 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1363 if (rounded_new_capacity < TotalCapacity() &&
1364 to_space_.ShrinkTo(rounded_new_capacity)) {
1365 // Only shrink from-space if we managed to shrink to-space.
1366 from_space_.Reset();
1367 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1368 // If we managed to shrink to-space but couldn't shrink from
1369 // space, attempt to grow to-space again.
1370 if (!to_space_.GrowTo(from_space_.TotalCapacity())) {
1371 // We are in an inconsistent state because we could not
1372 // commit/uncommit memory from new space.
1377 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1381 void NewSpace::UpdateAllocationInfo() {
1382 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1383 allocation_info_.set_top(to_space_.page_low());
1384 allocation_info_.set_limit(to_space_.page_high());
1385 UpdateInlineAllocationLimit(0);
1386 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1390 void NewSpace::ResetAllocationInfo() {
1392 UpdateAllocationInfo();
1394 // Clear all mark-bits in the to-space.
1395 NewSpacePageIterator it(&to_space_);
1396 while (it.has_next()) {
1397 Bitmap::Clear(it.next());
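// Recomputes the inline allocation limit: the lowered new top when inline
// allocation is disabled, the end of the current page when no limit step is
// configured, and top plus the limit step during incremental marking.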
1402 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1403 if (heap()->inline_allocation_disabled()) {
1404 // Lowest limit when linear allocation was disabled.
1405 Address high = to_space_.page_high();
1406 Address new_top = allocation_info_.top() + size_in_bytes;
1407 allocation_info_.set_limit(Min(new_top, high));
1408 } else if (inline_allocation_limit_step() == 0) {
1409 // Normal limit is the end of the current page.
1410 allocation_info_.set_limit(to_space_.page_high());
1412 // Lower limit during incremental marking.
1413 Address high = to_space_.page_high();
1414 Address new_top = allocation_info_.top() + size_in_bytes;
1415 Address new_limit = new_top + inline_allocation_limit_step_;
1416 allocation_info_.set_limit(Min(new_limit, high));
1418 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
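// Advances allocation to a fresh page in to-space, growing to-space by one
// page first if the target capacity has not been reached yet. The unused
// remainder of the old page is filled with a filler object so that the page
// stays iterable.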
1422 bool NewSpace::AddFreshPage() {
1423 Address top = allocation_info_.top();
1424 if (NewSpacePage::IsAtStart(top)) {
1425 // The current page is already empty. Don't try to make another.
1427 // We should only get here if someone asks to allocate more
1428 // than what can be stored in a single page.
1429 // TODO(gc): Change the limit on new-space allocation to prevent this
1430 // from happening (all such allocations should go directly to LOSpace).
1433 if (!to_space_.AdvancePage()) {
    // Check if we reached the target capacity yet. If not, try to commit a
    // page and continue.
1436 if ((to_space_.TotalCapacity() < to_space_.TargetCapacity()) &&
1438 if (!to_space_.AdvancePage()) {
      // It doesn't make sense that we managed to commit a page, but can't
      // use it.
1444 // Failed to get a new page in to-space.
1449 // Clear remainder of current page.
1450 Address limit = NewSpacePage::FromLimit(top)->area_end();
1451 if (heap()->gc_state() == Heap::SCAVENGE) {
1452 heap()->promotion_queue()->SetNewLimit(limit);
1455 int remaining_in_page = static_cast<int>(limit - top);
1456 heap()->CreateFillerObjectAt(top, remaining_in_page);
1458 UpdateAllocationInfo();
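// Slow path of AllocateRaw: either the limit was lowered to let incremental
// marking perform a step, or the current page is exhausted. Performs the
// marking step for the bytes allocated since the previous step and retries
// the allocation, or signals a retry if no fresh page can be obtained.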
1464 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
1465 Address old_top = allocation_info_.top();
1466 Address high = to_space_.page_high();
1467 if (allocation_info_.limit() < high) {
1468 // Either the limit has been lowered because linear allocation was disabled
1469 // or because incremental marking wants to get a chance to do a step. Set
1470 // the new limit accordingly.
1471 Address new_top = old_top + size_in_bytes;
1472 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1473 heap()->incremental_marking()->Step(bytes_allocated,
1474 IncrementalMarking::GC_VIA_STACK_GUARD);
1475 UpdateInlineAllocationLimit(size_in_bytes);
1476 top_on_previous_step_ = new_top;
1477 return AllocateRaw(size_in_bytes);
1478 } else if (AddFreshPage()) {
1479 // Switched to new page. Try allocating again.
1480 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1481 heap()->incremental_marking()->Step(bytes_allocated,
1482 IncrementalMarking::GC_VIA_STACK_GUARD);
1483 top_on_previous_step_ = to_space_.page_low();
1484 return AllocateRaw(size_in_bytes);
1486 return AllocationResult::Retry();
1492 // We do not use the SemiSpaceIterator because verification doesn't assume
1493 // that it works (it depends on the invariants we are checking).
1494 void NewSpace::Verify() {
1495 // The allocation pointer should be in the space or at the very end.
1496 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1498 // There should be objects packed in from the low address up to the
1499 // allocation pointer.
1500 Address current = to_space_.first_page()->area_start();
1501 CHECK_EQ(current, to_space_.space_start());
1503 while (current != top()) {
1504 if (!NewSpacePage::IsAtEnd(current)) {
1505 // The allocation pointer should not be in the middle of an object.
      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
            current < top());

      HeapObject* object = HeapObject::FromAddress(current);
      // The first word should be a map, and we expect all map pointers to
      // be in map space.
1513 Map* map = object->map();
1514 CHECK(map->IsMap());
1515 CHECK(heap()->map_space()->Contains(map));
1517 // The object should not be code or a map.
1518 CHECK(!object->IsMap());
1519 CHECK(!object->IsCode());
1521 // The object itself should look OK.
1522 object->ObjectVerify();
1524 // All the interior pointers should be contained in the heap.
1525 VerifyPointersVisitor visitor;
1526 int size = object->Size();
      object->IterateBody(map->instance_type(), size, &visitor);

      current += size;
    } else {
      // At end of page, switch to next page.
1532 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1533 // Next page should be valid.
1534 CHECK(!page->is_anchor());
1535 current = page->area_start();
1539 // Check semi-spaces.
1540 CHECK_EQ(from_space_.id(), kFromSpace);
1541 CHECK_EQ(to_space_.id(), kToSpace);
  from_space_.Verify();
  to_space_.Verify();
1547 // -----------------------------------------------------------------------------
1548 // SemiSpace implementation
1550 void SemiSpace::SetUp(Address start, int initial_capacity, int target_capacity,
1551 int maximum_capacity) {
1552 // Creates a space in the young generation. The constructor does not
1553 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1554 // memory of size 'capacity' when set up, and does not grow or shrink
1555 // otherwise. In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
1558 DCHECK(maximum_capacity >= Page::kPageSize);
1559 DCHECK(initial_capacity <= target_capacity);
1560 DCHECK(target_capacity <= maximum_capacity);
1561 initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1562 total_capacity_ = initial_capacity;
1563 target_capacity_ = RoundDown(target_capacity, Page::kPageSize);
1564 maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1565 maximum_committed_ = 0;
1568 address_mask_ = ~(maximum_capacity - 1);
1569 object_mask_ = address_mask_ | kHeapObjectTagMask;
1570 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1575 void SemiSpace::TearDown() {
1577 total_capacity_ = 0;
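// Commits memory for the whole current capacity of the semispace and links
// one NewSpacePage per Page::kPageSize of the committed region into the
// anchored page list.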
1581 bool SemiSpace::Commit() {
1582 DCHECK(!is_committed());
1583 int pages = total_capacity_ / Page::kPageSize;
1584 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1585 start_, total_capacity_, executable())) {
1589 NewSpacePage* current = anchor();
1590 for (int i = 0; i < pages; i++) {
1591 NewSpacePage* new_page =
1592 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
    new_page->InsertAfter(current);
    current = new_page;
  }
1597 SetCapacity(total_capacity_);
1604 bool SemiSpace::Uncommit() {
1605 DCHECK(is_committed());
1606 Address start = start_ + maximum_total_capacity_ - total_capacity_;
1607 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
1611 anchor()->set_next_page(anchor());
1612 anchor()->set_prev_page(anchor());
1619 size_t SemiSpace::CommittedPhysicalMemory() {
1620 if (!is_committed()) return 0;
  size_t size = 0;
  NewSpacePageIterator it(this);
1623 while (it.has_next()) {
1624 size += it.next()->CommittedPhysicalMemory();
1630 bool SemiSpace::GrowTo(int new_capacity) {
1631 if (!is_committed()) {
1632 if (!Commit()) return false;
1634 DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1635 DCHECK(new_capacity <= maximum_total_capacity_);
1636 DCHECK(new_capacity > total_capacity_);
1637 int pages_before = total_capacity_ / Page::kPageSize;
1638 int pages_after = new_capacity / Page::kPageSize;
1640 size_t delta = new_capacity - total_capacity_;
1642 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1643 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1644 start_ + total_capacity_, delta, executable())) {
1647 SetCapacity(new_capacity);
1648 NewSpacePage* last_page = anchor()->prev_page();
1649 DCHECK(last_page != anchor());
1650 for (int i = pages_before; i < pages_after; i++) {
1651 Address page_address = start_ + i * Page::kPageSize;
1652 NewSpacePage* new_page =
1653 NewSpacePage::Initialize(heap(), page_address, this);
1654 new_page->InsertAfter(last_page);
1655 Bitmap::Clear(new_page);
    // Duplicate the flags that were set on the old page.
1657 new_page->SetFlags(last_page->GetFlags(),
1658 NewSpacePage::kCopyOnFlipFlagsMask);
1659 last_page = new_page;
1665 bool SemiSpace::ShrinkTo(int new_capacity) {
1666 DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1667 DCHECK(new_capacity >= initial_total_capacity_);
1668 DCHECK(new_capacity < total_capacity_);
1669 if (is_committed()) {
1670 size_t delta = total_capacity_ - new_capacity;
1671 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1673 MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1674 if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
1678 int pages_after = new_capacity / Page::kPageSize;
1679 NewSpacePage* new_last_page =
1680 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1681 new_last_page->set_next_page(anchor());
1682 anchor()->set_prev_page(new_last_page);
1683 DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
1686 SetCapacity(new_capacity);
1692 bool SemiSpace::SetTotalCapacity(int new_capacity) {
1693 CHECK(!is_committed());
1694 if (new_capacity >= initial_total_capacity_ &&
1695 new_capacity <= maximum_total_capacity_) {
1696 total_capacity_ = new_capacity;
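// Rewires page ownership and flags after this semispace has changed identity
// (used when from-space and to-space are swapped): every page gets the
// matching IN_FROM_SPACE / IN_TO_SPACE flag, and pages that become to-space
// pages get their live bytes and below-age-mark flag reset.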
1703 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1704 anchor_.set_owner(this);
  // Fixup back-pointers to anchor. Address of anchor changes
  // when we swap.
1707 anchor_.prev_page()->set_next_page(&anchor_);
1708 anchor_.next_page()->set_prev_page(&anchor_);
1710 bool becomes_to_space = (id_ == kFromSpace);
1711 id_ = becomes_to_space ? kToSpace : kFromSpace;
1712 NewSpacePage* page = anchor_.next_page();
1713 while (page != &anchor_) {
1714 page->set_owner(this);
1715 page->SetFlags(flags, mask);
1716 if (becomes_to_space) {
1717 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1718 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1719 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1720 page->ResetLiveBytes();
1722 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1723 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1725 DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1726 DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1727 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1728 page = page->next_page();
1733 void SemiSpace::Reset() {
1734 DCHECK(anchor_.next_page() != &anchor_);
1735 current_page_ = anchor_.next_page();
1739 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1740 // We won't be swapping semispaces without data in them.
1741 DCHECK(from->anchor_.next_page() != &from->anchor_);
1742 DCHECK(to->anchor_.next_page() != &to->anchor_);
  SemiSpace tmp = *from;
  *from = *to;
  *to = tmp;
  // Fixup back-pointers to the page list anchor now that its address
  // has changed.
1751 // Swap to/from-space bits on pages.
1752 // Copy GC flags from old active space (from-space) to new (to-space).
1753 intptr_t flags = from->current_page()->GetFlags();
1754 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1756 from->FlipPages(0, 0);
1760 void SemiSpace::SetCapacity(int new_capacity) {
1761 total_capacity_ = new_capacity;
1762 if (total_capacity_ > maximum_committed_) {
1763 maximum_committed_ = total_capacity_;
1768 void SemiSpace::set_age_mark(Address mark) {
  DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
  age_mark_ = mark;
1771 // Mark all pages up to the one containing mark.
1772 NewSpacePageIterator it(space_start(), mark);
1773 while (it.has_next()) {
1774 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1780 void SemiSpace::Print() {}
1784 void SemiSpace::Verify() {
1785 bool is_from_space = (id_ == kFromSpace);
1786 NewSpacePage* page = anchor_.next_page();
1787 CHECK(anchor_.semi_space() == this);
1788 while (page != &anchor_) {
1789 CHECK(page->semi_space() == this);
1790 CHECK(page->InNewSpace());
1791 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1792 : MemoryChunk::IN_TO_SPACE));
1793 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1794 : MemoryChunk::IN_FROM_SPACE));
1795 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1796 if (!is_from_space) {
1797 // The pointers-from-here-are-interesting flag isn't updated dynamically
1798 // on from-space pages, so it might be out of sync with the marking state.
1799 if (page->heap()->incremental_marking()->IsMarking()) {
1800 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      } else {
        CHECK(
            !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      }
1805 // TODO(gc): Check that the live_bytes_count_ field matches the
1806 // black marking on the page (if we make it match in new-space).
1808 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1809 CHECK(page->prev_page()->next_page() == page);
1810 page = page->next_page();
1816 void SemiSpace::AssertValidRange(Address start, Address end) {
1817 // Addresses belong to same semi-space
1818 NewSpacePage* page = NewSpacePage::FromLimit(start);
1819 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1820 SemiSpace* space = page->semi_space();
1821 CHECK_EQ(space, end_page->semi_space());
1822 // Start address is before end address, either on same page,
1823 // or end address is on a later page in the linked list of
1824 // semi-space pages.
1825 if (page == end_page) {
1826 CHECK(start <= end);
  } else {
    while (page != end_page) {
1829 page = page->next_page();
1830 CHECK_NE(page, space->anchor());
1837 // -----------------------------------------------------------------------------
1838 // SemiSpaceIterator implementation.
1839 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1840 Initialize(space->bottom(), space->top(), NULL);
1844 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1845 HeapObjectCallback size_func) {
1846 Initialize(space->bottom(), space->top(), size_func);
1850 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1851 Initialize(start, space->top(), NULL);
1855 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1856 Initialize(from, to, NULL);
1860 void SemiSpaceIterator::Initialize(Address start, Address end,
1861 HeapObjectCallback size_func) {
1862 SemiSpace::AssertValidRange(start, end);
1865 size_func_ = size_func;
1870 // heap_histograms is shared, always clear it before using it.
1871 static void ClearHistograms(Isolate* isolate) {
1872 // We reset the name each time, though it hasn't changed.
1873 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1874 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1875 #undef DEF_TYPE_NAME
1877 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1878 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1879 #undef CLEAR_HISTOGRAM
1881 isolate->js_spill_information()->Clear();
1885 static void ClearCodeKindStatistics(int* code_kind_statistics) {
1886 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1887 code_kind_statistics[i] = 0;
1892 static void ReportCodeKindStatistics(int* code_kind_statistics) {
1893 PrintF("\n Code kind histograms: \n");
1894 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1895 if (code_kind_statistics[i] > 0) {
1896 PrintF(" %-20s: %10d bytes\n",
1897 Code::Kind2String(static_cast<Code::Kind>(i)),
1898 code_kind_statistics[i]);
1905 static int CollectHistogramInfo(HeapObject* obj) {
1906 Isolate* isolate = obj->GetIsolate();
1907 InstanceType type = obj->map()->instance_type();
1908 DCHECK(0 <= type && type <= LAST_TYPE);
1909 DCHECK(isolate->heap_histograms()[type].name() != NULL);
1910 isolate->heap_histograms()[type].increment_number(1);
1911 isolate->heap_histograms()[type].increment_bytes(obj->Size());
1913 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)
        ->IncrementSpillStatistics(isolate->js_spill_information());
1922 static void ReportHistogram(Isolate* isolate, bool print_spill) {
1923 PrintF("\n Object Histogram:\n");
1924 for (int i = 0; i <= LAST_TYPE; i++) {
1925 if (isolate->heap_histograms()[i].number() > 0) {
1926 PrintF(" %-34s%10d (%10d bytes)\n",
1927 isolate->heap_histograms()[i].name(),
1928 isolate->heap_histograms()[i].number(),
1929 isolate->heap_histograms()[i].bytes());
1934 // Summarize string types.
1935 int string_number = 0;
1936 int string_bytes = 0;
1937 #define INCREMENT(type, size, name, camel_name) \
1938 string_number += isolate->heap_histograms()[type].number(); \
1939 string_bytes += isolate->heap_histograms()[type].bytes();
1940 STRING_TYPE_LIST(INCREMENT)
1942 if (string_number > 0) {
1943 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1944 string_bytes);
1947 if (FLAG_collect_heap_spill_statistics && print_spill) {
1948 isolate->js_spill_information()->Print();
1954 // Support for statistics gathering for --heap-stats and --log-gc.
1955 void NewSpace::ClearHistograms() {
1956 for (int i = 0; i <= LAST_TYPE; i++) {
1957 allocated_histogram_[i].clear();
1958 promoted_histogram_[i].clear();
1963 // Because the copying collector does not touch garbage objects, we iterate
1964 // the new space before a collection to get a histogram of allocated objects.
1965 // This only happens when the --log-gc flag is set.
1966 void NewSpace::CollectStatistics() {
1967 ClearHistograms();
1968 SemiSpaceIterator it(this);
1969 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1970 RecordAllocation(obj);
1974 static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
1975 const char* description) {
1976 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1977 // Lump all the string types together.
1978 int string_number = 0;
1979 int string_bytes = 0;
1980 #define INCREMENT(type, size, name, camel_name) \
1981 string_number += info[type].number(); \
1982 string_bytes += info[type].bytes();
1983 STRING_TYPE_LIST(INCREMENT)
1985 if (string_number > 0) {
1986 LOG(isolate,
1987 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1990 // Then do the other types.
1991 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1992 if (info[i].number() > 0) {
1993 LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
1997 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
2001 void NewSpace::ReportStatistics() {
2003 if (FLAG_heap_stats) {
2004 float pct = static_cast<float>(Available()) / TotalCapacity();
2005 PrintF(" capacity: %" V8_PTR_PREFIX
2007 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2008 TotalCapacity(), Available(), static_cast<int>(pct * 100));
2009 PrintF("\n Object Histogram:\n");
2010 for (int i = 0; i <= LAST_TYPE; i++) {
2011 if (allocated_histogram_[i].number() > 0) {
2012 PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
2013 allocated_histogram_[i].number(),
2014 allocated_histogram_[i].bytes());
2022 Isolate* isolate = heap()->isolate();
2023 DoReportStatistics(isolate, allocated_histogram_, "allocated");
2024 DoReportStatistics(isolate, promoted_histogram_, "promoted");
2029 void NewSpace::RecordAllocation(HeapObject* obj) {
2030 InstanceType type = obj->map()->instance_type();
2031 DCHECK(0 <= type && type <= LAST_TYPE);
2032 allocated_histogram_[type].increment_number(1);
2033 allocated_histogram_[type].increment_bytes(obj->Size());
2037 void NewSpace::RecordPromotion(HeapObject* obj) {
2038 InstanceType type = obj->map()->instance_type();
2039 DCHECK(0 <= type && type <= LAST_TYPE);
2040 promoted_histogram_[type].increment_number(1);
2041 promoted_histogram_[type].increment_bytes(obj->Size());
2045 size_t NewSpace::CommittedPhysicalMemory() {
2046 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2047 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2048 size_t size = to_space_.CommittedPhysicalMemory();
2049 if (from_space_.is_committed()) {
2050 size += from_space_.CommittedPhysicalMemory();
2056 // -----------------------------------------------------------------------------
2057 // Free lists for old object spaces implementation
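//
// The free list is split into four categories by block size: small, medium,
// large, and huge. Each FreeListCategory keeps a singly linked list of
// FreeSpace nodes that live in the heap itself (a free block carries the
// free-space map and its size), so free memory stays iterable. Concatenate()
// below splices one category's list onto another while holding both category
// mutexes, presumably so that free lists built concurrently (for example by
// sweeper threads) can be merged safely into the owning space's list.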
2059 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
2060 intptr_t free_bytes = 0;
2061 if (category->top() != NULL) {
2062 // This is safe (not going to deadlock) since Concatenate operations
2063 // are never performed on the same free lists at the same time in
2064 // reverse order.
2065 base::LockGuard<base::Mutex> target_lock_guard(mutex());
2066 base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
2067 DCHECK(category->end_ != NULL);
2068 free_bytes = category->available();
2069 if (end_ == NULL) {
2070 end_ = category->end();
2071 } else {
2072 category->end()->set_next(top());
2073 }
2074 set_top(category->top());
2075 base::NoBarrier_Store(&top_, category->top_);
2076 available_ += category->available();
2083 void FreeListCategory::Reset() {
2090 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
2091 int sum = 0;
2092 FreeSpace* t = top();
2093 FreeSpace** n = &t;
2094 while (*n != NULL) {
2095 if (Page::FromAddress((*n)->address()) == p) {
2096 FreeSpace* free_space = *n;
2097 sum += free_space->Size();
2098 *n = (*n)->next();
2099 } else {
2100 n = (*n)->next_address();
2104 if (top() == NULL) {
2112 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
2113 FreeSpace* node = top();
2114 while (node != NULL) {
2115 if (Page::FromAddress(node->address()) == p) return true;
2116 node = node->next();
2122 FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
2123 FreeSpace* node = top();
2125 if (node == NULL) return NULL;
2127 while (node != NULL &&
2128 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
2129 available_ -= node->Size();
2130 node = node->next();
2133 if (node != NULL) {
2134 set_top(node->next());
2135 *node_size = node->Size();
2136 available_ -= *node_size;
2141 if (top() == NULL) {
2149 FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
2151 FreeSpace* node = PickNodeFromList(node_size);
2152 if (node != NULL && *node_size < size_in_bytes) {
2153 Free(node, *node_size);
2154 *node_size = 0;
2155 return NULL;
2161 void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
2162 DCHECK_LE(FreeList::kSmallListMin, size_in_bytes);
2163 free_space->set_next(top());
2164 set_top(free_space);
2165 if (end_ == NULL) {
2166 end_ = free_space;
2167 }
2168 available_ += size_in_bytes;
2172 void FreeListCategory::RepairFreeList(Heap* heap) {
2173 FreeSpace* n = top();
2174 while (n != NULL) {
2175 Map** map_location = reinterpret_cast<Map**>(n->address());
2176 if (*map_location == NULL) {
2177 *map_location = heap->free_space_map();
2178 } else {
2179 DCHECK(*map_location == heap->free_space_map());
2186 FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
2191 intptr_t FreeList::Concatenate(FreeList* free_list) {
2192 intptr_t free_bytes = 0;
2193 free_bytes += small_list_.Concatenate(free_list->small_list());
2194 free_bytes += medium_list_.Concatenate(free_list->medium_list());
2195 free_bytes += large_list_.Concatenate(free_list->large_list());
2196 free_bytes += huge_list_.Concatenate(free_list->huge_list());
2201 void FreeList::Reset() {
2202 small_list_.Reset();
2203 medium_list_.Reset();
2204 large_list_.Reset();
2205 huge_list_.Reset();
2209 int FreeList::Free(Address start, int size_in_bytes) {
2210 if (size_in_bytes == 0) return 0;
2212 heap_->CreateFillerObjectAt(start, size_in_bytes);
2214 Page* page = Page::FromAddress(start);
2216 // Early return to drop too-small blocks on the floor.
2217 if (size_in_bytes < kSmallListMin) {
2218 page->add_non_available_small_blocks(size_in_bytes);
2219 return size_in_bytes;
2222 FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
2223 // Insert other blocks at the head of a free list of the appropriate
2224 // magnitude.
2225 if (size_in_bytes <= kSmallListMax) {
2226 small_list_.Free(free_space, size_in_bytes);
2227 page->add_available_in_small_free_list(size_in_bytes);
2228 } else if (size_in_bytes <= kMediumListMax) {
2229 medium_list_.Free(free_space, size_in_bytes);
2230 page->add_available_in_medium_free_list(size_in_bytes);
2231 } else if (size_in_bytes <= kLargeListMax) {
2232 large_list_.Free(free_space, size_in_bytes);
2233 page->add_available_in_large_free_list(size_in_bytes);
2235 huge_list_.Free(free_space, size_in_bytes);
2236 page->add_available_in_huge_free_list(size_in_bytes);
2239 DCHECK(IsVeryLong() || available() == SumFreeLists());
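//
// In short, Free() routes a reclaimed block by size: blocks below
// kSmallListMin are merely recorded as unusable bytes on the page, and every
// other block goes to the smallest category whose limit (kSmallListMax,
// kMediumListMax, kLargeListMax) it fits under, with the rest landing on the
// huge list. A sketch of the typical call, assuming a sweeper has just
// reclaimed size_in_bytes of memory starting at start:
//
//   int wasted = free_list->Free(start, size_in_bytes);
//
// where the return value is the portion that was too small to be reused.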
2244 FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2245 FreeSpace* node = NULL;
2248 if (size_in_bytes <= kSmallAllocationMax) {
2249 node = small_list_.PickNodeFromList(node_size);
2251 DCHECK(size_in_bytes <= *node_size);
2252 page = Page::FromAddress(node->address());
2253 page->add_available_in_small_free_list(-(*node_size));
2254 DCHECK(IsVeryLong() || available() == SumFreeLists());
2259 if (size_in_bytes <= kMediumAllocationMax) {
2260 node = medium_list_.PickNodeFromList(node_size);
2262 DCHECK(size_in_bytes <= *node_size);
2263 page = Page::FromAddress(node->address());
2264 page->add_available_in_medium_free_list(-(*node_size));
2265 DCHECK(IsVeryLong() || available() == SumFreeLists());
2270 if (size_in_bytes <= kLargeAllocationMax) {
2271 node = large_list_.PickNodeFromList(node_size);
2273 DCHECK(size_in_bytes <= *node_size);
2274 page = Page::FromAddress(node->address());
2275 page->add_available_in_large_free_list(-(*node_size));
2276 DCHECK(IsVeryLong() || available() == SumFreeLists());
2281 int huge_list_available = huge_list_.available();
2282 FreeSpace* top_node = huge_list_.top();
2283 for (FreeSpace** cur = &top_node; *cur != NULL;
2284 cur = (*cur)->next_address()) {
2285 FreeSpace* cur_node = *cur;
2286 while (cur_node != NULL &&
2287 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
2288 int size = cur_node->Size();
2289 huge_list_available -= size;
2290 page = Page::FromAddress(cur_node->address());
2291 page->add_available_in_huge_free_list(-size);
2292 cur_node = cur_node->next();
2296 if (cur_node == NULL) {
2297 huge_list_.set_end(NULL);
2298 break;
2299 }
2301 int size = cur_node->Size();
2302 if (size >= size_in_bytes) {
2303 // Large enough node found. Unlink it from the list.
2304 node = *cur;
2305 *cur = node->next();
2306 *node_size = size;
2307 huge_list_available -= size;
2308 page = Page::FromAddress(node->address());
2309 page->add_available_in_huge_free_list(-size);
2314 huge_list_.set_top(top_node);
2315 if (huge_list_.top() == NULL) {
2316 huge_list_.set_end(NULL);
2318 huge_list_.set_available(huge_list_available);
2321 DCHECK(IsVeryLong() || available() == SumFreeLists());
2325 if (size_in_bytes <= kSmallListMax) {
2326 node = small_list_.PickNodeFromList(size_in_bytes, node_size);
2328 DCHECK(size_in_bytes <= *node_size);
2329 page = Page::FromAddress(node->address());
2330 page->add_available_in_small_free_list(-(*node_size));
2332 } else if (size_in_bytes <= kMediumListMax) {
2333 node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
2335 DCHECK(size_in_bytes <= *node_size);
2336 page = Page::FromAddress(node->address());
2337 page->add_available_in_medium_free_list(-(*node_size));
2339 } else if (size_in_bytes <= kLargeListMax) {
2340 node = large_list_.PickNodeFromList(size_in_bytes, node_size);
2342 DCHECK(size_in_bytes <= *node_size);
2343 page = Page::FromAddress(node->address());
2344 page->add_available_in_large_free_list(-(*node_size));
2348 DCHECK(IsVeryLong() || available() == SumFreeLists());
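//
// FindNodeFor tries the cheapest fit first: for a sufficiently small request
// it pops the head of the small, then medium, then large list, relying on the
// kXxxAllocationMax thresholds to guarantee that any node on those lists is
// big enough. Failing that it scans the huge list linearly, pruning nodes on
// evacuation candidate pages as it goes, and takes the first node that fits.
// As a last resort it re-searches the request's own size class with an
// explicit size check (the PickNodeFromList overload above).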
2353 // Allocation on the old space free list. If it succeeds then a new linear
2354 // allocation space has been set up with the top and limit of the space. If
2355 // the allocation fails then NULL is returned, and the caller can perform a GC
2356 // or allocate a new page before retrying.
2357 HeapObject* FreeList::Allocate(int size_in_bytes) {
2358 DCHECK(0 < size_in_bytes);
2359 DCHECK(size_in_bytes <= kMaxBlockSize);
2360 DCHECK(IsAligned(size_in_bytes, kPointerSize));
2361 // Don't allocate from the free list if there is linear space available.
2362 DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
2364 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2365 // Mark the old linear allocation area with a free space map so it can be
2366 // skipped when scanning the heap. This also puts it back in the free list
2367 // if it is big enough.
2368 owner_->Free(owner_->top(), old_linear_size);
2370 owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
2371 old_linear_size);
2373 int new_node_size = 0;
2374 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2375 if (new_node == NULL) {
2376 owner_->SetTopAndLimit(NULL, NULL);
2380 int bytes_left = new_node_size - size_in_bytes;
2381 DCHECK(bytes_left >= 0);
2383 #ifdef DEBUG
2384 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2385 reinterpret_cast<Object**>(new_node->address())[i] =
2386 Smi::FromInt(kCodeZapValue);
2387 }
2388 #endif
2390 // The old-space-step might have finished sweeping and restarted marking.
2391 // Verify that it did not turn the page of the new node into an evacuation
2392 // candidate.
2393 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2395 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2397 // Memory in the linear allocation area is counted as allocated. We may free
2398 // a little of this again immediately - see below.
2399 owner_->Allocate(new_node_size);
2401 if (owner_->heap()->inline_allocation_disabled()) {
2402 // Keep the linear allocation area empty if requested to do so, just
2403 // return the area to the free list instead.
2404 owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2405 DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
2406 } else if (bytes_left > kThreshold &&
2407 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2408 FLAG_incremental_marking_steps) {
2409 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2410 // We don't want to give too large linear areas to the allocator while
2411 // incremental marking is going on, because we won't check again whether
2412 // we want to do another increment until the linear area is used up.
2413 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2414 new_node_size - size_in_bytes - linear_size);
2415 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2416 new_node->address() + size_in_bytes + linear_size);
2417 } else if (bytes_left > 0) {
2418 // Normally we give the rest of the node to the allocator as its new
2419 // linear allocation area.
2420 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2421 new_node->address() + new_node_size);
2423 // TODO(gc) Try not freeing linear allocation region when bytes_left
2424 // is zero.
2425 owner_->SetTopAndLimit(NULL, NULL);
2432 intptr_t FreeList::EvictFreeListItems(Page* p) {
2433 intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
2434 p->set_available_in_huge_free_list(0);
2436 if (sum < p->area_size()) {
2437 sum += small_list_.EvictFreeListItemsInList(p) +
2438 medium_list_.EvictFreeListItemsInList(p) +
2439 large_list_.EvictFreeListItemsInList(p);
2440 p->set_available_in_small_free_list(0);
2441 p->set_available_in_medium_free_list(0);
2442 p->set_available_in_large_free_list(0);
2449 bool FreeList::ContainsPageFreeListItems(Page* p) {
2450 return huge_list_.ContainsPageFreeListItemsInList(p) ||
2451 small_list_.ContainsPageFreeListItemsInList(p) ||
2452 medium_list_.ContainsPageFreeListItemsInList(p) ||
2453 large_list_.ContainsPageFreeListItemsInList(p);
2457 void FreeList::RepairLists(Heap* heap) {
2458 small_list_.RepairFreeList(heap);
2459 medium_list_.RepairFreeList(heap);
2460 large_list_.RepairFreeList(heap);
2461 huge_list_.RepairFreeList(heap);
2466 intptr_t FreeListCategory::SumFreeList() {
2468 FreeSpace* cur = top();
2469 while (cur != NULL) {
2470 DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
2471 sum += cur->nobarrier_size();
2478 static const int kVeryLongFreeList = 500;
2481 int FreeListCategory::FreeListLength() {
2482 int length = 0;
2483 FreeSpace* cur = top();
2484 while (cur != NULL) {
2485 length++;
2486 cur = cur->next();
2487 if (length == kVeryLongFreeList) return length;
2493 bool FreeList::IsVeryLong() {
2494 if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
2495 if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
2496 if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
2497 if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
2502 // This can take a very long time because it is linear in the number of entries
2503 // on the free list, so it should not be called if FreeListLength returns
2504 // kVeryLongFreeList.
2505 intptr_t FreeList::SumFreeLists() {
2506 intptr_t sum = small_list_.SumFreeList();
2507 sum += medium_list_.SumFreeList();
2508 sum += large_list_.SumFreeList();
2509 sum += huge_list_.SumFreeList();
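//
// SumFreeLists() and FreeListLength() exist to back the
// DCHECK(IsVeryLong() || available() == SumFreeLists()) consistency checks
// used throughout this file; since both walks are linear in the list length,
// the check is skipped once any category reaches kVeryLongFreeList entries.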
2515 // -----------------------------------------------------------------------------
2516 // OldSpace implementation
2518 void PagedSpace::PrepareForMarkCompact() {
2519 // We don't have a linear allocation area while sweeping. It will be restored
2520 // on the first allocation after the sweep.
2521 EmptyAllocationInfo();
2523 // This counter will be increased for pages which will be swept by the
2524 // sweeper threads.
2525 unswept_free_bytes_ = 0;
2527 // Clear the free list before a full GC---it will be rebuilt afterward.
2532 intptr_t PagedSpace::SizeOfObjects() {
2533 DCHECK(!FLAG_concurrent_sweeping ||
2534 heap()->mark_compact_collector()->sweeping_in_progress() ||
2535 (unswept_free_bytes_ == 0));
2536 return Size() - unswept_free_bytes_ - (limit() - top());
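//
// In other words, SizeOfObjects() is the committed size minus the free bytes
// still sitting on unswept pages minus the unused tail of the current linear
// allocation area.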
2540 // After we have booted, we have created a map which represents free space
2541 // on the heap. If there was already a free list then the elements on it
2542 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
2543 // fix them.
2544 void PagedSpace::RepairFreeListsAfterDeserialization() {
2545 free_list_.RepairLists(heap());
2546 // Each page may have a small free space that is not tracked by a free list.
2547 // Update the maps for those free space objects.
2548 PageIterator iterator(this);
2549 while (iterator.has_next()) {
2550 Page* page = iterator.next();
2551 int size = static_cast<int>(page->non_available_small_blocks());
2552 if (size == 0) continue;
2553 Address address = page->OffsetToAddress(Page::kPageSize - size);
2554 heap()->CreateFillerObjectAt(address, size);
2559 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2560 if (allocation_info_.top() >= allocation_info_.limit()) return;
2562 if (Page::FromAllocationTop(allocation_info_.top())
2563 ->IsEvacuationCandidate()) {
2564 // Create filler object to keep page iterable if it was iterable.
2565 int remaining =
2566 static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2567 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
2569 allocation_info_.set_top(NULL);
2570 allocation_info_.set_limit(NULL);
2575 HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
2576 int size_in_bytes) {
2577 MarkCompactCollector* collector = heap()->mark_compact_collector();
2578 if (collector->sweeping_in_progress()) {
2579 // Wait for the sweeper threads here and complete the sweeping phase.
2580 collector->EnsureSweepingCompleted();
2582 // After waiting for the sweeper threads, there may be new free-list
2583 // entries.
2584 return free_list_.Allocate(size_in_bytes);
2590 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2591 // Allocation in this space has failed.
2593 MarkCompactCollector* collector = heap()->mark_compact_collector();
2594 // Sweeping is still in progress.
2595 if (collector->sweeping_in_progress()) {
2596 // First try to refill the free-list, concurrent sweeper threads
2597 // may have freed some objects in the meantime.
2598 collector->RefillFreeList(this);
2600 // Retry the free list allocation.
2601 HeapObject* object = free_list_.Allocate(size_in_bytes);
2602 if (object != NULL) return object;
2604 // If sweeping is still in progress try to sweep pages on the main thread.
2605 int free_chunk = collector->SweepInParallel(this, size_in_bytes);
2606 collector->RefillFreeList(this);
2607 if (free_chunk >= size_in_bytes) {
2608 HeapObject* object = free_list_.Allocate(size_in_bytes);
2609 // We should be able to allocate an object here since we just freed that
2610 // much memory.
2611 DCHECK(object != NULL);
2612 if (object != NULL) return object;
2616 // Free list allocation failed and there is no next page. Fail if we have
2617 // hit the old generation size limit that should cause a garbage
2618 // collection.
2619 if (!heap()->always_allocate() &&
2620 heap()->OldGenerationAllocationLimitReached()) {
2621 // If sweeper threads are active, wait for them at that point and steal
2622 // elements from their free-lists.
2623 HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2624 if (object != NULL) return object;
2627 // Try to expand the space and allocate in the new next page.
2628 if (Expand()) {
2629 DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
2630 return free_list_.Allocate(size_in_bytes);
2633 // If sweeper threads are active, wait for them at that point and steal
2634 // elements from their free-lists. Allocation may still fail there, which
2635 // would indicate that there is not enough memory for the given allocation.
2636 return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
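//
// The slow path above escalates gradually: first refill the free list from
// what concurrent sweepers have already produced, then sweep pages in
// parallel on the main thread, then, unless the old-generation limit demands
// a GC, expand the space with a new page, and only as a last resort block
// until sweeping is complete and retry the free-list allocation.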
2641 void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2642 CommentStatistic* comments_statistics =
2643 isolate->paged_space_comments_statistics();
2644 ReportCodeKindStatistics(isolate->code_kind_statistics());
2646 "Code comment statistics (\" [ comment-txt : size/ "
2647 "count (average)\"):\n");
2648 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2649 const CommentStatistic& cs = comments_statistics[i];
2651 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2652 cs.size / cs.count);
2659 void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2660 CommentStatistic* comments_statistics =
2661 isolate->paged_space_comments_statistics();
2662 ClearCodeKindStatistics(isolate->code_kind_statistics());
2663 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2664 comments_statistics[i].Clear();
2666 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2667 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2668 comments_statistics[CommentStatistic::kMaxComments].count = 0;
2672 // Adds comment to 'comment_statistics' table. Performance OK as long as
2673 // 'kMaxComments' is small
2674 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2675 CommentStatistic* comments_statistics =
2676 isolate->paged_space_comments_statistics();
2677 // Do not count empty comments
2678 if (delta <= 0) return;
2679 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2680 // Search for a free or matching entry in 'comments_statistics': 'cs'
2681 // points to result.
2682 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2683 if (comments_statistics[i].comment == NULL) {
2684 cs = &comments_statistics[i];
2685 cs->comment = comment;
2686 break;
2687 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2688 cs = &comments_statistics[i];
2689 break;
2692 // Update entry for 'comment'
2693 cs->size += delta;
2694 cs->count += 1;
2698 // Call for each nested comment start (start marked with '[ xxx', end marked
2699 // with ']'). RelocIterator 'it' must point to a comment reloc info.
2700 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2701 DCHECK(!it->done());
2702 DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
2703 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2704 if (tmp[0] != '[') {
2705 // Not a nested comment; skip
2706 return;
2709 // Search for end of nested comment or a new nested comment
2710 const char* const comment_txt =
2711 reinterpret_cast<const char*>(it->rinfo()->data());
2712 const byte* prev_pc = it->rinfo()->pc();
2713 int flat_delta = 0;
2714 it->next();
2715 while (true) {
2716 // All nested comments must be terminated properly, and therefore exit
2717 // eventually.
2718 DCHECK(!it->done());
2719 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2720 const char* const txt =
2721 reinterpret_cast<const char*>(it->rinfo()->data());
2722 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2723 if (txt[0] == ']') break; // End of nested comment
2725 CollectCommentStatistics(isolate, it);
2726 // Skip code that was covered with previous comment
2727 prev_pc = it->rinfo()->pc();
2731 EnterComment(isolate, comment_txt, flat_delta);
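//
// flat_delta only counts code that is not covered by a nested comment: the
// recursive call accounts for each inner '[ ... ]' region on its own, and
// prev_pc is advanced past it, so every instruction byte ends up attributed
// to exactly one comment entry.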
2735 // Collects code size statistics:
2736 // - by code kind
2737 // - by code comment
2738 void PagedSpace::CollectCodeStatistics() {
2739 Isolate* isolate = heap()->isolate();
2740 HeapObjectIterator obj_it(this);
2741 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2742 if (obj->IsCode()) {
2743 Code* code = Code::cast(obj);
2744 isolate->code_kind_statistics()[code->kind()] += code->Size();
2745 RelocIterator it(code);
2746 int delta = 0;
2747 const byte* prev_pc = code->instruction_start();
2748 while (!it.done()) {
2749 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2750 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2751 CollectCommentStatistics(isolate, &it);
2752 prev_pc = it.rinfo()->pc();
2757 DCHECK(code->instruction_start() <= prev_pc &&
2758 prev_pc <= code->instruction_end());
2759 delta += static_cast<int>(code->instruction_end() - prev_pc);
2760 EnterComment(isolate, "NoComment", delta);
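// Instruction bytes not covered by any comment accumulate in delta and are
// attributed to the synthetic "NoComment" bucket, so the per-comment sizes
// should add up to the code object's instruction size.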
2766 void PagedSpace::ReportStatistics() {
2767 int pct = static_cast<int>(Available() * 100 / Capacity());
2768 PrintF(" capacity: %" V8_PTR_PREFIX
2770 ", waste: %" V8_PTR_PREFIX
2772 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2773 Capacity(), Waste(), Available(), pct);
2775 if (heap()->mark_compact_collector()->sweeping_in_progress()) {
2776 heap()->mark_compact_collector()->EnsureSweepingCompleted();
2778 ClearHistograms(heap()->isolate());
2779 HeapObjectIterator obj_it(this);
2780 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2781 CollectHistogramInfo(obj);
2782 ReportHistogram(heap()->isolate(), true);
2787 // -----------------------------------------------------------------------------
2788 // MapSpace implementation
2789 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2790 // there is at least one non-inlined virtual function. I would prefer to hide
2791 // the VerifyObject definition behind VERIFY_HEAP.
2793 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
2796 // -----------------------------------------------------------------------------
2797 // CellSpace and PropertyCellSpace implementation
2798 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2799 // there is at least one non-inlined virtual function. I would prefer to hide
2800 // the VerifyObject definition behind VERIFY_HEAP.
2802 void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); }
2805 void PropertyCellSpace::VerifyObject(HeapObject* object) {
2806 CHECK(object->IsPropertyCell());
2810 // -----------------------------------------------------------------------------
2811 // LargeObjectIterator
2813 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2814 current_ = space->first_page_;
2819 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2820 HeapObjectCallback size_func) {
2821 current_ = space->first_page_;
2822 size_func_ = size_func;
2826 HeapObject* LargeObjectIterator::Next() {
2827 if (current_ == NULL) return NULL;
2829 HeapObject* object = current_->GetObject();
2830 current_ = current_->next_page();
2835 // -----------------------------------------------------------------------------
2837 static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
2840 LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
2841 AllocationSpace id)
2842 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2843 max_capacity_(max_capacity),
2848 chunk_map_(ComparePointers, 1024) {}
2851 bool LargeObjectSpace::SetUp() {
2854 maximum_committed_ = 0;
2862 void LargeObjectSpace::TearDown() {
2863 while (first_page_ != NULL) {
2864 LargePage* page = first_page_;
2865 first_page_ = first_page_->next_page();
2866 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2868 ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2869 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2870 space, kAllocationActionFree, page->size());
2871 heap()->isolate()->memory_allocator()->Free(page);
2877 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
2878 Executability executable) {
2879 // Check if we want to force a GC before growing the old space further.
2880 // If so, fail the allocation.
2881 if (!heap()->always_allocate() &&
2882 heap()->OldGenerationAllocationLimitReached()) {
2883 return AllocationResult::Retry(identity());
2886 if (!CanAllocateSize(object_size)) return AllocationResult::Retry(identity());
2888 LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
2889 object_size, this, executable);
2890 if (page == NULL) return AllocationResult::Retry(identity());
2891 DCHECK(page->area_size() >= object_size);
2893 size_ += static_cast<int>(page->size());
2894 objects_size_ += object_size;
2895 page_count_++;
2896 page->set_next_page(first_page_);
2897 first_page_ = page;
2899 if (size_ > maximum_committed_) {
2900 maximum_committed_ = size_;
2903 // Register all MemoryChunk::kAlignment-aligned chunks covered by
2904 // this large page in the chunk map.
2905 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2906 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2907 for (uintptr_t key = base; key <= limit; key++) {
2908 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2909 static_cast<uint32_t>(key), true);
2910 DCHECK(entry != NULL);
2911 entry->value = page;
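// Every kAlignment-sized slot the page overlaps now maps back to the page, so
// FindPage() below can translate any interior address into its LargePage with
// a single hash lookup on
//
//   reinterpret_cast<uintptr_t>(addr) / MemoryChunk::kAlignment
//
// where addr is any address inside the large object.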
2914 HeapObject* object = page->GetObject();
2916 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
2918 if (Heap::ShouldZapGarbage()) {
2919 // Make the object consistent so the heap can be verified in OldSpaceStep.
2920 // We only need to do this in debug builds or if verify_heap is on.
2921 reinterpret_cast<Object**>(object->address())[0] =
2922 heap()->fixed_array_map();
2923 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2926 heap()->incremental_marking()->OldSpaceStep(object_size);
2931 size_t LargeObjectSpace::CommittedPhysicalMemory() {
2932 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2934 LargePage* current = first_page_;
2935 while (current != NULL) {
2936 size += current->CommittedPhysicalMemory();
2937 current = current->next_page();
2944 Object* LargeObjectSpace::FindObject(Address a) {
2945 LargePage* page = FindPage(a);
2946 if (page != NULL) {
2947 return page->GetObject();
2948 }
2949 return Smi::FromInt(0); // Signaling not found.
2953 LargePage* LargeObjectSpace::FindPage(Address a) {
2954 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
2955 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2956 static_cast<uint32_t>(key), false);
2957 if (e != NULL) {
2958 DCHECK(e->value != NULL);
2959 LargePage* page = reinterpret_cast<LargePage*>(e->value);
2960 DCHECK(page->is_valid());
2961 if (page->Contains(a)) {
2962 return page;
2969 void LargeObjectSpace::FreeUnmarkedObjects() {
2970 LargePage* previous = NULL;
2971 LargePage* current = first_page_;
2972 while (current != NULL) {
2973 HeapObject* object = current->GetObject();
2974 // Can this large page contain pointers to non-trivial objects? No other
2975 // pointer object is this big.
2976 bool is_pointer_object = object->IsFixedArray();
2977 MarkBit mark_bit = Marking::MarkBitFrom(object);
2978 if (mark_bit.Get()) {
2979 mark_bit.Clear();
2980 Page::FromAddress(object->address())->ResetProgressBar();
2981 Page::FromAddress(object->address())->ResetLiveBytes();
2982 previous = current;
2983 current = current->next_page();
2984 } else {
2985 LargePage* page = current;
2986 // Cut the chunk out from the chunk list.
2987 current = current->next_page();
2988 if (previous == NULL) {
2989 first_page_ = current;
2991 previous->set_next_page(current);
2995 heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
2996 heap()->isolate());
2997 size_ -= static_cast<int>(page->size());
2998 objects_size_ -= object->Size();
3001 // Remove entries belonging to this page.
3002 // Use variable alignment to help pass length check (<= 80 characters)
3003 // of single line in tools/presubmit.py.
3004 const intptr_t alignment = MemoryChunk::kAlignment;
3005 uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
3006 uintptr_t limit = base + (page->size() - 1) / alignment;
3007 for (uintptr_t key = base; key <= limit; key++) {
3008 chunk_map_.Remove(reinterpret_cast<void*>(key),
3009 static_cast<uint32_t>(key));
3012 if (is_pointer_object) {
3013 heap()->QueueMemoryChunkForFree(page);
3015 heap()->isolate()->memory_allocator()->Free(page);
3019 heap()->FreeQueuedChunks();
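//
// Pages that might contain pointers (the FixedArray case above) are only
// queued and released later via FreeQueuedChunks(), presumably so that stale
// entries referring to them (for example in the store buffer) can be filtered
// out before the memory is actually returned; pointer-free pages are handed
// straight back to the memory allocator.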
3023 bool LargeObjectSpace::Contains(HeapObject* object) {
3024 Address address = object->address();
3025 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3027 bool owned = (chunk->owner() == this);
3029 SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
3036 // We do not assume that the large object iterator works, because it depends
3037 // on the invariants we are checking during verification.
3038 void LargeObjectSpace::Verify() {
3039 for (LargePage* chunk = first_page_; chunk != NULL;
3040 chunk = chunk->next_page()) {
3041 // Each chunk contains an object that starts at the large object page's
3042 // object area start.
3043 HeapObject* object = chunk->GetObject();
3044 Page* page = Page::FromAddress(object->address());
3045 CHECK(object->address() == page->area_start());
3047 // The first word should be a map, and we expect all map pointers to be
3048 // in map space.
3049 Map* map = object->map();
3050 CHECK(map->IsMap());
3051 CHECK(heap()->map_space()->Contains(map));
3053 // We have only code, sequential strings, external strings
3054 // (sequential strings that have been morphed into external
3055 // strings), fixed arrays, byte arrays, and constant pool arrays in the
3056 // large object space.
3057 CHECK(object->IsCode() || object->IsSeqString() ||
3058 object->IsExternalString() || object->IsFixedArray() ||
3059 object->IsFixedDoubleArray() || object->IsByteArray() ||
3060 object->IsConstantPoolArray());
3062 // The object itself should look OK.
3063 object->ObjectVerify();
3065 // Byte arrays and strings don't have interior pointers.
3066 if (object->IsCode()) {
3067 VerifyPointersVisitor code_visitor;
3068 object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
3069 } else if (object->IsFixedArray()) {
3070 FixedArray* array = FixedArray::cast(object);
3071 for (int j = 0; j < array->length(); j++) {
3072 Object* element = array->get(j);
3073 if (element->IsHeapObject()) {
3074 HeapObject* element_object = HeapObject::cast(element);
3075 CHECK(heap()->Contains(element_object));
3076 CHECK(element_object->map()->IsMap());
3086 void LargeObjectSpace::Print() {
3087 OFStream os(stdout);
3088 LargeObjectIterator it(this);
3089 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3090 obj->Print(os);
3095 void LargeObjectSpace::ReportStatistics() {
3096 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
3097 int num_objects = 0;
3098 ClearHistograms(heap()->isolate());
3099 LargeObjectIterator it(this);
3100 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3101 num_objects++;
3102 CollectHistogramInfo(obj);
3106 " number of objects %d, "
3107 "size of objects %" V8_PTR_PREFIX "d\n",
3108 num_objects, objects_size_);
3109 if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3113 void LargeObjectSpace::CollectCodeStatistics() {
3114 Isolate* isolate = heap()->isolate();
3115 LargeObjectIterator obj_it(this);
3116 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
3117 if (obj->IsCode()) {
3118 Code* code = Code::cast(obj);
3119 isolate->code_kind_statistics()[code->kind()] += code->Size();
3125 void Page::Print() {
3126 // Make a best-effort to print the objects in the page.
3127 PrintF("Page@%p in %s\n", this->address(),
3128 AllocationSpaceName(this->owner()->identity()));
3129 printf(" --------------------------------------\n");
3130 HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
3131 unsigned mark_size = 0;
3132 for (HeapObject* object = objects.Next(); object != NULL;
3133 object = objects.Next()) {
3134 bool is_marked = Marking::MarkBitFrom(object).Get();
3135 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
3136 if (is_marked) {
3137 mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
3138 }
3139 object->ShortPrint();
3142 printf(" --------------------------------------\n");
3143 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3148 } // namespace v8::internal