1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/base/bits.h"
8 #include "src/base/platform/platform.h"
9 #include "src/full-codegen.h"
10 #include "src/heap/mark-compact.h"
11 #include "src/macro-assembler.h"
13 #include "src/snapshot/snapshot.h"
19 // ----------------------------------------------------------------------------
22 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
23 // You can't actually iterate over the anchor page. It is not a real page,
24 // just an anchor for the doubly linked page list. Initialize as if we have
25 // reached the end of the anchor page, so the first iteration will move on to the first page.
27 Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
31 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
32 HeapObjectCallback size_func) {
33 // You can't actually iterate over the anchor page. It is not a real page,
34 // just an anchor for the doubly linked page list. Initialize the current
35 // address and end as NULL, so the first iteration will move on to the first page.
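// (A NULL current address makes AdvanceToNextPage() below start from
// space_->anchor() and step to its next_page(), i.e. the first real page.)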
37 Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
41 HeapObjectIterator::HeapObjectIterator(Page* page,
42 HeapObjectCallback size_func) {
43 Space* owner = page->owner();
44 DCHECK(owner == page->heap()->old_pointer_space() ||
45 owner == page->heap()->old_data_space() ||
46 owner == page->heap()->map_space() ||
47 owner == page->heap()->cell_space() ||
48 owner == page->heap()->code_space());
49 Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
50 page->area_end(), kOnePageOnly, size_func);
51 DCHECK(page->WasSwept() || page->SweepingCompleted());
55 void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
56 HeapObjectIterator::PageMode mode,
57 HeapObjectCallback size_f) {
66 // We have hit the end of the current page's object area and should advance
67 // to the next page of objects, if there is one.
68 bool HeapObjectIterator::AdvanceToNextPage() {
69 DCHECK(cur_addr_ == cur_end_);
70 if (page_mode_ == kOnePageOnly) return false;
72 if (cur_addr_ == NULL) {
73 cur_page = space_->anchor();
75 cur_page = Page::FromAddress(cur_addr_ - 1);
76 DCHECK(cur_addr_ == cur_page->area_end());
78 cur_page = cur_page->next_page();
79 if (cur_page == space_->anchor()) return false;
80 cur_addr_ = cur_page->area_start();
81 cur_end_ = cur_page->area_end();
82 DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
87 // -----------------------------------------------------------------------------
91 CodeRange::CodeRange(Isolate* isolate)
96 current_allocation_block_index_(0),
100 bool CodeRange::SetUp(size_t requested) {
101 DCHECK(code_range_ == NULL);
103 if (requested == 0) {
104 // When a target requires the code range feature, we put all code objects
105 // in a kMaximalCodeRangeSize range of virtual address space, so that
106 // they can call each other with near calls.
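// (For example, on x64 a near call encodes a signed 32-bit pc-relative
// displacement, so keeping all code inside one contiguous range that is well
// under 2 GB guarantees every call target is reachable without trampolines.)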
107 if (kRequiresCodeRange) {
108 requested = kMaximalCodeRangeSize;
114 if (requested <= kMinimumCodeRangeSize) {
115 requested = kMinimumCodeRangeSize;
118 DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
119 code_range_ = new base::VirtualMemory(requested);
120 CHECK(code_range_ != NULL);
121 if (!code_range_->IsReserved()) {
127 // At this point we have successfully reserved a block of the requested size.
128 DCHECK(code_range_->size() == requested);
129 Address base = reinterpret_cast<Address>(code_range_->address());
131 // On some platforms, specifically Win64, we need to reserve some pages at
132 // the beginning of an executable space.
133 if (kReservedCodeRangePages) {
134 if (!code_range_->Commit(
135 base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
140 base += kReservedCodeRangePages * base::OS::CommitPageSize();
142 Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
143 size_t size = code_range_->size() - (aligned_base - base) -
144 kReservedCodeRangePages * base::OS::CommitPageSize();
145 allocation_list_.Add(FreeBlock(aligned_base, size));
146 current_allocation_block_index_ = 0;
148 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
149 ReserveEmergencyBlock();
154 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
155 const FreeBlock* right) {
156 // The entire point of CodeRange is that the difference between two
157 // addresses in the range can be represented as a signed 32-bit int,
158 // so the cast is semantically correct.
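// (For instance, two blocks 256 MB apart differ by 0x10000000, comfortably
// inside the +/-2^31 range of int, so the truncated difference still orders
// the blocks correctly for the sort in GetNextAllocationBlock().)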
159 return static_cast<int>(left->start - right->start);
163 bool CodeRange::GetNextAllocationBlock(size_t requested) {
164 for (current_allocation_block_index_++;
165 current_allocation_block_index_ < allocation_list_.length();
166 current_allocation_block_index_++) {
167 if (requested <= allocation_list_[current_allocation_block_index_].size) {
168 return true; // Found a large enough allocation block.
172 // Sort and merge the free blocks on the free list and the allocation list.
173 free_list_.AddAll(allocation_list_);
174 allocation_list_.Clear();
175 free_list_.Sort(&CompareFreeBlockAddress);
176 for (int i = 0; i < free_list_.length();) {
177 FreeBlock merged = free_list_[i];
179 // Add adjacent free blocks to the current merged block.
180 while (i < free_list_.length() &&
181 free_list_[i].start == merged.start + merged.size) {
182 merged.size += free_list_[i].size;
185 if (merged.size > 0) {
186 allocation_list_.Add(merged);
191 for (current_allocation_block_index_ = 0;
192 current_allocation_block_index_ < allocation_list_.length();
193 current_allocation_block_index_++) {
194 if (requested <= allocation_list_[current_allocation_block_index_].size) {
195 return true; // Found a large enough allocation block.
198 current_allocation_block_index_ = 0;
199 // Code range is full or too fragmented.
204 Address CodeRange::AllocateRawMemory(const size_t requested_size,
205 const size_t commit_size,
207 DCHECK(commit_size <= requested_size);
209 if (!ReserveBlock(requested_size, &current)) {
213 *allocated = current.size;
214 DCHECK(*allocated <= current.size);
215 DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
216 if (!isolate_->memory_allocator()->CommitExecutableMemory(
217 code_range_, current.start, commit_size, *allocated)) {
219 ReleaseBlock(&current);
222 return current.start;
226 bool CodeRange::CommitRawMemory(Address start, size_t length) {
227 return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
231 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
232 return code_range_->Uncommit(start, length);
236 void CodeRange::FreeRawMemory(Address address, size_t length) {
237 DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
238 free_list_.Add(FreeBlock(address, length));
239 code_range_->Uncommit(address, length);
243 void CodeRange::TearDown() {
244 delete code_range_; // Frees all memory in the virtual memory range.
247 allocation_list_.Free();
251 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
252 DCHECK(allocation_list_.length() == 0 ||
253 current_allocation_block_index_ < allocation_list_.length());
254 if (allocation_list_.length() == 0 ||
255 requested_size > allocation_list_[current_allocation_block_index_].size) {
256 // Find an allocation block large enough.
257 if (!GetNextAllocationBlock(requested_size)) return false;
259 // Commit the requested memory at the start of the current allocation block.
260 size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
261 *block = allocation_list_[current_allocation_block_index_];
262 // Don't leave a small free block behind; it would be useless for a large object or chunk.
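// (If carving off the request would leave a page or less behind, the whole
// block is handed out instead; otherwise only the aligned request is taken and
// the remainder stays in the allocation list via the start/size update below.)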
263 if (aligned_requested < (block->size - Page::kPageSize)) {
264 block->size = aligned_requested;
266 DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
267 allocation_list_[current_allocation_block_index_].start += block->size;
268 allocation_list_[current_allocation_block_index_].size -= block->size;
273 void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
276 void CodeRange::ReserveEmergencyBlock() {
277 const size_t requested_size = MemoryAllocator::CodePageAreaSize();
278 if (emergency_block_.size == 0) {
279 ReserveBlock(requested_size, &emergency_block_);
281 DCHECK(emergency_block_.size >= requested_size);
286 void CodeRange::ReleaseEmergencyBlock() {
287 if (emergency_block_.size != 0) {
288 ReleaseBlock(&emergency_block_);
289 emergency_block_.size = 0;
294 // -----------------------------------------------------------------------------
298 MemoryAllocator::MemoryAllocator(Isolate* isolate)
301 capacity_executable_(0),
304 lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
305 highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
308 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
309 capacity_ = RoundUp(capacity, Page::kPageSize);
310 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
311 DCHECK_GE(capacity_, capacity_executable_);
314 size_executable_ = 0;
320 void MemoryAllocator::TearDown() {
321 // Check that spaces were torn down before MemoryAllocator.
323 // TODO(gc) this will be true again when we fix FreeMemory.
324 // DCHECK(size_executable_ == 0);
326 capacity_executable_ = 0;
330 bool MemoryAllocator::CommitMemory(Address base, size_t size,
331 Executability executable) {
332 if (!base::VirtualMemory::CommitRegion(base, size,
333 executable == EXECUTABLE)) {
336 UpdateAllocatedSpaceLimits(base, base + size);
341 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
342 Executability executable) {
343 // TODO(gc) make code_range part of memory allocator?
344 DCHECK(reservation->IsReserved());
345 size_t size = reservation->size();
346 DCHECK(size_ >= size);
349 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
351 if (executable == EXECUTABLE) {
352 DCHECK(size_executable_ >= size);
353 size_executable_ -= size;
355 // Code which is part of the code-range does not have its own VirtualMemory.
356 DCHECK(isolate_->code_range() == NULL ||
357 !isolate_->code_range()->contains(
358 static_cast<Address>(reservation->address())));
359 DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
360 !isolate_->code_range()->valid());
361 reservation->Release();
365 void MemoryAllocator::FreeMemory(Address base, size_t size,
366 Executability executable) {
367 // TODO(gc) make code_range part of memory allocator?
368 DCHECK(size_ >= size);
371 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
373 if (executable == EXECUTABLE) {
374 DCHECK(size_executable_ >= size);
375 size_executable_ -= size;
377 if (isolate_->code_range() != NULL &&
378 isolate_->code_range()->contains(static_cast<Address>(base))) {
379 DCHECK(executable == EXECUTABLE);
380 isolate_->code_range()->FreeRawMemory(base, size);
382 DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
383 !isolate_->code_range()->valid());
384 bool result = base::VirtualMemory::ReleaseRegion(base, size);
391 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
392 base::VirtualMemory* controller) {
393 base::VirtualMemory reservation(size, alignment);
395 if (!reservation.IsReserved()) return NULL;
396 size_ += reservation.size();
398 RoundUp(static_cast<Address>(reservation.address()), alignment);
399 controller->TakeControl(&reservation);
404 Address MemoryAllocator::AllocateAlignedMemory(
405 size_t reserve_size, size_t commit_size, size_t alignment,
406 Executability executable, base::VirtualMemory* controller) {
407 DCHECK(commit_size <= reserve_size);
408 base::VirtualMemory reservation;
409 Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
410 if (base == NULL) return NULL;
412 if (executable == EXECUTABLE) {
413 if (!CommitExecutableMemory(&reservation, base, commit_size,
418 if (reservation.Commit(base, commit_size, false)) {
419 UpdateAllocatedSpaceLimits(base, base + commit_size);
426 // Failed to commit the body. Release the mapping and any partially
427 // committed regions inside it.
428 reservation.Release();
432 controller->TakeControl(&reservation);
437 void Page::InitializeAsAnchor(PagedSpace* owner) {
444 NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
445 SemiSpace* semi_space) {
446 Address area_start = start + NewSpacePage::kObjectStartOffset;
447 Address area_end = start + Page::kPageSize;
450 MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
451 area_end, NOT_EXECUTABLE, semi_space);
452 chunk->set_next_chunk(NULL);
453 chunk->set_prev_chunk(NULL);
454 chunk->initialize_scan_on_scavenge(true);
455 bool in_to_space = (semi_space->id() != kFromSpace);
456 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
457 : MemoryChunk::IN_FROM_SPACE);
458 DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
459 : MemoryChunk::IN_TO_SPACE));
460 NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
461 heap->incremental_marking()->SetNewSpacePageFlags(page);
466 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
467 set_owner(semi_space);
468 set_next_chunk(this);
469 set_prev_chunk(this);
470 // The flags mark this invalid page as not being in new-space.
471 // All real new-space pages will be in new-space.
476 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
477 Address area_start, Address area_end,
478 Executability executable, Space* owner) {
479 MemoryChunk* chunk = FromAddress(base);
481 DCHECK(base == chunk->address());
485 chunk->area_start_ = area_start;
486 chunk->area_end_ = area_end;
488 chunk->set_owner(owner);
489 chunk->InitializeReservedMemory();
490 chunk->slots_buffer_ = NULL;
491 chunk->skip_list_ = NULL;
492 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
493 chunk->progress_bar_ = 0;
494 chunk->high_water_mark_ = static_cast<int>(area_start - base);
495 chunk->set_parallel_sweeping(SWEEPING_DONE);
496 chunk->available_in_small_free_list_ = 0;
497 chunk->available_in_medium_free_list_ = 0;
498 chunk->available_in_large_free_list_ = 0;
499 chunk->available_in_huge_free_list_ = 0;
500 chunk->non_available_small_blocks_ = 0;
501 chunk->ResetLiveBytes();
502 Bitmap::Clear(chunk);
503 chunk->initialize_scan_on_scavenge(false);
504 chunk->SetFlag(WAS_SWEPT);
506 DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
507 DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
509 if (executable == EXECUTABLE) {
510 chunk->SetFlag(IS_EXECUTABLE);
513 if (owner == heap->old_data_space()) {
514 chunk->SetFlag(CONTAINS_ONLY_DATA);
521 // Commit MemoryChunk area to the requested size.
522 bool MemoryChunk::CommitArea(size_t requested) {
524 IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
525 size_t header_size = area_start() - address() - guard_size;
527 RoundUp(header_size + requested, base::OS::CommitPageSize());
528 size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
529 base::OS::CommitPageSize());
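// commit_size is what must be committed to back the requested area;
// committed_size is what is committed right now. The branches below commit or
// uncommit only the difference between the two.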
531 if (commit_size > committed_size) {
532 // The commit size should be less than or equal to the reserved size.
533 DCHECK(commit_size <= size() - 2 * guard_size);
534 // Append the committed area.
535 Address start = address() + committed_size + guard_size;
536 size_t length = commit_size - committed_size;
537 if (reservation_.IsReserved()) {
538 Executability executable =
539 IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
540 if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
545 CodeRange* code_range = heap_->isolate()->code_range();
546 DCHECK(code_range != NULL && code_range->valid() &&
547 IsFlagSet(IS_EXECUTABLE));
548 if (!code_range->CommitRawMemory(start, length)) return false;
551 if (Heap::ShouldZapGarbage()) {
552 heap_->isolate()->memory_allocator()->ZapBlock(start, length);
554 } else if (commit_size < committed_size) {
555 DCHECK(commit_size > 0);
556 // Shrink the committed area.
557 size_t length = committed_size - commit_size;
558 Address start = address() + committed_size + guard_size - length;
559 if (reservation_.IsReserved()) {
560 if (!reservation_.Uncommit(start, length)) return false;
562 CodeRange* code_range = heap_->isolate()->code_range();
563 DCHECK(code_range != NULL && code_range->valid() &&
564 IsFlagSet(IS_EXECUTABLE));
565 if (!code_range->UncommitRawMemory(start, length)) return false;
569 area_end_ = area_start_ + requested;
574 void MemoryChunk::InsertAfter(MemoryChunk* other) {
575 MemoryChunk* other_next = other->next_chunk();
577 set_next_chunk(other_next);
578 set_prev_chunk(other);
579 other_next->set_prev_chunk(this);
580 other->set_next_chunk(this);
584 void MemoryChunk::Unlink() {
585 MemoryChunk* next_element = next_chunk();
586 MemoryChunk* prev_element = prev_chunk();
587 next_element->set_prev_chunk(prev_element);
588 prev_element->set_next_chunk(next_element);
589 set_prev_chunk(NULL);
590 set_next_chunk(NULL);
594 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
595 intptr_t commit_area_size,
596 Executability executable,
598 DCHECK(commit_area_size <= reserve_area_size);
601 Heap* heap = isolate_->heap();
603 base::VirtualMemory reservation;
604 Address area_start = NULL;
605 Address area_end = NULL;
608 // MemoryChunk layout:
611 // +----------------------------+<- base aligned with MemoryChunk::kAlignment
613 // +----------------------------+<- base + CodePageGuardStartOffset
615 // +----------------------------+<- area_start_
617 // +----------------------------+<- area_end_ (area_start + commit_area_size)
618 // | Committed but not used |
619 // +----------------------------+<- aligned at OS page boundary
620 // | Reserved but not committed |
621 // +----------------------------+<- aligned at OS page boundary
623 // +----------------------------+<- base + chunk_size
626 // +----------------------------+<- base aligned with MemoryChunk::kAlignment
628 // +----------------------------+<- area_start_ (base + kObjectStartOffset)
630 // +----------------------------+<- area_end_ (area_start + commit_area_size)
631 // | Committed but not used |
632 // +----------------------------+<- aligned at OS page boundary
633 // | Reserved but not committed |
634 // +----------------------------+<- base + chunk_size
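// Non-code chunks carry no guard pages, so their area simply starts at
// base + MemoryChunk::kObjectStartOffset, as set up in the non-executable
// branch below.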
637 if (executable == EXECUTABLE) {
638 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
639 base::OS::CommitPageSize()) +
642 // Check executable memory limit.
643 if (size_executable_ + chunk_size > capacity_executable_) {
644 LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
645 "V8 Executable Allocation capacity exceeded"));
649 // Size of header (not executable) plus area (executable).
650 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
651 base::OS::CommitPageSize());
652 // Allocate executable memory either from the code range or from the underlying system.
654 if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
655 base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
658 IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
659 if (base == NULL) return NULL;
661 // Update executable memory size.
662 size_executable_ += chunk_size;
664 base = AllocateAlignedMemory(chunk_size, commit_size,
665 MemoryChunk::kAlignment, executable,
667 if (base == NULL) return NULL;
668 // Update executable memory size.
669 size_executable_ += reservation.size();
672 if (Heap::ShouldZapGarbage()) {
673 ZapBlock(base, CodePageGuardStartOffset());
674 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
677 area_start = base + CodePageAreaStartOffset();
678 area_end = area_start + commit_area_size;
680 chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
681 base::OS::CommitPageSize());
683 RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
684 base::OS::CommitPageSize());
686 AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
687 executable, &reservation);
689 if (base == NULL) return NULL;
691 if (Heap::ShouldZapGarbage()) {
692 ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
695 area_start = base + Page::kObjectStartOffset;
696 area_end = area_start + commit_area_size;
699 // Use chunk_size for statistics and callbacks because we assume that they
700 // treat reserved but not-yet committed memory regions of chunks as allocated.
701 isolate_->counters()->memory_allocated()->Increment(
702 static_cast<int>(chunk_size));
704 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
706 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
707 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
710 MemoryChunk* result = MemoryChunk::Initialize(
711 heap, base, chunk_size, area_start, area_end, executable, owner);
712 result->set_reserved_memory(&reservation);
717 void Page::ResetFreeListStatistics() {
718 non_available_small_blocks_ = 0;
719 available_in_small_free_list_ = 0;
720 available_in_medium_free_list_ = 0;
721 available_in_large_free_list_ = 0;
722 available_in_huge_free_list_ = 0;
726 Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
727 Executability executable) {
728 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
730 if (chunk == NULL) return NULL;
732 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
736 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
738 Executability executable) {
740 AllocateChunk(object_size, object_size, executable, owner);
741 if (chunk == NULL) return NULL;
742 return LargePage::Initialize(isolate_->heap(), chunk);
746 void MemoryAllocator::Free(MemoryChunk* chunk) {
747 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
748 if (chunk->owner() != NULL) {
750 static_cast<ObjectSpace>(1 << chunk->owner()->identity());
751 PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
754 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
755 chunk->IsEvacuationCandidate());
757 delete chunk->slots_buffer();
758 delete chunk->skip_list();
760 base::VirtualMemory* reservation = chunk->reserved_memory();
761 if (reservation->IsReserved()) {
762 FreeMemory(reservation, chunk->executable());
764 FreeMemory(chunk->address(), chunk->size(), chunk->executable());
769 bool MemoryAllocator::CommitBlock(Address start, size_t size,
770 Executability executable) {
771 if (!CommitMemory(start, size, executable)) return false;
773 if (Heap::ShouldZapGarbage()) {
774 ZapBlock(start, size);
777 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
782 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
783 if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
784 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
789 void MemoryAllocator::ZapBlock(Address start, size_t size) {
790 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
791 Memory::Address_at(start + s) = kZapValue;
796 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
797 AllocationAction action,
799 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
800 MemoryAllocationCallbackRegistration registration =
801 memory_allocation_callbacks_[i];
802 if ((registration.space & space) == space &&
803 (registration.action & action) == action)
804 registration.callback(space, action, static_cast<int>(size));
809 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
810 MemoryAllocationCallback callback) {
811 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
812 if (memory_allocation_callbacks_[i].callback == callback) return true;
818 void MemoryAllocator::AddMemoryAllocationCallback(
819 MemoryAllocationCallback callback, ObjectSpace space,
820 AllocationAction action) {
821 DCHECK(callback != NULL);
822 MemoryAllocationCallbackRegistration registration(callback, space, action);
823 DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
824 return memory_allocation_callbacks_.Add(registration);
828 void MemoryAllocator::RemoveMemoryAllocationCallback(
829 MemoryAllocationCallback callback) {
830 DCHECK(callback != NULL);
831 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
832 if (memory_allocation_callbacks_[i].callback == callback) {
833 memory_allocation_callbacks_.Remove(i);
842 void MemoryAllocator::ReportStatistics() {
843 float pct = static_cast<float>(capacity_ - size_) / capacity_;
844 PrintF(" capacity: %" V8_PTR_PREFIX
846 ", used: %" V8_PTR_PREFIX
848 ", available: %%%d\n\n",
849 capacity_, size_, static_cast<int>(pct * 100));
854 int MemoryAllocator::CodePageGuardStartOffset() {
855 // We are guarding code pages: the first OS page after the header
856 // will be protected as non-writable.
857 return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
861 int MemoryAllocator::CodePageGuardSize() {
862 return static_cast<int>(base::OS::CommitPageSize());
866 int MemoryAllocator::CodePageAreaStartOffset() {
867 // We are guarding code pages: the first OS page after the header
868 // will be protected as non-writable.
869 return CodePageGuardStartOffset() + CodePageGuardSize();
873 int MemoryAllocator::CodePageAreaEndOffset() {
874 // We are guarding code pages: the last OS page will be protected as non-writable.
876 return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
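// Taken together, a code page is laid out as
//   header | guard page | executable area | guard page,
// which is what CommitExecutableMemory() below commits and protects.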
880 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
881 Address start, size_t commit_size,
882 size_t reserved_size) {
883 // Commit page header (not executable).
884 if (!vm->Commit(start, CodePageGuardStartOffset(), false)) {
888 // Create guard page after the header.
889 if (!vm->Guard(start + CodePageGuardStartOffset())) {
893 // Commit page body (executable).
894 if (!vm->Commit(start + CodePageAreaStartOffset(),
895 commit_size - CodePageGuardStartOffset(), true)) {
899 // Create guard page before the end.
900 if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
904 UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
906 CodePageGuardStartOffset());
911 // -----------------------------------------------------------------------------
912 // MemoryChunk implementation
914 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
915 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
916 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
917 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
919 chunk->IncrementLiveBytes(by);
923 // -----------------------------------------------------------------------------
924 // PagedSpace implementation
926 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
927 ObjectSpace::kObjectSpaceNewSpace);
928 STATIC_ASSERT(static_cast<ObjectSpace>(1
929 << AllocationSpace::OLD_POINTER_SPACE) ==
930 ObjectSpace::kObjectSpaceOldPointerSpace);
931 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_DATA_SPACE) ==
932 ObjectSpace::kObjectSpaceOldDataSpace);
933 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
934 ObjectSpace::kObjectSpaceCodeSpace);
935 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CELL_SPACE) ==
936 ObjectSpace::kObjectSpaceCellSpace);
937 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
938 ObjectSpace::kObjectSpaceMapSpace);
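// These asserts guarantee that the (1 << identity()) conversion used in
// MemoryAllocator::AllocateChunk() and MemoryAllocator::Free() above maps each
// AllocationSpace onto its corresponding ObjectSpace bit for the allocation
// callbacks.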
941 PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace space,
942 Executability executable)
943 : Space(heap, space, executable),
945 unswept_free_bytes_(0),
946 end_of_unswept_pages_(NULL),
947 emergency_memory_(NULL) {
948 area_size_ = MemoryAllocator::PageAreaSize(space);
950 (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
951 accounting_stats_.Clear();
953 allocation_info_.set_top(NULL);
954 allocation_info_.set_limit(NULL);
956 anchor_.InitializeAsAnchor(this);
960 bool PagedSpace::SetUp() { return true; }
963 bool PagedSpace::HasBeenSetUp() { return true; }
966 void PagedSpace::TearDown() {
967 PageIterator iterator(this);
968 while (iterator.has_next()) {
969 heap()->isolate()->memory_allocator()->Free(iterator.next());
971 anchor_.set_next_page(&anchor_);
972 anchor_.set_prev_page(&anchor_);
973 accounting_stats_.Clear();
977 size_t PagedSpace::CommittedPhysicalMemory() {
978 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
979 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
981 PageIterator it(this);
982 while (it.has_next()) {
983 size += it.next()->CommittedPhysicalMemory();
989 bool PagedSpace::ContainsSafe(Address addr) {
990 Page* p = Page::FromAddress(addr);
991 PageIterator iterator(this);
992 while (iterator.has_next()) {
993 if (iterator.next() == p) return true;
999 Object* PagedSpace::FindObject(Address addr) {
1000 // Note: this function can only be called on iterable spaces.
1001 DCHECK(!heap()->mark_compact_collector()->in_use());
1003 if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found.
1005 Page* p = Page::FromAddress(addr);
1006 HeapObjectIterator it(p, NULL);
1007 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
1008 Address cur = obj->address();
1009 Address next = cur + obj->Size();
1010 if ((cur <= addr) && (addr < next)) return obj;
1014 return Smi::FromInt(0);
1018 bool PagedSpace::CanExpand() {
1019 DCHECK(max_capacity_ % AreaSize() == 0);
1020 DCHECK(heap()->mark_compact_collector()->is_compacting() ||
1021 Capacity() <= heap()->MaxOldGenerationSize());
1022 DCHECK(heap()->CommittedOldGenerationMemory() <=
1023 heap()->MaxOldGenerationSize() +
1024 PagedSpace::MaxEmergencyMemoryAllocated());
1026 // Are we going to exceed capacity for this space?
1027 if (!heap()->CanExpandOldGeneration(Page::kPageSize)) return false;
1033 bool PagedSpace::Expand() {
1034 if (!CanExpand()) return false;
1036 intptr_t size = AreaSize();
1038 if (anchor_.next_page() == &anchor_) {
1039 size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
1042 Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
1044 if (p == NULL) return false;
1046 // Pages created during bootstrapping may contain immortal immovable objects.
1047 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
1049 DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
1050 DCHECK(heap()->CommittedOldGenerationMemory() <=
1051 heap()->MaxOldGenerationSize() +
1052 PagedSpace::MaxEmergencyMemoryAllocated());
1054 p->InsertAfter(anchor_.prev_page());
1060 int PagedSpace::CountTotalPages() {
1061 PageIterator it(this);
1063 while (it.has_next()) {
1071 void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
1072 sizes->huge_size_ = page->available_in_huge_free_list();
1073 sizes->small_size_ = page->available_in_small_free_list();
1074 sizes->medium_size_ = page->available_in_medium_free_list();
1075 sizes->large_size_ = page->available_in_large_free_list();
1079 void PagedSpace::ResetFreeListStatistics() {
1080 PageIterator page_iterator(this);
1081 while (page_iterator.has_next()) {
1082 Page* page = page_iterator.next();
1083 page->ResetFreeListStatistics();
1088 void PagedSpace::IncreaseCapacity(int size) {
1089 accounting_stats_.ExpandSpace(size);
1093 void PagedSpace::ReleasePage(Page* page) {
1094 DCHECK(page->LiveBytes() == 0);
1095 DCHECK(AreaSize() == page->area_size());
1097 if (page->WasSwept()) {
1098 intptr_t size = free_list_.EvictFreeListItems(page);
1099 accounting_stats_.AllocateBytes(size);
1100 DCHECK_EQ(AreaSize(), static_cast<int>(size));
1102 DecreaseUnsweptFreeBytes(page);
1105 if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
1106 heap()->decrement_scan_on_scavenge_pages();
1107 page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
1110 DCHECK(!free_list_.ContainsPageFreeListItems(page));
1112 if (Page::FromAllocationTop(allocation_info_.top()) == page) {
1113 allocation_info_.set_top(NULL);
1114 allocation_info_.set_limit(NULL);
1117 // If page is still in a list, unlink it from that list.
1118 if (page->next_chunk() != NULL) {
1119 DCHECK(page->prev_chunk() != NULL);
1123 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
1124 heap()->isolate()->memory_allocator()->Free(page);
1126 heap()->QueueMemoryChunkForFree(page);
1129 DCHECK(Capacity() > 0);
1130 accounting_stats_.ShrinkSpace(AreaSize());
1134 intptr_t PagedSpace::MaxEmergencyMemoryAllocated() {
1135 // New space and large object space.
1136 static const int spaces_without_emergency_memory = 2;
1137 static const int spaces_with_emergency_memory =
1138 LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory;
1139 return Page::kPageSize * spaces_with_emergency_memory;
1143 void PagedSpace::CreateEmergencyMemory() {
1144 if (identity() == CODE_SPACE) {
1145 // Make the emergency block available to the allocator.
1146 CodeRange* code_range = heap()->isolate()->code_range();
1147 if (code_range != NULL && code_range->valid()) {
1148 code_range->ReleaseEmergencyBlock();
1150 DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
1152 emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
1153 AreaSize(), AreaSize(), executable(), this);
1157 void PagedSpace::FreeEmergencyMemory() {
1158 Page* page = static_cast<Page*>(emergency_memory_);
1159 DCHECK(page->LiveBytes() == 0);
1160 DCHECK(AreaSize() == page->area_size());
1161 DCHECK(!free_list_.ContainsPageFreeListItems(page));
1162 heap()->isolate()->memory_allocator()->Free(page);
1163 emergency_memory_ = NULL;
1167 void PagedSpace::UseEmergencyMemory() {
1168 // Page::Initialize makes the chunk into a real page and adds it to the
1169 // accounting for this space. Unlike PagedSpace::Expand, we don't check
1170 // CanExpand first, so we can go over the limits a little here. That's OK,
1171 // because we are in the process of compacting which will free up at least as
1172 // much memory as it allocates.
1173 Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
1174 page->InsertAfter(anchor_.prev_page());
1175 emergency_memory_ = NULL;
1180 void PagedSpace::Print() {}
1184 void PagedSpace::Verify(ObjectVisitor* visitor) {
1185 bool allocation_pointer_found_in_space =
1186 (allocation_info_.top() == allocation_info_.limit());
1187 PageIterator page_iterator(this);
1188 while (page_iterator.has_next()) {
1189 Page* page = page_iterator.next();
1190 CHECK(page->owner() == this);
1191 if (page == Page::FromAllocationTop(allocation_info_.top())) {
1192 allocation_pointer_found_in_space = true;
1194 CHECK(page->WasSwept());
1195 HeapObjectIterator it(page, NULL);
1196 Address end_of_previous_object = page->area_start();
1197 Address top = page->area_end();
1199 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1200 CHECK(end_of_previous_object <= object->address());
1202 // The first word should be a map, and we expect all map pointers to be in the heap's map space.
1204 Map* map = object->map();
1205 CHECK(map->IsMap());
1206 CHECK(heap()->map_space()->Contains(map));
1208 // Perform space-specific object verification.
1209 VerifyObject(object);
1211 // The object itself should look OK.
1212 object->ObjectVerify();
1214 // All the interior pointers should be contained in the heap.
1215 int size = object->Size();
1216 object->IterateBody(map->instance_type(), size, visitor);
1217 if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1221 CHECK(object->address() + size <= top);
1222 end_of_previous_object = object->address() + size;
1224 CHECK_LE(black_size, page->LiveBytes());
1226 CHECK(allocation_pointer_found_in_space);
1228 #endif // VERIFY_HEAP
1230 // -----------------------------------------------------------------------------
1231 // NewSpace implementation
1234 bool NewSpace::SetUp(int reserved_semispace_capacity,
1235 int maximum_semispace_capacity) {
1236 // Set up new space based on the preallocated memory block defined by
1237 // start and size. The provided space is divided into two semi-spaces.
1238 // To support fast containment testing in the new space, the size of
1239 // this chunk must be a power of two and it must be aligned to its size.
1240 int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1242 int target_semispace_capacity = heap()->TargetSemiSpaceSize();
1244 size_t size = 2 * reserved_semispace_capacity;
1245 Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
1246 size, size, &reservation_);
1247 if (base == NULL) return false;
1250 chunk_size_ = static_cast<uintptr_t>(size);
1251 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1253 DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
1254 DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
1256 // Allocate and set up the histogram arrays if necessary.
1257 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1258 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1260 #define SET_NAME(name) \
1261 allocated_histogram_[name].set_name(#name); \
1262 promoted_histogram_[name].set_name(#name);
1263 INSTANCE_TYPE_LIST(SET_NAME)
1266 DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1267 DCHECK(static_cast<intptr_t>(chunk_size_) >=
1268 2 * heap()->ReservedSemiSpaceSize());
1269 DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1271 to_space_.SetUp(chunk_base_, initial_semispace_capacity,
1272 target_semispace_capacity, maximum_semispace_capacity);
1273 from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1274 initial_semispace_capacity, target_semispace_capacity,
1275 maximum_semispace_capacity);
1276 if (!to_space_.Commit()) {
1279 DCHECK(!from_space_.is_committed()); // No need to use memory yet.
1281 start_ = chunk_base_;
1282 address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1283 object_mask_ = address_mask_ | kHeapObjectTagMask;
1284 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
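// Because the reservation is a power of two aligned to its own size (see the
// comment at the top of SetUp), new-space containment reduces to a single mask
// compare, in effect (addr & address_mask_) == start_, with object_mask_ and
// object_expected_ applying the same test to tagged heap object pointers.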
1286 ResetAllocationInfo();
1292 void NewSpace::TearDown() {
1293 if (allocated_histogram_) {
1294 DeleteArray(allocated_histogram_);
1295 allocated_histogram_ = NULL;
1297 if (promoted_histogram_) {
1298 DeleteArray(promoted_histogram_);
1299 promoted_histogram_ = NULL;
1303 allocation_info_.set_top(NULL);
1304 allocation_info_.set_limit(NULL);
1306 to_space_.TearDown();
1307 from_space_.TearDown();
1309 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1311 DCHECK(reservation_.IsReserved());
1312 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1319 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
1322 void NewSpace::Grow() {
1323 // Double the semispace size but only up to maximum capacity.
1324 DCHECK(TotalCapacity() < MaximumCapacity());
1326 Min(MaximumCapacity(),
1327 FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
1328 if (to_space_.GrowTo(new_capacity)) {
1329 // Only grow from space if we managed to grow to-space.
1330 if (!from_space_.GrowTo(new_capacity)) {
1331 // If we managed to grow to-space but couldn't grow from-space,
1332 // attempt to shrink to-space.
1333 if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
1334 // We are in an inconsistent state because we could not
1335 // commit/uncommit memory from new space.
1340 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1344 bool NewSpace::GrowOnePage() {
1345 if (TotalCapacity() == MaximumCapacity()) return false;
1346 int new_capacity = static_cast<int>(TotalCapacity()) + Page::kPageSize;
1347 if (to_space_.GrowTo(new_capacity)) {
1348 // Only grow from space if we managed to grow to-space and the from space
1349 // is actually committed.
1350 if (from_space_.is_committed()) {
1351 if (!from_space_.GrowTo(new_capacity)) {
1352 // If we managed to grow to-space but couldn't grow from-space,
1353 // attempt to shrink to-space.
1354 if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
1355 // We are in an inconsistent state because we could not
1356 // commit/uncommit memory from new space.
1362 if (!from_space_.SetTotalCapacity(new_capacity)) {
1363 // Can't really happen, but better safe than sorry.
1367 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1374 void NewSpace::Shrink() {
1375 int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
1376 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1377 if (rounded_new_capacity < TotalCapacity() &&
1378 to_space_.ShrinkTo(rounded_new_capacity)) {
1379 // Only shrink from-space if we managed to shrink to-space.
1380 from_space_.Reset();
1381 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1382 // If we managed to shrink to-space but couldn't shrink from
1383 // space, attempt to grow to-space again.
1384 if (!to_space_.GrowTo(from_space_.TotalCapacity())) {
1385 // We are in an inconsistent state because we could not
1386 // commit/uncommit memory from new space.
1391 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1395 void NewSpace::UpdateAllocationInfo() {
1396 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1397 allocation_info_.set_top(to_space_.page_low());
1398 allocation_info_.set_limit(to_space_.page_high());
1399 UpdateInlineAllocationLimit(0);
1400 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1404 void NewSpace::ResetAllocationInfo() {
1406 UpdateAllocationInfo();
1408 // Clear all mark-bits in the to-space.
1409 NewSpacePageIterator it(&to_space_);
1410 while (it.has_next()) {
1411 Bitmap::Clear(it.next());
1416 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1417 if (heap()->inline_allocation_disabled()) {
1418 // Lowest limit when linear allocation was disabled.
1419 Address high = to_space_.page_high();
1420 Address new_top = allocation_info_.top() + size_in_bytes;
1421 allocation_info_.set_limit(Min(new_top, high));
1422 } else if (inline_allocation_limit_step() == 0) {
1423 // Normal limit is the end of the current page.
1424 allocation_info_.set_limit(to_space_.page_high());
1426 // Lower limit during incremental marking.
1427 Address high = to_space_.page_high();
1428 Address new_top = allocation_info_.top() + size_in_bytes;
1429 Address new_limit = new_top + inline_allocation_limit_step_;
1430 allocation_info_.set_limit(Min(new_limit, high));
1432 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1436 bool NewSpace::AddFreshPage() {
1437 Address top = allocation_info_.top();
1438 if (NewSpacePage::IsAtStart(top)) {
1439 // The current page is already empty. Don't try to make another.
1441 // We should only get here if someone asks to allocate more
1442 // than what can be stored in a single page.
1443 // TODO(gc): Change the limit on new-space allocation to prevent this
1444 // from happening (all such allocations should go directly to LOSpace).
1447 if (!to_space_.AdvancePage()) {
1448 // Check if we reached the target capacity yet. If not, try to commit a page and retry.
1450 if ((to_space_.TotalCapacity() < to_space_.TargetCapacity()) &&
1452 if (!to_space_.AdvancePage()) {
1453 // It doesn't make sense that we managed to commit a page, but can't use it.
1458 // Failed to get a new page in to-space.
1463 // Clear remainder of current page.
1464 Address limit = NewSpacePage::FromLimit(top)->area_end();
1465 if (heap()->gc_state() == Heap::SCAVENGE) {
1466 heap()->promotion_queue()->SetNewLimit(limit);
1469 int remaining_in_page = static_cast<int>(limit - top);
1470 heap()->CreateFillerObjectAt(top, remaining_in_page);
1472 UpdateAllocationInfo();
1478 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
1479 Address old_top = allocation_info_.top();
1480 Address high = to_space_.page_high();
1481 if (allocation_info_.limit() < high) {
1482 // Either the limit has been lowered because linear allocation was disabled
1483 // or because incremental marking wants to get a chance to do a step. Set
1484 // the new limit accordingly.
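// (The Step() call below credits the bytes allocated since the previous step
// to the incremental marker before the limit is recomputed and the allocation
// is retried.)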
1485 Address new_top = old_top + size_in_bytes;
1486 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1487 heap()->incremental_marking()->Step(bytes_allocated,
1488 IncrementalMarking::GC_VIA_STACK_GUARD);
1489 UpdateInlineAllocationLimit(size_in_bytes);
1490 top_on_previous_step_ = new_top;
1491 return AllocateRaw(size_in_bytes);
1492 } else if (AddFreshPage()) {
1493 // Switched to new page. Try allocating again.
1494 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1495 heap()->incremental_marking()->Step(bytes_allocated,
1496 IncrementalMarking::GC_VIA_STACK_GUARD);
1497 top_on_previous_step_ = to_space_.page_low();
1498 return AllocateRaw(size_in_bytes);
1500 return AllocationResult::Retry();
1506 // We do not use the SemiSpaceIterator because verification doesn't assume
1507 // that it works (it depends on the invariants we are checking).
1508 void NewSpace::Verify() {
1509 // The allocation pointer should be in the space or at the very end.
1510 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1512 // There should be objects packed in from the low address up to the
1513 // allocation pointer.
1514 Address current = to_space_.first_page()->area_start();
1515 CHECK_EQ(current, to_space_.space_start());
1517 while (current != top()) {
1518 if (!NewSpacePage::IsAtEnd(current)) {
1519 // The allocation pointer should not be in the middle of an object.
1520 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1523 HeapObject* object = HeapObject::FromAddress(current);
1525 // The first word should be a map, and we expect all map pointers to be in the heap's map space.
1527 Map* map = object->map();
1528 CHECK(map->IsMap());
1529 CHECK(heap()->map_space()->Contains(map));
1531 // The object should not be code or a map.
1532 CHECK(!object->IsMap());
1533 CHECK(!object->IsCode());
1535 // The object itself should look OK.
1536 object->ObjectVerify();
1538 // All the interior pointers should be contained in the heap.
1539 VerifyPointersVisitor visitor;
1540 int size = object->Size();
1541 object->IterateBody(map->instance_type(), size, &visitor);
1545 // At end of page, switch to next page.
1546 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1547 // Next page should be valid.
1548 CHECK(!page->is_anchor());
1549 current = page->area_start();
1553 // Check semi-spaces.
1554 CHECK_EQ(from_space_.id(), kFromSpace);
1555 CHECK_EQ(to_space_.id(), kToSpace);
1556 from_space_.Verify();
1561 // -----------------------------------------------------------------------------
1562 // SemiSpace implementation
1564 void SemiSpace::SetUp(Address start, int initial_capacity, int target_capacity,
1565 int maximum_capacity) {
1566 // Creates a space in the young generation. The constructor does not
1567 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1568 // memory of size 'capacity' when set up, and does not grow or shrink
1569 // otherwise. In the mark-compact collector, the memory region of the from
1570 // space is used as the marking stack. It requires contiguous memory addresses.
1572 DCHECK(maximum_capacity >= Page::kPageSize);
1573 DCHECK(initial_capacity <= target_capacity);
1574 DCHECK(target_capacity <= maximum_capacity);
1575 initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1576 total_capacity_ = initial_capacity;
1577 target_capacity_ = RoundDown(target_capacity, Page::kPageSize);
1578 maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1579 maximum_committed_ = 0;
1582 address_mask_ = ~(maximum_capacity - 1);
1583 object_mask_ = address_mask_ | kHeapObjectTagMask;
1584 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1589 void SemiSpace::TearDown() {
1591 total_capacity_ = 0;
1595 bool SemiSpace::Commit() {
1596 DCHECK(!is_committed());
1597 int pages = total_capacity_ / Page::kPageSize;
1598 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1599 start_, total_capacity_, executable())) {
1603 NewSpacePage* current = anchor();
1604 for (int i = 0; i < pages; i++) {
1605 NewSpacePage* new_page =
1606 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
1607 new_page->InsertAfter(current);
1611 SetCapacity(total_capacity_);
1618 bool SemiSpace::Uncommit() {
1619 DCHECK(is_committed());
1620 Address start = start_ + maximum_total_capacity_ - total_capacity_;
1621 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
1625 anchor()->set_next_page(anchor());
1626 anchor()->set_prev_page(anchor());
1633 size_t SemiSpace::CommittedPhysicalMemory() {
1634 if (!is_committed()) return 0;
1636 NewSpacePageIterator it(this);
1637 while (it.has_next()) {
1638 size += it.next()->CommittedPhysicalMemory();
1644 bool SemiSpace::GrowTo(int new_capacity) {
1645 if (!is_committed()) {
1646 if (!Commit()) return false;
1648 DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1649 DCHECK(new_capacity <= maximum_total_capacity_);
1650 DCHECK(new_capacity > total_capacity_);
1651 int pages_before = total_capacity_ / Page::kPageSize;
1652 int pages_after = new_capacity / Page::kPageSize;
1654 size_t delta = new_capacity - total_capacity_;
1656 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1657 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1658 start_ + total_capacity_, delta, executable())) {
1661 SetCapacity(new_capacity);
1662 NewSpacePage* last_page = anchor()->prev_page();
1663 DCHECK(last_page != anchor());
1664 for (int i = pages_before; i < pages_after; i++) {
1665 Address page_address = start_ + i * Page::kPageSize;
1666 NewSpacePage* new_page =
1667 NewSpacePage::Initialize(heap(), page_address, this);
1668 new_page->InsertAfter(last_page);
1669 Bitmap::Clear(new_page);
1670 // Duplicate the flags that were set on the old page.
1671 new_page->SetFlags(last_page->GetFlags(),
1672 NewSpacePage::kCopyOnFlipFlagsMask);
1673 last_page = new_page;
1679 bool SemiSpace::ShrinkTo(int new_capacity) {
1680 DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1681 DCHECK(new_capacity >= initial_total_capacity_);
1682 DCHECK(new_capacity < total_capacity_);
1683 if (is_committed()) {
1684 size_t delta = total_capacity_ - new_capacity;
1685 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1687 MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1688 if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
1692 int pages_after = new_capacity / Page::kPageSize;
1693 NewSpacePage* new_last_page =
1694 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1695 new_last_page->set_next_page(anchor());
1696 anchor()->set_prev_page(new_last_page);
1697 DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
1700 SetCapacity(new_capacity);
1706 bool SemiSpace::SetTotalCapacity(int new_capacity) {
1707 CHECK(!is_committed());
1708 if (new_capacity >= initial_total_capacity_ &&
1709 new_capacity <= maximum_total_capacity_) {
1710 total_capacity_ = new_capacity;
1717 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1718 anchor_.set_owner(this);
1719 // Fix up back-pointers to the anchor, whose address changes when the semispaces are swapped.
1721 anchor_.prev_page()->set_next_page(&anchor_);
1722 anchor_.next_page()->set_prev_page(&anchor_);
1724 bool becomes_to_space = (id_ == kFromSpace);
1725 id_ = becomes_to_space ? kToSpace : kFromSpace;
1726 NewSpacePage* page = anchor_.next_page();
1727 while (page != &anchor_) {
1728 page->set_owner(this);
1729 page->SetFlags(flags, mask);
1730 if (becomes_to_space) {
1731 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1732 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1733 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1734 page->ResetLiveBytes();
1736 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1737 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1739 DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1740 DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1741 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1742 page = page->next_page();
1747 void SemiSpace::Reset() {
1748 DCHECK(anchor_.next_page() != &anchor_);
1749 current_page_ = anchor_.next_page();
1753 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1754 // We won't be swapping semispaces without data in them.
1755 DCHECK(from->anchor_.next_page() != &from->anchor_);
1756 DCHECK(to->anchor_.next_page() != &to->anchor_);
1759 SemiSpace tmp = *from;
1763 // Fix up back-pointers to the page list anchor now that its address has changed.
1765 // Swap to/from-space bits on pages.
1766 // Copy GC flags from old active space (from-space) to new (to-space).
1767 intptr_t flags = from->current_page()->GetFlags();
1768 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1770 from->FlipPages(0, 0);
1774 void SemiSpace::SetCapacity(int new_capacity) {
1775 total_capacity_ = new_capacity;
1776 if (total_capacity_ > maximum_committed_) {
1777 maximum_committed_ = total_capacity_;
1782 void SemiSpace::set_age_mark(Address mark) {
1783 DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
1785 // Mark all pages up to the one containing mark.
1786 NewSpacePageIterator it(space_start(), mark);
1787 while (it.has_next()) {
1788 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1794 void SemiSpace::Print() {}
1798 void SemiSpace::Verify() {
1799 bool is_from_space = (id_ == kFromSpace);
1800 NewSpacePage* page = anchor_.next_page();
1801 CHECK(anchor_.semi_space() == this);
1802 while (page != &anchor_) {
1803 CHECK(page->semi_space() == this);
1804 CHECK(page->InNewSpace());
1805 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1806 : MemoryChunk::IN_TO_SPACE));
1807 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1808 : MemoryChunk::IN_FROM_SPACE));
1809 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1810 if (!is_from_space) {
1811 // The pointers-from-here-are-interesting flag isn't updated dynamically
1812 // on from-space pages, so it might be out of sync with the marking state.
1813 if (page->heap()->incremental_marking()->IsMarking()) {
1814 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1817 !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1819 // TODO(gc): Check that the live_bytes_count_ field matches the
1820 // black marking on the page (if we make it match in new-space).
1822 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1823 CHECK(page->prev_page()->next_page() == page);
1824 page = page->next_page();
1830 void SemiSpace::AssertValidRange(Address start, Address end) {
1831 // Both addresses must belong to the same semi-space.
1832 NewSpacePage* page = NewSpacePage::FromLimit(start);
1833 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1834 SemiSpace* space = page->semi_space();
1835 CHECK_EQ(space, end_page->semi_space());
1836 // Start address is before end address, either on same page,
1837 // or end address is on a later page in the linked list of
1838 // semi-space pages.
1839 if (page == end_page) {
1840 CHECK(start <= end);
1842 while (page != end_page) {
1843 page = page->next_page();
1844 CHECK_NE(page, space->anchor());
1851 // -----------------------------------------------------------------------------
1852 // SemiSpaceIterator implementation.
1853 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1854 Initialize(space->bottom(), space->top(), NULL);
1858 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1859 HeapObjectCallback size_func) {
1860 Initialize(space->bottom(), space->top(), size_func);
1864 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1865 Initialize(start, space->top(), NULL);
1869 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1870 Initialize(from, to, NULL);
1874 void SemiSpaceIterator::Initialize(Address start, Address end,
1875 HeapObjectCallback size_func) {
1876 SemiSpace::AssertValidRange(start, end);
1879 size_func_ = size_func;
1884 // heap_histograms is shared; always clear it before using it.
1885 static void ClearHistograms(Isolate* isolate) {
1886 // We reset the name each time, though it hasn't changed.
1887 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1888 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1889 #undef DEF_TYPE_NAME
1891 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1892 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1893 #undef CLEAR_HISTOGRAM
1895 isolate->js_spill_information()->Clear();
1899 static void ClearCodeKindStatistics(int* code_kind_statistics) {
1900 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1901 code_kind_statistics[i] = 0;
1906 static void ReportCodeKindStatistics(int* code_kind_statistics) {
1907 PrintF("\n Code kind histograms: \n");
1908 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1909 if (code_kind_statistics[i] > 0) {
1910 PrintF(" %-20s: %10d bytes\n",
1911 Code::Kind2String(static_cast<Code::Kind>(i)),
1912 code_kind_statistics[i]);
1919 static int CollectHistogramInfo(HeapObject* obj) {
1920 Isolate* isolate = obj->GetIsolate();
1921 InstanceType type = obj->map()->instance_type();
1922 DCHECK(0 <= type && type <= LAST_TYPE);
1923 DCHECK(isolate->heap_histograms()[type].name() != NULL);
1924 isolate->heap_histograms()[type].increment_number(1);
1925 isolate->heap_histograms()[type].increment_bytes(obj->Size());
1927 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
JSObject::cast(obj)->IncrementSpillStatistics(isolate->js_spill_information());
1936 static void ReportHistogram(Isolate* isolate, bool print_spill) {
1937 PrintF("\n Object Histogram:\n");
1938 for (int i = 0; i <= LAST_TYPE; i++) {
1939 if (isolate->heap_histograms()[i].number() > 0) {
1940 PrintF(" %-34s%10d (%10d bytes)\n",
1941 isolate->heap_histograms()[i].name(),
1942 isolate->heap_histograms()[i].number(),
1943 isolate->heap_histograms()[i].bytes());
1948 // Summarize string types.
1949 int string_number = 0;
1950 int string_bytes = 0;
1951 #define INCREMENT(type, size, name, camel_name) \
1952 string_number += isolate->heap_histograms()[type].number(); \
1953 string_bytes += isolate->heap_histograms()[type].bytes();
1954 STRING_TYPE_LIST(INCREMENT)
1956 if (string_number > 0) {
PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
       string_bytes);
1961 if (FLAG_collect_heap_spill_statistics && print_spill) {
1962 isolate->js_spill_information()->Print();
1968 // Support for statistics gathering for --heap-stats and --log-gc.
1969 void NewSpace::ClearHistograms() {
1970 for (int i = 0; i <= LAST_TYPE; i++) {
1971 allocated_histogram_[i].clear();
1972 promoted_histogram_[i].clear();
1977 // Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens when the --log-gc flag is set.
1980 void NewSpace::CollectStatistics() {
1982 SemiSpaceIterator it(this);
1983 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1984 RecordAllocation(obj);
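// Hypothetical call site (for illustration only; the real hook lives in
// heap.cc and is not shown here): with --log-gc enabled the heap gathers and
// emits these histograms around a new-space collection roughly like
//
//   if (FLAG_log_gc) new_space->CollectStatistics();
//   ... scavenge ...
//   if (FLAG_log_gc) new_space->ReportStatistics();  // defined further below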
1988 static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
1989 const char* description) {
1990 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1991 // Lump all the string types together.
1992 int string_number = 0;
1993 int string_bytes = 0;
1994 #define INCREMENT(type, size, name, camel_name) \
1995 string_number += info[type].number(); \
1996 string_bytes += info[type].bytes();
1997 STRING_TYPE_LIST(INCREMENT)
1999 if (string_number > 0) {
LOG(isolate,
    HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
2004 // Then do the other types.
2005 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
2006 if (info[i].number() > 0) {
LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
                                 info[i].bytes()));
2011 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
2015 void NewSpace::ReportStatistics() {
2017 if (FLAG_heap_stats) {
2018 float pct = static_cast<float>(Available()) / TotalCapacity();
2019 PrintF(" capacity: %" V8_PTR_PREFIX
2021 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2022 TotalCapacity(), Available(), static_cast<int>(pct * 100));
2023 PrintF("\n Object Histogram:\n");
2024 for (int i = 0; i <= LAST_TYPE; i++) {
2025 if (allocated_histogram_[i].number() > 0) {
2026 PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
2027 allocated_histogram_[i].number(),
2028 allocated_histogram_[i].bytes());
2036 Isolate* isolate = heap()->isolate();
2037 DoReportStatistics(isolate, allocated_histogram_, "allocated");
2038 DoReportStatistics(isolate, promoted_histogram_, "promoted");
2043 void NewSpace::RecordAllocation(HeapObject* obj) {
2044 InstanceType type = obj->map()->instance_type();
2045 DCHECK(0 <= type && type <= LAST_TYPE);
2046 allocated_histogram_[type].increment_number(1);
2047 allocated_histogram_[type].increment_bytes(obj->Size());
2051 void NewSpace::RecordPromotion(HeapObject* obj) {
2052 InstanceType type = obj->map()->instance_type();
2053 DCHECK(0 <= type && type <= LAST_TYPE);
2054 promoted_histogram_[type].increment_number(1);
2055 promoted_histogram_[type].increment_bytes(obj->Size());
2059 size_t NewSpace::CommittedPhysicalMemory() {
2060 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2061 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2062 size_t size = to_space_.CommittedPhysicalMemory();
2063 if (from_space_.is_committed()) {
2064 size += from_space_.CommittedPhysicalMemory();
2070 // -----------------------------------------------------------------------------
2071 // Free lists for old object spaces implementation
2073 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
2074 intptr_t free_bytes = 0;
2075 if (category->top() != NULL) {
2076 // This is safe (not going to deadlock) since Concatenate operations
// are never performed on the same free lists at the same time in reverse
// order.
2079 base::LockGuard<base::Mutex> target_lock_guard(mutex());
2080 base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
2081 DCHECK(category->end_ != NULL);
2082 free_bytes = category->available();
if (end_ == NULL) {
  end_ = category->end();
} else {
  category->end()->set_next(top());
}
2088 set_top(category->top());
2089 base::NoBarrier_Store(&top_, category->top_);
2090 available_ += category->available();
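// For illustration: the two lock guards above always acquire the destination
// category's mutex first and the source's second. That is only deadlock free
// because, per the comment above, two threads never run the symmetric pair of
// operations on the same two categories concurrently, i.e. a hypothetical
//
//   // thread A: a.Concatenate(&b);  // locks a.mutex(), then b.mutex()
//   // thread B: b.Concatenate(&a);  // locks b.mutex(), then a.mutex()
//
// does not happen for the same pair at the same time.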
2097 void FreeListCategory::Reset() {
2104 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
intptr_t sum = 0;
FreeSpace* t = top();
FreeSpace** n = &t;
2108 while (*n != NULL) {
2109 if (Page::FromAddress((*n)->address()) == p) {
2110 FreeSpace* free_space = *n;
2111 sum += free_space->Size();
2114 n = (*n)->next_address();
2118 if (top() == NULL) {
2126 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
2127 FreeSpace* node = top();
2128 while (node != NULL) {
2129 if (Page::FromAddress(node->address()) == p) return true;
2130 node = node->next();
2136 FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
2137 FreeSpace* node = top();
2139 if (node == NULL) return NULL;
2141 while (node != NULL &&
2142 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
2143 available_ -= node->Size();
2144 node = node->next();
2148 set_top(node->next());
2149 *node_size = node->Size();
2150 available_ -= *node_size;
2155 if (top() == NULL) {
FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
                                              int* node_size) {
2165 FreeSpace* node = PickNodeFromList(node_size);
2166 if (node != NULL && *node_size < size_in_bytes) {
2167 Free(node, *node_size);
2175 void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
2176 DCHECK_LE(FreeList::kSmallListMin, size_in_bytes);
2177 free_space->set_next(top());
2178 set_top(free_space);
2182 available_ += size_in_bytes;
void FreeListCategory::RepairFreeList(Heap* heap) {
  // Restore the free-space map on every node in the list, not just the head.
  for (FreeSpace* n = top(); n != NULL; n = n->next()) {
    Map** map_location = reinterpret_cast<Map**>(n->address());
    if (*map_location == NULL) {
      *map_location = heap->free_space_map();
    } else {
      DCHECK(*map_location == heap->free_space_map());
    }
  }
}
2200 FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
2205 intptr_t FreeList::Concatenate(FreeList* free_list) {
2206 intptr_t free_bytes = 0;
2207 free_bytes += small_list_.Concatenate(free_list->small_list());
2208 free_bytes += medium_list_.Concatenate(free_list->medium_list());
2209 free_bytes += large_list_.Concatenate(free_list->large_list());
2210 free_bytes += huge_list_.Concatenate(free_list->huge_list());
2215 void FreeList::Reset() {
2216 small_list_.Reset();
2217 medium_list_.Reset();
large_list_.Reset();
huge_list_.Reset();
2223 int FreeList::Free(Address start, int size_in_bytes) {
2224 if (size_in_bytes == 0) return 0;
2226 heap_->CreateFillerObjectAt(start, size_in_bytes);
2228 Page* page = Page::FromAddress(start);
2230 // Early return to drop too-small blocks on the floor.
2231 if (size_in_bytes < kSmallListMin) {
2232 page->add_non_available_small_blocks(size_in_bytes);
2233 return size_in_bytes;
2236 FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
// Insert other blocks at the head of a free list of the appropriate
// magnitude.
2239 if (size_in_bytes <= kSmallListMax) {
2240 small_list_.Free(free_space, size_in_bytes);
2241 page->add_available_in_small_free_list(size_in_bytes);
2242 } else if (size_in_bytes <= kMediumListMax) {
2243 medium_list_.Free(free_space, size_in_bytes);
2244 page->add_available_in_medium_free_list(size_in_bytes);
2245 } else if (size_in_bytes <= kLargeListMax) {
2246 large_list_.Free(free_space, size_in_bytes);
2247 page->add_available_in_large_free_list(size_in_bytes);
2249 huge_list_.Free(free_space, size_in_bytes);
2250 page->add_available_in_huge_free_list(size_in_bytes);
2253 DCHECK(IsVeryLong() || available() == SumFreeLists());
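// Illustrative sketch (free_list and addr are hypothetical names; the concrete
// byte thresholds are defined in spaces.h and are not quoted here): Free()
// routes a block purely by its size, e.g.
//
//   free_list->Free(addr, 16);      // below kSmallListMin: dropped, only
//                                   // counted in non_available_small_blocks
//   free_list->Free(addr, 256);     // small_list_
//   free_list->Free(addr, 8 * KB);  // medium_list_ or large_list_, depending
//                                   // on kMediumListMax / kLargeListMax
//   free_list->Free(addr, 1 * MB);  // above kLargeListMax: huge_list_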
2258 FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
FreeSpace* node = NULL;
Page* page = NULL;
2262 if (size_in_bytes <= kSmallAllocationMax) {
2263 node = small_list_.PickNodeFromList(node_size);
2265 DCHECK(size_in_bytes <= *node_size);
2266 page = Page::FromAddress(node->address());
2267 page->add_available_in_small_free_list(-(*node_size));
2268 DCHECK(IsVeryLong() || available() == SumFreeLists());
2273 if (size_in_bytes <= kMediumAllocationMax) {
2274 node = medium_list_.PickNodeFromList(node_size);
2276 DCHECK(size_in_bytes <= *node_size);
2277 page = Page::FromAddress(node->address());
2278 page->add_available_in_medium_free_list(-(*node_size));
2279 DCHECK(IsVeryLong() || available() == SumFreeLists());
2284 if (size_in_bytes <= kLargeAllocationMax) {
2285 node = large_list_.PickNodeFromList(node_size);
2287 DCHECK(size_in_bytes <= *node_size);
2288 page = Page::FromAddress(node->address());
2289 page->add_available_in_large_free_list(-(*node_size));
2290 DCHECK(IsVeryLong() || available() == SumFreeLists());
2295 int huge_list_available = huge_list_.available();
2296 FreeSpace* top_node = huge_list_.top();
2297 for (FreeSpace** cur = &top_node; *cur != NULL;
2298 cur = (*cur)->next_address()) {
2299 FreeSpace* cur_node = *cur;
2300 while (cur_node != NULL &&
2301 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
2302 int size = cur_node->Size();
2303 huge_list_available -= size;
2304 page = Page::FromAddress(cur_node->address());
2305 page->add_available_in_huge_free_list(-size);
2306 cur_node = cur_node->next();
2310 if (cur_node == NULL) {
2311 huge_list_.set_end(NULL);
2315 int size = cur_node->Size();
2316 if (size >= size_in_bytes) {
2317 // Large enough node found. Unlink it from the list.
node = *cur;
*cur = node->next();
2321 huge_list_available -= size;
2322 page = Page::FromAddress(node->address());
2323 page->add_available_in_huge_free_list(-size);
2328 huge_list_.set_top(top_node);
2329 if (huge_list_.top() == NULL) {
2330 huge_list_.set_end(NULL);
2332 huge_list_.set_available(huge_list_available);
2335 DCHECK(IsVeryLong() || available() == SumFreeLists());
2339 if (size_in_bytes <= kSmallListMax) {
2340 node = small_list_.PickNodeFromList(size_in_bytes, node_size);
2342 DCHECK(size_in_bytes <= *node_size);
2343 page = Page::FromAddress(node->address());
2344 page->add_available_in_small_free_list(-(*node_size));
2346 } else if (size_in_bytes <= kMediumListMax) {
2347 node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
2349 DCHECK(size_in_bytes <= *node_size);
2350 page = Page::FromAddress(node->address());
2351 page->add_available_in_medium_free_list(-(*node_size));
2353 } else if (size_in_bytes <= kLargeListMax) {
2354 node = large_list_.PickNodeFromList(size_in_bytes, node_size);
2356 DCHECK(size_in_bytes <= *node_size);
2357 page = Page::FromAddress(node->address());
2358 page->add_available_in_large_free_list(-(*node_size));
2362 DCHECK(IsVeryLong() || available() == SumFreeLists());
2367 // Allocation on the old space free list. If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then NULL is returned, and the caller can perform a GC
// or allocate a new page before retrying.
2371 HeapObject* FreeList::Allocate(int size_in_bytes) {
2372 DCHECK(0 < size_in_bytes);
2373 DCHECK(size_in_bytes <= kMaxBlockSize);
2374 DCHECK(IsAligned(size_in_bytes, kPointerSize));
2375 // Don't free list allocate if there is linear space available.
2376 DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
2378 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2379 // Mark the old linear allocation area with a free space map so it can be
2380 // skipped when scanning the heap. This also puts it back in the free list
2381 // if it is big enough.
2382 owner_->Free(owner_->top(), old_linear_size);
owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
                                                    old_linear_size);
2387 int new_node_size = 0;
2388 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2389 if (new_node == NULL) {
2390 owner_->SetTopAndLimit(NULL, NULL);
2394 int bytes_left = new_node_size - size_in_bytes;
2395 DCHECK(bytes_left >= 0);
2398 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2399 reinterpret_cast<Object**>(new_node->address())[i] =
2400 Smi::FromInt(kCodeZapValue);
2404 // The old-space-step might have finished sweeping and restarted marking.
// Verify that it did not turn the page of the new node into an evacuation
// candidate.
2407 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2409 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2411 // Memory in the linear allocation area is counted as allocated. We may free
2412 // a little of this again immediately - see below.
2413 owner_->Allocate(new_node_size);
2415 if (owner_->heap()->inline_allocation_disabled()) {
// Keep the linear allocation area empty if requested to do so; just
// return the area to the free list instead.
2418 owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2419 DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
2420 } else if (bytes_left > kThreshold &&
2421 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2422 FLAG_incremental_marking_steps) {
2423 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2424 // We don't want to give too large linear areas to the allocator while
2425 // incremental marking is going on, because we won't check again whether
2426 // we want to do another increment until the linear area is used up.
2427 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2428 new_node_size - size_in_bytes - linear_size);
2429 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2430 new_node->address() + size_in_bytes + linear_size);
2431 } else if (bytes_left > 0) {
2432 // Normally we give the rest of the node to the allocator as its new
2433 // linear allocation area.
2434 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2435 new_node->address() + new_node_size);
// TODO(gc) Try not freeing the linear allocation region when bytes_left
// is zero.
2439 owner_->SetTopAndLimit(NULL, NULL);
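// Worked example (all numbers assumed, for illustration only): say
// size_in_bytes = 64, the node found above has new_node_size = 4096, and
// kThreshold = 1024. Then bytes_left = 4032 > kThreshold, so while incremental
// marking is unfinished the allocator is handed only a ~1 KB linear area,
//
//   top   = new_node->address() + 64
//   limit = new_node->address() + 64 + linear_size   // linear_size ~ 1024
//
// and the remaining ~3008 bytes are immediately returned via owner_->Free().
// This keeps allocations hitting the slow path often enough for marking steps.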
2446 intptr_t FreeList::EvictFreeListItems(Page* p) {
2447 intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
2448 p->set_available_in_huge_free_list(0);
2450 if (sum < p->area_size()) {
2451 sum += small_list_.EvictFreeListItemsInList(p) +
2452 medium_list_.EvictFreeListItemsInList(p) +
2453 large_list_.EvictFreeListItemsInList(p);
2454 p->set_available_in_small_free_list(0);
2455 p->set_available_in_medium_free_list(0);
2456 p->set_available_in_large_free_list(0);
2463 bool FreeList::ContainsPageFreeListItems(Page* p) {
return huge_list_.ContainsPageFreeListItemsInList(p) ||
       small_list_.ContainsPageFreeListItemsInList(p) ||
       medium_list_.ContainsPageFreeListItemsInList(p) ||
       large_list_.ContainsPageFreeListItemsInList(p);
2471 void FreeList::RepairLists(Heap* heap) {
2472 small_list_.RepairFreeList(heap);
2473 medium_list_.RepairFreeList(heap);
2474 large_list_.RepairFreeList(heap);
2475 huge_list_.RepairFreeList(heap);
2480 intptr_t FreeListCategory::SumFreeList() {
intptr_t sum = 0;
FreeSpace* cur = top();
while (cur != NULL) {
  DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
  sum += cur->nobarrier_size();
  cur = cur->next();
2492 static const int kVeryLongFreeList = 500;
2495 int FreeListCategory::FreeListLength() {
int length = 0;
FreeSpace* cur = top();
while (cur != NULL) {
  length++;
  cur = cur->next();
  if (length == kVeryLongFreeList) return length;
2507 bool FreeList::IsVeryLong() {
2508 if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
2509 if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
2510 if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
2511 if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
2516 // This can take a very long time because it is linear in the number of entries
2517 // on the free list, so it should not be called if FreeListLength returns
2518 // kVeryLongFreeList.
2519 intptr_t FreeList::SumFreeLists() {
2520 intptr_t sum = small_list_.SumFreeList();
2521 sum += medium_list_.SumFreeList();
2522 sum += large_list_.SumFreeList();
2523 sum += huge_list_.SumFreeList();
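// For illustration: this cost is why the consistency checks in this file are
// written as
//
//   DCHECK(IsVeryLong() || available() == SumFreeLists());
//
// the short-circuiting || skips the O(n) sum as soon as any category has
// reached kVeryLongFreeList entries.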
2529 // -----------------------------------------------------------------------------
2530 // OldSpace implementation
2532 void PagedSpace::PrepareForMarkCompact() {
2533 // We don't have a linear allocation area while sweeping. It will be restored
2534 // on the first allocation after the sweep.
2535 EmptyAllocationInfo();
// This counter will be increased for pages which will be swept by the
// sweeper threads.
2539 unswept_free_bytes_ = 0;
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
2546 intptr_t PagedSpace::SizeOfObjects() {
2547 DCHECK(!FLAG_concurrent_sweeping ||
2548 heap()->mark_compact_collector()->sweeping_in_progress() ||
2549 (unswept_free_bytes_ == 0));
2550 return Size() - unswept_free_bytes_ - (limit() - top());
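// Worked example (assumed numbers): SizeOfObjects() subtracts both the bytes
// that sweeping will still free (unswept_free_bytes_) and the unused tail of
// the current linear allocation area (limit() - top()). With Size() = 8 MB,
// unswept_free_bytes_ = 1 MB and 8 KB left in the linear area, it reports
// 8 MB - 1 MB - 8 KB of live objects.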
2554 // After we have booted, we have created a map which represents free space
2555 // on the heap. If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally NULL), so we need to
// fix them.
2558 void PagedSpace::RepairFreeListsAfterDeserialization() {
2559 free_list_.RepairLists(heap());
2560 // Each page may have a small free space that is not tracked by a free list.
2561 // Update the maps for those free space objects.
2562 PageIterator iterator(this);
2563 while (iterator.has_next()) {
2564 Page* page = iterator.next();
2565 int size = static_cast<int>(page->non_available_small_blocks());
2566 if (size == 0) continue;
2567 Address address = page->OffsetToAddress(Page::kPageSize - size);
2568 heap()->CreateFillerObjectAt(address, size);
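// Worked example (assumed numbers): if a deserialized page reports
// non_available_small_blocks() == 24 and Page::kPageSize were 1 MB, the loop
// above writes the free-space filler at
//
//   page->OffsetToAddress(1 * MB - 24)
//
// i.e. over the untracked 24-byte block at the very end of the page.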
2573 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2574 if (allocation_info_.top() >= allocation_info_.limit()) return;
2576 if (Page::FromAllocationTop(allocation_info_.top())
2577 ->IsEvacuationCandidate()) {
2578 // Create filler object to keep page iterable if it was iterable.
int remaining =
    static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2581 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
2583 allocation_info_.set_top(NULL);
2584 allocation_info_.set_limit(NULL);
2589 HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
2590 int size_in_bytes) {
2591 MarkCompactCollector* collector = heap()->mark_compact_collector();
2592 if (collector->sweeping_in_progress()) {
2593 // Wait for the sweeper threads here and complete the sweeping phase.
2594 collector->EnsureSweepingCompleted();
// After waiting for the sweeper threads, there may be new free-list
// entries.
2598 return free_list_.Allocate(size_in_bytes);
2604 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2605 // Allocation in this space has failed.
2607 MarkCompactCollector* collector = heap()->mark_compact_collector();
2608 // Sweeping is still in progress.
2609 if (collector->sweeping_in_progress()) {
2610 // First try to refill the free-list, concurrent sweeper threads
2611 // may have freed some objects in the meantime.
2612 collector->RefillFreeList(this);
2614 // Retry the free list allocation.
2615 HeapObject* object = free_list_.Allocate(size_in_bytes);
2616 if (object != NULL) return object;
2618 // If sweeping is still in progress try to sweep pages on the main thread.
2619 int free_chunk = collector->SweepInParallel(this, size_in_bytes);
2620 collector->RefillFreeList(this);
2621 if (free_chunk >= size_in_bytes) {
2622 HeapObject* object = free_list_.Allocate(size_in_bytes);
// We should be able to allocate an object here since we just freed that
// much memory.
2625 DCHECK(object != NULL);
2626 if (object != NULL) return object;
2630 // Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
2633 if (!heap()->always_allocate() &&
2634 heap()->OldGenerationAllocationLimitReached()) {
2635 // If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists.
2637 HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2641 // Try to expand the space and allocate in the new next page.
2643 DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
2644 return free_list_.Allocate(size_in_bytes);
2647 // If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists. Allocation may still fail, which
2649 // would indicate that there is not enough memory for the given allocation.
2650 return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2655 void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2656 CommentStatistic* comments_statistics =
2657 isolate->paged_space_comments_statistics();
2658 ReportCodeKindStatistics(isolate->code_kind_statistics());
2660 "Code comment statistics (\" [ comment-txt : size/ "
2661 "count (average)\"):\n");
2662 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2663 const CommentStatistic& cs = comments_statistics[i];
2665 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2666 cs.size / cs.count);
2673 void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2674 CommentStatistic* comments_statistics =
2675 isolate->paged_space_comments_statistics();
2676 ClearCodeKindStatistics(isolate->code_kind_statistics());
2677 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2678 comments_statistics[i].Clear();
2680 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2681 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2682 comments_statistics[CommentStatistic::kMaxComments].count = 0;
2686 // Adds comment to 'comment_statistics' table. Performance OK as long as
2687 // 'kMaxComments' is small
2688 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2689 CommentStatistic* comments_statistics =
2690 isolate->paged_space_comments_statistics();
2691 // Do not count empty comments
2692 if (delta <= 0) return;
2693 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2694 // Search for a free or matching entry in 'comments_statistics': 'cs'
2695 // points to result.
2696 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2697 if (comments_statistics[i].comment == NULL) {
2698 cs = &comments_statistics[i];
2699 cs->comment = comment;
2701 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2702 cs = &comments_statistics[i];
2706 // Update entry for 'comment'
// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']'). RelocIterator 'it' must point to a comment reloc info.
2714 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2715 DCHECK(!it->done());
2716 DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
2717 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2718 if (tmp[0] != '[') {
// Not a nested comment; skip.
return;
2723 // Search for end of nested comment or a new nested comment
2724 const char* const comment_txt =
2725 reinterpret_cast<const char*>(it->rinfo()->data());
const byte* prev_pc = it->rinfo()->pc();
int flat_delta = 0;
// All nested comments must be terminated properly, and therefore we exit
// this loop only via the break below.
2732 DCHECK(!it->done());
2733 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2734 const char* const txt =
2735 reinterpret_cast<const char*>(it->rinfo()->data());
2736 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2737 if (txt[0] == ']') break; // End of nested comment
2739 CollectCommentStatistics(isolate, it);
2740 // Skip code that was covered with previous comment
2741 prev_pc = it->rinfo()->pc();
2745 EnterComment(isolate, comment_txt, flat_delta);
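// Illustrative sketch (hypothetical assembler comments and byte counts): given
//
//   [ LoadIC          <- outer comment; this function starts here
//     ...40 bytes...
//     [ inline cache  <- inner comment; handled by the recursive call above
//       ...16 bytes...
//     ]
//     ...8 bytes...
//   ]
//
// EnterComment() receives ("[ inline cache", 16) from the recursive call and
// ("[ LoadIC", 48) from this level, so the inner 16 bytes are not counted
// twice in the outer comment's flat delta.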
2749 // Collects code size statistics:
// - by code kind
// - by code comment
2752 void PagedSpace::CollectCodeStatistics() {
2753 Isolate* isolate = heap()->isolate();
2754 HeapObjectIterator obj_it(this);
2755 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2756 if (obj->IsCode()) {
2757 Code* code = Code::cast(obj);
2758 isolate->code_kind_statistics()[code->kind()] += code->Size();
2759 RelocIterator it(code);
int delta = 0;
const byte* prev_pc = code->instruction_start();
2762 while (!it.done()) {
2763 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2764 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2765 CollectCommentStatistics(isolate, &it);
2766 prev_pc = it.rinfo()->pc();
2771 DCHECK(code->instruction_start() <= prev_pc &&
2772 prev_pc <= code->instruction_end());
2773 delta += static_cast<int>(code->instruction_end() - prev_pc);
2774 EnterComment(isolate, "NoComment", delta);
2780 void PagedSpace::ReportStatistics() {
2781 int pct = static_cast<int>(Available() * 100 / Capacity());
2782 PrintF(" capacity: %" V8_PTR_PREFIX
2784 ", waste: %" V8_PTR_PREFIX
2786 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2787 Capacity(), Waste(), Available(), pct);
2789 if (heap()->mark_compact_collector()->sweeping_in_progress()) {
2790 heap()->mark_compact_collector()->EnsureSweepingCompleted();
2792 ClearHistograms(heap()->isolate());
2793 HeapObjectIterator obj_it(this);
2794 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2795 CollectHistogramInfo(obj);
2796 ReportHistogram(heap()->isolate(), true);
2801 // -----------------------------------------------------------------------------
2802 // MapSpace implementation
2803 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2804 // there is at least one non-inlined virtual function. I would prefer to hide
2805 // the VerifyObject definition behind VERIFY_HEAP.
2807 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
2810 // -----------------------------------------------------------------------------
2811 // CellSpace implementation
2812 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2813 // there is at least one non-inlined virtual function. I would prefer to hide
2814 // the VerifyObject definition behind VERIFY_HEAP.
2816 void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); }
2819 // -----------------------------------------------------------------------------
2820 // LargeObjectIterator
2822 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2823 current_ = space->first_page_;
2828 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2829 HeapObjectCallback size_func) {
2830 current_ = space->first_page_;
2831 size_func_ = size_func;
2835 HeapObject* LargeObjectIterator::Next() {
2836 if (current_ == NULL) return NULL;
2838 HeapObject* object = current_->GetObject();
2839 current_ = current_->next_page();
2844 // -----------------------------------------------------------------------------
2846 static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
                                   AllocationSpace id)
2851 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2852 max_capacity_(max_capacity),
2857 chunk_map_(ComparePointers, 1024) {}
2860 bool LargeObjectSpace::SetUp() {
2863 maximum_committed_ = 0;
2871 void LargeObjectSpace::TearDown() {
2872 while (first_page_ != NULL) {
2873 LargePage* page = first_page_;
2874 first_page_ = first_page_->next_page();
2875 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2877 ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2878 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2879 space, kAllocationActionFree, page->size());
2880 heap()->isolate()->memory_allocator()->Free(page);
2886 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
2887 Executability executable) {
2888 // Check if we want to force a GC before growing the old space further.
2889 // If so, fail the allocation.
2890 if (!heap()->always_allocate() &&
2891 heap()->OldGenerationAllocationLimitReached()) {
2892 return AllocationResult::Retry(identity());
2895 if (!CanAllocateSize(object_size)) return AllocationResult::Retry(identity());
2897 LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
2898 object_size, this, executable);
2899 if (page == NULL) return AllocationResult::Retry(identity());
2900 DCHECK(page->area_size() >= object_size);
2902 size_ += static_cast<int>(page->size());
2903 objects_size_ += object_size;
2905 page->set_next_page(first_page_);
2908 if (size_ > maximum_committed_) {
2909 maximum_committed_ = size_;
2912 // Register all MemoryChunk::kAlignment-aligned chunks covered by
2913 // this large page in the chunk map.
2914 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2915 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2916 for (uintptr_t key = base; key <= limit; key++) {
2917 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2918 static_cast<uint32_t>(key), true);
2919 DCHECK(entry != NULL);
2920 entry->value = page;
2923 HeapObject* object = page->GetObject();
2925 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
2927 if (Heap::ShouldZapGarbage()) {
2928 // Make the object consistent so the heap can be verified in OldSpaceStep.
2929 // We only need to do this in debug builds or if verify_heap is on.
2930 reinterpret_cast<Object**>(object->address())[0] =
2931 heap()->fixed_array_map();
2932 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2935 heap()->incremental_marking()->OldSpaceStep(object_size);
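// Worked example (assumed numbers): with a MemoryChunk::kAlignment of 1 MB, a
// large page starting at 0x42500000 and spanning 3 MB is registered above
// under the keys 0x425, 0x426 and 0x427. FindPage() below divides any interior
// address by the same alignment, so looking up 0x42671230 computes key 0x426
// and finds this page in chunk_map_ in constant time.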
2940 size_t LargeObjectSpace::CommittedPhysicalMemory() {
2941 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
size_t size = 0;
LargePage* current = first_page_;
2944 while (current != NULL) {
2945 size += current->CommittedPhysicalMemory();
2946 current = current->next_page();
2953 Object* LargeObjectSpace::FindObject(Address a) {
2954 LargePage* page = FindPage(a);
if (page != NULL) return page->GetObject();
2958 return Smi::FromInt(0); // Signaling not found.
2962 LargePage* LargeObjectSpace::FindPage(Address a) {
2963 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
2964 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2965 static_cast<uint32_t>(key), false);
if (e != NULL) {
  DCHECK(e->value != NULL);
2968 LargePage* page = reinterpret_cast<LargePage*>(e->value);
2969 DCHECK(page->is_valid());
2970 if (page->Contains(a)) {
2978 void LargeObjectSpace::FreeUnmarkedObjects() {
2979 LargePage* previous = NULL;
2980 LargePage* current = first_page_;
2981 while (current != NULL) {
2982 HeapObject* object = current->GetObject();
// Can this large page contain pointers to non-trivial objects? No other
// pointer object is this big.
2985 bool is_pointer_object = object->IsFixedArray();
2986 MarkBit mark_bit = Marking::MarkBitFrom(object);
2987 if (mark_bit.Get()) {
2989 Page::FromAddress(object->address())->ResetProgressBar();
2990 Page::FromAddress(object->address())->ResetLiveBytes();
previous = current;
current = current->next_page();
2994 LargePage* page = current;
2995 // Cut the chunk out from the chunk list.
2996 current = current->next_page();
2997 if (previous == NULL) {
2998 first_page_ = current;
3000 previous->set_next_page(current);
3004 heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
3006 size_ -= static_cast<int>(page->size());
3007 objects_size_ -= object->Size();
3010 // Remove entries belonging to this page.
3011 // Use variable alignment to help pass length check (<= 80 characters)
3012 // of single line in tools/presubmit.py.
3013 const intptr_t alignment = MemoryChunk::kAlignment;
3014 uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
3015 uintptr_t limit = base + (page->size() - 1) / alignment;
3016 for (uintptr_t key = base; key <= limit; key++) {
3017 chunk_map_.Remove(reinterpret_cast<void*>(key),
3018 static_cast<uint32_t>(key));
3021 if (is_pointer_object) {
3022 heap()->QueueMemoryChunkForFree(page);
3024 heap()->isolate()->memory_allocator()->Free(page);
3028 heap()->FreeQueuedChunks();
3032 bool LargeObjectSpace::Contains(HeapObject* object) {
3033 Address address = object->address();
3034 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3036 bool owned = (chunk->owner() == this);
3038 SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
3045 // We do not assume that the large object iterator works, because it depends
3046 // on the invariants we are checking during verification.
3047 void LargeObjectSpace::Verify() {
3048 for (LargePage* chunk = first_page_; chunk != NULL;
3049 chunk = chunk->next_page()) {
3050 // Each chunk contains an object that starts at the large object page's
3051 // object area start.
3052 HeapObject* object = chunk->GetObject();
3053 Page* page = Page::FromAddress(object->address());
3054 CHECK(object->address() == page->area_start());
// The first word should be a map, and we expect all map pointers to be
// in map space.
3058 Map* map = object->map();
3059 CHECK(map->IsMap());
3060 CHECK(heap()->map_space()->Contains(map));
3062 // We have only code, sequential strings, external strings
3063 // (sequential strings that have been morphed into external
3064 // strings), fixed arrays, byte arrays, and constant pool arrays in the
3065 // large object space.
3066 CHECK(object->IsCode() || object->IsSeqString() ||
3067 object->IsExternalString() || object->IsFixedArray() ||
3068 object->IsFixedDoubleArray() || object->IsByteArray() ||
3069 object->IsConstantPoolArray());
3071 // The object itself should look OK.
3072 object->ObjectVerify();
3074 // Byte arrays and strings don't have interior pointers.
3075 if (object->IsCode()) {
3076 VerifyPointersVisitor code_visitor;
3077 object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
3078 } else if (object->IsFixedArray()) {
3079 FixedArray* array = FixedArray::cast(object);
3080 for (int j = 0; j < array->length(); j++) {
3081 Object* element = array->get(j);
3082 if (element->IsHeapObject()) {
3083 HeapObject* element_object = HeapObject::cast(element);
3084 CHECK(heap()->Contains(element_object));
3085 CHECK(element_object->map()->IsMap());
3095 void LargeObjectSpace::Print() {
3096 OFStream os(stdout);
3097 LargeObjectIterator it(this);
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
  obj->Print(os);
3104 void LargeObjectSpace::ReportStatistics() {
3105 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
3106 int num_objects = 0;
3107 ClearHistograms(heap()->isolate());
3108 LargeObjectIterator it(this);
3109 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
num_objects++;
CollectHistogramInfo(obj);
3115 " number of objects %d, "
3116 "size of objects %" V8_PTR_PREFIX "d\n",
3117 num_objects, objects_size_);
3118 if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3122 void LargeObjectSpace::CollectCodeStatistics() {
3123 Isolate* isolate = heap()->isolate();
3124 LargeObjectIterator obj_it(this);
3125 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
3126 if (obj->IsCode()) {
3127 Code* code = Code::cast(obj);
3128 isolate->code_kind_statistics()[code->kind()] += code->Size();
3134 void Page::Print() {
3135 // Make a best-effort to print the objects in the page.
3136 PrintF("Page@%p in %s\n", this->address(),
3137 AllocationSpaceName(this->owner()->identity()));
3138 printf(" --------------------------------------\n");
3139 HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
3140 unsigned mark_size = 0;
3141 for (HeapObject* object = objects.Next(); object != NULL;
3142 object = objects.Next()) {
3143 bool is_marked = Marking::MarkBitFrom(object).Get();
3144 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
if (is_marked) mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
3148 object->ShortPrint();
3151 printf(" --------------------------------------\n");
3152 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3157 } // namespace v8::internal