// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/platform/platform.h"
#include "src/full-codegen.h"
#include "src/heap/mark-compact.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the doubly linked page list. Initialize as if we have
  // reached the end of the anchor page, then the first iteration will move on
  // to the first actual page.
  Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
}

HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the doubly linked page list. Initialize the current
  // address and end as NULL, then the first iteration will move on to the
  // first actual page.
  Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
}

HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Space* owner = page->owner();
  DCHECK(owner == page->heap()->old_pointer_space() ||
         owner == page->heap()->old_data_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->cell_space() ||
         owner == page->heap()->property_cell_space() ||
         owner == page->heap()->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
             page->area_end(), kOnePageOnly, size_func);
  DCHECK(page->WasSweptPrecisely() ||
         (static_cast<PagedSpace*>(owner)->swept_precisely() &&
          page->SweepingCompleted()));
}

void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
                                    HeapObjectIterator::PageMode mode,
                                    HeapObjectCallback size_f) {
  // Check that we actually can iterate this space.
  DCHECK(space->swept_precisely());

  space_ = space;
  cur_addr_ = cur;
  cur_end_ = end;
  page_mode_ = mode;
  size_func_ = size_f;
}

// We have hit the end of the current block of objects and should advance to
// the next page of objects.
bool HeapObjectIterator::AdvanceToNextPage() {
  DCHECK(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    DCHECK(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  DCHECK(cur_page->WasSweptPrecisely() ||
         (static_cast<PagedSpace*>(cur_page->owner())->swept_precisely() &&
          cur_page->SweepingCompleted()));
  return true;
}

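// Usage sketch (illustrative only; it mirrors PagedSpace::FindObject further
// down in this file): walking every object on one page in address order.
//
//   HeapObjectIterator it(page, NULL);  // NULL: use HeapObject::Size().
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // Inspect obj.
//   }
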
// -----------------------------------------------------------------------------
// CodeRange

CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {}

bool CodeRange::SetUp(size_t requested) {
  DCHECK(code_range_ == NULL);

  if (requested == 0) {
    // When a target requires the code range feature, we put all code objects
    // in a kMaximalCodeRangeSize range of virtual address space, so that
    // they can call each other with near calls.
    if (kRequiresCodeRange) {
      requested = kMaximalCodeRangeSize;
    } else {
      return true;
    }
  }

  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
  code_range_ = new base::VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  DCHECK(code_range_->size() == requested);
  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  Address base = reinterpret_cast<Address>(code_range_->address());
  Address aligned_base =
      RoundUp(reinterpret_cast<Address>(code_range_->address()),
              MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base);
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;
  return true;
}

int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}

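// Worked example (illustrative): the code range spans at most a few hundred
// megabytes (kMaximalCodeRangeSize), so for any two blocks the difference
// left->start - right->start lies well within [-2^31, 2^31) and the cast
// above preserves both magnitude and sign.
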
bool CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

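  // Worked example (illustrative): after sorting, adjacent free blocks
  // [0x10000, +0x4000) and [0x14000, +0x8000) satisfy
  // free_list_[i].start == merged.start + merged.size, so the loop above
  // folds them into a single block [0x10000, +0xc000) before it is re-added
  // to the allocation list.
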
  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }
  current_allocation_block_index_ = 0;
  // Code range is full or too fragmented.
  return false;
}

Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  DCHECK(commit_size <= requested_size);
  DCHECK(allocation_list_.length() == 0 ||
         current_allocation_block_index_ < allocation_list_.length());
  if (allocation_list_.length() == 0 ||
      requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.
    if (!GetNextAllocationBlock(requested_size)) return NULL;
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (aligned_requested >= (current.size - Page::kPageSize)) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  } else {
    *allocated = aligned_requested;
  }
  DCHECK(*allocated <= current.size);
  DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!isolate_->memory_allocator()->CommitExecutableMemory(
          code_range_, current.start, commit_size, *allocated)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    // This block is used up, get the next one.
    GetNextAllocationBlock(0);
  }
  return current.start;
}

bool CodeRange::CommitRawMemory(Address start, size_t length) {
  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
}

bool CodeRange::UncommitRawMemory(Address start, size_t length) {
  return code_range_->Uncommit(start, length);
}

void CodeRange::FreeRawMemory(Address address, size_t length) {
  DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}

void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}

// -----------------------------------------------------------------------------
// MemoryAllocator

MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}

bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  DCHECK_GE(capacity_, capacity_executable_);

  size_ = 0;
  size_executable_ = 0;

  return true;
}

void MemoryAllocator::TearDown() {
  // Check that spaces were torn down before MemoryAllocator.
  DCHECK(size_ == 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // DCHECK(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}

bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                   Executability executable) {
  if (!base::VirtualMemory::CommitRegion(base, size,
                                         executable == EXECUTABLE)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}

void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  DCHECK(reservation->IsReserved());
  size_t size = reservation->size();
  DCHECK(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    DCHECK(size_executable_ >= size);
    size_executable_ -= size;
  }
  // Code which is part of the code-range does not have its own VirtualMemory.
  DCHECK(isolate_->code_range() == NULL ||
         !isolate_->code_range()->contains(
             static_cast<Address>(reservation->address())));
  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
         !isolate_->code_range()->valid());
  reservation->Release();
}

void MemoryAllocator::FreeMemory(Address base, size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  DCHECK(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    DCHECK(size_executable_ >= size);
    size_executable_ -= size;
  }
  if (isolate_->code_range() != NULL &&
      isolate_->code_range()->contains(static_cast<Address>(base))) {
    DCHECK(executable == EXECUTABLE);
    isolate_->code_range()->FreeRawMemory(base, size);
  } else {
    DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
           !isolate_->code_range()->valid());
    bool result = base::VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    DCHECK(result);
  }
}

Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                              base::VirtualMemory* controller) {
  base::VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_ += reservation.size();
  Address base =
      RoundUp(static_cast<Address>(reservation.address()), alignment);
  controller->TakeControl(&reservation);
  return base;
}

Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, base::VirtualMemory* controller) {
  DCHECK(commit_size <= reserve_size);
  base::VirtualMemory reservation;
  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
      base = NULL;
    }
  } else {
    if (reservation.Commit(base, commit_size, false)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = NULL;
    }
  }

  if (base == NULL) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    return NULL;
  }

  controller->TakeControl(&reservation);
  return base;
}

void Page::InitializeAsAnchor(PagedSpace* owner) {
  set_owner(owner);
  set_prev_page(this);
  set_next_page(this);
}

NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
                                       SemiSpace* semi_space) {
  Address area_start = start + NewSpacePage::kObjectStartOffset;
  Address area_end = start + Page::kPageSize;

  MemoryChunk* chunk =
      MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
                              area_end, NOT_EXECUTABLE, semi_space);
  chunk->set_next_chunk(NULL);
  chunk->set_prev_chunk(NULL);
  chunk->initialize_scan_on_scavenge(true);
  bool in_to_space = (semi_space->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  return page;
}

void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  set_owner(semi_space);
  set_next_chunk(this);
  set_prev_chunk(this);
  // The flags mark this invalid page as not being in new-space; all real
  // new-space pages will be in new-space.
  SetFlags(0, ~0);
}

MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     Address area_start, Address area_end,
                                     Executability executable, Space* owner) {
  MemoryChunk* chunk = FromAddress(base);

  DCHECK(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->slots_buffer_ = NULL;
  chunk->skip_list_ = NULL;
  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_ = static_cast<int>(area_start - base);
  chunk->set_parallel_sweeping(SWEEPING_DONE);
  chunk->available_in_small_free_list_ = 0;
  chunk->available_in_medium_free_list_ = 0;
  chunk->available_in_large_free_list_ = 0;
  chunk->available_in_huge_free_list_ = 0;
  chunk->non_available_small_blocks_ = 0;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->initialize_scan_on_scavenge(false);
  chunk->SetFlag(WAS_SWEPT_PRECISELY);

  DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  if (owner == heap->old_data_space()) {
    chunk->SetFlag(CONTAINS_ONLY_DATA);
  }

  return chunk;
}

// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size =
      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size =
      RoundUp(header_size + requested, base::OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  base::OS::CommitPageSize());

  if (commit_size > committed_size) {
    // Commit size should be less than or equal to the reserved size.
    DCHECK(commit_size <= size() - 2 * guard_size);
    // Append the committed area.
    Address start = address() + committed_size + guard_size;
    size_t length = commit_size - committed_size;
    if (reservation_.IsReserved()) {
      Executability executable =
          IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
      if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
                                                               executable)) {
        return false;
      }
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->CommitRawMemory(start, length)) return false;
    }

    if (Heap::ShouldZapGarbage()) {
      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
    }
  } else if (commit_size < committed_size) {
    DCHECK(commit_size > 0);
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
    Address start = address() + committed_size + guard_size - length;
    if (reservation_.IsReserved()) {
      if (!reservation_.Uncommit(start, length)) return false;
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->UncommitRawMemory(start, length)) return false;
    }
  }

  area_end_ = area_start_ + requested;
  return true;
}

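// Worked example (illustrative, assuming a 4 KB OS commit page and a
// non-executable chunk, i.e. guard_size == 0): with a 0x100-byte header and
// requested == 10000, commit_size = RoundUp(0x100 + 10000, 0x1000) = 0x3000,
// so CommitArea() commits exactly enough whole OS pages to cover the
// requested area and then sets area_end_ = area_start_ + 10000.
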
void MemoryChunk::InsertAfter(MemoryChunk* other) {
  MemoryChunk* other_next = other->next_chunk();

  set_next_chunk(other_next);
  set_prev_chunk(other);
  other_next->set_prev_chunk(this);
  other->set_next_chunk(this);
}

void MemoryChunk::Unlink() {
  MemoryChunk* next_element = next_chunk();
  MemoryChunk* prev_element = prev_chunk();
  next_element->set_prev_chunk(prev_element);
  prev_element->set_next_chunk(next_element);
  set_prev_chunk(NULL);
  set_next_chunk(NULL);
}

MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  DCHECK(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  base::VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;
  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |          Header            |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //
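  // Worked example (illustrative, assuming a 4 KB OS commit page): for a
  // non-executable chunk with reserve_area_size == 64 KB and
  // commit_area_size == 16 KB, the code below reserves
  // RoundUp(kObjectStartOffset + 64 KB, 4 KB) bytes but commits only
  // RoundUp(kObjectStartOffset + 16 KB, 4 KB) of them, leaving the tail in
  // the "Reserved but not committed" region of the diagram above.
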
  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                         base::OS::CommitPageSize()) +
                 CodePageGuardSize();

    // Check executable memory limit.
    if (size_executable_ + chunk_size > capacity_executable_) {
      LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Size of header (not executable) plus area (executable).
    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                 base::OS::CommitPageSize());
    // Allocate executable memory either from code range or from the OS.
    if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
      base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
                                                       &chunk_size);
      DCHECK(
          IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_ += chunk_size;
      // Update executable memory size.
      size_executable_ += chunk_size;
    } else {
      base = AllocateAlignedMemory(chunk_size, commit_size,
                                   MemoryChunk::kAlignment, executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_ += reservation.size();
    }

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                         base::OS::CommitPageSize());
    size_t commit_size =
        RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
                base::OS::CommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, &reservation);

    if (base == NULL) return NULL;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->Increment(
      static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  MemoryChunk* result = MemoryChunk::Initialize(
      heap, base, chunk_size, area_start, area_end, executable, owner);
  result->set_reserved_memory(&reservation);
  MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size);
  return result;
}

void Page::ResetFreeListStatistics() {
  non_available_small_blocks_ = 0;
  available_in_small_free_list_ = 0;
  available_in_medium_free_list_ = 0;
  available_in_large_free_list_ = 0;
  available_in_huge_free_list_ = 0;
}

Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
                                    Executability executable) {
  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);

  if (chunk == NULL) return NULL;

  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}

LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                              Space* owner,
                                              Executability executable) {
  MemoryChunk* chunk =
      AllocateChunk(object_size, object_size, executable, owner);
  if (chunk == NULL) return NULL;
  return LargePage::Initialize(isolate_->heap(), chunk);
}

void MemoryAllocator::Free(MemoryChunk* chunk) {
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());

  delete chunk->slots_buffer();
  delete chunk->skip_list();

  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    FreeMemory(reservation, chunk->executable());
  } else {
    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
  }
}

bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                  Executability executable) {
  if (!CommitMemory(start, size, executable)) return false;

  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size);
  }

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}

bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}

void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}

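// Example (illustrative): on a 64-bit build ZapBlock(start, 32) writes
// kZapValue into the four pointer-sized slots at start, start + 8,
// start + 16 and start + 24, making stale references into freed or
// uninitialized memory easy to spot in a debugger.
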
void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}

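// Usage sketch (illustrative; kObjectSpaceAll and kAllocationActionAll are
// assumed here to be the catch-all values from the public V8 API): a
// callback registered for all spaces and actions receives one event per
// chunk-sized allocation or free.
//
//   void MyCallback(ObjectSpace space, AllocationAction action, int size) {
//     // e.g. feed an external memory profiler.
//   }
//   allocator->AddMemoryAllocationCallback(MyCallback, kObjectSpaceAll,
//                                          kAllocationActionAll);
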
bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}

void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback, ObjectSpace space,
    AllocationAction action) {
  DCHECK(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}

void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  DCHECK(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}

#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX
         "d"
         ", used: %" V8_PTR_PREFIX
         "d"
         ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct * 100));
}
#endif

int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
}

int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(base::OS::CommitPageSize());
}

int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}

int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
}

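// Worked example (illustrative, assuming 4 KB OS commit pages and a 1 MB
// page): CodePageGuardStartOffset() rounds the header up to a 4 KB boundary,
// CodePageAreaStartOffset() adds one 4 KB guard page after it, and
// CodePageAreaEndOffset() == 1 MB - 4 KB reserves a trailing guard page, so
// the executable area is bracketed by non-writable pages on both sides.
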
bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
                                             Address start, size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  if (!vm->Commit(start, CodePageGuardStartOffset(), false)) {
    return false;
  }

  // Create guard page after the header.
  if (!vm->Guard(start + CodePageGuardStartOffset())) {
    return false;
  }

  // Commit page body (executable).
  if (!vm->Commit(start + CodePageAreaStartOffset(),
                  commit_size - CodePageGuardStartOffset(), true)) {
    return false;
  }

  // Create guard page before the end.
  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
    return false;
  }

  UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
                                        commit_size -
                                        CodePageGuardStartOffset());
  return true;
}

// -----------------------------------------------------------------------------
// MemoryChunk implementation

void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
  }
  chunk->IncrementLiveBytes(by);
}

// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
                       Executability executable)
    : Space(heap, id, executable),
      free_list_(this),
      swept_precisely_(true),
      unswept_free_bytes_(0),
      end_of_unswept_pages_(NULL),
      emergency_memory_(NULL) {
  if (id == CODE_SPACE) {
    area_size_ = heap->isolate()->memory_allocator()->CodePageAreaSize();
  } else {
    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
  }
  max_capacity_ =
      (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
  accounting_stats_.Clear();

  allocation_info_.set_top(NULL);
  allocation_info_.set_limit(NULL);

  anchor_.InitializeAsAnchor(this);
}

bool PagedSpace::SetUp() { return true; }


bool PagedSpace::HasBeenSetUp() { return true; }

void PagedSpace::TearDown() {
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->isolate()->memory_allocator()->Free(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}

size_t PagedSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  PageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}

Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on precisely swept spaces.
  DCHECK(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p, NULL);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Smi::FromInt(0);
}

bool PagedSpace::CanExpand() {
  DCHECK(max_capacity_ % AreaSize() == 0);

  if (Capacity() == max_capacity_) return false;

  DCHECK(Capacity() < max_capacity_);

  // Are we going to exceed capacity for this space?
  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;

  return true;
}

bool PagedSpace::Expand() {
  if (!CanExpand()) return false;

  intptr_t size = AreaSize();

  if (anchor_.next_page() == &anchor_) {
    size = SizeOfFirstPage();
  }

  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
                                                                executable());
  if (p == NULL) return false;

  DCHECK(Capacity() <= max_capacity_);

  p->InsertAfter(anchor_.prev_page());

  return true;
}

intptr_t PagedSpace::SizeOfFirstPage() {
  int size = 0;
  switch (identity()) {
    case OLD_POINTER_SPACE:
      size = 112 * kPointerSize * KB;
      break;
    case OLD_DATA_SPACE:
      size = 192 * KB;
      break;
    case MAP_SPACE:
      size = 16 * kPointerSize * KB;
      break;
    case CELL_SPACE:
      size = 16 * kPointerSize * KB;
      break;
    case PROPERTY_CELL_SPACE:
      size = 8 * kPointerSize * KB;
      break;
    case CODE_SPACE: {
      CodeRange* code_range = heap()->isolate()->code_range();
      if (code_range != NULL && code_range->valid()) {
        // When code range exists, code pages are allocated in a special way
        // (from the reserved code range). That part of the code is not yet
        // upgraded to handle small pages.
        size = AreaSize();
      } else {
        size =
            RoundUp(480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100,
                    kPointerSize);
      }
      break;
    }
    default:
      UNREACHABLE();
  }
  return Min(size, AreaSize());
}

int PagedSpace::CountTotalPages() {
  PageIterator it(this);
  int count = 0;
  while (it.has_next()) {
    it.next();
    count++;
  }
  return count;
}

void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
  sizes->huge_size_ = page->available_in_huge_free_list();
  sizes->small_size_ = page->available_in_small_free_list();
  sizes->medium_size_ = page->available_in_medium_free_list();
  sizes->large_size_ = page->available_in_large_free_list();
}

void PagedSpace::ResetFreeListStatistics() {
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    page->ResetFreeListStatistics();
  }
}

void PagedSpace::IncreaseCapacity(int size) {
  accounting_stats_.ExpandSpace(size);
}

void PagedSpace::ReleasePage(Page* page) {
  DCHECK(page->LiveBytes() == 0);
  DCHECK(AreaSize() == page->area_size());

  if (page->WasSwept()) {
    intptr_t size = free_list_.EvictFreeListItems(page);
    accounting_stats_.AllocateBytes(size);
    DCHECK_EQ(AreaSize(), static_cast<int>(size));
  } else {
    DecreaseUnsweptFreeBytes(page);
  }

  if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
    heap()->decrement_scan_on_scavenge_pages();
    page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  DCHECK(!free_list_.ContainsPageFreeListItems(page));

  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
    allocation_info_.set_top(NULL);
    allocation_info_.set_limit(NULL);
  }

  page->Unlink();
  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
    heap()->isolate()->memory_allocator()->Free(page);
  } else {
    heap()->QueueMemoryChunkForFree(page);
  }

  DCHECK(Capacity() > 0);
  accounting_stats_.ShrinkSpace(AreaSize());
}

void PagedSpace::CreateEmergencyMemory() {
  emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
      AreaSize(), AreaSize(), executable(), this);
}

void PagedSpace::FreeEmergencyMemory() {
  Page* page = static_cast<Page*>(emergency_memory_);
  DCHECK(page->LiveBytes() == 0);
  DCHECK(AreaSize() == page->area_size());
  DCHECK(!free_list_.ContainsPageFreeListItems(page));
  heap()->isolate()->memory_allocator()->Free(page);
  emergency_memory_ = NULL;
}

void PagedSpace::UseEmergencyMemory() {
  Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
  page->InsertAfter(anchor_.prev_page());
  emergency_memory_ = NULL;
}


#ifdef DEBUG
void PagedSpace::Print() {}
#endif


#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // We can only iterate over the pages if they were swept precisely.
  if (!swept_precisely_) return;

  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationTop(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->WasSweptPrecisely());
    HeapObjectIterator it(page, NULL);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->ObjectVerify();

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    CHECK_LE(black_size, page->LiveBytes());
  }
  CHECK(allocation_pointer_found_in_space);
}
#endif  // VERIFY_HEAP

// -----------------------------------------------------------------------------
// NewSpace implementation

bool NewSpace::SetUp(int reserved_semispace_capacity,
                     int maximum_semispace_capacity) {
  // Set up new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();

  size_t size = 2 * reserved_semispace_capacity;
  Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
      size, size, &reservation_);
  if (base == NULL) return false;

  chunk_base_ = base;
  chunk_size_ = static_cast<uintptr_t>(size);
  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));

  DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
  DCHECK(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and set up the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name)                        \
  allocated_histogram_[name].set_name(#name); \
  promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
  DCHECK(static_cast<intptr_t>(chunk_size_) >=
         2 * heap()->ReservedSemiSpaceSize());
  DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));

  to_space_.SetUp(chunk_base_, initial_semispace_capacity,
                  maximum_semispace_capacity);
  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
                    initial_semispace_capacity, maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }
  DCHECK(!from_space_.is_committed());  // No need to use memory yet.

  start_ = chunk_base_;
  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;

  ResetAllocationInfo();

  return true;
}

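// Note (illustrative): because the whole chunk is aligned to its own
// power-of-two size, the masks computed above allow a fast containment test
// for new space with no page walk:
//
//   bool contains = (reinterpret_cast<uintptr_t>(addr) & address_mask_) ==
//                   reinterpret_cast<uintptr_t>(start_);
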
void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  start_ = NULL;
  allocation_info_.set_top(NULL);
  allocation_info_.set_limit(NULL);

  to_space_.TearDown();
  from_space_.TearDown();

  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));

  DCHECK(reservation_.IsReserved());
  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
                                                    NOT_EXECUTABLE);
  chunk_base_ = NULL;
  chunk_size_ = 0;
}

void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }

void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  DCHECK(Capacity() < MaximumCapacity());
  int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from
      // space, attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

void NewSpace::UpdateAllocationInfo() {
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  allocation_info_.set_top(to_space_.page_low());
  allocation_info_.set_limit(to_space_.page_high());
  UpdateInlineAllocationLimit(0);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

void NewSpace::ResetAllocationInfo() {
  to_space_.Reset();
  UpdateAllocationInfo();
  top_on_previous_step_ = allocation_info_.top();
  // Clear all mark-bits in the to-space.
  NewSpacePageIterator it(&to_space_);
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}

void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
  if (heap()->inline_allocation_disabled()) {
    // Lowest limit when linear allocation was disabled.
    Address high = to_space_.page_high();
    Address new_top = allocation_info_.top() + size_in_bytes;
    allocation_info_.set_limit(Min(new_top, high));
  } else if (inline_allocation_limit_step() == 0) {
    // Normal limit is the end of the current page.
    allocation_info_.set_limit(to_space_.page_high());
  } else {
    // Lower limit during incremental marking.
    Address high = to_space_.page_high();
    Address new_top = allocation_info_.top() + size_in_bytes;
    Address new_limit = new_top + inline_allocation_limit_step_;
    allocation_info_.set_limit(Min(new_limit, high));
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

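// Example (illustrative): with inline_allocation_limit_step_ == 1 KB, a
// request for 64 bytes places the limit at top + 64 + 1 KB (capped at the
// page end), so at most roughly 1 KB is allocated between the incremental
// marking steps taken in SlowAllocateRaw() below.
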
bool NewSpace::AddFreshPage() {
  Address top = allocation_info_.top();
  if (NewSpacePage::IsAtStart(top)) {
    // The current page is already empty. Don't try to make another.

    // We should only get here if someone asks to allocate more
    // than what can be stored in a single page.
    // TODO(gc): Change the limit on new-space allocation to prevent this
    // from happening (all such allocations should go directly to LOSpace).
    return false;
  }
  if (!to_space_.AdvancePage()) {
    // Failed to get a new page in to-space.
    return false;
  }

  // Clear remainder of current page.
  Address limit = NewSpacePage::FromLimit(top)->area_end();
  if (heap()->gc_state() == Heap::SCAVENGE) {
    heap()->promotion_queue()->SetNewLimit(limit);
    heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
  }

  int remaining_in_page = static_cast<int>(limit - top);
  heap()->CreateFillerObjectAt(top, remaining_in_page);
  pages_used_++;
  UpdateAllocationInfo();

  return true;
}

AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top();
  Address high = to_space_.page_high();
  if (allocation_info_.limit() < high) {
    // Either the limit has been lowered because linear allocation was disabled
    // or because incremental marking wants to get a chance to do a step. Set
    // the new limit accordingly.
    Address new_top = old_top + size_in_bytes;
    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(bytes_allocated,
                                        IncrementalMarking::GC_VIA_STACK_GUARD);
    UpdateInlineAllocationLimit(size_in_bytes);
    top_on_previous_step_ = new_top;
    return AllocateRaw(size_in_bytes);
  } else if (AddFreshPage()) {
    // Switched to new page. Try allocating again.
    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(bytes_allocated,
                                        IncrementalMarking::GC_VIA_STACK_GUARD);
    top_on_previous_step_ = to_space_.page_low();
    return AllocateRaw(size_in_bytes);
  } else {
    return AllocationResult::Retry();
  }
}

#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.first_page()->area_start();
  CHECK_EQ(current, to_space_.space_start());

  while (current != top()) {
    if (!NewSpacePage::IsAtEnd(current)) {
      // The allocation pointer should not be in the middle of an object.
      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
            current < top());

      HeapObject* object = HeapObject::FromAddress(current);

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // The object should not be code or a map.
      CHECK(!object->IsMap());
      CHECK(!object->IsCode());

      // The object itself should look OK.
      object->ObjectVerify();

      // All the interior pointers should be contained in the heap.
      VerifyPointersVisitor visitor;
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, &visitor);

      current += size;
    } else {
      // At end of page, switch to next page.
      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
      // Next page should be valid.
      CHECK(!page->is_anchor());
      current = page->area_start();
    }
  }

  // Check semi-spaces.
  CHECK_EQ(from_space_.id(), kFromSpace);
  CHECK_EQ(to_space_.id(), kToSpace);
  from_space_.Verify();
  to_space_.Verify();
}
#endif

// -----------------------------------------------------------------------------
// SemiSpace implementation

void SemiSpace::SetUp(Address start, int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise. In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  DCHECK(maximum_capacity >= Page::kPageSize);
  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
  capacity_ = initial_capacity;
  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
  maximum_committed_ = 0;
  committed_ = false;
  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_;
}

void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}

bool SemiSpace::Commit() {
  DCHECK(!is_committed());
  int pages = capacity_ / Page::kPageSize;
  if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, capacity_,
                                                          executable())) {
    return false;
  }

  NewSpacePage* current = anchor();
  for (int i = 0; i < pages; i++) {
    NewSpacePage* new_page =
        NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
    new_page->InsertAfter(current);
    current = new_page;
  }

  SetCapacity(capacity_);
  committed_ = true;
  Reset();
  return true;
}

bool SemiSpace::Uncommit() {
  DCHECK(is_committed());
  Address start = start_ + maximum_capacity_ - capacity_;
  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
    return false;
  }
  anchor()->set_next_page(anchor());
  anchor()->set_prev_page(anchor());

  committed_ = false;
  return true;
}

size_t SemiSpace::CommittedPhysicalMemory() {
  if (!is_committed()) return 0;
  size_t size = 0;
  NewSpacePageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}

bool SemiSpace::GrowTo(int new_capacity) {
  if (!is_committed()) {
    if (!Commit()) return false;
  }
  DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
  DCHECK(new_capacity <= maximum_capacity_);
  DCHECK(new_capacity > capacity_);
  int pages_before = capacity_ / Page::kPageSize;
  int pages_after = new_capacity / Page::kPageSize;

  size_t delta = new_capacity - capacity_;

  DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
          start_ + capacity_, delta, executable())) {
    return false;
  }
  SetCapacity(new_capacity);
  NewSpacePage* last_page = anchor()->prev_page();
  DCHECK(last_page != anchor());
  for (int i = pages_before; i < pages_after; i++) {
    Address page_address = start_ + i * Page::kPageSize;
    NewSpacePage* new_page =
        NewSpacePage::Initialize(heap(), page_address, this);
    new_page->InsertAfter(last_page);
    Bitmap::Clear(new_page);
    // Duplicate the flags that were set on the old page.
    new_page->SetFlags(last_page->GetFlags(),
                       NewSpacePage::kCopyOnFlipFlagsMask);
    last_page = new_page;
  }
  return true;
}

bool SemiSpace::ShrinkTo(int new_capacity) {
  DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
  DCHECK(new_capacity >= initial_capacity_);
  DCHECK(new_capacity < capacity_);
  if (is_committed()) {
    size_t delta = capacity_ - new_capacity;
    DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));

    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
    if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
      return false;
    }

    int pages_after = new_capacity / Page::kPageSize;
    NewSpacePage* new_last_page =
        NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
    new_last_page->set_next_page(anchor());
    anchor()->set_prev_page(new_last_page);
    DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
  }

  SetCapacity(new_capacity);

  return true;
}

void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
  anchor_.set_owner(this);
  // Fixup back-pointers to anchor. Address of anchor changes
  // when we swap.
  anchor_.prev_page()->set_next_page(&anchor_);
  anchor_.next_page()->set_prev_page(&anchor_);

  bool becomes_to_space = (id_ == kFromSpace);
  id_ = becomes_to_space ? kToSpace : kFromSpace;
  NewSpacePage* page = anchor_.next_page();
  while (page != &anchor_) {
    page->set_owner(this);
    page->SetFlags(flags, mask);
    if (becomes_to_space) {
      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
      page->SetFlag(MemoryChunk::IN_TO_SPACE);
      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
      page->ResetLiveBytes();
    } else {
      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
    }
    DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
    DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
    page = page->next_page();
  }
}

void SemiSpace::Reset() {
  DCHECK(anchor_.next_page() != &anchor_);
  current_page_ = anchor_.next_page();
}

void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
  // We won't be swapping semispaces without data in them.
  DCHECK(from->anchor_.next_page() != &from->anchor_);
  DCHECK(to->anchor_.next_page() != &to->anchor_);

  // Swap bits.
  SemiSpace tmp = *from;
  *from = *to;
  *to = tmp;

  // Fixup back-pointers to the page list anchor now that its address
  // has changed.
  // Swap to/from-space bits on pages.
  // Copy GC flags from old active space (from-space) to new (to-space).
  intptr_t flags = from->current_page()->GetFlags();
  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);

  from->FlipPages(0, 0);
}

void SemiSpace::SetCapacity(int new_capacity) {
  capacity_ = new_capacity;
  if (capacity_ > maximum_committed_) {
    maximum_committed_ = capacity_;
  }
}

void SemiSpace::set_age_mark(Address mark) {
  DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
  age_mark_ = mark;
  // Mark all pages up to the one containing mark.
  NewSpacePageIterator it(space_start(), mark);
  while (it.has_next()) {
    it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
  }
}


#ifdef DEBUG
void SemiSpace::Print() {}
#endif


#ifdef VERIFY_HEAP
void SemiSpace::Verify() {
  bool is_from_space = (id_ == kFromSpace);
  NewSpacePage* page = anchor_.next_page();
  CHECK(anchor_.semi_space() == this);
  while (page != &anchor_) {
    CHECK(page->semi_space() == this);
    CHECK(page->InNewSpace());
    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
                                         : MemoryChunk::IN_FROM_SPACE));
    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
    if (!is_from_space) {
      // The pointers-from-here-are-interesting flag isn't updated dynamically
      // on from-space pages, so it might be out of sync with the marking state.
      if (page->heap()->incremental_marking()->IsMarking()) {
        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      } else {
        CHECK(
            !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      }
      // TODO(gc): Check that the live_bytes_count_ field matches the
      // black marking on the page (if we make it match in new-space).
    }
    CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
    CHECK(page->prev_page()->next_page() == page);
    page = page->next_page();
  }
}
#endif


#ifdef DEBUG
void SemiSpace::AssertValidRange(Address start, Address end) {
  // Addresses belong to the same semi-space.
  NewSpacePage* page = NewSpacePage::FromLimit(start);
  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
  SemiSpace* space = page->semi_space();
  CHECK_EQ(space, end_page->semi_space());
  // Start address is before end address, either on same page,
  // or end address is on a later page in the linked list of
  // semi-space pages.
  if (page == end_page) {
    CHECK(start <= end);
  } else {
    while (page != end_page) {
      page = page->next_page();
      CHECK_NE(page, space->anchor());
    }
  }
}
#endif

// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}

SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}

SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}

SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
  Initialize(from, to, NULL);
}

void SemiSpaceIterator::Initialize(Address start, Address end,
                                   HeapObjectCallback size_func) {
  SemiSpace::AssertValidRange(start, end);
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}


#ifdef DEBUG
// heap_histograms is shared, always clear it before using it.
static void ClearHistograms(Isolate* isolate) {
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  isolate->js_spill_information()->Clear();
}

static void ClearCodeKindStatistics(int* code_kind_statistics) {
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    code_kind_statistics[i] = 0;
  }
}

static void ReportCodeKindStatistics(int* code_kind_statistics) {
  PrintF("\n   Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (code_kind_statistics[i] > 0) {
      PrintF("     %-20s: %10d bytes\n",
             Code::Kind2String(static_cast<Code::Kind>(i)),
             code_kind_statistics[i]);
    }
  }
  PrintF("\n");
}

static int CollectHistogramInfo(HeapObject* obj) {
  Isolate* isolate = obj->GetIsolate();
  InstanceType type = obj->map()->instance_type();
  DCHECK(0 <= type && type <= LAST_TYPE);
  DCHECK(isolate->heap_histograms()[type].name() != NULL);
  isolate->heap_histograms()[type].increment_number(1);
  isolate->heap_histograms()[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)
        ->IncrementSpillStatistics(isolate->js_spill_information());
  }

  return obj->Size();
}

static void ReportHistogram(Isolate* isolate, bool print_spill) {
  PrintF("\n  Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (isolate->heap_histograms()[i].number() > 0) {
      PrintF("    %-34s%10d (%10d bytes)\n",
             isolate->heap_histograms()[i].name(),
             isolate->heap_histograms()[i].number(),
             isolate->heap_histograms()[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)               \
  string_number += isolate->heap_histograms()[type].number(); \
  string_bytes += isolate->heap_histograms()[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    isolate->js_spill_information()->Print();
  }
}
#endif  // DEBUG

// Support for statistics gathering for --heap-stats and --log-gc.
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}

// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens when --log-gc flag is set.
void NewSpace::CollectStatistics() {
  ClearHistograms();
  SemiSpaceIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
    RecordAllocation(obj);
}

static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
                               const char* description) {
  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name) \
  string_number += info[type].number();         \
  string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(isolate,
        HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
                                       info[i].bytes()));
    }
  }
  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
}

void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / Capacity();
    PrintF("  capacity: %" V8_PTR_PREFIX
           "d"
           ", available: %" V8_PTR_PREFIX "d, %%%d\n",
           Capacity(), Available(), static_cast<int>(pct * 100));
    PrintF("\n  Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF("    %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

  if (FLAG_log_gc) {
    Isolate* isolate = heap()->isolate();
    DoReportStatistics(isolate, allocated_histogram_, "allocated");
    DoReportStatistics(isolate, promoted_histogram_, "promoted");
  }
}

void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  DCHECK(0 <= type && type <= LAST_TYPE);
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}

void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  DCHECK(0 <= type && type <= LAST_TYPE);
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}

size_t NewSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = to_space_.CommittedPhysicalMemory();
  if (from_space_.is_committed()) {
    size += from_space_.CommittedPhysicalMemory();
  }
  return size;
}

// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation

void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
  DCHECK(size_in_bytes > 0);
  DCHECK(IsAligned(size_in_bytes, kPointerSize));

  // We write a map and possibly size information to the block. If the block
  // is big enough to be a FreeSpace with at least one extra word (the next
  // pointer), we set its map to be the free space map and its size to an
  // appropriate array length for the desired size from HeapObject::Size().
  // If the block is too small (e.g., one or two words) to hold both a size
  // field and a next pointer, we give it a filler map that gives it the
  // correct size.
  if (size_in_bytes > FreeSpace::kHeaderSize) {
    // Can't use FreeSpace::cast because it fails during deserialization.
    // We have to set the size first with a release store before we store
    // the map because a concurrent store buffer scan on scavenge must not
    // observe a map with an invalid size.
    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
    this_as_free_space->nobarrier_set_size(size_in_bytes);
    synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
  } else if (size_in_bytes == kPointerSize) {
    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
  } else if (size_in_bytes == 2 * kPointerSize) {
    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
  } else {
    UNREACHABLE();
  }
  // We would like to DCHECK(Size() == size_in_bytes) but this would fail
  // during deserialization because the free space map is not done yet.
}

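// For example (illustrative; the exact constants depend on the build, e.g.
// kPointerSize == 8 on x64 and FreeSpace::kHeaderSize == 2 * kPointerSize):
//   set_size(heap, 8)  -> one-pointer filler map; the size is implied.
//   set_size(heap, 16) -> two-pointer filler map; the size is implied.
//   set_size(heap, 64) -> free space map; the second word holds the size
//                         (64) and the following word is available for the
//                         free-list next pointer (see kNextOffset below).
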
FreeListNode* FreeListNode::next() {
  DCHECK(IsFreeListNode(this));
  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
    DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
    return reinterpret_cast<FreeListNode*>(
        Memory::Address_at(address() + kNextOffset));
  } else {
    return reinterpret_cast<FreeListNode*>(
        Memory::Address_at(address() + kPointerSize));
  }
}

FreeListNode** FreeListNode::next_address() {
  DCHECK(IsFreeListNode(this));
  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
    DCHECK(Size() >= kNextOffset + kPointerSize);
    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
  } else {
    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
  }
}

void FreeListNode::set_next(FreeListNode* next) {
  DCHECK(IsFreeListNode(this));
  // While we are booting the VM the free space map will actually be null. So
  // we have to make sure that we don't try to use it for anything at that
  // stage.
  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
    DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
    base::NoBarrier_Store(
        reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
        reinterpret_cast<base::AtomicWord>(next));
  } else {
    base::NoBarrier_Store(
        reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
        reinterpret_cast<base::AtomicWord>(next));
  }
}

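// Note on the relaxed atomic stores above: free-list links can be touched
// both by the main thread and by concurrent sweeper threads (see
// FreeListCategory::Concatenate below), so the next pointer is written with
// base::NoBarrier_Store rather than a plain write; only atomicity of the
// word-sized store is relied upon, not any memory ordering.
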
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
  intptr_t free_bytes = 0;
  if (category->top() != NULL) {
    // This is safe (not going to deadlock) since Concatenate operations
    // are never performed on the same free lists at the same time in
    // reverse order.
    base::LockGuard<base::Mutex> target_lock_guard(mutex());
    base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
    DCHECK(category->end_ != NULL);
    free_bytes = category->available();
    if (end_ == NULL) {
      end_ = category->end();
    } else {
      category->end()->set_next(top());
    }
    set_top(category->top());
    base::NoBarrier_Store(&top_, category->top_);
    available_ += category->available();
    category->Reset();
  }
  return free_bytes;
}

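// Usage sketch (hypothetical names, for illustration only): concurrent
// sweeping accumulates freed blocks on a private free list which the main
// thread later splices into the space's list, e.g.:
//
//   intptr_t added = space->free_list()->Concatenate(&private_free_list);
//
// Splicing is O(1) per category: the source list's end node is linked to the
// target's old top, the byte counters are merged, and the source category is
// reset to empty.
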
void FreeListCategory::Reset() {
  set_top(NULL);
  set_end(NULL);
  set_available(0);
}

intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
  int sum = 0;
  FreeListNode* t = top();
  FreeListNode** n = &t;
  while (*n != NULL) {
    if (Page::FromAddress((*n)->address()) == p) {
      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
      sum += free_space->Size();
      *n = (*n)->next();
    } else {
      n = (*n)->next_address();
    }
  }
  set_top(t);
  if (top() == NULL) {
    set_end(NULL);
  }
  available_ -= sum;
  return sum;
}

bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
  FreeListNode* node = top();
  while (node != NULL) {
    if (Page::FromAddress(node->address()) == p) return true;
    node = node->next();
  }
  return false;
}

FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
  FreeListNode* node = top();

  if (node == NULL) return NULL;

  while (node != NULL &&
         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
    available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
    node = node->next();
  }

  if (node != NULL) {
    set_top(node->next());
    *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
    available_ -= *node_size;
  } else {
    set_top(NULL);
  }

  if (top() == NULL) {
    set_end(NULL);
  }

  return node;
}

FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
                                                 int* node_size) {
  FreeListNode* node = PickNodeFromList(node_size);
  if (node != NULL && *node_size < size_in_bytes) {
    Free(node, *node_size);
    *node_size = 0;
    return NULL;
  }
  return node;
}

void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
  node->set_next(top());
  set_top(node);
  if (end_ == NULL) {
    end_ = node;
  }
  available_ += size_in_bytes;
}

void FreeListCategory::RepairFreeList(Heap* heap) {
  FreeListNode* n = top();
  while (n != NULL) {
    Map** map_location = reinterpret_cast<Map**>(n->address());
    if (*map_location == NULL) {
      *map_location = heap->free_space_map();
    } else {
      DCHECK(*map_location == heap->free_space_map());
    }
    n = n->next();
  }
}

FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
  Reset();
}

intptr_t FreeList::Concatenate(FreeList* free_list) {
  intptr_t free_bytes = 0;
  free_bytes += small_list_.Concatenate(free_list->small_list());
  free_bytes += medium_list_.Concatenate(free_list->medium_list());
  free_bytes += large_list_.Concatenate(free_list->large_list());
  free_bytes += huge_list_.Concatenate(free_list->huge_list());
  return free_bytes;
}

void FreeList::Reset() {
  small_list_.Reset();
  medium_list_.Reset();
  large_list_.Reset();
  huge_list_.Reset();
}

int FreeList::Free(Address start, int size_in_bytes) {
  if (size_in_bytes == 0) return 0;

  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(heap_, size_in_bytes);
  Page* page = Page::FromAddress(start);

  // Early return to drop too-small blocks on the floor.
  if (size_in_bytes < kSmallListMin) {
    page->add_non_available_small_blocks(size_in_bytes);
    return size_in_bytes;
  }

  // Insert other blocks at the head of a free list of the appropriate
  // magnitude.
  if (size_in_bytes <= kSmallListMax) {
    small_list_.Free(node, size_in_bytes);
    page->add_available_in_small_free_list(size_in_bytes);
  } else if (size_in_bytes <= kMediumListMax) {
    medium_list_.Free(node, size_in_bytes);
    page->add_available_in_medium_free_list(size_in_bytes);
  } else if (size_in_bytes <= kLargeListMax) {
    large_list_.Free(node, size_in_bytes);
    page->add_available_in_large_free_list(size_in_bytes);
  } else {
    huge_list_.Free(node, size_in_bytes);
    page->add_available_in_huge_free_list(size_in_bytes);
  }

  DCHECK(IsVeryLong() || available() == SumFreeLists());
  return 0;
}

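// For example (using the thresholds declared in spaces.h): a block smaller
// than kSmallListMin is deliberately wasted and only recorded in the page's
// non-available counter; otherwise it is prepended in O(1) to small
// (<= kSmallListMax), medium (<= kMediumListMax), large (<= kLargeListMax),
// or huge. The per-page available_in_*_free_list counters mirror the list
// contents so evacuation heuristics can estimate per-page free space
// without walking the lists.
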
FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
  FreeListNode* node = NULL;
  Page* page = NULL;

  if (size_in_bytes <= kSmallAllocationMax) {
    node = small_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      DCHECK(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_small_free_list(-(*node_size));
      DCHECK(IsVeryLong() || available() == SumFreeLists());
      return node;
    }
  }

  if (size_in_bytes <= kMediumAllocationMax) {
    node = medium_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      DCHECK(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_medium_free_list(-(*node_size));
      DCHECK(IsVeryLong() || available() == SumFreeLists());
      return node;
    }
  }

  if (size_in_bytes <= kLargeAllocationMax) {
    node = large_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      DCHECK(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_large_free_list(-(*node_size));
      DCHECK(IsVeryLong() || available() == SumFreeLists());
      return node;
    }
  }

  int huge_list_available = huge_list_.available();
  FreeListNode* top_node = huge_list_.top();
  for (FreeListNode** cur = &top_node; *cur != NULL;
       cur = (*cur)->next_address()) {
    FreeListNode* cur_node = *cur;
    while (cur_node != NULL &&
           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
      int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
      huge_list_available -= size;
      page = Page::FromAddress(cur_node->address());
      page->add_available_in_huge_free_list(-size);
      cur_node = cur_node->next();
    }

    *cur = cur_node;
    if (cur_node == NULL) {
      huge_list_.set_end(NULL);
      break;
    }

    DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
    int size = cur_as_free_space->Size();
    if (size >= size_in_bytes) {
      // Large enough node found. Unlink it from the list.
      node = *cur;
      *cur = node->next();
      *node_size = size;
      huge_list_available -= size;
      page = Page::FromAddress(node->address());
      page->add_available_in_huge_free_list(-size);
      break;
    }
  }

  huge_list_.set_top(top_node);
  if (huge_list_.top() == NULL) {
    huge_list_.set_end(NULL);
  }
  huge_list_.set_available(huge_list_available);

  if (node != NULL) {
    DCHECK(IsVeryLong() || available() == SumFreeLists());
    return node;
  }

  if (size_in_bytes <= kSmallListMax) {
    node = small_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      DCHECK(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_small_free_list(-(*node_size));
    }
  } else if (size_in_bytes <= kMediumListMax) {
    node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      DCHECK(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_medium_free_list(-(*node_size));
    }
  } else if (size_in_bytes <= kLargeListMax) {
    node = large_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      DCHECK(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_large_free_list(-(*node_size));
    }
  }

  DCHECK(IsVeryLong() || available() == SumFreeLists());
  return node;
}

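// The search above proceeds in three stages:
// 1. Categories whose minimum node size already guarantees a fit
//    (size_in_bytes <= k*AllocationMax) are tried with a cheap pop.
// 2. The huge list is scanned linearly, unlinking nodes that live on
//    evacuation candidates along the way, until a node of at least
//    size_in_bytes is found.
// 3. Failing that, the category matching size_in_bytes is tried directly;
//    its head node may turn out to be too small, in which case
//    PickNodeFromList(size_in_bytes, node_size) re-frees it and returns
//    NULL.
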
// Allocation on the old space free list. If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then NULL is returned, and the caller can perform a GC
// or allocate a new page before retrying.
HeapObject* FreeList::Allocate(int size_in_bytes) {
  DCHECK(0 < size_in_bytes);
  DCHECK(size_in_bytes <= kMaxBlockSize);
  DCHECK(IsAligned(size_in_bytes, kPointerSize));
  // Don't free list allocate if there is linear space available.
  DCHECK(owner_->limit() - owner_->top() < size_in_bytes);

  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap. This also puts it back in the free list
  // if it is big enough.
  owner_->Free(owner_->top(), old_linear_size);

  owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
                                                      old_linear_size);

  int new_node_size = 0;
  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
  if (new_node == NULL) {
    owner_->SetTopAndLimit(NULL, NULL);
    return NULL;
  }

  int bytes_left = new_node_size - size_in_bytes;
  DCHECK(bytes_left >= 0);

#ifdef DEBUG
  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
    reinterpret_cast<Object**>(new_node->address())[i] =
        Smi::FromInt(kCodeZapValue);
  }
#endif

  // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  const int kThreshold = IncrementalMarking::kAllocatedThreshold;

  // Memory in the linear allocation area is counted as allocated. We may free
  // a little of this again immediately - see below.
  owner_->Allocate(new_node_size);

  if (owner_->heap()->inline_allocation_disabled()) {
    // Keep the linear allocation area empty if requested to do so, just
    // return the area back to the free list instead.
    owner_->Free(new_node->address() + size_in_bytes, bytes_left);
    DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
  } else if (bytes_left > kThreshold &&
             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
             FLAG_incremental_marking_steps) {
    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
    // We don't want to give too large linear areas to the allocator while
    // incremental marking is going on, because we won't check again whether
    // we want to do another increment until the linear area is used up.
    owner_->Free(new_node->address() + size_in_bytes + linear_size,
                 new_node_size - size_in_bytes - linear_size);
    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                           new_node->address() + size_in_bytes + linear_size);
  } else if (bytes_left > 0) {
    // Normally we give the rest of the node to the allocator as its new
    // linear allocation area.
    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                           new_node->address() + new_node_size);
  } else {
    // TODO(gc) Try not freeing linear allocation region when bytes_left
    // is zero.
    owner_->SetTopAndLimit(NULL, NULL);
  }

  return new_node;
}

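// Worked example (illustrative numbers): a 64-byte request served from a
// 256-byte free-list node leaves bytes_left == 192. Outside incremental
// marking the allocator receives the whole remainder as its linear area,
// i.e. top == new_node + 64 and limit == new_node + 256. While marking is
// incomplete, only RoundSizeDownToObjectAlignment(kThreshold) bytes of
// linear area are kept and the tail is re-freed, so another marking step is
// triggered again reasonably soon.
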
intptr_t FreeList::EvictFreeListItems(Page* p) {
  intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
  p->set_available_in_huge_free_list(0);

  if (sum < p->area_size()) {
    sum += small_list_.EvictFreeListItemsInList(p) +
           medium_list_.EvictFreeListItemsInList(p) +
           large_list_.EvictFreeListItemsInList(p);
    p->set_available_in_small_free_list(0);
    p->set_available_in_medium_free_list(0);
    p->set_available_in_large_free_list(0);
  }

  return sum;
}

bool FreeList::ContainsPageFreeListItems(Page* p) {
  return huge_list_.ContainsPageFreeListItemsInList(p) ||
         small_list_.ContainsPageFreeListItemsInList(p) ||
         medium_list_.ContainsPageFreeListItemsInList(p) ||
         large_list_.ContainsPageFreeListItemsInList(p);
}

void FreeList::RepairLists(Heap* heap) {
  small_list_.RepairFreeList(heap);
  medium_list_.RepairFreeList(heap);
  large_list_.RepairFreeList(heap);
  huge_list_.RepairFreeList(heap);
}

#ifdef DEBUG
intptr_t FreeListCategory::SumFreeList() {
  intptr_t sum = 0;
  FreeListNode* cur = top();
  while (cur != NULL) {
    DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
    sum += cur_as_free_space->nobarrier_size();
    cur = cur->next();
  }
  return sum;
}

static const int kVeryLongFreeList = 500;


int FreeListCategory::FreeListLength() {
  int length = 0;
  FreeListNode* cur = top();
  while (cur != NULL) {
    length++;
    cur = cur->next();
    if (length == kVeryLongFreeList) return length;
  }
  return length;
}

bool FreeList::IsVeryLong() {
  if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
  if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
  if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
  if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
  return false;
}

// This can take a very long time because it is linear in the number of entries
// on the free list, so it should not be called if FreeListLength returns
// kVeryLongFreeList.
intptr_t FreeList::SumFreeLists() {
  intptr_t sum = small_list_.SumFreeList();
  sum += medium_list_.SumFreeList();
  sum += large_list_.SumFreeList();
  sum += huge_list_.SumFreeList();
  return sum;
}
#endif

// -----------------------------------------------------------------------------
// OldSpace implementation

void PagedSpace::PrepareForMarkCompact() {
  // We don't have a linear allocation area while sweeping. It will be restored
  // on the first allocation after the sweep.
  EmptyAllocationInfo();

  // This counter will be increased for pages which will be swept by the
  // sweeper threads.
  unswept_free_bytes_ = 0;

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}

intptr_t PagedSpace::SizeOfObjects() {
  DCHECK(heap()->mark_compact_collector()->sweeping_in_progress() ||
         (unswept_free_bytes_ == 0));
  return Size() - unswept_free_bytes_ - (limit() - top());
}

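// That is: object bytes = Size() (committed object area)
//                       - unswept_free_bytes_ (will be freed by sweeping)
//                       - (limit() - top()) (unused linear allocation area).
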
// After we have booted, we have created a map which represents free space
// on the heap. If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally NULL), so we need to
// fix them.
void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); }

void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
  if (allocation_info_.top() >= allocation_info_.limit()) return;

  if (Page::FromAllocationTop(allocation_info_.top())
          ->IsEvacuationCandidate()) {
    // Create a filler object to keep the page iterable if it was iterable.
    int remaining =
        static_cast<int>(allocation_info_.limit() - allocation_info_.top());
    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);

    allocation_info_.set_top(NULL);
    allocation_info_.set_limit(NULL);
  }
}

HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
    int size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    // Wait for the sweeper threads here and complete the sweeping phase.
    collector->EnsureSweepingCompleted();

    // After waiting for the sweeper threads, there may be new free-list
    // entries.
    return free_list_.Allocate(size_in_bytes);
  }
  return NULL;
}

HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
  // Allocation in this space has failed.

  MarkCompactCollector* collector = heap()->mark_compact_collector();
  // Sweeping is still in progress.
  if (collector->sweeping_in_progress()) {
    // First try to refill the free-list; concurrent sweeper threads
    // may have freed some objects in the meantime.
    collector->RefillFreeList(this);

    // Retry the free list allocation.
    HeapObject* object = free_list_.Allocate(size_in_bytes);
    if (object != NULL) return object;

    // If sweeping is still in progress try to sweep pages on the main thread.
    int free_chunk = collector->SweepInParallel(this, size_in_bytes);
    collector->RefillFreeList(this);
    if (free_chunk >= size_in_bytes) {
      HeapObject* object = free_list_.Allocate(size_in_bytes);
      // We should be able to allocate an object here since we just freed that
      // much memory.
      DCHECK(object != NULL);
      if (object != NULL) return object;
    }
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    // If sweeper threads are active, wait for them at that point and steal
    // elements from their free-lists.
    HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
    if (object != NULL) return object;
  }

  // Try to expand the space and allocate in the new next page.
  if (Expand()) {
    DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
    return free_list_.Allocate(size_in_bytes);
  }

  // If sweeper threads are active, wait for them at that point and steal
  // elements from their free-lists. Allocation may still fail here, which
  // would indicate that there is not enough memory for the given allocation.
  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
}

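// The slow path thus degrades gracefully: refill from concurrent sweeping,
// sweep a little on the main thread, wait for sweeping to finish if the
// old-generation limit was hit, expand the space with a new page, and only
// then block on sweeping one last time before reporting failure (the caller
// may then GC and retry).
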
#ifdef DEBUG
void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ReportCodeKindStatistics(isolate->code_kind_statistics());
  PrintF(
      "Code comment statistics (\"   [ comment-txt   :    size/   "
      "count  (average)\"):\n");
  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    if (cs.size > 0) {
      PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
             cs.size / cs.count);
    }
  }
  PrintF("\n");
}

void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ClearCodeKindStatistics(isolate->code_kind_statistics());
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    comments_statistics[i].Clear();
  }
  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
  comments_statistics[CommentStatistic::kMaxComments].size = 0;
  comments_statistics[CommentStatistic::kMaxComments].count = 0;
}

// Adds a comment to the 'comment_statistics' table. Performance OK as long as
// 'kMaxComments' is small.
static void EnterComment(Isolate* isolate, const char* comment, int delta) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  // Do not count empty comments.
  if (delta <= 0) return;
  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
  // Search for a free or matching entry in 'comments_statistics': 'cs'
  // points to the result.
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    if (comments_statistics[i].comment == NULL) {
      cs = &comments_statistics[i];
      cs->comment = comment;
      break;
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
      break;
    }
  }
  // Update the entry for 'comment'.
  cs->size += delta;
  cs->count += 1;
}

// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']'). RelocIterator 'it' must point to a comment reloc info.
static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  DCHECK(!it->done());
  DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip.
    return;
  }

  // Search for the end of the nested comment or a new nested comment.
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, and therefore exit
    // without a RelocIterator::done() check.
    DCHECK(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested comment.
      // A new nested comment; recurse.
      CollectCommentStatistics(isolate, it);
      // Skip code that was covered by the previous comment.
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(isolate, comment_txt, flat_delta);
}

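// For example, for assembler comments emitted as
//   [ IC miss
//   ...code...
//     [ stub call
//     ...code...
//     ]
//   ...code...
//   ]
// the recursive call attributes the inner code bytes to "stub call", while
// flat_delta for "IC miss" covers only the bytes outside the nested range.
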
// Collects code size statistics:
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      DCHECK(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);
    }
  }
}

void PagedSpace::ReportStatistics() {
  int pct = static_cast<int>(Available() * 100 / Capacity());
  PrintF("  capacity: %" V8_PTR_PREFIX
         "d"
         ", waste: %" V8_PTR_PREFIX
         "d"
         ", available: %" V8_PTR_PREFIX "d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  if (!swept_precisely_) return;
  ClearHistograms(heap()->isolate());
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
    CollectHistogramInfo(obj);
  ReportHistogram(heap()->isolate(), true);
}
#endif  // DEBUG

// -----------------------------------------------------------------------------
// MapSpace implementation
// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
// there is at least one non-inlined virtual function. I would prefer to hide
// the VerifyObject definition behind VERIFY_HEAP.

void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }


// -----------------------------------------------------------------------------
// CellSpace and PropertyCellSpace implementation
// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
// there is at least one non-inlined virtual function. I would prefer to hide
// the VerifyObject definition behind VERIFY_HEAP.

void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); }


void PropertyCellSpace::VerifyObject(HeapObject* object) {
  CHECK(object->IsPropertyCell());
}

// -----------------------------------------------------------------------------
// LargeObjectIterator

LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_page_;
  size_func_ = NULL;
}


LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
                                         HeapObjectCallback size_func) {
  current_ = space->first_page_;
  size_func_ = size_func;
}


HeapObject* LargeObjectIterator::Next() {
  if (current_ == NULL) return NULL;

  HeapObject* object = current_->GetObject();
  current_ = current_->next_page();
  return object;
}

// -----------------------------------------------------------------------------
// LargeObjectSpace
static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }


LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
                                   AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      max_capacity_(max_capacity),
      first_page_(NULL),
      size_(0),
      page_count_(0),
      objects_size_(0),
      chunk_map_(ComparePointers, 1024) {}

bool LargeObjectSpace::SetUp() {
  first_page_ = NULL;
  size_ = 0;
  maximum_committed_ = 0;
  page_count_ = 0;
  objects_size_ = 0;
  chunk_map_.Clear();
  return true;
}

void LargeObjectSpace::TearDown() {
  while (first_page_ != NULL) {
    LargePage* page = first_page_;
    first_page_ = first_page_->next_page();
    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));

    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
        space, kAllocationActionFree, page->size());
    heap()->isolate()->memory_allocator()->Free(page);
  }
  SetUp();
}

AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
                                               Executability executable) {
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return AllocationResult::Retry(identity());
  }

  if (Size() + object_size > max_capacity_) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
      object_size, this, executable);
  if (page == NULL) return AllocationResult::Retry(identity());
  DCHECK(page->area_size() >= object_size);

  size_ += static_cast<int>(page->size());
  objects_size_ += object_size;
  page_count_++;
  page->set_next_page(first_page_);
  first_page_ = page;

  if (size_ > maximum_committed_) {
    maximum_committed_ = size_;
  }

  // Register all MemoryChunk::kAlignment-aligned chunks covered by
  // this large page in the chunk map.
  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
  for (uintptr_t key = base; key <= limit; key++) {
    HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                              static_cast<uint32_t>(key), true);
    DCHECK(entry != NULL);
    entry->value = page;
  }

  HeapObject* object = page->GetObject();

  if (Heap::ShouldZapGarbage()) {
    // Make the object consistent so the heap can be verified in OldSpaceStep.
    // We only need to do this in debug builds or if verify_heap is on.
    reinterpret_cast<Object**>(object->address())[0] =
        heap()->fixed_array_map();
    reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
  }

  heap()->incremental_marking()->OldSpaceStep(object_size);
  return object;
}

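// Example of the chunk-map registration above (illustrative numbers,
// assuming MemoryChunk::kAlignment == 1 MB): a large page at address
// 0x40000000 with size 3 MB yields base == 0x400 and limit == 0x402, so
// keys 0x400..0x402 all map to the page and FindPage() can resolve any
// interior address with a single hash lookup.
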
size_t LargeObjectSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  size_t size = 0;
  LargePage* current = first_page_;
  while (current != NULL) {
    size += current->CommittedPhysicalMemory();
    current = current->next_page();
  }
  return size;
}

// GC support
Object* LargeObjectSpace::FindObject(Address a) {
  LargePage* page = FindPage(a);
  if (page != NULL) {
    return page->GetObject();
  }
  return Smi::FromInt(0);  // Signaling not found.
}

LargePage* LargeObjectSpace::FindPage(Address a) {
  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                        static_cast<uint32_t>(key), false);
  if (e != NULL) {
    DCHECK(e->value != NULL);
    LargePage* page = reinterpret_cast<LargePage*>(e->value);
    DCHECK(page->is_valid());
    if (page->Contains(a)) {
      return page;
    }
  }
  return NULL;
}

void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* previous = NULL;
  LargePage* current = first_page_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    // Can this large page contain pointers to non-trivial objects? No other
    // pointer object is this big.
    bool is_pointer_object = object->IsFixedArray();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) {
      mark_bit.Clear();
      Page::FromAddress(object->address())->ResetProgressBar();
      Page::FromAddress(object->address())->ResetLiveBytes();
      previous = current;
      current = current->next_page();
    } else {
      LargePage* page = current;
      // Cut the chunk out from the chunk list.
      current = current->next_page();
      if (previous == NULL) {
        first_page_ = current;
      } else {
        previous->set_next_page(current);
      }

      // Free the chunk.
      heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
                                                             heap()->isolate());
      size_ -= static_cast<int>(page->size());
      objects_size_ -= object->Size();
      page_count_--;

      // Remove entries belonging to this page.
      // Use variable alignment to help pass length check (<= 80 characters)
      // of single line in tools/presubmit.py.
      const intptr_t alignment = MemoryChunk::kAlignment;
      uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
      uintptr_t limit = base + (page->size() - 1) / alignment;
      for (uintptr_t key = base; key <= limit; key++) {
        chunk_map_.Remove(reinterpret_cast<void*>(key),
                          static_cast<uint32_t>(key));
      }

      if (is_pointer_object) {
        heap()->QueueMemoryChunkForFree(page);
      } else {
        heap()->isolate()->memory_allocator()->Free(page);
      }
    }
  }
  heap()->FreeQueuedChunks();
}

bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);

  bool owned = (chunk->owner() == this);

  SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());

  return owned;
}

#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargePage* chunk = first_page_; chunk != NULL;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    CHECK(object->address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    CHECK(map->IsMap());
    CHECK(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, byte arrays, and constant pool arrays in the
    // large object space.
    CHECK(object->IsCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsFixedArray() ||
          object->IsFixedDoubleArray() || object->IsByteArray() ||
          object->IsConstantPoolArray());

    // The object itself should look OK.
    object->ObjectVerify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          CHECK(heap()->Contains(element_object));
          CHECK(element_object->map()->IsMap());
        }
      }
    }
  }
}
#endif

#ifdef DEBUG
void LargeObjectSpace::Print() {
  OFStream os(stdout);
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    obj->Print(os);
  }
}

void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms(heap()->isolate());
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF(
      "  number of objects %d, "
      "size of objects %" V8_PTR_PREFIX "d\n",
      num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}

void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}

void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n", this->address(),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next(); object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::MarkBitFrom(object).Get();
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}

#endif  // DEBUG
}
}  // namespace v8::internal