// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// HeapObjectIterator
HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the doubly linked page list. Initialize as if we have
  // reached the end of the anchor page, then the first iteration will move on
  // to the first page.
  Initialize(space,
             NULL,
             NULL,
             kAllPagesInSpace,
             NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the doubly linked page list. Initialize the current
  // address and end as NULL, then the first iteration will move on
  // to the first page.
  Initialize(space,
             NULL,
             NULL,
             kAllPagesInSpace,
             size_func);
}


HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Space* owner = page->owner();
  ASSERT(owner == page->heap()->old_pointer_space() ||
         owner == page->heap()->old_data_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->cell_space() ||
         owner == page->heap()->property_cell_space() ||
         owner == page->heap()->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner),
             page->area_start(),
             page->area_end(),
             kOnePageOnly,
             size_func);
  ASSERT(page->WasSweptPrecisely());
}


void HeapObjectIterator::Initialize(PagedSpace* space,
                                    Address cur, Address end,
                                    HeapObjectIterator::PageMode mode,
                                    HeapObjectCallback size_f) {
  // Check that we actually can iterate this space.
  ASSERT(!space->was_swept_conservatively());

  space_ = space;
  cur_addr_ = cur;
  cur_end_ = end;
  page_mode_ = mode;
  size_func_ = size_f;
}


// We have hit the end of the current page's object area and should advance
// to the next page.
bool HeapObjectIterator::AdvanceToNextPage() {
  ASSERT(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    ASSERT(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  ASSERT(cur_page->WasSweptPrecisely());
  return true;
}
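

// An illustrative sketch (not part of the original file) of the usage
// pattern these iterators support: drive Next() until it returns NULL.
// The CountObjectsOnPage helper is hypothetical; it assumes the page
// belongs to a precisely swept paged space, as the iterator requires.
static int CountObjectsOnPage(Page* page) {
  HeapObjectIterator it(page, NULL);  // NULL means "use HeapObject::Size()".
  int count = 0;
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    count++;
  }
  return count;
}
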
// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {
}


bool CodeRange::SetUp(const size_t requested) {
  ASSERT(code_range_ == NULL);

  code_range_ = new VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of the requested size.
  ASSERT(code_range_->size() == requested);
  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  Address base = reinterpret_cast<Address>(code_range_->address());
  Address aligned_base =
      RoundUp(reinterpret_cast<Address>(code_range_->address()),
              MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base);
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}
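

// A self-contained illustration (not part of the original file) of why the
// cast above is safe: as long as two values lie within the same block of
// less than 2GB, their difference fits in a signed 32-bit int and yields a
// valid three-way comparison result.
static int CompareWithinRange(uintptr_t left, uintptr_t right) {
  // Precondition (guaranteed by CodeRange for block start addresses):
  // |left - right| < 2^31.
  return static_cast<int>(left - right);
}
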
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}
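

// The merge step above, restated as a self-contained sketch
// (IllustrativeBlock is a hypothetical stand-in for FreeBlock): with the
// blocks sorted by start address, any block that begins exactly where the
// current run ends is folded into that run.
struct IllustrativeBlock {
  uintptr_t start;
  size_t size;
};

static int MergeSortedBlocks(IllustrativeBlock* blocks, int n,
                             IllustrativeBlock* out) {
  int out_n = 0;
  for (int i = 0; i < n;) {
    IllustrativeBlock merged = blocks[i];
    i++;
    // Fold in every adjacent successor, exactly as the loop above does.
    while (i < n && blocks[i].start == merged.start + merged.size) {
      merged.size += blocks[i].size;
      i++;
    }
    if (merged.size > 0) out[out_n++] = merged;
  }
  return out_n;  // Number of coalesced blocks written to 'out'.
}
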
Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  ASSERT(commit_size <= requested_size);
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough. This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested_size);
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (aligned_requested >= (current.size - Page::kPageSize)) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  } else {
    *allocated = aligned_requested;
  }
  ASSERT(*allocated <= current.size);
  ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
                                                            current.start,
                                                            commit_size,
                                                            *allocated)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}
bool CodeRange::CommitRawMemory(Address start, size_t length) {
  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
}


bool CodeRange::UncommitRawMemory(Address start, size_t length) {
  return code_range_->Uncommit(start, length);
}


void CodeRange::FreeRawMemory(Address address, size_t length) {
  ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator


MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
      highest_ever_allocated_(reinterpret_cast<void*>(0)) {
}


bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  ASSERT_GE(capacity_, capacity_executable_);

  size_ = 0;
  size_executable_ = 0;

  return true;
}


void MemoryAllocator::TearDown() {
  // Check that spaces were torn down before MemoryAllocator.
  ASSERT(size_ == 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // ASSERT(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}


bool MemoryAllocator::CommitMemory(Address base,
                                   size_t size,
                                   Executability executable) {
  if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}
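

// UpdateAllocatedSpaceLimits (called above and in CommitExecutableMemory)
// maintains a conservative [lowest, highest) envelope over everything ever
// committed; the constructor seeds the low mark with the maximal address
// and the high mark with zero. A stand-alone sketch of that bookkeeping
// (the struct and names are illustrative, not V8 API):
struct IllustrativeEnvelope {
  IllustrativeEnvelope()
      : lowest(static_cast<uintptr_t>(-1)), highest(0) {}
  void Update(uintptr_t low, uintptr_t high) {
    if (low < lowest) lowest = low;
    if (high > highest) highest = high;
  }
  uintptr_t lowest;
  uintptr_t highest;
};
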
void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  ASSERT(reservation->IsReserved());
  size_t size = reservation->size();
  ASSERT(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    ASSERT(size_executable_ >= size);
    size_executable_ -= size;
  }
  // Code which is part of the code-range does not have its own VirtualMemory.
  ASSERT(!isolate_->code_range()->contains(
      static_cast<Address>(reservation->address())));
  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
  reservation->Release();
}


void MemoryAllocator::FreeMemory(Address base,
                                 size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  ASSERT(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    ASSERT(size_executable_ >= size);
    size_executable_ -= size;
  }
  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
    ASSERT(executable == EXECUTABLE);
    isolate_->code_range()->FreeRawMemory(base, size);
  } else {
    ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
    bool result = VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    ASSERT(result);
  }
}


Address MemoryAllocator::ReserveAlignedMemory(size_t size,
                                              size_t alignment,
                                              VirtualMemory* controller) {
  VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_ += reservation.size();
  Address base = RoundUp(static_cast<Address>(reservation.address()),
                         alignment);
  controller->TakeControl(&reservation);
  return base;
}
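

// The alignment trick above in a self-contained form: over-reserve, then
// round the raw base up to the next multiple of a power-of-two alignment.
// Illustrative only; V8's RoundUp performs the equivalent computation.
static uintptr_t IllustrativeAlignUp(uintptr_t raw_base, uintptr_t alignment) {
  // 'alignment' must be a power of two for the mask arithmetic to be valid.
  return (raw_base + alignment - 1) & ~(alignment - 1);
}
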
Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
                                               size_t commit_size,
                                               size_t alignment,
                                               Executability executable,
                                               VirtualMemory* controller) {
  ASSERT(commit_size <= reserve_size);
  VirtualMemory reservation;
  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation,
                                base,
                                commit_size,
                                reserve_size)) {
      base = NULL;
    }
  } else {
    if (reservation.Commit(base, commit_size, false)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = NULL;
    }
  }

  if (base == NULL) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    return NULL;
  }

  controller->TakeControl(&reservation);
  return base;
}
void Page::InitializeAsAnchor(PagedSpace* owner) {
  set_owner(owner);
  set_prev_page(this);
  set_next_page(this);
}


NewSpacePage* NewSpacePage::Initialize(Heap* heap,
                                       Address start,
                                       SemiSpace* semi_space) {
  Address area_start = start + NewSpacePage::kObjectStartOffset;
  Address area_end = start + Page::kPageSize;

  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
                                               start,
                                               Page::kPageSize,
                                               area_start,
                                               area_end,
                                               NOT_EXECUTABLE,
                                               semi_space);
  chunk->set_next_chunk(NULL);
  chunk->set_prev_chunk(NULL);
  chunk->initialize_scan_on_scavenge(true);
  bool in_to_space = (semi_space->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  return page;
}
void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  set_owner(semi_space);
  set_next_chunk(this);
  set_prev_chunk(this);
  // The flags mark this invalid page as not being in new-space.
  // All real new-space pages will be in new-space.
  SetFlags(0, ~0);
}
MemoryChunk* MemoryChunk::Initialize(Heap* heap,
                                     Address base,
                                     size_t size,
                                     Address area_start,
                                     Address area_end,
                                     Executability executable,
                                     Space* owner) {
  MemoryChunk* chunk = FromAddress(base);

  ASSERT(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->slots_buffer_ = NULL;
  chunk->skip_list_ = NULL;
  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_ = static_cast<int>(area_start - base);
  chunk->parallel_sweeping_ = 0;
  chunk->available_in_small_free_list_ = 0;
  chunk->available_in_medium_free_list_ = 0;
  chunk->available_in_large_free_list_ = 0;
  chunk->available_in_huge_free_list_ = 0;
  chunk->non_available_small_blocks_ = 0;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->initialize_scan_on_scavenge(false);
  chunk->SetFlag(WAS_SWEPT_PRECISELY);

  ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  if (owner == heap->old_data_space()) {
    chunk->SetFlag(CONTAINS_ONLY_DATA);
  }

  return chunk;
}
// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
                      MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  OS::CommitPageSize());

  if (commit_size > committed_size) {
    // The commit size should be less than or equal to the reserved size.
    ASSERT(commit_size <= size() - 2 * guard_size);
    // Append the committed area.
    Address start = address() + committed_size + guard_size;
    size_t length = commit_size - committed_size;
    if (reservation_.IsReserved()) {
      Executability executable = IsFlagSet(IS_EXECUTABLE)
          ? EXECUTABLE : NOT_EXECUTABLE;
      if (!heap()->isolate()->memory_allocator()->CommitMemory(
          start, length, executable)) {
        return false;
      }
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
      if (!code_range->CommitRawMemory(start, length)) return false;
    }

    if (Heap::ShouldZapGarbage()) {
      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
    }
  } else if (commit_size < committed_size) {
    ASSERT(commit_size > 0);
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
    Address start = address() + committed_size + guard_size - length;
    if (reservation_.IsReserved()) {
      if (!reservation_.Uncommit(start, length)) return false;
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
      if (!code_range->UncommitRawMemory(start, length)) return false;
    }
  }

  area_end_ = area_start_ + requested;
  return true;
}
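

// The size bookkeeping of CommitArea, restated as a self-contained sketch:
// both the current and the requested committed area are rounded up to whole
// OS commit pages, and only the difference is committed or uncommitted.
// The helper and its parameters are illustrative, not V8 API.
static size_t IllustrativeCommitDelta(size_t header_size,
                                      size_t current_area,
                                      size_t requested_area,
                                      size_t commit_page_size) {
  size_t committed = IllustrativeAlignUp(header_size + current_area,
                                         commit_page_size);
  size_t wanted = IllustrativeAlignUp(header_size + requested_area,
                                      commit_page_size);
  // If wanted > committed the delta is committed; otherwise it is
  // uncommitted, exactly mirroring the two branches above.
  return wanted > committed ? wanted - committed : committed - wanted;
}
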
void MemoryChunk::InsertAfter(MemoryChunk* other) {
  next_chunk_ = other->next_chunk_;
  prev_chunk_ = other;

  // This memory barrier is needed since concurrent sweeper threads may iterate
  // over the list of pages while a new page is inserted.
  // TODO(hpayer): find a cleaner way to guarantee that the page list can be
  // expanded concurrently.
  MemoryBarrier();

  // The following two write operations can take effect in arbitrary order
  // since pages are always iterated by the sweeper threads in LIFO order,
  // i.e., the inserted page becomes visible for the sweeper threads only
  // after other->next_chunk_ = this;
  other->next_chunk_->prev_chunk_ = this;
  other->next_chunk_ = this;
}
void MemoryChunk::Unlink() {
  if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
    heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  next_chunk_->prev_chunk_ = prev_chunk_;
  prev_chunk_->next_chunk_ = next_chunk_;
  prev_chunk_ = NULL;
  next_chunk_ = NULL;
}
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  ASSERT(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |  Committed but not used    |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |          Header            |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |  Committed but not used    |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //
  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                         OS::CommitPageSize()) + CodePageGuardSize();

    // Check executable memory limit.
    if (size_executable_ + chunk_size > capacity_executable_) {
      LOG(isolate_,
          StringEvent("MemoryAllocator::AllocateRawMemory",
                      "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Size of header (not executable) plus area (executable).
    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                 OS::CommitPageSize());
    // Allocate executable memory either from code range or from the
    // OS.
    if (isolate_->code_range()->exists()) {
      base = isolate_->code_range()->AllocateRawMemory(chunk_size,
                                                       commit_size,
                                                       &chunk_size);
      ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                       MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_ += chunk_size;
      // Update executable memory size.
      size_executable_ += chunk_size;
    } else {
      base = AllocateAlignedMemory(chunk_size,
                                   commit_size,
                                   MemoryChunk::kAlignment,
                                   executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_ += reservation.size();
    }

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                         OS::CommitPageSize());
    size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
                                 commit_area_size, OS::CommitPageSize());
    base = AllocateAlignedMemory(chunk_size,
                                 commit_size,
                                 MemoryChunk::kAlignment,
                                 executable,
                                 &reservation);

    if (base == NULL) return NULL;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not yet committed memory regions of chunks as
  // allocated.
  isolate_->counters()->memory_allocated()->
      Increment(static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  MemoryChunk* result = MemoryChunk::Initialize(heap,
                                                base,
                                                chunk_size,
                                                area_start,
                                                area_end,
                                                executable,
                                                owner);
  result->set_reserved_memory(&reservation);
  MSAN_MEMORY_IS_INITIALIZED(base, chunk_size);
  return result;
}
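

// A restatement of the executable-chunk sizing above as an illustrative
// helper (not V8 API), reusing the IllustrativeAlignUp sketch from earlier:
// the header-plus-area span is rounded to whole commit pages and one
// trailing guard page is appended.
static size_t IllustrativeExecutableChunkSize(size_t area_start_offset,
                                              size_t reserve_area_size,
                                              size_t guard_size,
                                              size_t commit_page_size) {
  size_t body = IllustrativeAlignUp(area_start_offset + reserve_area_size,
                                    commit_page_size);
  return body + guard_size;  // The trailing guard page sits past the body.
}
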
void Page::ResetFreeListStatistics() {
  non_available_small_blocks_ = 0;
  available_in_small_free_list_ = 0;
  available_in_medium_free_list_ = 0;
  available_in_large_free_list_ = 0;
  available_in_huge_free_list_ = 0;
}
Page* MemoryAllocator::AllocatePage(intptr_t size,
                                    PagedSpace* owner,
                                    Executability executable) {
  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);

  if (chunk == NULL) return NULL;

  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}


LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                              Space* owner,
                                              Executability executable) {
  MemoryChunk* chunk = AllocateChunk(object_size,
                                     object_size,
                                     executable,
                                     owner);
  if (chunk == NULL) return NULL;
  return LargePage::Initialize(isolate_->heap(), chunk);
}


void MemoryAllocator::Free(MemoryChunk* chunk) {
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  isolate_->heap()->RememberUnmappedPage(
      reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());

  delete chunk->slots_buffer();
  delete chunk->skip_list();

  VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    FreeMemory(reservation, chunk->executable());
  } else {
    FreeMemory(chunk->address(),
               chunk->size(),
               chunk->executable());
  }
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  if (!CommitMemory(start, size, executable)) return false;

  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size);
  }

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}


void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}
void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback,
    ObjectSpace space,
    AllocationAction action) {
  ASSERT(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}
#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif
int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
}


int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(OS::CommitPageSize());
}


int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}


int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
}
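

// Putting the offsets above together as an illustrative sketch (not V8
// API): the header fills the first commit page(s), one guard page follows
// it, the executable area runs up to the last commit page of the
// reservation, and that last page is the trailing guard.
struct IllustrativeCodePageLayout {
  int guard_start;  // Offset of the guard page after the header.
  int area_start;   // Offset where the executable area begins.
  int area_end;     // Offset where the executable area ends.
};

static IllustrativeCodePageLayout ComputeIllustrativeCodePageLayout(
    int object_start_offset, int commit_page_size, int page_size) {
  IllustrativeCodePageLayout layout;
  layout.guard_start = static_cast<int>(
      IllustrativeAlignUp(object_start_offset, commit_page_size));
  layout.area_start = layout.guard_start + commit_page_size;
  layout.area_end = page_size - commit_page_size;
  return layout;
}
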
bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
                                             Address start,
                                             size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  if (!vm->Commit(start,
                  CodePageGuardStartOffset(),
                  false)) {
    return false;
  }

  // Create guard page after the header.
  if (!vm->Guard(start + CodePageGuardStartOffset())) {
    return false;
  }

  // Commit page body (executable).
  if (!vm->Commit(start + CodePageAreaStartOffset(),
                  commit_size - CodePageGuardStartOffset(),
                  true)) {
    return false;
  }

  // Create guard page before the end.
  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
    return false;
  }

  UpdateAllocatedSpaceLimits(start,
                             start + CodePageAreaStartOffset() +
                             commit_size - CodePageGuardStartOffset());
  return true;
}
// -----------------------------------------------------------------------------
// MemoryChunk implementation

void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
  }
  chunk->IncrementLiveBytes(by);
}
// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(Heap* heap,
                       intptr_t max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(heap, id, executable),
      free_list_(this),
      was_swept_conservatively_(false),
      first_unswept_page_(Page::FromAddress(NULL)),
      unswept_free_bytes_(0) {
  if (id == CODE_SPACE) {
    area_size_ = heap->isolate()->memory_allocator()->
        CodePageAreaSize();
  } else {
    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
  }
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
      * AreaSize();
  accounting_stats_.Clear();

  allocation_info_.set_top(NULL);
  allocation_info_.set_limit(NULL);

  anchor_.InitializeAsAnchor(this);
}
bool PagedSpace::SetUp() {
  return true;
}


bool PagedSpace::HasBeenSetUp() {
  return true;
}


void PagedSpace::TearDown() {
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->isolate()->memory_allocator()->Free(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}


size_t PagedSpace::CommittedPhysicalMemory() {
  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  PageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}


MaybeObject* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on precisely swept spaces.
  ASSERT(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p, NULL);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Failure::Exception();
}
bool PagedSpace::CanExpand() {
  ASSERT(max_capacity_ % AreaSize() == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);

  // Are we going to exceed capacity for this space?
  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;

  return true;
}
bool PagedSpace::Expand() {
  if (!CanExpand()) return false;

  intptr_t size = AreaSize();

  if (anchor_.next_page() == &anchor_) {
    size = SizeOfFirstPage();
  }

  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
      size, this, executable());
  if (p == NULL) return false;

  ASSERT(Capacity() <= max_capacity_);

  p->InsertAfter(anchor_.prev_page());

  return true;
}
intptr_t PagedSpace::SizeOfFirstPage() {
  int size = 0;
  switch (identity()) {
    case OLD_POINTER_SPACE:
      size = 72 * kPointerSize * KB;
      break;
    case OLD_DATA_SPACE:
      size = 192 * KB;
      break;
    case MAP_SPACE:
      size = 16 * kPointerSize * KB;
      break;
    case CELL_SPACE:
      size = 16 * kPointerSize * KB;
      break;
    case PROPERTY_CELL_SPACE:
      size = 8 * kPointerSize * KB;
      break;
    case CODE_SPACE:
      if (heap()->isolate()->code_range()->exists()) {
        // When code range exists, code pages are allocated in a special way
        // (from the reserved code range). That part of the code is not yet
        // upgraded to handle small pages.
        size = AreaSize();
      } else {
        size = 480 * KB;
      }
      break;
    default:
      UNREACHABLE();
  }
  return Min(size, AreaSize());
}
int PagedSpace::CountTotalPages() {
  PageIterator it(this);
  int count = 0;
  while (it.has_next()) {
    it.next();
    count++;
  }
  return count;
}


void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
  sizes->huge_size_ = page->available_in_huge_free_list();
  sizes->small_size_ = page->available_in_small_free_list();
  sizes->medium_size_ = page->available_in_medium_free_list();
  sizes->large_size_ = page->available_in_large_free_list();
}


void PagedSpace::ResetFreeListStatistics() {
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    page->ResetFreeListStatistics();
  }
}
void PagedSpace::IncreaseCapacity(int size) {
  accounting_stats_.ExpandSpace(size);
}
void PagedSpace::ReleasePage(Page* page, bool unlink) {
  ASSERT(page->LiveBytes() == 0);
  ASSERT(AreaSize() == page->area_size());

  // Adjust list of unswept pages if the page is the head of the list.
  if (first_unswept_page_ == page) {
    first_unswept_page_ = page->next_page();
    if (first_unswept_page_ == anchor()) {
      first_unswept_page_ = Page::FromAddress(NULL);
    }
  }

  if (page->WasSwept()) {
    intptr_t size = free_list_.EvictFreeListItems(page);
    accounting_stats_.AllocateBytes(size);
    ASSERT_EQ(AreaSize(), static_cast<int>(size));
  } else {
    DecreaseUnsweptFreeBytes(page);
  }

  // TODO(hpayer): This check is just used for debugging purposes and
  // should be removed or turned into an assert after investigating the
  // crash in concurrent sweeping.
  CHECK(!free_list_.ContainsPageFreeListItems(page));

  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
    allocation_info_.set_top(NULL);
    allocation_info_.set_limit(NULL);
  }

  if (unlink) {
    page->Unlink();
  }
  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
    heap()->isolate()->memory_allocator()->Free(page);
  } else {
    heap()->QueueMemoryChunkForFree(page);
  }

  ASSERT(Capacity() > 0);
  accounting_stats_.ShrinkSpace(AreaSize());
}
#ifdef DEBUG
void PagedSpace::Print() { }
#endif


#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // We can only iterate over the pages if they were swept precisely.
  if (was_swept_conservatively_) return;

  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationTop(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->WasSweptPrecisely());
    HeapObjectIterator it(page, NULL);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->Verify();

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    CHECK_LE(black_size, page->LiveBytes());
  }
  CHECK(allocation_pointer_found_in_space);
}
#endif  // VERIFY_HEAP
// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::SetUp(int reserved_semispace_capacity,
                     int maximum_semispace_capacity) {
  // Set up new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();

  size_t size = 2 * reserved_semispace_capacity;
  Address base =
      heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
          size, size, &reservation_);
  if (base == NULL) return false;

  chunk_base_ = base;
  chunk_size_ = static_cast<uintptr_t>(size);
  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and set up the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
  ASSERT(static_cast<intptr_t>(chunk_size_) >=
         2 * heap()->ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));

  to_space_.SetUp(chunk_base_,
                  initial_semispace_capacity,
                  maximum_semispace_capacity);
  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
                    initial_semispace_capacity,
                    maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }
  ASSERT(!from_space_.is_committed());  // No need to use memory yet.

  start_ = chunk_base_;
  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;

  ResetAllocationInfo();
  return true;
}
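

// The masks computed above make containment tests O(1), as an illustrative
// sketch (not V8 API): because the reservation is aligned to its own
// power-of-two size, masking any address with ~(size - 1) recovers the
// chunk base, which can be compared against the start address directly.
static bool IllustrativeFastContains(uintptr_t addr,
                                     uintptr_t start,
                                     uintptr_t chunk_size) {
  // Preconditions mirrored from NewSpace::SetUp: chunk_size is a power of
  // two and start is chunk_size-aligned.
  uintptr_t address_mask = ~(chunk_size - 1);
  return (addr & address_mask) == start;
}
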
void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  start_ = NULL;
  allocation_info_.set_top(NULL);
  allocation_info_.set_limit(NULL);

  to_space_.TearDown();
  from_space_.TearDown();

  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));

  ASSERT(reservation_.IsReserved());
  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
                                                    NOT_EXECUTABLE);
  chunk_base_ = NULL;
  chunk_size_ = 0;
}


void NewSpace::Flip() {
  SemiSpace::Swap(&from_space_, &to_space_);
}
void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  ASSERT(Capacity() < MaximumCapacity());
  int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from-space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from-space,
      // attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::UpdateAllocationInfo() {
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  allocation_info_.set_top(to_space_.page_low());
  allocation_info_.set_limit(to_space_.page_high());
  UpdateInlineAllocationLimit(0);
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::ResetAllocationInfo() {
  to_space_.Reset();
  UpdateAllocationInfo();

  // Clear all mark-bits in the to-space.
  NewSpacePageIterator it(&to_space_);
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}
void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
  if (heap()->inline_allocation_disabled()) {
    // Lowest limit when linear allocation was disabled.
    Address high = to_space_.page_high();
    Address new_top = allocation_info_.top() + size_in_bytes;
    allocation_info_.set_limit(Min(new_top, high));
  } else if (inline_allocation_limit_step() == 0) {
    // Normal limit is the end of the current page.
    allocation_info_.set_limit(to_space_.page_high());
  } else {
    // Lower limit during incremental marking.
    Address high = to_space_.page_high();
    Address new_top = allocation_info_.top() + size_in_bytes;
    Address new_limit = new_top + inline_allocation_limit_step_;
    allocation_info_.set_limit(Min(new_limit, high));
  }
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
bool NewSpace::AddFreshPage() {
  Address top = allocation_info_.top();
  if (NewSpacePage::IsAtStart(top)) {
    // The current page is already empty. Don't try to make another.

    // We should only get here if someone asks to allocate more
    // than what can be stored in a single page.
    // TODO(gc): Change the limit on new-space allocation to prevent this
    // from happening (all such allocations should go directly to LOSpace).
    return false;
  }
  if (!to_space_.AdvancePage()) {
    // Failed to get a new page in to-space.
    return false;
  }

  // Clear remainder of current page.
  Address limit = NewSpacePage::FromLimit(top)->area_end();
  if (heap()->gc_state() == Heap::SCAVENGE) {
    heap()->promotion_queue()->SetNewLimit(limit);
    heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
  }

  int remaining_in_page = static_cast<int>(limit - top);
  heap()->CreateFillerObjectAt(top, remaining_in_page);

  UpdateAllocationInfo();

  return true;
}
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top();
  Address high = to_space_.page_high();
  if (allocation_info_.limit() < high) {
    // Either the limit has been lowered because linear allocation was disabled
    // or because incremental marking wants to get a chance to do a step. Set
    // the new limit accordingly.
    Address new_top = old_top + size_in_bytes;
    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(
        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
    UpdateInlineAllocationLimit(size_in_bytes);
    top_on_previous_step_ = new_top;
    return AllocateRaw(size_in_bytes);
  } else if (AddFreshPage()) {
    // Switched to new page. Try allocating again.
    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(
        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
    top_on_previous_step_ = to_space_.page_low();
    return AllocateRaw(size_in_bytes);
  } else {
    return Failure::RetryAfterGC();
  }
}
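

// SlowAllocateRaw above is the fallback for a bump-pointer fast path, which
// in essence does the following (illustrative sketch; the real fast path
// lives outside this file):
static bool IllustrativeTryBumpAllocate(uintptr_t* top, uintptr_t limit,
                                        size_t size_in_bytes,
                                        uintptr_t* result) {
  uintptr_t new_top = *top + size_in_bytes;
  if (new_top > limit) return false;  // Defer to the slow path.
  *result = *top;  // The new object starts at the old top.
  *top = new_top;
  return true;
}
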
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.first_page()->area_start();
  CHECK_EQ(current, to_space_.space_start());

  while (current != top()) {
    if (!NewSpacePage::IsAtEnd(current)) {
      // The allocation pointer should not be in the middle of an object.
      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
            current < top());

      HeapObject* object = HeapObject::FromAddress(current);

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // The object should not be code or a map.
      CHECK(!object->IsMap());
      CHECK(!object->IsCode());

      // The object itself should look OK.
      object->Verify();

      // All the interior pointers should be contained in the heap.
      VerifyPointersVisitor visitor;
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, &visitor);

      current += size;
    } else {
      // At end of page, switch to next page.
      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
      // Next page should be valid.
      CHECK(!page->is_anchor());
      current = page->area_start();
    }
  }

  // Check semi-spaces.
  CHECK_EQ(from_space_.id(), kFromSpace);
  CHECK_EQ(to_space_.id(), kToSpace);
  from_space_.Verify();
  to_space_.Verify();
}
#endif
// -----------------------------------------------------------------------------
// SemiSpace implementation

void SemiSpace::SetUp(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise. In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  ASSERT(maximum_capacity >= Page::kPageSize);
  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
  capacity_ = initial_capacity;
  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
  maximum_committed_ = 0;
  committed_ = false;
  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_;
}
void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Commit() {
  ASSERT(!is_committed());
  int pages = capacity_ / Page::kPageSize;
  if (!heap()->isolate()->memory_allocator()->CommitBlock(start_,
                                                          capacity_,
                                                          executable())) {
    return false;
  }

  NewSpacePage* current = anchor();
  for (int i = 0; i < pages; i++) {
    NewSpacePage* new_page =
        NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
    new_page->InsertAfter(current);
    current = new_page;
  }

  SetCapacity(capacity_);
  committed_ = true;
  Reset();
  return true;
}
bool SemiSpace::Uncommit() {
  ASSERT(is_committed());
  Address start = start_ + maximum_capacity_ - capacity_;
  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
    return false;
  }
  anchor()->set_next_page(anchor());
  anchor()->set_prev_page(anchor());

  committed_ = false;
  return true;
}


size_t SemiSpace::CommittedPhysicalMemory() {
  if (!is_committed()) return 0;
  size_t size = 0;
  NewSpacePageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}
bool SemiSpace::GrowTo(int new_capacity) {
  if (!is_committed()) {
    if (!Commit()) return false;
  }
  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);
  int pages_before = capacity_ / Page::kPageSize;
  int pages_after = new_capacity / Page::kPageSize;

  size_t delta = new_capacity - capacity_;

  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
      start_ + capacity_, delta, executable())) {
    return false;
  }
  SetCapacity(new_capacity);
  NewSpacePage* last_page = anchor()->prev_page();
  ASSERT(last_page != anchor());
  for (int i = pages_before; i < pages_after; i++) {
    Address page_address = start_ + i * Page::kPageSize;
    NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
                                                      page_address,
                                                      this);
    new_page->InsertAfter(last_page);
    Bitmap::Clear(new_page);
    // Duplicate the flags that were set on the old page.
    new_page->SetFlags(last_page->GetFlags(),
                       NewSpacePage::kCopyOnFlipFlagsMask);
    last_page = new_page;
  }
  return true;
}
bool SemiSpace::ShrinkTo(int new_capacity) {
  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);
  if (is_committed()) {
    size_t delta = capacity_ - new_capacity;
    ASSERT(IsAligned(delta, OS::AllocateAlignment()));

    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
    if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
      return false;
    }

    int pages_after = new_capacity / Page::kPageSize;
    NewSpacePage* new_last_page =
        NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
    new_last_page->set_next_page(anchor());
    anchor()->set_prev_page(new_last_page);
    ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page));
  }

  SetCapacity(new_capacity);

  return true;
}
void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
  anchor_.set_owner(this);
  // Fixup back-pointers to anchor. Address of anchor changes
  // when we swap.
  anchor_.prev_page()->set_next_page(&anchor_);
  anchor_.next_page()->set_prev_page(&anchor_);

  bool becomes_to_space = (id_ == kFromSpace);
  id_ = becomes_to_space ? kToSpace : kFromSpace;
  NewSpacePage* page = anchor_.next_page();
  while (page != &anchor_) {
    page->set_owner(this);
    page->SetFlags(flags, mask);
    if (becomes_to_space) {
      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
      page->SetFlag(MemoryChunk::IN_TO_SPACE);
      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
      page->ResetLiveBytes();
    } else {
      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
    }
    ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
    ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
    page = page->next_page();
  }
}
void SemiSpace::Reset() {
  ASSERT(anchor_.next_page() != &anchor_);
  current_page_ = anchor_.next_page();
}


void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
  // We won't be swapping semispaces without data in them.
  ASSERT(from->anchor_.next_page() != &from->anchor_);
  ASSERT(to->anchor_.next_page() != &to->anchor_);

  // Swap bits.
  SemiSpace tmp = *from;
  *from = *to;
  *to = tmp;

  // Fixup back-pointers to the page list anchor now that its address
  // has changed.
  // Swap to/from-space bits on pages.
  // Copy GC flags from old active space (from-space) to new (to-space).
  intptr_t flags = from->current_page()->GetFlags();
  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);

  from->FlipPages(0, 0);
}
void SemiSpace::SetCapacity(int new_capacity) {
  capacity_ = new_capacity;
  if (capacity_ > maximum_committed_) {
    maximum_committed_ = capacity_;
  }
}


void SemiSpace::set_age_mark(Address mark) {
  ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
  age_mark_ = mark;
  // Mark all pages up to the one containing mark.
  NewSpacePageIterator it(space_start(), mark);
  while (it.has_next()) {
    it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
  }
}
#ifdef DEBUG
void SemiSpace::Print() { }
#endif

#ifdef VERIFY_HEAP
void SemiSpace::Verify() {
  bool is_from_space = (id_ == kFromSpace);
  NewSpacePage* page = anchor_.next_page();
  CHECK(anchor_.semi_space() == this);
  while (page != &anchor_) {
    CHECK(page->semi_space() == this);
    CHECK(page->InNewSpace());
    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
                                         : MemoryChunk::IN_FROM_SPACE));
    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
    if (!is_from_space) {
      // The pointers-from-here-are-interesting flag isn't updated dynamically
      // on from-space pages, so it might be out of sync with the marking state.
      if (page->heap()->incremental_marking()->IsMarking()) {
        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      } else {
        CHECK(!page->IsFlagSet(
            MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      }
      // TODO(gc): Check that the live_bytes_count_ field matches the
      // black marking on the page (if we make it match in new-space).
    }
    CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
    CHECK(page->prev_page()->next_page() == page);
    page = page->next_page();
  }
}
#endif
#ifdef DEBUG
void SemiSpace::AssertValidRange(Address start, Address end) {
  // Addresses belong to the same semi-space.
  NewSpacePage* page = NewSpacePage::FromLimit(start);
  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
  SemiSpace* space = page->semi_space();
  CHECK_EQ(space, end_page->semi_space());
  // Start address is before end address, either on the same page,
  // or the end address is on a later page in the linked list of
  // semi-space pages.
  if (page == end_page) {
    CHECK(start <= end);
  } else {
    while (page != end_page) {
      page = page->next_page();
      CHECK_NE(page, space->anchor());
    }
  }
}
#endif
// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
  Initialize(from, to, NULL);
}


void SemiSpaceIterator::Initialize(Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  SemiSpace::AssertValidRange(start, end);
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}
// heap_histograms is shared; always clear it before using it.
static void ClearHistograms(Isolate* isolate) {
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  isolate->js_spill_information()->Clear();
}
static void ClearCodeKindStatistics(int* code_kind_statistics) {
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    code_kind_statistics[i] = 0;
  }
}


static void ReportCodeKindStatistics(int* code_kind_statistics) {
  PrintF("\n   Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (code_kind_statistics[i] > 0) {
      PrintF("     %-20s: %10d bytes\n",
             Code::Kind2String(static_cast<Code::Kind>(i)),
             code_kind_statistics[i]);
    }
  }
  PrintF("\n");
}
static int CollectHistogramInfo(HeapObject* obj) {
  Isolate* isolate = obj->GetIsolate();
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  ASSERT(isolate->heap_histograms()[type].name() != NULL);
  isolate->heap_histograms()[type].increment_number(1);
  isolate->heap_histograms()[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)->IncrementSpillStatistics(
        isolate->js_spill_information());
  }

  return obj->Size();
}
static void ReportHistogram(Isolate* isolate, bool print_spill) {
  PrintF("\n  Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (isolate->heap_histograms()[i].number() > 0) {
      PrintF("    %-34s%10d (%10d bytes)\n",
             isolate->heap_histograms()[i].name(),
             isolate->heap_histograms()[i].number(),
             isolate->heap_histograms()[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)                  \
    string_number += isolate->heap_histograms()[type].number();  \
    string_bytes += isolate->heap_histograms()[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    isolate->js_spill_information()->Print();
  }
}
// Support for statistics gathering for --heap-stats and --log-gc.
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}


// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens when the --log-gc flag is set.
void NewSpace::CollectStatistics() {
  ClearHistograms();
  SemiSpaceIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
    RecordAllocation(obj);
}
static void DoReportStatistics(Isolate* isolate,
                               HistogramInfo* info, const char* description) {
  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)   \
    string_number += info[type].number();         \
    string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(isolate,
        HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(isolate,
          HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }
  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
}
void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / Capacity();
    PrintF("  capacity: %" V8_PTR_PREFIX "d"
               ", available: %" V8_PTR_PREFIX "d, %%%d\n",
           Capacity(), Available(), static_cast<int>(pct*100));
    PrintF("\n  Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF("    %-34s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

  if (FLAG_log_gc) {
    Isolate* isolate = heap()->isolate();
    DoReportStatistics(isolate, allocated_histogram_, "allocated");
    DoReportStatistics(isolate, promoted_histogram_, "promoted");
  }
}
void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}


void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}


size_t NewSpace::CommittedPhysicalMemory() {
  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = to_space_.CommittedPhysicalMemory();
  if (from_space_.is_committed()) {
    size += from_space_.CommittedPhysicalMemory();
  }
  return size;
}
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation

void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
  ASSERT(size_in_bytes > 0);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  // We write a map and possibly size information to the block. If the block
  // is big enough to be a FreeSpace with at least one extra word (the next
  // pointer), we set its map to be the free space map and its size to an
  // appropriate array length for the desired size from HeapObject::Size().
  // If the block is too small (e.g., one or two words) to hold both a size
  // field and a next pointer, we give it a filler map that gives it the
  // correct size.
  if (size_in_bytes > FreeSpace::kHeaderSize) {
    set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
    // Can't use FreeSpace::cast because it fails during deserialization.
    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
    this_as_free_space->set_size(size_in_bytes);
  } else if (size_in_bytes == kPointerSize) {
    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
  } else if (size_in_bytes == 2 * kPointerSize) {
    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
  } else {
    UNREACHABLE();
  }
  // We would like to ASSERT(Size() == size_in_bytes) but this would fail
  // during deserialization because the free space map is not done yet.
}


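// Illustrative sketch, not part of the original source: assuming a 32-bit
// build where kPointerSize == 4 and FreeSpace::kHeaderSize == 8 (assumed
// values), set_size() would choose the three encodings like this:
//
//   node->set_size(heap, 4);   // one-pointer filler map, no size field
//   node->set_size(heap, 8);   // two-pointer filler map, no size field
//   node->set_size(heap, 12);  // free space map plus length; Size() == 12
//
// Only the last form is large enough to also carry the free list's next
// pointer.

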
FreeListNode* FreeListNode::next() {
  ASSERT(IsFreeListNode(this));
  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
    return reinterpret_cast<FreeListNode*>(
        Memory::Address_at(address() + kNextOffset));
  } else {
    return reinterpret_cast<FreeListNode*>(
        Memory::Address_at(address() + kPointerSize));
  }
}


FreeListNode** FreeListNode::next_address() {
  ASSERT(IsFreeListNode(this));
  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
    ASSERT(Size() >= kNextOffset + kPointerSize);
    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
  } else {
    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
  }
}


void FreeListNode::set_next(FreeListNode* next) {
  ASSERT(IsFreeListNode(this));
  // While we are booting the VM the free space map will actually be null. So
  // we have to make sure that we don't try to use it for anything at that
  // stage.
  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
    Memory::Address_at(address() + kNextOffset) =
        reinterpret_cast<Address>(next);
  } else {
    Memory::Address_at(address() + kPointerSize) =
        reinterpret_cast<Address>(next);
  }
}


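// Note (a summary of the accessors above, not an original comment): the next
// pointer lives at kNextOffset for real FreeSpace blocks but directly after
// the map word (kPointerSize) for two-word fillers, so next(),
// next_address() and set_next() must all branch on the same map check.
// A typical walk is simply:
//
//   for (FreeListNode* n = top_; n != NULL; n = n->next()) { /* visit n */ }

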
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
  intptr_t free_bytes = 0;
  if (category->top_ != NULL) {
    ASSERT(category->end_ != NULL);
    // This is safe (not going to deadlock) since Concatenate operations
    // are never performed on the same free lists at the same time in
    // reversed order.
    LockGuard<Mutex> target_lock_guard(mutex());
    LockGuard<Mutex> source_lock_guard(category->mutex());
    free_bytes = category->available();
    if (end_ == NULL) {
      end_ = category->end();
    } else {
      category->end()->set_next(top_);
    }
    top_ = category->top();
    available_ += category->available();
    category->Reset();
  }
  return free_bytes;
}


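// Usage note (an assumption about the caller, not from the original source):
// concatenation merges a source category into this one, e.g. when a sweeper
// thread's private free list is folded back into the space's main list:
//
//   intptr_t freed = owner_list->Concatenate(sweeper_local_list);
//
// Because the merge always flows source to target, the two locks above are
// never taken in opposite orders, which is why the double lock cannot
// deadlock.

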
void FreeListCategory::Reset() {
  set_top(NULL);
  set_end(NULL);
  set_available(0);
}


intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
  int sum = 0;
  FreeListNode** n = &top_;
  while (*n != NULL) {
    if (Page::FromAddress((*n)->address()) == p) {
      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
      sum += free_space->Size();
      *n = (*n)->next();
    } else {
      n = (*n)->next_address();
    }
  }
  if (top_ == NULL) {
    end_ = NULL;
  }
  available_ -= sum;
  return sum;
}


bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
  FreeListNode** n = &top_;
  while (*n != NULL) {
    if (Page::FromAddress((*n)->address()) == p) return true;
    n = (*n)->next_address();
  }
  return false;
}


FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
  FreeListNode* node = top_;

  if (node == NULL) return NULL;

  while (node != NULL &&
         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
    available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
    node = node->next();
  }

  if (node != NULL) {
    set_top(node->next());
    *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
    available_ -= *node_size;
  } else {
    set_top(NULL);
  }

  if (top() == NULL) {
    set_end(NULL);
  }

  return node;
}


FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
                                                 int* node_size) {
  FreeListNode* node = PickNodeFromList(node_size);
  if (node != NULL && *node_size < size_in_bytes) {
    Free(node, *node_size);
    *node_size = 0;
    return NULL;
  }
  return node;
}


void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
  node->set_next(top_);
  top_ = node;
  if (end_ == NULL) {
    end_ = node;
  }
  available_ += size_in_bytes;
}


void FreeListCategory::RepairFreeList(Heap* heap) {
  FreeListNode* n = top_;
  while (n != NULL) {
    Map** map_location = reinterpret_cast<Map**>(n->address());
    if (*map_location == NULL) {
      *map_location = heap->free_space_map();
    } else {
      ASSERT(*map_location == heap->free_space_map());
    }
    n = n->next();
  }
}


FreeList::FreeList(PagedSpace* owner)
    : owner_(owner), heap_(owner->heap()) {
  Reset();
}


intptr_t FreeList::Concatenate(FreeList* free_list) {
  intptr_t free_bytes = 0;
  free_bytes += small_list_.Concatenate(free_list->small_list());
  free_bytes += medium_list_.Concatenate(free_list->medium_list());
  free_bytes += large_list_.Concatenate(free_list->large_list());
  free_bytes += huge_list_.Concatenate(free_list->huge_list());
  return free_bytes;
}


void FreeList::Reset() {
  small_list_.Reset();
  medium_list_.Reset();
  large_list_.Reset();
  huge_list_.Reset();
}


int FreeList::Free(Address start, int size_in_bytes) {
  if (size_in_bytes == 0) return 0;

  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(heap_, size_in_bytes);
  Page* page = Page::FromAddress(start);

  // Early return to drop too-small blocks on the floor.
  if (size_in_bytes < kSmallListMin) {
    page->add_non_available_small_blocks(size_in_bytes);
    return size_in_bytes;
  }

  // Insert other blocks at the head of a free list of the appropriate
  // magnitude.
  if (size_in_bytes <= kSmallListMax) {
    small_list_.Free(node, size_in_bytes);
    page->add_available_in_small_free_list(size_in_bytes);
  } else if (size_in_bytes <= kMediumListMax) {
    medium_list_.Free(node, size_in_bytes);
    page->add_available_in_medium_free_list(size_in_bytes);
  } else if (size_in_bytes <= kLargeListMax) {
    large_list_.Free(node, size_in_bytes);
    page->add_available_in_large_free_list(size_in_bytes);
  } else {
    huge_list_.Free(node, size_in_bytes);
    page->add_available_in_huge_free_list(size_in_bytes);
  }

  ASSERT(IsVeryLong() || available() == SumFreeLists());
  return size_in_bytes;
}


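// Illustrative routing with assumed threshold values (the real kSmallListMin
// and k*ListMax constants are defined elsewhere in this codebase): if, say,
// kSmallListMin were 32 bytes and the three maxima roughly 1 KB, 8 KB and
// 64 KB, Free() would route blocks like this:
//
//   Free(start, 16);       // below kSmallListMin: wasted, counted on page
//   Free(start, 0x100);    // -> small_list_
//   Free(start, 0x1000);   // -> medium_list_
//   Free(start, 0x8000);   // -> large_list_
//   Free(start, 0x20000);  // -> huge_list_

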
FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
  FreeListNode* node = NULL;
  Page* page = NULL;

  if (size_in_bytes <= kSmallAllocationMax) {
    node = small_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_small_free_list(-(*node_size));
      ASSERT(IsVeryLong() || available() == SumFreeLists());
      return node;
    }
  }

  if (size_in_bytes <= kMediumAllocationMax) {
    node = medium_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_medium_free_list(-(*node_size));
      ASSERT(IsVeryLong() || available() == SumFreeLists());
      return node;
    }
  }

  if (size_in_bytes <= kLargeAllocationMax) {
    node = large_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_large_free_list(-(*node_size));
      ASSERT(IsVeryLong() || available() == SumFreeLists());
      return node;
    }
  }

  int huge_list_available = huge_list_.available();
  for (FreeListNode** cur = huge_list_.GetTopAddress();
       *cur != NULL;
       cur = (*cur)->next_address()) {
    FreeListNode* cur_node = *cur;
    while (cur_node != NULL &&
           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
      int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
      huge_list_available -= size;
      page = Page::FromAddress(cur_node->address());
      page->add_available_in_huge_free_list(-size);
      cur_node = cur_node->next();
    }

    *cur = cur_node;
    if (cur_node == NULL) {
      huge_list_.set_end(NULL);
      break;
    }

    ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
    int size = cur_as_free_space->Size();
    if (size >= size_in_bytes) {
      // Large enough node found. Unlink it from the list.
      node = *cur;
      *cur = node->next();
      *node_size = size;
      huge_list_available -= size;
      page = Page::FromAddress(node->address());
      page->add_available_in_huge_free_list(-size);
      break;
    }
  }

  if (huge_list_.top() == NULL) {
    huge_list_.set_end(NULL);
  }
  huge_list_.set_available(huge_list_available);

  if (node != NULL) {
    ASSERT(IsVeryLong() || available() == SumFreeLists());
    return node;
  }

  if (size_in_bytes <= kSmallListMax) {
    node = small_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_small_free_list(-(*node_size));
    }
  } else if (size_in_bytes <= kMediumListMax) {
    node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_medium_free_list(-(*node_size));
    }
  } else if (size_in_bytes <= kLargeListMax) {
    node = large_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_large_free_list(-(*node_size));
    }
  }

  if (node != NULL) {
    ASSERT(IsVeryLong() || available() == SumFreeLists());
  }
  return node;
}


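// Strategy summary (editorial, not an original comment): FindNodeFor() first
// tries a size class whose every node is guaranteed large enough (the
// k*AllocationMax bounds), then scans the huge list for a first fit, and
// only then falls back to the PickNodeFromList overload that takes
// size_in_bytes, where individual nodes may be too small and must be
// size-checked before use.

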
// Allocation on the old space free list. If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then NULL is returned, and the caller can perform a GC
// or allocate a new page before retrying.
HeapObject* FreeList::Allocate(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  ASSERT(size_in_bytes <= kMaxBlockSize);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));
  // Don't free list allocate if there is linear space available.
  ASSERT(owner_->limit() - owner_->top() < size_in_bytes);

  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap. This also puts it back in the free list
  // if it is big enough.
  owner_->Free(owner_->top(), old_linear_size);

  owner_->heap()->incremental_marking()->OldSpaceStep(
      size_in_bytes - old_linear_size);

  int new_node_size = 0;
  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
  if (new_node == NULL) {
    owner_->SetTopAndLimit(NULL, NULL);
    return NULL;
  }

  int bytes_left = new_node_size - size_in_bytes;
  ASSERT(bytes_left >= 0);

#ifdef DEBUG
  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
    reinterpret_cast<Object**>(new_node->address())[i] =
        Smi::FromInt(kCodeZapValue);
  }
#endif

  // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  const int kThreshold = IncrementalMarking::kAllocatedThreshold;

  // Memory in the linear allocation area is counted as allocated. We may free
  // a little of this again immediately - see below.
  owner_->Allocate(new_node_size);

  if (owner_->heap()->inline_allocation_disabled()) {
    // Keep the linear allocation area empty if requested to do so, just
    // return the area back to the free list instead.
    owner_->Free(new_node->address() + size_in_bytes, bytes_left);
    ASSERT(owner_->top() == NULL && owner_->limit() == NULL);
  } else if (bytes_left > kThreshold &&
             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
             FLAG_incremental_marking_steps) {
    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
    // We don't want to give too large linear areas to the allocator while
    // incremental marking is going on, because we won't check again whether
    // we want to do another increment until the linear area is used up.
    owner_->Free(new_node->address() + size_in_bytes + linear_size,
                 new_node_size - size_in_bytes - linear_size);
    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                           new_node->address() + size_in_bytes + linear_size);
  } else if (bytes_left > 0) {
    // Normally we give the rest of the node to the allocator as its new
    // linear allocation area.
    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                           new_node->address() + new_node_size);
  } else {
    // TODO(gc) Try not freeing linear allocation region when bytes_left
    // is zero.
    owner_->SetTopAndLimit(NULL, NULL);
  }

  return new_node;
}


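// Worked example (illustrative numbers only): a request of size_in_bytes ==
// 64 that finds a 256-byte node leaves bytes_left == 192. Outside of
// incremental marking the caller's new linear area becomes
//
//   [new_node + 64, new_node + 256)
//
// while during incomplete incremental marking only about kThreshold bytes of
// that tail are kept and the remainder is freed back to the list, so marking
// steps are taken more frequently.

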
intptr_t FreeList::EvictFreeListItems(Page* p) {
  intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
  p->set_available_in_huge_free_list(0);

  if (sum < p->area_size()) {
    sum += small_list_.EvictFreeListItemsInList(p) +
        medium_list_.EvictFreeListItemsInList(p) +
        large_list_.EvictFreeListItemsInList(p);
    p->set_available_in_small_free_list(0);
    p->set_available_in_medium_free_list(0);
    p->set_available_in_large_free_list(0);
  }

  return sum;
}


bool FreeList::ContainsPageFreeListItems(Page* p) {
  // Use the non-destructive Contains* helpers here; the Evict* variants
  // would unlink matching nodes as a side effect.
  return huge_list_.ContainsPageFreeListItemsInList(p) ||
         small_list_.ContainsPageFreeListItemsInList(p) ||
         medium_list_.ContainsPageFreeListItemsInList(p) ||
         large_list_.ContainsPageFreeListItemsInList(p);
}


void FreeList::RepairLists(Heap* heap) {
  small_list_.RepairFreeList(heap);
  medium_list_.RepairFreeList(heap);
  large_list_.RepairFreeList(heap);
  huge_list_.RepairFreeList(heap);
}


#ifdef DEBUG
intptr_t FreeListCategory::SumFreeList() {
  intptr_t sum = 0;
  FreeListNode* cur = top_;
  while (cur != NULL) {
    ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
    sum += cur_as_free_space->Size();
    cur = cur->next();
  }
  return sum;
}


static const int kVeryLongFreeList = 500;


int FreeListCategory::FreeListLength() {
  int length = 0;
  FreeListNode* cur = top_;
  while (cur != NULL) {
    length++;
    cur = cur->next();
    if (length == kVeryLongFreeList) return length;
  }
  return length;
}


bool FreeList::IsVeryLong() {
  if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
  if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
  if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
  if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
  return false;
}


// This can take a very long time because it is linear in the number of entries
// on the free list, so it should not be called if FreeListLength returns
// kVeryLongFreeList.
intptr_t FreeList::SumFreeLists() {
  intptr_t sum = small_list_.SumFreeList();
  sum += medium_list_.SumFreeList();
  sum += large_list_.SumFreeList();
  sum += huge_list_.SumFreeList();
  return sum;
}
#endif


// -----------------------------------------------------------------------------
// OldSpace implementation

void PagedSpace::PrepareForMarkCompact() {
  // We don't have a linear allocation area while sweeping. It will be
  // restored on the first allocation after the sweep.
  EmptyAllocationInfo();

  // Stop lazy sweeping and clear marking bits for unswept pages.
  if (first_unswept_page_ != NULL) {
    Page* p = first_unswept_page_;
    do {
      // Do not use ShouldBeSweptLazily predicate here.
      // New evacuation candidates were selected but they still have
      // to be swept before collection starts.
      if (!p->WasSwept()) {
        Bitmap::Clear(p);
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
                 reinterpret_cast<intptr_t>(p));
        }
      }
      p = p->next_page();
    } while (p != anchor());
  }
  first_unswept_page_ = Page::FromAddress(NULL);
  unswept_free_bytes_ = 0;

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}


intptr_t PagedSpace::SizeOfObjects() {
  ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
  return Size() - unswept_free_bytes_ - (limit() - top());
}


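// Worked example (illustrative numbers): with Size() == 1 MB, 64 KB of
// unswept free bytes, and 16 KB of unused linear area between top() and
// limit(), SizeOfObjects() reports 1 MB - 64 KB - 16 KB; both subtracted
// terms are memory the space owns but no live object occupies.

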
// After we have booted, we have created a map which represents free space
// on the heap. If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally NULL), so we need to
// fix them.
void PagedSpace::RepairFreeListsAfterBoot() {
  free_list_.RepairLists(heap());
}


bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
  if (IsLazySweepingComplete()) return true;

  intptr_t freed_bytes = 0;
  Page* p = first_unswept_page_;
  do {
    Page* next_page = p->next_page();
    if (ShouldBeSweptLazily(p)) {
      if (FLAG_gc_verbose) {
        PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
               reinterpret_cast<intptr_t>(p));
      }
      DecreaseUnsweptFreeBytes(p);
      freed_bytes +=
          MarkCompactCollector::
              SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
                  this, NULL, p);
    }
    p = next_page;
  } while (p != anchor() && freed_bytes < bytes_to_sweep);

  if (p == anchor()) {
    first_unswept_page_ = Page::FromAddress(NULL);
  } else {
    first_unswept_page_ = p;
  }

  heap()->FreeQueuedChunks();

  return IsLazySweepingComplete();
}


void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
  if (allocation_info_.top() >= allocation_info_.limit()) return;

  if (Page::FromAllocationTop(allocation_info_.top())->
      IsEvacuationCandidate()) {
    // Create a filler object to keep the page iterable if it was iterable.
    int remaining =
        static_cast<int>(allocation_info_.limit() - allocation_info_.top());
    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);

    allocation_info_.set_top(NULL);
    allocation_info_.set_limit(NULL);
  }
}


bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (collector->AreSweeperThreadsActivated()) {
    if (collector->IsConcurrentSweepingInProgress()) {
      if (collector->RefillFreeLists(this) < size_in_bytes) {
        if (!collector->sequential_sweeping()) {
          collector->WaitUntilSweepingCompleted();
          return true;
        }
      }
      return false;
    }
    return true;
  } else {
    return AdvanceSweeper(size_in_bytes);
  }
}


HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
  // Allocation in this space has failed.

  // If there are unswept pages, advance the lazy sweeper a bounded number of
  // times until we find a size_in_bytes contiguous piece of memory.
  const int kMaxSweepingTries = 5;
  bool sweeping_complete = false;

  for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
    sweeping_complete = EnsureSweeperProgress(size_in_bytes);

    // Retry the free list allocation.
    HeapObject* object = free_list_.Allocate(size_in_bytes);
    if (object != NULL) return object;
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
  if (Expand()) {
    ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
    return free_list_.Allocate(size_in_bytes);
  }

  // Last ditch, sweep all the remaining pages to try to find space. This may
  // cause a pause.
  if (!IsLazySweepingComplete()) {
    EnsureSweeperProgress(kMaxInt);

    // Retry the free list allocation.
    HeapObject* object = free_list_.Allocate(size_in_bytes);
    if (object != NULL) return object;
  }

  // Finally, fail.
  return NULL;
}


#ifdef DEBUG
void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ReportCodeKindStatistics(isolate->code_kind_statistics());
  PrintF("Code comment statistics (\"   [ comment-txt   :    size/   "
         "count  (average)\"):\n");
  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    if (cs.size > 0) {
      PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
             cs.size/cs.count);
    }
  }
  PrintF("\n");
}


void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ClearCodeKindStatistics(isolate->code_kind_statistics());
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    comments_statistics[i].Clear();
  }
  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
  comments_statistics[CommentStatistic::kMaxComments].size = 0;
  comments_statistics[CommentStatistic::kMaxComments].count = 0;
}


// Adds a comment to the 'comment_statistics' table. Performance OK as long
// as 'kMaxComments' is small.
static void EnterComment(Isolate* isolate, const char* comment, int delta) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  // Do not count empty comments.
  if (delta <= 0) return;
  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
  // Search for a free or matching entry in 'comments_statistics': 'cs'
  // points to result.
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    if (comments_statistics[i].comment == NULL) {
      cs = &comments_statistics[i];
      cs->comment = comment;
      break;
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
      break;
    }
  }
  // Update entry for 'comment'.
  cs->size += delta;
  cs->count += 1;
}


// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']'). RelocIterator 'it' must point to a comment reloc info.
static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  ASSERT(!it->done());
  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip.
    return;
  }

  // Search for end of nested comment or a new nested comment.
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, and therefore exit
    // the loop below.
    ASSERT(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested comment.
      // A new comment.
      CollectCommentStatistics(isolate, it);
      // Skip code that was covered with previous comment.
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(isolate, comment_txt, flat_delta);
}


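// Illustrative layout (inferred from the parser above, not an original
// comment): nested code comments in the reloc stream look like
//
//   [ outer
//     ...code...        attributed to "outer"
//     [ inner
//       ...code...      attributed to "inner"
//     ]
//     ...code...        attributed to "outer" again
//   ]
//
// so each comment is charged only its "flat" delta, i.e. the code not
// already covered by a nested inner comment.

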
// Collects code size statistics:
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      ASSERT(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);
    }
  }
}


void PagedSpace::ReportStatistics() {
  int pct = static_cast<int>(Available() * 100 / Capacity());
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
         ", waste: %" V8_PTR_PREFIX "d"
         ", available: %" V8_PTR_PREFIX "d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  if (was_swept_conservatively_) return;
  ClearHistograms(heap()->isolate());
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
    CollectHistogramInfo(obj);
  ReportHistogram(heap()->isolate(), true);
}
#endif


// -----------------------------------------------------------------------------
// MapSpace implementation
// TODO(mvstanton): this is weird... the compiler can't make a vtable unless
// there is at least one non-inlined virtual function. I would prefer to hide
// the VerifyObject definition behind VERIFY_HEAP.

void MapSpace::VerifyObject(HeapObject* object) {
  CHECK(object->IsMap());
}


// -----------------------------------------------------------------------------
// CellSpace and PropertyCellSpace implementation
// TODO(mvstanton): this is weird... the compiler can't make a vtable unless
// there is at least one non-inlined virtual function. I would prefer to hide
// the VerifyObject definition behind VERIFY_HEAP.

void CellSpace::VerifyObject(HeapObject* object) {
  CHECK(object->IsCell());
}


void PropertyCellSpace::VerifyObject(HeapObject* object) {
  CHECK(object->IsPropertyCell());
}


// -----------------------------------------------------------------------------
// LargeObjectIterator

LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_page_;
  size_func_ = NULL;
}


LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
                                         HeapObjectCallback size_func) {
  current_ = space->first_page_;
  size_func_ = size_func;
}


HeapObject* LargeObjectIterator::Next() {
  if (current_ == NULL) return NULL;

  HeapObject* object = current_->GetObject();
  current_ = current_->next_page();
  return object;
}


// -----------------------------------------------------------------------------
// LargeObjectSpace

static bool ComparePointers(void* key1, void* key2) {
  return key1 == key2;
}


LargeObjectSpace::LargeObjectSpace(Heap* heap,
                                   intptr_t max_capacity,
                                   AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      max_capacity_(max_capacity),
      first_page_(NULL),
      size_(0),
      page_count_(0),
      objects_size_(0),
      chunk_map_(ComparePointers, 1024) {}


bool LargeObjectSpace::SetUp() {
  first_page_ = NULL;
  size_ = 0;
  maximum_committed_ = 0;
  page_count_ = 0;
  objects_size_ = 0;
  chunk_map_.Clear();
  return true;
}


void LargeObjectSpace::TearDown() {
  while (first_page_ != NULL) {
    LargePage* page = first_page_;
    first_page_ = first_page_->next_page();
    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));

    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
        space, kAllocationActionFree, page->size());
    heap()->isolate()->memory_allocator()->Free(page);
  }
  SetUp();
}


MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
                                           Executability executable) {
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return Failure::RetryAfterGC(identity());
  }

  if (Size() + object_size > max_capacity_) {
    return Failure::RetryAfterGC(identity());
  }

  LargePage* page = heap()->isolate()->memory_allocator()->
      AllocateLargePage(object_size, this, executable);
  if (page == NULL) return Failure::RetryAfterGC(identity());
  ASSERT(page->area_size() >= object_size);

  size_ += static_cast<int>(page->size());
  objects_size_ += object_size;
  page_count_++;
  page->set_next_page(first_page_);
  first_page_ = page;

  if (size_ > maximum_committed_) {
    maximum_committed_ = size_;
  }

  // Register all MemoryChunk::kAlignment-aligned chunks covered by
  // this large page in the chunk map.
  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
  for (uintptr_t key = base; key <= limit; key++) {
    HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                              static_cast<uint32_t>(key),
                                              true);
    ASSERT(entry != NULL);
    entry->value = page;
  }

  HeapObject* object = page->GetObject();

  if (Heap::ShouldZapGarbage()) {
    // Make the object consistent so the heap can be verified in OldSpaceStep.
    // We only need to do this in debug builds or if verify_heap is on.
    reinterpret_cast<Object**>(object->address())[0] =
        heap()->fixed_array_map();
    reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
  }

  heap()->incremental_marking()->OldSpaceStep(object_size);
  return object;
}


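// Worked example (illustrative numbers, assuming MemoryChunk::kAlignment ==
// 1 MB): a large page at 0x40000000 with size 3 MB yields base == 0x400 and
// limit == 0x400 + 2 == 0x402, so keys 0x400..0x402 all map to the page and
// FindPage() can recover the owner of any interior address with one hash
// lookup.

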
size_t LargeObjectSpace::CommittedPhysicalMemory() {
  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
  size_t size = 0;
  LargePage* current = first_page_;
  while (current != NULL) {
    size += current->CommittedPhysicalMemory();
    current = current->next_page();
  }
  return size;
}


MaybeObject* LargeObjectSpace::FindObject(Address a) {
  LargePage* page = FindPage(a);
  if (page != NULL) {
    return page->GetObject();
  }
  return Failure::Exception();
}


LargePage* LargeObjectSpace::FindPage(Address a) {
  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                        static_cast<uint32_t>(key),
                                        false);
  if (e != NULL) {
    ASSERT(e->value != NULL);
    LargePage* page = reinterpret_cast<LargePage*>(e->value);
    ASSERT(page->is_valid());
    if (page->Contains(a)) {
      return page;
    }
  }
  return NULL;
}


void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* previous = NULL;
  LargePage* current = first_page_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    // Can this large page contain pointers to non-trivial objects? No other
    // pointer object is this big.
    bool is_pointer_object = object->IsFixedArray();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) {
      mark_bit.Clear();
      Page::FromAddress(object->address())->ResetProgressBar();
      Page::FromAddress(object->address())->ResetLiveBytes();
      previous = current;
      current = current->next_page();
    } else {
      LargePage* page = current;
      // Cut the chunk out from the chunk list.
      current = current->next_page();
      if (previous == NULL) {
        first_page_ = current;
      } else {
        previous->set_next_page(current);
      }

      // Free the chunk.
      heap()->mark_compact_collector()->ReportDeleteIfNeeded(
          object, heap()->isolate());
      size_ -= static_cast<int>(page->size());
      objects_size_ -= object->Size();
      page_count_--;

      // Remove entries belonging to this page.
      // Use variable alignment to help pass length check (<= 80 characters)
      // of single line in tools/presubmit.py.
      const intptr_t alignment = MemoryChunk::kAlignment;
      uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
      uintptr_t limit = base + (page->size()-1)/alignment;
      for (uintptr_t key = base; key <= limit; key++) {
        chunk_map_.Remove(reinterpret_cast<void*>(key),
                          static_cast<uint32_t>(key));
      }

      if (is_pointer_object) {
        heap()->QueueMemoryChunkForFree(page);
      } else {
        heap()->isolate()->memory_allocator()->Free(page);
      }
    }
  }
  heap()->FreeQueuedChunks();
}


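// Design note (an inference, not an original comment): pages that may hold
// pointers are queued via QueueMemoryChunkForFree() instead of being freed
// immediately, giving the rest of the GC machinery (e.g. the store buffer) a
// chance to drop references into them before FreeQueuedChunks() actually
// releases the memory.

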
bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);

  bool owned = (chunk->owner() == this);

  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());

  return owned;
}


#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargePage* chunk = first_page_;
       chunk != NULL;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    CHECK(object->address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    CHECK(map->IsMap());
    CHECK(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    CHECK(object->IsCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsFixedArray() ||
          object->IsFixedDoubleArray() || object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          CHECK(heap()->Contains(element_object));
          CHECK(element_object->map()->IsMap());
        }
      }
    }
  }
}
#endif


#ifdef DEBUG
void LargeObjectSpace::Print() {
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    obj->Print();
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms(heap()->isolate());
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF("  number of objects %d, "
         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}


void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}


void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n",
         this->address(),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next();
       object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::MarkBitFrom(object).Get();
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}

#endif  // DEBUG

} }  // namespace v8::internal