// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/msan.h"  // For MSAN_ALLOCATED_UNINITIALIZED_MEMORY.
#include "src/profiler/heap-profiler.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}


// -----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) {}


bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }


Page* PageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// SemiSpaceIterator
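
// Walks objects in the semispace one by one, hopping to the next page when
// the cursor reaches a page boundary, and returns NULL once limit_ is hit.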
HeapObject* SemiSpaceIterator::Next() {
  if (current_ == limit_) return NULL;
  if (NewSpacePage::IsAtEnd(current_)) {
    NewSpacePage* page = NewSpacePage::FromLimit(current_);
    page = page->next_page();
    DCHECK(!page->is_anchor());
    current_ = page->area_start();
    if (current_ == limit_) return NULL;
  }

  HeapObject* object = HeapObject::FromAddress(current_);
  int size = object->Size();

  current_ += size;
  return object;
}


HeapObject* SemiSpaceIterator::next_object() { return Next(); }


// -----------------------------------------------------------------------------
// NewSpacePageIterator

NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}


NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) {}


NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }


NewSpacePage* NewSpacePageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// HeapObjectIterator
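
// Returns the next live object on the current page, advancing to further
// pages as needed; returns NULL once the whole space has been iterated.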
HeapObject* HeapObjectIterator::Next() {
  do {
    HeapObject* next_obj = FromCurrentPage();
    if (next_obj != NULL) return next_obj;
  } while (AdvanceToNextPage());
  return NULL;
}


HeapObject* HeapObjectIterator::next_object() { return Next(); }
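

// Scans the current page from cur_addr_ to cur_end_, stepping over the unused
// linear allocation area (top..limit) and skipping filler objects.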
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = obj->Size();
    cur_addr_ += obj_size;
    DCHECK(cur_addr_ <= cur_end_);
    // TODO(hpayer): Remove the debugging code.
    if (cur_addr_ > cur_end_) {
      space_->heap()->isolate()->PushStackTraceAndDie(0xaaaaaaaa, obj, NULL,
                                                      obj_size);
    }

    if (!obj->IsFiller()) {
      DCHECK_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}


// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start, size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// --------------------------------------------------------------------------
// AllocationResult
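
// A retry result encodes the space to retry in as a Smi stored in object_.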
AllocationSpace AllocationResult::RetrySpace() {
  DCHECK(IsRetry());
  return static_cast<AllocationSpace>(Smi::cast(object_)->value());
}


// --------------------------------------------------------------------------
// PagedSpace
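
// Turns a freshly committed memory chunk into a page of |owner|: the page's
// capacity is registered with the owner and its whole area put on the free
// list.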
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  page->mutex_ = new base::Mutex();
  DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
  DCHECK(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}


bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}


bool PagedSpace::Contains(HeapObject* o) { return Contains(o->address()); }
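

// Sets or clears the SCAN_ON_SCAVENGE flag and keeps the heap-wide counter of
// scan-on-scavenge pages in sync with the transition.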
void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}
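

// Masking addr down to the page boundary finds the chunk header for regular
// pages, but not for pointers into a large object, so fall back to searching
// the large object space.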
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(heap->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}


PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldSpaceState),
      old_iterator_(heap->old_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) {}
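

// Hands out pointer-containing chunks space by space: old space pages, then
// map space pages, then the chunks holding fixed arrays in large object
// space; NULL once everything has been visited.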
MemoryChunk* PointerChunkIterator::next() {
  switch (state_) {
    case kOldSpaceState: {
      if (old_iterator_.has_next()) {
        return old_iterator_.next();
      }
      state_ = kMapState;
      // Fall through.
    }
    case kMapState: {
      if (map_iterator_.has_next()) {
        return map_iterator_.next();
      }
      state_ = kLargeObjectState;
      // Fall through.
    }
    case kLargeObjectState: {
      HeapObject* heap_object;
      do {
        heap_object = lo_iterator_.Next();
        if (heap_object == NULL) {
          state_ = kFinishedState;
          return NULL;
        }
        // Fixed arrays are the only pointer-containing objects in large
        // object space.
      } while (!heap_object->IsFixedArray());
      MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
      return answer;
    }
    case kFinishedState:
      return NULL;
    default:
      break;
  }
  UNREACHABLE();
  return NULL;
}


void Page::set_next_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_next_chunk(page);
}


void Page::set_prev_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_prev_chunk(page);
}


// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}
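

// Like AllocateLinearly, but aligns the returned object and reports any
// alignment filler by growing *size_in_bytes accordingly.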
HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
                                                AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + *size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    *size_in_bytes += filler_size;
    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                     filler_size);
  }

  return HeapObject::FromAddress(current_top);
}
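

// Raw allocation: tries the linear allocation area first, then the free list,
// and finally the slow path.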
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);

  if (object == NULL) {
    object = free_list_.Allocate(size_in_bytes);
    if (object == NULL) {
      object = SlowAllocateRaw(size_in_bytes);
    }
  }

  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
    return object;
  }

  return AllocationResult::Retry(identity());
}
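

// Mutex-guarded variant of AllocateRawUnaligned for allocations that may race
// with other users of this space.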
AllocationResult PagedSpace::AllocateRawUnalignedSynchronized(
    int size_in_bytes) {
  base::LockGuard<base::Mutex> lock_guard(&space_mutex_);
  return AllocateRawUnaligned(size_in_bytes);
}
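

// Raw allocation with explicit alignment, supported for old space only. When
// the free list or slow path is used, the worst-case filler is requested up
// front and the result realigned afterwards.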
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                AllocationAlignment alignment) {
  DCHECK(identity() == OLD_SPACE);
  int allocation_size = size_in_bytes;
  HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);

  if (object == NULL) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    int filler_size = Heap::GetMaximumFillToAlign(alignment);
    allocation_size += filler_size;
    object = free_list_.Allocate(allocation_size);
    if (object == NULL) {
      object = SlowAllocateRaw(allocation_size);
    }
    if (object != NULL && filler_size != 0) {
      object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
                                       alignment);
      // Filler objects are initialized, so mark only the aligned object memory
      // as uninitialized.
      allocation_size = size_in_bytes;
    }
  }

  if (object != NULL) {
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
    return object;
  }

  return AllocationResult::Retry(identity());
}
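

// On 32-bit hosts double fields need explicit double alignment; on 64-bit
// hosts word alignment is already sufficient.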
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                         AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


// -----------------------------------------------------------------------------
// NewSpace
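
// Bump-pointer allocation in to-space with alignment. If the linear area is
// too small, EnsureAllocation tries to make room and the top and filler size
// are recomputed before the object is placed.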
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                              AllocationAlignment alignment) {
  Address top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (allocation_info_.limit() - top < aligned_size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, alignment)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
    filler_size = Heap::GetFillToAlign(top, alignment);
    aligned_size_in_bytes = size_in_bytes + filler_size;
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + aligned_size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  if (filler_size > 0) {
    obj = heap()->PrecedeWithFiller(obj, filler_size);
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
  Address top = allocation_info_.top();
  if (allocation_info_.limit() - top < size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}
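

// Same dispatch as PagedSpace::AllocateRaw: alignment only matters on 32-bit
// hosts.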
AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
                                       AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}
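

// Rough upper bound on how much more large-object memory could be allocated,
// derived from what the memory allocator still has available.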
intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}

}
}  // namespace v8::internal

#endif  // V8_HEAP_SPACES_INL_H_