// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/spaces.h"
#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/msan.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Bitmap

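// Clear all mark bits on the chunk and reset its live-byte count.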
void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}

// -----------------------------------------------------------------------------
// PageIterator

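// Walks the linked list of pages owned by a paged space, starting at the
// space's anchor page.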
PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) {}


bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }


Page* PageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}

// -----------------------------------------------------------------------------
// NewSpacePageIterator

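// Iterates over either the pages of a single semispace, the to-space pages of
// the new space, or an explicit address range, depending on the constructor.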
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}


NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) {}


NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}

bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }


NewSpacePage* NewSpacePageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}

// -----------------------------------------------------------------------------
// HeapObjectIterator

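// Return the next object on the current page. The hole between the
// allocation top and limit is skipped, filler objects are passed over, and
// NULL is returned once the end of the page is reached.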
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    DCHECK(cur_addr_ <= cur_end_);
    // TODO(hpayer): Remove the debugging code.
    if (cur_addr_ > cur_end_) {
      space_->heap()->isolate()->PushStackTraceAndDie(0xaaaaaaaa, obj, NULL,
                                                      obj_size);
    }

    if (!obj->IsFiller()) {
      DCHECK_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}

// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

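// Thin wrappers that delegate protection and unprotection of heap memory to
// the OS layer; compiled only when heap protection is enabled.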
void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}

void MemoryAllocator::Unprotect(Address start, size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}

void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}

void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif

// --------------------------------------------------------------------------
// PagedSpace

Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
  DCHECK(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}

bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!Page::IsValid(p)) return false;
  return p->owner() == this;
}

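// Toggle the SCAN_ON_SCAVENGE flag, keeping the heap's count of
// scan-on-scavenge pages in sync.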
void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}

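// Map an arbitrary pointer to the MemoryChunk containing it. If masking the
// address to page alignment does not yield a chunk with an owner, the pointer
// must point into a large object, so the large object space is searched.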
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(heap->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}

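// Record the highest allocation top ever observed on the chunk that contains
// the given mark.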
void MemoryChunk::UpdateHighWaterMark(Address mark) {
  if (mark == NULL) return;
  // Need to subtract one from the mark because when a chunk is full the
  // top points to the next address after the chunk, which effectively belongs
  // to another chunk. See the comment to Page::FromAllocationTop.
  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
  int new_mark = static_cast<int>(mark - chunk->address());
  if (new_mark > chunk->high_water_mark_) {
    chunk->high_water_mark_ = new_mark;
  }
}

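// Visits the chunks of all spaces that can contain pointers into the new
// space: old space, map space, and large object space.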
PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldSpaceState),
      old_iterator_(heap->old_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) {}

Page* Page::next_page() {
  DCHECK(next_chunk()->owner() == owner());
  return static_cast<Page*>(next_chunk());
}

Page* Page::prev_page() {
  DCHECK(prev_chunk()->owner() == owner());
  return static_cast<Page*>(prev_chunk());
}

void Page::set_next_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_next_chunk(page);
}

void Page::set_prev_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_prev_chunk(page);
}

// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}

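// Linear allocation with an alignment constraint: when the current top is
// misaligned, a filler object is inserted in front of the new object and
// *size_in_bytes is increased to cover it.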
HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
                                                AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + *size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    *size_in_bytes += filler_size;
    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                     filler_size);
  }

  return HeapObject::FromAddress(current_top);
}

// Raw allocation.
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);

  if (object == NULL) {
    object = free_list_.Allocate(size_in_bytes);
    if (object == NULL) {
      object = SlowAllocateRaw(size_in_bytes);
    }
  }

  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
    return object;
  }

  return AllocationResult::Retry(identity());
}

// Raw aligned allocation.
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                AllocationAlignment alignment) {
  DCHECK(identity() == OLD_SPACE);
  int allocation_size = size_in_bytes;
  HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);

  if (object == NULL) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    int filler_size = Heap::GetMaximumFillToAlign(alignment);
    allocation_size += filler_size;
    object = free_list_.Allocate(allocation_size);
    if (object == NULL) {
      object = SlowAllocateRaw(allocation_size);
    }
    if (object != NULL && filler_size != 0) {
      object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
                                       alignment);
      // Filler objects are initialized, so mark only the aligned object memory
      // as uninitialized.
      allocation_size = size_in_bytes;
    }
  }

  if (object != NULL) {
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
    return object;
  }

  return AllocationResult::Retry(identity());
}

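// Double alignment only needs special handling on 32-bit platforms, where
// object word alignment does not already guarantee it.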
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                         AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}

// -----------------------------------------------------------------------------
// NewSpace

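// Bump-pointer allocation in the to-space allocation buffer; requests that do
// not fit in the remaining buffer fall back to SlowAllocateRaw.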
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                              AllocationAlignment alignment) {
  Address old_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(old_top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (allocation_info_.limit() - old_top < aligned_size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes, alignment);
  }

  HeapObject* obj = HeapObject::FromAddress(old_top);
  allocation_info_.set_top(allocation_info_.top() + aligned_size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  if (filler_size > 0) {
    obj = heap()->PrecedeWithFiller(obj, filler_size);
  }

  // The slow path above ultimately goes through AllocateRaw, so this suffices.
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}

AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
  Address old_top = allocation_info_.top();

  if (allocation_info_.limit() - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes, kWordAligned);
  }

  HeapObject* obj = HeapObject::FromAddress(old_top);
  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // The slow path above ultimately goes through AllocateRaw, so this suffices.
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}

AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
                                       AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}

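// A large page holds a single object; only the incremental marking page
// flags need to be set up.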
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}

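// Available space is bounded by what the memory allocator can still hand out.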
intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}

}
}  // namespace v8::internal

#endif  // V8_HEAP_SPACES_INL_H_