// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "heap-profiler.h"
#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

// Clear all mark bits on the chunk and reset the live-byte count that is
// derived from them.
void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}


// -----------------------------------------------------------------------------
// PageIterator


PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) { }


bool PageIterator::has_next() {
  return next_page_ != &space_->anchor_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}
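

// Typical use, as a sketch (real callers live elsewhere in the heap code):
//
//   PageIterator it(space);
//   while (it.has_next()) {
//     Page* page = it.next();
//     // ... process page ...
//   }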


// -----------------------------------------------------------------------------
// NewSpacePageIterator


NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) { }

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


// Unlike PageIterator, the range is inclusive of last_page_: has_next()
// tests prev_page_, so next() may still return last_page_ itself.
bool NewSpacePageIterator::has_next() {
  return prev_page_ != last_page_;
}


NewSpacePage* NewSpacePageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// HeapObjectIterator

HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    // Skip over the current linear allocation gap between top and limit.
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_end_);
    if (!obj->IsFiller()) {
      ASSERT_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}
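

// FromCurrentPage() is the per-page step behind HeapObjectIterator::Next().
// A typical walk over a paged space, as a sketch:
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit obj ...
//   }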


// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// --------------------------------------------------------------------------
// PagedSpace

Page* Page::Initialize(Heap* heap,
                       MemoryChunk* chunk,
                       Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  ASSERT(page->area_size() <= kMaxRegularHeapObjectSize);
  ASSERT(chunk->owner() == owner);
  // Account for the page's capacity and hand its usable area to the free list.
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}


bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}


void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}


MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
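  // The mask clears the low page-offset bits, rounding addr down to a page
  // base. Assuming 1 MB pages (kPageAlignmentMask == 0xFFFFF), an addr of
  // 0x12345678 would yield the candidate base 0x12300000; the numbers here
  // are illustrative only.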
  if (maybe->owner() != NULL) return maybe;
  // Not a regular page, so the address must be inside a large object.
  LargeObjectIterator iterator(heap->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) return chunk;
    }
  }
  UNREACHABLE();
  return NULL;
}


void MemoryChunk::UpdateHighWaterMark(Address mark) {
  if (mark == NULL) return;
  // Need to subtract one from the mark because when a chunk is full the
  // top points to the next address after the chunk, which effectively belongs
  // to another chunk. See the comment to Page::FromAllocationTop.
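  // Worked example (illustrative, assuming 1 MB chunks): a full chunk at
  // 0x12300000 has top == 0x12400000, the base of the next chunk, so
  // FromAddress(mark - 1) maps 0x123FFFFF back to the chunk that was filled.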
  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
  int new_mark = static_cast<int>(mark - chunk->address());
  if (new_mark > chunk->high_water_mark_) {
    chunk->high_water_mark_ = new_mark;
  }
}


PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) { }


Page* Page::next_page() {
  ASSERT(next_chunk()->owner() == owner());
  return static_cast<Page*>(next_chunk());
}


Page* Page::prev_page() {
  ASSERT(prev_chunk()->owner() == owner());
  return static_cast<Page*>(prev_chunk());
}


void Page::set_next_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_next_chunk(page);
}


void Page::set_prev_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_prev_chunk(page);
}


// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}
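

// Raw allocation tries three tiers in order: the linear bump-pointer area,
// the free list, and SlowAllocateRaw(), which may expand the space. If all
// three fail, AllocationResult::Retry(identity()) tells the caller to
// collect garbage in this space and retry. As an illustrative sketch of the
// fast path: with top == 0x1000 and limit == 0x2000, a 0x20-byte request
// returns an object at 0x1000 and bumps top to 0x1020.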
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  ASSERT(!heap()->linear_allocation() ||
         (anchor_.next_chunk() == &anchor_ &&
          anchor_.prev_chunk() == &anchor_));

  object = free_list_.Allocate(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  return AllocationResult::Retry(identity());
}


// -----------------------------------------------------------------------------
// NewSpace


AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top();

  // If we are stressing compaction we waste some memory in new space
  // in order to get more frequent GCs.
  if (FLAG_stress_compaction && !heap()->linear_allocation()) {
    if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
      int filler_size = size_in_bytes * 4;
      // Mark the wasted area with fillers so the heap remains iterable.
      for (int i = 0; i < filler_size; i += kPointerSize) {
        *(reinterpret_cast<Object**>(old_top + i)) =
            heap()->one_pointer_filler_map();
      }
      old_top += filler_size;
      allocation_info_.set_top(allocation_info_.top() + filler_size);
    }
  }

  if (allocation_info_.limit() - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes);
  }

  HeapObject* obj = HeapObject::FromAddress(old_top);
  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  return obj;
}


LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}


bool FreeListNode::IsFreeListNode(HeapObject* object) {
  Map* map = object->map();
  Heap* heap = object->GetHeap();
  return map == heap->raw_unchecked_free_space_map()
      || map == heap->raw_unchecked_one_pointer_filler_map()
      || map == heap->raw_unchecked_two_pointer_filler_map();
}
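

// Free memory is always tagged with one of the three maps tested above:
// FreeSpace for larger regions, and the one- and two-pointer fillers for
// word-sized holes. This is what keeps the heap iterable across freed areas.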

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_