// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Bitmap
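// Clear every cell of the chunk's marking bitmap and reset the chunk's
// live byte count.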
void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}

// -----------------------------------------------------------------------------
// PageIterator
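// PageIterator walks the pages of a paged space. The space's anchor page acts
// as a sentinel: iteration ends when the next page is the anchor again.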
PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) { }

bool PageIterator::has_next() {
  return next_page_ != &space_->anchor_;
}

Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}

// -----------------------------------------------------------------------------
// NewSpacePageIterator
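// The three constructors below iterate, respectively, over the pages of the
// to-space of a new space, over the pages of a single semispace, and over the
// pages covering an explicit [start, limit) address range.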
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) { }

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}

bool NewSpacePageIterator::has_next() {
  return prev_page_ != last_page_;
}

NewSpacePage* NewSpacePageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}

// -----------------------------------------------------------------------------
// HeapObjectIterator
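// Return the next live object on the current page, skipping fillers and the
// unused gap between the allocation top and limit, or NULL once the end of
// the page is reached.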
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_end_);
    if (!obj->IsFiller()) {
      ASSERT_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}

// -----------------------------------------------------------------------------
// MemoryAllocator
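// When heap protection is enabled, these helpers toggle OS-level page
// protection for an address range, or for the chunk backing a given page.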
#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}

void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}

void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}

void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif

// --------------------------------------------------------------------------
// PagedSpace
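// Turn a freshly committed memory chunk into a page of its owning space: the
// page's usable area is added to the owner's capacity and put on its free
// list, and incremental marking flags are set up for the chunk.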
Page* Page::Initialize(Heap* heap,
                       MemoryChunk* chunk,
                       Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  ASSERT(chunk->size() <= static_cast<size_t>(kPageSize));
  ASSERT(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}

bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}

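// Setting or clearing SCAN_ON_SCAVENGE also keeps the heap-wide count of
// scan-on-scavenge pages in sync and refreshes the page's incremental
// marking flags.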
void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}

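// Map an arbitrary interior pointer to its MemoryChunk. For addresses inside
// regular pages this is plain pointer arithmetic; for addresses inside large
// object space the large pages have to be searched.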
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(HEAP->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}

PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) { }

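// Pages of a space form a doubly linked list through their next/prev chunk
// pointers; the accessors below just add an owner check and a cast.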
Page* Page::next_page() {
  ASSERT(next_chunk()->owner() == owner());
  return static_cast<Page*>(next_chunk());
}

Page* Page::prev_page() {
  ASSERT(prev_chunk()->owner() == owner());
  return static_cast<Page*>(prev_chunk());
}

void Page::set_next_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_next_chunk(page);
}

void Page::set_prev_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_prev_chunk(page);
}

// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top;
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit) return NULL;

  allocation_info_.top = new_top;
  return HeapObject::FromAddress(current_top);
}

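// Raw allocation: try the linear allocation area first, then the free list,
// and finally the slow path. On each success the code space skip list is
// updated; otherwise a retry-after-GC failure is returned.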
MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  object = free_list_.Allocate(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  return Failure::RetryAfterGC(identity());
}

// -----------------------------------------------------------------------------
// NewSpace
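// Allocation in new space is a simple bump of the allocation top within the
// current semispace page; when the request does not fit, the slow path takes
// over.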
MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top;

  // If we are stressing compaction we waste some memory in new space
  // in order to get more frequent GCs.
  if (FLAG_stress_compaction && !HEAP->linear_allocation()) {
    if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
      int filler_size = size_in_bytes * 4;
      for (int i = 0; i < filler_size; i += kPointerSize) {
        *(reinterpret_cast<Object**>(old_top + i)) =
            HEAP->one_pointer_filler_map();
      }
      old_top += filler_size;
      allocation_info_.top += filler_size;
    }
  }

  if (allocation_info_.limit - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes);
  }

  Object* obj = HeapObject::FromAddress(old_top);
  allocation_info_.top += size_in_bytes;
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  return obj;
}

LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}

intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}

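// Shrink a sequential string that ends exactly at the allocation top by
// moving the top down to the new end of the string; if the string is already
// marked black, the page's live byte count is adjusted accordingly.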
template <typename StringType>
void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
  ASSERT(length <= string->length());
  ASSERT(string->IsSeqString());
  ASSERT(string->address() + StringType::SizeFor(string->length()) ==
         allocation_info_.top);
  Address old_top = allocation_info_.top;
  allocation_info_.top =
      string->address() + StringType::SizeFor(length);
  string->set_length(length);
  if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
    int delta = static_cast<int>(old_top - allocation_info_.top);
    MemoryChunk::IncrementLiveBytesFromMutator(string->address(), -delta);
  }
}

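// A heap object is a free-list node if its map is one of the three filler
// maps (free space, one-pointer filler, two-pointer filler).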
bool FreeListNode::IsFreeListNode(HeapObject* object) {
  Map* map = object->map();
  Heap* heap = object->GetHeap();
  return map == heap->raw_unchecked_free_space_map()
      || map == heap->raw_unchecked_one_pointer_filler_map()
      || map == heap->raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_