// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/msan.h"
#include "src/profiler/heap-profiler.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

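// Clears every cell of the chunk's marking bitmap and resets the chunk's
// live byte count.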
void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}


// -----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) {}


bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }


Page* PageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// SemiSpaceIterator

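// Returns the next object in the semispace, stepping over page boundaries as
// needed, or NULL once the iteration limit has been reached.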
HeapObject* SemiSpaceIterator::Next() {
  if (current_ == limit_) return NULL;
  if (NewSpacePage::IsAtEnd(current_)) {
    NewSpacePage* page = NewSpacePage::FromLimit(current_);
    page = page->next_page();
    DCHECK(!page->is_anchor());
    current_ = page->area_start();
    if (current_ == limit_) return NULL;
  }

  HeapObject* object = HeapObject::FromAddress(current_);
  int size = object->Size();

  current_ += size;
  return object;
}


HeapObject* SemiSpaceIterator::next_object() { return Next(); }


// -----------------------------------------------------------------------------
// NewSpacePageIterator

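// A NewSpacePageIterator visits the to-space pages of a new space, all pages
// of a semispace, or the pages covering the range [start, limit).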
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) {}

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }


NewSpacePage* NewSpacePageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// HeapObjectIterator

HeapObject* HeapObjectIterator::Next() {
  do {
    HeapObject* next_obj = FromCurrentPage();
    if (next_obj != NULL) return next_obj;
  } while (AdvanceToNextPage());
  return NULL;
}


HeapObject* HeapObjectIterator::next_object() { return Next(); }

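// Scans the current page for the next non-filler object, skipping the unused
// linear allocation area between top and limit. Returns NULL when the end of
// the page is reached.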
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = obj->Size();
    cur_addr_ += obj_size;
    DCHECK(cur_addr_ <= cur_end_);
    // TODO(hpayer): Remove the debugging code.
    if (cur_addr_ > cur_end_) {
      space_->heap()->isolate()->PushStackTraceAndDie(0xaaaaaaaa, obj, NULL,
                                                      obj_size);
    }

    if (!obj->IsFiller()) {
      DCHECK_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}


// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start, size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// --------------------------------------------------------------------------
// AllocationResult

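// For a retry result the space to retry in is encoded as a Smi in object_.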
AllocationSpace AllocationResult::RetrySpace() {
  DCHECK(IsRetry());
  return static_cast<AllocationSpace>(Smi::cast(object_)->value());
}


// --------------------------------------------------------------------------
// PagedSpace

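// Turns a committed memory chunk into a page of |owner|: the page's usable
// area is added to the owner's capacity and released to its free list, and
// the incremental marking page flags are set up.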
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  page->mutex_ = new base::Mutex();
  DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
  DCHECK(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}


bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}


bool PagedSpace::Contains(HeapObject* o) { return Contains(o->address()); }


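// Sets or clears the SCAN_ON_SCAVENGE flag, keeping the heap's count of
// scan-on-scavenge pages in sync and updating the incremental marking flags.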
void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}


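// Returns the chunk containing |addr|. For addresses on regular pages this is
// a simple alignment mask; otherwise the owning large object page is found by
// scanning large object space.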
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(heap->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}


PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldSpaceState),
      old_iterator_(heap->old_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) {}


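// Returns the next chunk that may contain pointers: old space and map space
// pages, followed by the large object space pages that hold fixed arrays.
// Returns NULL when all chunks have been visited.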
MemoryChunk* PointerChunkIterator::next() {
  switch (state_) {
    case kOldSpaceState: {
      if (old_iterator_.has_next()) {
        return old_iterator_.next();
      }
      state_ = kMapState;
      // Fall through.
    }
    case kMapState: {
      if (map_iterator_.has_next()) {
        return map_iterator_.next();
      }
      state_ = kLargeObjectState;
      // Fall through.
    }
    case kLargeObjectState: {
      HeapObject* heap_object;
      do {
        heap_object = lo_iterator_.Next();
        if (heap_object == NULL) {
          state_ = kFinishedState;
          return NULL;
        }
        // Fixed arrays are the only pointer-containing objects in large
        // object space.
      } while (!heap_object->IsFixedArray());
      MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
      return answer;
    }
    case kFinishedState:
      return NULL;
    default:
      break;
  }
  UNREACHABLE();
  return NULL;
}


void Page::set_next_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_next_chunk(page);
}


void Page::set_prev_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_prev_chunk(page);
}


// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}


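// Like AllocateLinearly, but first pads the allocation top up to |alignment|.
// On success, *size_in_bytes is increased by the size of the filler that was
// prepended.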
HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
                                                AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + *size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    *size_in_bytes += filler_size;
    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                     filler_size);
  }

  return HeapObject::FromAddress(current_top);
}


// Raw allocation.
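// Tries the linear allocation area first, then the free list, and finally the
// slow path that may expand the space.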
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);

  if (object == NULL) {
    object = free_list_.Allocate(size_in_bytes);
    if (object == NULL) {
      object = SlowAllocateRaw(size_in_bytes);
    }
  }

  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
    return object;
  }

  return AllocationResult::Retry(identity());
}


AllocationResult PagedSpace::AllocateRawUnalignedSynchronized(
    int size_in_bytes) {
  base::LockGuard<base::Mutex> lock_guard(&space_mutex_);
  return AllocateRawUnaligned(size_in_bytes);
}


// Raw allocation.
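// Aligned variant, only used for the old space: when falling back to the free
// list or the slow path, the request is padded by the worst-case filler size
// and the result is re-aligned with a filler object.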
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                AllocationAlignment alignment) {
  DCHECK(identity() == OLD_SPACE);
  int allocation_size = size_in_bytes;
  HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);

  if (object == NULL) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    int filler_size = Heap::GetMaximumFillToAlign(alignment);
    allocation_size += filler_size;
    object = free_list_.Allocate(allocation_size);
    if (object == NULL) {
      object = SlowAllocateRaw(allocation_size);
    }
    if (object != NULL && filler_size != 0) {
      object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
                                       alignment);
      // Filler objects are initialized, so mark only the aligned object memory
      // as uninitialized.
      allocation_size = size_in_bytes;
    }
  }

  if (object != NULL) {
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
    return object;
  }

  return AllocationResult::Retry(identity());
}


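// On 32-bit hosts, double-aligned requests take the aligned path; on 64-bit
// hosts every allocation is already sufficiently aligned.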
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                         AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


// -----------------------------------------------------------------------------
// NewSpace


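// Bump-pointer allocation in the new space linear allocation area, prepending
// an alignment filler if needed; EnsureAllocation is called when the
// remaining room is too small.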
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                              AllocationAlignment alignment) {
  Address top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (allocation_info_.limit() - top < aligned_size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, alignment)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
    filler_size = Heap::GetFillToAlign(top, alignment);
    aligned_size_in_bytes = size_in_bytes + filler_size;
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + aligned_size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  if (filler_size > 0) {
    obj = heap()->PrecedeWithFiller(obj, filler_size);
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
  Address top = allocation_info_.top();
  if (allocation_info_.limit() - top < size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
                                       AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_INL_H_