deps: backport IsValid changes from 4e8736d in V8
deps/v8/src/heap/spaces-inl.h (platform/upstream/nodejs.git)
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/spaces.h"
#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/msan.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

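// Clears every mark-bit cell on this chunk and resets its live-byte count,
// discarding the chunk's marking state.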
void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}


// -----------------------------------------------------------------------------
// PageIterator


PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) {}


bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }


Page* PageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// NewSpacePageIterator


NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) {}

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }


NewSpacePage* NewSpacePageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// HeapObjectIterator
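// Walks the current page: the unallocated gap between the page's allocation
// top and limit is skipped, fillers are stepped over, and the next real heap
// object is returned, or NULL once the page is exhausted.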
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    DCHECK(cur_addr_ <= cur_end_);
    // TODO(hpayer): Remove the debugging code.
    if (cur_addr_ > cur_end_) {
      space_->heap()->isolate()->PushStackTraceAndDie(0xaaaaaaaa, obj, NULL,
                                                      obj_size);
    }

    if (!obj->IsFiller()) {
      DCHECK_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}


// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start, size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// --------------------------------------------------------------------------
// PagedSpace
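// Turns a freshly committed MemoryChunk into a Page of |owner|: the page's
// whole area is accounted as capacity and released to the owner's free list,
// and the incremental marker's page flags are initialized.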
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
  DCHECK(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}


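// An address is in this space only if its page passes Page::IsValid()
// (presumably the helper touched by the IsValid backport named in the commit
// subject) and the page is owned by this space.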
bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!Page::IsValid(p)) return false;
  return p->owner() == this;
}


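// Sets or clears SCAN_ON_SCAVENGE, keeps the heap-wide count of
// scan-on-scavenge pages in sync, and refreshes the incremental marker's
// page flags for this chunk.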
void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}


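// Maps an arbitrary interior address to its MemoryChunk. The page-aligned
// base is returned directly when it has an owner; otherwise the address must
// lie in large object space, whose chunks are found by scanning the fixed
// arrays held there.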
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(heap->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}


void MemoryChunk::UpdateHighWaterMark(Address mark) {
  if (mark == NULL) return;
  // Need to subtract one from the mark because when a chunk is full the
  // top points to the next address after the chunk, which effectively belongs
  // to another chunk. See the comment to Page::FromAllocationTop.
  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
  int new_mark = static_cast<int>(mark - chunk->address());
  if (new_mark > chunk->high_water_mark_) {
    chunk->high_water_mark_ = new_mark;
  }
}


PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldSpaceState),
      old_iterator_(heap->old_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) {}


Page* Page::next_page() {
  DCHECK(next_chunk()->owner() == owner());
  return static_cast<Page*>(next_chunk());
}


Page* Page::prev_page() {
  DCHECK(prev_chunk()->owner() == owner());
  return static_cast<Page*>(prev_chunk());
}


void Page::set_next_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_next_chunk(page);
}


void Page::set_prev_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_prev_chunk(page);
}


// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}


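// Variant of AllocateLinearly that bumps the top far enough to return an
// object with the requested alignment. Any filler actually needed is added
// to *size_in_bytes and placed in front of the returned object.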
HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
                                                AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + *size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    *size_in_bytes += filler_size;
    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                     filler_size);
  }

  return HeapObject::FromAddress(current_top);
}


// Raw allocation.
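// Falls back from the linear bump-pointer path to the free list and then to
// the slow path; if all of them fail, the allocation is reported as a retry
// in this space's identity.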
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);

  if (object == NULL) {
    object = free_list_.Allocate(size_in_bytes);
    if (object == NULL) {
      object = SlowAllocateRaw(size_in_bytes);
    }
  }

  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
    return object;
  }

  return AllocationResult::Retry(identity());
}


// Raw allocation.
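// Aligned variant, only used for the old space: when the linear path fails,
// the worst-case filler is added to the request before trying the free list
// or the slow path, and the result is then re-aligned with a filler.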
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                AllocationAlignment alignment) {
  DCHECK(identity() == OLD_SPACE);
  int allocation_size = size_in_bytes;
  HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);

  if (object == NULL) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    int filler_size = Heap::GetMaximumFillToAlign(alignment);
    allocation_size += filler_size;
    object = free_list_.Allocate(allocation_size);
    if (object == NULL) {
      object = SlowAllocateRaw(allocation_size);
    }
    if (object != NULL && filler_size != 0) {
      object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
                                       alignment);
      // Filler objects are initialized, so mark only the aligned object memory
      // as uninitialized.
      allocation_size = size_in_bytes;
    }
  }

  if (object != NULL) {
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
    return object;
  }

  return AllocationResult::Retry(identity());
}


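// Only 32-bit hosts need the aligned path for kDoubleAligned requests; on
// 64-bit hosts every allocation is already sufficiently aligned, so the
// unaligned path is always taken.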
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                         AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


// -----------------------------------------------------------------------------
// NewSpace


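// Bump-pointer allocation in to-space with an alignment filler placed in
// front of the object; requests that do not fit in the current linear area
// go through SlowAllocateRaw.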
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                              AllocationAlignment alignment) {
  Address old_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(old_top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (allocation_info_.limit() - old_top < aligned_size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes, alignment);
  }

  HeapObject* obj = HeapObject::FromAddress(old_top);
  allocation_info_.set_top(allocation_info_.top() + aligned_size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  if (filler_size > 0) {
    obj = heap()->PrecedeWithFiller(obj, filler_size);
  }

  // The slow path above ultimately goes through AllocateRaw, so this suffices.
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


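// Word-aligned bump-pointer allocation in to-space; a request that does not
// fit in the current linear area goes through SlowAllocateRaw.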
AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
  Address old_top = allocation_info_.top();

  if (allocation_info_.limit() - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes, kWordAligned);
  }

  HeapObject* obj = HeapObject::FromAddress(old_top);
  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // The slow path above ultimately goes through AllocateRaw, so this suffices.
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
                                       AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}

}
}  // namespace v8::internal

#endif  // V8_HEAP_SPACES_INL_H_