// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "heap-profiler.h"
#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}


// -----------------------------------------------------------------------------
// PageIterator


PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) { }


bool PageIterator::has_next() {
  return next_page_ != &space_->anchor_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}
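
// Typical traversal of a paged space (an illustrative usage sketch only, not
// code defined in this header):
//
//   PageIterator it(space);
//   while (it.has_next()) {
//     Page* page = it.next();
//     // ... visit |page| ...
//   }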


// -----------------------------------------------------------------------------
// NewSpacePageIterator


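// Each constructor positions the iterator so that next() returns every page
// from the page containing the start address through last_page_ inclusive;
// has_next() is false once the page returned last is last_page_.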
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) { }

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() {
  return prev_page_ != last_page_;
}


NewSpacePage* NewSpacePageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// HeapObjectIterator
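// Returns the next non-filler object on the current page, or NULL when the
// page is exhausted.  The gap between the space's top and limit is the
// current linear allocation area and holds no objects, so it is skipped.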
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_end_);
    if (!obj->IsFiller()) {
      ASSERT_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}


// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// --------------------------------------------------------------------------
// PagedSpace
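// Turns a freshly committed chunk into a page of |owner|: the page's usable
// area is added to the owner's capacity and handed to its free list, and the
// incremental marker's page flags are set up.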
Page* Page::Initialize(Heap* heap,
                       MemoryChunk* chunk,
                       Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  ASSERT(page->area_size() <= kMaxRegularHeapObjectSize);
  ASSERT(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}


bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}


void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}


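// Maps an arbitrary interior pointer to its MemoryChunk.  For regular pages
// masking off the page alignment bits is enough; if the masked address has no
// owner, the pointer must lie inside a large object, so the large object
// space is searched for the chunk that contains it.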
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(heap->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}


void MemoryChunk::UpdateHighWaterMark(Address mark) {
  if (mark == NULL) return;
  // Need to subtract one from the mark because when a chunk is full the
  // top points to the next address after the chunk, which effectively belongs
  // to another chunk. See the comment to Page::FromAllocationTop.
  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
  int new_mark = static_cast<int>(mark - chunk->address());
  if (new_mark > chunk->high_water_mark_) {
    chunk->high_water_mark_ = new_mark;
  }
}


PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) { }


Page* Page::next_page() {
  ASSERT(next_chunk()->owner() == owner());
  return static_cast<Page*>(next_chunk());
}


Page* Page::prev_page() {
  ASSERT(prev_chunk()->owner() == owner());
  return static_cast<Page*>(prev_chunk());
}


void Page::set_next_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_next_chunk(page);
}


void Page::set_prev_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_prev_chunk(page);
}


// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}


// Raw allocation.
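// Tries linear (bump-pointer) allocation first, then the free list, then the
// slow path that may add pages to the space; if all of these fail, the caller
// is asked to retry after a GC in this space.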
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  ASSERT(!heap()->linear_allocation() ||
         (anchor_.next_chunk() == &anchor_ &&
          anchor_.prev_chunk() == &anchor_));

  object = free_list_.Allocate(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  return AllocationResult::Retry(identity());
}


// -----------------------------------------------------------------------------
// NewSpace


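// Bump-pointer allocation in to-space.  In DEBUG builds with
// --stress-compaction, extra filler words are written ahead of the new object
// so that new space fills up sooner and GCs happen more often; if the linear
// area is too small, the slow path takes over.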
AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top();
#ifdef DEBUG
  // If we are stressing compaction we waste some memory in new space
  // in order to get more frequent GCs.
  if (FLAG_stress_compaction && !heap()->linear_allocation()) {
    if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
      int filler_size = size_in_bytes * 4;
      for (int i = 0; i < filler_size; i += kPointerSize) {
        *(reinterpret_cast<Object**>(old_top + i)) =
            heap()->one_pointer_filler_map();
      }
      old_top += filler_size;
      allocation_info_.set_top(allocation_info_.top() + filler_size);
    }
  }
#endif

  if (allocation_info_.limit() - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes);
  }

  HeapObject* obj = HeapObject::FromAddress(old_top);
  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  return obj;
}


LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}


bool FreeListNode::IsFreeListNode(HeapObject* object) {
  Map* map = object->map();
  Heap* heap = object->GetHeap();
  return map == heap->raw_unchecked_free_space_map()
      || map == heap->raw_unchecked_one_pointer_filler_map()
      || map == heap->raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_