// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>

#include "v8.h"
#include "cctest.h"

using namespace v8::internal;


#if 0
static void VerifyRegionMarking(Address page_start) {
#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
  Page* p = Page::FromAddress(page_start);

  p->SetRegionMarks(Page::kAllRegionsCleanMarks);

  // After clearing the marks, no region should be dirty.
  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
  }
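
  // Dirty every region in the object area.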
  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    Page::FromAddress(addr)->MarkRegionDirty(addr);
  }
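
  // Every region should now report dirty.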
  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
  }
#endif
}
#endif


// TODO(gc) you can no longer allocate pages like this. Details are hidden.
#if 0
TEST(Page) {
  byte* mem = NewArray<byte>(2*Page::kPageSize);

  Address start = reinterpret_cast<Address>(mem);
  Address page_start = RoundUp(start, Page::kPageSize);

  Page* p = Page::FromAddress(page_start);
  // Initialized Page has heap pointer, normally set by memory_allocator.
  p->heap_ = CcTest::heap();
  CHECK(p->address() == page_start);

  p->SetIsLargeObjectPage(false);
  CHECK(!p->next_page()->is_valid());

  CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
  CHECK(p->ObjectAreaEnd() == page_start + Page::kPageSize);

  CHECK(p->Offset(page_start + Page::kObjectStartOffset) ==
        Page::kObjectStartOffset);
  CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize);

  CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
  CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());

  // Test region marking.
  VerifyRegionMarking(page_start);

  DeleteArray(mem);
}
#endif


namespace v8 {
namespace internal {

// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
 public:
  TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
      : isolate_(isolate), old_allocator_(isolate->memory_allocator_) {
    isolate->memory_allocator_ = allocator;
  }
  ~TestMemoryAllocatorScope() {
    isolate_->memory_allocator_ = old_allocator_;
  }

 private:
  Isolate* isolate_;
  MemoryAllocator* old_allocator_;
  DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};

// Temporarily sets a given code range in an isolate.
class TestCodeRangeScope {
 public:
  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
      : isolate_(isolate), old_code_range_(isolate->code_range_) {
    isolate->code_range_ = code_range;
  }
  ~TestCodeRangeScope() {
    isolate_->code_range_ = old_code_range_;
  }

 private:
  Isolate* isolate_;
  CodeRange* old_code_range_;
  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
};

} }  // namespace v8::internal
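

// Allocates a chunk with the given reserved and committed sizes, checks the
// resulting bounds, then recommits the area to second_commit_area_size and
// checks the bounds again.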
static void VerifyMemoryChunk(Isolate* isolate,
                              Heap* heap,
                              CodeRange* code_range,
                              size_t reserve_area_size,
                              size_t commit_area_size,
                              size_t second_commit_area_size,
                              Executability executable) {
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  TestCodeRangeScope test_code_range_scope(isolate, code_range);

  size_t header_size = (executable == EXECUTABLE)
                       ? MemoryAllocator::CodePageGuardStartOffset()
                       : MemoryChunk::kObjectStartOffset;
  size_t guard_size = (executable == EXECUTABLE)
                      ? MemoryAllocator::CodePageGuardSize()
                      : 0;

  MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
      reserve_area_size, commit_area_size, executable, NULL);

  size_t alignment = code_range->exists() ?
                     MemoryChunk::kAlignment : OS::CommitPageSize();
  size_t reserved_size = (executable == EXECUTABLE)
      ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
                alignment)
      : RoundUp(header_size + reserve_area_size, OS::CommitPageSize());

  CHECK(memory_chunk->size() == reserved_size);
  CHECK(memory_chunk->area_start() < memory_chunk->address() +
                                     memory_chunk->size());
  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
                                    memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);

  Address area_start = memory_chunk->area_start();
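
  // Recommit the area with a different size: the start address must not move
  // and the area must stay within the chunk.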
  memory_chunk->CommitArea(second_commit_area_size);
  CHECK(area_start == memory_chunk->area_start());
  CHECK(memory_chunk->area_start() < memory_chunk->address() +
                                     memory_chunk->size());
  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
                                    memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
        second_commit_area_size);

  memory_allocator->Free(memory_chunk);
  memory_allocator->TearDown();
  delete memory_allocator;
}
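

// Cheap pseudo-random generator; the result is always below 1 MB, so it fits
// inside the 1 MB reserved area used by the chunk test below.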
static unsigned int Pseudorandom() {
  static uint32_t lo = 2345;
  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
  return lo & 0xFFFFF;
}
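

// Exercises chunk allocation with pseudo-random committed sizes, both with
// and without a pre-existing CodeRange.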
TEST(MemoryChunk) {
  Isolate* isolate = CcTest::i_isolate();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(heap->ConfigureHeapDefault());

  size_t reserve_area_size = 1 * MB;
  size_t initial_commit_area_size, second_commit_area_size;

  for (int i = 0; i < 100; i++) {
    initial_commit_area_size = Pseudorandom();
    second_commit_area_size = Pseudorandom();
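
    // With CodeRange.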
    CodeRange* code_range = new CodeRange(isolate);
    const int code_range_size = 32 * MB;
    if (!code_range->SetUp(code_range_size)) return;

    VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
                      initial_commit_area_size, second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
                      initial_commit_area_size, second_commit_area_size,
                      NOT_EXECUTABLE);
    delete code_range;

    // Without CodeRange.
    code_range = NULL;
    VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
                      initial_commit_area_size, second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
                      initial_commit_area_size, second_commit_area_size,
                      NOT_EXECUTABLE);
  }
}


TEST(MemoryAllocator) {
  Isolate* isolate = CcTest::i_isolate();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(heap->ConfigureHeapDefault());

  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));

  int total_pages = 0;
  OldSpace faked_space(heap, heap->MaxReserved(), OLD_POINTER_SPACE,
                       NOT_EXECUTABLE);
  Page* first_page = memory_allocator->AllocatePage(
      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);

  first_page->InsertAfter(faked_space.anchor()->prev_page());
  CHECK(first_page->is_valid());
  CHECK(first_page->next_page() == faked_space.anchor());
  total_pages++;
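
  // Every page reachable from the space anchor must be owned by faked_space.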
  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
    CHECK(p->owner() == &faked_space);
  }

  // Again, we should get n or n - 1 pages.
  Page* other = memory_allocator->AllocatePage(
      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
  CHECK(other->is_valid());
  total_pages++;
  other->InsertAfter(first_page);
  int page_count = 0;
  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
    CHECK(p->owner() == &faked_space);
    page_count++;
  }
  CHECK(total_pages == page_count);

  Page* second_page = first_page->next_page();
  CHECK(second_page->is_valid());
  memory_allocator->Free(first_page);
  memory_allocator->Free(second_page);
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(NewSpace) {
  Isolate* isolate = CcTest::i_isolate();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(heap->ConfigureHeapDefault());
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  NewSpace new_space(heap);

  CHECK(new_space.SetUp(CcTest::heap()->ReservedSemiSpaceSize(),
                        CcTest::heap()->ReservedSemiSpaceSize()));
  CHECK(new_space.HasBeenSetUp());
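
  // Fill the new space with maximally sized objects; every allocation must
  // land inside the space.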
  while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
    Object* obj = new_space.AllocateRaw(
        Page::kMaxRegularHeapObjectSize)->ToObjectUnchecked();
    CHECK(new_space.Contains(HeapObject::cast(obj)));
  }

  new_space.TearDown();
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(OldSpace) {
  Isolate* isolate = CcTest::i_isolate();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(heap->ConfigureHeapDefault());
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  OldSpace* s = new OldSpace(heap, heap->MaxOldGenerationSize(),
                             OLD_POINTER_SPACE, NOT_EXECUTABLE);
  CHECK(s != NULL);
  CHECK(s->SetUp());
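
  // Keep allocating until the space reports no available bytes.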
  while (s->Available() > 0) {
    s->AllocateRaw(Page::kMaxRegularHeapObjectSize)->ToObjectUnchecked();
  }

  s->TearDown();
  delete s;
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(LargeObjectSpace) {
  v8::V8::Initialize();

  LargeObjectSpace* lo = CcTest::heap()->lo_space();
  CHECK(lo != NULL);

  int lo_size = Page::kPageSize;

  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->ToObjectUnchecked();
  CHECK(obj->IsHeapObject());

  HeapObject* ho = HeapObject::cast(obj);
  CHECK(lo->Contains(HeapObject::cast(obj)));
  CHECK(lo->FindObject(ho->address()) == obj);
  CHECK(lo->Contains(ho));
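
  // Allocate large objects until the space refuses; each successful
  // allocation must reduce the available byte count.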
  while (true) {
    intptr_t available = lo->Available();
    { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
      if (!maybe_obj->ToObject(&obj)) break;
    }
    CHECK(lo->Available() < available);
  }

  CHECK(!lo->IsEmpty());
  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->IsFailure());
}


TEST(SizeOfFirstPageIsLargeEnough) {
  if (i::FLAG_always_opt) return;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();

  // Freshly initialized VM gets by with one page per space.
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // Executing the empty script gets by with one page per space.
  HandleScope scope(isolate);
  CompileRun("/*empty*/");
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // No large objects required to perform the above steps.
  CHECK(isolate->heap()->lo_space()->IsEmpty());
}