// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdlib.h>

#include "src/snapshot.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"

using namespace v8::internal;

#if 0
static void VerifyRegionMarking(Address page_start) {
#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
  Page* p = Page::FromAddress(page_start);

  p->SetRegionMarks(Page::kAllRegionsCleanMarks);

  // After clearing, no region in the object area may be dirty.
  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
  }

  // Mark every region dirty, ...
  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    Page::FromAddress(addr)->MarkRegionDirty(addr);
  }

  // ... then verify that all the marks stuck.
  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
  }
#endif
}
#endif


// TODO(gc) you can no longer allocate pages like this. Details are hidden.
#if 0
TEST(Page) {
  byte* mem = NewArray<byte>(2 * Page::kPageSize);
  CHECK(mem != NULL);

  Address start = reinterpret_cast<Address>(mem);
  Address page_start = RoundUp(start, Page::kPageSize);

  Page* p = Page::FromAddress(page_start);
  // Initialized Page has heap pointer, normally set by memory_allocator.
  p->heap_ = CcTest::heap();
  CHECK(p->address() == page_start);
  CHECK(p->is_valid());

  p->opaque_header = 0;
  p->SetIsLargeObjectPage(false);
  CHECK(!p->next_page()->is_valid());

  CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
  CHECK(p->ObjectAreaEnd() == page_start + Page::kPageSize);

  CHECK(p->Offset(page_start + Page::kObjectStartOffset) ==
        Page::kObjectStartOffset);
  CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize);

  CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
  CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());

  // Test region marking.
  VerifyRegionMarking(page_start);

  DeleteArray(mem);
}
#endif

namespace v8 {
namespace internal {

// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
 public:
  TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
      : isolate_(isolate), old_allocator_(isolate->memory_allocator_) {
    isolate->memory_allocator_ = allocator;
  }
  ~TestMemoryAllocatorScope() {
    isolate_->memory_allocator_ = old_allocator_;
  }

 private:
  Isolate* isolate_;
  MemoryAllocator* old_allocator_;
  DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};

// Temporarily sets a given code range in an isolate.
class TestCodeRangeScope {
 public:
  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
      : isolate_(isolate), old_code_range_(isolate->code_range_) {
    isolate->code_range_ = code_range;
  }
  ~TestCodeRangeScope() {
    isolate_->code_range_ = old_code_range_;
  }

 private:
  Isolate* isolate_;
  CodeRange* old_code_range_;
  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
};

} }  // namespace v8::internal
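
// Illustrative usage sketch for the scopes above (a minimal sketch, not part
// of any test; assumes an initialized |isolate| and a caller-owned
// |allocator|):
//
//   {
//     TestMemoryAllocatorScope scope(isolate, allocator);
//     // Within this block the isolate allocates through |allocator|.
//   }  // Destructor restores the isolate's original MemoryAllocator.
//
// VerifyMemoryChunk() below uses both scopes in exactly this RAII fashion.
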
static void VerifyMemoryChunk(Isolate* isolate,
                              Heap* heap,
                              CodeRange* code_range,
                              size_t reserve_area_size,
                              size_t commit_area_size,
                              size_t second_commit_area_size,
                              Executability executable) {
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  TestCodeRangeScope test_code_range_scope(isolate, code_range);

  // Executable chunks carry a code-page header and are bracketed by guard
  // pages; non-executable chunks need only the plain chunk header.
  size_t header_size = (executable == EXECUTABLE)
                       ? MemoryAllocator::CodePageGuardStartOffset()
                       : MemoryChunk::kObjectStartOffset;
  size_t guard_size = (executable == EXECUTABLE)
                      ? MemoryAllocator::CodePageGuardSize()
                      : 0;

  MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
                                                              commit_area_size,
                                                              executable,
                                                              NULL);
  size_t alignment = code_range != NULL && code_range->valid()
                     ? MemoryChunk::kAlignment
                     : v8::base::OS::CommitPageSize();
  size_t reserved_size = (executable == EXECUTABLE)
      ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
                alignment)
      : RoundUp(header_size + reserve_area_size,
                v8::base::OS::CommitPageSize());
  CHECK(memory_chunk->size() == reserved_size);
  CHECK(memory_chunk->area_start() <
        memory_chunk->address() + memory_chunk->size());
  CHECK(memory_chunk->area_end() <=
        memory_chunk->address() + memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);

  Address area_start = memory_chunk->area_start();

  // Committing a different amount must not move the start of the area.
  memory_chunk->CommitArea(second_commit_area_size);
  CHECK(area_start == memory_chunk->area_start());
  CHECK(memory_chunk->area_start() <
        memory_chunk->address() + memory_chunk->size());
  CHECK(memory_chunk->area_end() <=
        memory_chunk->address() + memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
        second_commit_area_size);

  memory_allocator->Free(memory_chunk);
  memory_allocator->TearDown();
  delete memory_allocator;
}
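
// Chunk layout exercised above for an EXECUTABLE chunk (a sketch inferred
// from the reserved_size computation, not to scale):
//
//   | code-page header | guard | committed area ... reserved | guard |
//   ^ address()                ^ area_start()
//
// NOT_EXECUTABLE chunks have no guard pages; their header is simply
// MemoryChunk::kObjectStartOffset bytes.
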
static unsigned int Pseudorandom() {
  static uint32_t lo = 2345;
  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);  // Simple deterministic mixing.
  return lo & 0xFFFFF;
}
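
// Note: the 0xFFFFF mask keeps Pseudorandom() below 2^20 bytes (1 MB), so
// the commit area sizes drawn in TEST(MemoryChunk) always fit inside the
// 1 MB reserve area used there.
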
TEST(MemoryChunk) {
  Isolate* isolate = CcTest::i_isolate();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(heap->ConfigureHeapDefault());

  size_t reserve_area_size = 1 * MB;
  size_t initial_commit_area_size, second_commit_area_size;

  for (int i = 0; i < 100; i++) {
    initial_commit_area_size = Pseudorandom();
    second_commit_area_size = Pseudorandom();

    // With CodeRange.
    CodeRange* code_range = new CodeRange(isolate);
    const size_t code_range_size = 32 * MB;
    if (!code_range->SetUp(code_range_size)) return;

    VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
                      initial_commit_area_size, second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
                      initial_commit_area_size, second_commit_area_size,
                      NOT_EXECUTABLE);
    delete code_range;

    // Without CodeRange.
    code_range = NULL;
    VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
                      initial_commit_area_size, second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
                      initial_commit_area_size, second_commit_area_size,
                      NOT_EXECUTABLE);
  }
}

TEST(MemoryAllocator) {
  Isolate* isolate = CcTest::i_isolate();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(isolate->heap()->ConfigureHeapDefault());

  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));

  int total_pages = 0;
  OldSpace faked_space(heap,
                       heap->MaxReserved(),
                       OLD_POINTER_SPACE,
                       NOT_EXECUTABLE);
  Page* first_page = memory_allocator->AllocatePage(
      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);

  first_page->InsertAfter(faked_space.anchor()->prev_page());
  CHECK(first_page->is_valid());
  CHECK(first_page->next_page() == faked_space.anchor());
  total_pages++;

  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
    CHECK(p->owner() == &faked_space);
  }

  // Again, we should get n or n - 1 pages.
  Page* other = memory_allocator->AllocatePage(
      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
  CHECK(other->is_valid());
  total_pages++;
  other->InsertAfter(first_page);
  int page_count = 0;
  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
    CHECK(p->owner() == &faked_space);
    page_count++;
  }
  CHECK(total_pages == page_count);

  Page* second_page = first_page->next_page();
  CHECK(second_page->is_valid());
  memory_allocator->Free(first_page);
  memory_allocator->Free(second_page);
  memory_allocator->TearDown();
  delete memory_allocator;
}

TEST(NewSpace) {
  Isolate* isolate = CcTest::i_isolate();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(heap->ConfigureHeapDefault());
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  NewSpace new_space(heap);

  CHECK(new_space.SetUp(CcTest::heap()->ReservedSemiSpaceSize(),
                        CcTest::heap()->ReservedSemiSpaceSize()));
  CHECK(new_space.HasBeenSetUp());

  // Fill the space with maximally-sized objects; every allocation must land
  // inside the new space.
  while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
    Object* obj = new_space.AllocateRaw(
        Page::kMaxRegularHeapObjectSize).ToObjectChecked();
    CHECK(new_space.Contains(HeapObject::cast(obj)));
  }

  new_space.TearDown();
  memory_allocator->TearDown();
  delete memory_allocator;
}

TEST(OldSpace) {
  Isolate* isolate = CcTest::i_isolate();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(heap->ConfigureHeapDefault());
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  OldSpace* s = new OldSpace(heap,
                             heap->MaxOldGenerationSize(),
                             OLD_POINTER_SPACE,
                             NOT_EXECUTABLE);
  CHECK(s != NULL);

  CHECK(s->SetUp());

  while (s->Available() > 0) {
    s->AllocateRaw(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
  }

  s->TearDown();
  delete s;
  memory_allocator->TearDown();
  delete memory_allocator;
}

TEST(LargeObjectSpace) {
  v8::V8::Initialize();

  LargeObjectSpace* lo = CcTest::heap()->lo_space();
  CHECK(lo != NULL);

  int lo_size = Page::kPageSize;

  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE).ToObjectChecked();
  CHECK(obj->IsHeapObject());

  HeapObject* ho = HeapObject::cast(obj);

  CHECK(lo->Contains(HeapObject::cast(obj)));

  CHECK(lo->FindObject(ho->address()) == obj);

  CHECK(lo->Contains(ho));

  // Allocate until the space refuses; each success must shrink Available().
  while (true) {
    intptr_t available = lo->Available();
    { AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
      if (allocation.IsRetry()) break;
    }
    CHECK(lo->Available() < available);
  }

  CHECK(!lo->IsEmpty());

  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry());
}

TEST(SizeOfFirstPageIsLargeEnough) {
  if (i::FLAG_always_opt) return;
  // Bootstrapping without a snapshot causes more allocations.
  if (!i::Snapshot::HaveASnapshotToStartFrom()) return;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();

  // Freshly initialized VM gets by with one page per space.
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // Executing the empty script gets by with one page per space.
  HandleScope scope(isolate);
  CompileRun("/*empty*/");
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // No large objects required to perform the above steps.
  CHECK(isolate->heap()->lo_space()->IsEmpty());
}