eager_deoptimization_entry_code_entries_ = -1;
lazy_deoptimization_entry_code_entries_ = -1;
size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
- MemoryAllocator* allocator = Isolate::Current()->memory_allocator();
- size_t initial_commit_size = OS::CommitPageSize();
- eager_deoptimization_entry_code_ =
- allocator->AllocateChunk(deopt_table_size,
- initial_commit_size,
- EXECUTABLE,
- NULL);
- lazy_deoptimization_entry_code_ =
- allocator->AllocateChunk(deopt_table_size,
- initial_commit_size,
- EXECUTABLE,
- NULL);
+ eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
+ lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
current_ = NULL;
deoptimizing_code_list_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizerData::~DeoptimizerData() {
- Isolate::Current()->memory_allocator()->Free(
- eager_deoptimization_entry_code_);
+ delete eager_deoptimization_entry_code_;
eager_deoptimization_entry_code_ = NULL;
- Isolate::Current()->memory_allocator()->Free(
- lazy_deoptimization_entry_code_);
+ delete lazy_deoptimization_entry_code_;
lazy_deoptimization_entry_code_ = NULL;
DeoptimizingCodeListNode* current = deoptimizing_code_list_;
GetEntryMode mode) {
ASSERT(id >= 0);
if (id >= kMaxNumberOfEntries) return NULL;
- MemoryChunk* base = NULL;
+ VirtualMemory* base = NULL;
if (mode == ENSURE_ENTRY_CODE) {
EnsureCodeForDeoptimizationEntry(type, id);
} else {
} else {
base = data->lazy_deoptimization_entry_code_;
}
- return base->area_start() + (id * table_entry_size_);
+ return
+ static_cast<Address>(base->address()) + (id * table_entry_size_);
}
// Maps a code address back to its deoptimization-table entry index.
// Returns kNotDeoptimizationEntry when |addr| does not fall inside the
// entry table for the given bailout |type| (or when no table exists yet).
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
  VirtualMemory* base = NULL;
  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
  if (type == EAGER) {
    base = data->eager_deoptimization_entry_code_;
  } else {
    base = data->lazy_deoptimization_entry_code_;
  }
  // Test for NULL before touching |base|: the previous code called
  // base->address() first, so the NULL check below it could never
  // prevent the dereference.
  if (base == NULL) return kNotDeoptimizationEntry;
  // VirtualMemory::address() yields void*; cast once and compare
  // consistently in Address (byte*) arithmetic. Mixing the raw void*
  // into the range comparison is an ill-formed pointer comparison.
  Address start = reinterpret_cast<Address>(base->address());
  if (addr < start ||
      addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
    return kNotDeoptimizationEntry;
  }
  // Entries are laid out back-to-back, table_entry_size_ bytes apart,
  // so the offset must be an exact multiple of the entry size.
  ASSERT_EQ(0,
            static_cast<int>(addr - start) % table_entry_size_);
  return static_cast<int>(addr - start) / table_entry_size_;
}
CodeDesc desc;
masm.GetCode(&desc);
- MemoryChunk* chunk = type == EAGER
+ VirtualMemory* memory = type == EAGER
? data->eager_deoptimization_entry_code_
: data->lazy_deoptimization_entry_code_;
- ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
- desc.instr_size);
- chunk->CommitArea(desc.instr_size);
- memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
- CPU::FlushICache(chunk->area_start(), desc.instr_size);
+ size_t table_size = Deoptimizer::GetMaxDeoptTableSize();
+ ASSERT(static_cast<int>(table_size) >= desc.instr_size);
+ memory->Commit(memory->address(), table_size, true);
+ memcpy(memory->address(), desc.buffer, desc.instr_size);
+ CPU::FlushICache(memory->address(), desc.instr_size);
if (type == EAGER) {
data->eager_deoptimization_entry_code_entries_ = entry_count;
}
-Address CodeRange::AllocateRawMemory(const size_t requested_size,
- const size_t commit_size,
+
+Address CodeRange::AllocateRawMemory(const size_t requested,
size_t* allocated) {
- ASSERT(commit_size <= requested_size);
ASSERT(current_allocation_block_index_ < allocation_list_.length());
- if (requested_size > allocation_list_[current_allocation_block_index_].size) {
+ if (requested > allocation_list_[current_allocation_block_index_].size) {
// Find an allocation block large enough. This function call may
// call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
- GetNextAllocationBlock(requested_size);
+ GetNextAllocationBlock(requested);
}
// Commit the requested memory at the start of the current allocation block.
- size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
+ size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
FreeBlock current = allocation_list_[current_allocation_block_index_];
if (aligned_requested >= (current.size - Page::kPageSize)) {
// Don't leave a small free block, useless for a large object or chunk.
}
ASSERT(*allocated <= current.size);
ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
- if (!MemoryAllocator::CommitExecutableMemory(code_range_,
- current.start,
- commit_size,
- *allocated)) {
+ if (!MemoryAllocator::CommitCodePage(code_range_,
+ current.start,
+ *allocated)) {
*allocated = 0;
return NULL;
}
}
-bool CodeRange::CommitRawMemory(Address start, size_t length) {
- return code_range_->Commit(start, length, true);
-}
-
-
-bool CodeRange::UncommitRawMemory(Address start, size_t length) {
- return code_range_->Uncommit(start, length);
-}
-
-
void CodeRange::FreeRawMemory(Address address, size_t length) {
ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
free_list_.Add(FreeBlock(address, length));
if (!reservation.IsReserved()) return NULL;
size_ += reservation.size();
- Address base = static_cast<Address>(reservation.address());
+ Address base = RoundUp(static_cast<Address>(reservation.address()),
+ alignment);
controller->TakeControl(&reservation);
return base;
}
-Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
- size_t commit_size,
+Address MemoryAllocator::AllocateAlignedMemory(size_t size,
size_t alignment,
Executability executable,
VirtualMemory* controller) {
- ASSERT(commit_size <= reserve_size);
VirtualMemory reservation;
- Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
+ Address base = ReserveAlignedMemory(size, alignment, &reservation);
if (base == NULL) return NULL;
if (executable == EXECUTABLE) {
- if (!CommitExecutableMemory(&reservation,
- base,
- commit_size,
- reserve_size)) {
+ if (!CommitCodePage(&reservation, base, size)) {
base = NULL;
}
} else {
- if (!reservation.Commit(base, commit_size, false)) {
+ if (!reservation.Commit(base, size, false)) {
base = NULL;
}
}
}
-// Commit MemoryChunk area to the requested size.
-bool MemoryChunk::CommitArea(size_t requested) {
- size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
- MemoryAllocator::CodePageGuardSize() : 0;
- size_t header_size = area_start() - address() - guard_size;
- size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
- size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
- OS::CommitPageSize());
-
- if (commit_size > committed_size) {
- // Commit size should be less or equal than the reserved size.
- ASSERT(commit_size <= size() - 2 * guard_size);
- // Append the committed area.
- Address start = address() + committed_size + guard_size;
- size_t length = commit_size - committed_size;
- if (reservation_.IsReserved()) {
- if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
- return false;
- }
- } else {
- CodeRange* code_range = heap_->isolate()->code_range();
- ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
- if (!code_range->CommitRawMemory(start, length)) return false;
- }
-
- if (Heap::ShouldZapGarbage()) {
- heap_->isolate()->memory_allocator()->ZapBlock(start, length);
- }
- } else if (commit_size < committed_size) {
- ASSERT(commit_size > 0);
- // Shrink the committed area.
- size_t length = committed_size - commit_size;
- Address start = address() + committed_size + guard_size - length;
- if (reservation_.IsReserved()) {
- if (!reservation_.Uncommit(start, length)) return false;
- } else {
- CodeRange* code_range = heap_->isolate()->code_range();
- ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
- if (!code_range->UncommitRawMemory(start, length)) return false;
- }
- }
-
- area_end_ = area_start_ + requested;
- return true;
-}
-
-
void MemoryChunk::InsertAfter(MemoryChunk* other) {
next_chunk_ = other->next_chunk_;
prev_chunk_ = other;
}
-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
- intptr_t commit_area_size,
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
Executability executable,
Space* owner) {
- ASSERT(commit_area_size <= reserve_area_size);
-
size_t chunk_size;
Heap* heap = isolate_->heap();
Address base = NULL;
Address area_start = NULL;
Address area_end = NULL;
- //
- // MemoryChunk layout:
- //
- // Executable
- // +----------------------------+<- base aligned with MemoryChunk::kAlignment
- // | Header |
- // +----------------------------+<- base + CodePageGuardStartOffset
- // | Guard |
- // +----------------------------+<- area_start_
- // | Area |
- // +----------------------------+<- area_end_ (area_start + commit_area_size)
- // | Committed but not used |
- // +----------------------------+<- aligned at OS page boundary
- // | Reserved but not committed |
- // +----------------------------+<- aligned at OS page boundary
- // | Guard |
- // +----------------------------+<- base + chunk_size
- //
- // Non-executable
- // +----------------------------+<- base aligned with MemoryChunk::kAlignment
- // | Header |
- // +----------------------------+<- area_start_ (base + kObjectStartOffset)
- // | Area |
- // +----------------------------+<- area_end_ (area_start + commit_area_size)
- // | Committed but not used |
- // +----------------------------+<- aligned at OS page boundary
- // | Reserved but not committed |
- // +----------------------------+<- base + chunk_size
- //
-
if (executable == EXECUTABLE) {
- chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
+ chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
OS::CommitPageSize()) + CodePageGuardSize();
// Check executable memory limit.
return NULL;
}
- // Size of header (not executable) plus area (executable).
- size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
- OS::CommitPageSize());
// Allocate executable memory either from code range or from the
// OS.
if (isolate_->code_range()->exists()) {
- base = isolate_->code_range()->AllocateRawMemory(chunk_size,
- commit_size,
- &chunk_size);
+ base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
MemoryChunk::kAlignment));
if (base == NULL) return NULL;
size_executable_ += chunk_size;
} else {
base = AllocateAlignedMemory(chunk_size,
- commit_size,
MemoryChunk::kAlignment,
executable,
&reservation);
if (Heap::ShouldZapGarbage()) {
ZapBlock(base, CodePageGuardStartOffset());
- ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
+ ZapBlock(base + CodePageAreaStartOffset(), body_size);
}
area_start = base + CodePageAreaStartOffset();
- area_end = area_start + commit_area_size;
+ area_end = area_start + body_size;
} else {
- chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
- OS::CommitPageSize());
- size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
- commit_area_size, OS::CommitPageSize());
+ chunk_size = MemoryChunk::kObjectStartOffset + body_size;
base = AllocateAlignedMemory(chunk_size,
- commit_size,
MemoryChunk::kAlignment,
executable,
&reservation);
if (base == NULL) return NULL;
if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
+ ZapBlock(base, chunk_size);
}
area_start = base + Page::kObjectStartOffset;
- area_end = area_start + commit_area_size;
+ area_end = base + chunk_size;
}
- // Use chunk_size for statistics and callbacks because we assume that they
- // treat reserved but not-yet committed memory regions of chunks as allocated.
isolate_->counters()->memory_allocated()->
Increment(static_cast<int>(chunk_size));
Page* MemoryAllocator::AllocatePage(intptr_t size,
PagedSpace* owner,
Executability executable) {
- MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
+ MemoryChunk* chunk = AllocateChunk(size, executable, owner);
if (chunk == NULL) return NULL;
LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
Space* owner,
Executability executable) {
- MemoryChunk* chunk = AllocateChunk(object_size,
- object_size,
- executable,
- owner);
+ MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
if (chunk == NULL) return NULL;
return LargePage::Initialize(isolate_->heap(), chunk);
}
}
-bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
- Address start,
- size_t commit_size,
- size_t reserved_size) {
+bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
+ Address start,
+ size_t size) {
// Commit page header (not executable).
if (!vm->Commit(start,
CodePageGuardStartOffset(),
}
// Commit page body (executable).
+ size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
if (!vm->Commit(start + CodePageAreaStartOffset(),
- commit_size - CodePageGuardStartOffset(),
+ area_size,
true)) {
return false;
}
- // Create guard page before the end.
- if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
+ // Create guard page after the allocatable area.
+ if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
return false;
}
DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};
-
-// Temporarily sets a given code range in an isolate.
-class TestCodeRangeScope {
- public:
- TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
- : isolate_(isolate),
- old_code_range_(isolate->code_range_) {
- isolate->code_range_ = code_range;
- }
-
- ~TestCodeRangeScope() {
- isolate_->code_range_ = old_code_range_;
- }
-
- private:
- Isolate* isolate_;
- CodeRange* old_code_range_;
-
- DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
-};
-
} } // namespace v8::internal
-static void VerifyMemoryChunk(Isolate* isolate,
- Heap* heap,
- CodeRange* code_range,
- size_t reserve_area_size,
- size_t commit_area_size,
- size_t second_commit_area_size,
- Executability executable) {
- MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
- CHECK(memory_allocator->SetUp(heap->MaxReserved(),
- heap->MaxExecutableSize()));
- TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
- TestCodeRangeScope test_code_range_scope(isolate, code_range);
-
- size_t header_size = (executable == EXECUTABLE)
- ? MemoryAllocator::CodePageGuardStartOffset()
- : MemoryChunk::kObjectStartOffset;
- size_t guard_size = (executable == EXECUTABLE)
- ? MemoryAllocator::CodePageGuardSize()
- : 0;
-
- MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
- commit_area_size,
- executable,
- NULL);
- size_t alignment = code_range->exists() ?
- MemoryChunk::kAlignment : OS::CommitPageSize();
- size_t reserved_size = ((executable == EXECUTABLE))
- ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
- alignment)
- : RoundUp(header_size + reserve_area_size, OS::CommitPageSize());
- CHECK(memory_chunk->size() == reserved_size);
- CHECK(memory_chunk->area_start() < memory_chunk->address() +
- memory_chunk->size());
- CHECK(memory_chunk->area_end() <= memory_chunk->address() +
- memory_chunk->size());
- CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
-
- Address area_start = memory_chunk->area_start();
-
- memory_chunk->CommitArea(second_commit_area_size);
- CHECK(area_start == memory_chunk->area_start());
- CHECK(memory_chunk->area_start() < memory_chunk->address() +
- memory_chunk->size());
- CHECK(memory_chunk->area_end() <= memory_chunk->address() +
- memory_chunk->size());
- CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
- second_commit_area_size);
-
- memory_allocator->Free(memory_chunk);
- memory_allocator->TearDown();
- delete memory_allocator;
-}
-
-
-static unsigned int Pseudorandom() {
- static uint32_t lo = 2345;
- lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
- return lo & 0xFFFFF;
-}
-
-
-TEST(MemoryChunk) {
- OS::SetUp();
- Isolate* isolate = Isolate::Current();
- isolate->InitializeLoggingAndCounters();
- Heap* heap = isolate->heap();
- CHECK(heap->ConfigureHeapDefault());
-
- size_t reserve_area_size = 1 * MB;
- size_t initial_commit_area_size, second_commit_area_size;
-
- for (int i = 0; i < 100; i++) {
- initial_commit_area_size = Pseudorandom();
- second_commit_area_size = Pseudorandom();
-
- // With CodeRange.
- CodeRange* code_range = new CodeRange(isolate);
- const int code_range_size = 32 * MB;
- if (!code_range->SetUp(code_range_size)) return;
-
- VerifyMemoryChunk(isolate,
- heap,
- code_range,
- reserve_area_size,
- initial_commit_area_size,
- second_commit_area_size,
- EXECUTABLE);
-
- VerifyMemoryChunk(isolate,
- heap,
- code_range,
- reserve_area_size,
- initial_commit_area_size,
- second_commit_area_size,
- NOT_EXECUTABLE);
- delete code_range;
-
- // Without CodeRange.
- code_range = NULL;
- VerifyMemoryChunk(isolate,
- heap,
- code_range,
- reserve_area_size,
- initial_commit_area_size,
- second_commit_area_size,
- EXECUTABLE);
-
- VerifyMemoryChunk(isolate,
- heap,
- code_range,
- reserve_area_size,
- initial_commit_area_size,
- second_commit_area_size,
- NOT_EXECUTABLE);
- }
-}
-
-
TEST(MemoryAllocator) {
OS::SetUp();
Isolate* isolate = Isolate::Current();