bool Heap::Contains(Address addr) {
- if (OS::IsOutsideAllocatedSpace(addr)) return false;
+ if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
return HasBeenSetUp() &&
(new_space_.ToSpaceContains(addr) ||
old_pointer_space_->Contains(addr) ||
bool Heap::InSpace(Address addr, AllocationSpace space) {
- if (OS::IsOutsideAllocatedSpace(addr)) return false;
+ if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
if (!HasBeenSetUp()) return false;
switch (space) {
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- LockGuard<Mutex> lock_guard(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
return true;
}
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
- limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
}
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- LockGuard<Mutex> lock_guard(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool executable) {
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
- limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
}
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
#ifdef __arm__
bool OS::ArmUsingHardFloat() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- LockGuard<Mutex> lock_guard(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
return false;
}
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
- limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
}
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- LockGuard<Mutex> lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255 This helps identify V8-allocated regions in memory analysis
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(address, size);
return true;
}
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
- limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
}
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- LockGuard<Mutex> lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
- limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
}
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- LockGuard<Mutex> lock_guard(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
- limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
}
}
-static Mutex* limit_mutex = NULL;
-
#if V8_TARGET_ARCH_IA32
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
#undef _TRUNCATE
#undef STRUNCATE
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- LockGuard<Mutex> lock_guard(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* pointer) {
- if (pointer < lowest_ever_allocated || pointer >= highest_ever_allocated)
- return true;
- // Ask the Windows API
- if (IsBadWritePtr(pointer, 1))
- return true;
- return false;
-}
-
// Get the system's page size used by VirtualAlloc() or the next power
// of two. The reason for always returning a power of two is that the
ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, static_cast<int>(msize));
return mbase;
}
if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
return true;
}
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srand(static_cast<unsigned int>(seed));
- limit_mutex = new Mutex();
}
-
-void OS::TearDown() {
- delete limit_mutex;
-}
-
-
} } // namespace v8::internal
// called after CPU initialization.
static void PostSetUp();
- // Clean up platform-OS-related things. Called once at VM shutdown.
- static void TearDown();
-
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
// strive for high-precision timer resolution, preferable
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
- // Returns an indication of whether a pointer is in a space that
- // has been allocated by Allocate(). This method may conservatively
- // always return false, but giving more accurate information may
- // improve the robustness of the stack dump code in the presence of
- // heap corruption.
- static bool IsOutsideAllocatedSpace(void* pointer);
-
// Sleep for a number of milliseconds.
static void Sleep(const int milliseconds);
}
ASSERT(*allocated <= current.size);
ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
- if (!MemoryAllocator::CommitExecutableMemory(code_range_,
- current.start,
- commit_size,
- *allocated)) {
+ if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
+ current.start,
+ commit_size,
+ *allocated)) {
*allocated = 0;
return NULL;
}
bool CodeRange::CommitRawMemory(Address start, size_t length) {
- return code_range_->Commit(start, length, true);
+ return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
}
capacity_(0),
capacity_executable_(0),
size_(0),
- size_executable_(0) {
+ size_executable_(0),
+ lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
+ highest_ever_allocated_(reinterpret_cast<void*>(0)) {
}
}
+bool MemoryAllocator::CommitMemory(Address base,
+ size_t size,
+ Executability executable) {
+ if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
+ return false;
+ }
+ UpdateAllocatedSpaceLimits(base, base + size);
+ return true;
+}
+
+
void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
base = NULL;
}
} else {
- if (!reservation.Commit(base, commit_size, false)) {
+ if (reservation.Commit(base, commit_size, false)) {
+ UpdateAllocatedSpaceLimits(base, base + commit_size);
+ } else {
base = NULL;
}
}
Address start = address() + committed_size + guard_size;
size_t length = commit_size - committed_size;
if (reservation_.IsReserved()) {
- if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
+ Executability executable = IsFlagSet(IS_EXECUTABLE)
+ ? EXECUTABLE : NOT_EXECUTABLE;
+ if (!heap()->isolate()->memory_allocator()->CommitMemory(
+ start, length, executable)) {
return false;
}
} else {
bool MemoryAllocator::CommitBlock(Address start,
size_t size,
Executability executable) {
- if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
+ if (!CommitMemory(start, size, executable)) return false;
if (Heap::ShouldZapGarbage()) {
ZapBlock(start, size);
return false;
}
+ UpdateAllocatedSpaceLimits(start,
+ start + CodePageAreaStartOffset() +
+ commit_size - CodePageGuardStartOffset());
return true;
}
return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
}
+ // Returns an indication of whether a pointer is in a space that has
+ // been allocated by this MemoryAllocator.
+ V8_INLINE(bool IsOutsideAllocatedSpace(const void* address)) const {
+ return address < lowest_ever_allocated_ ||
+ address >= highest_ever_allocated_;
+ }
+
#ifdef DEBUG
// Reports statistic info of the space.
void ReportStatistics();
Executability executable,
VirtualMemory* controller);
+ bool CommitMemory(Address addr, size_t size, Executability executable);
+
void FreeMemory(VirtualMemory* reservation, Executability executable);
void FreeMemory(Address addr, size_t size, Executability executable);
return CodePageAreaEndOffset() - CodePageAreaStartOffset();
}
- MUST_USE_RESULT static bool CommitExecutableMemory(VirtualMemory* vm,
- Address start,
- size_t commit_size,
- size_t reserved_size);
+ MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
+ Address start,
+ size_t commit_size,
+ size_t reserved_size);
private:
Isolate* isolate_;
// Allocated executable space size in bytes.
size_t size_executable_;
+ // We keep the lowest and highest addresses allocated as a quick way
+ // of determining that pointers are outside the heap. The estimate is
+ // conservative, i.e. not all addresses in 'allocated' space are allocated
+ // to our heap. The range is [lowest, highest), inclusive on the low end
+ // and exclusive on the high end.
+ void* lowest_ever_allocated_;
+ void* highest_ever_allocated_;
+
struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
ObjectSpace space,
Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
PagedSpace* owner);
+ void UpdateAllocatedSpaceLimits(void* low, void* high) {
+ lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
+ highest_ever_allocated_ = Max(highest_ever_allocated_, high);
+ }
+
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
call_completed_callbacks_ = NULL;
Sampler::TearDown();
- OS::TearDown();
}
TEST(CodeRange) {
const int code_range_size = 32*MB;
- OS::SetUp();
- Isolate::Current()->InitializeLoggingAndCounters();
- CodeRange* code_range = new CodeRange(Isolate::Current());
- code_range->SetUp(code_range_size);
+ CcTest::InitializeVM();
+ CodeRange code_range(reinterpret_cast<Isolate*>(CcTest::isolate()));
+ code_range.SetUp(code_range_size);
int current_allocated = 0;
int total_allocated = 0;
List<Block> blocks(1000);
(Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
Pseudorandom() % 5000 + 1;
size_t allocated = 0;
- Address base = code_range->AllocateRawMemory(requested,
- requested,
- &allocated);
+ Address base = code_range.AllocateRawMemory(requested,
+ requested,
+ &allocated);
CHECK(base != NULL);
blocks.Add(Block(base, static_cast<int>(allocated)));
current_allocated += static_cast<int>(allocated);
} else {
// Free a block.
int index = Pseudorandom() % blocks.length();
- code_range->FreeRawMemory(blocks[index].base, blocks[index].size);
+ code_range.FreeRawMemory(blocks[index].base, blocks[index].size);
current_allocated -= blocks[index].size;
if (index < blocks.length() - 1) {
blocks[index] = blocks.RemoveLast();
}
}
- code_range->TearDown();
- delete code_range;
+ code_range.TearDown();
}