Refactor and clean up VirtualMemory.
author    bmeurer@chromium.org <bmeurer@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
          Wed, 11 Sep 2013 08:47:02 +0000 (08:47 +0000)
committer bmeurer@chromium.org <bmeurer@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
          Wed, 11 Sep 2013 08:47:02 +0000 (08:47 +0000)
Remove a lot of platform duplication, and simplify the virtual
memory implementation. Also improve readability by avoiding bool
parameters for executability (use a dedicated Executability type
instead).

Get rid of the Isolate::UncheckedCurrent() call in the platform
code, as part of the Isolate TLS cleanup.

Use a dedicated random number generator for the address
randomization, instead of messing with the per-isolate random
number generators.

TEST=cctest/test-virtual-memory
R=verwaest@chromium.org

Review URL: https://codereview.chromium.org/23641009

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16637 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
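
To make the Executability change concrete: the hunks below replace bool executability arguments with named enum values. A minimal standalone sketch of the difference at a call site, using malloc-backed stand-ins rather than V8's real allocators (only the NOT_EXECUTABLE/EXECUTABLE names are taken from the diff):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Mirrors the enum values used throughout the diff below.
enum Executability { NOT_EXECUTABLE, EXECUTABLE };

// Hypothetical stand-ins (malloc-backed, no real page protection); they exist
// only to contrast the two signatures.
void* AllocateOld(size_t size, size_t* allocated, bool /*is_executable*/) {
  *allocated = size;
  return std::malloc(size);
}

void* AllocateNew(size_t size, size_t* allocated, Executability /*executability*/) {
  *allocated = size;
  return std::malloc(size);
}

int main() {
  size_t actual_size = 0;
  void* a = AllocateOld(1024, &actual_size, true);        // true... executable? writable?
  void* b = AllocateNew(1024, &actual_size, EXECUTABLE);  // intent is explicit at the call site
  std::printf("allocated %zu bytes twice\n", actual_size);
  std::free(a);
  std::free(b);
  return 0;
}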

36 files changed:
include/v8config.h
src/arm/codegen-arm.cc
src/deoptimizer.cc
src/heap-inl.h
src/heap.cc
src/ia32/codegen-ia32.cc
src/incremental-marking.cc
src/mips/codegen-mips.cc
src/platform-cygwin.cc
src/platform-freebsd.cc
src/platform-linux.cc
src/platform-macos.cc
src/platform-openbsd.cc
src/platform-posix.cc
src/platform-solaris.cc
src/platform-win32.cc
src/platform.h
src/platform/virtual-memory.cc [new file with mode: 0644]
src/platform/virtual-memory.h [new file with mode: 0644]
src/spaces-inl.h
src/spaces.cc
src/spaces.h
src/store-buffer.cc
src/v8globals.h
src/x64/codegen-x64.cc
test/cctest/cctest.gyp
test/cctest/test-assembler-x64.cc
test/cctest/test-code-stubs-arm.cc
test/cctest/test-code-stubs-ia32.cc
test/cctest/test-code-stubs-x64.cc
test/cctest/test-macro-assembler-x64.cc
test/cctest/test-platform-linux.cc
test/cctest/test-platform-win32.cc
test/cctest/test-spaces.cc
test/cctest/test-virtual-memory.cc [new file with mode: 0644]
tools/gyp/v8.gyp

include/v8config.h
index 0993a9f..2dfbcfa 100644 (file)
 //                                        supported
 //  V8_HAS_ATTRIBUTE_DEPRECATED         - __attribute__((deprecated)) supported
 //  V8_HAS_ATTRIBUTE_NOINLINE           - __attribute__((noinline)) supported
+//  V8_HAS_ATTRIBUTE_PURE               - __attribute__((pure)) supported
 //  V8_HAS_ATTRIBUTE_VISIBILITY         - __attribute__((visibility)) supported
 //  V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result))
 //                                        supported
 # define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
 # define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated))
 # define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
+# define V8_HAS_ATTRIBUTE_PURE (__has_attribute(pure))
 # define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
 # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
     (__has_attribute(warn_unused_result))
 # define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (V8_GNUC_PREREQ(4, 4, 0))
 # define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0))
 # define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0))
+# define V8_HAS_ATTRIBUTE_PURE (V8_GNUC_PREREQ(2, 96, 0))
 # define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0))
 # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
     (!V8_CC_INTEL && V8_GNUC_PREREQ(4, 1, 0))
 #endif
 
 
+// Many functions have no effects except the return value and their return value
+// depends only on the parameters and/or global variables. Such a function can
+// be subject to common subexpression elimination and loop optimization just as
+// an arithmetic operator would be. These functions should be declared with the
+// attribute V8_PURE. For example,
+//
+//   int square (int) V8_PURE;
+//
+// says that the hypothetical function square is safe to call fewer times than
+// the program says.
+//
+// Some common examples of pure functions are strlen or memcmp. Interesting
+// non-V8_PURE functions are functions with infinite loops or those depending
+// on volatile memory or other system resource, that may change between two
+// consecutive calls (such as feof in a multithreaded environment).
+#if V8_HAS_ATTRIBUTE_PURE
+# define V8_PURE __attribute__((pure))
+#else
+# define V8_PURE /* NOT SUPPORTED */
+#endif
+
+
 // Annotate a function indicating the caller must examine the return value.
 // Use like:
 //   int foo() V8_WARN_UNUSED_RESULT;
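
As a usage sketch for the V8_PURE macro introduced above (a local re-definition is included so the snippet compiles on its own; CStringLength is a hypothetical example, not part of this change):

#include <cstring>
#include <cstdio>

// Local stand-in for the macro defined above (GCC/Clang only).
#if defined(__GNUC__)
# define V8_PURE __attribute__((pure))
#else
# define V8_PURE /* NOT SUPPORTED */
#endif

// A pure function: no side effects, result depends only on its argument and
// the memory it points to, so the compiler may fold repeated calls into one.
size_t CStringLength(const char* s) V8_PURE;
size_t CStringLength(const char* s) { return std::strlen(s); }

int main() {
  const char* text = "virtual memory";
  size_t total = 0;
  // The loop condition may evaluate CStringLength(text) only once.
  for (size_t i = 0; i < CStringLength(text); ++i) total += 1;
  std::printf("%zu\n", total);
  return 0;
}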
src/arm/codegen-arm.cc
index 1bcf3e3..faf7b54 100644 (file)
@@ -64,7 +64,8 @@ double fast_exp_simulator(double x) {
 UnaryMathFunction CreateExpFunction() {
   if (!FLAG_fast_math) return &exp;
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
   if (buffer == NULL) return &exp;
   ExternalReference::InitializeMathExpData();
 
@@ -102,7 +103,9 @@ UnaryMathFunction CreateExpFunction() {
   ASSERT(!RelocInfo::RequiresRelocation(desc));
 
   CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
+  ASSERT(result);
+  USE(result);
 
 #if !defined(USE_SIMULATOR)
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
@@ -122,7 +125,8 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function(
     return stub;
   }
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
   if (buffer == NULL) return stub;
 
   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -264,7 +268,9 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function(
   ASSERT(!RelocInfo::RequiresRelocation(desc));
 
   CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
+  ASSERT(result);
+  USE(result);
   return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
 #endif
 }
@@ -280,7 +286,8 @@ OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
     return stub;
   }
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
   if (buffer == NULL) return stub;
 
   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -352,7 +359,9 @@ OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
   masm.GetCode(&desc);
 
   CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
+  ASSERT(result);
+  USE(result);
 
   return FUNCTION_CAST<OS::MemCopyUint16Uint8Function>(buffer);
 #endif
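
The codegen hunks above (and the IA-32 and MIPS ones below) all follow the same pattern: allocate an executable region, emit code into it, flush the instruction cache, then write-protect the region. A rough POSIX sketch of that pattern using raw mmap/mprotect; the real VirtualMemory::AllocateRegion and WriteProtectRegion live in the new src/platform/virtual-memory.cc, which is not shown in this view:

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstring>

void* AllocateExecutableRegion(size_t requested, size_t* allocated) {
  size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  size_t size = (requested + page - 1) & ~(page - 1);  // round up to page size
  void* region = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (region == MAP_FAILED) return NULL;
  *allocated = size;
  return region;
}

bool WriteProtectRegion(void* base, size_t size) {
  // Keep the code readable and executable, but no longer writable.
  return mprotect(base, size, PROT_READ | PROT_EXEC) == 0;
}

void EmitStubAndSeal() {
  size_t actual_size = 0;
  void* buffer = AllocateExecutableRegion(1024, &actual_size);
  if (buffer == NULL) return;               // fall back, as the real code does
  std::memset(buffer, 0x90, actual_size);   // "emit" NOP bytes as a placeholder
  // A real generator would flush the instruction cache here (CPU::FlushICache).
  WriteProtectRegion(buffer, actual_size);
}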
src/deoptimizer.cc
index c979a53..50ee05c 100644 (file)
@@ -42,14 +42,8 @@ namespace internal {
 
 static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
   return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
-                                  OS::CommitPageSize(),
-#if defined(__native_client__)
-  // The Native Client port of V8 uses an interpreter,
-  // so code pages don't need PROT_EXEC.
-                                  NOT_EXECUTABLE,
-#else
-                                  EXECUTABLE,
-#endif
+                                  VirtualMemory::GetPageSize(),
+                                  VirtualMemory::EXECUTABLE,
                                   NULL);
 }
 
@@ -128,7 +122,7 @@ static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
 size_t Deoptimizer::GetMaxDeoptTableSize() {
   int entries_size =
       Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
-  int commit_page_size = static_cast<int>(OS::CommitPageSize());
+  int commit_page_size = static_cast<int>(VirtualMemory::GetPageSize());
   int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
                     commit_page_size) + 1;
   return static_cast<size_t>(commit_page_size * page_count);
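
The table-size computation is unchanged apart from the page-size accessor. A standalone check of the rounding arithmetic, with illustrative entry counts and sizes (the real kMaxNumberOfEntries and table_entry_size_ values are not part of this diff):

#include <cstdio>

// Mirrors the arithmetic in Deoptimizer::GetMaxDeoptTableSize() above.
int main() {
  const int kMaxNumberOfEntries = 16384;           // assumed for illustration
  const int table_entry_size = 8;                  // assumed for illustration
  const int kDeoptTableMaxEpilogueCodeSize = 2 * 1024;
  const int commit_page_size = 4096;               // VirtualMemory::GetPageSize()

  int entries_size = kMaxNumberOfEntries * table_entry_size;
  // One extra page on top of the rounded-up size, exactly as in the source.
  int page_count =
      ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) / commit_page_size) + 1;
  std::printf("max deopt table size: %d bytes\n", commit_page_size * page_count);
  return 0;
}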
src/heap-inl.h
index 86aff1a..3892170 100644 (file)
@@ -144,7 +144,7 @@ MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str,
   // Allocate string.
   Object* result;
   { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
-                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
+                   ? lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
@@ -178,7 +178,7 @@ MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
   // Allocate string.
   Object* result;
   { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
-                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
+                   ? lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
@@ -242,7 +242,8 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
   } else if (CODE_SPACE == space) {
     result = code_space_->AllocateRaw(size_in_bytes);
   } else if (LO_SPACE == space) {
-    result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
+    result = lo_space_->AllocateRaw(
+        size_in_bytes, VirtualMemory::NOT_EXECUTABLE);
   } else if (CELL_SPACE == space) {
     result = cell_space_->AllocateRaw(size_in_bytes);
   } else if (PROPERTY_CELL_SPACE == space) {
src/heap.cc
index 0455a84..83c8bad 100644 (file)
@@ -172,8 +172,7 @@ Heap::Heap()
   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
 #endif
 
-  intptr_t max_virtual = OS::MaxVirtualMemory();
-
+  intptr_t max_virtual = static_cast<intptr_t>(VirtualMemory::GetLimit());
   if (max_virtual > 0) {
     if (code_range_size_ > 0) {
       // Reserve no more than 1/8 of the memory for the code range.
@@ -4151,7 +4150,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
   HeapObject* result;
   bool force_lo_space = obj_size > code_space()->AreaSize();
   if (force_lo_space) {
-    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
+    maybe_result = lo_space_->AllocateRaw(obj_size, VirtualMemory::EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
   }
@@ -4163,7 +4162,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
     // Discard the first code allocation, which was on a page where it could be
     // moved.
     CreateFillerObjectAt(result->address(), obj_size);
-    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
+    maybe_result = lo_space_->AllocateRaw(obj_size, VirtualMemory::EXECUTABLE);
     if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
   }
 
@@ -4214,7 +4213,7 @@ MaybeObject* Heap::CopyCode(Code* code) {
   int obj_size = code->Size();
   MaybeObject* maybe_result;
   if (obj_size > code_space()->AreaSize()) {
-    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
+    maybe_result = lo_space_->AllocateRaw(obj_size, VirtualMemory::EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
   }
@@ -4257,7 +4256,8 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
 
   MaybeObject* maybe_result;
   if (new_obj_size > code_space()->AreaSize()) {
-    maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
+    maybe_result = lo_space_->AllocateRaw(
+        new_obj_size, VirtualMemory::EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(new_obj_size);
   }
@@ -5370,7 +5370,7 @@ MaybeObject* Heap::AllocateInternalizedStringImpl(
   // Allocate string.
   Object* result;
   { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
-                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
+                   ? lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
@@ -5523,7 +5523,7 @@ MaybeObject* Heap::AllocateRawFixedArray(int length) {
   int size = FixedArray::SizeFor(length);
   return size <= Page::kMaxNonCodeHeapObjectSize
       ? new_space_.AllocateRaw(size)
-      : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
+      : lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE);
 }
 
 
@@ -6878,7 +6878,7 @@ bool Heap::SetUp() {
       new OldSpace(this,
                    max_old_generation_size_,
                    OLD_POINTER_SPACE,
-                   NOT_EXECUTABLE);
+                   VirtualMemory::NOT_EXECUTABLE);
   if (old_pointer_space_ == NULL) return false;
   if (!old_pointer_space_->SetUp()) return false;
 
@@ -6887,7 +6887,7 @@ bool Heap::SetUp() {
       new OldSpace(this,
                    max_old_generation_size_,
                    OLD_DATA_SPACE,
-                   NOT_EXECUTABLE);
+                   VirtualMemory::NOT_EXECUTABLE);
   if (old_data_space_ == NULL) return false;
   if (!old_data_space_->SetUp()) return false;
 
@@ -6901,8 +6901,8 @@ bool Heap::SetUp() {
     }
   }
 
-  code_space_ =
-      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
+  code_space_ = new OldSpace(
+      this, max_old_generation_size_, CODE_SPACE, VirtualMemory::EXECUTABLE);
   if (code_space_ == NULL) return false;
   if (!code_space_->SetUp()) return false;
 
@@ -7999,8 +7999,9 @@ void Heap::FreeQueuedChunks() {
       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
       while (inner <= inner_last) {
         // Size of a large chunk is always a multiple of
-        // OS::AllocateAlignment() so there is always
-        // enough space for a fake MemoryChunk header.
+        // VirtualMemory::GetAllocationGranularity() so
+        // there is always enough space for a fake
+        // MemoryChunk header.
         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
         // Guard against overflow.
         if (area_end < inner->address()) area_end = chunk_end;
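
VirtualMemory::GetLimit() takes over from OS::MaxVirtualMemory(); the removed POSIX implementation further down in platform-posix.cc queried getrlimit(RLIMIT_DATA). A standalone version of that query, wired up to the 1/8 code-range cap mentioned in the Heap::Heap() hunk above (assuming GetLimit() keeps the same getrlimit-based behavior):

#include <sys/resource.h>
#include <cstdio>

// Same query the removed OS::MaxVirtualMemory() performed; returns 0 if the
// query fails (meaning no artificial limit is known).
static long GetDataSegmentLimit() {
  struct rlimit limit;
  if (getrlimit(RLIMIT_DATA, &limit) != 0) return 0;
  return static_cast<long>(limit.rlim_cur);
}

int main() {
  long max_virtual = GetDataSegmentLimit();
  if (max_virtual > 0) {
    // Heap::Heap() reserves no more than 1/8 of this for the code range.
    std::printf("code range cap: %ld bytes\n", max_virtual / 8);
  } else {
    std::printf("no artificial virtual memory limit known\n");
  }
  return 0;
}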
src/ia32/codegen-ia32.cc
index 84a4d23..0e4fe8c 100644 (file)
@@ -60,9 +60,8 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
 UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
   size_t actual_size;
   // Allocate buffer in executable space.
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
   if (buffer == NULL) {
     // Fallback to library function if function cannot be created.
     switch (type) {
@@ -97,7 +96,9 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
   ASSERT(!RelocInfo::RequiresRelocation(desc));
 
   CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
+  ASSERT(result);
+  USE(result);
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
 }
 
@@ -106,7 +107,8 @@ UnaryMathFunction CreateExpFunction() {
   if (!CpuFeatures::IsSupported(SSE2)) return &exp;
   if (!FLAG_fast_math) return &exp;
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
   if (buffer == NULL) return &exp;
   ExternalReference::InitializeMathExpData();
 
@@ -135,7 +137,9 @@ UnaryMathFunction CreateExpFunction() {
   ASSERT(!RelocInfo::RequiresRelocation(desc));
 
   CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
+  ASSERT(result);
+  USE(result);
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
 }
 
@@ -143,9 +147,8 @@ UnaryMathFunction CreateExpFunction() {
 UnaryMathFunction CreateSqrtFunction() {
   size_t actual_size;
   // Allocate buffer in executable space.
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
   // If SSE2 is not available, we can use libc's implementation to ensure
   // consistency since code by fullcodegen's calls into runtime in that case.
   if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
@@ -168,7 +171,9 @@ UnaryMathFunction CreateSqrtFunction() {
   ASSERT(!RelocInfo::RequiresRelocation(desc));
 
   CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
+  ASSERT(result);
+  USE(result);
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
 }
 
@@ -262,7 +267,8 @@ class LabelConverter {
 OS::MemMoveFunction CreateMemMoveFunction() {
   size_t actual_size;
   // Allocate buffer in executable space.
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
   if (buffer == NULL) return NULL;
   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
   LabelConverter conv(buffer);
@@ -639,7 +645,9 @@ OS::MemMoveFunction CreateMemMoveFunction() {
   masm.GetCode(&desc);
   ASSERT(!RelocInfo::RequiresRelocation(desc));
   CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
+  ASSERT(result);
+  USE(result);
   // TODO(jkummerow): It would be nice to register this code creation event
   // with the PROFILE / GDBJIT system.
   return FUNCTION_CAST<OS::MemMoveFunction>(buffer);
src/incremental-marking.cc
index df0f14a..4d46b66 100644 (file)
@@ -558,7 +558,7 @@ void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
     bool success = marking_deque_memory_->Commit(
         reinterpret_cast<Address>(marking_deque_memory_->address()),
         marking_deque_memory_->size(),
-        false);  // Not executable.
+        VirtualMemory::NOT_EXECUTABLE);
     CHECK(success);
     marking_deque_memory_committed_ = true;
   }
src/mips/codegen-mips.cc
index 5c847fc..e9148ce 100644 (file)
@@ -64,7 +64,8 @@ double fast_exp_simulator(double x) {
 UnaryMathFunction CreateExpFunction() {
   if (!FLAG_fast_math) return &exp;
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
   if (buffer == NULL) return &exp;
   ExternalReference::InitializeMathExpData();
 
@@ -102,7 +103,9 @@ UnaryMathFunction CreateExpFunction() {
   ASSERT(!RelocInfo::RequiresRelocation(desc));
 
   CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
+  ASSERT(result);
+  USE(result);
 
 #if !defined(USE_SIMULATOR)
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
src/platform-cygwin.cc
index 9618d7e..bad1ccb 100644 (file)
@@ -73,21 +73,6 @@ double OS::LocalTimeOffset() {
 }
 
 
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool is_executable) {
-  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-  if (mbase == MAP_FAILED) {
-    LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
-    return NULL;
-  }
-  *allocated = msize;
-  return mbase;
-}
-
-
 void OS::DumpBacktrace() {
   // Currently unsupported.
 }
@@ -224,7 +209,8 @@ static void* GetRandomAddr() {
   // CpuFeatures::Probe. We don't care about randomization in this case because
   // the code page is immediately freed.
   if (isolate != NULL) {
-    // The address range used to randomize RWX allocations in OS::Allocate
+    // The address range used to randomize RWX allocations in
+    // VirtualMemory::AllocateRegion().
     // Try not to map pages into the default range that windows loads DLLs
     // Use a multiple of 64k to prevent committing unused memory.
     // Note: This does not guarantee RWX regions will be within the
@@ -245,126 +231,4 @@ static void* GetRandomAddr() {
   return NULL;
 }
 
-
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
-  LPVOID base = NULL;
-
-  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
-    // For exectutable pages try and randomize the allocation address
-    for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
-      base = VirtualAlloc(GetRandomAddr(), size, action, protection);
-    }
-  }
-
-  // After three attempts give up and let the OS find an address to use.
-  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
-
-  return base;
-}
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* address = ReserveRegion(request_size);
-  if (address == NULL) return;
-  Address base = RoundUp(static_cast<Address>(address), alignment);
-  // Try reducing the size by freeing and then reallocating a specific area.
-  bool result = ReleaseRegion(address, request_size);
-  USE(result);
-  ASSERT(result);
-  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
-  if (address != NULL) {
-    request_size = size;
-    ASSERT(base == static_cast<Address>(address));
-  } else {
-    // Resizing failed, just go with a bigger area.
-    address = ReserveRegion(request_size);
-    if (address == NULL) return;
-  }
-  address_ = address;
-  size_ = request_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address_, size_);
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  ASSERT(IsReserved());
-  return UncommitRegion(address, size);
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  if (NULL == VirtualAlloc(address,
-                           OS::CommitPageSize(),
-                           MEM_COMMIT,
-                           PAGE_NOACCESS)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return VirtualFree(base, 0, MEM_RELEASE) != 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
-
 } }  // namespace v8::internal
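
The platform-specific VirtualMemory implementations deleted here (and in the following platform files) move into the new src/platform/virtual-memory.{h,cc}, whose contents are not part of this view. Below is a header sketch reconstructed purely from the call sites visible in these hunks; the actual declarations may differ:

#include <cstddef>

namespace v8 {
namespace internal {

class VirtualMemory {
 public:
  enum Executability { NOT_EXECUTABLE, EXECUTABLE };

  // Replaces OS::Allocate(); rounds the request up and reports the actual
  // size through |allocated|.
  static void* AllocateRegion(size_t request_size, size_t* allocated,
                              Executability executability);

  // Replaces OS::ProtectCode(); drops write permission from a code region.
  static bool WriteProtectRegion(void* base, size_t size);

  // Replace OS::CommitPageSize(), OS::AllocateAlignment() and
  // OS::MaxVirtualMemory(), respectively.
  static size_t GetPageSize();
  static size_t GetAllocationGranularity();
  static size_t GetLimit();

  // The instance API keeps its old shape, but Commit() now takes the
  // Executability enum instead of a bool (the incremental-marking call site
  // passes an Address, so the first parameter type may differ).
  bool Commit(void* address, size_t size, Executability executability);
};

} }  // namespace v8::internal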
src/platform-freebsd.cc
index 34ed709..202402b 100644 (file)
@@ -81,22 +81,6 @@ double OS::LocalTimeOffset() {
 }
 
 
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool executable) {
-  const size_t msize = RoundUp(requested, getpagesize());
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
-  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
-  if (mbase == MAP_FAILED) {
-    LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
-    return NULL;
-  }
-  *allocated = msize;
-  return mbase;
-}
-
-
 void OS::DumpBacktrace() {
   POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
 }
@@ -203,141 +187,4 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
   return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
 }
 
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
-
 } }  // namespace v8::internal
src/platform-linux.cc
index 39bf1cb..bad41ad 100644 (file)
@@ -137,23 +137,6 @@ double OS::LocalTimeOffset() {
 }
 
 
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool is_executable) {
-  const size_t msize = RoundUp(requested, AllocateAlignment());
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* addr = OS::GetRandomMmapAddr();
-  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-  if (mbase == MAP_FAILED) {
-    LOG(i::Isolate::Current(),
-        StringEvent("OS::Allocate", "mmap failed"));
-    return NULL;
-  }
-  *allocated = msize;
-  return mbase;
-}
-
-
 void OS::DumpBacktrace() {
   // backtrace is a glibc extension.
 #if defined(__GLIBC__) && !defined(__UCLIBC__)
@@ -184,12 +167,16 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
   int size = ftell(file);
 
   void* memory =
-      mmap(OS::GetRandomMmapAddr(),
+      mmap(NULL,
            size,
            PROT_READ | PROT_WRITE,
            MAP_SHARED,
            fileno(file),
            0);
+  if (memory == MAP_FAILED) {
+    fclose(file);
+    return NULL;
+  }
   return new PosixMemoryMappedFile(file, memory, size);
 }
 
@@ -204,18 +191,24 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
     return NULL;
   }
   void* memory =
-      mmap(OS::GetRandomMmapAddr(),
+      mmap(NULL,
            size,
            PROT_READ | PROT_WRITE,
            MAP_SHARED,
            fileno(file),
            0);
+  if (memory == MAP_FAILED) {
+    fclose(file);
+    return NULL;
+  }
   return new PosixMemoryMappedFile(file, memory, size);
 }
 
 
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) OS::Free(memory_, size_);
+  int result = munmap(memory_, size_);
+  ASSERT_EQ(0, result);
+  USE(result);
   fclose(file_);
 }
 
@@ -296,7 +289,7 @@ void OS::SignalCodeMovingGC() {
     OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap);
     OS::Abort();
   }
-  void* addr = mmap(OS::GetRandomMmapAddr(),
+  void* addr = mmap(NULL,
                     size,
 #if defined(__native_client__)
                     // The Native Client port of V8 uses an interpreter,
@@ -309,7 +302,9 @@ void OS::SignalCodeMovingGC() {
                     fileno(f),
                     0);
   ASSERT(addr != MAP_FAILED);
-  OS::Free(addr, size);
+  int result = munmap(addr, size);
+  ASSERT_EQ(0, result);
+  USE(result);
   fclose(f);
 }
 
@@ -323,147 +318,4 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
 #endif
 }
 
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-#if defined(__native_client__)
-  // The Native Client port of V8 uses an interpreter,
-  // so code pages don't need PROT_EXEC.
-  int prot = PROT_READ | PROT_WRITE;
-#else
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-#endif
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  return true;
-}
-
 } }  // namespace v8::internal
src/platform-macos.cc
index f618737..d7c30a0 100644 (file)
@@ -79,34 +79,6 @@ namespace v8 {
 namespace internal {
 
 
-// Constants used for mmap.
-// kMmapFd is used to pass vm_alloc flags to tag the region with the user
-// defined tag 255 This helps identify V8-allocated regions in memory analysis
-// tools like vmmap(1).
-static const int kMmapFd = VM_MAKE_TAG(255);
-static const off_t kMmapFdOffset = 0;
-
-
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool is_executable) {
-  const size_t msize = RoundUp(requested, getpagesize());
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* mbase = mmap(OS::GetRandomMmapAddr(),
-                     msize,
-                     prot,
-                     MAP_PRIVATE | MAP_ANON,
-                     kMmapFd,
-                     kMmapFdOffset);
-  if (mbase == MAP_FAILED) {
-    LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
-    return NULL;
-  }
-  *allocated = msize;
-  return mbase;
-}
-
-
 void OS::DumpBacktrace() {
   // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
   if (backtrace == NULL) return;
@@ -137,7 +109,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
   int size = ftell(file);
 
   void* memory =
-      mmap(OS::GetRandomMmapAddr(),
+      mmap(NULL,
            size,
            PROT_READ | PROT_WRITE,
            MAP_SHARED,
@@ -157,7 +129,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
     return NULL;
   }
   void* memory =
-      mmap(OS::GetRandomMmapAddr(),
+      mmap(NULL,
           size,
           PROT_READ | PROT_WRITE,
           MAP_SHARED,
@@ -168,7 +140,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
 
 
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) OS::Free(memory_, size_);
+  if (memory_) munmap(memory_, size_);
   fclose(file_);
 }
 
@@ -227,137 +199,4 @@ int OS::StackWalk(Vector<StackFrame> frames) {
   return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
 }
 
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* address,
-                                 size_t size,
-                                 bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* address, size_t size) {
-  return mmap(address,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
-  return munmap(address, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  return false;
-}
-
 } }  // namespace v8::internal
src/platform-openbsd.cc
index bb20df3..198eedf 100644 (file)
@@ -79,23 +79,6 @@ double OS::LocalTimeOffset() {
 }
 
 
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool is_executable) {
-  const size_t msize = RoundUp(requested, AllocateAlignment());
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* addr = OS::GetRandomMmapAddr();
-  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-  if (mbase == MAP_FAILED) {
-    LOG(i::Isolate::Current(),
-        StringEvent("OS::Allocate", "mmap failed"));
-    return NULL;
-  }
-  *allocated = msize;
-  return mbase;
-}
-
-
 void OS::DumpBacktrace() {
   // Currently unsupported.
 }
@@ -260,141 +243,4 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
   return frames_count;
 }
 
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
-
 } }  // namespace v8::internal
src/platform-posix.cc
index fe27eaf..fce34ba 100644 (file)
@@ -91,17 +91,6 @@ uint64_t OS::CpuFeaturesImpliedByPlatform() {
 }
 
 
-// Maximum size of the virtual memory.  0 means there is no artificial
-// limit.
-
-intptr_t OS::MaxVirtualMemory() {
-  struct rlimit limit;
-  int result = getrlimit(RLIMIT_DATA, &limit);
-  if (result != 0) return 0;
-  return limit.rlim_cur;
-}
-
-
 int OS::ActivationFrameAlignment() {
 #if V8_TARGET_ARCH_ARM
   // On EABI ARM targets this is required for fp correctness in the
@@ -120,97 +109,6 @@ int OS::ActivationFrameAlignment() {
 }
 
 
-intptr_t OS::CommitPageSize() {
-  static intptr_t page_size = getpagesize();
-  return page_size;
-}
-
-
-void OS::Free(void* address, const size_t size) {
-  // TODO(1240712): munmap has a return value which is ignored here.
-  int result = munmap(address, size);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-// Get rid of writable permission on code allocations.
-void OS::ProtectCode(void* address, const size_t size) {
-#if defined(__CYGWIN__)
-  DWORD old_protect;
-  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-#elif defined(__native_client__)
-  // The Native Client port of V8 uses an interpreter, so
-  // code pages don't need PROT_EXEC.
-  mprotect(address, size, PROT_READ);
-#else
-  mprotect(address, size, PROT_READ | PROT_EXEC);
-#endif
-}
-
-
-// Create guard pages.
-void OS::Guard(void* address, const size_t size) {
-#if defined(__CYGWIN__)
-  DWORD oldprotect;
-  VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
-#else
-  mprotect(address, size, PROT_NONE);
-#endif
-}
-
-
-void* OS::GetRandomMmapAddr() {
-#if defined(__native_client__)
-  // TODO(bradchen): restore randomization once Native Client gets
-  // smarter about using mmap address hints.
-  // See http://code.google.com/p/nativeclient/issues/3341
-  return NULL;
-#endif
-  Isolate* isolate = Isolate::UncheckedCurrent();
-  // Note that the current isolate isn't set up in a call path via
-  // CpuFeatures::Probe. We don't care about randomization in this case because
-  // the code page is immediately freed.
-  if (isolate != NULL) {
-    uintptr_t raw_addr;
-    isolate->random_number_generator()->NextBytes(&raw_addr, sizeof(raw_addr));
-#if V8_TARGET_ARCH_X64
-    // Currently available CPUs have 48 bits of virtual addressing.  Truncate
-    // the hint address to 46 bits to give the kernel a fighting chance of
-    // fulfilling our placement request.
-    raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
-    raw_addr &= 0x3ffff000;
-
-# ifdef __sun
-    // For our Solaris/illumos mmap hint, we pick a random address in the bottom
-    // half of the top half of the address space (that is, the third quarter).
-    // Because we do not MAP_FIXED, this will be treated only as a hint -- the
-    // system will not fail to mmap() because something else happens to already
-    // be mapped at our random address. We deliberately set the hint high enough
-    // to get well above the system's break (that is, the heap); Solaris and
-    // illumos will try the hint and if that fails allocate as if there were
-    // no hint at all. The high hint prevents the break from getting hemmed in
-    // at low values, ceding half of the address space to the system heap.
-    raw_addr += 0x80000000;
-# else
-    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
-    // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
-    // 10.6 and 10.7.
-    raw_addr += 0x20000000;
-# endif
-#endif
-    return reinterpret_cast<void*>(raw_addr);
-  }
-  return NULL;
-}
-
-
-size_t OS::AllocateAlignment() {
-  return getpagesize();
-}
-
-
 void OS::Sleep(int milliseconds) {
   useconds_t ms = static_cast<useconds_t>(milliseconds);
   usleep(1000 * ms);
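
The removed OS::GetRandomMmapAddr() above is where the per-isolate RNG was previously consulted; per the commit message, the address randomization now uses a dedicated random number generator inside VirtualMemory. A standalone version of the hint arithmetic from the removed code, with std::mt19937_64 standing in for V8's generator:

#include <cstdint>
#include <cstdio>
#include <random>

int main() {
  std::mt19937_64 rng(std::random_device{}());
  uint64_t raw_addr = rng();
#if defined(__x86_64__)
  // Current CPUs have 48 bits of virtual addressing; truncate the hint to
  // 46 page-aligned bits so the kernel has a fighting chance of honoring it.
  raw_addr &= UINT64_C(0x3ffffffff000);
#else
  raw_addr &= UINT64_C(0x3ffff000);
  // The 0x20000000-0x60000000 range is relatively unpopulated across common
  // 32-bit ASLR configurations (the removed code adds 0x80000000 on Solaris).
  raw_addr += UINT64_C(0x20000000);
#endif
  std::printf("mmap hint: %#llx\n", static_cast<unsigned long long>(raw_addr));
  return 0;
}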
src/platform-solaris.cc
index 512d995..f303601 100644 (file)
@@ -96,22 +96,6 @@ double OS::LocalTimeOffset() {
 }
 
 
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool is_executable) {
-  const size_t msize = RoundUp(requested, getpagesize());
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
-  if (mbase == MAP_FAILED) {
-    LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
-    return NULL;
-  }
-  *allocated = msize;
-  return mbase;
-}
-
-
 void OS::DumpBacktrace() {
   // Currently unsupported.
 }
@@ -224,141 +208,4 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
   return walker.index;
 }
 
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
-
 } }  // namespace v8::internal
src/platform-win32.cc
index 2e82056..106c73a 100644 (file)
@@ -69,11 +69,6 @@ int strncasecmp(const char* s1, const char* s2, int n) {
 #define _TRUNCATE 0
 #define STRUNCATE 80
 
-inline void MemoryBarrier() {
-  int barrier = 0;
-  __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier));
-}
-
 #endif  // __MINGW64_VERSION_MAJOR
 
 
@@ -128,11 +123,6 @@ int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
 namespace v8 {
 namespace internal {
 
-intptr_t OS::MaxVirtualMemory() {
-  return 0;
-}
-
-
 double ceiling(double x) {
   return ceil(x);
 }
@@ -743,127 +733,6 @@ void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
 #undef STRUNCATE
 
 
-// Get the system's page size used by VirtualAlloc() or the next power
-// of two. The reason for always returning a power of two is that the
-// rounding up in OS::Allocate expects that.
-static size_t GetPageSize() {
-  static size_t page_size = 0;
-  if (page_size == 0) {
-    SYSTEM_INFO info;
-    GetSystemInfo(&info);
-    page_size = RoundUpToPowerOf2(info.dwPageSize);
-  }
-  return page_size;
-}
-
-
-// The allocation alignment is the guaranteed alignment for
-// VirtualAlloc'ed blocks of memory.
-size_t OS::AllocateAlignment() {
-  static size_t allocate_alignment = 0;
-  if (allocate_alignment == 0) {
-    SYSTEM_INFO info;
-    GetSystemInfo(&info);
-    allocate_alignment = info.dwAllocationGranularity;
-  }
-  return allocate_alignment;
-}
-
-
-void* OS::GetRandomMmapAddr() {
-  Isolate* isolate = Isolate::UncheckedCurrent();
-  // Note that the current isolate isn't set up in a call path via
-  // CpuFeatures::Probe. We don't care about randomization in this case because
-  // the code page is immediately freed.
-  if (isolate != NULL) {
-    // The address range used to randomize RWX allocations in OS::Allocate
-    // Try not to map pages into the default range that windows loads DLLs
-    // Use a multiple of 64k to prevent committing unused memory.
-    // Note: This does not guarantee RWX regions will be within the
-    // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
-#ifdef V8_HOST_ARCH_64_BIT
-    static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
-    static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
-#else
-    static const intptr_t kAllocationRandomAddressMin = 0x04000000;
-    static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
-#endif
-    uintptr_t address =
-        (isolate->random_number_generator()->NextInt() << kPageSizeBits) |
-        kAllocationRandomAddressMin;
-    address &= kAllocationRandomAddressMax;
-    return reinterpret_cast<void *>(address);
-  }
-  return NULL;
-}
-
-
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
-  LPVOID base = NULL;
-
-  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
-    // For exectutable pages try and randomize the allocation address
-    for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
-      base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
-    }
-  }
-
-  // After three attempts give up and let the OS find an address to use.
-  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
-
-  return base;
-}
-
-
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool is_executable) {
-  // VirtualAlloc rounds allocated size to page size automatically.
-  size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
-
-  // Windows XP SP2 allows Data Excution Prevention (DEP).
-  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-
-  LPVOID mbase = RandomizedVirtualAlloc(msize,
-                                        MEM_COMMIT | MEM_RESERVE,
-                                        prot);
-
-  if (mbase == NULL) {
-    LOG(Isolate::Current(), StringEvent("OS::Allocate", "VirtualAlloc failed"));
-    return NULL;
-  }
-
-  ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
-
-  *allocated = msize;
-  return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
-  // TODO(1240712): VirtualFree has a return value which is ignored here.
-  VirtualFree(address, 0, MEM_RELEASE);
-  USE(size);
-}
-
-
-intptr_t OS::CommitPageSize() {
-  return 4096;
-}
-
-
-void OS::ProtectCode(void* address, const size_t size) {
-  DWORD old_protect;
-  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-}
-
-
-void OS::Guard(void* address, const size_t size) {
-  DWORD oldprotect;
-  VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
-}
-
-
 void OS::Sleep(int milliseconds) {
   ::Sleep(milliseconds);
 }
@@ -1368,111 +1237,6 @@ int OS::ActivationFrameAlignment() {
 }
 
 
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* address = ReserveRegion(request_size);
-  if (address == NULL) return;
-  Address base = RoundUp(static_cast<Address>(address), alignment);
-  // Try reducing the size by freeing and then reallocating a specific area.
-  bool result = ReleaseRegion(address, request_size);
-  USE(result);
-  ASSERT(result);
-  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
-  if (address != NULL) {
-    request_size = size;
-    ASSERT(base == static_cast<Address>(address));
-  } else {
-    // Resizing failed, just go with a bigger area.
-    address = ReserveRegion(request_size);
-    if (address == NULL) return;
-  }
-  address_ = address;
-  size_ = request_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  ASSERT(IsReserved());
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  if (NULL == VirtualAlloc(address,
-                           OS::CommitPageSize(),
-                           MEM_COMMIT,
-                           PAGE_NOACCESS)) {
-    return false;
-  }
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return VirtualFree(base, 0, MEM_RELEASE) != 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
-
-
 // ----------------------------------------------------------------------------
 // Win32 thread support.
 
index 428ba55..46bc2de 100644 (file)
@@ -219,30 +219,6 @@ class OS {
   static void PrintError(const char* format, ...);
   static void VPrintError(const char* format, va_list args);
 
-  // Allocate/Free memory used by JS heap. Pages are readable/writable, but
-  // they are not guaranteed to be executable unless 'executable' is true.
-  // Returns the address of allocated memory, or NULL if failed.
-  static void* Allocate(const size_t requested,
-                        size_t* allocated,
-                        bool is_executable);
-  static void Free(void* address, const size_t size);
-
-  // This is the granularity at which the ProtectCode(...) call can set page
-  // permissions.
-  static intptr_t CommitPageSize();
-
-  // Mark code segments non-writable.
-  static void ProtectCode(void* address, const size_t size);
-
-  // Assign memory as a guard page so that access will cause an exception.
-  static void Guard(void* address, const size_t size);
-
-  // Generate a random address to be used for hinting mmap().
-  static void* GetRandomMmapAddr();
-
-  // Get the Alignment guaranteed by Allocate().
-  static size_t AllocateAlignment();
-
   // Sleep for a number of milliseconds.
   static void Sleep(const int milliseconds);
 
@@ -303,10 +279,6 @@ class OS {
   // positions indicated by the members of the CpuFeature enum from globals.h
   static uint64_t CpuFeaturesImpliedByPlatform();
 
-  // Maximum size of the virtual memory.  0 means there is no artificial
-  // limit.
-  static intptr_t MaxVirtualMemory();
-
   // Returns the double constant NAN
   static double nan_value();
 
@@ -386,99 +358,6 @@ class OS {
   DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
 };
 
-// Represents and controls an area of reserved memory.
-// Control of the reserved memory can be assigned to another VirtualMemory
-// object by assignment or copy-contructing. This removes the reserved memory
-// from the original object.
-class VirtualMemory {
- public:
-  // Empty VirtualMemory object, controlling no reserved memory.
-  VirtualMemory();
-
-  // Reserves virtual memory with size.
-  explicit VirtualMemory(size_t size);
-
-  // Reserves virtual memory containing an area of the given size that
-  // is aligned per alignment. This may not be at the position returned
-  // by address().
-  VirtualMemory(size_t size, size_t alignment);
-
-  // Releases the reserved memory, if any, controlled by this VirtualMemory
-  // object.
-  ~VirtualMemory();
-
-  // Returns whether the memory has been reserved.
-  bool IsReserved();
-
-  // Initialize or resets an embedded VirtualMemory object.
-  void Reset();
-
-  // Returns the start address of the reserved memory.
-  // If the memory was reserved with an alignment, this address is not
-  // necessarily aligned. The user might need to round it up to a multiple of
-  // the alignment to get the start of the aligned block.
-  void* address() {
-    ASSERT(IsReserved());
-    return address_;
-  }
-
-  // Returns the size of the reserved memory. The returned value is only
-  // meaningful when IsReserved() returns true.
-  // If the memory was reserved with an alignment, this size may be larger
-  // than the requested size.
-  size_t size() { return size_; }
-
-  // Commits real memory. Returns whether the operation succeeded.
-  bool Commit(void* address, size_t size, bool is_executable);
-
-  // Uncommit real memory.  Returns whether the operation succeeded.
-  bool Uncommit(void* address, size_t size);
-
-  // Creates a single guard page at the given address.
-  bool Guard(void* address);
-
-  void Release() {
-    ASSERT(IsReserved());
-    // Notice: Order is important here. The VirtualMemory object might live
-    // inside the allocated region.
-    void* address = address_;
-    size_t size = size_;
-    Reset();
-    bool result = ReleaseRegion(address, size);
-    USE(result);
-    ASSERT(result);
-  }
-
-  // Assign control of the reserved region to a different VirtualMemory object.
-  // The old object is no longer functional (IsReserved() returns false).
-  void TakeControl(VirtualMemory* from) {
-    ASSERT(!IsReserved());
-    address_ = from->address_;
-    size_ = from->size_;
-    from->Reset();
-  }
-
-  static void* ReserveRegion(size_t size);
-
-  static bool CommitRegion(void* base, size_t size, bool is_executable);
-
-  static bool UncommitRegion(void* base, size_t size);
-
-  // Must be called with a base pointer that has been returned by ReserveRegion
-  // and the same size it was reserved with.
-  static bool ReleaseRegion(void* base, size_t size);
-
-  // Returns true if OS performs lazy commits, i.e. the memory allocation call
-  // defers actual physical memory allocation till the first memory access.
-  // Otherwise returns false.
-  static bool HasLazyCommits();
-
- private:
-  void* address_;  // Start address of the virtual memory.
-  size_t size_;  // Size of the virtual memory.
-};
-
-
 // ----------------------------------------------------------------------------
 // Thread
 //
diff --git a/src/platform/virtual-memory.cc b/src/platform/virtual-memory.cc
new file mode 100644 (file)
index 0000000..627b31e
--- /dev/null
@@ -0,0 +1,510 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/virtual-memory.h"
+
+#if V8_OS_POSIX
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+#include <unistd.h>
+#endif
+
+#if V8_OS_MACOSX
+#include <mach/vm_statistics.h>
+#endif
+
+#include <cerrno>
+
+#include "platform/mutex.h"
+#include "utils.h"
+#include "utils/random-number-generator.h"
+#if V8_OS_CYGWIN || V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+class RandomAddressGenerator V8_FINAL {
+ public:
+  V8_INLINE(uintptr_t NextAddress()) {
+    LockGuard<Mutex> lock_guard(&mutex_);
+    uintptr_t address = rng_.NextInt();
+#if V8_HOST_ARCH_64_BIT
+    address = (address << 32) + static_cast<uintptr_t>(rng_.NextInt());
+#endif
+    return address;
+  }
+
+ private:
+  Mutex mutex_;
+  RandomNumberGenerator rng_;
+};
+
+typedef LazyInstance<RandomAddressGenerator,
+                     DefaultConstructTrait<RandomAddressGenerator>,
+                     ThreadSafeInitOnceTrait>::type LazyRandomAddressGenerator;
+
+#define LAZY_RANDOM_ADDRESS_GENERATOR_INITIALIZER LAZY_INSTANCE_INITIALIZER
+
+
+static V8_INLINE(void* GenerateRandomAddress()) {
+#if V8_OS_NACL
+  // TODO(bradchen): Restore randomization once Native Client gets smarter
+  // about using mmap address hints.
+  // See http://code.google.com/p/nativeclient/issues/3341
+  return NULL;
+#else  // V8_OS_NACL
+  LazyRandomAddressGenerator random_address_generator =
+      LAZY_RANDOM_ADDRESS_GENERATOR_INITIALIZER;
+  uintptr_t address = random_address_generator.Pointer()->NextAddress();
+
+# if V8_TARGET_ARCH_X64
+#  if V8_OS_CYGWIN || V8_OS_WIN
+    // Try not to map pages into the default range where Windows loads DLLs.
+    // Use a multiple of 64KiB to prevent committing unused memory.
+    address += V8_UINT64_C(0x00080000000);
+    address &= V8_UINT64_C(0x3ffffff0000);
+#  else  // V8_OS_CYGWIN || V8_OS_WIN
+    // Currently available CPUs have 48 bits of virtual addressing. Truncate
+    // the hint address to 46 bits to give the kernel a fighting chance of
+    // fulfilling our placement request.
+    address &= V8_UINT64_C(0x3ffffffff000);
+#  endif  // V8_OS_CYGWIN || V8_OS_WIN
+# else  // V8_TARGET_ARCH_X64
+#  if V8_OS_CYGWIN || V8_OS_WIN
+    // Try not to map pages into the default range where Windows loads DLLs.
+    // Use a multiple of 64KiB to prevent committing unused memory.
+    address += 0x04000000;
+    address &= 0x3fff0000;
+#  elif V8_OS_SOLARIS
+    // For our Solaris/illumos mmap hint, we pick a random address in the bottom
+    // half of the top half of the address space (that is, the third quarter).
+    // Because we do not MAP_FIXED, this will be treated only as a hint -- the
+    // system will not fail to mmap() because something else happens to already
+    // be mapped at our random address. We deliberately set the hint high enough
+    // to get well above the system's break (that is, the heap); Solaris and
+    // illumos will try the hint and if that fails allocate as if there were
+    // no hint at all. The high hint prevents the break from getting hemmed in
+    // at low values, ceding half of the address space to the system heap.
+    address &= 0x3ffff000;
+    address += 0x80000000;
+#  else  // V8_OS_CYGWIN || V8_OS_WIN
+    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+    // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on Mac OS X
+    // 10.6 and 10.7.
+    address &= 0x3ffff000;
+    address += 0x20000000;
+#  endif  // V8_OS_CYGWIN || V8_OS_WIN
+# endif  // V8_TARGET_ARCH_X64
+    return reinterpret_cast<void*>(address);
+#endif  // V8_OS_NACL
+}
+
+
+// static
+void* VirtualMemory::AllocateRegion(size_t size,
+                                    size_t* size_return,
+                                    Executability executability) {
+  ASSERT_LT(0, size);
+  ASSERT_NE(NULL, size_return);
+  void* address = ReserveRegion(size, &size);
+  if (address == NULL) return NULL;
+  if (!CommitRegion(address, size, executability)) {
+    bool result = ReleaseRegion(address, size);
+    ASSERT(result);
+    USE(result);
+    return NULL;
+  }
+  *size_return = size;
+  return address;
+}
+
+#if V8_OS_CYGWIN || V8_OS_WIN
+
+// static
+void* VirtualMemory::ReserveRegion(size_t size, size_t* size_return) {
+  ASSERT_LT(0, size);
+  ASSERT_NE(NULL, size_return);
+  // The minimum size that can be reserved is 64KiB, see
+  // http://msdn.microsoft.com/en-us/library/ms810627.aspx
+  if (size < 64 * KB) {
+    size = 64 * KB;
+  }
+  size = RoundUp(size, GetAllocationGranularity());
+  LPVOID address = NULL;
+  // Try and randomize the allocation address (up to three attempts).
+  for (unsigned attempts = 0; address == NULL && attempts < 3; ++attempts) {
+    address = VirtualAlloc(GenerateRandomAddress(),
+                           size,
+                           MEM_RESERVE,
+                           PAGE_NOACCESS);
+  }
+  if (address == NULL) {
+    // After three attempts give up and let the kernel find an address.
+    address = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
+  }
+  if (address == NULL) {
+    return NULL;
+  }
+  ASSERT(IsAligned(reinterpret_cast<uintptr_t>(address),
+                   GetAllocationGranularity()));
+  *size_return = size;
+  return address;
+}
+
+
+// static
+void* VirtualMemory::ReserveRegion(size_t size,
+                                   size_t* size_return,
+                                   size_t alignment) {
+  ASSERT_LT(0, size);
+  ASSERT_NE(NULL, size_return);
+  ASSERT(IsAligned(alignment, GetAllocationGranularity()));
+
+  size_t reserved_size;
+  Address reserved_base = static_cast<Address>(
+      ReserveRegion(size + alignment, &reserved_size));
+  if (reserved_base == NULL) {
+    return NULL;
+  }
+  ASSERT_LE(size, reserved_size);
+  ASSERT(IsAligned(reserved_size, GetPageSize()));
+
+  // Try reducing the size by freeing and then reallocating a specific area.
+  bool result = ReleaseRegion(reserved_base, reserved_size);
+  USE(result);
+  ASSERT(result);
+  size_t aligned_size = RoundUp(size, GetPageSize());
+  Address aligned_base = static_cast<Address>(
+      VirtualAlloc(RoundUp(reserved_base, alignment),
+                   aligned_size,
+                   MEM_RESERVE,
+                   PAGE_NOACCESS));
+  if (aligned_base != NULL) {
+    ASSERT(aligned_base == RoundUp(reserved_base, alignment));
+    ASSERT(IsAligned(reinterpret_cast<uintptr_t>(aligned_base),
+                     GetAllocationGranularity()));
+    ASSERT(IsAligned(aligned_size, GetPageSize()));
+    *size_return = aligned_size;
+    return aligned_base;
+  }
+
+  // Resizing failed, just go with a bigger area.
+  return ReserveRegion(reserved_size, size_return);
+}
+
+
+// static
+bool VirtualMemory::CommitRegion(void* address,
+                                 size_t size,
+                                 Executability executability) {
+  ASSERT_NE(NULL, address);
+  ASSERT_LT(0, size);
+  DWORD protect = 0;
+  switch (executability) {
+    case NOT_EXECUTABLE:
+      protect = PAGE_READWRITE;
+      break;
+
+    case EXECUTABLE:
+      protect = PAGE_EXECUTE_READWRITE;
+      break;
+  }
+  LPVOID result = VirtualAlloc(address, size, MEM_COMMIT, protect);
+  if (result == NULL) {
+    ASSERT(GetLastError() != ERROR_INVALID_ADDRESS);
+    return false;
+  }
+  ASSERT_EQ(address, result);
+  return true;
+}
+
+
+// static
+bool VirtualMemory::UncommitRegion(void* address, size_t size) {
+  ASSERT_NE(NULL, address);
+  ASSERT_LT(0, size);
+  int result = VirtualFree(address, size, MEM_DECOMMIT);
+  if (result == 0) {
+    return false;
+  }
+  return true;
+}
+
+
+// static
+bool VirtualMemory::WriteProtectRegion(void* address, size_t size) {
+  ASSERT_NE(NULL, address);
+  ASSERT_LT(0, size);
+  DWORD old_protect;
+  return VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
+}
+
+
+// static
+bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
+  ASSERT_NE(NULL, address);
+  ASSERT_LT(0, size);
+  USE(size);
+  int result = VirtualFree(address, 0, MEM_RELEASE);
+  if (result == 0) {
+    return false;
+  }
+  return true;
+}
+
+
+// static
+size_t VirtualMemory::GetAllocationGranularity() {
+  static size_t allocation_granularity = 0;
+  if (allocation_granularity == 0) {
+    SYSTEM_INFO system_info;
+    GetSystemInfo(&system_info);
+    allocation_granularity = system_info.dwAllocationGranularity;
+    MemoryBarrier();
+  }
+  return allocation_granularity;
+}
+
+
+// static
+size_t VirtualMemory::GetLimit() {
+  return 0;
+}
+
+
+// static
+size_t VirtualMemory::GetPageSize() {
+  static size_t page_size = 0;
+  if (page_size == 0) {
+    SYSTEM_INFO system_info;
+    GetSystemInfo(&system_info);
+    page_size = system_info.dwPageSize;
+    MemoryBarrier();
+  }
+  return page_size;
+}
+
+
+#else  // V8_OS_CYGWIN || V8_OS_WIN
+
+
+// Constants used for mmap.
+#if V8_OS_MACOSX
+// kMmapFd is used to pass vm_alloc flags to tag the region with the
+// user-defined tag 255. This helps identify V8-allocated regions in memory
+// analysis tools like vmmap(1).
+static const int kMmapFd = VM_MAKE_TAG(255);
+#else
+static const int kMmapFd = -1;
+#endif  // V8_OS_MACOSX
+static const off_t kMmapFdOffset = 0;
+
+
+// static
+void* VirtualMemory::ReserveRegion(size_t size, size_t* size_return) {
+  ASSERT_LT(0, size);
+  ASSERT_NE(NULL, size_return);
+
+  size = RoundUp(size, GetPageSize());
+  void* address = mmap(GenerateRandomAddress(),
+                       size,
+                       PROT_NONE,
+                       MAP_ANON | MAP_NORESERVE | MAP_PRIVATE,
+                       kMmapFd,
+                       kMmapFdOffset);
+  if (address == MAP_FAILED) {
+    ASSERT_NE(EINVAL, errno);
+    return NULL;
+  }
+  *size_return = size;
+  return address;
+}
+
+
+// static
+void* VirtualMemory::ReserveRegion(size_t size,
+                                   size_t* size_return,
+                                   size_t alignment) {
+  ASSERT_LT(0, size);
+  ASSERT_NE(NULL, size_return);
+  ASSERT(IsAligned(alignment, GetPageSize()));
+
+  size_t reserved_size;
+  Address reserved_base = static_cast<Address>(
+      ReserveRegion(size + alignment, &reserved_size));
+  if (reserved_base == NULL) {
+    return NULL;
+  }
+
+  Address aligned_base = RoundUp(reserved_base, alignment);
+  ASSERT_LE(reserved_base, aligned_base);
+
+  // Unmap extra memory reserved before the aligned region.
+  if (aligned_base != reserved_base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - reserved_base);
+    bool result = ReleaseRegion(reserved_base, prefix_size);
+    ASSERT(result);
+    USE(result);
+    reserved_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, GetPageSize());
+  ASSERT_LE(aligned_size, reserved_size);
+
+  // Unmap extra memory reserved after the aligned region.
+  if (aligned_size != reserved_size) {
+    size_t suffix_size = reserved_size - aligned_size;
+    bool result = ReleaseRegion(aligned_base + aligned_size, suffix_size);
+    ASSERT(result);
+    USE(result);
+    reserved_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == reserved_size);
+  ASSERT_NE(NULL, aligned_base);
+
+  *size_return = aligned_size;
+  return aligned_base;
+}
+
+
+// static
+bool VirtualMemory::CommitRegion(void* address,
+                                 size_t size,
+                                 Executability executability) {
+  ASSERT_NE(NULL, address);
+  ASSERT_LT(0, size);
+  int prot = 0;
+  // The Native Client port of V8 uses an interpreter,
+  // so code pages don't need PROT_EXEC.
+#if V8_OS_NACL
+  executability = NOT_EXECUTABLE;
+#endif
+  switch (executability) {
+    case NOT_EXECUTABLE:
+      prot = PROT_READ | PROT_WRITE;
+      break;
+
+    case EXECUTABLE:
+      prot = PROT_EXEC | PROT_READ | PROT_WRITE;
+      break;
+  }
+  void* result = mmap(address,
+                      size,
+                      prot,
+                      MAP_ANON | MAP_FIXED | MAP_PRIVATE,
+                      kMmapFd,
+                      kMmapFdOffset);
+  if (result == MAP_FAILED) {
+    ASSERT_NE(EINVAL, errno);
+    return false;
+  }
+  return true;
+}
+
+
+// static
+bool VirtualMemory::UncommitRegion(void* address, size_t size) {
+  ASSERT_NE(NULL, address);
+  ASSERT_LT(0, size);
+  void* result = mmap(address,
+                      size,
+                      PROT_NONE,
+                      MAP_ANON | MAP_FIXED | MAP_NORESERVE | MAP_PRIVATE,
+                      kMmapFd,
+                      kMmapFdOffset);
+  if (result == MAP_FAILED) {
+    ASSERT_NE(EINVAL, errno);
+    return false;
+  }
+  return true;
+}
+
+
+// static
+bool VirtualMemory::WriteProtectRegion(void* address, size_t size) {
+  ASSERT_NE(NULL, address);
+  ASSERT_LT(0, size);
+#if V8_OS_NACL
+  // The Native Client port of V8 uses an interpreter,
+  // so code pages don't need PROT_EXEC.
+  int prot = PROT_READ;
+#else
+  int prot = PROT_EXEC | PROT_READ;
+#endif
+  int result = mprotect(address, size, prot);
+  if (result < 0) {
+    ASSERT_NE(EINVAL, errno);
+    return false;
+  }
+  return true;
+}
+
+
+// static
+bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
+  ASSERT_NE(NULL, address);
+  ASSERT_LT(0, size);
+  int result = munmap(address, size);
+  if (result < 0) {
+    ASSERT_NE(EINVAL, errno);
+    return false;
+  }
+  return true;
+}
+
+
+// static
+size_t VirtualMemory::GetAllocationGranularity() {
+  return GetPageSize();
+}
+
+
+// static
+size_t VirtualMemory::GetLimit() {
+  struct rlimit rlim;
+  int result = getrlimit(RLIMIT_DATA, &rlim);
+  ASSERT_EQ(0, result);
+  USE(result);
+  return rlim.rlim_cur;
+}
+
+
+// static
+size_t VirtualMemory::GetPageSize() {
+  static const size_t kPageSize = getpagesize();
+  return kPageSize;
+}
+
+#endif  // V8_OS_CYGWIN || V8_OS_WIN
+
+} }  // namespace v8::internal
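
The aligned ReserveRegion above over-reserves by the requested alignment and
then trims the unused prefix and suffix with munmap, so only an aligned block
of the requested size stays mapped. A minimal standalone sketch of that
technique, assuming a POSIX system and that size and alignment are already
multiples of the page size (the ReserveAlignedSketch name is illustrative and
not part of the patch; the real code also passes a randomized address hint):

  #include <stddef.h>
  #include <stdint.h>
  #include <sys/mman.h>

  // Sketch only: over-reserve, then unmap the excess before and after the
  // aligned block, mirroring ReserveRegion(size, size_return, alignment).
  static void* ReserveAlignedSketch(size_t size, size_t alignment) {
    size_t request = size + alignment;
    void* result = mmap(NULL, request, PROT_NONE,
                        MAP_ANON | MAP_NORESERVE | MAP_PRIVATE, -1, 0);
    if (result == MAP_FAILED) return NULL;
    uint8_t* base = static_cast<uint8_t*>(result);
    uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
        (reinterpret_cast<uintptr_t>(base) + alignment - 1) &
        ~(alignment - 1));
    if (aligned_base != base) {
      munmap(base, aligned_base - base);    // unmap the unused prefix
    }
    size_t suffix = (base + request) - (aligned_base + size);
    if (suffix != 0) {
      munmap(aligned_base + size, suffix);  // unmap the unused suffix
    }
    return aligned_base;
  }

The Windows variant cannot trim a reservation piecewise, because VirtualFree
releases only whole reservations; that is why the Cygwin/Win32 code above
releases the oversized region, re-reserves at the aligned base, and falls back
to re-reserving the full oversized amount if that second VirtualAlloc fails.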
diff --git a/src/platform/virtual-memory.h b/src/platform/virtual-memory.h
new file mode 100644 (file)
index 0000000..858c862
--- /dev/null
@@ -0,0 +1,211 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_VIRTUAL_MEMORY_H_
+#define V8_PLATFORM_VIRTUAL_MEMORY_H_
+
+#include "checks.h"
+#include "globals.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// VirtualMemory
+//
+// This class represents and controls an area of reserved memory.
+// Control of the reserved memory can be assigned to another VirtualMemory
+// object by assignment or copy-constructing. This removes the reserved
+// memory from the original object.
+class VirtualMemory V8_FINAL {
+ public:
+  // The executability of a memory region.
+  enum Executability { NOT_EXECUTABLE, EXECUTABLE };
+
+  // Empty VirtualMemory object, controlling no reserved memory.
+  VirtualMemory() : address_(NULL), size_(0) {}
+
+  // Reserves virtual memory with size.
+  explicit VirtualMemory(size_t size) : size_(0) {
+    address_ = ReserveRegion(size, &size_);
+  }
+
+  // Reserves virtual memory containing an area of the given size that
+  // is aligned to the given alignment. This may not be at the position
+  // returned by address().
+  // by address().
+  VirtualMemory(size_t size, size_t alignment) : size_(0) {
+    address_ = ReserveRegion(size, &size_, alignment);
+  }
+
+  // Releases the reserved memory, if any, controlled by this VirtualMemory
+  // object.
+  ~VirtualMemory() {
+    if (IsReserved()) {
+      bool result = ReleaseRegion(address_, size_);
+      ASSERT(result);
+      USE(result);
+    }
+  }
+
+  // Returns whether the memory contains the specified address.
+  bool Contains(const void* address) const V8_WARN_UNUSED_RESULT {
+    if (!IsReserved()) return false;
+    if (address < address_) return false;
+    if (address >= reinterpret_cast<uint8_t*>(address_) + size_) return false;
+    return true;
+  }
+
+  // Returns whether the memory has been reserved.
+  bool IsReserved() const V8_WARN_UNUSED_RESULT {
+    return address_ != NULL;
+  }
+
+  // Initializes or resets an embedded VirtualMemory object.
+  void Reset() {
+    address_ = NULL;
+    size_ = 0;
+  }
+
+  // Returns the start address of the reserved memory. The returned value is
+  // only meaningful if |IsReserved()| returns true.
+  // If the memory was reserved with an alignment, this address is not
+  // necessarily aligned. The user might need to round it up to a multiple of
+  // the alignment to get the start of the aligned block.
+  void* address() const V8_WARN_UNUSED_RESULT { return address_; }
+
+  // Returns the size of the reserved memory. The returned value is only
+  // meaningful when |IsReserved()| returns true.
+  // If the memory was reserved with an alignment, this size may be larger
+  // than the requested size.
+  size_t size() const V8_WARN_UNUSED_RESULT { return size_; }
+
+  // Commits real memory. Returns whether the operation succeeded.
+  bool Commit(void* address,
+              size_t size,
+              Executability executability) V8_WARN_UNUSED_RESULT {
+    ASSERT(IsReserved());
+    ASSERT(Contains(address));
+    ASSERT(Contains(reinterpret_cast<uint8_t*>(address) + size - 1));
+    return CommitRegion(address, size, executability);
+  }
+
+  // Uncommits real memory. Returns whether the operation succeeded.
+  bool Uncommit(void* address, size_t size) V8_WARN_UNUSED_RESULT {
+    ASSERT(IsReserved());
+    ASSERT(Contains(address));
+    ASSERT(Contains(reinterpret_cast<uint8_t*>(address) + size - 1));
+    return UncommitRegion(address, size);
+  }
+
+  // Creates guard pages covering the range [address, address + size).
+  bool Guard(void* address, size_t size) V8_WARN_UNUSED_RESULT {
+    // We can simply uncommit the specified pages. Any access
+    // to them will cause a processor exception.
+    return Uncommit(address, size);
+  }
+
+  void Release() {
+    ASSERT(IsReserved());
+    // WARNING: Order is important here. The VirtualMemory
+    // object might live inside the allocated region.
+    void* address = address_;
+    size_t size = size_;
+    Reset();
+    bool result = ReleaseRegion(address, size);
+    USE(result);
+    ASSERT(result);
+  }
+
+  // Assign control of the reserved region to a different VirtualMemory object.
+  // The old object is no longer functional (IsReserved() returns false).
+  void TakeControl(VirtualMemory* from) {
+    ASSERT(!IsReserved());
+    address_ = from->address_;
+    size_ = from->size_;
+    from->Reset();
+  }
+
+  // Allocates a region of memory pages. The pages are readable/writable,
+  // but are not guaranteed to be executable unless explicitly requested.
+  // Returns the base address of the allocated memory region, or NULL in
+  // case of an error.
+  static void* AllocateRegion(size_t size,
+                              size_t* size_return,
+                              Executability executability)
+      V8_WARN_UNUSED_RESULT;
+
+  static void* ReserveRegion(size_t size,
+                             size_t* size_return) V8_WARN_UNUSED_RESULT;
+
+  static void* ReserveRegion(size_t size,
+                             size_t* size_return,
+                             size_t alignment) V8_WARN_UNUSED_RESULT;
+
+  static bool CommitRegion(void* address,
+                           size_t size,
+                           Executability executability) V8_WARN_UNUSED_RESULT;
+
+  static bool UncommitRegion(void* address, size_t size) V8_WARN_UNUSED_RESULT;
+
+  // Mark code segments readable-executable.
+  static bool WriteProtectRegion(void* address,
+                                 size_t size) V8_WARN_UNUSED_RESULT;
+
+  // Must be called with a base pointer that has been returned by ReserveRegion
+  // and the same size it was reserved with.
+  static bool ReleaseRegion(void* address, size_t size) V8_WARN_UNUSED_RESULT;
+
+  // The granularity for the starting address at which virtual memory can be
+  // reserved (or allocated in terms of the underlying operating system).
+  static size_t GetAllocationGranularity() V8_PURE;
+
+  // The maximum size of the virtual memory. 0 means there is no artificial
+  // limit.
+  static size_t GetLimit() V8_PURE;
+
+  // The page size and the granularity of page protection and commitment.
+  static size_t GetPageSize() V8_PURE;
+
+  // Returns true if the OS performs lazy commits, i.e. the memory allocation
+  // call defers actual physical memory allocation until the first memory
+  // access. Otherwise returns false.
+  static V8_INLINE(bool HasLazyCommits()) {
+#if V8_OS_LINUX
+    return true;
+#else
+    return false;
+#endif
+  }
+
+ private:
+  void* address_;  // Start address of the virtual memory.
+  size_t size_;  // Size of the virtual memory.
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_PLATFORM_VIRTUAL_MEMORY_H_
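
A hedged usage sketch of the interface declared above; kSize, kAlignment and
the Example function are illustrative values and names, not part of the patch:

  #include <stdint.h>

  #include "platform/virtual-memory.h"

  // Illustrative only: reserve an aligned region, commit one page as
  // readable/writable, and let the destructor release the reservation.
  void Example() {
    using v8::internal::VirtualMemory;
    const size_t kSize = 16 * 4096;     // example request size
    const size_t kAlignment = 1 << 20;  // example 1 MiB alignment
    VirtualMemory reservation(kSize, kAlignment);
    if (!reservation.IsReserved()) return;
    // The documented contract allows address() to be unaligned, so round up
    // before committing.
    uintptr_t base = reinterpret_cast<uintptr_t>(reservation.address());
    void* aligned = reinterpret_cast<void*>(
        (base + kAlignment - 1) & ~(kAlignment - 1));
    if (!reservation.Commit(aligned, VirtualMemory::GetPageSize(),
                            VirtualMemory::NOT_EXECUTABLE)) {
      return;
    }
    // ... use the committed page ...
  }  // ~VirtualMemory releases the whole reservation here.

Commit, Uncommit and the other mutators are V8_WARN_UNUSED_RESULT, so callers
are expected to check (or explicitly USE()) their results, as the updated heap
code below does.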
index be2ae2a..37002e6 100644 (file)
@@ -125,43 +125,11 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
 }
 
 
-// -----------------------------------------------------------------------------
-// MemoryAllocator
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void MemoryAllocator::Protect(Address start, size_t size) {
-  OS::Protect(start, size);
-}
-
-
-void MemoryAllocator::Unprotect(Address start,
-                                size_t size,
-                                Executability executable) {
-  OS::Unprotect(start, size, executable);
-}
-
-
-void MemoryAllocator::ProtectChunkFromPage(Page* page) {
-  int id = GetChunkId(page);
-  OS::Protect(chunks_[id].address(), chunks_[id].size());
-}
-
-
-void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
-  int id = GetChunkId(page);
-  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
-                chunks_[id].owner()->executable() == EXECUTABLE);
-}
-
-#endif
-
-
 // --------------------------------------------------------------------------
 // PagedSpace
 Page* Page::Initialize(Heap* heap,
                        MemoryChunk* chunk,
-                       Executability executable,
+                       VirtualMemory::Executability executability,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
   ASSERT(page->area_size() <= kNonCodeObjectAreaSize);
index 2faf419..84cce8a 100644 (file)
@@ -245,7 +245,8 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
 
 
 bool CodeRange::CommitRawMemory(Address start, size_t length) {
-  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
+  return isolate_->memory_allocator()->CommitMemory(
+      start, length, VirtualMemory::EXECUTABLE);
 }
 
 
@@ -257,7 +258,9 @@ bool CodeRange::UncommitRawMemory(Address start, size_t length) {
 void CodeRange::FreeRawMemory(Address address, size_t length) {
   ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
   free_list_.Add(FreeBlock(address, length));
-  code_range_->Uncommit(address, length);
+  bool result = code_range_->Uncommit(address, length);
+  ASSERT(result);
+  USE(result);
 }
 
 
@@ -308,8 +311,8 @@ void MemoryAllocator::TearDown() {
 
 bool MemoryAllocator::CommitMemory(Address base,
                                    size_t size,
-                                   Executability executable) {
-  if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
+                                   VirtualMemory::Executability executability) {
+  if (!VirtualMemory::CommitRegion(base, size, executability)) {
     return false;
   }
   UpdateAllocatedSpaceLimits(base, base + size);
@@ -318,7 +321,7 @@ bool MemoryAllocator::CommitMemory(Address base,
 
 
 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
-                                 Executability executable) {
+                                 VirtualMemory::Executability executability) {
   // TODO(gc) make code_range part of memory allocator?
   ASSERT(reservation->IsReserved());
   size_t size = reservation->size();
@@ -327,36 +330,38 @@ void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
 
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
 
-  if (executable == EXECUTABLE) {
+  if (executability == VirtualMemory::EXECUTABLE) {
     ASSERT(size_executable_ >= size);
     size_executable_ -= size;
   }
   // Code which is part of the code-range does not have its own VirtualMemory.
   ASSERT(!isolate_->code_range()->contains(
       static_cast<Address>(reservation->address())));
-  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+  ASSERT(executability == VirtualMemory::NOT_EXECUTABLE ||
+         !isolate_->code_range()->exists());
   reservation->Release();
 }
 
 
 void MemoryAllocator::FreeMemory(Address base,
                                  size_t size,
-                                 Executability executable) {
+                                 VirtualMemory::Executability executability) {
   // TODO(gc) make code_range part of memory allocator?
   ASSERT(size_ >= size);
   size_ -= size;
 
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
 
-  if (executable == EXECUTABLE) {
+  if (executability == VirtualMemory::EXECUTABLE) {
     ASSERT(size_executable_ >= size);
     size_executable_ -= size;
   }
   if (isolate_->code_range()->contains(static_cast<Address>(base))) {
-    ASSERT(executable == EXECUTABLE);
+    ASSERT(executability == VirtualMemory::EXECUTABLE);
     isolate_->code_range()->FreeRawMemory(base, size);
   } else {
-    ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+    ASSERT(executability == VirtualMemory::NOT_EXECUTABLE ||
+           !isolate_->code_range()->exists());
     bool result = VirtualMemory::ReleaseRegion(base, size);
     USE(result);
     ASSERT(result);
@@ -378,17 +383,18 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size,
 }
 
 
-Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
-                                               size_t commit_size,
-                                               size_t alignment,
-                                               Executability executable,
-                                               VirtualMemory* controller) {
+Address MemoryAllocator::AllocateAlignedMemory(
+    size_t reserve_size,
+    size_t commit_size,
+    size_t alignment,
+    VirtualMemory::Executability executability,
+    VirtualMemory* controller) {
   ASSERT(commit_size <= reserve_size);
   VirtualMemory reservation;
   Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
   if (base == NULL) return NULL;
 
-  if (executable == EXECUTABLE) {
+  if (executability == VirtualMemory::EXECUTABLE) {
     if (!CommitExecutableMemory(&reservation,
                                 base,
                                 commit_size,
@@ -396,7 +402,7 @@ Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
       base = NULL;
     }
   } else {
-    if (reservation.Commit(base, commit_size, false)) {
+    if (reservation.Commit(base, commit_size, VirtualMemory::NOT_EXECUTABLE)) {
       UpdateAllocatedSpaceLimits(base, base + commit_size);
     } else {
       base = NULL;
@@ -433,7 +439,7 @@ NewSpacePage* NewSpacePage::Initialize(Heap* heap,
                                                Page::kPageSize,
                                                area_start,
                                                area_end,
-                                               NOT_EXECUTABLE,
+                                               VirtualMemory::NOT_EXECUTABLE,
                                                semi_space);
   chunk->set_next_chunk(NULL);
   chunk->set_prev_chunk(NULL);
@@ -464,7 +470,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
                                      size_t size,
                                      Address area_start,
                                      Address area_end,
-                                     Executability executable,
+                                     VirtualMemory::Executability executability,
                                      Space* owner) {
   MemoryChunk* chunk = FromAddress(base);
 
@@ -496,7 +502,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
   ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
   ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
 
-  if (executable == EXECUTABLE) {
+  if (executability == VirtualMemory::EXECUTABLE) {
     chunk->SetFlag(IS_EXECUTABLE);
   }
 
@@ -513,9 +519,10 @@ bool MemoryChunk::CommitArea(size_t requested) {
   size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
                       MemoryAllocator::CodePageGuardSize() : 0;
   size_t header_size = area_start() - address() - guard_size;
-  size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
+  size_t commit_size = RoundUp(header_size + requested,
+                               VirtualMemory::GetPageSize());
   size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
-                                  OS::CommitPageSize());
+                                  VirtualMemory::GetPageSize());
 
   if (commit_size > committed_size) {
     // Commit size should be less or equal than the reserved size.
@@ -524,10 +531,10 @@ bool MemoryChunk::CommitArea(size_t requested) {
     Address start = address() + committed_size + guard_size;
     size_t length = commit_size - committed_size;
     if (reservation_.IsReserved()) {
-      Executability executable = IsFlagSet(IS_EXECUTABLE)
-          ? EXECUTABLE : NOT_EXECUTABLE;
+      VirtualMemory::Executability executability = IsFlagSet(IS_EXECUTABLE)
+          ? VirtualMemory::EXECUTABLE : VirtualMemory::NOT_EXECUTABLE;
       if (!heap()->isolate()->memory_allocator()->CommitMemory(
-              start, length, executable)) {
+              start, length, executability)) {
         return false;
       }
     } else {
@@ -589,10 +596,11 @@ void MemoryChunk::Unlink() {
 }
 
 
-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
-                                            intptr_t commit_area_size,
-                                            Executability executable,
-                                            Space* owner) {
+MemoryChunk* MemoryAllocator::AllocateChunk(
+    intptr_t reserve_area_size,
+    intptr_t commit_area_size,
+    VirtualMemory::Executability executability,
+    Space* owner) {
   ASSERT(commit_area_size <= reserve_area_size);
 
   size_t chunk_size;
@@ -632,9 +640,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
   // +----------------------------+<- base + chunk_size
   //
 
-  if (executable == EXECUTABLE) {
+  if (executability == VirtualMemory::EXECUTABLE) {
     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
-                         OS::CommitPageSize()) + CodePageGuardSize();
+                         VirtualMemory::GetPageSize()) + CodePageGuardSize();
 
     // Check executable memory limit.
     if (size_executable_ + chunk_size > capacity_executable_) {
@@ -646,7 +654,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
 
     // Size of header (not executable) plus area (executable).
     size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
-                                 OS::CommitPageSize());
+                                 VirtualMemory::GetPageSize());
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range()->exists()) {
@@ -663,7 +671,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
       base = AllocateAlignedMemory(chunk_size,
                                    commit_size,
                                    MemoryChunk::kAlignment,
-                                   executable,
+                                   executability,
                                    &reservation);
       if (base == NULL) return NULL;
       // Update executable memory size.
@@ -679,13 +687,14 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
     area_end = area_start + commit_area_size;
   } else {
     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
-                         OS::CommitPageSize());
-    size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
-                                 commit_area_size, OS::CommitPageSize());
+                         VirtualMemory::GetPageSize());
+    size_t commit_size = RoundUp(
+        MemoryChunk::kObjectStartOffset + commit_area_size,
+        VirtualMemory::GetPageSize());
     base = AllocateAlignedMemory(chunk_size,
                                  commit_size,
                                  MemoryChunk::kAlignment,
-                                 executable,
+                                 executability,
                                  &reservation);
 
     if (base == NULL) return NULL;
@@ -714,7 +723,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                                 chunk_size,
                                                 area_start,
                                                 area_end,
-                                                executable,
+                                                executability,
                                                 owner);
   result->set_reserved_memory(&reservation);
   return result;
@@ -730,23 +739,25 @@ void Page::ResetFreeListStatistics() {
 }
 
 
-Page* MemoryAllocator::AllocatePage(intptr_t size,
-                                    PagedSpace* owner,
-                                    Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
+Page* MemoryAllocator::AllocatePage(
+    intptr_t size,
+    PagedSpace* owner,
+    VirtualMemory::Executability executability) {
+  MemoryChunk* chunk = AllocateChunk(size, size, executability, owner);
 
   if (chunk == NULL) return NULL;
 
-  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
+  return Page::Initialize(isolate_->heap(), chunk, executability, owner);
 }
 
 
-LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
-                                              Space* owner,
-                                              Executability executable) {
+LargePage* MemoryAllocator::AllocateLargePage(
+    intptr_t object_size,
+    Space* owner,
+    VirtualMemory::Executability executability) {
   MemoryChunk* chunk = AllocateChunk(object_size,
                                      object_size,
-                                     executable,
+                                     executability,
                                      owner);
   if (chunk == NULL) return NULL;
   return LargePage::Initialize(isolate_->heap(), chunk);
@@ -769,19 +780,19 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
 
   VirtualMemory* reservation = chunk->reserved_memory();
   if (reservation->IsReserved()) {
-    FreeMemory(reservation, chunk->executable());
+    FreeMemory(reservation, chunk->executability());
   } else {
     FreeMemory(chunk->address(),
                chunk->size(),
-               chunk->executable());
+               chunk->executability());
   }
 }
 
 
 bool MemoryAllocator::CommitBlock(Address start,
                                   size_t size,
-                                  Executability executable) {
-  if (!CommitMemory(start, size, executable)) return false;
+                                  VirtualMemory::Executability executability) {
+  if (!CommitMemory(start, size, executability)) return false;
 
   if (Heap::ShouldZapGarbage()) {
     ZapBlock(start, size);
@@ -866,12 +877,12 @@ void MemoryAllocator::ReportStatistics() {
 int MemoryAllocator::CodePageGuardStartOffset() {
   // We are guarding code pages: the first OS page after the header
   // will be protected as non-writable.
-  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
+  return RoundUp(Page::kObjectStartOffset, VirtualMemory::GetPageSize());
 }
 
 
 int MemoryAllocator::CodePageGuardSize() {
-  return static_cast<int>(OS::CommitPageSize());
+  return static_cast<int>(VirtualMemory::GetPageSize());
 }
 
 
@@ -885,7 +896,7 @@ int MemoryAllocator::CodePageAreaStartOffset() {
 int MemoryAllocator::CodePageAreaEndOffset() {
   // We are guarding code pages: the last OS page will be protected as
   // non-writable.
-  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
+  return Page::kPageSize - static_cast<int>(VirtualMemory::GetPageSize());
 }
 
 
@@ -896,24 +907,26 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
   // Commit page header (not executable).
   if (!vm->Commit(start,
                   CodePageGuardStartOffset(),
-                  false)) {
+                  VirtualMemory::NOT_EXECUTABLE)) {
     return false;
   }
 
   // Create guard page after the header.
-  if (!vm->Guard(start + CodePageGuardStartOffset())) {
+  if (!vm->Guard(start + CodePageGuardStartOffset(),
+                 VirtualMemory::GetPageSize())) {
     return false;
   }
 
   // Commit page body (executable).
   if (!vm->Commit(start + CodePageAreaStartOffset(),
                   commit_size - CodePageGuardStartOffset(),
-                  true)) {
+                  VirtualMemory::EXECUTABLE)) {
     return false;
   }
 
   // Create guard page before the end.
-  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
+  if (!vm->Guard(start + reserved_size - CodePageGuardSize(),
+                 VirtualMemory::GetPageSize())) {
     return false;
   }
 
@@ -942,8 +955,8 @@ void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
 PagedSpace::PagedSpace(Heap* heap,
                        intptr_t max_capacity,
                        AllocationSpace id,
-                       Executability executable)
-    : Space(heap, id, executable),
+                       VirtualMemory::Executability executability)
+    : Space(heap, id, executability),
       free_list_(this),
       was_swept_conservatively_(false),
       first_unswept_page_(Page::FromAddress(NULL)),
@@ -1041,7 +1054,7 @@ bool PagedSpace::Expand() {
   }
 
   Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
-      size, this, executable());
+      size, this, executability());
   if (p == NULL) return false;
 
   ASSERT(Capacity() <= max_capacity_);
@@ -1288,8 +1301,8 @@ void NewSpace::TearDown() {
   LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
 
   ASSERT(reservation_.IsReserved());
-  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
-                                                    NOT_EXECUTABLE);
+  heap()->isolate()->memory_allocator()->FreeMemory(
+      &reservation_, VirtualMemory::NOT_EXECUTABLE);
   chunk_base_ = NULL;
   chunk_size_ = 0;
 }
@@ -1524,7 +1537,7 @@ bool SemiSpace::Commit() {
   Address start = end - pages * Page::kPageSize;
   if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
                                                           capacity_,
-                                                          executable())) {
+                                                          executability())) {
     return false;
   }
 
@@ -1581,9 +1594,9 @@ bool SemiSpace::GrowTo(int new_capacity) {
   Address start = end - new_capacity;
   size_t delta = new_capacity - capacity_;
 
-  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+  ASSERT(IsAligned(delta, VirtualMemory::GetAllocationGranularity()));
   if (!heap()->isolate()->memory_allocator()->CommitBlock(
-      start, delta, executable())) {
+      start, delta, executability())) {
     return false;
   }
   capacity_ = new_capacity;
@@ -1616,7 +1629,7 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
     Address space_end = start_ + maximum_capacity_;
     Address old_start = space_end - capacity_;
     size_t delta = capacity_ - new_capacity;
-    ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+    ASSERT(IsAligned(delta, VirtualMemory::GetAllocationGranularity()));
 
     MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
     if (!allocator->UncommitBlock(old_start, delta)) {
@@ -2924,7 +2937,8 @@ static bool ComparePointers(void* key1, void* key2) {
 LargeObjectSpace::LargeObjectSpace(Heap* heap,
                                    intptr_t max_capacity,
                                    AllocationSpace id)
-    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
+    // Managed on a per-allocation basis
+    : Space(heap, id, VirtualMemory::NOT_EXECUTABLE),
       max_capacity_(max_capacity),
       first_page_(NULL),
       size_(0),
@@ -2958,8 +2972,8 @@ void LargeObjectSpace::TearDown() {
 }
 
 
-MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
-                                           Executability executable) {
+MaybeObject* LargeObjectSpace::AllocateRaw(
+    int object_size, VirtualMemory::Executability executability) {
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
   if (!heap()->always_allocate() &&
@@ -2972,7 +2986,7 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
   }
 
   LargePage* page = heap()->isolate()->memory_allocator()->
-      AllocateLargePage(object_size, this, executable);
+      AllocateLargePage(object_size, this, executability);
   if (page == NULL) return Failure::RetryAfterGC(identity());
   ASSERT(page->area_size() >= object_size);
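
The call-site changes in the heap follow a single pattern: the former
free-standing Executability values and bool is_executable flags become
VirtualMemory::Executability, and the results of Commit/Uncommit are checked
rather than silently ignored. A hedged before/after sketch with a hypothetical
helper (not a function in the tree):

  // Hypothetical caller, shown only to illustrate the migration.
  // Before: reservation->Commit(base, size, true /* is_executable */);
  bool CommitCodePages(v8::internal::VirtualMemory* reservation,
                       void* base, size_t size) {
    // After: executability is an explicit enum and the result is returned
    // to the caller.
    return reservation->Commit(base, size,
                               v8::internal::VirtualMemory::EXECUTABLE);
  }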
 
index 1ccdacb..e0ffe0e 100644 (file)
@@ -33,6 +33,7 @@
 #include "list.h"
 #include "log.h"
 #include "platform/mutex.h"
+#include "platform/virtual-memory.h"
 #include "v8utils.h"
 
 namespace v8 {
@@ -573,8 +574,10 @@ class MemoryChunk {
     area_end_ = area_end;
   }
 
-  Executability executable() {
-    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+  VirtualMemory::Executability executability() {
+    return IsFlagSet(IS_EXECUTABLE)
+        ? VirtualMemory::EXECUTABLE
+        : VirtualMemory::NOT_EXECUTABLE;
   }
 
   bool ContainsOnlyData() {
@@ -716,7 +719,7 @@ class MemoryChunk {
                                  size_t size,
                                  Address area_start,
                                  Address area_end,
-                                 Executability executable,
+                                 VirtualMemory::Executability executability,
                                  Space* owner);
 
   friend class MemoryAllocator;
@@ -796,7 +799,7 @@ class Page : public MemoryChunk {
 
   static inline Page* Initialize(Heap* heap,
                                  MemoryChunk* chunk,
-                                 Executability executable,
+                                 VirtualMemory::Executability executable,
                                  PagedSpace* owner);
 
   void InitializeAsAnchor(PagedSpace* owner);
@@ -862,15 +865,17 @@ STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
 // Space is the abstract superclass for all allocation spaces.
 class Space : public Malloced {
  public:
-  Space(Heap* heap, AllocationSpace id, Executability executable)
-      : heap_(heap), id_(id), executable_(executable) {}
+  Space(Heap* heap,
+        AllocationSpace id,
+        VirtualMemory::Executability executability)
+      : heap_(heap), id_(id), executability_(executability) {}
 
   virtual ~Space() {}
 
   Heap* heap() const { return heap_; }
 
   // Does the space need executable memory?
-  Executability executable() { return executable_; }
+  VirtualMemory::Executability executability() { return executability_; }
 
   // Identity used in error reporting.
   AllocationSpace identity() { return id_; }
@@ -897,7 +902,7 @@ class Space : public Malloced {
  private:
   Heap* heap_;
   AllocationSpace id_;
-  Executability executable_;
+  VirtualMemory::Executability executability_;
 };
 
 
@@ -1055,11 +1060,13 @@ class MemoryAllocator {
 
   void TearDown();
 
-  Page* AllocatePage(
-      intptr_t size, PagedSpace* owner, Executability executable);
+  Page* AllocatePage(intptr_t size,
+                     PagedSpace* owner,
+                     VirtualMemory::Executability executability);
 
-  LargePage* AllocateLargePage(
-      intptr_t object_size, Space* owner, Executability executable);
+  LargePage* AllocateLargePage(intptr_t object_size,
+                               Space* owner,
+                               VirtualMemory::Executability executability);
 
   void Free(MemoryChunk* chunk);
 
@@ -1100,7 +1107,7 @@ class MemoryAllocator {
   // could be committed later by calling MemoryChunk::CommitArea.
   MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
                              intptr_t commit_area_size,
-                             Executability executable,
+                             VirtualMemory::Executability executability,
                              Space* space);
 
   Address ReserveAlignedMemory(size_t requested,
@@ -1109,19 +1116,26 @@ class MemoryAllocator {
   Address AllocateAlignedMemory(size_t reserve_size,
                                 size_t commit_size,
                                 size_t alignment,
-                                Executability executable,
+                                VirtualMemory::Executability executability,
                                 VirtualMemory* controller);
 
-  bool CommitMemory(Address addr, size_t size, Executability executable);
+  bool CommitMemory(Address addr,
+                    size_t size,
+                    VirtualMemory::Executability executability);
 
-  void FreeMemory(VirtualMemory* reservation, Executability executable);
-  void FreeMemory(Address addr, size_t size, Executability executable);
+  void FreeMemory(VirtualMemory* reservation,
+                  VirtualMemory::Executability executability);
+  void FreeMemory(Address addr,
+                  size_t size,
+                  VirtualMemory::Executability executability);
 
   // Commit a contiguous block of memory from the initial chunk.  Assumes that
   // the address is not NULL, the size is greater than zero, and that the
   // block is contained in the initial chunk.  Returns true if it succeeded
   // and false otherwise.
-  bool CommitBlock(Address start, size_t size, Executability executable);
+  bool CommitBlock(Address start,
+                   size_t size,
+                   VirtualMemory::Executability executability);
 
   // Uncommit a contiguous block of memory [start..(start+size)[.
   // start is not NULL, the size is greater than zero, and the
@@ -1612,7 +1626,7 @@ class PagedSpace : public Space {
   PagedSpace(Heap* heap,
              intptr_t max_capacity,
              AllocationSpace id,
-             Executability executable);
+             VirtualMemory::Executability executability);
 
   virtual ~PagedSpace() {}
 
@@ -2037,7 +2051,7 @@ class SemiSpace : public Space {
  public:
   // Constructor.
   SemiSpace(Heap* heap, SemiSpaceId semispace)
-    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+    : Space(heap, NEW_SPACE, VirtualMemory::NOT_EXECUTABLE),
       start_(NULL),
       age_mark_(NULL),
       id_(semispace),
@@ -2290,7 +2304,7 @@ class NewSpace : public Space {
  public:
   // Constructor.
   explicit NewSpace(Heap* heap)
-    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+    : Space(heap, NEW_SPACE, VirtualMemory::NOT_EXECUTABLE),
       to_space_(heap, kToSpace),
       from_space_(heap, kFromSpace),
       reservation_(),
@@ -2555,8 +2569,8 @@ class OldSpace : public PagedSpace {
   OldSpace(Heap* heap,
            intptr_t max_capacity,
            AllocationSpace id,
-           Executability executable)
-      : PagedSpace(heap, max_capacity, id, executable) {
+           VirtualMemory::Executability executability)
+      : PagedSpace(heap, max_capacity, id, executability) {
     page_extra_ = 0;
   }
 
@@ -2587,7 +2601,7 @@ class FixedSpace : public PagedSpace {
              intptr_t max_capacity,
              AllocationSpace id,
              int object_size_in_bytes)
-      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
+      : PagedSpace(heap, max_capacity, id, VirtualMemory::NOT_EXECUTABLE),
         object_size_in_bytes_(object_size_in_bytes) {
     page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
   }
@@ -2727,8 +2741,8 @@ class LargeObjectSpace : public Space {
 
   // Shared implementation of AllocateRaw, AllocateRawCode and
   // AllocateRawFixedArray.
-  MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
-                                           Executability executable);
+  MUST_USE_RESULT MaybeObject* AllocateRaw(
+      int object_size, VirtualMemory::Executability executability);
 
   // Available bytes for objects in this space.
   inline intptr_t Available();
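The space constructors follow the same pattern: executability is passed as a VirtualMemory::Executability and can be read back through executability(). A small sketch, assuming the code space is still set up as an OldSpace with CODE_SPACE (the setup itself is not part of this hunk):

  OldSpace* code_space = new OldSpace(heap,
                                      heap->MaxOldGenerationSize(),
                                      CODE_SPACE,
                                      VirtualMemory::EXECUTABLE);
  ASSERT(code_space->executability() == VirtualMemory::EXECUTABLE);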
index 22a5467..7a0dc88 100644 (file)
@@ -72,7 +72,8 @@ void StoreBuffer::SetUp() {
   // Don't know the alignment requirements of the OS, but it is certainly not
   // less than 0xfff.
   ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
-  int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
+  int initial_length =
+      static_cast<int>(VirtualMemory::GetPageSize() / kPointerSize);
   ASSERT(initial_length > 0);
   ASSERT(initial_length <= kOldStoreBufferLength);
   old_limit_ = old_start_ + initial_length;
@@ -81,7 +82,7 @@ void StoreBuffer::SetUp() {
   CHECK(old_virtual_memory_->Commit(
             reinterpret_cast<void*>(old_start_),
             (old_limit_ - old_start_) * kPointerSize,
-            false));
+            VirtualMemory::NOT_EXECUTABLE));
 
   ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
   ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
@@ -97,7 +98,7 @@ void StoreBuffer::SetUp() {
 
   CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                 kStoreBufferSize,
-                                false));  // Not executable.
+                                VirtualMemory::NOT_EXECUTABLE));
   heap_->public_set_store_buffer_top(start_);
 
   hash_set_1_ = new uintptr_t[kHashSetLength];
@@ -154,7 +155,7 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
     size_t grow = old_limit_ - old_start_;  // Double size.
     CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                       grow * kPointerSize,
-                                      false));
+                                      VirtualMemory::NOT_EXECUTABLE));
     old_limit_ += grow;
   }
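The boolean third argument to VirtualMemory::Commit is gone; committing non-executable memory now names the enum value explicitly, so the reserve/commit/uncommit round trip reads uniformly. A condensed sketch with illustrative sizes:

  VirtualMemory vm(1 * MB);           // reserve
  CHECK(vm.IsReserved());
  void* page = vm.address();
  CHECK(vm.Commit(page, VirtualMemory::GetPageSize(),
                  VirtualMemory::NOT_EXECUTABLE));  // commit one data page
  CHECK(vm.Uncommit(page, VirtualMemory::GetPageSize()));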
 
index 7fa2fd6..6144631 100644 (file)
@@ -201,8 +201,6 @@ enum PretenureFlag { NOT_TENURED, TENURED };
 
 enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
 
-enum Executability { NOT_EXECUTABLE, EXECUTABLE };
-
 enum VisitMode {
   VISIT_ALL,
   VISIT_ALL_IN_SCAVENGE,
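With the global enum removed, the executable/non-executable distinction lives on VirtualMemory itself. The declaration presumably reads along these lines (reconstructed from the call sites in this change, not copied from virtual-memory.h):

  class VirtualMemory {
   public:
    enum Executability { NOT_EXECUTABLE, EXECUTABLE };
    // ...
  };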
index 24773c2..9d4421e 100644 (file)
@@ -58,9 +58,8 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
 UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
   size_t actual_size;
   // Allocate buffer in executable space.
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
   if (buffer == NULL) {
     // Fallback to library function if function cannot be created.
     switch (type) {
@@ -94,7 +93,9 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
   ASSERT(!RelocInfo::RequiresRelocation(desc));
 
   CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
+  ASSERT(result);
+  USE(result);
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
 }
 
@@ -102,7 +103,8 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
 UnaryMathFunction CreateExpFunction() {
   if (!FLAG_fast_math) return &exp;
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
   if (buffer == NULL) return &exp;
   ExternalReference::InitializeMathExpData();
 
@@ -125,7 +127,9 @@ UnaryMathFunction CreateExpFunction() {
   ASSERT(!RelocInfo::RequiresRelocation(desc));
 
   CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
+  ASSERT(result);
+  USE(result);
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
 }
 
@@ -133,9 +137,8 @@ UnaryMathFunction CreateExpFunction() {
 UnaryMathFunction CreateSqrtFunction() {
   size_t actual_size;
   // Allocate buffer in executable space.
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
   if (buffer == NULL) return &sqrt;
 
   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -149,7 +152,9 @@ UnaryMathFunction CreateSqrtFunction() {
   ASSERT(!RelocInfo::RequiresRelocation(desc));
 
   CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
+  ASSERT(result);
+  USE(result);
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
 }
 
@@ -237,7 +242,9 @@ ModuloFunction CreateModuloFunction() {
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  OS::ProtectCode(buffer, actual_size);
+  bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
+  ASSERT(result);
+  USE(result);
   // Call the function from C++ through this pointer.
   return FUNCTION_CAST<ModuloFunction>(buffer);
 }
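The stub generators above now share a single sequence: allocate an executable region, assemble into it, flush the instruction cache, and finally write-protect the region. Condensed from the hunks above, with error handling and the emitted instructions elided:

  size_t actual_size;
  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
      1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
  if (buffer == NULL) return &sqrt;  // fall back to the library function
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // ... emit code into masm ...
  CodeDesc desc;
  masm.GetCode(&desc);
  CPU::FlushICache(buffer, actual_size);
  bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
  ASSERT(result);
  USE(result);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);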
index 0775cc5..1f21294 100644 (file)
         'test-unbound-queue.cc',
         'test-utils.cc',
         'test-version.cc',
+        'test-virtual-memory.cc',
         'test-weakmaps.cc',
         'test-weaksets.cc',
         'test-weaktypedarrays.cc'
index f7d2311..7fd4db4 100644 (file)
 #include "serialize.h"
 #include "cctest.h"
 
-using v8::internal::Assembler;
-using v8::internal::Code;
-using v8::internal::CodeDesc;
-using v8::internal::FUNCTION_CAST;
-using v8::internal::Immediate;
-using v8::internal::Isolate;
-using v8::internal::Label;
-using v8::internal::OS;
-using v8::internal::Operand;
-using v8::internal::byte;
-using v8::internal::greater;
-using v8::internal::less_equal;
-using v8::internal::equal;
-using v8::internal::not_equal;
-using v8::internal::r13;
-using v8::internal::r15;
-using v8::internal::r8;
-using v8::internal::r9;
-using v8::internal::rax;
-using v8::internal::rbx;
-using v8::internal::rbp;
-using v8::internal::rcx;
-using v8::internal::rdi;
-using v8::internal::rdx;
-using v8::internal::rsi;
-using v8::internal::rsp;
-using v8::internal::times_1;
-using v8::internal::xmm0;
+using namespace v8::internal;
 
 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
@@ -92,9 +65,10 @@ static const v8::internal::Register arg2 = rsi;
 TEST(AssemblerX64ReturnOperation) {
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
 
@@ -114,9 +88,10 @@ TEST(AssemblerX64ReturnOperation) {
 TEST(AssemblerX64StackOperations) {
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
 
@@ -146,9 +121,10 @@ TEST(AssemblerX64StackOperations) {
 TEST(AssemblerX64ArithmeticOperations) {
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
 
@@ -168,9 +144,10 @@ TEST(AssemblerX64ArithmeticOperations) {
 TEST(AssemblerX64ImulOperation) {
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
 
@@ -196,9 +173,10 @@ TEST(AssemblerX64ImulOperation) {
 TEST(AssemblerX64MemoryOperands) {
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
 
@@ -230,9 +208,10 @@ TEST(AssemblerX64MemoryOperands) {
 TEST(AssemblerX64ControlFlow) {
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
 
@@ -259,9 +238,10 @@ TEST(AssemblerX64ControlFlow) {
 TEST(AssemblerX64LoopImmediates) {
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
   // Assemble two loops using rax as counter, and verify the ending counts.
index c99433e..b4d23a4 100644 (file)
@@ -47,9 +47,10 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
                                               bool inline_fastpath) {
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   HandleScope handles(isolate);
   MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
index 3f62175..2fde4a5 100644 (file)
@@ -47,9 +47,10 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
                                               Register destination_reg) {
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                   &actual_size,
-                                                   true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   HandleScope handles(isolate);
   MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
index 4af5b45..a058118 100644 (file)
@@ -46,9 +46,10 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
                                               Register destination_reg) {
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                   &actual_size,
-                                                   true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   HandleScope handles(isolate);
   MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
index a2070a5..4973ef5 100644 (file)
 #include "serialize.h"
 #include "cctest.h"
 
-using v8::internal::Assembler;
-using v8::internal::CodeDesc;
-using v8::internal::Condition;
-using v8::internal::FUNCTION_CAST;
-using v8::internal::HandleScope;
-using v8::internal::Immediate;
-using v8::internal::Isolate;
-using v8::internal::Label;
-using v8::internal::MacroAssembler;
-using v8::internal::OS;
-using v8::internal::Operand;
-using v8::internal::RelocInfo;
-using v8::internal::Smi;
-using v8::internal::SmiIndex;
-using v8::internal::byte;
-using v8::internal::carry;
-using v8::internal::greater;
-using v8::internal::greater_equal;
-using v8::internal::kIntSize;
-using v8::internal::kPointerSize;
-using v8::internal::kSmiTagMask;
-using v8::internal::kSmiValueSize;
-using v8::internal::less_equal;
-using v8::internal::negative;
-using v8::internal::not_carry;
-using v8::internal::not_equal;
-using v8::internal::not_zero;
-using v8::internal::positive;
-using v8::internal::r11;
-using v8::internal::r13;
-using v8::internal::r14;
-using v8::internal::r15;
-using v8::internal::r8;
-using v8::internal::r9;
-using v8::internal::rax;
-using v8::internal::rbp;
-using v8::internal::rbx;
-using v8::internal::rcx;
-using v8::internal::rdi;
-using v8::internal::rdx;
-using v8::internal::rsi;
-using v8::internal::rsp;
-using v8::internal::times_pointer_size;
+using namespace v8::internal;
 
 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
@@ -153,9 +111,10 @@ TEST(SmiMove) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                   &actual_size,
-                                                   true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -241,10 +200,10 @@ TEST(SmiCompare) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize * 2,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -293,9 +252,10 @@ TEST(Integer32ToSmi) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -422,9 +382,10 @@ TEST(Integer64PlusConstantToSmi) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -467,9 +428,10 @@ TEST(SmiCheck) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                   &actual_size,
-                                                   true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -715,10 +677,10 @@ TEST(SmiNeg) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -806,9 +768,10 @@ TEST(SmiAdd) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -996,10 +959,10 @@ TEST(SmiSub) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize * 2,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1088,9 +1051,10 @@ TEST(SmiMul) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1194,10 +1158,10 @@ TEST(SmiDiv) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize * 2,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1305,10 +1269,10 @@ TEST(SmiMod) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize * 2,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1403,10 +1367,10 @@ TEST(SmiIndex) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize * 3,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1473,10 +1437,10 @@ TEST(SmiSelectNonSmi) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1553,10 +1517,10 @@ TEST(SmiAnd) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1635,10 +1599,10 @@ TEST(SmiOr) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1719,10 +1683,10 @@ TEST(SmiXor) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1787,10 +1751,10 @@ TEST(SmiNot) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1884,10 +1848,10 @@ TEST(SmiShiftLeft) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize * 4,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1991,10 +1955,10 @@ TEST(SmiShiftLogicalRight) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize * 3,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -2061,10 +2025,10 @@ TEST(SmiShiftArithmeticRight) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize * 2,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -2126,10 +2090,10 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize * 4,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -2170,10 +2134,10 @@ TEST(OperandOffset) {
 
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
+          Assembler::kMinimalBufferSize * 2,
+          &actual_size,
+          VirtualMemory::EXECUTABLE));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
index f289e94..e0eae02 100644 (file)
 using namespace ::v8::internal;
 
 
-TEST(VirtualMemory) {
-  VirtualMemory* vm = new VirtualMemory(1 * MB);
-  CHECK(vm->IsReserved());
-  void* block_addr = vm->address();
-  size_t block_size = 4 * KB;
-  CHECK(vm->Commit(block_addr, block_size, false));
-  // Check whether we can write to memory.
-  int* addr = static_cast<int*>(block_addr);
-  addr[KB-1] = 2;
-  CHECK(vm->Uncommit(block_addr, block_size));
-  delete vm;
-}
-
-
 TEST(GetCurrentProcessId) {
   CHECK_EQ(static_cast<int>(getpid()), OS::GetCurrentProcessId());
 }
index d7fdab1..3db5f39 100644 (file)
 using namespace ::v8::internal;
 
 
-TEST(VirtualMemory) {
-  VirtualMemory* vm = new VirtualMemory(1 * MB);
-  CHECK(vm->IsReserved());
-  void* block_addr = vm->address();
-  size_t block_size = 4 * KB;
-  CHECK(vm->Commit(block_addr, block_size, false));
-  // Check whether we can write to memory.
-  int* addr = static_cast<int*>(block_addr);
-  addr[KB-1] = 2;
-  CHECK(vm->Uncommit(block_addr, block_size));
-  delete vm;
-}
-
-
 TEST(GetCurrentProcessId) {
   CHECK_EQ(static_cast<int>(::GetCurrentProcessId()),
            OS::GetCurrentProcessId());
index 3326a01..2edb57f 100644 (file)
@@ -151,30 +151,30 @@ static void VerifyMemoryChunk(Isolate* isolate,
                               size_t reserve_area_size,
                               size_t commit_area_size,
                               size_t second_commit_area_size,
-                              Executability executable) {
+                              VirtualMemory::Executability executability) {
   MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
   CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                 heap->MaxExecutableSize()));
   TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
   TestCodeRangeScope test_code_range_scope(isolate, code_range);
 
-  size_t header_size = (executable == EXECUTABLE)
+  size_t header_size = (executability == VirtualMemory::EXECUTABLE)
                        ? MemoryAllocator::CodePageGuardStartOffset()
                        : MemoryChunk::kObjectStartOffset;
-  size_t guard_size = (executable == EXECUTABLE)
+  size_t guard_size = (executability == VirtualMemory::EXECUTABLE)
                        ? MemoryAllocator::CodePageGuardSize()
                        : 0;
 
   MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
                                                               commit_area_size,
-                                                              executable,
+                                                              executability,
                                                               NULL);
   size_t alignment = code_range->exists() ?
-                     MemoryChunk::kAlignment : OS::CommitPageSize();
-  size_t reserved_size = ((executable == EXECUTABLE))
+                     MemoryChunk::kAlignment : VirtualMemory::GetPageSize();
+  size_t reserved_size = ((executability == VirtualMemory::EXECUTABLE))
       ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
                 alignment)
-      : RoundUp(header_size + reserve_area_size, OS::CommitPageSize());
+      : RoundUp(header_size + reserve_area_size, VirtualMemory::GetPageSize());
   CHECK(memory_chunk->size() == reserved_size);
   CHECK(memory_chunk->area_start() < memory_chunk->address() +
                                      memory_chunk->size());
@@ -230,7 +230,7 @@ TEST(MemoryChunk) {
                       reserve_area_size,
                       initial_commit_area_size,
                       second_commit_area_size,
-                      EXECUTABLE);
+                      VirtualMemory::EXECUTABLE);
 
     VerifyMemoryChunk(isolate,
                       heap,
@@ -238,7 +238,7 @@ TEST(MemoryChunk) {
                       reserve_area_size,
                       initial_commit_area_size,
                       second_commit_area_size,
-                      NOT_EXECUTABLE);
+                      VirtualMemory::NOT_EXECUTABLE);
     delete code_range;
 
     // Without CodeRange.
@@ -249,7 +249,7 @@ TEST(MemoryChunk) {
                       reserve_area_size,
                       initial_commit_area_size,
                       second_commit_area_size,
-                      EXECUTABLE);
+                      VirtualMemory::EXECUTABLE);
 
     VerifyMemoryChunk(isolate,
                       heap,
@@ -257,7 +257,7 @@ TEST(MemoryChunk) {
                       reserve_area_size,
                       initial_commit_area_size,
                       second_commit_area_size,
-                      NOT_EXECUTABLE);
+                      VirtualMemory::NOT_EXECUTABLE);
   }
 }
 
@@ -276,9 +276,9 @@ TEST(MemoryAllocator) {
   OldSpace faked_space(heap,
                        heap->MaxReserved(),
                        OLD_POINTER_SPACE,
-                       NOT_EXECUTABLE);
+                       VirtualMemory::NOT_EXECUTABLE);
   Page* first_page = memory_allocator->AllocatePage(
-      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
+      faked_space.AreaSize(), &faked_space, VirtualMemory::NOT_EXECUTABLE);
 
   first_page->InsertAfter(faked_space.anchor()->prev_page());
   CHECK(first_page->is_valid());
@@ -291,7 +291,7 @@ TEST(MemoryAllocator) {
 
   // Again, we should get n or n - 1 pages.
   Page* other = memory_allocator->AllocatePage(
-      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
+      faked_space.AreaSize(), &faked_space, VirtualMemory::NOT_EXECUTABLE);
   CHECK(other->is_valid());
   total_pages++;
   other->InsertAfter(first_page);
@@ -353,7 +353,7 @@ TEST(OldSpace) {
   OldSpace* s = new OldSpace(heap,
                              heap->MaxOldGenerationSize(),
                              OLD_POINTER_SPACE,
-                             NOT_EXECUTABLE);
+                             VirtualMemory::NOT_EXECUTABLE);
   CHECK(s != NULL);
 
   CHECK(s->SetUp());
@@ -377,7 +377,8 @@ TEST(LargeObjectSpace) {
 
   int lo_size = Page::kPageSize;
 
-  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->ToObjectUnchecked();
+  Object* obj = lo->AllocateRaw(
+      lo_size, VirtualMemory::NOT_EXECUTABLE)->ToObjectUnchecked();
   CHECK(obj->IsHeapObject());
 
   HeapObject* ho = HeapObject::cast(obj);
@@ -390,7 +391,8 @@ TEST(LargeObjectSpace) {
 
   while (true) {
     intptr_t available = lo->Available();
-    { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
+    { MaybeObject* maybe_obj = lo->AllocateRaw(
+            lo_size, VirtualMemory::NOT_EXECUTABLE);
       if (!maybe_obj->ToObject(&obj)) break;
     }
     CHECK(lo->Available() < available);
@@ -398,5 +400,5 @@ TEST(LargeObjectSpace) {
 
   CHECK(!lo->IsEmpty());
 
-  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->IsFailure());
+  CHECK(lo->AllocateRaw(lo_size, VirtualMemory::NOT_EXECUTABLE)->IsFailure());
 }
diff --git a/test/cctest/test-virtual-memory.cc b/test/cctest/test-virtual-memory.cc
new file mode 100644 (file)
index 0000000..d441835
--- /dev/null
@@ -0,0 +1,86 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "platform/virtual-memory.h"
+
+using namespace ::v8::internal;
+
+
+TEST(CommitAndUncommit) {
+  static const size_t kSize = 1 * MB;
+  static const size_t kBlockSize = 4 * KB;
+  VirtualMemory vm(kSize);
+  CHECK(vm.IsReserved());
+  void* block_addr = vm.address();
+  CHECK(vm.Commit(block_addr, kBlockSize, VirtualMemory::NOT_EXECUTABLE));
+  // Check whether we can write to memory.
+  int* addr = static_cast<int*>(block_addr);
+  addr[5] = 2;
+  CHECK(vm.Uncommit(block_addr, kBlockSize));
+}
+
+
+TEST(Release) {
+  static const size_t kSize = 4 * KB;
+  VirtualMemory vm(kSize);
+  CHECK(vm.IsReserved());
+  CHECK_LE(kSize, vm.size());
+  CHECK_NE(NULL, vm.address());
+  vm.Release();
+  CHECK(!vm.IsReserved());
+}
+
+
+TEST(TakeControl) {
+  static const size_t kSize = 64 * KB;
+
+  VirtualMemory vm1(kSize);
+  size_t size1 = vm1.size();
+  CHECK(vm1.IsReserved());
+  CHECK_LE(kSize, size1);
+
+  VirtualMemory vm2;
+  CHECK(!vm2.IsReserved());
+
+  vm2.TakeControl(&vm1);
+  CHECK(vm2.IsReserved());
+  CHECK(!vm1.IsReserved());
+  CHECK(vm2.size() == size1);
+}
+
+
+TEST(AllocationGranularityIsPowerOf2) {
+  CHECK(IsPowerOf2(VirtualMemory::GetAllocationGranularity()));
+}
+
+
+TEST(PageSizeIsPowerOf2) {
+  CHECK(IsPowerOf2(VirtualMemory::GetPageSize()));
+}
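Taken together, these tests exercise the surface of the new class. Its interface, as reconstructed from the call sites in this change (the real header may differ in overloads and exact argument types), looks roughly like:

  class VirtualMemory {
   public:
    enum Executability { NOT_EXECUTABLE, EXECUTABLE };

    VirtualMemory();                      // no reservation
    explicit VirtualMemory(size_t size);  // reserves at least size bytes
    bool IsReserved();
    void* address();
    size_t size();
    bool Commit(void* address, size_t size, Executability executability);
    bool Uncommit(void* address, size_t size);
    void Release();
    void TakeControl(VirtualMemory* from);

    static size_t GetPageSize();
    static size_t GetAllocationGranularity();
    static void* AllocateRegion(size_t size, size_t* size_return,
                                Executability executability);
    static bool WriteProtectRegion(void* address, size_t size);
  };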
index 5d4933a..c7417fd 100644 (file)
         '../../src/platform/semaphore.h',
         '../../src/platform/socket.cc',
         '../../src/platform/socket.h',
+        '../../src/platform/virtual-memory.cc',
+        '../../src/platform/virtual-memory.h',
         '../../src/preparse-data-format.h',
         '../../src/preparse-data.cc',
         '../../src/preparse-data.h',