From 718a6a9a9ed22a45d7cf36f32675a438b8ed3040 Mon Sep 17 00:00:00 2001
From: "mstarzinger@chromium.org"
Date: Wed, 11 Sep 2013 18:30:01 +0000
Subject: [PATCH] Revert r16648, r16641, r16638 and r16637.

Original descriptions were:
- "Refactor and cleanup VirtualMemory."
- "Fix typo."
- "Deuglify V8_INLINE and V8_NOINLINE."
- "Don't align size on allocation granularity for unaligned ReserveRegion calls."

Reasons for the revert are:
- Our mjsunit test suite slower by a factor of 5(!) in release mode.
- Flaky cctest/test-alloc/CodeRange on all architectures and platforms.
- Tankage of Sunspider by about 6% overall (unverified).

TBR=bmeurer@chromium.org

Review URL: https://codereview.chromium.org/23970004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16662 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 include/v8.h                            | 467 +++++++++++++++--------------
 include/v8config.h                      |  41 +--
 src/arm/codegen-arm.cc                  |  21 +-
 src/cpu.cc                              |   2 +-
 src/deoptimizer.cc                      |  12 +-
 src/globals.h                           |   6 +-
 src/heap-inl.h                          |   7 +-
 src/heap.cc                             |  29 +-
 src/ia32/codegen-ia32.cc                |  32 +-
 src/incremental-marking.cc              |   2 +-
 src/mips/codegen-mips.cc                |   7 +-
 src/platform-cygwin.cc                  | 140 ++++++++-
 src/platform-freebsd.cc                 | 153 ++++++++++
 src/platform-linux.cc                   | 182 +++++++++--
 src/platform-macos.cc                   | 167 ++++++++++-
 src/platform-openbsd.cc                 | 154 ++++++++++
 src/platform-posix.cc                   | 102 +++++++
 src/platform-solaris.cc                 | 153 ++++++++++
 src/platform-win32.cc                   | 236 +++++++++++++++
 src/platform.h                          | 121 ++++++++
 src/platform/elapsed-timer.h            |   2 +-
 src/platform/mutex.cc                   |  24 +-
 src/platform/mutex.h                    |   4 +-
 src/platform/socket.h                   |   2 +-
 src/platform/virtual-memory.cc          | 513 --------------------------------
 src/platform/virtual-memory.h           | 211 -------------
 src/spaces-inl.h                        |  34 ++-
 src/spaces.cc                           | 158 +++++-----
 src/spaces.h                            |  68 ++---
 src/store-buffer.cc                     |   9 +-
 src/utils/random-number-generator.h     |   4 +-
 src/v8globals.h                         |   2 +
 src/x64/codegen-x64.cc                  |  29 +-
 test/cctest/cctest.gyp                  |   1 -
 test/cctest/test-api.cc                 |   4 +-
 test/cctest/test-assembler-x64.cc       |  78 +++--
 test/cctest/test-code-stubs-arm.cc      |   7 +-
 test/cctest/test-code-stubs-ia32.cc     |   7 +-
 test/cctest/test-code-stubs-x64.cc      |   7 +-
 test/cctest/test-macro-assembler-x64.cc | 214 +++++++------
 test/cctest/test-platform-linux.cc      |  14 +
 test/cctest/test-platform-win32.cc      |  14 +
 test/cctest/test-spaces.cc              |  38 ++-
 test/cctest/test-virtual-memory.cc      |  86 ------
 tools/gyp/v8.gyp                        |   2 -
 45 files changed, 2077 insertions(+), 1489 deletions(-)
 delete mode 100644 src/platform/virtual-memory.cc
 delete mode 100644 src/platform/virtual-memory.h
 delete mode 100644 test/cctest/test-virtual-memory.cc

diff --git a/include/v8.h b/include/v8.h
index 7ffddea..d98a768 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -211,7 +211,7 @@ template class Handle {
 /** * Creates an empty handle. */
- V8_INLINE Handle() : val_(0) {}
+ V8_INLINE(Handle()) : val_(0) {}
 /** * Creates a handle for the contents of the specified handle. This *
@@ -223,7 +223,7 @@ template class Handle {
 * Handle to a variable declared as Handle, is legal * because String is a subclass of Value. */
- template V8_INLINE Handle(Handle that)
+ template V8_INLINE(Handle(Handle that))
 : val_(reinterpret_cast(*that)) {
 /** * This check fails when trying to convert between incompatible * handles. */
 TYPE_CHECK(T, S); }
 /** * Returns true if the handle is empty. */
- V8_INLINE bool IsEmpty() const { return val_ == 0; }
+ V8_INLINE(bool IsEmpty() const) { return val_ == 0; }
 /** * Sets the handle to be empty. IsEmpty() will then return true.
*/ - V8_INLINE void Clear() { val_ = 0; } + V8_INLINE(void Clear()) { val_ = 0; } - V8_INLINE T* operator->() const { return val_; } + V8_INLINE(T* operator->() const) { return val_; } - V8_INLINE T* operator*() const { return val_; } + V8_INLINE(T* operator*() const) { return val_; } /** * Checks whether two handles are the same. @@ -253,7 +253,7 @@ template class Handle { * to which they refer are identical. * The handles' references are not checked. */ - template V8_INLINE bool operator==(const Handle& that) const { + template V8_INLINE(bool operator==(const Handle& that) const) { internal::Object** a = reinterpret_cast(**this); internal::Object** b = reinterpret_cast(*that); if (a == 0) return b == 0; @@ -261,8 +261,8 @@ template class Handle { return *a == *b; } - template V8_INLINE bool operator==( - const Persistent& that) const { + template V8_INLINE( + bool operator==(const Persistent& that) const) { internal::Object** a = reinterpret_cast(**this); internal::Object** b = reinterpret_cast(*that); if (a == 0) return b == 0; @@ -276,16 +276,16 @@ template class Handle { * the objects to which they refer are different. * The handles' references are not checked. */ - template V8_INLINE bool operator!=(const Handle& that) const { + template V8_INLINE(bool operator!=(const Handle& that) const) { return !operator==(that); } - template V8_INLINE bool operator!=( - const Persistent& that) const { + template V8_INLINE( + bool operator!=(const Persistent& that) const) { return !operator==(that); } - template V8_INLINE static Handle Cast(Handle that) { + template V8_INLINE(static Handle Cast(Handle that)) { #ifdef V8_ENABLE_CHECKS // If we're going to perform the type check then we have to check // that the handle isn't empty before doing the checked cast. @@ -294,14 +294,14 @@ template class Handle { return Handle(T::Cast(*that)); } - template V8_INLINE Handle As() { + template V8_INLINE(Handle As()) { return Handle::Cast(*this); } - V8_INLINE static Handle New(Isolate* isolate, Handle that) { + V8_INLINE(static Handle New(Isolate* isolate, Handle that)) { return New(isolate, that.val_); } - V8_INLINE static Handle New(Isolate* isolate, const Persistent& that) { + V8_INLINE(static Handle New(Isolate* isolate, const Persistent& that)) { return New(isolate, that.val_); } @@ -312,7 +312,7 @@ template class Handle { /** * Creates a new handle for the specified value. */ - V8_INLINE explicit Handle(T* val) : val_(val) {} + V8_INLINE(explicit Handle(T* val)) : val_(val) {} private: friend class Utils; @@ -328,7 +328,7 @@ template class Handle { friend class Context; friend class HandleScope; - V8_INLINE static Handle New(Isolate* isolate, T* that); + V8_INLINE(static Handle New(Isolate* isolate, T* that)); T* val_; }; @@ -343,8 +343,8 @@ template class Handle { */ template class Local : public Handle { public: - V8_INLINE Local(); - template V8_INLINE Local(Local that) + V8_INLINE(Local()); + template V8_INLINE(Local(Local that)) : Handle(reinterpret_cast(*that)) { /** * This check fails when trying to convert between incompatible @@ -355,7 +355,7 @@ template class Local : public Handle { } - template V8_INLINE static Local Cast(Local that) { + template V8_INLINE(static Local Cast(Local that)) { #ifdef V8_ENABLE_CHECKS // If we're going to perform the type check then we have to check // that the handle isn't empty before doing the checked cast. 
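The v8.h hunks above and below all make the same mechanical change: they restore the parenthesized V8_INLINE(declarator) form, in which the whole declaration is passed to the macro as an argument. A minimal sketch of the two styles, reusing the GetZero example from the doc comment this patch removes from include/v8config.h (expansion shown for the always_inline case defined later in the patch):

    // Restored (pre-r16638) style: the declarator is a macro argument.
    V8_INLINE(int GetZero()) { return 0; }
    // expands to:
    inline __attribute__((always_inline)) int GetZero() { return 0; }

    // Reverted style: V8_INLINE expands to a bare specifier.
    V8_INLINE int GetZero() { return 0; }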
@@ -363,12 +363,12 @@ template class Local : public Handle { #endif return Local(T::Cast(*that)); } - template V8_INLINE Local(Handle that) + template V8_INLINE(Local(Handle that)) : Handle(reinterpret_cast(*that)) { TYPE_CHECK(T, S); } - template V8_INLINE Local As() { + template V8_INLINE(Local As()) { return Local::Cast(*this); } @@ -377,17 +377,17 @@ template class Local : public Handle { * The referee is kept alive by the local handle even when * the original handle is destroyed/disposed. */ - V8_INLINE static Local New(Handle that); - V8_INLINE static Local New(Isolate* isolate, Handle that); + V8_INLINE(static Local New(Handle that)); + V8_INLINE(static Local New(Isolate* isolate, Handle that)); template - V8_INLINE static Local New(Isolate* isolate, - const Persistent& that); + V8_INLINE(static Local New(Isolate* isolate, + const Persistent& that)); #ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR private: #endif - template V8_INLINE Local(S* that) : Handle(that) { } + template V8_INLINE(Local(S* that) : Handle(that)) { } private: friend class Utils; @@ -402,22 +402,24 @@ template class Local : public Handle { template friend class internal::CustomArguments; friend class HandleScope; - V8_INLINE static Local New(Isolate* isolate, T* that); + V8_INLINE(static Local New(Isolate* isolate, T* that)); }; // Eternal handles are set-once handles that live for the life of the isolate. template class Eternal { public: - V8_INLINE Eternal() : index_(kInitialValue) { } + V8_INLINE(Eternal()) : index_(kInitialValue) { } template - V8_INLINE Eternal(Isolate* isolate, Local handle) : index_(kInitialValue) { + V8_INLINE(Eternal(Isolate* isolate, Local handle)) + : index_(kInitialValue) { Set(isolate, handle); } // Can only be safely called if already set. - V8_INLINE Local Get(Isolate* isolate); - V8_INLINE bool IsEmpty() { return index_ == kInitialValue; } - template V8_INLINE void Set(Isolate* isolate, Local handle); + V8_INLINE(Local Get(Isolate* isolate)); + V8_INLINE(bool IsEmpty()) { return index_ == kInitialValue; } + template + V8_INLINE(void Set(Isolate* isolate, Local handle)); private: static const int kInitialValue = -1; @@ -430,9 +432,9 @@ class WeakCallbackData { public: typedef void (*Callback)(const WeakCallbackData& data); - V8_INLINE Isolate* GetIsolate() const { return isolate_; } - V8_INLINE Local GetValue() const { return handle_; } - V8_INLINE P* GetParameter() const { return parameter_; } + V8_INLINE(Isolate* GetIsolate()) const { return isolate_; } + V8_INLINE(Local GetValue()) const { return handle_; } + V8_INLINE(P* GetParameter()) const { return parameter_; } private: friend class internal::GlobalHandles; @@ -468,12 +470,13 @@ class NonCopyablePersistentTraits { typedef Persistent > NonCopyablePersistent; static const bool kResetInDestructor = false; template - V8_INLINE static void Copy(const Persistent& source, - NonCopyablePersistent* dest) { + V8_INLINE(static void Copy(const Persistent& source, + NonCopyablePersistent* dest)) { Uncompilable(); } // TODO(dcarney): come up with a good compile error here. - template V8_INLINE static void Uncompilable() { + template + V8_INLINE(static void Uncompilable()) { TYPE_CHECK(O, Primitive); } }; @@ -499,13 +502,13 @@ template class Persistent { /** * A Persistent with no storage cell. */ - V8_INLINE Persistent() : val_(0) { } + V8_INLINE(Persistent()) : val_(0) { } /** * Construct a Persistent from a Handle. * When the Handle is non-empty, a new storage cell is created * pointing to the same object, and no flags are set. 
*/ - template V8_INLINE Persistent(Isolate* isolate, Handle that) + template V8_INLINE(Persistent(Isolate* isolate, Handle that)) : val_(New(isolate, *that)) { TYPE_CHECK(T, S); } @@ -515,7 +518,7 @@ template class Persistent { * pointing to the same object, and no flags are set. */ template - V8_INLINE Persistent(Isolate* isolate, const Persistent& that) + V8_INLINE(Persistent(Isolate* isolate, const Persistent& that)) : val_(New(isolate, *that)) { TYPE_CHECK(T, S); } @@ -525,19 +528,19 @@ template class Persistent { * traits class is called, allowing the setting of flags based on the * copied Persistent. */ - V8_INLINE Persistent(const Persistent& that) : val_(0) { + V8_INLINE(Persistent(const Persistent& that)) : val_(0) { Copy(that); } template - V8_INLINE Persistent(const Persistent& that) : val_(0) { + V8_INLINE(Persistent(const Persistent& that)) : val_(0) { Copy(that); } - V8_INLINE Persistent& operator=(const Persistent& that) { // NOLINT + V8_INLINE(Persistent& operator=(const Persistent& that)) { // NOLINT Copy(that); return *this; } template - V8_INLINE Persistent& operator=(const Persistent& that) { // NOLINT + V8_INLINE(Persistent& operator=(const Persistent& that)) { // NOLINT Copy(that); return *this; } @@ -546,7 +549,7 @@ template class Persistent { * kResetInDestructor flags in the traits class. Since not calling dispose * can result in a memory leak, it is recommended to always set this flag. */ - V8_INLINE ~Persistent() { + V8_INLINE(~Persistent()) { if (M::kResetInDestructor) Reset(); } @@ -554,28 +557,28 @@ template class Persistent { * If non-empty, destroy the underlying storage cell * IsEmpty() will return true after this call. */ - V8_INLINE void Reset(); + V8_INLINE(void Reset()); + template /** * If non-empty, destroy the underlying storage cell * and create a new one with the contents of other if other is non empty */ - template - V8_INLINE void Reset(Isolate* isolate, const Handle& other); + V8_INLINE(void Reset(Isolate* isolate, const Handle& other)); /** * If non-empty, destroy the underlying storage cell * and create a new one with the contents of other if other is non empty */ template - V8_INLINE void Reset(Isolate* isolate, const Persistent& other); + V8_INLINE(void Reset(Isolate* isolate, const Persistent& other)); // TODO(dcarney): deprecate - V8_INLINE void Dispose() { Reset(); } - V8_DEPRECATED(V8_INLINE void Dispose(Isolate* isolate)) { Reset(); } + V8_INLINE(void Dispose()) { Reset(); } + V8_DEPRECATED(V8_INLINE(void Dispose(Isolate* isolate))) { Reset(); } - V8_INLINE bool IsEmpty() const { return val_ == 0; } + V8_INLINE(bool IsEmpty() const) { return val_ == 0; } // TODO(dcarney): this is pretty useless, fix or remove template - V8_INLINE static Persistent& Cast(Persistent& that) { // NOLINT + V8_INLINE(static Persistent& Cast(Persistent& that)) { // NOLINT #ifdef V8_ENABLE_CHECKS // If we're going to perform the type check then we have to check // that the handle isn't empty before doing the checked cast. 
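The Persistent hunks in this region likewise only re-wrap declarations in V8_INLINE(...); the API itself is unchanged by the revert. A hedged usage sketch of the Reset overloads declared above (holder, Remember, and Forget are invented names for illustration only):

    v8::Persistent<v8::Object> holder;

    void Remember(v8::Isolate* isolate, v8::Handle<v8::Object> obj) {
      holder.Reset(isolate, obj);  // creates a new storage cell for obj
    }

    void Forget() {
      holder.Reset();  // destroys the cell; holder.IsEmpty() is now true
    }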
@@ -585,12 +588,12 @@ template class Persistent { } // TODO(dcarney): this is pretty useless, fix or remove - template V8_INLINE Persistent& As() { // NOLINT + template V8_INLINE(Persistent& As()) { // NOLINT return Persistent::Cast(*this); } - template - V8_INLINE bool operator==(const Persistent& that) const { + template V8_INLINE( + bool operator==(const Persistent& that) const) { internal::Object** a = reinterpret_cast(**this); internal::Object** b = reinterpret_cast(*that); if (a == 0) return b == 0; @@ -598,7 +601,7 @@ template class Persistent { return *a == *b; } - template V8_INLINE bool operator==(const Handle& that) const { + template V8_INLINE(bool operator==(const Handle& that) const) { internal::Object** a = reinterpret_cast(**this); internal::Object** b = reinterpret_cast(*that); if (a == 0) return b == 0; @@ -606,40 +609,40 @@ template class Persistent { return *a == *b; } - template - V8_INLINE bool operator!=(const Persistent& that) const { + template V8_INLINE( + bool operator!=(const Persistent& that) const) { return !operator==(that); } - template V8_INLINE bool operator!=(const Handle& that) const { + template V8_INLINE(bool operator!=(const Handle& that) const) { return !operator==(that); } template - V8_INLINE void SetWeak( + V8_INLINE(void SetWeak( P* parameter, - typename WeakCallbackData::Callback callback); + typename WeakCallbackData::Callback callback)); template - V8_INLINE void SetWeak( + V8_INLINE(void SetWeak( P* parameter, - typename WeakCallbackData::Callback callback); + typename WeakCallbackData::Callback callback)); // TODO(dcarney): deprecate template - V8_INLINE void MakeWeak( + V8_INLINE(void MakeWeak( P* parameter, - typename WeakReferenceCallbacks::Revivable callback); + typename WeakReferenceCallbacks::Revivable callback)); // TODO(dcarney): deprecate template - V8_INLINE void MakeWeak( + V8_INLINE(void MakeWeak( P* parameter, - typename WeakReferenceCallbacks::Revivable callback); + typename WeakReferenceCallbacks::Revivable callback)); - V8_INLINE void ClearWeak(); + V8_INLINE(void ClearWeak()); - V8_DEPRECATED(V8_INLINE void ClearWeak(Isolate* isolate)) { ClearWeak(); } + V8_DEPRECATED(V8_INLINE(void ClearWeak(Isolate* isolate))) { ClearWeak(); } /** * Marks the reference to this object independent. Garbage collector is free @@ -647,9 +650,9 @@ template class Persistent { * independent handle should not assume that it will be preceded by a global * GC prologue callback or followed by a global GC epilogue callback. */ - V8_INLINE void MarkIndependent(); + V8_INLINE(void MarkIndependent()); - V8_DEPRECATED(V8_INLINE void MarkIndependent(Isolate* isolate)) { + V8_DEPRECATED(V8_INLINE(void MarkIndependent(Isolate* isolate))) { MarkIndependent(); } @@ -661,29 +664,29 @@ template class Persistent { * external dependencies. This mark is automatically cleared after each * garbage collection. */ - V8_INLINE void MarkPartiallyDependent(); + V8_INLINE(void MarkPartiallyDependent()); - V8_DEPRECATED(V8_INLINE void MarkPartiallyDependent(Isolate* isolate)) { + V8_DEPRECATED(V8_INLINE(void MarkPartiallyDependent(Isolate* isolate))) { MarkPartiallyDependent(); } - V8_INLINE bool IsIndependent() const; + V8_INLINE(bool IsIndependent() const); - V8_DEPRECATED(V8_INLINE bool IsIndependent(Isolate* isolate) const) { + V8_DEPRECATED(V8_INLINE(bool IsIndependent(Isolate* isolate)) const) { return IsIndependent(); } /** Checks if the handle holds the only reference to an object. 
*/ - V8_INLINE bool IsNearDeath() const; + V8_INLINE(bool IsNearDeath() const); - V8_DEPRECATED(V8_INLINE bool IsNearDeath(Isolate* isolate) const) { + V8_DEPRECATED(V8_INLINE(bool IsNearDeath(Isolate* isolate)) const) { return IsNearDeath(); } /** Returns true if the handle's reference is weak. */ - V8_INLINE bool IsWeak() const; + V8_INLINE(bool IsWeak() const); - V8_DEPRECATED(V8_INLINE bool IsWeak(Isolate* isolate) const) { + V8_DEPRECATED(V8_INLINE(bool IsWeak(Isolate* isolate)) const) { return IsWeak(); } @@ -691,10 +694,10 @@ template class Persistent { * Assigns a wrapper class ID to the handle. See RetainedObjectInfo interface * description in v8-profiler.h for details. */ - V8_INLINE void SetWrapperClassId(uint16_t class_id); + V8_INLINE(void SetWrapperClassId(uint16_t class_id)); V8_DEPRECATED( - V8_INLINE void SetWrapperClassId(Isolate * isolate, uint16_t class_id)) { + V8_INLINE(void SetWrapperClassId(Isolate * isolate, uint16_t class_id))) { SetWrapperClassId(class_id); } @@ -702,26 +705,26 @@ template class Persistent { * Returns the class ID previously assigned to this handle or 0 if no class ID * was previously assigned. */ - V8_INLINE uint16_t WrapperClassId() const; + V8_INLINE(uint16_t WrapperClassId() const); - V8_DEPRECATED(V8_INLINE uint16_t WrapperClassId(Isolate* isolate) const) { + V8_DEPRECATED(V8_INLINE(uint16_t WrapperClassId(Isolate* isolate)) const) { return WrapperClassId(); } // TODO(dcarney): remove - V8_INLINE T* ClearAndLeak(); + V8_INLINE(T* ClearAndLeak()); // TODO(dcarney): remove - V8_INLINE void Clear() { val_ = 0; } + V8_INLINE(void Clear()) { val_ = 0; } // TODO(dcarney): remove #ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR private: #endif - template V8_INLINE Persistent(S* that) : val_(that) { } + template V8_INLINE(Persistent(S* that)) : val_(that) { } - V8_INLINE T* operator*() const { return val_; } + V8_INLINE(T* operator*() const) { return val_; } private: friend class Utils; @@ -730,9 +733,9 @@ template class Persistent { template friend class Persistent; template friend class ReturnValue; - V8_INLINE static T* New(Isolate* isolate, T* that); + V8_INLINE(static T* New(Isolate* isolate, T* that)); template - V8_INLINE void Copy(const Persistent& that); + V8_INLINE(void Copy(const Persistent& that)); T* val_; }; @@ -792,7 +795,7 @@ class V8_EXPORT HandleScope { internal::Object** next; internal::Object** limit; int level; - V8_INLINE void Initialize() { + V8_INLINE(void Initialize()) { next = limit = NULL; level = 0; } @@ -901,19 +904,19 @@ class V8_EXPORT ScriptData { // NOLINT */ class ScriptOrigin { public: - V8_INLINE ScriptOrigin( + V8_INLINE(ScriptOrigin( Handle resource_name, Handle resource_line_offset = Handle(), Handle resource_column_offset = Handle(), - Handle resource_is_shared_cross_origin = Handle()) + Handle resource_is_shared_cross_origin = Handle())) : resource_name_(resource_name), resource_line_offset_(resource_line_offset), resource_column_offset_(resource_column_offset), resource_is_shared_cross_origin_(resource_is_shared_cross_origin) { } - V8_INLINE Handle ResourceName() const; - V8_INLINE Handle ResourceLineOffset() const; - V8_INLINE Handle ResourceColumnOffset() const; - V8_INLINE Handle ResourceIsSharedCrossOrigin() const; + V8_INLINE(Handle ResourceName() const); + V8_INLINE(Handle ResourceLineOffset() const); + V8_INLINE(Handle ResourceColumnOffset() const); + V8_INLINE(Handle ResourceIsSharedCrossOrigin() const); private: Handle resource_name_; Handle resource_line_offset_; @@ -1254,13 +1257,13 @@ class 
V8_EXPORT Value : public Data { * Returns true if this value is the undefined value. See ECMA-262 * 4.3.10. */ - V8_INLINE bool IsUndefined() const; + V8_INLINE(bool IsUndefined() const); /** * Returns true if this value is the null value. See ECMA-262 * 4.3.11. */ - V8_INLINE bool IsNull() const; + V8_INLINE(bool IsNull() const); /** * Returns true if this value is true. @@ -1276,7 +1279,7 @@ class V8_EXPORT Value : public Data { * Returns true if this value is an instance of the String type. * See ECMA-262 8.4. */ - V8_INLINE bool IsString() const; + V8_INLINE(bool IsString() const); /** * Returns true if this value is a symbol. @@ -1464,12 +1467,12 @@ class V8_EXPORT Value : public Data { bool Equals(Handle that) const; bool StrictEquals(Handle that) const; - template V8_INLINE static Value* Cast(T* value); + template V8_INLINE(static Value* Cast(T* value)); private: - V8_INLINE bool QuickIsUndefined() const; - V8_INLINE bool QuickIsNull() const; - V8_INLINE bool QuickIsString() const; + V8_INLINE(bool QuickIsUndefined() const); + V8_INLINE(bool QuickIsNull() const); + V8_INLINE(bool QuickIsString() const); bool FullIsUndefined() const; bool FullIsNull() const; bool FullIsString() const; @@ -1489,7 +1492,7 @@ class V8_EXPORT Primitive : public Value { }; class V8_EXPORT Boolean : public Primitive { public: bool Value() const; - V8_INLINE static Handle New(bool value); + V8_INLINE(static Handle New(bool value)); }; @@ -1518,7 +1521,7 @@ class V8_EXPORT String : public Primitive { /** * This function is no longer useful. */ - V8_DEPRECATED(V8_INLINE bool MayContainNonAscii() const) { return true; } + V8_DEPRECATED(V8_INLINE(bool MayContainNonAscii()) const) { return true; } /** * Returns whether this string is known to contain only one byte data. @@ -1590,7 +1593,7 @@ class V8_EXPORT String : public Primitive { * A zero length string. */ static v8::Local Empty(); - V8_INLINE static v8::Local Empty(Isolate* isolate); + V8_INLINE(static v8::Local Empty(Isolate* isolate)); /** * Returns true if the string is external @@ -1688,14 +1691,14 @@ class V8_EXPORT String : public Primitive { * regardless of the encoding, otherwise return NULL. The encoding of the * string is returned in encoding_out. */ - V8_INLINE ExternalStringResourceBase* GetExternalStringResourceBase( - Encoding* encoding_out) const; + V8_INLINE(ExternalStringResourceBase* GetExternalStringResourceBase( + Encoding* encoding_out) const); /** * Get the ExternalStringResource for an external string. Returns * NULL if IsExternal() doesn't return true. */ - V8_INLINE ExternalStringResource* GetExternalStringResource() const; + V8_INLINE(ExternalStringResource* GetExternalStringResource() const); /** * Get the ExternalAsciiStringResource for an external ASCII string. @@ -1703,7 +1706,7 @@ class V8_EXPORT String : public Primitive { */ const ExternalAsciiStringResource* GetExternalAsciiStringResource() const; - V8_INLINE static String* Cast(v8::Value* obj); + V8_INLINE(static String* Cast(v8::Value* obj)); // TODO(dcarney): deprecate /** @@ -1711,18 +1714,18 @@ class V8_EXPORT String : public Primitive { * The second parameter 'length' gives the buffer length. If omitted, * the function calls 'strlen' to determine the buffer length. 
*/ - V8_INLINE static Local New(const char* data, int length = -1); + V8_INLINE(static Local New(const char* data, int length = -1)); // TODO(dcarney): deprecate /** Allocates a new string from 16-bit character codes.*/ - V8_INLINE static Local New(const uint16_t* data, int length = -1); + V8_INLINE(static Local New(const uint16_t* data, int length = -1)); // TODO(dcarney): deprecate /** * Creates an internalized string (historically called a "symbol", * not to be confused with ES6 symbols). Returns one if it exists already. */ - V8_INLINE static Local NewSymbol(const char* data, int length = -1); + V8_INLINE(static Local NewSymbol(const char* data, int length = -1)); enum NewStringType { kNormalString, kInternalizedString, kUndetectableString @@ -1803,13 +1806,13 @@ class V8_EXPORT String : public Primitive { // TODO(dcarney): deprecate /** Creates an undetectable string from the supplied ASCII or UTF-8 data.*/ - V8_INLINE static Local NewUndetectable(const char* data, - int length = -1); + V8_INLINE( + static Local NewUndetectable(const char* data, int length = -1)); // TODO(dcarney): deprecate /** Creates an undetectable string from the supplied 16-bit character codes.*/ - V8_INLINE static Local NewUndetectable(const uint16_t* data, - int length = -1); + V8_INLINE(static Local NewUndetectable( + const uint16_t* data, int length = -1)); /** * Converts an object to a UTF-8-encoded character array. Useful if @@ -1904,7 +1907,7 @@ class V8_EXPORT Symbol : public Primitive { // Create a symbol with a print name. static Local New(Isolate *isolate, const char* data, int length = -1); - V8_INLINE static Symbol* Cast(v8::Value* obj); + V8_INLINE(static Symbol* Cast(v8::Value* obj)); private: Symbol(); static void CheckCast(v8::Value* obj); @@ -1919,7 +1922,7 @@ class V8_EXPORT Number : public Primitive { double Value() const; static Local New(double value); static Local New(Isolate* isolate, double value); - V8_INLINE static Number* Cast(v8::Value* obj); + V8_INLINE(static Number* Cast(v8::Value* obj)); private: Number(); static void CheckCast(v8::Value* obj); @@ -1936,7 +1939,7 @@ class V8_EXPORT Integer : public Number { static Local New(int32_t value, Isolate*); static Local NewFromUnsigned(uint32_t value, Isolate*); int64_t Value() const; - V8_INLINE static Integer* Cast(v8::Value* obj); + V8_INLINE(static Integer* Cast(v8::Value* obj)); private: Integer(); static void CheckCast(v8::Value* obj); @@ -2137,7 +2140,7 @@ class V8_EXPORT Object : public Value { int InternalFieldCount(); /** Gets the value from an internal field. */ - V8_INLINE Local GetInternalField(int index); + V8_INLINE(Local GetInternalField(int index)); /** Sets the value in an internal field. */ void SetInternalField(int index, Handle value); @@ -2147,7 +2150,7 @@ class V8_EXPORT Object : public Value { * must have been set by SetAlignedPointerInInternalField, everything else * leads to undefined behavior. */ - V8_INLINE void* GetAlignedPointerFromInternalField(int index); + V8_INLINE(void* GetAlignedPointerFromInternalField(int index)); /** * Sets a 2-byte-aligned native pointer in an internal field. 
To retrieve such @@ -2277,7 +2280,7 @@ class V8_EXPORT Object : public Value { Local CallAsConstructor(int argc, Handle argv[]); static Local New(); - V8_INLINE static Object* Cast(Value* obj); + V8_INLINE(static Object* Cast(Value* obj)); private: Object(); @@ -2306,7 +2309,7 @@ class V8_EXPORT Array : public Object { */ static Local New(int length = 0); - V8_INLINE static Array* Cast(Value* obj); + V8_INLINE(static Array* Cast(Value* obj)); private: Array(); static void CheckCast(Value* obj); @@ -2316,31 +2319,31 @@ class V8_EXPORT Array : public Object { template class ReturnValue { public: - template V8_INLINE ReturnValue(const ReturnValue& that) + template V8_INLINE(ReturnValue(const ReturnValue& that)) : value_(that.value_) { TYPE_CHECK(T, S); } // Handle setters - template V8_INLINE void Set(const Persistent& handle); - template V8_INLINE void Set(const Handle handle); + template V8_INLINE(void Set(const Persistent& handle)); + template V8_INLINE(void Set(const Handle handle)); // Fast primitive setters - V8_INLINE void Set(bool value); - V8_INLINE void Set(double i); - V8_INLINE void Set(int32_t i); - V8_INLINE void Set(uint32_t i); + V8_INLINE(void Set(bool value)); + V8_INLINE(void Set(double i)); + V8_INLINE(void Set(int32_t i)); + V8_INLINE(void Set(uint32_t i)); // Fast JS primitive setters - V8_INLINE void SetNull(); - V8_INLINE void SetUndefined(); - V8_INLINE void SetEmptyString(); + V8_INLINE(void SetNull()); + V8_INLINE(void SetUndefined()); + V8_INLINE(void SetEmptyString()); // Convenience getter for Isolate - V8_INLINE Isolate* GetIsolate(); + V8_INLINE(Isolate* GetIsolate()); private: template friend class ReturnValue; template friend class FunctionCallbackInfo; template friend class PropertyCallbackInfo; - V8_INLINE internal::Object* GetDefaultValue(); - V8_INLINE explicit ReturnValue(internal::Object** slot); + V8_INLINE(internal::Object* GetDefaultValue()); + V8_INLINE(explicit ReturnValue(internal::Object** slot)); internal::Object** value_; }; @@ -2354,15 +2357,15 @@ class ReturnValue { template class FunctionCallbackInfo { public: - V8_INLINE int Length() const; - V8_INLINE Local operator[](int i) const; - V8_INLINE Local Callee() const; - V8_INLINE Local This() const; - V8_INLINE Local Holder() const; - V8_INLINE bool IsConstructCall() const; - V8_INLINE Local Data() const; - V8_INLINE Isolate* GetIsolate() const; - V8_INLINE ReturnValue GetReturnValue() const; + V8_INLINE(int Length() const); + V8_INLINE(Local operator[](int i) const); + V8_INLINE(Local Callee() const); + V8_INLINE(Local This() const); + V8_INLINE(Local Holder() const); + V8_INLINE(bool IsConstructCall() const); + V8_INLINE(Local Data() const); + V8_INLINE(Isolate* GetIsolate() const); + V8_INLINE(ReturnValue GetReturnValue() const); // This shouldn't be public, but the arm compiler needs it. 
static const int kArgsLength = 6; @@ -2376,10 +2379,10 @@ class FunctionCallbackInfo { static const int kCalleeIndex = -4; static const int kHolderIndex = -5; - V8_INLINE FunctionCallbackInfo(internal::Object** implicit_args, + V8_INLINE(FunctionCallbackInfo(internal::Object** implicit_args, internal::Object** values, int length, - bool is_construct_call); + bool is_construct_call)); internal::Object** implicit_args_; internal::Object** values_; int length_; @@ -2394,11 +2397,11 @@ class FunctionCallbackInfo { template class PropertyCallbackInfo { public: - V8_INLINE Isolate* GetIsolate() const; - V8_INLINE Local Data() const; - V8_INLINE Local This() const; - V8_INLINE Local Holder() const; - V8_INLINE ReturnValue GetReturnValue() const; + V8_INLINE(Isolate* GetIsolate() const); + V8_INLINE(Local Data() const); + V8_INLINE(Local This() const); + V8_INLINE(Local Holder() const); + V8_INLINE(ReturnValue GetReturnValue() const); // This shouldn't be public, but the arm compiler needs it. static const int kArgsLength = 6; @@ -2413,7 +2416,8 @@ class PropertyCallbackInfo { static const int kReturnValueDefaultValueIndex = -4; static const int kIsolateIndex = -5; - V8_INLINE PropertyCallbackInfo(internal::Object** args) : args_(args) {} + V8_INLINE(PropertyCallbackInfo(internal::Object** args)) + : args_(args) { } internal::Object** args_; }; @@ -2472,7 +2476,7 @@ class V8_EXPORT Function : public Object { int ScriptId() const; ScriptOrigin GetScriptOrigin() const; - V8_INLINE static Function* Cast(Value* obj); + V8_INLINE(static Function* Cast(Value* obj)); static const int kLineOffsetNotFound; private: @@ -2591,7 +2595,7 @@ class V8_EXPORT ArrayBuffer : public Object { */ Contents Externalize(); - V8_INLINE static ArrayBuffer* Cast(Value* obj); + V8_INLINE(static ArrayBuffer* Cast(Value* obj)); static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT; @@ -2632,7 +2636,7 @@ class V8_EXPORT ArrayBufferView : public Object { */ void* BaseAddress(); - V8_INLINE static ArrayBufferView* Cast(Value* obj); + V8_INLINE(static ArrayBufferView* Cast(Value* obj)); static const int kInternalFieldCount = V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT; @@ -2656,7 +2660,7 @@ class V8_EXPORT TypedArray : public ArrayBufferView { */ size_t Length(); - V8_INLINE static TypedArray* Cast(Value* obj); + V8_INLINE(static TypedArray* Cast(Value* obj)); private: TypedArray(); @@ -2672,7 +2676,7 @@ class V8_EXPORT Uint8Array : public TypedArray { public: static Local New(Handle array_buffer, size_t byte_offset, size_t length); - V8_INLINE static Uint8Array* Cast(Value* obj); + V8_INLINE(static Uint8Array* Cast(Value* obj)); private: Uint8Array(); @@ -2688,7 +2692,7 @@ class V8_EXPORT Uint8ClampedArray : public TypedArray { public: static Local New(Handle array_buffer, size_t byte_offset, size_t length); - V8_INLINE static Uint8ClampedArray* Cast(Value* obj); + V8_INLINE(static Uint8ClampedArray* Cast(Value* obj)); private: Uint8ClampedArray(); @@ -2703,7 +2707,7 @@ class V8_EXPORT Int8Array : public TypedArray { public: static Local New(Handle array_buffer, size_t byte_offset, size_t length); - V8_INLINE static Int8Array* Cast(Value* obj); + V8_INLINE(static Int8Array* Cast(Value* obj)); private: Int8Array(); @@ -2719,7 +2723,7 @@ class V8_EXPORT Uint16Array : public TypedArray { public: static Local New(Handle array_buffer, size_t byte_offset, size_t length); - V8_INLINE static Uint16Array* Cast(Value* obj); + V8_INLINE(static Uint16Array* Cast(Value* obj)); private: Uint16Array(); @@ -2735,7 +2739,7 @@ 
class V8_EXPORT Int16Array : public TypedArray { public: static Local New(Handle array_buffer, size_t byte_offset, size_t length); - V8_INLINE static Int16Array* Cast(Value* obj); + V8_INLINE(static Int16Array* Cast(Value* obj)); private: Int16Array(); @@ -2751,7 +2755,7 @@ class V8_EXPORT Uint32Array : public TypedArray { public: static Local New(Handle array_buffer, size_t byte_offset, size_t length); - V8_INLINE static Uint32Array* Cast(Value* obj); + V8_INLINE(static Uint32Array* Cast(Value* obj)); private: Uint32Array(); @@ -2767,7 +2771,7 @@ class V8_EXPORT Int32Array : public TypedArray { public: static Local New(Handle array_buffer, size_t byte_offset, size_t length); - V8_INLINE static Int32Array* Cast(Value* obj); + V8_INLINE(static Int32Array* Cast(Value* obj)); private: Int32Array(); @@ -2783,7 +2787,7 @@ class V8_EXPORT Float32Array : public TypedArray { public: static Local New(Handle array_buffer, size_t byte_offset, size_t length); - V8_INLINE static Float32Array* Cast(Value* obj); + V8_INLINE(static Float32Array* Cast(Value* obj)); private: Float32Array(); @@ -2799,7 +2803,7 @@ class V8_EXPORT Float64Array : public TypedArray { public: static Local New(Handle array_buffer, size_t byte_offset, size_t length); - V8_INLINE static Float64Array* Cast(Value* obj); + V8_INLINE(static Float64Array* Cast(Value* obj)); private: Float64Array(); @@ -2815,7 +2819,7 @@ class V8_EXPORT DataView : public ArrayBufferView { public: static Local New(Handle array_buffer, size_t byte_offset, size_t length); - V8_INLINE static DataView* Cast(Value* obj); + V8_INLINE(static DataView* Cast(Value* obj)); private: DataView(); @@ -2840,7 +2844,7 @@ class V8_EXPORT Date : public Object { */ double ValueOf() const; - V8_INLINE static Date* Cast(v8::Value* obj); + V8_INLINE(static Date* Cast(v8::Value* obj)); /** * Notification that the embedder has changed the time zone, @@ -2877,7 +2881,7 @@ class V8_EXPORT NumberObject : public Object { */ double ValueOf() const; - V8_INLINE static NumberObject* Cast(v8::Value* obj); + V8_INLINE(static NumberObject* Cast(v8::Value* obj)); private: static void CheckCast(v8::Value* obj); @@ -2900,7 +2904,7 @@ class V8_EXPORT BooleanObject : public Object { */ bool ValueOf() const; - V8_INLINE static BooleanObject* Cast(v8::Value* obj); + V8_INLINE(static BooleanObject* Cast(v8::Value* obj)); private: static void CheckCast(v8::Value* obj); @@ -2923,7 +2927,7 @@ class V8_EXPORT StringObject : public Object { */ Local ValueOf() const; - V8_INLINE static StringObject* Cast(v8::Value* obj); + V8_INLINE(static StringObject* Cast(v8::Value* obj)); private: static void CheckCast(v8::Value* obj); @@ -2948,7 +2952,7 @@ class V8_EXPORT SymbolObject : public Object { */ Local ValueOf() const; - V8_INLINE static SymbolObject* Cast(v8::Value* obj); + V8_INLINE(static SymbolObject* Cast(v8::Value* obj)); private: static void CheckCast(v8::Value* obj); @@ -2994,7 +2998,7 @@ class V8_EXPORT RegExp : public Object { */ Flags GetFlags() const; - V8_INLINE static RegExp* Cast(v8::Value* obj); + V8_INLINE(static RegExp* Cast(v8::Value* obj)); private: static void CheckCast(v8::Value* obj); @@ -3008,7 +3012,7 @@ class V8_EXPORT RegExp : public Object { class V8_EXPORT External : public Value { public: static Local New(void* value); - V8_INLINE static External* Cast(Value* obj); + V8_INLINE(static External* Cast(Value* obj)); void* Value() const; private: static void CheckCast(v8::Value* obj); @@ -3026,7 +3030,7 @@ class V8_EXPORT Template : public Data { /** Adds a property to each 
instance created by this template.*/ void Set(Handle name, Handle value, PropertyAttribute attributes = None); - V8_INLINE void Set(const char* name, Handle value); + V8_INLINE(void Set(const char* name, Handle value)); void SetAccessorProperty( Local name, @@ -3703,7 +3707,7 @@ void V8_EXPORT RegisterExtension(Extension* extension); */ class V8_EXPORT DeclareExtension { public: - V8_INLINE DeclareExtension(Extension* extension) { + V8_INLINE(DeclareExtension(Extension* extension)) { RegisterExtension(extension); } }; @@ -3717,10 +3721,10 @@ Handle V8_EXPORT Null(); Handle V8_EXPORT True(); Handle V8_EXPORT False(); -V8_INLINE Handle Undefined(Isolate* isolate); -V8_INLINE Handle Null(Isolate* isolate); -V8_INLINE Handle True(Isolate* isolate); -V8_INLINE Handle False(Isolate* isolate); +V8_INLINE(Handle Undefined(Isolate* isolate)); +V8_INLINE(Handle Null(Isolate* isolate)); +V8_INLINE(Handle True(Isolate* isolate)); +V8_INLINE(Handle False(Isolate* isolate)); /** @@ -3978,13 +3982,13 @@ class V8_EXPORT Isolate { /** * Associate embedder-specific data with the isolate */ - V8_INLINE void SetData(void* data); + V8_INLINE(void SetData(void* data)); /** * Retrieve embedder-specific data from the isolate. * Returns NULL if SetData has never been called. */ - V8_INLINE void* GetData(); + V8_INLINE(void* GetData()); /** * Get statistics about the heap memory usage. @@ -4256,7 +4260,7 @@ class V8_EXPORT PersistentHandleVisitor { // NOLINT class V8_EXPORT AssertNoGCScope { #ifndef DEBUG // TODO(yangguo): remove isolate argument. - V8_INLINE AssertNoGCScope(Isolate* isolate) {} + V8_INLINE(AssertNoGCScope(Isolate* isolate)) { } #else AssertNoGCScope(Isolate* isolate); ~AssertNoGCScope(); @@ -4959,7 +4963,7 @@ class V8_EXPORT Context { * previous call to SetEmbedderData with the same index. Note that index 0 * currently has a special meaning for Chrome's debugger. */ - V8_INLINE Local GetEmbedderData(int index); + V8_INLINE(Local GetEmbedderData(int index)); /** * Sets the embedder data with the given index, growing the data as @@ -4974,7 +4978,7 @@ class V8_EXPORT Context { * SetAlignedPointerInEmbedderData with the same index. Note that index 0 * currently has a special meaning for Chrome's debugger. */ - V8_INLINE void* GetAlignedPointerFromEmbedderData(int index); + V8_INLINE(void* GetAlignedPointerFromEmbedderData(int index)); /** * Sets a 2-byte-aligned native pointer in the embedder data with the given @@ -5017,15 +5021,15 @@ class V8_EXPORT Context { */ class Scope { public: - explicit V8_INLINE Scope(Handle context) : context_(context) { + explicit V8_INLINE(Scope(Handle context)) : context_(context) { context_->Enter(); } // TODO(dcarney): deprecate - V8_INLINE Scope(Isolate* isolate, Persistent& context) // NOLINT + V8_INLINE(Scope(Isolate* isolate, Persistent& context)) // NOLINT : context_(Handle::New(isolate, context)) { context_->Enter(); } - V8_INLINE ~Scope() { context_->Exit(); } + V8_INLINE(~Scope()) { context_->Exit(); } private: Handle context_; @@ -5123,7 +5127,7 @@ class V8_EXPORT Unlocker { /** * Initialize Unlocker for a given Isolate. */ - V8_INLINE explicit Unlocker(Isolate* isolate) { Initialize(isolate); } + V8_INLINE(explicit Unlocker(Isolate* isolate)) { Initialize(isolate); } /** Deprecated. Use Isolate version instead. */ V8_DEPRECATED(Unlocker()); @@ -5141,7 +5145,7 @@ class V8_EXPORT Locker { /** * Initialize Locker for a given Isolate. 
*/ - V8_INLINE explicit Locker(Isolate* isolate) { Initialize(isolate); } + V8_INLINE(explicit Locker(Isolate* isolate)) { Initialize(isolate); } /** Deprecated. Use Isolate version instead. */ V8_DEPRECATED(Locker()); @@ -5270,7 +5274,7 @@ const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1; template struct SmiTagging; template -V8_INLINE internal::Object* IntToSmi(int value) { +V8_INLINE(internal::Object* IntToSmi(int value)) { int smi_shift_bits = kSmiTagSize + kSmiShiftSize; intptr_t tagged_value = (static_cast(value) << smi_shift_bits) | kSmiTag; @@ -5281,15 +5285,15 @@ V8_INLINE internal::Object* IntToSmi(int value) { template <> struct SmiTagging<4> { static const int kSmiShiftSize = 0; static const int kSmiValueSize = 31; - V8_INLINE static int SmiToInt(internal::Object* value) { + V8_INLINE(static int SmiToInt(internal::Object* value)) { int shift_bits = kSmiTagSize + kSmiShiftSize; // Throw away top 32 bits and shift down (requires >> to be sign extending). return static_cast(reinterpret_cast(value)) >> shift_bits; } - V8_INLINE static internal::Object* IntToSmi(int value) { + V8_INLINE(static internal::Object* IntToSmi(int value)) { return internal::IntToSmi(value); } - V8_INLINE static bool IsValidSmi(intptr_t value) { + V8_INLINE(static bool IsValidSmi(intptr_t value)) { // To be representable as an tagged small integer, the two // most-significant bits of 'value' must be either 00 or 11 due to // sign-extension. To check this we add 01 to the two @@ -5309,15 +5313,15 @@ template <> struct SmiTagging<4> { template <> struct SmiTagging<8> { static const int kSmiShiftSize = 31; static const int kSmiValueSize = 32; - V8_INLINE static int SmiToInt(internal::Object* value) { + V8_INLINE(static int SmiToInt(internal::Object* value)) { int shift_bits = kSmiTagSize + kSmiShiftSize; // Shift down and throw away top 32 bits. return static_cast(reinterpret_cast(value) >> shift_bits); } - V8_INLINE static internal::Object* IntToSmi(int value) { + V8_INLINE(static internal::Object* IntToSmi(int value)) { return internal::IntToSmi(value); } - V8_INLINE static bool IsValidSmi(intptr_t value) { + V8_INLINE(static bool IsValidSmi(intptr_t value)) { // To be representable as a long smi, the value must be a 32-bit integer. 
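// Worked example (added for illustration, not in the original source):
// a value such as 0x100000000 truncates to 0 as an int32_t, so the
// comparison below fails and the value is rejected; any value that
// survives the round-trip through int32_t unchanged fits in 32 bits.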
return (value == static_cast(value)); } @@ -5326,8 +5330,8 @@ template <> struct SmiTagging<8> { typedef SmiTagging PlatformSmiTagging; const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize; const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize; -V8_INLINE static bool SmiValuesAre31Bits() { return kSmiValueSize == 31; } -V8_INLINE static bool SmiValuesAre32Bits() { return kSmiValueSize == 32; } +V8_INLINE(static bool SmiValuesAre31Bits()) { return kSmiValueSize == 31; } +V8_INLINE(static bool SmiValuesAre32Bits()) { return kSmiValueSize == 32; } /** * This class exports constants and functionality from within v8 that @@ -5379,93 +5383,94 @@ class Internals { static const int kNullOddballKind = 3; static void CheckInitializedImpl(v8::Isolate* isolate); - V8_INLINE static void CheckInitialized(v8::Isolate* isolate) { + V8_INLINE(static void CheckInitialized(v8::Isolate* isolate)) { #ifdef V8_ENABLE_CHECKS CheckInitializedImpl(isolate); #endif } - V8_INLINE static bool HasHeapObjectTag(internal::Object* value) { + V8_INLINE(static bool HasHeapObjectTag(internal::Object* value)) { return ((reinterpret_cast(value) & kHeapObjectTagMask) == kHeapObjectTag); } - V8_INLINE static int SmiValue(internal::Object* value) { + V8_INLINE(static int SmiValue(internal::Object* value)) { return PlatformSmiTagging::SmiToInt(value); } - V8_INLINE static internal::Object* IntToSmi(int value) { + V8_INLINE(static internal::Object* IntToSmi(int value)) { return PlatformSmiTagging::IntToSmi(value); } - V8_INLINE static bool IsValidSmi(intptr_t value) { + V8_INLINE(static bool IsValidSmi(intptr_t value)) { return PlatformSmiTagging::IsValidSmi(value); } - V8_INLINE static int GetInstanceType(internal::Object* obj) { + V8_INLINE(static int GetInstanceType(internal::Object* obj)) { typedef internal::Object O; O* map = ReadField(obj, kHeapObjectMapOffset); return ReadField(map, kMapInstanceTypeOffset); } - V8_INLINE static int GetOddballKind(internal::Object* obj) { + V8_INLINE(static int GetOddballKind(internal::Object* obj)) { typedef internal::Object O; return SmiValue(ReadField(obj, kOddballKindOffset)); } - V8_INLINE static bool IsExternalTwoByteString(int instance_type) { + V8_INLINE(static bool IsExternalTwoByteString(int instance_type)) { int representation = (instance_type & kFullStringRepresentationMask); return representation == kExternalTwoByteRepresentationTag; } - V8_INLINE static uint8_t GetNodeFlag(internal::Object** obj, int shift) { + V8_INLINE(static uint8_t GetNodeFlag(internal::Object** obj, int shift)) { uint8_t* addr = reinterpret_cast(obj) + kNodeFlagsOffset; return *addr & static_cast(1U << shift); } - V8_INLINE static void UpdateNodeFlag(internal::Object** obj, - bool value, int shift) { + V8_INLINE(static void UpdateNodeFlag(internal::Object** obj, + bool value, int shift)) { uint8_t* addr = reinterpret_cast(obj) + kNodeFlagsOffset; uint8_t mask = static_cast(1 << shift); *addr = static_cast((*addr & ~mask) | (value << shift)); } - V8_INLINE static uint8_t GetNodeState(internal::Object** obj) { + V8_INLINE(static uint8_t GetNodeState(internal::Object** obj)) { uint8_t* addr = reinterpret_cast(obj) + kNodeFlagsOffset; return *addr & kNodeStateMask; } - V8_INLINE static void UpdateNodeState(internal::Object** obj, - uint8_t value) { + V8_INLINE(static void UpdateNodeState(internal::Object** obj, + uint8_t value)) { uint8_t* addr = reinterpret_cast(obj) + kNodeFlagsOffset; *addr = static_cast((*addr & ~kNodeStateMask) | value); } - V8_INLINE static void SetEmbedderData(v8::Isolate* 
isolate, void* data) { + V8_INLINE(static void SetEmbedderData(v8::Isolate* isolate, void* data)) { uint8_t* addr = reinterpret_cast(isolate) + kIsolateEmbedderDataOffset; *reinterpret_cast(addr) = data; } - V8_INLINE static void* GetEmbedderData(v8::Isolate* isolate) { + V8_INLINE(static void* GetEmbedderData(v8::Isolate* isolate)) { uint8_t* addr = reinterpret_cast(isolate) + kIsolateEmbedderDataOffset; return *reinterpret_cast(addr); } - V8_INLINE static internal::Object** GetRoot(v8::Isolate* isolate, - int index) { + V8_INLINE(static internal::Object** GetRoot(v8::Isolate* isolate, + int index)) { uint8_t* addr = reinterpret_cast(isolate) + kIsolateRootsOffset; return reinterpret_cast(addr + index * kApiPointerSize); } - template V8_INLINE static T ReadField(Object* ptr, int offset) { + template + V8_INLINE(static T ReadField(Object* ptr, int offset)) { uint8_t* addr = reinterpret_cast(ptr) + offset - kHeapObjectTag; return *reinterpret_cast(addr); } template - V8_INLINE static T ReadEmbedderData(Context* context, int index) { + V8_INLINE(static T ReadEmbedderData(Context* context, int index)) { typedef internal::Object O; typedef internal::Internals I; O* ctx = *reinterpret_cast(context); @@ -5477,13 +5482,13 @@ class Internals { return I::ReadField(embedder_data, value_offset); } - V8_INLINE static bool CanCastToHeapObject(void* o) { return false; } - V8_INLINE static bool CanCastToHeapObject(Context* o) { return true; } - V8_INLINE static bool CanCastToHeapObject(String* o) { return true; } - V8_INLINE static bool CanCastToHeapObject(Object* o) { return true; } - V8_INLINE static bool CanCastToHeapObject(Message* o) { return true; } - V8_INLINE static bool CanCastToHeapObject(StackTrace* o) { return true; } - V8_INLINE static bool CanCastToHeapObject(StackFrame* o) { return true; } + V8_INLINE(static bool CanCastToHeapObject(void* o)) { return false; } + V8_INLINE(static bool CanCastToHeapObject(Context* o)) { return true; } + V8_INLINE(static bool CanCastToHeapObject(String* o)) { return true; } + V8_INLINE(static bool CanCastToHeapObject(Object* o)) { return true; } + V8_INLINE(static bool CanCastToHeapObject(Message* o)) { return true; } + V8_INLINE(static bool CanCastToHeapObject(StackTrace* o)) { return true; } + V8_INLINE(static bool CanCastToHeapObject(StackFrame* o)) { return true; } }; } // namespace internal diff --git a/include/v8config.h b/include/v8config.h index cfafc6f..0993a9f 100644 --- a/include/v8config.h +++ b/include/v8config.h @@ -187,7 +187,6 @@ // supported // V8_HAS_ATTRIBUTE_DEPRECATED - __attribute__((deprecated)) supported // V8_HAS_ATTRIBUTE_NOINLINE - __attribute__((noinline)) supported -// V8_HAS_ATTRIBUTE_PURE - __attribute__((pure)) supported // V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported // V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result)) // supported @@ -217,7 +216,6 @@ # define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline)) # define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated)) # define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline)) -# define V8_HAS_ATTRIBUTE_PURE (__has_attribute(pure)) # define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility)) # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \ (__has_attribute(warn_unused_result)) @@ -248,7 +246,6 @@ # define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (V8_GNUC_PREREQ(4, 4, 0)) # define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0)) # define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0)) -# define 
V8_HAS_ATTRIBUTE_PURE (V8_GNUC_PREREQ(2, 96, 0))
 # define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0))
 # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
     (!V8_CC_INTEL && V8_GNUC_PREREQ(4, 1, 0))
@@ -298,27 +295,23 @@
 // Helper macros
 // A macro used to make better inlining. Don't bother for debug builds.
-// Use like:
-// V8_INLINE int GetZero() { return 0; }
 #if !defined(DEBUG) && V8_HAS_ATTRIBUTE_ALWAYS_INLINE
-# define V8_INLINE inline __attribute__((always_inline))
+# define V8_INLINE(declarator) inline __attribute__((always_inline)) declarator
 #elif !defined(DEBUG) && V8_HAS___FORCEINLINE
-# define V8_INLINE __forceinline
+# define V8_INLINE(declarator) __forceinline declarator
 #else
-# define V8_INLINE inline
+# define V8_INLINE(declarator) inline declarator
 #endif
 // A macro used to tell the compiler to never inline a particular function.
 // Don't bother for debug builds.
-// Use like:
-// V8_NOINLINE int GetMinusOne() { return -1; }
 #if !defined(DEBUG) && V8_HAS_ATTRIBUTE_NOINLINE
-# define V8_NOINLINE __attribute__((noinline))
+# define V8_NOINLINE(declarator) __attribute__((noinline)) declarator
 #elif !defined(DEBUG) && V8_HAS_DECLSPEC_NOINLINE
-# define V8_NOINLINE __declspec(noinline)
+# define V8_NOINLINE(declarator) __declspec(noinline) declarator
 #else
-# define V8_NOINLINE /* NOT SUPPORTED */
+# define V8_NOINLINE(declarator) declarator
 #endif
@@ -332,28 +325,6 @@
 #endif
-// Many functions have no effects except the return value and their return value
-// depends only on the parameters and/or global variables. Such a function can
-// be subject to common subexpression elimination and loop optimization just as
-// an arithmetic operator would be. These functions should be declared with the
-// attribute V8_PURE. For example,
-//
-// int square (int) V8_PURE;
-//
-// says that the hypothetical function square is safe to call fewer times than
-// the program says.
-//
-// Some of common examples of pure functions are strlen or memcmp. Interesting
-// non-V8_PURE functions are functions with infinite loops or those depending
-// on volatile memory or other system resource, that may change between two
-// consecutive calls (such as feof in a multithreaded environment).
-#if V8_HAS_ATTRIBUTE_PURE
-# define V8_PURE __attribute__((pure))
-#else
-# define V8_PURE /* NOT SUPPORTED */
-#endif
-
-
 // Annotate a function indicating the caller must examine the return value.
// Use like: // int foo() V8_WARN_UNUSED_RESULT; diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc index faf7b54..1bcf3e3 100644 --- a/src/arm/codegen-arm.cc +++ b/src/arm/codegen-arm.cc @@ -64,8 +64,7 @@ double fast_exp_simulator(double x) { UnaryMathFunction CreateExpFunction() { if (!FLAG_fast_math) return &exp; size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - 1 * KB, &actual_size, VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return &exp; ExternalReference::InitializeMathExpData(); @@ -103,9 +102,7 @@ UnaryMathFunction CreateExpFunction() { ASSERT(!RelocInfo::RequiresRelocation(desc)); CPU::FlushICache(buffer, actual_size); - bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size); - ASSERT(result); - USE(result); + OS::ProtectCode(buffer, actual_size); #if !defined(USE_SIMULATOR) return FUNCTION_CAST(buffer); @@ -125,8 +122,7 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function( return stub; } size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - 1 * KB, &actual_size, VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return stub; MacroAssembler masm(NULL, buffer, static_cast(actual_size)); @@ -268,9 +264,7 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function( ASSERT(!RelocInfo::RequiresRelocation(desc)); CPU::FlushICache(buffer, actual_size); - bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size); - ASSERT(result); - USE(result); + OS::ProtectCode(buffer, actual_size); return FUNCTION_CAST(buffer); #endif } @@ -286,8 +280,7 @@ OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function( return stub; } size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - 1 * KB, &actual_size, VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return stub; MacroAssembler masm(NULL, buffer, static_cast(actual_size)); @@ -359,9 +352,7 @@ OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function( masm.GetCode(&desc); CPU::FlushICache(buffer, actual_size); - bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size); - ASSERT(result); - USE(result); + OS::ProtectCode(buffer, actual_size); return FUNCTION_CAST(buffer); #endif diff --git a/src/cpu.cc b/src/cpu.cc index 2bf51a7..26eca61 100644 --- a/src/cpu.cc +++ b/src/cpu.cc @@ -54,7 +54,7 @@ namespace internal { // Define __cpuid() for non-MSVC compilers. #if !V8_CC_MSVC -static V8_INLINE void __cpuid(int cpu_info[4], int info_type) { +static V8_INLINE(void __cpuid(int cpu_info[4], int info_type)) { #if defined(__i386__) && defined(__pic__) // Make sure to preserve ebx, which contains the pointer // to the GOT in case we're generating PIC. diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc index 50ee05c..c979a53 100644 --- a/src/deoptimizer.cc +++ b/src/deoptimizer.cc @@ -42,8 +42,14 @@ namespace internal { static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) { return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(), - VirtualMemory::GetPageSize(), - VirtualMemory::EXECUTABLE, + OS::CommitPageSize(), +#if defined(__native_client__) + // The Native Client port of V8 uses an interpreter, + // so code pages don't need PROT_EXEC. 
+ NOT_EXECUTABLE, +#else + EXECUTABLE, +#endif NULL); } @@ -122,7 +128,7 @@ static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB; size_t Deoptimizer::GetMaxDeoptTableSize() { int entries_size = Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_; - int commit_page_size = static_cast(VirtualMemory::GetPageSize()); + int commit_page_size = static_cast(OS::CommitPageSize()); int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) / commit_page_size) + 1; return static_cast(commit_page_size * page_count); diff --git a/src/globals.h b/src/globals.h index 1977e68..992f3a8 100644 --- a/src/globals.h +++ b/src/globals.h @@ -342,9 +342,9 @@ F FUNCTION_CAST(Address addr) { DISALLOW_COPY_AND_ASSIGN(TypeName) -// Newly written code should use V8_INLINE and V8_NOINLINE directly. -#define INLINE(declarator) V8_INLINE declarator -#define NO_INLINE(declarator) V8_NOINLINE declarator +// Newly written code should use V8_INLINE() and V8_NOINLINE() directly. +#define INLINE(declarator) V8_INLINE(declarator) +#define NO_INLINE(declarator) V8_NOINLINE(declarator) // Newly written code should use V8_WARN_UNUSED_RESULT. diff --git a/src/heap-inl.h b/src/heap-inl.h index 3892170..86aff1a 100644 --- a/src/heap-inl.h +++ b/src/heap-inl.h @@ -144,7 +144,7 @@ MaybeObject* Heap::AllocateOneByteInternalizedString(Vector str, // Allocate string. Object* result; { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize) - ? lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE) + ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE) : old_data_space_->AllocateRaw(size); if (!maybe_result->ToObject(&result)) return maybe_result; } @@ -178,7 +178,7 @@ MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector str, // Allocate string. Object* result; { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize) - ? lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE) + ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE) : old_data_space_->AllocateRaw(size); if (!maybe_result->ToObject(&result)) return maybe_result; } @@ -242,8 +242,7 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes, } else if (CODE_SPACE == space) { result = code_space_->AllocateRaw(size_in_bytes); } else if (LO_SPACE == space) { - result = lo_space_->AllocateRaw( - size_in_bytes, VirtualMemory::NOT_EXECUTABLE); + result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE); } else if (CELL_SPACE == space) { result = cell_space_->AllocateRaw(size_in_bytes); } else if (PROPERTY_CELL_SPACE == space) { diff --git a/src/heap.cc b/src/heap.cc index 83c8bad..0455a84 100644 --- a/src/heap.cc +++ b/src/heap.cc @@ -172,7 +172,8 @@ Heap::Heap() max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE; #endif - intptr_t max_virtual = static_cast(VirtualMemory::GetLimit()); + intptr_t max_virtual = OS::MaxVirtualMemory(); + if (max_virtual > 0) { if (code_range_size_ > 0) { // Reserve no more than 1/8 of the memory for the code range. @@ -4150,7 +4151,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc, HeapObject* result; bool force_lo_space = obj_size > code_space()->AreaSize(); if (force_lo_space) { - maybe_result = lo_space_->AllocateRaw(obj_size, VirtualMemory::EXECUTABLE); + maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE); } else { maybe_result = code_space_->AllocateRaw(obj_size); } @@ -4162,7 +4163,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc, // Discard the first code allocation, which was on a page where it could be // moved. 
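// (Illustrative note, not from the patch: the retry below allocates in the
// large-object space, whose pages are never compacted, so the code object's
// address stays stable once allocated.)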
CreateFillerObjectAt(result->address(), obj_size); - maybe_result = lo_space_->AllocateRaw(obj_size, VirtualMemory::EXECUTABLE); + maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE); if (!maybe_result->To(&result)) return maybe_result; } @@ -4213,7 +4214,7 @@ MaybeObject* Heap::CopyCode(Code* code) { int obj_size = code->Size(); MaybeObject* maybe_result; if (obj_size > code_space()->AreaSize()) { - maybe_result = lo_space_->AllocateRaw(obj_size, VirtualMemory::EXECUTABLE); + maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE); } else { maybe_result = code_space_->AllocateRaw(obj_size); } @@ -4256,8 +4257,7 @@ MaybeObject* Heap::CopyCode(Code* code, Vector reloc_info) { MaybeObject* maybe_result; if (new_obj_size > code_space()->AreaSize()) { - maybe_result = lo_space_->AllocateRaw( - new_obj_size, VirtualMemory::EXECUTABLE); + maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE); } else { maybe_result = code_space_->AllocateRaw(new_obj_size); } @@ -5370,7 +5370,7 @@ MaybeObject* Heap::AllocateInternalizedStringImpl( // Allocate string. Object* result; { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize) - ? lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE) + ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE) : old_data_space_->AllocateRaw(size); if (!maybe_result->ToObject(&result)) return maybe_result; } @@ -5523,7 +5523,7 @@ MaybeObject* Heap::AllocateRawFixedArray(int length) { int size = FixedArray::SizeFor(length); return size <= Page::kMaxNonCodeHeapObjectSize ? new_space_.AllocateRaw(size) - : lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE); + : lo_space_->AllocateRaw(size, NOT_EXECUTABLE); } @@ -6878,7 +6878,7 @@ bool Heap::SetUp() { new OldSpace(this, max_old_generation_size_, OLD_POINTER_SPACE, - VirtualMemory::NOT_EXECUTABLE); + NOT_EXECUTABLE); if (old_pointer_space_ == NULL) return false; if (!old_pointer_space_->SetUp()) return false; @@ -6887,7 +6887,7 @@ bool Heap::SetUp() { new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE, - VirtualMemory::NOT_EXECUTABLE); + NOT_EXECUTABLE); if (old_data_space_ == NULL) return false; if (!old_data_space_->SetUp()) return false; @@ -6901,8 +6901,8 @@ bool Heap::SetUp() { } } - code_space_ = new OldSpace( - this, max_old_generation_size_, CODE_SPACE, VirtualMemory::EXECUTABLE); + code_space_ = + new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE); if (code_space_ == NULL) return false; if (!code_space_->SetUp()) return false; @@ -7999,9 +7999,8 @@ void Heap::FreeQueuedChunks() { MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1); while (inner <= inner_last) { // Size of a large chunk is always a multiple of - // VirtualMemory::GetAllocationGranularity() so - // there is always enough space for a fake - // MemoryChunk header. + // OS::AllocateAlignment() so there is always + // enough space for a fake MemoryChunk header. Address area_end = Min(inner->address() + Page::kPageSize, chunk_end); // Guard against overflow. if (area_end < inner->address()) area_end = chunk_end; diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc index 0e4fe8c..84a4d23 100644 --- a/src/ia32/codegen-ia32.cc +++ b/src/ia32/codegen-ia32.cc @@ -60,8 +60,9 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) { size_t actual_size; // Allocate buffer in executable space. 
- byte* buffer = static_cast(VirtualMemory::AllocateRegion( - 1 * KB, &actual_size, VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(1 * KB, + &actual_size, + true)); if (buffer == NULL) { // Fallback to library function if function cannot be created. switch (type) { @@ -96,9 +97,7 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) { ASSERT(!RelocInfo::RequiresRelocation(desc)); CPU::FlushICache(buffer, actual_size); - bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size); - ASSERT(result); - USE(result); + OS::ProtectCode(buffer, actual_size); return FUNCTION_CAST(buffer); } @@ -107,8 +106,7 @@ UnaryMathFunction CreateExpFunction() { if (!CpuFeatures::IsSupported(SSE2)) return &exp; if (!FLAG_fast_math) return &exp; size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - 1 * KB, &actual_size, VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return &exp; ExternalReference::InitializeMathExpData(); @@ -137,9 +135,7 @@ UnaryMathFunction CreateExpFunction() { ASSERT(!RelocInfo::RequiresRelocation(desc)); CPU::FlushICache(buffer, actual_size); - bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size); - ASSERT(result); - USE(result); + OS::ProtectCode(buffer, actual_size); return FUNCTION_CAST(buffer); } @@ -147,8 +143,9 @@ UnaryMathFunction CreateExpFunction() { UnaryMathFunction CreateSqrtFunction() { size_t actual_size; // Allocate buffer in executable space. - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - 1 * KB, &actual_size, VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(1 * KB, + &actual_size, + true)); // If SSE2 is not available, we can use libc's implementation to ensure // consistency since code by fullcodegen's calls into runtime in that case. if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt; @@ -171,9 +168,7 @@ UnaryMathFunction CreateSqrtFunction() { ASSERT(!RelocInfo::RequiresRelocation(desc)); CPU::FlushICache(buffer, actual_size); - bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size); - ASSERT(result); - USE(result); + OS::ProtectCode(buffer, actual_size); return FUNCTION_CAST(buffer); } @@ -267,8 +262,7 @@ class LabelConverter { OS::MemMoveFunction CreateMemMoveFunction() { size_t actual_size; // Allocate buffer in executable space. - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - 1 * KB, &actual_size, VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return NULL; MacroAssembler masm(NULL, buffer, static_cast(actual_size)); LabelConverter conv(buffer); @@ -645,9 +639,7 @@ OS::MemMoveFunction CreateMemMoveFunction() { masm.GetCode(&desc); ASSERT(!RelocInfo::RequiresRelocation(desc)); CPU::FlushICache(buffer, actual_size); - bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size); - ASSERT(result); - USE(result); + OS::ProtectCode(buffer, actual_size); // TODO(jkummerow): It would be nice to register this code creation event // with the PROFILE / GDBJIT system. return FUNCTION_CAST(buffer); diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc index 4d46b66..df0f14a 100644 --- a/src/incremental-marking.cc +++ b/src/incremental-marking.cc @@ -558,7 +558,7 @@ void IncrementalMarking::EnsureMarkingDequeIsCommitted() { bool success = marking_deque_memory_->Commit( reinterpret_cast
<Address>(marking_deque_memory_->address()),
       marking_deque_memory_->size(),
-      VirtualMemory::NOT_EXECUTABLE);
+      false);  // Not executable.
   CHECK(success);
   marking_deque_memory_committed_ = true;
 }
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index e9148ce..5c847fc 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -64,8 +64,7 @@ double fast_exp_simulator(double x) {
 UnaryMathFunction CreateExpFunction() {
   if (!FLAG_fast_math) return &exp;
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
+  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return &exp;
   ExternalReference::InitializeMathExpData();
@@ -103,9 +102,7 @@ UnaryMathFunction CreateExpFunction() {
   ASSERT(!RelocInfo::RequiresRelocation(desc));
   CPU::FlushICache(buffer, actual_size);
-  bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
-  ASSERT(result);
-  USE(result);
+  OS::ProtectCode(buffer, actual_size);
 #if !defined(USE_SIMULATOR)
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 081f195..4d3b1e3 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -73,6 +73,21 @@ double OS::LocalTimeOffset() {
 }
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  if (mbase == MAP_FAILED) {
+    LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
+    return NULL;
+  }
+  *allocated = msize;
+  return mbase;
+}
+
+
 void OS::DumpBacktrace() {
   // Currently unsupported.
 }
@@ -208,8 +223,7 @@ static void* GetRandomAddr() {
   // CpuFeatures::Probe. We don't care about randomization in this case because
   // the code page is immediately freed.
   if (isolate != NULL) {
-    // The address range used to randomize RWX allocations in
-    // VirtualMemory::AllocateRegion().
+    // The address range used to randomize RWX allocations in OS::Allocate.
     // Try not to map pages into the default range that Windows loads DLLs.
     // Use a multiple of 64k to prevent committing unused memory.
     // Note: This does not guarantee RWX regions will be within the
     // range kAllocationRandomAddressMin to kAllocationRandomAddressMax.
@@ -230,4 +244,126 @@ static void* GetRandomAddr() {
   return NULL;
 }
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+  LPVOID base = NULL;
+
+  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+    // For executable pages try to randomize the allocation address.
+    for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
+      base = VirtualAlloc(GetRandomAddr(), size, action, protection);
+    }
+  }
+
+  // After three attempts give up and let the OS find an address to use.
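+  // The random hints are purely opportunistic ASLR hardening for RWX pages;
+  // falling back to a NULL hint keeps allocation reliable.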
+ if (base == NULL) base = VirtualAlloc(NULL, size, action, protection); + + return base; +} + + +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + ASSERT(IsAligned(alignment, static_cast(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast(OS::AllocateAlignment())); + void* address = ReserveRegion(request_size); + if (address == NULL) return; + Address base = RoundUp(static_cast
(address), alignment); + // Try reducing the size by freeing and then reallocating a specific area. + bool result = ReleaseRegion(address, request_size); + USE(result); + ASSERT(result); + address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS); + if (address != NULL) { + request_size = size; + ASSERT(base == static_cast
(address)); + } else { + // Resizing failed, just go with a bigger area. + address = ReserveRegion(request_size); + if (address == NULL) return; + } + address_ = address; + size_ = request_size; +} + + +VirtualMemory::~VirtualMemory() { + if (IsReserved()) { + bool result = ReleaseRegion(address_, size_); + ASSERT(result); + USE(result); + } +} + + +bool VirtualMemory::IsReserved() { + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + +bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + ASSERT(IsReserved()); + return UncommitRegion(address, size); +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS); +} + + +bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { + int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; + if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) { + return false; + } + return true; +} + + +bool VirtualMemory::Guard(void* address) { + if (NULL == VirtualAlloc(address, + OS::CommitPageSize(), + MEM_COMMIT, + PAGE_NOACCESS)) { + return false; + } + return true; +} + + +bool VirtualMemory::UncommitRegion(void* base, size_t size) { + return VirtualFree(base, size, MEM_DECOMMIT) != 0; +} + + +bool VirtualMemory::ReleaseRegion(void* base, size_t size) { + return VirtualFree(base, 0, MEM_RELEASE) != 0; +} + + +bool VirtualMemory::HasLazyCommits() { + // TODO(alph): implement for the platform. + return false; +} + } } // namespace v8::internal diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc index ff914c7..d818278 100644 --- a/src/platform-freebsd.cc +++ b/src/platform-freebsd.cc @@ -81,6 +81,22 @@ double OS::LocalTimeOffset() { } +void* OS::Allocate(const size_t requested, + size_t* allocated, + bool executable) { + const size_t msize = RoundUp(requested, getpagesize()); + int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0); + void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); + + if (mbase == MAP_FAILED) { + LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed")); + return NULL; + } + *allocated = msize; + return mbase; +} + + void OS::DumpBacktrace() { POSIXBacktraceHelper::DumpBacktrace(); } @@ -187,4 +203,141 @@ int OS::StackWalk(Vector frames) { return POSIXBacktraceHelper::StackWalk(frames); } + +// Constants used for mmap. +static const int kMmapFd = -1; +static const int kMmapFdOffset = 0; + + +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + ASSERT(IsAligned(alignment, static_cast(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast(OS::AllocateAlignment())); + void* reservation = mmap(OS::GetRandomMmapAddr(), + request_size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + if (reservation == MAP_FAILED) return; + + Address base = static_cast
(reservation); + Address aligned_base = RoundUp(base, alignment); + ASSERT_LE(base, aligned_base); + + // Unmap extra memory reserved before and after the desired block. + if (aligned_base != base) { + size_t prefix_size = static_cast(aligned_base - base); + OS::Free(base, prefix_size); + request_size -= prefix_size; + } + + size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); + ASSERT_LE(aligned_size, request_size); + + if (aligned_size != request_size) { + size_t suffix_size = request_size - aligned_size; + OS::Free(aligned_base + aligned_size, suffix_size); + request_size -= suffix_size; + } + + ASSERT(aligned_size == request_size); + + address_ = static_cast(aligned_base); + size_ = aligned_size; +} + + +VirtualMemory::~VirtualMemory() { + if (IsReserved()) { + bool result = ReleaseRegion(address(), size()); + ASSERT(result); + USE(result); + } +} + + +bool VirtualMemory::IsReserved() { + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + +bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + return UncommitRegion(address, size); +} + + +bool VirtualMemory::Guard(void* address) { + OS::Guard(address, OS::CommitPageSize()); + return true; +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + void* result = mmap(OS::GetRandomMmapAddr(), + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + + if (result == MAP_FAILED) return NULL; + + return result; +} + + +bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); + if (MAP_FAILED == mmap(base, + size, + prot, + MAP_PRIVATE | MAP_ANON | MAP_FIXED, + kMmapFd, + kMmapFdOffset)) { + return false; + } + return true; +} + + +bool VirtualMemory::UncommitRegion(void* base, size_t size) { + return mmap(base, + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, + kMmapFd, + kMmapFdOffset) != MAP_FAILED; +} + + +bool VirtualMemory::ReleaseRegion(void* base, size_t size) { + return munmap(base, size) == 0; +} + + +bool VirtualMemory::HasLazyCommits() { + // TODO(alph): implement for the platform. + return false; +} + } } // namespace v8::internal diff --git a/src/platform-linux.cc b/src/platform-linux.cc index 8296b6c..b8b9602 100644 --- a/src/platform-linux.cc +++ b/src/platform-linux.cc @@ -137,6 +137,23 @@ double OS::LocalTimeOffset() { } +void* OS::Allocate(const size_t requested, + size_t* allocated, + bool is_executable) { + const size_t msize = RoundUp(requested, AllocateAlignment()); + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); + void* addr = OS::GetRandomMmapAddr(); + void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (mbase == MAP_FAILED) { + LOG(i::Isolate::Current(), + StringEvent("OS::Allocate", "mmap failed")); + return NULL; + } + *allocated = msize; + return mbase; +} + + void OS::DumpBacktrace() { // backtrace is a glibc extension. 
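   // (uClibc and other minimal C libraries do not provide backtrace(3),
   // hence the guard below.)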
#if defined(__GLIBC__) && !defined(__UCLIBC__) @@ -167,16 +184,12 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { int size = ftell(file); void* memory = - mmap(NULL, + mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); - if (memory == MAP_FAILED) { - fclose(file); - return NULL; - } return new PosixMemoryMappedFile(file, memory, size); } @@ -191,24 +204,18 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, return NULL; } void* memory = - mmap(NULL, + mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); - if (memory == MAP_FAILED) { - fclose(file); - return NULL; - } return new PosixMemoryMappedFile(file, memory, size); } PosixMemoryMappedFile::~PosixMemoryMappedFile() { - int result = munmap(memory_, size_); - ASSERT_EQ(0, result); - USE(result); + if (memory_) OS::Free(memory_, size_); fclose(file_); } @@ -288,7 +295,7 @@ void OS::SignalCodeMovingGC() { OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap); OS::Abort(); } - void* addr = mmap(NULL, + void* addr = mmap(OS::GetRandomMmapAddr(), size, #if defined(__native_client__) // The Native Client port of V8 uses an interpreter, @@ -301,9 +308,7 @@ void OS::SignalCodeMovingGC() { fileno(f), 0); ASSERT(addr != MAP_FAILED); - int result = munmap(addr, size); - ASSERT_EQ(0, result); - USE(result); + OS::Free(addr, size); fclose(f); } @@ -317,4 +322,147 @@ int OS::StackWalk(Vector frames) { #endif } + +// Constants used for mmap. +static const int kMmapFd = -1; +static const int kMmapFdOffset = 0; + + +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + ASSERT(IsAligned(alignment, static_cast(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast(OS::AllocateAlignment())); + void* reservation = mmap(OS::GetRandomMmapAddr(), + request_size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + if (reservation == MAP_FAILED) return; + + Address base = static_cast
<Address>(reservation);
+  Address aligned_base = RoundUp(base, alignment);
+  ASSERT_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+#if defined(__native_client__)
+  // The Native Client port of V8 uses an interpreter,
+  // so code pages don't need PROT_EXEC.
+  int prot = PROT_READ | PROT_WRITE;
+#else
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+#endif
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  return true;
+}
+
 } }  // namespace v8::internal
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 7a2249c..67cc96f 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -79,6 +79,34 @@ namespace v8 {
 namespace internal {
+// Constants used for mmap.
+// kMmapFd is used to pass vm_alloc flags to tag the region with the
+// user-defined tag 255. This helps identify V8-allocated regions in memory
+// analysis tools like vmmap(1).
+static const int kMmapFd = VM_MAKE_TAG(255);
+static const off_t kMmapFdOffset = 0;
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, getpagesize());
+  int prot = PROT_READ | PROT_WRITE | (is_executable ?
PROT_EXEC : 0); + void* mbase = mmap(OS::GetRandomMmapAddr(), + msize, + prot, + MAP_PRIVATE | MAP_ANON, + kMmapFd, + kMmapFdOffset); + if (mbase == MAP_FAILED) { + LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed")); + return NULL; + } + *allocated = msize; + return mbase; +} + + void OS::DumpBacktrace() { // If weak link to execinfo lib has failed, ie because we are on 10.4, abort. if (backtrace == NULL) return; @@ -109,7 +137,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { int size = ftell(file); void* memory = - mmap(NULL, + mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_WRITE, MAP_SHARED, @@ -129,7 +157,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, return NULL; } void* memory = - mmap(NULL, + mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_WRITE, MAP_SHARED, @@ -140,7 +168,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, PosixMemoryMappedFile::~PosixMemoryMappedFile() { - if (memory_) munmap(memory_, size_); + if (memory_) OS::Free(memory_, size_); fclose(file_); } @@ -199,4 +227,137 @@ int OS::StackWalk(Vector frames) { return POSIXBacktraceHelper::StackWalk(frames); } + +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + ASSERT(IsAligned(alignment, static_cast(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast(OS::AllocateAlignment())); + void* reservation = mmap(OS::GetRandomMmapAddr(), + request_size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + if (reservation == MAP_FAILED) return; + + Address base = static_cast
(reservation); + Address aligned_base = RoundUp(base, alignment); + ASSERT_LE(base, aligned_base); + + // Unmap extra memory reserved before and after the desired block. + if (aligned_base != base) { + size_t prefix_size = static_cast(aligned_base - base); + OS::Free(base, prefix_size); + request_size -= prefix_size; + } + + size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); + ASSERT_LE(aligned_size, request_size); + + if (aligned_size != request_size) { + size_t suffix_size = request_size - aligned_size; + OS::Free(aligned_base + aligned_size, suffix_size); + request_size -= suffix_size; + } + + ASSERT(aligned_size == request_size); + + address_ = static_cast(aligned_base); + size_ = aligned_size; +} + + +VirtualMemory::~VirtualMemory() { + if (IsReserved()) { + bool result = ReleaseRegion(address(), size()); + ASSERT(result); + USE(result); + } +} + + +bool VirtualMemory::IsReserved() { + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + +bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + return UncommitRegion(address, size); +} + + +bool VirtualMemory::Guard(void* address) { + OS::Guard(address, OS::CommitPageSize()); + return true; +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + void* result = mmap(OS::GetRandomMmapAddr(), + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + + if (result == MAP_FAILED) return NULL; + + return result; +} + + +bool VirtualMemory::CommitRegion(void* address, + size_t size, + bool is_executable) { + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); + if (MAP_FAILED == mmap(address, + size, + prot, + MAP_PRIVATE | MAP_ANON | MAP_FIXED, + kMmapFd, + kMmapFdOffset)) { + return false; + } + return true; +} + + +bool VirtualMemory::UncommitRegion(void* address, size_t size) { + return mmap(address, + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, + kMmapFd, + kMmapFdOffset) != MAP_FAILED; +} + + +bool VirtualMemory::ReleaseRegion(void* address, size_t size) { + return munmap(address, size) == 0; +} + + +bool VirtualMemory::HasLazyCommits() { + return false; +} + } } // namespace v8::internal diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc index 78654b6..30a484f 100644 --- a/src/platform-openbsd.cc +++ b/src/platform-openbsd.cc @@ -79,6 +79,23 @@ double OS::LocalTimeOffset() { } +void* OS::Allocate(const size_t requested, + size_t* allocated, + bool is_executable) { + const size_t msize = RoundUp(requested, AllocateAlignment()); + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); + void* addr = OS::GetRandomMmapAddr(); + void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mbase == MAP_FAILED) { + LOG(i::Isolate::Current(), + StringEvent("OS::Allocate", "mmap failed")); + return NULL; + } + *allocated = msize; + return mbase; +} + + void OS::DumpBacktrace() { // Currently unsupported. } @@ -242,4 +259,141 @@ int OS::StackWalk(Vector frames) { return frames_count; } + +// Constants used for mmap. 
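+// (Anonymous mappings take no file descriptor; the portable convention with
+// MAP_ANON is an fd of -1 and an offset of 0.)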
+static const int kMmapFd = -1; +static const int kMmapFdOffset = 0; + + +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + ASSERT(IsAligned(alignment, static_cast(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast(OS::AllocateAlignment())); + void* reservation = mmap(OS::GetRandomMmapAddr(), + request_size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + if (reservation == MAP_FAILED) return; + + Address base = static_cast
(reservation); + Address aligned_base = RoundUp(base, alignment); + ASSERT_LE(base, aligned_base); + + // Unmap extra memory reserved before and after the desired block. + if (aligned_base != base) { + size_t prefix_size = static_cast(aligned_base - base); + OS::Free(base, prefix_size); + request_size -= prefix_size; + } + + size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); + ASSERT_LE(aligned_size, request_size); + + if (aligned_size != request_size) { + size_t suffix_size = request_size - aligned_size; + OS::Free(aligned_base + aligned_size, suffix_size); + request_size -= suffix_size; + } + + ASSERT(aligned_size == request_size); + + address_ = static_cast(aligned_base); + size_ = aligned_size; +} + + +VirtualMemory::~VirtualMemory() { + if (IsReserved()) { + bool result = ReleaseRegion(address(), size()); + ASSERT(result); + USE(result); + } +} + + +bool VirtualMemory::IsReserved() { + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + +bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + return UncommitRegion(address, size); +} + + +bool VirtualMemory::Guard(void* address) { + OS::Guard(address, OS::CommitPageSize()); + return true; +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + void* result = mmap(OS::GetRandomMmapAddr(), + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + + if (result == MAP_FAILED) return NULL; + + return result; +} + + +bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); + if (MAP_FAILED == mmap(base, + size, + prot, + MAP_PRIVATE | MAP_ANON | MAP_FIXED, + kMmapFd, + kMmapFdOffset)) { + return false; + } + return true; +} + + +bool VirtualMemory::UncommitRegion(void* base, size_t size) { + return mmap(base, + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, + kMmapFd, + kMmapFdOffset) != MAP_FAILED; +} + + +bool VirtualMemory::ReleaseRegion(void* base, size_t size) { + return munmap(base, size) == 0; +} + + +bool VirtualMemory::HasLazyCommits() { + // TODO(alph): implement for the platform. + return false; +} + } } // namespace v8::internal diff --git a/src/platform-posix.cc b/src/platform-posix.cc index fce34ba..fe27eaf 100644 --- a/src/platform-posix.cc +++ b/src/platform-posix.cc @@ -91,6 +91,17 @@ uint64_t OS::CpuFeaturesImpliedByPlatform() { } +// Maximum size of the virtual memory. 0 means there is no artificial +// limit. + +intptr_t OS::MaxVirtualMemory() { + struct rlimit limit; + int result = getrlimit(RLIMIT_DATA, &limit); + if (result != 0) return 0; + return limit.rlim_cur; +} + + int OS::ActivationFrameAlignment() { #if V8_TARGET_ARCH_ARM // On EABI ARM targets this is required for fp correctness in the @@ -109,6 +120,97 @@ int OS::ActivationFrameAlignment() { } +intptr_t OS::CommitPageSize() { + static intptr_t page_size = getpagesize(); + return page_size; +} + + +void OS::Free(void* address, const size_t size) { + // TODO(1240712): munmap has a return value which is ignored here. + int result = munmap(address, size); + USE(result); + ASSERT(result == 0); +} + + +// Get rid of writable permission on code allocations. 
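+// Dropping PROT_WRITE once code has been emitted gives W^X protection for
+// the page: it is never writable and executable at the same time afterwards.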
+void OS::ProtectCode(void* address, const size_t size) { +#if defined(__CYGWIN__) + DWORD old_protect; + VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect); +#elif defined(__native_client__) + // The Native Client port of V8 uses an interpreter, so + // code pages don't need PROT_EXEC. + mprotect(address, size, PROT_READ); +#else + mprotect(address, size, PROT_READ | PROT_EXEC); +#endif +} + + +// Create guard pages. +void OS::Guard(void* address, const size_t size) { +#if defined(__CYGWIN__) + DWORD oldprotect; + VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect); +#else + mprotect(address, size, PROT_NONE); +#endif +} + + +void* OS::GetRandomMmapAddr() { +#if defined(__native_client__) + // TODO(bradchen): restore randomization once Native Client gets + // smarter about using mmap address hints. + // See http://code.google.com/p/nativeclient/issues/3341 + return NULL; +#endif + Isolate* isolate = Isolate::UncheckedCurrent(); + // Note that the current isolate isn't set up in a call path via + // CpuFeatures::Probe. We don't care about randomization in this case because + // the code page is immediately freed. + if (isolate != NULL) { + uintptr_t raw_addr; + isolate->random_number_generator()->NextBytes(&raw_addr, sizeof(raw_addr)); +#if V8_TARGET_ARCH_X64 + // Currently available CPUs have 48 bits of virtual addressing. Truncate + // the hint address to 46 bits to give the kernel a fighting chance of + // fulfilling our placement request. + raw_addr &= V8_UINT64_C(0x3ffffffff000); +#else + raw_addr &= 0x3ffff000; + +# ifdef __sun + // For our Solaris/illumos mmap hint, we pick a random address in the bottom + // half of the top half of the address space (that is, the third quarter). + // Because we do not MAP_FIXED, this will be treated only as a hint -- the + // system will not fail to mmap() because something else happens to already + // be mapped at our random address. We deliberately set the hint high enough + // to get well above the system's break (that is, the heap); Solaris and + // illumos will try the hint and if that fails allocate as if there were + // no hint at all. The high hint prevents the break from getting hemmed in + // at low values, ceding half of the address space to the system heap. + raw_addr += 0x80000000; +# else + // The range 0x20000000 - 0x60000000 is relatively unpopulated across a + // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos + // 10.6 and 10.7. + raw_addr += 0x20000000; +# endif +#endif + return reinterpret_cast(raw_addr); + } + return NULL; +} + + +size_t OS::AllocateAlignment() { + return getpagesize(); +} + + void OS::Sleep(int milliseconds) { useconds_t ms = static_cast(milliseconds); usleep(1000 * ms); diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc index fbb61a9..f082af1 100644 --- a/src/platform-solaris.cc +++ b/src/platform-solaris.cc @@ -96,6 +96,22 @@ double OS::LocalTimeOffset() { } +void* OS::Allocate(const size_t requested, + size_t* allocated, + bool is_executable) { + const size_t msize = RoundUp(requested, getpagesize()); + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); + void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); + + if (mbase == MAP_FAILED) { + LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed")); + return NULL; + } + *allocated = msize; + return mbase; +} + + void OS::DumpBacktrace() { // Currently unsupported. 
} @@ -208,4 +224,141 @@ int OS::StackWalk(Vector frames) { return walker.index; } + +// Constants used for mmap. +static const int kMmapFd = -1; +static const int kMmapFdOffset = 0; + + +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + ASSERT(IsAligned(alignment, static_cast(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast(OS::AllocateAlignment())); + void* reservation = mmap(OS::GetRandomMmapAddr(), + request_size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + if (reservation == MAP_FAILED) return; + + Address base = static_cast
(reservation); + Address aligned_base = RoundUp(base, alignment); + ASSERT_LE(base, aligned_base); + + // Unmap extra memory reserved before and after the desired block. + if (aligned_base != base) { + size_t prefix_size = static_cast(aligned_base - base); + OS::Free(base, prefix_size); + request_size -= prefix_size; + } + + size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); + ASSERT_LE(aligned_size, request_size); + + if (aligned_size != request_size) { + size_t suffix_size = request_size - aligned_size; + OS::Free(aligned_base + aligned_size, suffix_size); + request_size -= suffix_size; + } + + ASSERT(aligned_size == request_size); + + address_ = static_cast(aligned_base); + size_ = aligned_size; +} + + +VirtualMemory::~VirtualMemory() { + if (IsReserved()) { + bool result = ReleaseRegion(address(), size()); + ASSERT(result); + USE(result); + } +} + + +bool VirtualMemory::IsReserved() { + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + +bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + return UncommitRegion(address, size); +} + + +bool VirtualMemory::Guard(void* address) { + OS::Guard(address, OS::CommitPageSize()); + return true; +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + void* result = mmap(OS::GetRandomMmapAddr(), + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + + if (result == MAP_FAILED) return NULL; + + return result; +} + + +bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); + if (MAP_FAILED == mmap(base, + size, + prot, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, + kMmapFd, + kMmapFdOffset)) { + return false; + } + return true; +} + + +bool VirtualMemory::UncommitRegion(void* base, size_t size) { + return mmap(base, + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, + kMmapFd, + kMmapFdOffset) != MAP_FAILED; +} + + +bool VirtualMemory::ReleaseRegion(void* base, size_t size) { + return munmap(base, size) == 0; +} + + +bool VirtualMemory::HasLazyCommits() { + // TODO(alph): implement for the platform. + return false; +} + } } // namespace v8::internal diff --git a/src/platform-win32.cc b/src/platform-win32.cc index 8f188a0..ea4f7ea 100644 --- a/src/platform-win32.cc +++ b/src/platform-win32.cc @@ -69,6 +69,11 @@ int strncasecmp(const char* s1, const char* s2, int n) { #define _TRUNCATE 0 #define STRUNCATE 80 +inline void MemoryBarrier() { + int barrier = 0; + __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier)); +} + #endif // __MINGW64_VERSION_MAJOR @@ -123,6 +128,11 @@ int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) { namespace v8 { namespace internal { +intptr_t OS::MaxVirtualMemory() { + return 0; +} + + double ceiling(double x) { return ceil(x); } @@ -733,6 +743,127 @@ void OS::StrNCpy(Vector dest, const char* src, size_t n) { #undef STRUNCATE +// Get the system's page size used by VirtualAlloc() or the next power +// of two. The reason for always returning a power of two is that the +// rounding up in OS::Allocate expects that. 
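+// (V8's RoundUp/RoundDown round via bit masking, which is only correct when
+// the multiple is a power of two.)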
+static size_t GetPageSize() {
+  static size_t page_size = 0;
+  if (page_size == 0) {
+    SYSTEM_INFO info;
+    GetSystemInfo(&info);
+    page_size = RoundUpToPowerOf2(info.dwPageSize);
+  }
+  return page_size;
+}
+
+
+// The allocation alignment is the guaranteed alignment for
+// VirtualAlloc'ed blocks of memory.
+size_t OS::AllocateAlignment() {
+  static size_t allocate_alignment = 0;
+  if (allocate_alignment == 0) {
+    SYSTEM_INFO info;
+    GetSystemInfo(&info);
+    allocate_alignment = info.dwAllocationGranularity;
+  }
+  return allocate_alignment;
+}
+
+
+void* OS::GetRandomMmapAddr() {
+  Isolate* isolate = Isolate::UncheckedCurrent();
+  // Note that the current isolate isn't set up in a call path via
+  // CpuFeatures::Probe. We don't care about randomization in this case because
+  // the code page is immediately freed.
+  if (isolate != NULL) {
+    // The address range used to randomize RWX allocations in OS::Allocate.
+    // Try not to map pages into the default range that Windows loads DLLs.
+    // Use a multiple of 64k to prevent committing unused memory.
+    // Note: This does not guarantee RWX regions will be within the
+    // range kAllocationRandomAddressMin to kAllocationRandomAddressMax.
+#ifdef V8_HOST_ARCH_64_BIT
+    static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+    static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+#else
+    static const intptr_t kAllocationRandomAddressMin = 0x04000000;
+    static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+#endif
+    uintptr_t address =
+        (isolate->random_number_generator()->NextInt() << kPageSizeBits) |
+        kAllocationRandomAddressMin;
+    address &= kAllocationRandomAddressMax;
+    return reinterpret_cast<void*>(address);
+  }
+  return NULL;
+}
+
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+  LPVOID base = NULL;
+
+  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+    // For executable pages try to randomize the allocation address.
+    for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
+      base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
+    }
+  }
+
+  // After three attempts give up and let the OS find an address to use.
+  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+  return base;
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  // VirtualAlloc rounds allocated size to page size automatically.
+  size_t msize = RoundUp(requested, static_cast<size_t>(GetPageSize()));
+
+  // Windows XP SP2 allows Data Execution Prevention (DEP).
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+
+  LPVOID mbase = RandomizedVirtualAlloc(msize,
+                                        MEM_COMMIT | MEM_RESERVE,
+                                        prot);
+
+  if (mbase == NULL) {
+    LOG(Isolate::Current(), StringEvent("OS::Allocate", "VirtualAlloc failed"));
+    return NULL;
+  }
+
+  ASSERT(IsAligned(reinterpret_cast<uintptr_t>(mbase), OS::AllocateAlignment()));
+
+  *allocated = msize;
+  return mbase;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+  // TODO(1240712): VirtualFree has a return value which is ignored here.
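+  // Note that MEM_RELEASE requires a dwSize of 0 and frees the entire
+  // reservation in one call, which is also why 'size' goes unused below.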
+ VirtualFree(address, 0, MEM_RELEASE); + USE(size); +} + + +intptr_t OS::CommitPageSize() { + return 4096; +} + + +void OS::ProtectCode(void* address, const size_t size) { + DWORD old_protect; + VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect); +} + + +void OS::Guard(void* address, const size_t size) { + DWORD oldprotect; + VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect); +} + + void OS::Sleep(int milliseconds) { ::Sleep(milliseconds); } @@ -1237,6 +1368,111 @@ int OS::ActivationFrameAlignment() { } +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + ASSERT(IsAligned(alignment, static_cast(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast(OS::AllocateAlignment())); + void* address = ReserveRegion(request_size); + if (address == NULL) return; + Address base = RoundUp(static_cast
(address), alignment); + // Try reducing the size by freeing and then reallocating a specific area. + bool result = ReleaseRegion(address, request_size); + USE(result); + ASSERT(result); + address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS); + if (address != NULL) { + request_size = size; + ASSERT(base == static_cast
<Address>(address));
+  } else {
+    // Resizing failed, just go with a bigger area.
+    address = ReserveRegion(request_size);
+    if (address == NULL) return;
+  }
+  address_ = address;
+  size_ = request_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  ASSERT(IsReserved());
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  if (NULL == VirtualAlloc(address,
+                           OS::CommitPageSize(),
+                           MEM_COMMIT,
+                           PAGE_NOACCESS)) {
+    return false;
+  }
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return VirtualFree(base, 0, MEM_RELEASE) != 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
+
+
 // ----------------------------------------------------------------------------
 // Win32 thread support.
diff --git a/src/platform.h b/src/platform.h
index 848966e..ee8fb92 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -219,6 +219,30 @@ class OS {
   static void PrintError(const char* format, ...);
   static void VPrintError(const char* format, va_list args);
+
+  // Allocate/Free memory used by JS heap. Pages are readable/writable, but
+  // they are not guaranteed to be executable unless 'is_executable' is true.
+  // Returns the address of allocated memory, or NULL on failure.
+  static void* Allocate(const size_t requested,
+                        size_t* allocated,
+                        bool is_executable);
+  static void Free(void* address, const size_t size);
+
+  // This is the granularity at which the ProtectCode(...) call can set page
+  // permissions.
+  static intptr_t CommitPageSize();
+
+  // Mark code segments non-writable.
+  static void ProtectCode(void* address, const size_t size);
+
+  // Assign memory as a guard page so that access will cause an exception.
+  static void Guard(void* address, const size_t size);
+
+  // Generate a random address to be used for hinting mmap().
+  static void* GetRandomMmapAddr();
+
+  // Get the alignment guaranteed by Allocate().
+  static size_t AllocateAlignment();
+
   // Sleep for a number of milliseconds.
   static void Sleep(const int milliseconds);
@@ -279,6 +303,10 @@ class OS {
   // positions indicated by the members of the CpuFeature enum from globals.h
   static uint64_t CpuFeaturesImpliedByPlatform();
+
+  // Maximum size of the virtual memory. 0 means there is no artificial
+  // limit.
+  static intptr_t MaxVirtualMemory();
+
   // Returns the double constant NAN.
   static double nan_value();
@@ -358,6 +386,99 @@ class OS {
   DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
 };
+// Represents and controls an area of reserved memory.
+// Control of the reserved memory can be assigned to another VirtualMemory
+// object by assignment or copy-constructing. This removes the reserved memory
+// from the original object.
+class VirtualMemory {
+ public:
+  // Empty VirtualMemory object, controlling no reserved memory.
+  VirtualMemory();
+
+  // Reserves virtual memory of the given size.
+  explicit VirtualMemory(size_t size);
+
+  // Reserves virtual memory containing an area of the given size that
+  // is aligned per alignment. This may not be at the position returned
+  // by address().
+  VirtualMemory(size_t size, size_t alignment);
+
+  // Releases the reserved memory, if any, controlled by this VirtualMemory
+  // object.
+  ~VirtualMemory();
+
+  // Returns whether the memory has been reserved.
+  bool IsReserved();
+
+  // Initializes or resets an embedded VirtualMemory object.
+  void Reset();
+
+  // Returns the start address of the reserved memory.
+  // If the memory was reserved with an alignment, this address is not
+  // necessarily aligned. The user might need to round it up to a multiple of
+  // the alignment to get the start of the aligned block.
+  void* address() {
+    ASSERT(IsReserved());
+    return address_;
+  }
+
+  // Returns the size of the reserved memory. The returned value is only
+  // meaningful when IsReserved() returns true.
+  // If the memory was reserved with an alignment, this size may be larger
+  // than the requested size.
+  size_t size() { return size_; }
+
+  // Commits real memory. Returns whether the operation succeeded.
+  bool Commit(void* address, size_t size, bool is_executable);
+
+  // Uncommits real memory. Returns whether the operation succeeded.
+  bool Uncommit(void* address, size_t size);
+
+  // Creates a single guard page at the given address.
+  bool Guard(void* address);
+
+  void Release() {
+    ASSERT(IsReserved());
+    // Notice: Order is important here. The VirtualMemory object might live
+    // inside the allocated region.
+    void* address = address_;
+    size_t size = size_;
+    Reset();
+    bool result = ReleaseRegion(address, size);
+    USE(result);
+    ASSERT(result);
+  }
+
+  // Assigns control of the reserved region to a different VirtualMemory
+  // object. The old object is no longer functional (IsReserved() returns
+  // false).
+  void TakeControl(VirtualMemory* from) {
+    ASSERT(!IsReserved());
+    address_ = from->address_;
+    size_ = from->size_;
+    from->Reset();
+  }
+
+  static void* ReserveRegion(size_t size);
+
+  static bool CommitRegion(void* base, size_t size, bool is_executable);
+
+  static bool UncommitRegion(void* base, size_t size);
+
+  // Must be called with a base pointer that has been returned by ReserveRegion
+  // and the same size it was reserved with.
+  static bool ReleaseRegion(void* base, size_t size);
+
+  // Returns true if the OS performs lazy commits, i.e. the memory allocation
+  // call defers actual physical memory allocation till the first memory
+  // access. Otherwise returns false.
+  static bool HasLazyCommits();
+
+ private:
+  void* address_;  // Start address of the virtual memory.
+  size_t size_;  // Size of the virtual memory.
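+
+  // A minimal usage sketch (illustrative only; the size is arbitrary):
+  //
+  //   VirtualMemory reservation(1 * MB);
+  //   if (reservation.IsReserved() &&
+  //       reservation.Commit(reservation.address(), OS::CommitPageSize(),
+  //                          false)) {
+  //     // Use the committed page; the destructor releases the reservation.
+  //   }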
+}; + + // ---------------------------------------------------------------------------- // Thread // diff --git a/src/platform/elapsed-timer.h b/src/platform/elapsed-timer.h index 2311db2..8044bd0 100644 --- a/src/platform/elapsed-timer.h +++ b/src/platform/elapsed-timer.h @@ -103,7 +103,7 @@ class ElapsedTimer V8_FINAL BASE_EMBEDDED { } private: - static V8_INLINE TimeTicks Now() { + V8_INLINE(static TimeTicks Now()) { TimeTicks now = TimeTicks::HighResNow(); ASSERT(!now.IsNull()); return now; diff --git a/src/platform/mutex.cc b/src/platform/mutex.cc index ad97740..1a7c69a 100644 --- a/src/platform/mutex.cc +++ b/src/platform/mutex.cc @@ -34,7 +34,7 @@ namespace internal { #if V8_OS_POSIX -static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) { +static V8_INLINE(void InitializeNativeHandle(pthread_mutex_t* mutex)) { int result; #if defined(DEBUG) // Use an error checking mutex in debug mode. @@ -55,7 +55,7 @@ static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) { } -static V8_INLINE void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex) { +static V8_INLINE(void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex)) { pthread_mutexattr_t attr; int result = pthread_mutexattr_init(&attr); ASSERT_EQ(0, result); @@ -69,28 +69,28 @@ static V8_INLINE void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex) { } -static V8_INLINE void DestroyNativeHandle(pthread_mutex_t* mutex) { +static V8_INLINE(void DestroyNativeHandle(pthread_mutex_t* mutex)) { int result = pthread_mutex_destroy(mutex); ASSERT_EQ(0, result); USE(result); } -static V8_INLINE void LockNativeHandle(pthread_mutex_t* mutex) { +static V8_INLINE(void LockNativeHandle(pthread_mutex_t* mutex)) { int result = pthread_mutex_lock(mutex); ASSERT_EQ(0, result); USE(result); } -static V8_INLINE void UnlockNativeHandle(pthread_mutex_t* mutex) { +static V8_INLINE(void UnlockNativeHandle(pthread_mutex_t* mutex)) { int result = pthread_mutex_unlock(mutex); ASSERT_EQ(0, result); USE(result); } -static V8_INLINE bool TryLockNativeHandle(pthread_mutex_t* mutex) { +static V8_INLINE(bool TryLockNativeHandle(pthread_mutex_t* mutex)) { int result = pthread_mutex_trylock(mutex); if (result == EBUSY) { return false; @@ -101,32 +101,32 @@ static V8_INLINE bool TryLockNativeHandle(pthread_mutex_t* mutex) { #elif V8_OS_WIN -static V8_INLINE void InitializeNativeHandle(PCRITICAL_SECTION cs) { +static V8_INLINE(void InitializeNativeHandle(PCRITICAL_SECTION cs)) { InitializeCriticalSection(cs); } -static V8_INLINE void InitializeRecursiveNativeHandle(PCRITICAL_SECTION cs) { +static V8_INLINE(void InitializeRecursiveNativeHandle(PCRITICAL_SECTION cs)) { InitializeCriticalSection(cs); } -static V8_INLINE void DestroyNativeHandle(PCRITICAL_SECTION cs) { +static V8_INLINE(void DestroyNativeHandle(PCRITICAL_SECTION cs)) { DeleteCriticalSection(cs); } -static V8_INLINE void LockNativeHandle(PCRITICAL_SECTION cs) { +static V8_INLINE(void LockNativeHandle(PCRITICAL_SECTION cs)) { EnterCriticalSection(cs); } -static V8_INLINE void UnlockNativeHandle(PCRITICAL_SECTION cs) { +static V8_INLINE(void UnlockNativeHandle(PCRITICAL_SECTION cs)) { LeaveCriticalSection(cs); } -static V8_INLINE bool TryLockNativeHandle(PCRITICAL_SECTION cs) { +static V8_INLINE(bool TryLockNativeHandle(PCRITICAL_SECTION cs)) { return TryEnterCriticalSection(cs); } diff --git a/src/platform/mutex.h b/src/platform/mutex.h index 0f899ca..f08de4f 100644 --- a/src/platform/mutex.h +++ b/src/platform/mutex.h @@ -94,14 +94,14 @@ class Mutex V8_FINAL { int 
level_; #endif - V8_INLINE void AssertHeldAndUnmark() { + V8_INLINE(void AssertHeldAndUnmark()) { #ifdef DEBUG ASSERT_EQ(1, level_); level_--; #endif } - V8_INLINE void AssertUnheldAndMark() { + V8_INLINE(void AssertUnheldAndMark()) { #ifdef DEBUG ASSERT_EQ(0, level_); level_++; diff --git a/src/platform/socket.h b/src/platform/socket.h index ff8c1de..e9e2fa2 100644 --- a/src/platform/socket.h +++ b/src/platform/socket.h @@ -66,7 +66,7 @@ class Socket V8_FINAL { // Set the value of the SO_REUSEADDR socket option. bool SetReuseAddress(bool reuse_address); - V8_INLINE bool IsValid() const { + V8_INLINE(bool IsValid()) const { return native_handle_ != kInvalidNativeHandle; } diff --git a/src/platform/virtual-memory.cc b/src/platform/virtual-memory.cc deleted file mode 100644 index f72bc90..0000000 --- a/src/platform/virtual-memory.cc +++ /dev/null @@ -1,513 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "platform/virtual-memory.h" - -#if V8_OS_POSIX -#include -#include -#include -#include - -#include -#endif - -#if V8_OS_MACOSX -#include -#endif - -#include - -#include "platform/mutex.h" -#include "utils.h" -#include "utils/random-number-generator.h" -#if V8_OS_CYGIN || V8_OS_WIN -#include "win32-headers.h" -#endif - -namespace v8 { -namespace internal { - -class RandomAddressGenerator V8_FINAL { - public: - V8_INLINE uintptr_t NextAddress() { - LockGuard lock_guard(&mutex_); - uintptr_t address = rng_.NextInt(); -#if V8_HOST_ARCH_64_BIT - address = (address << 32) + static_cast(rng_.NextInt()); -#endif - return address; - } - - private: - Mutex mutex_; - RandomNumberGenerator rng_; -}; - -typedef LazyInstance, - ThreadSafeInitOnceTrait>::type LazyRandomAddressGenerator; - -#define LAZY_RANDOM_ADDRESS_GENERATOR_INITIALIZER LAZY_INSTANCE_INITIALIZER - - -static V8_INLINE void* GenerateRandomAddress() { -#if V8_OS_NACL - // TODO(bradchen): Restore randomization once Native Client gets smarter - // about using mmap address hints. 
- // See http://code.google.com/p/nativeclient/issues/3341 - return NULL; -#else // V8_OS_NACL - LazyRandomAddressGenerator random_address_generator = - LAZY_RANDOM_ADDRESS_GENERATOR_INITIALIZER; - uintptr_t address = random_address_generator.Pointer()->NextAddress(); - -# if V8_TARGET_ARCH_X64 -# if V8_OS_CYGWIN || V8_OS_WIN - // Try not to map pages into the default range that windows loads DLLs. - // Use a multiple of 64KiB to prevent committing unused memory. - address += V8_UINT64_C(0x00080000000); - address &= V8_UINT64_C(0x3ffffff0000); -# else // V8_OS_CYGWIN || V8_OS_WIN - // Currently available CPUs have 48 bits of virtual addressing. Truncate - // the hint address to 46 bits to give the kernel a fighting chance of - // fulfilling our placement request. - address &= V8_UINT64_C(0x3ffffffff000); -# endif // V8_OS_CYGWIN || V8_OS_WIN -# else // V8_TARGET_ARCH_X64 -# if V8_OS_CYGWIN || V8_OS_WIN - // Try not to map pages into the default range that windows loads DLLs. - // Use a multiple of 64KiB to prevent committing unused memory. - address += 0x04000000; - address &= 0x3fff0000; -# elif V8_OS_SOLARIS - // For our Solaris/illumos mmap hint, we pick a random address in the bottom - // half of the top half of the address space (that is, the third quarter). - // Because we do not MAP_FIXED, this will be treated only as a hint -- the - // system will not fail to mmap() because something else happens to already - // be mapped at our random address. We deliberately set the hint high enough - // to get well above the system's break (that is, the heap); Solaris and - // illumos will try the hint and if that fails allocate as if there were - // no hint at all. The high hint prevents the break from getting hemmed in - // at low values, ceding half of the address space to the system heap. - address &= 0x3ffff000; - address += 0x80000000; -# else // V8_OS_CYGWIN || V8_OS_WIN - // The range 0x20000000 - 0x60000000 is relatively unpopulated across a - // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on Mac OS X - // 10.6 and 10.7. - address &= 0x3ffff000; - address += 0x20000000; -# endif // V8_OS_CYGIN || V8_OS_WIN -# endif // V8_TARGET_ARCH_X64 - return reinterpret_cast(address); -#endif // V8_OS_NACL -} - - -// static -void* VirtualMemory::AllocateRegion(size_t size, - size_t* size_return, - Executability executability) { - ASSERT_LT(0, size); - ASSERT_NE(NULL, size_return); - void* address = ReserveRegion(size, &size); - if (address == NULL) return NULL; - if (!CommitRegion(address, size, executability)) { - bool result = ReleaseRegion(address, size); - ASSERT(result); - USE(result); - return NULL; - } - *size_return = size; - return address; -} - -#if V8_OS_CYGWIN || V8_OS_WIN - -// static -void* VirtualMemory::ReserveRegion(size_t size, size_t* size_return) { - ASSERT_LT(0, size); - ASSERT_NE(NULL, size_return); - // The minimum size that can be reserved is 64KiB, see - // http://msdn.microsoft.com/en-us/library/ms810627.aspx - if (size < 64 * KB) { - size = 64 * KB; - } - size = RoundUp(size, GetPageSize()); - LPVOID address = NULL; - // Try and randomize the allocation address (up to three attempts). - for (unsigned attempts = 0; address == NULL && attempts < 3; ++attempts) { - address = VirtualAlloc(GenerateRandomAddress(), - size, - MEM_RESERVE, - PAGE_NOACCESS); - } - if (address == NULL) { - // After three attempts give up and let the kernel find an address. 
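The hint arithmetic above is easier to see in isolation: draw 64 random bits, mask them down to a page-aligned address the hardware can actually map (46 bits on current x64), and hand the result to the allocator as a non-binding hint. A sketch of the Linux/x64 case, where rand() stands in for V8's mutex-guarded RandomNumberGenerator and the mask constant is the one from the deleted code:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/mman.h>

// Mask a random value down to a 46-bit, 4 KiB-aligned address hint.
static void* RandomMmapHint() {
  uint64_t address = (static_cast<uint64_t>(rand()) << 32) |
                     static_cast<uint32_t>(rand());
  address &= 0x3ffffffff000ULL;  // 46 address bits, page aligned.
  return reinterpret_cast<void*>(static_cast<uintptr_t>(address));
}

static void* ReserveWithRandomHint(size_t size) {
  // PROT_NONE + MAP_NORESERVE: reserve address space only, commit later.
  void* result = mmap(RandomMmapHint(), size, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  return result == MAP_FAILED ? NULL : result;
}

Because MAP_FIXED is absent, a colliding hint simply degrades to a kernel-chosen address, the same fallback the Windows path above reaches after three VirtualAlloc attempts.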
- address = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS); - } - if (address == NULL) { - return NULL; - } - ASSERT(IsAligned(reinterpret_cast(address), - GetAllocationGranularity())); - *size_return = size; - return address; -} - - -// static -void* VirtualMemory::ReserveRegion(size_t size, - size_t* size_return, - size_t alignment) { - ASSERT_LT(0, size); - ASSERT_NE(NULL, size_return); - ASSERT(IsAligned(alignment, GetAllocationGranularity())); - - size_t reserved_size = RoundUp(size + alignment, GetAllocationGranularity()); - Address reserved_base = static_cast
<Address>( - ReserveRegion(reserved_size, &reserved_size)); - if (reserved_base == NULL) { - return NULL; - } - ASSERT_LE(size, reserved_size); - ASSERT_LE(size + alignment, reserved_size); - ASSERT(IsAligned(reserved_size, GetPageSize())); - - // Try reducing the size by freeing and then reallocating a specific area. - bool result = ReleaseRegion(reserved_base, reserved_size); - USE(result); - ASSERT(result); - size_t aligned_size = RoundUp(size, GetPageSize()); - Address aligned_base = static_cast<Address>
( - VirtualAlloc(RoundUp(reserved_base, alignment), - aligned_size, - MEM_RESERVE, - PAGE_NOACCESS)); - if (aligned_base != NULL) { - ASSERT(aligned_base == RoundUp(reserved_base, alignment)); - ASSERT(IsAligned(reinterpret_cast(aligned_base), - GetAllocationGranularity())); - ASSERT(IsAligned(aligned_size, GetPageSize())); - *size_return = aligned_size; - return aligned_base; - } - - // Resizing failed, just go with a bigger area. - ASSERT(IsAligned(reserved_size, GetAllocationGranularity())); - return ReserveRegion(reserved_size, size_return); -} - - -// static -bool VirtualMemory::CommitRegion(void* address, - size_t size, - Executability executability) { - ASSERT_NE(NULL, address); - ASSERT_LT(0, size); - DWORD protect = 0; - switch (executability) { - case NOT_EXECUTABLE: - protect = PAGE_READWRITE; - break; - - case EXECUTABLE: - protect = PAGE_EXECUTE_READWRITE; - break; - } - LPVOID result = VirtualAlloc(address, size, MEM_COMMIT, protect); - if (result == NULL) { - ASSERT(GetLastError() != ERROR_INVALID_ADDRESS); - return false; - } - ASSERT_EQ(address, result); - return true; -} - - -// static -bool VirtualMemory::UncommitRegion(void* address, size_t size) { - ASSERT_NE(NULL, address); - ASSERT_LT(0, size); - int result = VirtualFree(address, size, MEM_DECOMMIT); - if (result == 0) { - return false; - } - return true; -} - - -// static -bool VirtualMemory::WriteProtectRegion(void* address, size_t size) { - ASSERT_NE(NULL, address); - ASSERT_LT(0, size); - DWORD old_protect; - return VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect); -} - - -// static -bool VirtualMemory::ReleaseRegion(void* address, size_t size) { - ASSERT_NE(NULL, address); - ASSERT_LT(0, size); - USE(size); - int result = VirtualFree(address, 0, MEM_RELEASE); - if (result == 0) { - return false; - } - return true; -} - - -// static -size_t VirtualMemory::GetAllocationGranularity() { - static size_t allocation_granularity = 0; - if (allocation_granularity == 0) { - SYSTEM_INFO system_info; - GetSystemInfo(&system_info); - allocation_granularity = system_info.dwAllocationGranularity; - MemoryBarrier(); - } - ASSERT_GE(allocation_granularity, GetPageSize()); - return allocation_granularity; -} - - -// static -size_t VirtualMemory::GetLimit() { - return 0; -} - - -// static -size_t VirtualMemory::GetPageSize() { - static size_t page_size = 0; - if (page_size == 0) { - SYSTEM_INFO system_info; - GetSystemInfo(&system_info); - page_size = system_info.dwPageSize; - MemoryBarrier(); - } - return page_size; -} - - -#else // V8_OS_CYGIN || V8_OS_WIN - - -// Constants used for mmap. -#if V8_OS_MACOSX -// kMmapFd is used to pass vm_alloc flags to tag the region with the user -// defined tag 255 This helps identify V8-allocated regions in memory analysis -// tools like vmmap(1). 
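The kMmapFd comment above (the constant itself is defined just below) describes a Darwin-specific trick: for MAP_ANON mappings the fd argument can carry a user tag built with VM_MAKE_TAG, and vmmap(1) then groups the regions under that tag. A macOS-only sketch with an illustrative helper name:

#include <stddef.h>
#include <sys/mman.h>
#include <mach/vm_statistics.h>

// Reserve an anonymous region tagged with user tag 255; vmmap(1)
// reports such regions under that tag, making V8 memory easy to spot.
static void* ReserveTagged(size_t size) {
  void* result = mmap(NULL, size, PROT_NONE,
                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                      VM_MAKE_TAG(255), 0);
  return result == MAP_FAILED ? NULL : result;
}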
-static const int kMmapFd = VM_MAKE_TAG(255); -#else -static const int kMmapFd = -1; -#endif // V8_OS_MACOSX -static const off_t kMmapFdOffset = 0; - - -// static -void* VirtualMemory::ReserveRegion(size_t size, size_t* size_return) { - ASSERT_LT(0, size); - ASSERT_NE(NULL, size_return); - - size = RoundUp(size, GetPageSize()); - void* address = mmap(GenerateRandomAddress(), - size, - PROT_NONE, - MAP_ANON | MAP_NORESERVE | MAP_PRIVATE, - kMmapFd, - kMmapFdOffset); - if (address == MAP_FAILED) { - ASSERT_NE(EINVAL, errno); - return NULL; - } - *size_return = size; - return address; -} - - -// static -void* VirtualMemory::ReserveRegion(size_t size, - size_t* size_return, - size_t alignment) { - ASSERT_LT(0, size); - ASSERT_NE(NULL, size_return); - ASSERT(IsAligned(alignment, GetPageSize())); - - size_t reserved_size; - Address reserved_base = static_cast
( - ReserveRegion(size + alignment, &reserved_size)); - if (reserved_base == NULL) { - return NULL; - } - - Address aligned_base = RoundUp(reserved_base, alignment); - ASSERT_LE(reserved_base, aligned_base); - - // Unmap extra memory reserved before the aligned region. - if (aligned_base != reserved_base) { - size_t prefix_size = static_cast(aligned_base - reserved_base); - bool result = ReleaseRegion(reserved_base, prefix_size); - ASSERT(result); - USE(result); - reserved_size -= prefix_size; - } - - size_t aligned_size = RoundUp(size, GetPageSize()); - ASSERT_LE(aligned_size, reserved_size); - - // Unmap extra memory reserved after the aligned region. - if (aligned_size != reserved_size) { - size_t suffix_size = reserved_size - aligned_size; - bool result = ReleaseRegion(aligned_base + aligned_size, suffix_size); - ASSERT(result); - USE(result); - reserved_size -= suffix_size; - } - - ASSERT(aligned_size == reserved_size); - ASSERT_NE(NULL, aligned_base); - - *size_return = aligned_size; - return aligned_base; -} - - -// static -bool VirtualMemory::CommitRegion(void* address, - size_t size, - Executability executability) { - ASSERT_NE(NULL, address); - ASSERT_LT(0, size); - int prot = 0; - // The Native Client port of V8 uses an interpreter, - // so code pages don't need PROT_EXEC. -#if V8_OS_NACL - executability = NOT_EXECUTABLE; -#endif - switch (executability) { - case NOT_EXECUTABLE: - prot = PROT_READ | PROT_WRITE; - break; - - case EXECUTABLE: - prot = PROT_EXEC | PROT_READ | PROT_WRITE; - break; - } - void* result = mmap(address, - size, - prot, - MAP_ANON | MAP_FIXED | MAP_PRIVATE, - kMmapFd, - kMmapFdOffset); - if (result == MAP_FAILED) { - ASSERT_NE(EINVAL, errno); - return false; - } - return true; -} - - -// static -bool VirtualMemory::UncommitRegion(void* address, size_t size) { - ASSERT_NE(NULL, address); - ASSERT_LT(0, size); - void* result = mmap(address, - size, - PROT_NONE, - MAP_ANON | MAP_FIXED | MAP_NORESERVE | MAP_PRIVATE, - kMmapFd, - kMmapFdOffset); - if (result == MAP_FAILED) { - ASSERT_NE(EINVAL, errno); - return false; - } - return true; -} - - -// static -bool VirtualMemory::WriteProtectRegion(void* address, size_t size) { - ASSERT_NE(NULL, address); - ASSERT_LT(0, size); -#if V8_OS_NACL - // The Native Client port of V8 uses an interpreter, - // so code pages don't need PROT_EXEC. - int prot = PROT_READ; -#else - int prot = PROT_EXEC | PROT_READ; -#endif - int result = mprotect(address, size, prot); - if (result < 0) { - ASSERT_NE(EINVAL, errno); - return false; - } - return true; -} - - -// static -bool VirtualMemory::ReleaseRegion(void* address, size_t size) { - ASSERT_NE(NULL, address); - ASSERT_LT(0, size); - int result = munmap(address, size); - if (result < 0) { - ASSERT_NE(EINVAL, errno); - return false; - } - return true; -} - - -// static -size_t VirtualMemory::GetAllocationGranularity() { - return GetPageSize(); -} - - -// static -size_t VirtualMemory::GetLimit() { - struct rlimit rlim; - int result = getrlimit(RLIMIT_DATA, &rlim); - ASSERT_EQ(0, result); - USE(result); - return rlim.rlim_cur; -} - - -// static -size_t VirtualMemory::GetPageSize() { - static const size_t kPageSize = getpagesize(); - return kPageSize; -} - -#endif // V8_OS_CYGWIN || V8_OS_WIN - -} } // namespace v8::internal diff --git a/src/platform/virtual-memory.h b/src/platform/virtual-memory.h deleted file mode 100644 index 9a62c32..0000000 --- a/src/platform/virtual-memory.h +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_PLATFORM_VIRTUAL_MEMORY_H_ -#define V8_PLATFORM_VIRTUAL_MEMORY_H_ - -#include "checks.h" -#include "globals.h" - -namespace v8 { -namespace internal { - -// ----------------------------------------------------------------------------- -// VirtualMemory -// -// This class represents and controls an area of reserved memory. -// Control of the reserved memory can be assigned to another VirtualMemory -// object by assignment or copy-constructing. This removes the reserved memory -// from the original object. -class VirtualMemory V8_FINAL { - public: - // The executability of a memory region. - enum Executability { NOT_EXECUTABLE, EXECUTABLE }; - - // Empty VirtualMemory object, controlling no reserved memory. - VirtualMemory() : address_(NULL), size_(0) {} - - // Reserves virtual memory with size. - explicit VirtualMemory(size_t size) : size_(0) { - address_ = ReserveRegion(size, &size_); - } - - // Reserves virtual memory containing an area of the given size that - // is aligned per alignment. This may not be at the position returned - // by address(). - VirtualMemory(size_t size, size_t alignment) : size_(0) { - address_ = ReserveRegion(size, &size_, alignment); - } - - // Releases the reserved memory, if any, controlled by this VirtualMemory - // object. - ~VirtualMemory() { - if (IsReserved()) { - bool result = ReleaseRegion(address_, size_); - ASSERT(result); - USE(result); - } - } - - // Returns whether the memory contains the specified address. - bool Contains(const void* address) const V8_WARN_UNUSED_RESULT { - if (!IsReserved()) return false; - if (address < address_) return false; - if (address >= reinterpret_cast(address_) + size_) return false; - return true; - } - - // Returns whether the memory has been reserved. - bool IsReserved() const V8_WARN_UNUSED_RESULT { - return address_ != NULL; - } - - // Initialize or resets an embedded VirtualMemory object. - void Reset() { - address_ = NULL; - size_ = 0; - } - - // Returns the start address of the reserved memory. 
The returned value is - // only meaningful if |IsReserved()| returns true. - // If the memory was reserved with an alignment, this address is not - // necessarily aligned. The user might need to round it up to a multiple of - // the alignment to get the start of the aligned block. - void* address() const V8_WARN_UNUSED_RESULT { return address_; } - - // Returns the size of the reserved memory. The returned value is only - // meaningful when |IsReserved()| returns true. - // If the memory was reserved with an alignment, this size may be larger - // than the requested size. - size_t size() const V8_WARN_UNUSED_RESULT { return size_; } - - // Commits real memory. Returns whether the operation succeeded. - bool Commit(void* address, - size_t size, - Executability executability) V8_WARN_UNUSED_RESULT { - ASSERT(IsReserved()); - ASSERT(Contains(address)); - ASSERT(Contains(reinterpret_cast(address) + size - 1)); - return CommitRegion(address, size, executability); - } - - // Uncommit real memory. Returns whether the operation succeeded. - bool Uncommit(void* address, size_t size) V8_WARN_UNUSED_RESULT { - ASSERT(IsReserved()); - ASSERT(Contains(address)); - ASSERT(Contains(reinterpret_cast(address) + size - 1)); - return UncommitRegion(address, size); - } - - // Creates guard pages at the given address. - bool Guard(void* address, size_t size) V8_WARN_UNUSED_RESULT { - // We can simply uncommit the specified pages. Any access - // to them will cause a processor exception. - return Uncommit(address, size); - } - - void Release() { - ASSERT(IsReserved()); - // WARNING: Order is important here. The VirtualMemory - // object might live inside the allocated region. - void* address = address_; - size_t size = size_; - Reset(); - bool result = ReleaseRegion(address, size); - USE(result); - ASSERT(result); - } - - // Assign control of the reserved region to a different VirtualMemory object. - // The old object is no longer functional (IsReserved() returns false). - void TakeControl(VirtualMemory* from) { - ASSERT(!IsReserved()); - address_ = from->address_; - size_ = from->size_; - from->Reset(); - } - - // Allocates a region of memory pages. The pages are readable/writable, - // but are not guaranteed to be executable unless explicitly requested. - // Returns the base address of the allocated memory region, or NULL in - // case of an error. - static void* AllocateRegion(size_t size, - size_t* size_return, - Executability executability) - V8_WARN_UNUSED_RESULT; - - static void* ReserveRegion(size_t size, - size_t* size_return) V8_WARN_UNUSED_RESULT; - - static void* ReserveRegion(size_t size, - size_t* size_return, - size_t alignment) V8_WARN_UNUSED_RESULT; - - static bool CommitRegion(void* address, - size_t size, - Executability executability) V8_WARN_UNUSED_RESULT; - - static bool UncommitRegion(void* address, size_t size) V8_WARN_UNUSED_RESULT; - - // Mark code segments readable-executable. - static bool WriteProtectRegion(void* address, - size_t size) V8_WARN_UNUSED_RESULT; - - // Must be called with a base pointer that has been returned by ReserveRegion - // and the same size it was reserved with. - static bool ReleaseRegion(void* address, size_t size) V8_WARN_UNUSED_RESULT; - - // The granularity for the starting address at which virtual memory can be - // reserved (or allocated in terms of the underlying operating system). - static size_t GetAllocationGranularity() V8_PURE; - - // The maximum size of the virtual memory. 0 means there is no artificial - // limit. 
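Guard() above works because an uncommitted page carries no access rights at all, so any read, write, or execute in it raises a fault; no dedicated guard-page API is needed. The deleted UncommitRegion achieved this by re-mmapping the range with PROT_NONE; the sketch below shows the simpler mprotect equivalent (illustrative, not V8's code):

#include <stddef.h>
#include <sys/mman.h>

// Turn a committed page back into a guard page: any later access in
// [page, page + page_size) traps (SIGSEGV on POSIX).
static bool MakeGuardPage(void* page, size_t page_size) {
  return mprotect(page, page_size, PROT_NONE) == 0;
}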
- static size_t GetLimit() V8_PURE; - - // The page size and the granularity of page protection and commitment. - static size_t GetPageSize() V8_PURE; - - // Returns true if OS performs lazy commits, i.e. the memory allocation call - // defers actual physical memory allocation till the first memory access. - // Otherwise returns false. - static V8_INLINE bool HasLazyCommits() { -#if V8_OS_LINUX - return true; -#else - return false; -#endif - } - - private: - void* address_; // Start address of the virtual memory. - size_t size_; // Size of the virtual memory. -}; - -} } // namespace v8::internal - -#endif // V8_PLATFORM_VIRTUAL_MEMORY_H_ diff --git a/src/spaces-inl.h b/src/spaces-inl.h index 37002e6..be2ae2a 100644 --- a/src/spaces-inl.h +++ b/src/spaces-inl.h @@ -125,11 +125,43 @@ HeapObject* HeapObjectIterator::FromCurrentPage() { } +// ----------------------------------------------------------------------------- +// MemoryAllocator + +#ifdef ENABLE_HEAP_PROTECTION + +void MemoryAllocator::Protect(Address start, size_t size) { + OS::Protect(start, size); +} + + +void MemoryAllocator::Unprotect(Address start, + size_t size, + Executability executable) { + OS::Unprotect(start, size, executable); +} + + +void MemoryAllocator::ProtectChunkFromPage(Page* page) { + int id = GetChunkId(page); + OS::Protect(chunks_[id].address(), chunks_[id].size()); +} + + +void MemoryAllocator::UnprotectChunkFromPage(Page* page) { + int id = GetChunkId(page); + OS::Unprotect(chunks_[id].address(), chunks_[id].size(), + chunks_[id].owner()->executable() == EXECUTABLE); +} + +#endif + + // -------------------------------------------------------------------------- // PagedSpace Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, - VirtualMemory::Executability executability, + Executability executable, PagedSpace* owner) { Page* page = reinterpret_cast(chunk); ASSERT(page->area_size() <= kNonCodeObjectAreaSize); diff --git a/src/spaces.cc b/src/spaces.cc index 84cce8a..2faf419 100644 --- a/src/spaces.cc +++ b/src/spaces.cc @@ -245,8 +245,7 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size, bool CodeRange::CommitRawMemory(Address start, size_t length) { - return isolate_->memory_allocator()->CommitMemory( - start, length, VirtualMemory::EXECUTABLE); + return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE); } @@ -258,9 +257,7 @@ bool CodeRange::UncommitRawMemory(Address start, size_t length) { void CodeRange::FreeRawMemory(Address address, size_t length) { ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); free_list_.Add(FreeBlock(address, length)); - bool result = code_range_->Uncommit(address, length); - ASSERT(result); - USE(result); + code_range_->Uncommit(address, length); } @@ -311,8 +308,8 @@ void MemoryAllocator::TearDown() { bool MemoryAllocator::CommitMemory(Address base, size_t size, - VirtualMemory::Executability executability) { - if (!VirtualMemory::CommitRegion(base, size, executability)) { + Executability executable) { + if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) { return false; } UpdateAllocatedSpaceLimits(base, base + size); @@ -321,7 +318,7 @@ bool MemoryAllocator::CommitMemory(Address base, void MemoryAllocator::FreeMemory(VirtualMemory* reservation, - VirtualMemory::Executability executability) { + Executability executable) { // TODO(gc) make code_range part of memory allocator? 
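GetLimit(), declared above, was defined in the deleted POSIX code as the soft data-segment limit. Pulled out on its own, the query looks like this (helper name is illustrative):

#include <stddef.h>
#include <cassert>
#include <sys/resource.h>

// Soft limit on the data segment; an unlimited rlim_cur means there is
// no artificial bound, matching the "0 means no limit" convention above.
static size_t GetDataLimit() {
  struct rlimit rlim;
  int result = getrlimit(RLIMIT_DATA, &rlim);
  assert(result == 0);
  (void) result;
  return rlim.rlim_cur;
}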
ASSERT(reservation->IsReserved()); size_t size = reservation->size(); @@ -330,38 +327,36 @@ void MemoryAllocator::FreeMemory(VirtualMemory* reservation, isolate_->counters()->memory_allocated()->Decrement(static_cast(size)); - if (executability == VirtualMemory::EXECUTABLE) { + if (executable == EXECUTABLE) { ASSERT(size_executable_ >= size); size_executable_ -= size; } // Code which is part of the code-range does not have its own VirtualMemory. ASSERT(!isolate_->code_range()->contains( static_cast
(reservation->address()))); - ASSERT(executability == VirtualMemory::NOT_EXECUTABLE || - !isolate_->code_range()->exists()); + ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); reservation->Release(); } void MemoryAllocator::FreeMemory(Address base, size_t size, - VirtualMemory::Executability executability) { + Executability executable) { // TODO(gc) make code_range part of memory allocator? ASSERT(size_ >= size); size_ -= size; isolate_->counters()->memory_allocated()->Decrement(static_cast(size)); - if (executability == VirtualMemory::EXECUTABLE) { + if (executable == EXECUTABLE) { ASSERT(size_executable_ >= size); size_executable_ -= size; } if (isolate_->code_range()->contains(static_cast
(base))) { - ASSERT(executability == VirtualMemory::EXECUTABLE); + ASSERT(executable == EXECUTABLE); isolate_->code_range()->FreeRawMemory(base, size); } else { - ASSERT(executability == VirtualMemory::NOT_EXECUTABLE || - !isolate_->code_range()->exists()); + ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); bool result = VirtualMemory::ReleaseRegion(base, size); USE(result); ASSERT(result); @@ -383,18 +378,17 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, } -Address MemoryAllocator::AllocateAlignedMemory( - size_t reserve_size, - size_t commit_size, - size_t alignment, - VirtualMemory::Executability executability, - VirtualMemory* controller) { +Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size, + size_t commit_size, + size_t alignment, + Executability executable, + VirtualMemory* controller) { ASSERT(commit_size <= reserve_size); VirtualMemory reservation; Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation); if (base == NULL) return NULL; - if (executability == VirtualMemory::EXECUTABLE) { + if (executable == EXECUTABLE) { if (!CommitExecutableMemory(&reservation, base, commit_size, @@ -402,7 +396,7 @@ Address MemoryAllocator::AllocateAlignedMemory( base = NULL; } } else { - if (reservation.Commit(base, commit_size, VirtualMemory::NOT_EXECUTABLE)) { + if (reservation.Commit(base, commit_size, false)) { UpdateAllocatedSpaceLimits(base, base + commit_size); } else { base = NULL; @@ -439,7 +433,7 @@ NewSpacePage* NewSpacePage::Initialize(Heap* heap, Page::kPageSize, area_start, area_end, - VirtualMemory::NOT_EXECUTABLE, + NOT_EXECUTABLE, semi_space); chunk->set_next_chunk(NULL); chunk->set_prev_chunk(NULL); @@ -470,7 +464,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, size_t size, Address area_start, Address area_end, - VirtualMemory::Executability executability, + Executability executable, Space* owner) { MemoryChunk* chunk = FromAddress(base); @@ -502,7 +496,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); - if (executability == VirtualMemory::EXECUTABLE) { + if (executable == EXECUTABLE) { chunk->SetFlag(IS_EXECUTABLE); } @@ -519,10 +513,9 @@ bool MemoryChunk::CommitArea(size_t requested) { size_t guard_size = IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0; size_t header_size = area_start() - address() - guard_size; - size_t commit_size = RoundUp(header_size + requested, - VirtualMemory::GetPageSize()); + size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize()); size_t committed_size = RoundUp(header_size + (area_end() - area_start()), - VirtualMemory::GetPageSize()); + OS::CommitPageSize()); if (commit_size > committed_size) { // Commit size should be less or equal than the reserved size. @@ -531,10 +524,10 @@ bool MemoryChunk::CommitArea(size_t requested) { Address start = address() + committed_size + guard_size; size_t length = commit_size - committed_size; if (reservation_.IsReserved()) { - VirtualMemory::Executability executability = IsFlagSet(IS_EXECUTABLE) - ? VirtualMemory::EXECUTABLE : VirtualMemory::NOT_EXECUTABLE; + Executability executable = IsFlagSet(IS_EXECUTABLE) + ? 
EXECUTABLE : NOT_EXECUTABLE; if (!heap()->isolate()->memory_allocator()->CommitMemory( - start, length, executability)) { + start, length, executable)) { return false; } } else { @@ -596,11 +589,10 @@ void MemoryChunk::Unlink() { } -MemoryChunk* MemoryAllocator::AllocateChunk( - intptr_t reserve_area_size, - intptr_t commit_area_size, - VirtualMemory::Executability executability, - Space* owner) { +MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, + intptr_t commit_area_size, + Executability executable, + Space* owner) { ASSERT(commit_area_size <= reserve_area_size); size_t chunk_size; @@ -640,9 +632,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk( // +----------------------------+<- base + chunk_size // - if (executability == VirtualMemory::EXECUTABLE) { + if (executable == EXECUTABLE) { chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, - VirtualMemory::GetPageSize()) + CodePageGuardSize(); + OS::CommitPageSize()) + CodePageGuardSize(); // Check executable memory limit. if (size_executable_ + chunk_size > capacity_executable_) { @@ -654,7 +646,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk( // Size of header (not executable) plus area (executable). size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, - VirtualMemory::GetPageSize()); + OS::CommitPageSize()); // Allocate executable memory either from code range or from the // OS. if (isolate_->code_range()->exists()) { @@ -671,7 +663,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk( base = AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment, - executability, + executable, &reservation); if (base == NULL) return NULL; // Update executable memory size. @@ -687,14 +679,13 @@ MemoryChunk* MemoryAllocator::AllocateChunk( area_end = area_start + commit_area_size; } else { chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size, - VirtualMemory::GetPageSize()); - size_t commit_size = RoundUp( - MemoryChunk::kObjectStartOffset + commit_area_size, - VirtualMemory::GetPageSize()); + OS::CommitPageSize()); + size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset + + commit_area_size, OS::CommitPageSize()); base = AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment, - executability, + executable, &reservation); if (base == NULL) return NULL; @@ -723,7 +714,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk( chunk_size, area_start, area_end, - executability, + executable, owner); result->set_reserved_memory(&reservation); return result; @@ -739,25 +730,23 @@ void Page::ResetFreeListStatistics() { } -Page* MemoryAllocator::AllocatePage( - intptr_t size, - PagedSpace* owner, - VirtualMemory::Executability executability) { - MemoryChunk* chunk = AllocateChunk(size, size, executability, owner); +Page* MemoryAllocator::AllocatePage(intptr_t size, + PagedSpace* owner, + Executability executable) { + MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); if (chunk == NULL) return NULL; - return Page::Initialize(isolate_->heap(), chunk, executability, owner); + return Page::Initialize(isolate_->heap(), chunk, executable, owner); } -LargePage* MemoryAllocator::AllocateLargePage( - intptr_t object_size, - Space* owner, - VirtualMemory::Executability executability) { +LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, + Space* owner, + Executability executable) { MemoryChunk* chunk = AllocateChunk(object_size, object_size, - executability, + executable, owner); if (chunk == NULL) return NULL; return 
LargePage::Initialize(isolate_->heap(), chunk); @@ -780,19 +769,19 @@ void MemoryAllocator::Free(MemoryChunk* chunk) { VirtualMemory* reservation = chunk->reserved_memory(); if (reservation->IsReserved()) { - FreeMemory(reservation, chunk->executability()); + FreeMemory(reservation, chunk->executable()); } else { FreeMemory(chunk->address(), chunk->size(), - chunk->executability()); + chunk->executable()); } } bool MemoryAllocator::CommitBlock(Address start, size_t size, - VirtualMemory::Executability executability) { - if (!CommitMemory(start, size, executability)) return false; + Executability executable) { + if (!CommitMemory(start, size, executable)) return false; if (Heap::ShouldZapGarbage()) { ZapBlock(start, size); @@ -877,12 +866,12 @@ void MemoryAllocator::ReportStatistics() { int MemoryAllocator::CodePageGuardStartOffset() { // We are guarding code pages: the first OS page after the header // will be protected as non-writable. - return RoundUp(Page::kObjectStartOffset, VirtualMemory::GetPageSize()); + return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize()); } int MemoryAllocator::CodePageGuardSize() { - return static_cast(VirtualMemory::GetPageSize()); + return static_cast(OS::CommitPageSize()); } @@ -896,7 +885,7 @@ int MemoryAllocator::CodePageAreaStartOffset() { int MemoryAllocator::CodePageAreaEndOffset() { // We are guarding code pages: the last OS page will be protected as // non-writable. - return Page::kPageSize - static_cast(VirtualMemory::GetPageSize()); + return Page::kPageSize - static_cast(OS::CommitPageSize()); } @@ -907,26 +896,24 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, // Commit page header (not executable). if (!vm->Commit(start, CodePageGuardStartOffset(), - VirtualMemory::NOT_EXECUTABLE)) { + false)) { return false; } // Create guard page after the header. - if (!vm->Guard(start + CodePageGuardStartOffset(), - VirtualMemory::GetPageSize())) { + if (!vm->Guard(start + CodePageGuardStartOffset())) { return false; } // Commit page body (executable). if (!vm->Commit(start + CodePageAreaStartOffset(), commit_size - CodePageGuardStartOffset(), - VirtualMemory::EXECUTABLE)) { + true)) { return false; } // Create guard page before the end. 
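CommitExecutableMemory above carves each code chunk into four stripes: a non-executable header rounded up to an OS page, a guard page, the executable body, and a trailing guard page. The offset arithmetic, redone with hypothetical constants (a 4 KiB OS page and a 256-byte header) rather than V8's real ones:

#include <stddef.h>

static const size_t kOSPageSize = 4096;       // hypothetical OS page
static const size_t kObjectStartOffset = 256; // hypothetical header size

static size_t GuardStartOffset() {
  // Header rounded up to a whole OS page so the guard page that follows
  // is page-aligned: RoundUp(256, 4096) == 4096.
  return ((kObjectStartOffset + kOSPageSize - 1) / kOSPageSize) * kOSPageSize;
}

static size_t AreaStartOffset() {
  // Executable body starts after the guard page: 4096 + 4096 == 8192.
  return GuardStartOffset() + kOSPageSize;
}

static size_t AreaEndOffset(size_t chunk_size) {
  // The last OS page of the chunk is the trailing guard page.
  return chunk_size - kOSPageSize;
}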
- if (!vm->Guard(start + reserved_size - CodePageGuardSize(), - VirtualMemory::GetPageSize())) { + if (!vm->Guard(start + reserved_size - CodePageGuardSize())) { return false; } @@ -955,8 +942,8 @@ void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) { PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, - VirtualMemory::Executability executability) - : Space(heap, id, executability), + Executability executable) + : Space(heap, id, executable), free_list_(this), was_swept_conservatively_(false), first_unswept_page_(Page::FromAddress(NULL)), @@ -1054,7 +1041,7 @@ bool PagedSpace::Expand() { } Page* p = heap()->isolate()->memory_allocator()->AllocatePage( - size, this, executability()); + size, this, executable()); if (p == NULL) return false; ASSERT(Capacity() <= max_capacity_); @@ -1301,8 +1288,8 @@ void NewSpace::TearDown() { LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); ASSERT(reservation_.IsReserved()); - heap()->isolate()->memory_allocator()->FreeMemory( - &reservation_, VirtualMemory::NOT_EXECUTABLE); + heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, + NOT_EXECUTABLE); chunk_base_ = NULL; chunk_size_ = 0; } @@ -1537,7 +1524,7 @@ bool SemiSpace::Commit() { Address start = end - pages * Page::kPageSize; if (!heap()->isolate()->memory_allocator()->CommitBlock(start, capacity_, - executability())) { + executable())) { return false; } @@ -1594,9 +1581,9 @@ bool SemiSpace::GrowTo(int new_capacity) { Address start = end - new_capacity; size_t delta = new_capacity - capacity_; - ASSERT(IsAligned(delta, VirtualMemory::GetAllocationGranularity())); + ASSERT(IsAligned(delta, OS::AllocateAlignment())); if (!heap()->isolate()->memory_allocator()->CommitBlock( - start, delta, executability())) { + start, delta, executable())) { return false; } capacity_ = new_capacity; @@ -1629,7 +1616,7 @@ bool SemiSpace::ShrinkTo(int new_capacity) { Address space_end = start_ + maximum_capacity_; Address old_start = space_end - capacity_; size_t delta = capacity_ - new_capacity; - ASSERT(IsAligned(delta, VirtualMemory::GetAllocationGranularity())); + ASSERT(IsAligned(delta, OS::AllocateAlignment())); MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); if (!allocator->UncommitBlock(old_start, delta)) { @@ -2937,8 +2924,7 @@ static bool ComparePointers(void* key1, void* key2) { LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) - // Managed on a per-allocation basis - : Space(heap, id, VirtualMemory::NOT_EXECUTABLE), + : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis max_capacity_(max_capacity), first_page_(NULL), size_(0), @@ -2972,8 +2958,8 @@ void LargeObjectSpace::TearDown() { } -MaybeObject* LargeObjectSpace::AllocateRaw( - int object_size, VirtualMemory::Executability executability) { +MaybeObject* LargeObjectSpace::AllocateRaw(int object_size, + Executability executable) { // Check if we want to force a GC before growing the old space further. // If so, fail the allocation. 
if (!heap()->always_allocate() && @@ -2986,7 +2972,7 @@ MaybeObject* LargeObjectSpace::AllocateRaw( } LargePage* page = heap()->isolate()->memory_allocator()-> - AllocateLargePage(object_size, this, executability); + AllocateLargePage(object_size, this, executable); if (page == NULL) return Failure::RetryAfterGC(identity()); ASSERT(page->area_size() >= object_size); diff --git a/src/spaces.h b/src/spaces.h index 10a803e..1ccdacb 100644 --- a/src/spaces.h +++ b/src/spaces.h @@ -33,7 +33,6 @@ #include "list.h" #include "log.h" #include "platform/mutex.h" -#include "platform/virtual-memory.h" #include "v8utils.h" namespace v8 { @@ -574,10 +573,8 @@ class MemoryChunk { area_end_ = area_end; } - VirtualMemory::Executability executability() { - return IsFlagSet(IS_EXECUTABLE) - ? VirtualMemory::EXECUTABLE - : VirtualMemory::NOT_EXECUTABLE; + Executability executable() { + return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; } bool ContainsOnlyData() { @@ -719,7 +716,7 @@ class MemoryChunk { size_t size, Address area_start, Address area_end, - VirtualMemory::Executability executability, + Executability executable, Space* owner); friend class MemoryAllocator; @@ -799,7 +796,7 @@ class Page : public MemoryChunk { static inline Page* Initialize(Heap* heap, MemoryChunk* chunk, - VirtualMemory::Executability executable, + Executability executable, PagedSpace* owner); void InitializeAsAnchor(PagedSpace* owner); @@ -865,17 +862,15 @@ STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize); // Space is the abstract superclass for all allocation spaces. class Space : public Malloced { public: - Space(Heap* heap, - AllocationSpace id, - VirtualMemory::Executability executability) - : heap_(heap), id_(id), executability_(executability) {} + Space(Heap* heap, AllocationSpace id, Executability executable) + : heap_(heap), id_(id), executable_(executable) {} virtual ~Space() {} Heap* heap() const { return heap_; } // Does the space need executable memory? - VirtualMemory::Executability executability() { return executability_; } + Executability executable() { return executable_; } // Identity used in error reporting. AllocationSpace identity() { return id_; } @@ -902,7 +897,7 @@ class Space : public Malloced { private: Heap* heap_; AllocationSpace id_; - VirtualMemory::Executability executability_; + Executability executable_; }; @@ -1060,13 +1055,11 @@ class MemoryAllocator { void TearDown(); - Page* AllocatePage(intptr_t size, - PagedSpace* owner, - VirtualMemory::Executability executability); + Page* AllocatePage( + intptr_t size, PagedSpace* owner, Executability executable); - LargePage* AllocateLargePage(intptr_t object_size, - Space* owner, - VirtualMemory::Executability executability); + LargePage* AllocateLargePage( + intptr_t object_size, Space* owner, Executability executable); void Free(MemoryChunk* chunk); @@ -1092,7 +1085,7 @@ class MemoryAllocator { // Returns an indication of whether a pointer is in a space that has // been allocated by this MemoryAllocator. - V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const { + V8_INLINE(bool IsOutsideAllocatedSpace(const void* address)) const { return address < lowest_ever_allocated_ || address >= highest_ever_allocated_; } @@ -1107,7 +1100,7 @@ class MemoryAllocator { // could be committed later by calling MemoryChunk::CommitArea. 
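The reserve/commit split noted in the comment above, and visible in the AllocateChunk declaration that follows, is the classic two-phase pattern: claim address space up front with no access rights, then grant access range by range as the space fills. A POSIX sketch under that reading (the deleted CommitRegion re-mmapped with MAP_FIXED; mprotect is used here for brevity, and the helper names are illustrative):

#include <stddef.h>
#include <sys/mman.h>

// Phase 1: reserve only. PROT_NONE + MAP_NORESERVE means no physical
// or swap commitment, and every access faults until committed.
static void* Reserve(size_t reserve_size) {
  void* base = mmap(NULL, reserve_size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  return base == MAP_FAILED ? NULL : base;
}

// Phase 2 (repeatable): commit a page-aligned subrange by granting
// access, the way CommitArea grows the usable part of a chunk.
static bool Commit(void* base, size_t offset, size_t commit_size) {
  char* start = static_cast<char*>(base) + offset;
  return mprotect(start, commit_size, PROT_READ | PROT_WRITE) == 0;
}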
MemoryChunk* AllocateChunk(intptr_t reserve_area_size, intptr_t commit_area_size, - VirtualMemory::Executability executability, + Executability executable, Space* space); Address ReserveAlignedMemory(size_t requested, @@ -1116,26 +1109,19 @@ class MemoryAllocator { Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size, size_t alignment, - VirtualMemory::Executability executability, + Executability executable, VirtualMemory* controller); - bool CommitMemory(Address addr, - size_t size, - VirtualMemory::Executability executability); + bool CommitMemory(Address addr, size_t size, Executability executable); - void FreeMemory(VirtualMemory* reservation, - VirtualMemory::Executability executability); - void FreeMemory(Address addr, - size_t size, - VirtualMemory::Executability executability); + void FreeMemory(VirtualMemory* reservation, Executability executable); + void FreeMemory(Address addr, size_t size, Executability executable); // Commit a contiguous block of memory from the initial chunk. Assumes that // the address is not NULL, the size is greater than zero, and that the // block is contained in the initial chunk. Returns true if it succeeded // and false otherwise. - bool CommitBlock(Address start, - size_t size, - VirtualMemory::Executability executability); + bool CommitBlock(Address start, size_t size, Executability executable); // Uncommit a contiguous block of memory [start..(start+size)[. // start is not NULL, the size is greater than zero, and the @@ -1626,7 +1612,7 @@ class PagedSpace : public Space { PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, - VirtualMemory::Executability executability); + Executability executable); virtual ~PagedSpace() {} @@ -2051,7 +2037,7 @@ class SemiSpace : public Space { public: // Constructor. SemiSpace(Heap* heap, SemiSpaceId semispace) - : Space(heap, NEW_SPACE, VirtualMemory::NOT_EXECUTABLE), + : Space(heap, NEW_SPACE, NOT_EXECUTABLE), start_(NULL), age_mark_(NULL), id_(semispace), @@ -2304,7 +2290,7 @@ class NewSpace : public Space { public: // Constructor. explicit NewSpace(Heap* heap) - : Space(heap, NEW_SPACE, VirtualMemory::NOT_EXECUTABLE), + : Space(heap, NEW_SPACE, NOT_EXECUTABLE), to_space_(heap, kToSpace), from_space_(heap, kFromSpace), reservation_(), @@ -2569,8 +2555,8 @@ class OldSpace : public PagedSpace { OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, - VirtualMemory::Executability executability) - : PagedSpace(heap, max_capacity, id, executability) { + Executability executable) + : PagedSpace(heap, max_capacity, id, executable) { page_extra_ = 0; } @@ -2601,7 +2587,7 @@ class FixedSpace : public PagedSpace { intptr_t max_capacity, AllocationSpace id, int object_size_in_bytes) - : PagedSpace(heap, max_capacity, id, VirtualMemory::NOT_EXECUTABLE), + : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), object_size_in_bytes_(object_size_in_bytes) { page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes; } @@ -2741,8 +2727,8 @@ class LargeObjectSpace : public Space { // Shared implementation of AllocateRaw, AllocateRawCode and // AllocateRawFixedArray. - MUST_USE_RESULT MaybeObject* AllocateRaw( - int object_size, VirtualMemory::Executability executability); + MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size, + Executability executable); // Available bytes for objects in this space. 
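A mechanical change repeated throughout the spaces.h and spaces.cc hunks in this revert is translating the heap-level Executability enum back into the bool taken by the restored platform calls (hence the "+ false));" hunks above). Spelled out, the adapter is a single comparison:

// The heap code speaks Executability; the restored OS layer speaks bool.
// (Illustrative helper; the revert inlines this comparison at each site.)
enum Executability { NOT_EXECUTABLE, EXECUTABLE };

static inline bool IsExecutable(Executability executable) {
  return executable == EXECUTABLE;
}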
inline intptr_t Available(); diff --git a/src/store-buffer.cc b/src/store-buffer.cc index 7a0dc88..22a5467 100644 --- a/src/store-buffer.cc +++ b/src/store-buffer.cc @@ -72,8 +72,7 @@ void StoreBuffer::SetUp() { // Don't know the alignment requirements of the OS, but it is certainly not // less than 0xfff. ASSERT((reinterpret_cast(old_start_) & 0xfff) == 0); - int initial_length = - static_cast(VirtualMemory::GetPageSize() / kPointerSize); + int initial_length = static_cast(OS::CommitPageSize() / kPointerSize); ASSERT(initial_length > 0); ASSERT(initial_length <= kOldStoreBufferLength); old_limit_ = old_start_ + initial_length; @@ -82,7 +81,7 @@ void StoreBuffer::SetUp() { CHECK(old_virtual_memory_->Commit( reinterpret_cast(old_start_), (old_limit_ - old_start_) * kPointerSize, - VirtualMemory::NOT_EXECUTABLE)); + false)); ASSERT(reinterpret_cast
<Address>(start_) >= virtual_memory_->address()); ASSERT(reinterpret_cast<Address>
(limit_) >= virtual_memory_->address()); @@ -98,7 +97,7 @@ void StoreBuffer::SetUp() { CHECK(virtual_memory_->Commit(reinterpret_cast<Address>
(start_), kStoreBufferSize, - VirtualMemory::NOT_EXECUTABLE)); + false)); // Not executable. heap_->public_set_store_buffer_top(start_); hash_set_1_ = new uintptr_t[kHashSetLength]; @@ -155,7 +154,7 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) { size_t grow = old_limit_ - old_start_; // Double size. CHECK(old_virtual_memory_->Commit(reinterpret_cast(old_limit_), grow * kPointerSize, - VirtualMemory::NOT_EXECUTABLE)); + false)); old_limit_ += grow; } diff --git a/src/utils/random-number-generator.h b/src/utils/random-number-generator.h index bd7dca7..fc14ef4 100644 --- a/src/utils/random-number-generator.h +++ b/src/utils/random-number-generator.h @@ -59,7 +59,7 @@ class RandomNumberGenerator V8_FINAL { // that one int value is pseudorandomly generated and returned. // All 2^32 possible integer values are produced with (approximately) equal // probability. - V8_INLINE int NextInt() V8_WARN_UNUSED_RESULT { + V8_INLINE(int NextInt()) V8_WARN_UNUSED_RESULT { return Next(32); } @@ -76,7 +76,7 @@ class RandomNumberGenerator V8_FINAL { // |NextBoolean()| is that one boolean value is pseudorandomly generated and // returned. The values true and false are produced with (approximately) equal // probability. - V8_INLINE bool NextBool() V8_WARN_UNUSED_RESULT { + V8_INLINE(bool NextBool()) V8_WARN_UNUSED_RESULT { return Next(1) != 0; } diff --git a/src/v8globals.h b/src/v8globals.h index 6144631..7fa2fd6 100644 --- a/src/v8globals.h +++ b/src/v8globals.h @@ -201,6 +201,8 @@ enum PretenureFlag { NOT_TENURED, TENURED }; enum GarbageCollector { SCAVENGER, MARK_COMPACTOR }; +enum Executability { NOT_EXECUTABLE, EXECUTABLE }; + enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc index ce69d15..03a9887 100644 --- a/src/x64/codegen-x64.cc +++ b/src/x64/codegen-x64.cc @@ -58,8 +58,9 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) { size_t actual_size; // Allocate buffer in executable space. - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - 1 * KB, &actual_size, VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(1 * KB, + &actual_size, + true)); if (buffer == NULL) { // Fallback to library function if function cannot be created. 
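The codegen changes above restore the usual JIT buffer lifecycle: allocate writable, executable memory (OS::Allocate with true), emit machine code into it, flush the instruction cache (CPU::FlushICache), then drop write permission (OS::ProtectCode) before calling through a function pointer. A condensed POSIX sketch of the same lifecycle; the emitted bytes are a hypothetical x86-64 stub returning 42, and __builtin___clear_cache is the GCC/Clang stand-in for the i-cache flush:

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

typedef int (*GeneratedFn)();

static GeneratedFn MakeReturn42() {
  const size_t size = 4096;
  // Writable and executable while code is being emitted.
  void* buffer = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (buffer == MAP_FAILED) return NULL;

  // mov eax, 42; ret  (x86-64)
  static const uint8_t kCode[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };
  memcpy(buffer, kCode, sizeof(kCode));

  // On non-x86 targets an explicit i-cache flush is required here.
  __builtin___clear_cache(static_cast<char*>(buffer),
                          static_cast<char*>(buffer) + sizeof(kCode));

  // Equivalent of OS::ProtectCode: leave the code read+execute only.
  if (mprotect(buffer, size, PROT_READ | PROT_EXEC) != 0) {
    munmap(buffer, size);
    return NULL;
  }
  return reinterpret_cast<GeneratedFn>(buffer);
}

On platforms that forbid writable-and-executable mappings the initial mapping must be read+write and flipped to read+execute afterwards, which is exactly the transition the protect step performs.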
switch (type) { @@ -93,9 +94,7 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) { ASSERT(!RelocInfo::RequiresRelocation(desc)); CPU::FlushICache(buffer, actual_size); - bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size); - ASSERT(result); - USE(result); + OS::ProtectCode(buffer, actual_size); return FUNCTION_CAST(buffer); } @@ -103,8 +102,7 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) { UnaryMathFunction CreateExpFunction() { if (!FLAG_fast_math) return &exp; size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - 1 * KB, &actual_size, VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return &exp; ExternalReference::InitializeMathExpData(); @@ -127,9 +125,7 @@ UnaryMathFunction CreateExpFunction() { ASSERT(!RelocInfo::RequiresRelocation(desc)); CPU::FlushICache(buffer, actual_size); - bool ok = VirtualMemory::WriteProtectRegion(buffer, actual_size); - ASSERT(ok); - USE(ok); + OS::ProtectCode(buffer, actual_size); return FUNCTION_CAST(buffer); } @@ -137,8 +133,9 @@ UnaryMathFunction CreateExpFunction() { UnaryMathFunction CreateSqrtFunction() { size_t actual_size; // Allocate buffer in executable space. - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - 1 * KB, &actual_size, VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(1 * KB, + &actual_size, + true)); if (buffer == NULL) return &sqrt; MacroAssembler masm(NULL, buffer, static_cast(actual_size)); @@ -152,9 +149,7 @@ UnaryMathFunction CreateSqrtFunction() { ASSERT(!RelocInfo::RequiresRelocation(desc)); CPU::FlushICache(buffer, actual_size); - bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size); - ASSERT(result); - USE(result); + OS::ProtectCode(buffer, actual_size); return FUNCTION_CAST(buffer); } @@ -243,9 +238,7 @@ ModuloFunction CreateModuloFunction() { CodeDesc desc; masm.GetCode(&desc); - bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size); - ASSERT(result); - USE(result); + OS::ProtectCode(buffer, actual_size); // Call the function from C++ through this pointer. 
return FUNCTION_CAST(buffer); } diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp index 1f21294..0775cc5 100644 --- a/test/cctest/cctest.gyp +++ b/test/cctest/cctest.gyp @@ -108,7 +108,6 @@ 'test-unbound-queue.cc', 'test-utils.cc', 'test-version.cc', - 'test-virtual-memory.cc', 'test-weakmaps.cc', 'test-weaksets.cc', 'test-weaktypedarrays.cc' diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc index f4e40cd..6e47cd6 100644 --- a/test/cctest/test-api.cc +++ b/test/cctest/test-api.cc @@ -12670,8 +12670,8 @@ struct CopyablePersistentTraits { typedef Persistent > CopyablePersistent; static const bool kResetInDestructor = true; template - static V8_INLINE void Copy(const Persistent& source, - CopyablePersistent* dest) { + V8_INLINE(static void Copy(const Persistent& source, + CopyablePersistent* dest)) { // do nothing, just allow copy } }; diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc index 7fd4db4..f7d2311 100644 --- a/test/cctest/test-assembler-x64.cc +++ b/test/cctest/test-assembler-x64.cc @@ -35,7 +35,34 @@ #include "serialize.h" #include "cctest.h" -using namespace v8::internal; +using v8::internal::Assembler; +using v8::internal::Code; +using v8::internal::CodeDesc; +using v8::internal::FUNCTION_CAST; +using v8::internal::Immediate; +using v8::internal::Isolate; +using v8::internal::Label; +using v8::internal::OS; +using v8::internal::Operand; +using v8::internal::byte; +using v8::internal::greater; +using v8::internal::less_equal; +using v8::internal::equal; +using v8::internal::not_equal; +using v8::internal::r13; +using v8::internal::r15; +using v8::internal::r8; +using v8::internal::r9; +using v8::internal::rax; +using v8::internal::rbx; +using v8::internal::rbp; +using v8::internal::rcx; +using v8::internal::rdi; +using v8::internal::rdx; +using v8::internal::rsi; +using v8::internal::rsp; +using v8::internal::times_1; +using v8::internal::xmm0; // Test the x64 assembler by compiling some simple functions into // a buffer and executing them. These tests do not initialize the @@ -65,10 +92,9 @@ static const v8::internal::Register arg2 = rsi; TEST(AssemblerX64ReturnOperation) { // Allocate an executable page of memory. size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - Assembler::kMinimalBufferSize, - &actual_size, - VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, + &actual_size, + true)); CHECK(buffer); Assembler assm(Isolate::Current(), buffer, static_cast(actual_size)); @@ -88,10 +114,9 @@ TEST(AssemblerX64ReturnOperation) { TEST(AssemblerX64StackOperations) { // Allocate an executable page of memory. size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - Assembler::kMinimalBufferSize, - &actual_size, - VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, + &actual_size, + true)); CHECK(buffer); Assembler assm(Isolate::Current(), buffer, static_cast(actual_size)); @@ -121,10 +146,9 @@ TEST(AssemblerX64StackOperations) { TEST(AssemblerX64ArithmeticOperations) { // Allocate an executable page of memory. 
size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - Assembler::kMinimalBufferSize, - &actual_size, - VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, + &actual_size, + true)); CHECK(buffer); Assembler assm(Isolate::Current(), buffer, static_cast(actual_size)); @@ -144,10 +168,9 @@ TEST(AssemblerX64ArithmeticOperations) { TEST(AssemblerX64ImulOperation) { // Allocate an executable page of memory. size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - Assembler::kMinimalBufferSize, - &actual_size, - VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, + &actual_size, + true)); CHECK(buffer); Assembler assm(Isolate::Current(), buffer, static_cast(actual_size)); @@ -173,10 +196,9 @@ TEST(AssemblerX64ImulOperation) { TEST(AssemblerX64MemoryOperands) { // Allocate an executable page of memory. size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - Assembler::kMinimalBufferSize, - &actual_size, - VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, + &actual_size, + true)); CHECK(buffer); Assembler assm(Isolate::Current(), buffer, static_cast(actual_size)); @@ -208,10 +230,9 @@ TEST(AssemblerX64MemoryOperands) { TEST(AssemblerX64ControlFlow) { // Allocate an executable page of memory. size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - Assembler::kMinimalBufferSize, - &actual_size, - VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, + &actual_size, + true)); CHECK(buffer); Assembler assm(Isolate::Current(), buffer, static_cast(actual_size)); @@ -238,10 +259,9 @@ TEST(AssemblerX64ControlFlow) { TEST(AssemblerX64LoopImmediates) { // Allocate an executable page of memory. size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - Assembler::kMinimalBufferSize, - &actual_size, - VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, + &actual_size, + true)); CHECK(buffer); Assembler assm(Isolate::Current(), buffer, static_cast(actual_size)); // Assemble two loops using rax as counter, and verify the ending counts. diff --git a/test/cctest/test-code-stubs-arm.cc b/test/cctest/test-code-stubs-arm.cc index b4d23a4..c99433e 100644 --- a/test/cctest/test-code-stubs-arm.cc +++ b/test/cctest/test-code-stubs-arm.cc @@ -47,10 +47,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate, bool inline_fastpath) { // Allocate an executable page of memory. size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - Assembler::kMinimalBufferSize, - &actual_size, - VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, + &actual_size, + true)); CHECK(buffer); HandleScope handles(isolate); MacroAssembler masm(isolate, buffer, static_cast(actual_size)); diff --git a/test/cctest/test-code-stubs-ia32.cc b/test/cctest/test-code-stubs-ia32.cc index 2fde4a5..3f62175 100644 --- a/test/cctest/test-code-stubs-ia32.cc +++ b/test/cctest/test-code-stubs-ia32.cc @@ -47,10 +47,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate, Register destination_reg) { // Allocate an executable page of memory. 
size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - Assembler::kMinimalBufferSize, - &actual_size, - VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, + &actual_size, + true)); CHECK(buffer); HandleScope handles(isolate); MacroAssembler assm(isolate, buffer, static_cast(actual_size)); diff --git a/test/cctest/test-code-stubs-x64.cc b/test/cctest/test-code-stubs-x64.cc index a058118..4af5b45 100644 --- a/test/cctest/test-code-stubs-x64.cc +++ b/test/cctest/test-code-stubs-x64.cc @@ -46,10 +46,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate, Register destination_reg) { // Allocate an executable page of memory. size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - Assembler::kMinimalBufferSize, - &actual_size, - VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, + &actual_size, + true)); CHECK(buffer); HandleScope handles(isolate); MacroAssembler assm(isolate, buffer, static_cast(actual_size)); diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc index 4973ef5..a2070a5 100644 --- a/test/cctest/test-macro-assembler-x64.cc +++ b/test/cctest/test-macro-assembler-x64.cc @@ -35,7 +35,49 @@ #include "serialize.h" #include "cctest.h" -using namespace v8::internal; +using v8::internal::Assembler; +using v8::internal::CodeDesc; +using v8::internal::Condition; +using v8::internal::FUNCTION_CAST; +using v8::internal::HandleScope; +using v8::internal::Immediate; +using v8::internal::Isolate; +using v8::internal::Label; +using v8::internal::MacroAssembler; +using v8::internal::OS; +using v8::internal::Operand; +using v8::internal::RelocInfo; +using v8::internal::Smi; +using v8::internal::SmiIndex; +using v8::internal::byte; +using v8::internal::carry; +using v8::internal::greater; +using v8::internal::greater_equal; +using v8::internal::kIntSize; +using v8::internal::kPointerSize; +using v8::internal::kSmiTagMask; +using v8::internal::kSmiValueSize; +using v8::internal::less_equal; +using v8::internal::negative; +using v8::internal::not_carry; +using v8::internal::not_equal; +using v8::internal::not_zero; +using v8::internal::positive; +using v8::internal::r11; +using v8::internal::r13; +using v8::internal::r14; +using v8::internal::r15; +using v8::internal::r8; +using v8::internal::r9; +using v8::internal::rax; +using v8::internal::rbp; +using v8::internal::rbx; +using v8::internal::rcx; +using v8::internal::rdi; +using v8::internal::rdx; +using v8::internal::rsi; +using v8::internal::rsp; +using v8::internal::times_pointer_size; // Test the x64 assembler by compiling some simple functions into // a buffer and executing them. These tests do not initialize the @@ -111,10 +153,9 @@ TEST(SmiMove) { v8::internal::V8::Initialize(NULL); // Allocate an executable page of memory. size_t actual_size; - byte* buffer = static_cast(VirtualMemory::AllocateRegion( - Assembler::kMinimalBufferSize, - &actual_size, - VirtualMemory::EXECUTABLE)); + byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, + &actual_size, + true)); CHECK(buffer); Isolate* isolate = Isolate::Current(); HandleScope handles(isolate); @@ -200,10 +241,10 @@ TEST(SmiCompare) { v8::internal::V8::Initialize(NULL); // Allocate an executable page of memory. 
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize * 2,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -252,10 +293,9 @@ TEST(Integer32ToSmi) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -382,10 +422,9 @@ TEST(Integer64PlusConstantToSmi) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -428,10 +467,9 @@ TEST(SmiCheck) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -677,10 +715,10 @@ TEST(SmiNeg) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -768,10 +806,9 @@ TEST(SmiAdd) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -959,10 +996,10 @@ TEST(SmiSub) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize * 2,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1051,10 +1088,9 @@ TEST(SmiMul) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1158,10 +1194,10 @@ TEST(SmiDiv) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize * 2,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1269,10 +1305,10 @@ TEST(SmiMod) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize * 2,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1367,10 +1403,10 @@ TEST(SmiIndex) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize * 3,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1437,10 +1473,10 @@ TEST(SmiSelectNonSmi) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1517,10 +1553,10 @@ TEST(SmiAnd) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1599,10 +1635,10 @@ TEST(SmiOr) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1683,10 +1719,10 @@ TEST(SmiXor) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1751,10 +1787,10 @@ TEST(SmiNot) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1848,10 +1884,10 @@ TEST(SmiShiftLeft) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize * 4,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -1955,10 +1991,10 @@ TEST(SmiShiftLogicalRight) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize * 3,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -2025,10 +2061,10 @@ TEST(SmiShiftArithmeticRight) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize * 2,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -2090,10 +2126,10 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
   v8::internal::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize * 4,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
@@ -2134,10 +2170,10 @@ TEST(OperandOffset) {
 
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
-      Assembler::kMinimalBufferSize * 2,
-      &actual_size,
-      VirtualMemory::EXECUTABLE));
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
   CHECK(buffer);
   Isolate* isolate = Isolate::Current();
   HandleScope handles(isolate);
diff --git a/test/cctest/test-platform-linux.cc b/test/cctest/test-platform-linux.cc
index e0eae02..f289e94 100644
--- a/test/cctest/test-platform-linux.cc
+++ b/test/cctest/test-platform-linux.cc
@@ -39,6 +39,20 @@
 
 using namespace ::v8::internal;
 
+TEST(VirtualMemory) {
+  VirtualMemory* vm = new VirtualMemory(1 * MB);
+  CHECK(vm->IsReserved());
+  void* block_addr = vm->address();
+  size_t block_size = 4 * KB;
+  CHECK(vm->Commit(block_addr, block_size, false));
+  // Check whether we can write to memory.
+  int* addr = static_cast<int*>(block_addr);
+  addr[KB-1] = 2;
+  CHECK(vm->Uncommit(block_addr, block_size));
+  delete vm;
+}
+
+
 TEST(GetCurrentProcessId) {
   CHECK_EQ(static_cast<int>(getpid()), OS::GetCurrentProcessId());
 }
diff --git a/test/cctest/test-platform-win32.cc b/test/cctest/test-platform-win32.cc
index 3db5f39..d7fdab1 100644
--- a/test/cctest/test-platform-win32.cc
+++ b/test/cctest/test-platform-win32.cc
@@ -38,6 +38,20 @@
 
 using namespace ::v8::internal;
 
+TEST(VirtualMemory) {
+  VirtualMemory* vm = new VirtualMemory(1 * MB);
+  CHECK(vm->IsReserved());
+  void* block_addr = vm->address();
+  size_t block_size = 4 * KB;
+  CHECK(vm->Commit(block_addr, block_size, false));
+  // Check whether we can write to memory.
+  int* addr = static_cast<int*>(block_addr);
+  addr[KB-1] = 2;
+  CHECK(vm->Uncommit(block_addr, block_size));
+  delete vm;
+}
+
+
 TEST(GetCurrentProcessId) {
   CHECK_EQ(static_cast<int>(::GetCurrentProcessId()),
            OS::GetCurrentProcessId());
diff --git a/test/cctest/test-spaces.cc b/test/cctest/test-spaces.cc
index 2edb57f..3326a01 100644
--- a/test/cctest/test-spaces.cc
+++ b/test/cctest/test-spaces.cc
@@ -151,30 +151,30 @@ static void VerifyMemoryChunk(Isolate* isolate,
                               size_t reserve_area_size,
                               size_t commit_area_size,
                               size_t second_commit_area_size,
-                              VirtualMemory::Executability executability) {
+                              Executability executable) {
   MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
   CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                 heap->MaxExecutableSize()));
   TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
   TestCodeRangeScope test_code_range_scope(isolate, code_range);
 
-  size_t header_size = (executability == VirtualMemory::EXECUTABLE)
+  size_t header_size = (executable == EXECUTABLE)
                        ? MemoryAllocator::CodePageGuardStartOffset()
                        : MemoryChunk::kObjectStartOffset;
-  size_t guard_size = (executability == VirtualMemory::EXECUTABLE)
+  size_t guard_size = (executable == EXECUTABLE)
                      ? MemoryAllocator::CodePageGuardSize()
                      : 0;
 
   MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
                                                               commit_area_size,
-                                                              executability,
+                                                              executable,
                                                               NULL);
   size_t alignment = code_range->exists() ?
-      MemoryChunk::kAlignment : VirtualMemory::GetPageSize();
-  size_t reserved_size = ((executability == VirtualMemory::EXECUTABLE)) ?
+      MemoryChunk::kAlignment : OS::CommitPageSize();
+  size_t reserved_size = ((executable == EXECUTABLE)) ?
       RoundUp(header_size + guard_size + reserve_area_size + guard_size,
               alignment)
-      : RoundUp(header_size + reserve_area_size, VirtualMemory::GetPageSize());
+      : RoundUp(header_size + reserve_area_size, OS::CommitPageSize());
   CHECK(memory_chunk->size() == reserved_size);
   CHECK(memory_chunk->area_start() < memory_chunk->address() +
                                      memory_chunk->size());
@@ -230,7 +230,7 @@ TEST(MemoryChunk) {
                       reserve_area_size,
                       initial_commit_area_size,
                       second_commit_area_size,
-                      VirtualMemory::EXECUTABLE);
+                      EXECUTABLE);
 
     VerifyMemoryChunk(isolate,
                       heap,
@@ -238,7 +238,7 @@ TEST(MemoryChunk) {
                       reserve_area_size,
                       initial_commit_area_size,
                       second_commit_area_size,
-                      VirtualMemory::NOT_EXECUTABLE);
+                      NOT_EXECUTABLE);
     delete code_range;
 
     // Without CodeRange.
@@ -249,7 +249,7 @@ TEST(MemoryChunk) {
                       reserve_area_size,
                       initial_commit_area_size,
                       second_commit_area_size,
-                      VirtualMemory::EXECUTABLE);
+                      EXECUTABLE);
 
     VerifyMemoryChunk(isolate,
                       heap,
@@ -257,7 +257,7 @@ TEST(MemoryChunk) {
                       reserve_area_size,
                       initial_commit_area_size,
                       second_commit_area_size,
-                      VirtualMemory::NOT_EXECUTABLE);
+                      NOT_EXECUTABLE);
   }
 }
@@ -276,9 +276,9 @@ TEST(MemoryAllocator) {
   OldSpace faked_space(heap,
                        heap->MaxReserved(),
                        OLD_POINTER_SPACE,
-                       VirtualMemory::NOT_EXECUTABLE);
+                       NOT_EXECUTABLE);
   Page* first_page = memory_allocator->AllocatePage(
-      faked_space.AreaSize(), &faked_space, VirtualMemory::NOT_EXECUTABLE);
+      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
 
   first_page->InsertAfter(faked_space.anchor()->prev_page());
   CHECK(first_page->is_valid());
@@ -291,7 +291,7 @@ TEST(MemoryAllocator) {
 
   // Again, we should get n or n - 1 pages.
   Page* other = memory_allocator->AllocatePage(
-      faked_space.AreaSize(), &faked_space, VirtualMemory::NOT_EXECUTABLE);
+      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
   CHECK(other->is_valid());
   total_pages++;
   other->InsertAfter(first_page);
@@ -353,7 +353,7 @@ TEST(OldSpace) {
   OldSpace* s = new OldSpace(heap,
                              heap->MaxOldGenerationSize(),
                              OLD_POINTER_SPACE,
-                             VirtualMemory::NOT_EXECUTABLE);
+                             NOT_EXECUTABLE);
   CHECK(s != NULL);
 
   CHECK(s->SetUp());
@@ -377,8 +377,7 @@ TEST(LargeObjectSpace) {
 
   int lo_size = Page::kPageSize;
 
-  Object* obj = lo->AllocateRaw(
-      lo_size, VirtualMemory::NOT_EXECUTABLE)->ToObjectUnchecked();
+  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->ToObjectUnchecked();
   CHECK(obj->IsHeapObject());
 
   HeapObject* ho = HeapObject::cast(obj);
@@ -391,8 +390,8 @@ TEST(LargeObjectSpace) {
   while (true) {
     intptr_t available = lo->Available();
-    { MaybeObject* maybe_obj = lo->AllocateRaw(
-          lo_size, VirtualMemory::NOT_EXECUTABLE);
+    { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
       if (!maybe_obj->ToObject(&obj)) break;
     }
     CHECK(lo->Available() < available);
@@ -400,5 +398,5 @@ TEST(LargeObjectSpace) {
 
   CHECK(!lo->IsEmpty());
 
-  CHECK(lo->AllocateRaw(lo_size, VirtualMemory::NOT_EXECUTABLE)->IsFailure());
+  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->IsFailure());
 }
diff --git a/test/cctest/test-virtual-memory.cc b/test/cctest/test-virtual-memory.cc
deleted file mode 100644
index d441835..0000000
--- a/test/cctest/test-virtual-memory.cc
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cctest.h"
-#include "platform/virtual-memory.h"
-
-using namespace ::v8::internal;
-
-
-TEST(CommitAndUncommit) {
-  static const size_t kSize = 1 * MB;
-  static const size_t kBlockSize = 4 * KB;
-  VirtualMemory vm(kSize);
-  CHECK(vm.IsReserved());
-  void* block_addr = vm.address();
-  CHECK(vm.Commit(block_addr, kBlockSize, VirtualMemory::NOT_EXECUTABLE));
-  // Check whether we can write to memory.
-  int* addr = static_cast<int*>(block_addr);
-  addr[5] = 2;
-  CHECK(vm.Uncommit(block_addr, kBlockSize));
-}
-
-
-TEST(Release) {
-  static const size_t kSize = 4 * KB;
-  VirtualMemory vm(kSize);
-  CHECK(vm.IsReserved());
-  CHECK_LE(kSize, vm.size());
-  CHECK_NE(NULL, vm.address());
-  vm.Release();
-  CHECK(!vm.IsReserved());
-}
-
-
-TEST(TakeControl) {
-  static const size_t kSize = 64 * KB;
-
-  VirtualMemory vm1(kSize);
-  size_t size1 = vm1.size();
-  CHECK(vm1.IsReserved());
-  CHECK_LE(kSize, size1);
-
-  VirtualMemory vm2;
-  CHECK(!vm2.IsReserved());
-
-  vm2.TakeControl(&vm1);
-  CHECK(vm2.IsReserved());
-  CHECK(!vm1.IsReserved());
-  CHECK(vm2.size() == size1);
-}
-
-
-TEST(AllocationGranularityIsPowerOf2) {
-  CHECK(IsPowerOf2(VirtualMemory::GetAllocationGranularity()));
-}
-
-
-TEST(PageSizeIsPowerOf2) {
-  CHECK(IsPowerOf2(VirtualMemory::GetPageSize()));
-}
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index dde6d57..4f629ff 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -450,8 +450,6 @@
         '../../src/platform/semaphore.h',
         '../../src/platform/socket.cc',
        '../../src/platform/socket.h',
-        '../../src/platform/virtual-memory.cc',
-        '../../src/platform/virtual-memory.h',
         '../../src/preparse-data-format.h',
         '../../src/preparse-data.cc',
         '../../src/preparse-data.h',
-- 
2.7.4
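
For context on what the restored TEST(VirtualMemory) cases above exercise: the
VirtualMemory class reserves a large address range up front and then commits
small blocks on demand, which is the pattern this revert restores to the
platform-*.cc files. Below is a minimal standalone sketch of that
reserve/commit/uncommit cycle using plain POSIX mmap/mprotect. It is
illustrative only: it is not the V8 implementation, and the sizes and the
write offset are arbitrary choices mirroring the test.

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

int main() {
  const size_t kReserved = 1 << 20;  // 1 MB reservation, like the test's 1 * MB
  const size_t kBlock = 4 << 10;     // 4 KB committed block, like 4 * KB

  // "Reserve": PROT_NONE + MAP_NORESERVE maps address space without
  // committing any backing store.
  void* base = mmap(NULL, kReserved, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (base == MAP_FAILED) { perror("mmap"); return EXIT_FAILURE; }

  // "Commit": grant read/write on the first block so it can be touched.
  if (mprotect(base, kBlock, PROT_READ | PROT_WRITE) != 0) {
    perror("mprotect"); return EXIT_FAILURE;
  }
  // Write to the last int of the committed block (cf. addr[KB-1] = 2 above).
  static_cast<int*>(base)[kBlock / sizeof(int) - 1] = 2;

  // "Uncommit": revoke access again; the reservation itself stays intact.
  if (mprotect(base, kBlock, PROT_NONE) != 0) {
    perror("mprotect"); return EXIT_FAILURE;
  }

  munmap(base, kReserved);  // release the whole reservation
  return EXIT_SUCCESS;
}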