/**
* Creates an empty handle.
*/
- V8_INLINE Handle() : val_(0) {}
+ V8_INLINE(Handle()) : val_(0) {}
/**
* Creates a handle for the contents of the specified handle. This
* Handle<String> to a variable declared as Handle<Value>, is legal
* because String is a subclass of Value.
*/
- template <class S> V8_INLINE Handle(Handle<S> that)
+ template <class S> V8_INLINE(Handle(Handle<S> that))
: val_(reinterpret_cast<T*>(*that)) {
/**
* This check fails when trying to convert between incompatible
/**
* Returns true if the handle is empty.
*/
- V8_INLINE bool IsEmpty() const { return val_ == 0; }
+ V8_INLINE(bool IsEmpty() const) { return val_ == 0; }
/**
* Sets the handle to be empty. IsEmpty() will then return true.
*/
- V8_INLINE void Clear() { val_ = 0; }
+ V8_INLINE(void Clear()) { val_ = 0; }
- V8_INLINE T* operator->() const { return val_; }
+ V8_INLINE(T* operator->() const) { return val_; }
- V8_INLINE T* operator*() const { return val_; }
+ V8_INLINE(T* operator*() const) { return val_; }
/**
* Checks whether two handles are the same.
* to which they refer are identical.
* The handles' references are not checked.
*/
- template <class S> V8_INLINE bool operator==(const Handle<S>& that) const {
+ template <class S> V8_INLINE(bool operator==(const Handle<S>& that) const) {
internal::Object** a = reinterpret_cast<internal::Object**>(**this);
internal::Object** b = reinterpret_cast<internal::Object**>(*that);
if (a == 0) return b == 0;
return *a == *b;
}
- template <class S> V8_INLINE bool operator==(
- const Persistent<S>& that) const {
+ template <class S> V8_INLINE(
+ bool operator==(const Persistent<S>& that) const) {
internal::Object** a = reinterpret_cast<internal::Object**>(**this);
internal::Object** b = reinterpret_cast<internal::Object**>(*that);
if (a == 0) return b == 0;
* the objects to which they refer are different.
* The handles' references are not checked.
*/
- template <class S> V8_INLINE bool operator!=(const Handle<S>& that) const {
+ template <class S> V8_INLINE(bool operator!=(const Handle<S>& that) const) {
return !operator==(that);
}
- template <class S> V8_INLINE bool operator!=(
- const Persistent<S>& that) const {
+ template <class S> V8_INLINE(
+ bool operator!=(const Persistent<S>& that) const) {
return !operator==(that);
}
- template <class S> V8_INLINE static Handle<T> Cast(Handle<S> that) {
+ template <class S> V8_INLINE(static Handle<T> Cast(Handle<S> that)) {
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
// that the handle isn't empty before doing the checked cast.
return Handle<T>(T::Cast(*that));
}
- template <class S> V8_INLINE Handle<S> As() {
+ template <class S> V8_INLINE(Handle<S> As()) {
return Handle<S>::Cast(*this);
}
- V8_INLINE static Handle<T> New(Isolate* isolate, Handle<T> that) {
+ V8_INLINE(static Handle<T> New(Isolate* isolate, Handle<T> that)) {
return New(isolate, that.val_);
}
- V8_INLINE static Handle<T> New(Isolate* isolate, const Persistent<T>& that) {
+ V8_INLINE(static Handle<T> New(Isolate* isolate, const Persistent<T>& that)) {
return New(isolate, that.val_);
}
/**
* Creates a new handle for the specified value.
*/
- V8_INLINE explicit Handle(T* val) : val_(val) {}
+ V8_INLINE(explicit Handle(T* val)) : val_(val) {}
private:
friend class Utils;
friend class Context;
friend class HandleScope;
- V8_INLINE static Handle<T> New(Isolate* isolate, T* that);
+ V8_INLINE(static Handle<T> New(Isolate* isolate, T* that));
T* val_;
};
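// A minimal usage sketch (assuming this revision's API; CoerceToString is a
// hypothetical helper): V8_INLINE(declarator) expands to the declarator
// prefixed with the platform's inline attribute, so handle code reads as
// before:
//
//   V8_INLINE(static v8::Handle<v8::String> CoerceToString(
//       v8::Handle<v8::Value> v)) {
//     if (v.IsEmpty() || !v->IsString()) return v8::Handle<v8::String>();
//     return v8::Handle<v8::String>::Cast(v);  // checked if V8_ENABLE_CHECKS
//   }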
*/
template <class T> class Local : public Handle<T> {
public:
- V8_INLINE Local();
- template <class S> V8_INLINE Local(Local<S> that)
+ V8_INLINE(Local());
+ template <class S> V8_INLINE(Local(Local<S> that))
: Handle<T>(reinterpret_cast<T*>(*that)) {
/**
* This check fails when trying to convert between incompatible
}
- template <class S> V8_INLINE static Local<T> Cast(Local<S> that) {
+ template <class S> V8_INLINE(static Local<T> Cast(Local<S> that)) {
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
// that the handle isn't empty before doing the checked cast.
#endif
return Local<T>(T::Cast(*that));
}
- template <class S> V8_INLINE Local(Handle<S> that)
+ template <class S> V8_INLINE(Local(Handle<S> that))
: Handle<T>(reinterpret_cast<T*>(*that)) {
TYPE_CHECK(T, S);
}
- template <class S> V8_INLINE Local<S> As() {
+ template <class S> V8_INLINE(Local<S> As()) {
return Local<S>::Cast(*this);
}
* The referee is kept alive by the local handle even when
* the original handle is destroyed/disposed.
*/
- V8_INLINE static Local<T> New(Handle<T> that);
- V8_INLINE static Local<T> New(Isolate* isolate, Handle<T> that);
+ V8_INLINE(static Local<T> New(Handle<T> that));
+ V8_INLINE(static Local<T> New(Isolate* isolate, Handle<T> that));
template<class M>
- V8_INLINE static Local<T> New(Isolate* isolate,
- const Persistent<T, M>& that);
+ V8_INLINE(static Local<T> New(Isolate* isolate,
+ const Persistent<T, M>& that));
#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
private:
#endif
- template <class S> V8_INLINE Local(S* that) : Handle<T>(that) { }
+ template <class S> V8_INLINE(Local(S* that) : Handle<T>(that)) { }
private:
friend class Utils;
template<class F> friend class internal::CustomArguments;
friend class HandleScope;
- V8_INLINE static Local<T> New(Isolate* isolate, T* that);
+ V8_INLINE(static Local<T> New(Isolate* isolate, T* that));
};
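// A minimal sketch (assuming this revision's API; isolate, persistent_obj,
// key and value are assumed to exist): Local handles are rooted in a
// HandleScope, and Local<T>::New promotes a Persistent back into the
// current scope:
//
//   v8::HandleScope scope(isolate);
//   v8::Local<v8::Object> obj =
//       v8::Local<v8::Object>::New(isolate, persistent_obj);
//   if (!obj.IsEmpty()) obj->Set(key, value);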
// Eternal handles are set-once handles that live for the life of the isolate.
template <class T> class Eternal {
public:
- V8_INLINE Eternal() : index_(kInitialValue) { }
+ V8_INLINE(Eternal()) : index_(kInitialValue) { }
template<class S>
- V8_INLINE Eternal(Isolate* isolate, Local<S> handle) : index_(kInitialValue) {
+ V8_INLINE(Eternal(Isolate* isolate, Local<S> handle))
+ : index_(kInitialValue) {
Set(isolate, handle);
}
// Can only be safely called if already set.
- V8_INLINE Local<T> Get(Isolate* isolate);
- V8_INLINE bool IsEmpty() { return index_ == kInitialValue; }
- template<class S> V8_INLINE void Set(Isolate* isolate, Local<S> handle);
+ V8_INLINE(Local<T> Get(Isolate* isolate));
+ V8_INLINE(bool IsEmpty()) { return index_ == kInitialValue; }
+ template<class S>
+ V8_INLINE(void Set(Isolate* isolate, Local<S> handle));
private:
static const int kInitialValue = -1;
public:
typedef void (*Callback)(const WeakCallbackData<T, P>& data);
- V8_INLINE Isolate* GetIsolate() const { return isolate_; }
- V8_INLINE Local<T> GetValue() const { return handle_; }
- V8_INLINE P* GetParameter() const { return parameter_; }
+ V8_INLINE(Isolate* GetIsolate() const) { return isolate_; }
+ V8_INLINE(Local<T> GetValue() const) { return handle_; }
+ V8_INLINE(P* GetParameter() const) { return parameter_; }
private:
friend class internal::GlobalHandles;
typedef Persistent<T, NonCopyablePersistentTraits<T> > NonCopyablePersistent;
static const bool kResetInDestructor = false;
template<class S, class M>
- V8_INLINE static void Copy(const Persistent<S, M>& source,
- NonCopyablePersistent* dest) {
+ V8_INLINE(static void Copy(const Persistent<S, M>& source,
+ NonCopyablePersistent* dest)) {
Uncompilable<Object>();
}
// TODO(dcarney): come up with a good compile error here.
- template<class O> V8_INLINE static void Uncompilable() {
+ template<class O>
+ V8_INLINE(static void Uncompilable()) {
TYPE_CHECK(O, Primitive);
}
};
/**
* A Persistent with no storage cell.
*/
- V8_INLINE Persistent() : val_(0) { }
+ V8_INLINE(Persistent()) : val_(0) { }
/**
* Construct a Persistent from a Handle.
* When the Handle is non-empty, a new storage cell is created
* pointing to the same object, and no flags are set.
*/
- template <class S> V8_INLINE Persistent(Isolate* isolate, Handle<S> that)
+ template <class S> V8_INLINE(Persistent(Isolate* isolate, Handle<S> that))
: val_(New(isolate, *that)) {
TYPE_CHECK(T, S);
}
* pointing to the same object, and no flags are set.
*/
template <class S, class M2>
- V8_INLINE Persistent(Isolate* isolate, const Persistent<S, M2>& that)
+ V8_INLINE(Persistent(Isolate* isolate, const Persistent<S, M2>& that))
: val_(New(isolate, *that)) {
TYPE_CHECK(T, S);
}
* traits class is called, allowing the setting of flags based on the
* copied Persistent.
*/
- V8_INLINE Persistent(const Persistent& that) : val_(0) {
+ V8_INLINE(Persistent(const Persistent& that)) : val_(0) {
Copy(that);
}
template <class S, class M2>
- V8_INLINE Persistent(const Persistent<S, M2>& that) : val_(0) {
+ V8_INLINE(Persistent(const Persistent<S, M2>& that)) : val_(0) {
Copy(that);
}
- V8_INLINE Persistent& operator=(const Persistent& that) { // NOLINT
+ V8_INLINE(Persistent& operator=(const Persistent& that)) { // NOLINT
Copy(that);
return *this;
}
template <class S, class M2>
- V8_INLINE Persistent& operator=(const Persistent<S, M2>& that) { // NOLINT
+ V8_INLINE(Persistent& operator=(const Persistent<S, M2>& that)) { // NOLINT
Copy(that);
return *this;
}
* kResetInDestructor flags in the traits class. Since not calling dispose
* can result in a memory leak, it is recommended to always set this flag.
*/
- V8_INLINE ~Persistent() {
+ V8_INLINE(~Persistent()) {
if (M::kResetInDestructor) Reset();
}
* If non-empty, destroy the underlying storage cell
* IsEmpty() will return true after this call.
*/
- V8_INLINE void Reset();
+ V8_INLINE(void Reset());
/**
* If non-empty, destroy the underlying storage cell
* and create a new one with the contents of other if other is non-empty.
*/
template <class S>
- V8_INLINE void Reset(Isolate* isolate, const Handle<S>& other);
+ V8_INLINE(void Reset(Isolate* isolate, const Handle<S>& other));
/**
* If non-empty, destroy the underlying storage cell
* and create a new one with the contents of other if other is non-empty.
*/
template <class S, class M2>
- V8_INLINE void Reset(Isolate* isolate, const Persistent<S, M2>& other);
+ V8_INLINE(void Reset(Isolate* isolate, const Persistent<S, M2>& other));
// TODO(dcarney): deprecate
- V8_INLINE void Dispose() { Reset(); }
- V8_DEPRECATED(V8_INLINE void Dispose(Isolate* isolate)) { Reset(); }
+ V8_INLINE(void Dispose()) { Reset(); }
+ V8_DEPRECATED(V8_INLINE(void Dispose(Isolate* isolate))) { Reset(); }
- V8_INLINE bool IsEmpty() const { return val_ == 0; }
+ V8_INLINE(bool IsEmpty() const) { return val_ == 0; }
// TODO(dcarney): this is pretty useless, fix or remove
template <class S>
- V8_INLINE static Persistent<T>& Cast(Persistent<S>& that) { // NOLINT
+ V8_INLINE(static Persistent<T>& Cast(Persistent<S>& that)) { // NOLINT
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
// that the handle isn't empty before doing the checked cast.
}
// TODO(dcarney): this is pretty useless, fix or remove
- template <class S> V8_INLINE Persistent<S>& As() { // NOLINT
+ template <class S> V8_INLINE(Persistent<S>& As()) { // NOLINT
return Persistent<S>::Cast(*this);
}
- template <class S, class M2>
- V8_INLINE bool operator==(const Persistent<S, M2>& that) const {
+ template <class S, class M2> V8_INLINE(
+ bool operator==(const Persistent<S, M2>& that) const) {
internal::Object** a = reinterpret_cast<internal::Object**>(**this);
internal::Object** b = reinterpret_cast<internal::Object**>(*that);
if (a == 0) return b == 0;
return *a == *b;
}
- template <class S> V8_INLINE bool operator==(const Handle<S>& that) const {
+ template <class S> V8_INLINE(bool operator==(const Handle<S>& that) const) {
internal::Object** a = reinterpret_cast<internal::Object**>(**this);
internal::Object** b = reinterpret_cast<internal::Object**>(*that);
if (a == 0) return b == 0;
return *a == *b;
}
- template <class S, class M2>
- V8_INLINE bool operator!=(const Persistent<S, M2>& that) const {
+ template <class S, class M2> V8_INLINE(
+ bool operator!=(const Persistent<S, M2>& that) const) {
return !operator==(that);
}
- template <class S> V8_INLINE bool operator!=(const Handle<S>& that) const {
+ template <class S> V8_INLINE(bool operator!=(const Handle<S>& that) const) {
return !operator==(that);
}
template<typename P>
- V8_INLINE void SetWeak(
+ V8_INLINE(void SetWeak(
P* parameter,
- typename WeakCallbackData<T, P>::Callback callback);
+ typename WeakCallbackData<T, P>::Callback callback));
template<typename S, typename P>
- V8_INLINE void SetWeak(
+ V8_INLINE(void SetWeak(
P* parameter,
- typename WeakCallbackData<S, P>::Callback callback);
+ typename WeakCallbackData<S, P>::Callback callback));
// TODO(dcarney): deprecate
template<typename S, typename P>
- V8_INLINE void MakeWeak(
+ V8_INLINE(void MakeWeak(
P* parameter,
- typename WeakReferenceCallbacks<S, P>::Revivable callback);
+ typename WeakReferenceCallbacks<S, P>::Revivable callback));
// TODO(dcarney): deprecate
template<typename P>
- V8_INLINE void MakeWeak(
+ V8_INLINE(void MakeWeak(
P* parameter,
- typename WeakReferenceCallbacks<T, P>::Revivable callback);
+ typename WeakReferenceCallbacks<T, P>::Revivable callback));
- V8_INLINE void ClearWeak();
+ V8_INLINE(void ClearWeak());
- V8_DEPRECATED(V8_INLINE void ClearWeak(Isolate* isolate)) { ClearWeak(); }
+ V8_DEPRECATED(V8_INLINE(void ClearWeak(Isolate* isolate))) { ClearWeak(); }
/**
* Marks the reference to this object independent. Garbage collector is free
* independent handle should not assume that it will be preceded by a global
* GC prologue callback or followed by a global GC epilogue callback.
*/
- V8_INLINE void MarkIndependent();
+ V8_INLINE(void MarkIndependent());
- V8_DEPRECATED(V8_INLINE void MarkIndependent(Isolate* isolate)) {
+ V8_DEPRECATED(V8_INLINE(void MarkIndependent(Isolate* isolate))) {
MarkIndependent();
}
* external dependencies. This mark is automatically cleared after each
* garbage collection.
*/
- V8_INLINE void MarkPartiallyDependent();
+ V8_INLINE(void MarkPartiallyDependent());
- V8_DEPRECATED(V8_INLINE void MarkPartiallyDependent(Isolate* isolate)) {
+ V8_DEPRECATED(V8_INLINE(void MarkPartiallyDependent(Isolate* isolate))) {
MarkPartiallyDependent();
}
- V8_INLINE bool IsIndependent() const;
+ V8_INLINE(bool IsIndependent() const);
- V8_DEPRECATED(V8_INLINE bool IsIndependent(Isolate* isolate) const) {
+ V8_DEPRECATED(V8_INLINE(bool IsIndependent(Isolate* isolate)) const) {
return IsIndependent();
}
/** Checks if the handle holds the only reference to an object. */
- V8_INLINE bool IsNearDeath() const;
+ V8_INLINE(bool IsNearDeath() const);
- V8_DEPRECATED(V8_INLINE bool IsNearDeath(Isolate* isolate) const) {
+ V8_DEPRECATED(V8_INLINE(bool IsNearDeath(Isolate* isolate)) const) {
return IsNearDeath();
}
/** Returns true if the handle's reference is weak. */
- V8_INLINE bool IsWeak() const;
+ V8_INLINE(bool IsWeak() const);
- V8_DEPRECATED(V8_INLINE bool IsWeak(Isolate* isolate) const) {
+ V8_DEPRECATED(V8_INLINE(bool IsWeak(Isolate* isolate)) const) {
return IsWeak();
}
* Assigns a wrapper class ID to the handle. See RetainedObjectInfo interface
* description in v8-profiler.h for details.
*/
- V8_INLINE void SetWrapperClassId(uint16_t class_id);
+ V8_INLINE(void SetWrapperClassId(uint16_t class_id));
V8_DEPRECATED(
- V8_INLINE void SetWrapperClassId(Isolate * isolate, uint16_t class_id)) {
+ V8_INLINE(void SetWrapperClassId(Isolate * isolate, uint16_t class_id))) {
SetWrapperClassId(class_id);
}
* Returns the class ID previously assigned to this handle or 0 if no class ID
* was previously assigned.
*/
- V8_INLINE uint16_t WrapperClassId() const;
+ V8_INLINE(uint16_t WrapperClassId() const);
- V8_DEPRECATED(V8_INLINE uint16_t WrapperClassId(Isolate* isolate) const) {
+ V8_DEPRECATED(V8_INLINE(uint16_t WrapperClassId(Isolate* isolate)) const) {
return WrapperClassId();
}
// TODO(dcarney): remove
- V8_INLINE T* ClearAndLeak();
+ V8_INLINE(T* ClearAndLeak());
// TODO(dcarney): remove
- V8_INLINE void Clear() { val_ = 0; }
+ V8_INLINE(void Clear()) { val_ = 0; }
// TODO(dcarney): remove
#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
private:
#endif
- template <class S> V8_INLINE Persistent(S* that) : val_(that) { }
+ template <class S> V8_INLINE(Persistent(S* that)) : val_(that) { }
- V8_INLINE T* operator*() const { return val_; }
+ V8_INLINE(T* operator*() const) { return val_; }
private:
friend class Utils;
template<class F1, class F2> friend class Persistent;
template<class F> friend class ReturnValue;
- V8_INLINE static T* New(Isolate* isolate, T* that);
+ V8_INLINE(static T* New(Isolate* isolate, T* that));
template<class S, class M2>
- V8_INLINE void Copy(const Persistent<S, M2>& that);
+ V8_INLINE(void Copy(const Persistent<S, M2>& that));
T* val_;
};
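// A sketch of the weak-handle lifecycle above (assuming this revision's
// API; WrapperState and OnWeak are hypothetical):
//
//   struct WrapperState { int id; };
//
//   void OnWeak(const v8::WeakCallbackData<v8::Object, WrapperState>& data) {
//     delete data.GetParameter();  // reclaim the embedder's state
//   }
//
//   v8::Persistent<v8::Object> holder(isolate, local_obj);
//   holder.SetWeak(new WrapperState, OnWeak);
//   ...
//   holder.Reset();  // or rely on M::kResetInDestructor via the traits class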
internal::Object** next;
internal::Object** limit;
int level;
- V8_INLINE void Initialize() {
+ V8_INLINE(void Initialize()) {
next = limit = NULL;
level = 0;
}
*/
class ScriptOrigin {
public:
- V8_INLINE ScriptOrigin(
+ V8_INLINE(ScriptOrigin(
Handle<Value> resource_name,
Handle<Integer> resource_line_offset = Handle<Integer>(),
Handle<Integer> resource_column_offset = Handle<Integer>(),
- Handle<Boolean> resource_is_shared_cross_origin = Handle<Boolean>())
+ Handle<Boolean> resource_is_shared_cross_origin = Handle<Boolean>()))
: resource_name_(resource_name),
resource_line_offset_(resource_line_offset),
resource_column_offset_(resource_column_offset),
resource_is_shared_cross_origin_(resource_is_shared_cross_origin) { }
- V8_INLINE Handle<Value> ResourceName() const;
- V8_INLINE Handle<Integer> ResourceLineOffset() const;
- V8_INLINE Handle<Integer> ResourceColumnOffset() const;
- V8_INLINE Handle<Boolean> ResourceIsSharedCrossOrigin() const;
+ V8_INLINE(Handle<Value> ResourceName() const);
+ V8_INLINE(Handle<Integer> ResourceLineOffset() const);
+ V8_INLINE(Handle<Integer> ResourceColumnOffset() const);
+ V8_INLINE(Handle<Boolean> ResourceIsSharedCrossOrigin() const);
private:
Handle<Value> resource_name_;
Handle<Integer> resource_line_offset_;
* Returns true if this value is the undefined value. See ECMA-262
* 4.3.10.
*/
- V8_INLINE bool IsUndefined() const;
+ V8_INLINE(bool IsUndefined() const);
/**
* Returns true if this value is the null value. See ECMA-262
* 4.3.11.
*/
- V8_INLINE bool IsNull() const;
+ V8_INLINE(bool IsNull() const);
/**
* Returns true if this value is true.
* Returns true if this value is an instance of the String type.
* See ECMA-262 8.4.
*/
- V8_INLINE bool IsString() const;
+ V8_INLINE(bool IsString() const);
/**
* Returns true if this value is a symbol.
bool Equals(Handle<Value> that) const;
bool StrictEquals(Handle<Value> that) const;
- template <class T> V8_INLINE static Value* Cast(T* value);
+ template <class T> V8_INLINE(static Value* Cast(T* value));
private:
- V8_INLINE bool QuickIsUndefined() const;
- V8_INLINE bool QuickIsNull() const;
- V8_INLINE bool QuickIsString() const;
+ V8_INLINE(bool QuickIsUndefined() const);
+ V8_INLINE(bool QuickIsNull() const);
+ V8_INLINE(bool QuickIsString() const);
bool FullIsUndefined() const;
bool FullIsNull() const;
bool FullIsString() const;
class V8_EXPORT Boolean : public Primitive {
public:
bool Value() const;
- V8_INLINE static Handle<Boolean> New(bool value);
+ V8_INLINE(static Handle<Boolean> New(bool value));
};
/**
* This function is no longer useful.
*/
- V8_DEPRECATED(V8_INLINE bool MayContainNonAscii() const) { return true; }
+ V8_DEPRECATED(V8_INLINE(bool MayContainNonAscii()) const) { return true; }
/**
* Returns whether this string is known to contain only one byte data.
* A zero length string.
*/
static v8::Local<v8::String> Empty();
- V8_INLINE static v8::Local<v8::String> Empty(Isolate* isolate);
+ V8_INLINE(static v8::Local<v8::String> Empty(Isolate* isolate));
/**
* Returns true if the string is external
* regardless of the encoding, otherwise return NULL. The encoding of the
* string is returned in encoding_out.
*/
- V8_INLINE ExternalStringResourceBase* GetExternalStringResourceBase(
- Encoding* encoding_out) const;
+ V8_INLINE(ExternalStringResourceBase* GetExternalStringResourceBase(
+ Encoding* encoding_out) const);
/**
* Get the ExternalStringResource for an external string. Returns
* NULL if IsExternal() doesn't return true.
*/
- V8_INLINE ExternalStringResource* GetExternalStringResource() const;
+ V8_INLINE(ExternalStringResource* GetExternalStringResource() const);
/**
* Get the ExternalAsciiStringResource for an external ASCII string.
*/
const ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
- V8_INLINE static String* Cast(v8::Value* obj);
+ V8_INLINE(static String* Cast(v8::Value* obj));
// TODO(dcarney): deprecate
/**
* The second parameter 'length' gives the buffer length. If omitted,
* the function calls 'strlen' to determine the buffer length.
*/
- V8_INLINE static Local<String> New(const char* data, int length = -1);
+ V8_INLINE(static Local<String> New(const char* data, int length = -1));
// TODO(dcarney): deprecate
/** Allocates a new string from 16-bit character codes.*/
- V8_INLINE static Local<String> New(const uint16_t* data, int length = -1);
+ V8_INLINE(static Local<String> New(const uint16_t* data, int length = -1));
// TODO(dcarney): deprecate
/**
* Creates an internalized string (historically called a "symbol",
* not to be confused with ES6 symbols). Returns one if it exists already.
*/
- V8_INLINE static Local<String> NewSymbol(const char* data, int length = -1);
+ V8_INLINE(static Local<String> NewSymbol(const char* data, int length = -1));
enum NewStringType {
kNormalString, kInternalizedString, kUndetectableString
// TODO(dcarney): deprecate
/** Creates an undetectable string from the supplied ASCII or UTF-8 data.*/
- V8_INLINE static Local<String> NewUndetectable(const char* data,
- int length = -1);
+ V8_INLINE(
+ static Local<String> NewUndetectable(const char* data, int length = -1));
// TODO(dcarney): deprecate
/** Creates an undetectable string from the supplied 16-bit character codes.*/
- V8_INLINE static Local<String> NewUndetectable(const uint16_t* data,
- int length = -1);
+ V8_INLINE(static Local<String> NewUndetectable(
+ const uint16_t* data, int length = -1));
/**
* Converts an object to a UTF-8-encoded character array. Useful if
// Create a symbol with a print name.
static Local<Symbol> New(Isolate *isolate, const char* data, int length = -1);
- V8_INLINE static Symbol* Cast(v8::Value* obj);
+ V8_INLINE(static Symbol* Cast(v8::Value* obj));
private:
Symbol();
static void CheckCast(v8::Value* obj);
double Value() const;
static Local<Number> New(double value);
static Local<Number> New(Isolate* isolate, double value);
- V8_INLINE static Number* Cast(v8::Value* obj);
+ V8_INLINE(static Number* Cast(v8::Value* obj));
private:
Number();
static void CheckCast(v8::Value* obj);
static Local<Integer> New(int32_t value, Isolate*);
static Local<Integer> NewFromUnsigned(uint32_t value, Isolate*);
int64_t Value() const;
- V8_INLINE static Integer* Cast(v8::Value* obj);
+ V8_INLINE(static Integer* Cast(v8::Value* obj));
private:
Integer();
static void CheckCast(v8::Value* obj);
int InternalFieldCount();
/** Gets the value from an internal field. */
- V8_INLINE Local<Value> GetInternalField(int index);
+ V8_INLINE(Local<Value> GetInternalField(int index));
/** Sets the value in an internal field. */
void SetInternalField(int index, Handle<Value> value);
* must have been set by SetAlignedPointerInInternalField, everything else
* leads to undefined behavior.
*/
- V8_INLINE void* GetAlignedPointerFromInternalField(int index);
+ V8_INLINE(void* GetAlignedPointerFromInternalField(int index));
/**
* Sets a 2-byte-aligned native pointer in an internal field. To retrieve such
Local<Value> CallAsConstructor(int argc, Handle<Value> argv[]);
static Local<Object> New();
- V8_INLINE static Object* Cast(Value* obj);
+ V8_INLINE(static Object* Cast(Value* obj));
private:
Object();
*/
static Local<Array> New(int length = 0);
- V8_INLINE static Array* Cast(Value* obj);
+ V8_INLINE(static Array* Cast(Value* obj));
private:
Array();
static void CheckCast(Value* obj);
template<typename T>
class ReturnValue {
public:
- template <class S> V8_INLINE ReturnValue(const ReturnValue<S>& that)
+ template <class S> V8_INLINE(ReturnValue(const ReturnValue<S>& that))
: value_(that.value_) {
TYPE_CHECK(T, S);
}
// Handle setters
- template <typename S> V8_INLINE void Set(const Persistent<S>& handle);
- template <typename S> V8_INLINE void Set(const Handle<S> handle);
+ template <typename S> V8_INLINE(void Set(const Persistent<S>& handle));
+ template <typename S> V8_INLINE(void Set(const Handle<S> handle));
// Fast primitive setters
- V8_INLINE void Set(bool value);
- V8_INLINE void Set(double i);
- V8_INLINE void Set(int32_t i);
- V8_INLINE void Set(uint32_t i);
+ V8_INLINE(void Set(bool value));
+ V8_INLINE(void Set(double i));
+ V8_INLINE(void Set(int32_t i));
+ V8_INLINE(void Set(uint32_t i));
// Fast JS primitive setters
- V8_INLINE void SetNull();
- V8_INLINE void SetUndefined();
- V8_INLINE void SetEmptyString();
+ V8_INLINE(void SetNull());
+ V8_INLINE(void SetUndefined());
+ V8_INLINE(void SetEmptyString());
// Convenience getter for Isolate
- V8_INLINE Isolate* GetIsolate();
+ V8_INLINE(Isolate* GetIsolate());
private:
template<class F> friend class ReturnValue;
template<class F> friend class FunctionCallbackInfo;
template<class F> friend class PropertyCallbackInfo;
- V8_INLINE internal::Object* GetDefaultValue();
- V8_INLINE explicit ReturnValue(internal::Object** slot);
+ V8_INLINE(internal::Object* GetDefaultValue());
+ V8_INLINE(explicit ReturnValue(internal::Object** slot));
internal::Object** value_;
};
template<typename T>
class FunctionCallbackInfo {
public:
- V8_INLINE int Length() const;
- V8_INLINE Local<Value> operator[](int i) const;
- V8_INLINE Local<Function> Callee() const;
- V8_INLINE Local<Object> This() const;
- V8_INLINE Local<Object> Holder() const;
- V8_INLINE bool IsConstructCall() const;
- V8_INLINE Local<Value> Data() const;
- V8_INLINE Isolate* GetIsolate() const;
- V8_INLINE ReturnValue<T> GetReturnValue() const;
+ V8_INLINE(int Length() const);
+ V8_INLINE(Local<Value> operator[](int i) const);
+ V8_INLINE(Local<Function> Callee() const);
+ V8_INLINE(Local<Object> This() const);
+ V8_INLINE(Local<Object> Holder() const);
+ V8_INLINE(bool IsConstructCall() const);
+ V8_INLINE(Local<Value> Data() const);
+ V8_INLINE(Isolate* GetIsolate() const);
+ V8_INLINE(ReturnValue<T> GetReturnValue() const);
// This shouldn't be public, but the arm compiler needs it.
static const int kArgsLength = 6;
static const int kCalleeIndex = -4;
static const int kHolderIndex = -5;
- V8_INLINE FunctionCallbackInfo(internal::Object** implicit_args,
+ V8_INLINE(FunctionCallbackInfo(internal::Object** implicit_args,
internal::Object** values,
int length,
- bool is_construct_call);
+ bool is_construct_call));
internal::Object** implicit_args_;
internal::Object** values_;
int length_;
template<typename T>
class PropertyCallbackInfo {
public:
- V8_INLINE Isolate* GetIsolate() const;
- V8_INLINE Local<Value> Data() const;
- V8_INLINE Local<Object> This() const;
- V8_INLINE Local<Object> Holder() const;
- V8_INLINE ReturnValue<T> GetReturnValue() const;
+ V8_INLINE(Isolate* GetIsolate() const);
+ V8_INLINE(Local<Value> Data() const);
+ V8_INLINE(Local<Object> This() const);
+ V8_INLINE(Local<Object> Holder() const);
+ V8_INLINE(ReturnValue<T> GetReturnValue() const);
// This shouldn't be public, but the arm compiler needs it.
static const int kArgsLength = 6;
static const int kReturnValueDefaultValueIndex = -4;
static const int kIsolateIndex = -5;
- V8_INLINE PropertyCallbackInfo(internal::Object** args) : args_(args) {}
+ V8_INLINE(PropertyCallbackInfo(internal::Object** args))
+ : args_(args) { }
internal::Object** args_;
};
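// A sketch of how the callback info classes are consumed (assuming this
// revision's API; Add is a hypothetical FunctionCallback):
//
//   void Add(const v8::FunctionCallbackInfo<v8::Value>& info) {
//     if (info.Length() < 2) {
//       info.GetReturnValue().SetUndefined();
//       return;
//     }
//     double sum = info[0]->NumberValue() + info[1]->NumberValue();
//     info.GetReturnValue().Set(sum);  // fast primitive setter
//   }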
int ScriptId() const;
ScriptOrigin GetScriptOrigin() const;
- V8_INLINE static Function* Cast(Value* obj);
+ V8_INLINE(static Function* Cast(Value* obj));
static const int kLineOffsetNotFound;
private:
*/
Contents Externalize();
- V8_INLINE static ArrayBuffer* Cast(Value* obj);
+ V8_INLINE(static ArrayBuffer* Cast(Value* obj));
static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
*/
void* BaseAddress();
- V8_INLINE static ArrayBufferView* Cast(Value* obj);
+ V8_INLINE(static ArrayBufferView* Cast(Value* obj));
static const int kInternalFieldCount =
V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT;
*/
size_t Length();
- V8_INLINE static TypedArray* Cast(Value* obj);
+ V8_INLINE(static TypedArray* Cast(Value* obj));
private:
TypedArray();
public:
static Local<Uint8Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE static Uint8Array* Cast(Value* obj);
+ V8_INLINE(static Uint8Array* Cast(Value* obj));
private:
Uint8Array();
public:
static Local<Uint8ClampedArray> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE static Uint8ClampedArray* Cast(Value* obj);
+ V8_INLINE(static Uint8ClampedArray* Cast(Value* obj));
private:
Uint8ClampedArray();
public:
static Local<Int8Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE static Int8Array* Cast(Value* obj);
+ V8_INLINE(static Int8Array* Cast(Value* obj));
private:
Int8Array();
public:
static Local<Uint16Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE static Uint16Array* Cast(Value* obj);
+ V8_INLINE(static Uint16Array* Cast(Value* obj));
private:
Uint16Array();
public:
static Local<Int16Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE static Int16Array* Cast(Value* obj);
+ V8_INLINE(static Int16Array* Cast(Value* obj));
private:
Int16Array();
public:
static Local<Uint32Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE static Uint32Array* Cast(Value* obj);
+ V8_INLINE(static Uint32Array* Cast(Value* obj));
private:
Uint32Array();
public:
static Local<Int32Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE static Int32Array* Cast(Value* obj);
+ V8_INLINE(static Int32Array* Cast(Value* obj));
private:
Int32Array();
public:
static Local<Float32Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE static Float32Array* Cast(Value* obj);
+ V8_INLINE(static Float32Array* Cast(Value* obj));
private:
Float32Array();
public:
static Local<Float64Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE static Float64Array* Cast(Value* obj);
+ V8_INLINE(static Float64Array* Cast(Value* obj));
private:
Float64Array();
public:
static Local<DataView> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE static DataView* Cast(Value* obj);
+ V8_INLINE(static DataView* Cast(Value* obj));
private:
DataView();
*/
double ValueOf() const;
- V8_INLINE static Date* Cast(v8::Value* obj);
+ V8_INLINE(static Date* Cast(v8::Value* obj));
/**
* Notification that the embedder has changed the time zone,
*/
double ValueOf() const;
- V8_INLINE static NumberObject* Cast(v8::Value* obj);
+ V8_INLINE(static NumberObject* Cast(v8::Value* obj));
private:
static void CheckCast(v8::Value* obj);
*/
bool ValueOf() const;
- V8_INLINE static BooleanObject* Cast(v8::Value* obj);
+ V8_INLINE(static BooleanObject* Cast(v8::Value* obj));
private:
static void CheckCast(v8::Value* obj);
*/
Local<String> ValueOf() const;
- V8_INLINE static StringObject* Cast(v8::Value* obj);
+ V8_INLINE(static StringObject* Cast(v8::Value* obj));
private:
static void CheckCast(v8::Value* obj);
*/
Local<Symbol> ValueOf() const;
- V8_INLINE static SymbolObject* Cast(v8::Value* obj);
+ V8_INLINE(static SymbolObject* Cast(v8::Value* obj));
private:
static void CheckCast(v8::Value* obj);
*/
Flags GetFlags() const;
- V8_INLINE static RegExp* Cast(v8::Value* obj);
+ V8_INLINE(static RegExp* Cast(v8::Value* obj));
private:
static void CheckCast(v8::Value* obj);
class V8_EXPORT External : public Value {
public:
static Local<External> New(void* value);
- V8_INLINE static External* Cast(Value* obj);
+ V8_INLINE(static External* Cast(Value* obj));
void* Value() const;
private:
static void CheckCast(v8::Value* obj);
/** Adds a property to each instance created by this template.*/
void Set(Handle<String> name, Handle<Data> value,
PropertyAttribute attributes = None);
- V8_INLINE void Set(const char* name, Handle<Data> value);
+ V8_INLINE(void Set(const char* name, Handle<Data> value));
void SetAccessorProperty(
Local<String> name,
*/
class V8_EXPORT DeclareExtension {
public:
- V8_INLINE DeclareExtension(Extension* extension) {
+ V8_INLINE(DeclareExtension(Extension* extension)) {
RegisterExtension(extension);
}
};
Handle<Boolean> V8_EXPORT True();
Handle<Boolean> V8_EXPORT False();
-V8_INLINE Handle<Primitive> Undefined(Isolate* isolate);
-V8_INLINE Handle<Primitive> Null(Isolate* isolate);
-V8_INLINE Handle<Boolean> True(Isolate* isolate);
-V8_INLINE Handle<Boolean> False(Isolate* isolate);
+V8_INLINE(Handle<Primitive> Undefined(Isolate* isolate));
+V8_INLINE(Handle<Primitive> Null(Isolate* isolate));
+V8_INLINE(Handle<Boolean> True(Isolate* isolate));
+V8_INLINE(Handle<Boolean> False(Isolate* isolate));
/**
/**
* Associate embedder-specific data with the isolate
*/
- V8_INLINE void SetData(void* data);
+ V8_INLINE(void SetData(void* data));
/**
* Retrieve embedder-specific data from the isolate.
* Returns NULL if SetData has never been called.
*/
- V8_INLINE void* GetData();
+ V8_INLINE(void* GetData());
/**
* Get statistics about the heap memory usage.
class V8_EXPORT AssertNoGCScope {
#ifndef DEBUG
// TODO(yangguo): remove isolate argument.
- V8_INLINE AssertNoGCScope(Isolate* isolate) {}
+ V8_INLINE(AssertNoGCScope(Isolate* isolate)) { }
#else
AssertNoGCScope(Isolate* isolate);
~AssertNoGCScope();
* previous call to SetEmbedderData with the same index. Note that index 0
* currently has a special meaning for Chrome's debugger.
*/
- V8_INLINE Local<Value> GetEmbedderData(int index);
+ V8_INLINE(Local<Value> GetEmbedderData(int index));
/**
* Sets the embedder data with the given index, growing the data as
* SetAlignedPointerInEmbedderData with the same index. Note that index 0
* currently has a special meaning for Chrome's debugger.
*/
- V8_INLINE void* GetAlignedPointerFromEmbedderData(int index);
+ V8_INLINE(void* GetAlignedPointerFromEmbedderData(int index));
/**
* Sets a 2-byte-aligned native pointer in the embedder data with the given
*/
class Scope {
public:
- explicit V8_INLINE Scope(Handle<Context> context) : context_(context) {
+ V8_INLINE(explicit Scope(Handle<Context> context)) : context_(context) {
context_->Enter();
}
// TODO(dcarney): deprecate
- V8_INLINE Scope(Isolate* isolate, Persistent<Context>& context) // NOLINT
+ V8_INLINE(Scope(Isolate* isolate, Persistent<Context>& context)) // NOLINT
: context_(Handle<Context>::New(isolate, context)) {
context_->Enter();
}
- V8_INLINE ~Scope() { context_->Exit(); }
+ V8_INLINE(~Scope()) { context_->Exit(); }
private:
Handle<Context> context_;
/**
* Initialize Unlocker for a given Isolate.
*/
- V8_INLINE explicit Unlocker(Isolate* isolate) { Initialize(isolate); }
+ V8_INLINE(explicit Unlocker(Isolate* isolate)) { Initialize(isolate); }
/** Deprecated. Use Isolate version instead. */
V8_DEPRECATED(Unlocker());
/**
* Initialize Locker for a given Isolate.
*/
- V8_INLINE explicit Locker(Isolate* isolate) { Initialize(isolate); }
+ V8_INLINE(explicit Locker(Isolate* isolate)) { Initialize(isolate); }
/** Deprecated. Use Isolate version instead. */
V8_DEPRECATED(Locker());
template <size_t ptr_size> struct SmiTagging;
template<int kSmiShiftSize>
-V8_INLINE internal::Object* IntToSmi(int value) {
+V8_INLINE(internal::Object* IntToSmi(int value)) {
int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
intptr_t tagged_value =
(static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
template <> struct SmiTagging<4> {
static const int kSmiShiftSize = 0;
static const int kSmiValueSize = 31;
- V8_INLINE static int SmiToInt(internal::Object* value) {
+ V8_INLINE(static int SmiToInt(internal::Object* value)) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Throw away top 32 bits and shift down (requires >> to be sign extending).
return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
}
- V8_INLINE static internal::Object* IntToSmi(int value) {
+ V8_INLINE(static internal::Object* IntToSmi(int value)) {
return internal::IntToSmi<kSmiShiftSize>(value);
}
- V8_INLINE static bool IsValidSmi(intptr_t value) {
+ V8_INLINE(static bool IsValidSmi(intptr_t value)) {
// To be representable as a tagged small integer, the two
// most-significant bits of 'value' must be either 00 or 11 due to
// sign-extension. To check this we add 01 to the two
template <> struct SmiTagging<8> {
static const int kSmiShiftSize = 31;
static const int kSmiValueSize = 32;
- V8_INLINE static int SmiToInt(internal::Object* value) {
+ V8_INLINE(static int SmiToInt(internal::Object* value)) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Shift down and throw away top 32 bits.
return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
}
- V8_INLINE static internal::Object* IntToSmi(int value) {
+ V8_INLINE(static internal::Object* IntToSmi(int value)) {
return internal::IntToSmi<kSmiShiftSize>(value);
}
- V8_INLINE static bool IsValidSmi(intptr_t value) {
+ V8_INLINE(static bool IsValidSmi(intptr_t value)) {
// To be representable as a long smi, the value must be a 32-bit integer.
return (value == static_cast<int32_t>(value));
}
typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
-V8_INLINE static bool SmiValuesAre31Bits() { return kSmiValueSize == 31; }
-V8_INLINE static bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }
+V8_INLINE(static bool SmiValuesAre31Bits()) { return kSmiValueSize == 31; }
+V8_INLINE(static bool SmiValuesAre32Bits()) { return kSmiValueSize == 32; }
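// A worked example of the tagging scheme above: with kSmiTagSize == 1 and
// kSmiTag == 0, the 32-bit layout (kSmiShiftSize == 0) tags 5 as
// (5 << 1) | 0 == 10, and SmiToInt recovers it with a sign-extending shift,
// 10 >> 1 == 5. The 64-bit layout (kSmiShiftSize == 31) keeps the payload in
// the upper word instead: IntToSmi(5) == 5 << 32.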
/**
* This class exports constants and functionality from within v8 that
static const int kNullOddballKind = 3;
static void CheckInitializedImpl(v8::Isolate* isolate);
- V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
+ V8_INLINE(static void CheckInitialized(v8::Isolate* isolate)) {
#ifdef V8_ENABLE_CHECKS
CheckInitializedImpl(isolate);
#endif
}
- V8_INLINE static bool HasHeapObjectTag(internal::Object* value) {
+ V8_INLINE(static bool HasHeapObjectTag(internal::Object* value)) {
return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
kHeapObjectTag);
}
- V8_INLINE static int SmiValue(internal::Object* value) {
+ V8_INLINE(static int SmiValue(internal::Object* value)) {
return PlatformSmiTagging::SmiToInt(value);
}
- V8_INLINE static internal::Object* IntToSmi(int value) {
+ V8_INLINE(static internal::Object* IntToSmi(int value)) {
return PlatformSmiTagging::IntToSmi(value);
}
- V8_INLINE static bool IsValidSmi(intptr_t value) {
+ V8_INLINE(static bool IsValidSmi(intptr_t value)) {
return PlatformSmiTagging::IsValidSmi(value);
}
- V8_INLINE static int GetInstanceType(internal::Object* obj) {
+ V8_INLINE(static int GetInstanceType(internal::Object* obj)) {
typedef internal::Object O;
O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
}
- V8_INLINE static int GetOddballKind(internal::Object* obj) {
+ V8_INLINE(static int GetOddballKind(internal::Object* obj)) {
typedef internal::Object O;
return SmiValue(ReadField<O*>(obj, kOddballKindOffset));
}
- V8_INLINE static bool IsExternalTwoByteString(int instance_type) {
+ V8_INLINE(static bool IsExternalTwoByteString(int instance_type)) {
int representation = (instance_type & kFullStringRepresentationMask);
return representation == kExternalTwoByteRepresentationTag;
}
- V8_INLINE static uint8_t GetNodeFlag(internal::Object** obj, int shift) {
+ V8_INLINE(static uint8_t GetNodeFlag(internal::Object** obj, int shift)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
return *addr & static_cast<uint8_t>(1U << shift);
}
- V8_INLINE static void UpdateNodeFlag(internal::Object** obj,
- bool value, int shift) {
+ V8_INLINE(static void UpdateNodeFlag(internal::Object** obj,
+ bool value, int shift)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
uint8_t mask = static_cast<uint8_t>(1 << shift);
*addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
}
- V8_INLINE static uint8_t GetNodeState(internal::Object** obj) {
+ V8_INLINE(static uint8_t GetNodeState(internal::Object** obj)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
return *addr & kNodeStateMask;
}
- V8_INLINE static void UpdateNodeState(internal::Object** obj,
- uint8_t value) {
+ V8_INLINE(static void UpdateNodeState(internal::Object** obj,
+ uint8_t value)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
*addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
}
- V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, void* data) {
+ V8_INLINE(static void SetEmbedderData(v8::Isolate* isolate, void* data)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset;
*reinterpret_cast<void**>(addr) = data;
}
- V8_INLINE static void* GetEmbedderData(v8::Isolate* isolate) {
+ V8_INLINE(static void* GetEmbedderData(v8::Isolate* isolate)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset;
return *reinterpret_cast<void**>(addr);
}
- V8_INLINE static internal::Object** GetRoot(v8::Isolate* isolate,
- int index) {
+ V8_INLINE(static internal::Object** GetRoot(v8::Isolate* isolate,
+ int index)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateRootsOffset;
return reinterpret_cast<internal::Object**>(addr + index * kApiPointerSize);
}
- template <typename T> V8_INLINE static T ReadField(Object* ptr, int offset) {
+ template <typename T>
+ V8_INLINE(static T ReadField(Object* ptr, int offset)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;
return *reinterpret_cast<T*>(addr);
}
template <typename T>
- V8_INLINE static T ReadEmbedderData(Context* context, int index) {
+ V8_INLINE(static T ReadEmbedderData(Context* context, int index)) {
typedef internal::Object O;
typedef internal::Internals I;
O* ctx = *reinterpret_cast<O**>(context);
return I::ReadField<T>(embedder_data, value_offset);
}
- V8_INLINE static bool CanCastToHeapObject(void* o) { return false; }
- V8_INLINE static bool CanCastToHeapObject(Context* o) { return true; }
- V8_INLINE static bool CanCastToHeapObject(String* o) { return true; }
- V8_INLINE static bool CanCastToHeapObject(Object* o) { return true; }
- V8_INLINE static bool CanCastToHeapObject(Message* o) { return true; }
- V8_INLINE static bool CanCastToHeapObject(StackTrace* o) { return true; }
- V8_INLINE static bool CanCastToHeapObject(StackFrame* o) { return true; }
+ V8_INLINE(static bool CanCastToHeapObject(void* o)) { return false; }
+ V8_INLINE(static bool CanCastToHeapObject(Context* o)) { return true; }
+ V8_INLINE(static bool CanCastToHeapObject(String* o)) { return true; }
+ V8_INLINE(static bool CanCastToHeapObject(Object* o)) { return true; }
+ V8_INLINE(static bool CanCastToHeapObject(Message* o)) { return true; }
+ V8_INLINE(static bool CanCastToHeapObject(StackTrace* o)) { return true; }
+ V8_INLINE(static bool CanCastToHeapObject(StackFrame* o)) { return true; }
};
} // namespace internal
// supported
// V8_HAS_ATTRIBUTE_DEPRECATED - __attribute__((deprecated)) supported
// V8_HAS_ATTRIBUTE_NOINLINE - __attribute__((noinline)) supported
-// V8_HAS_ATTRIBUTE_PURE - __attribute__((pure)) supported
// V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported
// V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result))
// supported
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
# define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated))
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
-# define V8_HAS_ATTRIBUTE_PURE (__has_attribute(pure))
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(__has_attribute(warn_unused_result))
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (V8_GNUC_PREREQ(4, 4, 0))
# define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0))
-# define V8_HAS_ATTRIBUTE_PURE (V8_GNUC_PREREQ(2, 96, 0))
# define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(!V8_CC_INTEL && V8_GNUC_PREREQ(4, 1, 0))
// Helper macros
// A macro used to make better inlining. Don't bother for debug builds.
-// Use like:
-// V8_INLINE int GetZero() { return 0; }
+// Use like:
+// V8_INLINE(int GetZero()) { return 0; }
#if !defined(DEBUG) && V8_HAS_ATTRIBUTE_ALWAYS_INLINE
-# define V8_INLINE inline __attribute__((always_inline))
+# define V8_INLINE(declarator) inline __attribute__((always_inline)) declarator
#elif !defined(DEBUG) && V8_HAS___FORCEINLINE
-# define V8_INLINE __forceinline
+# define V8_INLINE(declarator) __forceinline declarator
#else
-# define V8_INLINE inline
+# define V8_INLINE(declarator) inline declarator
#endif
// A macro used to tell the compiler to never inline a particular function.
// Don't bother for debug builds.
-// Use like:
-// V8_NOINLINE int GetMinusOne() { return -1; }
+// Use like:
+// V8_NOINLINE(int GetMinusOne()) { return -1; }
#if !defined(DEBUG) && V8_HAS_ATTRIBUTE_NOINLINE
-# define V8_NOINLINE __attribute__((noinline))
+# define V8_NOINLINE(declarator) __attribute__((noinline)) declarator
#elif !defined(DEBUG) && V8_HAS_DECLSPEC_NOINLINE
-# define V8_NOINLINE __declspec(noinline)
+# define V8_NOINLINE(declarator) __declspec(noinline) declarator
#else
-# define V8_NOINLINE /* NOT SUPPORTED */
+# define V8_NOINLINE(declarator) declarator
#endif
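// Note that the declarator is an ordinary macro argument: commas nested in
// parentheses (parameter lists) are safe, but a comma at the top level of
// the declarator would split the argument. For example,
//
//   V8_INLINE(static bool Same(internal::Object** a, internal::Object** b));
//
// preprocesses correctly, while a return type such as std::pair<int, int>
// would need its template comma shielded (e.g. behind a typedef).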
#endif
-// Many functions have no effects except the return value and their return value
-// depends only on the parameters and/or global variables. Such a function can
-// be subject to common subexpression elimination and loop optimization just as
-// an arithmetic operator would be. These functions should be declared with the
-// attribute V8_PURE. For example,
-//
-// int square (int) V8_PURE;
-//
-// says that the hypothetical function square is safe to call fewer times than
-// the program says.
-//
-// Some of common examples of pure functions are strlen or memcmp. Interesting
-// non-V8_PURE functions are functions with infinite loops or those depending
-// on volatile memory or other system resource, that may change between two
-// consecutive calls (such as feof in a multithreaded environment).
-#if V8_HAS_ATTRIBUTE_PURE
-# define V8_PURE __attribute__((pure))
-#else
-# define V8_PURE /* NOT SUPPORTED */
-#endif
-
-
// Annotate a function indicating the caller must examine the return value.
// Use like:
// int foo() V8_WARN_UNUSED_RESULT;
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &exp;
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- 1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &exp;
ExternalReference::InitializeMathExpData();
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
- bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
- ASSERT(result);
- USE(result);
+ OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
return stub;
}
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- 1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return stub;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
- bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
- ASSERT(result);
- USE(result);
+ OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
#endif
}
return stub;
}
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- 1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return stub;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
masm.GetCode(&desc);
CPU::FlushICache(buffer, actual_size);
- bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
- ASSERT(result);
- USE(result);
+ OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<OS::MemCopyUint16Uint8Function>(buffer);
#endif
// Define __cpuid() for non-MSVC compilers.
#if !V8_CC_MSVC
-static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
+static V8_INLINE(void __cpuid(int cpu_info[4], int info_type)) {
#if defined(__i386__) && defined(__pic__)
// Make sure to preserve ebx, which contains the pointer
// to the GOT in case we're generating PIC.
static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
- VirtualMemory::GetPageSize(),
- VirtualMemory::EXECUTABLE,
+ OS::CommitPageSize(),
+#if defined(__native_client__)
+ // The Native Client port of V8 uses an interpreter,
+ // so code pages don't need PROT_EXEC.
+ NOT_EXECUTABLE,
+#else
+ EXECUTABLE,
+#endif
NULL);
}
size_t Deoptimizer::GetMaxDeoptTableSize() {
int entries_size =
Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
- int commit_page_size = static_cast<int>(VirtualMemory::GetPageSize());
+ int commit_page_size = static_cast<int>(OS::CommitPageSize());
int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
commit_page_size) + 1;
return static_cast<size_t>(commit_page_size * page_count);
DISALLOW_COPY_AND_ASSIGN(TypeName)
-// Newly written code should use V8_INLINE and V8_NOINLINE directly.
-#define INLINE(declarator) V8_INLINE declarator
-#define NO_INLINE(declarator) V8_NOINLINE declarator
+// Newly written code should use V8_INLINE() and V8_NOINLINE() directly.
+#define INLINE(declarator) V8_INLINE(declarator)
+#define NO_INLINE(declarator) V8_NOINLINE(declarator)
// Newly written code should use V8_WARN_UNUSED_RESULT.
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE)
+ ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE)
+ ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
} else if (CODE_SPACE == space) {
result = code_space_->AllocateRaw(size_in_bytes);
} else if (LO_SPACE == space) {
- result = lo_space_->AllocateRaw(
- size_in_bytes, VirtualMemory::NOT_EXECUTABLE);
+ result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
} else if (CELL_SPACE == space) {
result = cell_space_->AllocateRaw(size_in_bytes);
} else if (PROPERTY_CELL_SPACE == space) {
max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
- intptr_t max_virtual = static_cast<intptr_t>(VirtualMemory::GetLimit());
+ intptr_t max_virtual = OS::MaxVirtualMemory();
+
if (max_virtual > 0) {
if (code_range_size_ > 0) {
// Reserve no more than 1/8 of the memory for the code range.
HeapObject* result;
bool force_lo_space = obj_size > code_space()->AreaSize();
if (force_lo_space) {
- maybe_result = lo_space_->AllocateRaw(obj_size, VirtualMemory::EXECUTABLE);
+ maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
}
// Discard the first code allocation, which was on a page where it could be
// moved.
CreateFillerObjectAt(result->address(), obj_size);
- maybe_result = lo_space_->AllocateRaw(obj_size, VirtualMemory::EXECUTABLE);
+ maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
}
int obj_size = code->Size();
MaybeObject* maybe_result;
if (obj_size > code_space()->AreaSize()) {
- maybe_result = lo_space_->AllocateRaw(obj_size, VirtualMemory::EXECUTABLE);
+ maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
}
MaybeObject* maybe_result;
if (new_obj_size > code_space()->AreaSize()) {
- maybe_result = lo_space_->AllocateRaw(
- new_obj_size, VirtualMemory::EXECUTABLE);
+ maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(new_obj_size);
}
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE)
+ ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
int size = FixedArray::SizeFor(length);
return size <= Page::kMaxNonCodeHeapObjectSize
? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE);
+ : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
}
new OldSpace(this,
max_old_generation_size_,
OLD_POINTER_SPACE,
- VirtualMemory::NOT_EXECUTABLE);
+ NOT_EXECUTABLE);
if (old_pointer_space_ == NULL) return false;
if (!old_pointer_space_->SetUp()) return false;
new OldSpace(this,
max_old_generation_size_,
OLD_DATA_SPACE,
- VirtualMemory::NOT_EXECUTABLE);
+ NOT_EXECUTABLE);
if (old_data_space_ == NULL) return false;
if (!old_data_space_->SetUp()) return false;
}
}
- code_space_ = new OldSpace(
- this, max_old_generation_size_, CODE_SPACE, VirtualMemory::EXECUTABLE);
+ code_space_ =
+ new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
if (!code_space_->SetUp()) return false;
MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
while (inner <= inner_last) {
// Size of a large chunk is always a multiple of
- // VirtualMemory::GetAllocationGranularity() so
- // there is always enough space for a fake
- // MemoryChunk header.
+ // OS::AllocateAlignment() so there is always
+ // enough space for a fake MemoryChunk header.
Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
// Guard against overflow.
if (area_end < inner->address()) area_end = chunk_end;
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
size_t actual_size;
// Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- 1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+ &actual_size,
+ true));
if (buffer == NULL) {
// Fallback to library function if function cannot be created.
switch (type) {
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
- bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
- ASSERT(result);
- USE(result);
+ OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
if (!CpuFeatures::IsSupported(SSE2)) return &exp;
if (!FLAG_fast_math) return &exp;
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- 1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &exp;
ExternalReference::InitializeMathExpData();
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
- bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
- ASSERT(result);
- USE(result);
+ OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
UnaryMathFunction CreateSqrtFunction() {
size_t actual_size;
// Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- 1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+ &actual_size,
+ true));
// If SSE2 is not available, we can use libc's implementation to ensure
// consistency since code by fullcodegen's calls into runtime in that case.
if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
- bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
- ASSERT(result);
- USE(result);
+ OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
OS::MemMoveFunction CreateMemMoveFunction() {
size_t actual_size;
// Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- 1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return NULL;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
LabelConverter conv(buffer);
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
- bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
- ASSERT(result);
- USE(result);
+ OS::ProtectCode(buffer, actual_size);
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<OS::MemMoveFunction>(buffer);
bool success = marking_deque_memory_->Commit(
reinterpret_cast<Address>(marking_deque_memory_->address()),
marking_deque_memory_->size(),
- VirtualMemory::NOT_EXECUTABLE);
+ false); // Not executable.
CHECK(success);
marking_deque_memory_committed_ = true;
}
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &exp;
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- 1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &exp;
ExternalReference::InitializeMathExpData();
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
- bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
- ASSERT(result);
- USE(result);
+ OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (mbase == MAP_FAILED) {
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
+ return NULL;
+ }
+ *allocated = msize;
+ return mbase;
+}
+
+
void OS::DumpBacktrace() {
// Currently unsupported.
}
// CpuFeatures::Probe. We don't care about randomization in this case because
// the code page is immediately freed.
if (isolate != NULL) {
- // The address range used to randomize RWX allocations in
- // VirtualMemory::AllocateRegion().
+ // The address range used to randomize RWX allocations in OS::Allocate().
// Try not to map pages into the default range into which Windows loads
// DLLs. Use a multiple of 64k to prevent committing unused memory.
// Note: this does not guarantee that RWX regions will be within the
return NULL;
}
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+ LPVOID base = NULL;
+
+ if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+ // For executable pages, try to randomize the allocation address.
+ for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
+ base = VirtualAlloc(GetRandomAddr(), size, action, protection);
+ }
+ }
+
+ // After three attempts give up and let the OS find an address to use.
+ if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+ return base;
+}
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* address = ReserveRegion(request_size);
+ if (address == NULL) return;
+ Address base = RoundUp(static_cast<Address>(address), alignment);
+ // Try reducing the size by freeing and then reallocating a specific area.
+ bool result = ReleaseRegion(address, request_size);
+ USE(result);
+ ASSERT(result);
+ address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+ if (address != NULL) {
+ request_size = size;
+ ASSERT(base == static_cast<Address>(address));
+ } else {
+ // Resizing failed, just go with a bigger area.
+ address = ReserveRegion(request_size);
+ if (address == NULL) return;
+ }
+ address_ = address;
+ size_ = request_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address_, size_);
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ ASSERT(IsReserved());
+ return UncommitRegion(address, size);
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+ if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ if (NULL == VirtualAlloc(address,
+ OS::CommitPageSize(),
+ MEM_COMMIT,
+ PAGE_NOACCESS)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return VirtualFree(base, size, MEM_DECOMMIT) != 0;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return VirtualFree(base, 0, MEM_RELEASE) != 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
} } // namespace v8::internal
}
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool executable) {
+ const size_t msize = RoundUp(requested, getpagesize());
+ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
+ if (mbase == MAP_FAILED) {
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
+ return NULL;
+ }
+ *allocated = msize;
+ return mbase;
+}
+
+
void OS::DumpBacktrace() {
POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
}
return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
}
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ Address base = static_cast<Address>(reservation);
+ Address aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
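// Editor's sketch (not part of this patch): the prefix/suffix trimming above
// is plain address arithmetic and can be sanity-checked in isolation. The
// concrete values below assume a 4 KB OS::AllocateAlignment(), a request of
// size = 8192 with alignment = 65536, and an arbitrary mmap result:
#include <assert.h>
#include <stdint.h>

static uint64_t RoundUpTo(uint64_t x, uint64_t boundary) {
  return (x + boundary - 1) & ~(boundary - 1);  // boundary: power of two.
}

static void CheckAlignedReservationMath() {
  const uint64_t base = UINT64_C(0x7f0000003000);          // from mmap
  const uint64_t request_size = RoundUpTo(8192 + 65536, 4096);  // 0x12000
  const uint64_t aligned_base = RoundUpTo(base, 65536);    // 0x7f0000010000
  const uint64_t prefix_size = aligned_base - base;        // 0xd000
  const uint64_t aligned_size = RoundUpTo(8192, 4096);     // 0x2000
  const uint64_t suffix_size = request_size - prefix_size - aligned_size;
  // Prefix, aligned block and suffix exactly tile the original reservation.
  assert(base + prefix_size == aligned_base);
  assert(aligned_base + aligned_size + suffix_size == base + request_size);
}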
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(base,
+ size,
+ prot,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mmap(base,
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
} } // namespace v8::internal
}
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, AllocateAlignment());
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* addr = OS::GetRandomMmapAddr();
+ void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (mbase == MAP_FAILED) {
+ LOG(i::Isolate::Current(),
+ StringEvent("OS::Allocate", "mmap failed"));
+ return NULL;
+ }
+ *allocated = msize;
+ return mbase;
+}
+
+
void OS::DumpBacktrace() {
// backtrace is a glibc extension.
#if defined(__GLIBC__) && !defined(__UCLIBC__)
int size = ftell(file);
void* memory =
- mmap(NULL,
+ mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fileno(file),
0);
- if (memory == MAP_FAILED) {
- fclose(file);
- return NULL;
- }
return new PosixMemoryMappedFile(file, memory, size);
}
return NULL;
}
void* memory =
- mmap(NULL,
+ mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fileno(file),
0);
- if (memory == MAP_FAILED) {
- fclose(file);
- return NULL;
- }
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- int result = munmap(memory_, size_);
- ASSERT_EQ(0, result);
- USE(result);
+ if (memory_) OS::Free(memory_, size_);
fclose(file_);
}
OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap);
OS::Abort();
}
- void* addr = mmap(NULL,
+ void* addr = mmap(OS::GetRandomMmapAddr(),
size,
#if defined(__native_client__)
// The Native Client port of V8 uses an interpreter,
fileno(f),
0);
ASSERT(addr != MAP_FAILED);
- int result = munmap(addr, size);
- ASSERT_EQ(0, result);
- USE(result);
+ OS::Free(addr, size);
fclose(f);
}
#endif
}
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ Address base = static_cast<Address>(reservation);
+ Address aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+#if defined(__native_client__)
+ // The Native Client port of V8 uses an interpreter,
+ // so code pages don't need PROT_EXEC.
+ int prot = PROT_READ | PROT_WRITE;
+#else
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+#endif
+ if (MAP_FAILED == mmap(base,
+ size,
+ prot,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mmap(base,
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ return true;
+}
+
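// Editor's sketch (not part of this patch): "lazy commit" means the kernel
// backs committed pages with physical memory only on first touch. On Linux
// this is observable with mincore(2): after a fresh anonymous mmap no page
// is resident, and touching one page makes exactly that page resident.
#include <assert.h>
#include <sys/mman.h>

static void ObserveLazyCommit() {
  const size_t size = 16 * 4096;
  void* mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  static_cast<char*>(mem)[0] = 1;  // Touch the first page only.
  unsigned char vec[16];
  int rc = mincore(mem, size, vec);
  assert(rc == 0);
  assert(vec[0] & 1);  // The touched page is resident; the rest usually not.
  munmap(mem, size);
}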
} } // namespace v8::internal
namespace internal {
+// Constants used for mmap.
+// kMmapFd is used to pass vm_alloc flags to tag the region with the
+// user-defined tag 255. This helps identify V8-allocated regions in memory
+// analysis tools like vmmap(1).
+static const int kMmapFd = VM_MAKE_TAG(255);
+static const off_t kMmapFdOffset = 0;
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, getpagesize());
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* mbase = mmap(OS::GetRandomMmapAddr(),
+ msize,
+ prot,
+ MAP_PRIVATE | MAP_ANON,
+ kMmapFd,
+ kMmapFdOffset);
+ if (mbase == MAP_FAILED) {
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
+ return NULL;
+ }
+ *allocated = msize;
+ return mbase;
+}
+
+
void OS::DumpBacktrace() {
// If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
if (backtrace == NULL) return;
int size = ftell(file);
void* memory =
- mmap(NULL,
+ mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
return NULL;
}
void* memory =
- mmap(NULL,
+ mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
+ if (memory_) OS::Free(memory_, size_);
fclose(file_);
}
return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
}
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ Address base = static_cast<Address>(reservation);
+ Address aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* address,
+ size_t size,
+ bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(address,
+ size,
+ prot,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* address, size_t size) {
+ return mmap(address,
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ return false;
+}
+
} } // namespace v8::internal
}
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, AllocateAlignment());
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* addr = OS::GetRandomMmapAddr();
+ void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (mbase == MAP_FAILED) {
+ LOG(i::Isolate::Current(),
+ StringEvent("OS::Allocate", "mmap failed"));
+ return NULL;
+ }
+ *allocated = msize;
+ return mbase;
+}
+
+
void OS::DumpBacktrace() {
// Currently unsupported.
}
return frames_count;
}
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ Address base = static_cast<Address>(reservation);
+ Address aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(base,
+ size,
+ prot,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mmap(base,
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
} } // namespace v8::internal
}
+// Maximum size of the virtual memory. 0 means there is no artificial
+// limit.
+
+intptr_t OS::MaxVirtualMemory() {
+ struct rlimit limit;
+ int result = getrlimit(RLIMIT_DATA, &limit);
+ if (result != 0) return 0;
+ return limit.rlim_cur;
+}
+
+
int OS::ActivationFrameAlignment() {
#if V8_TARGET_ARCH_ARM
// On EABI ARM targets this is required for fp correctness in the
}
+intptr_t OS::CommitPageSize() {
+ static intptr_t page_size = getpagesize();
+ return page_size;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+ // TODO(1240712): munmap has a return value which is ignored here.
+ int result = munmap(address, size);
+ USE(result);
+ ASSERT(result == 0);
+}
+
+
+// Get rid of writable permission on code allocations.
+void OS::ProtectCode(void* address, const size_t size) {
+#if defined(__CYGWIN__)
+ DWORD old_protect;
+ VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
+#elif defined(__native_client__)
+ // The Native Client port of V8 uses an interpreter, so
+ // code pages don't need PROT_EXEC.
+ mprotect(address, size, PROT_READ);
+#else
+ mprotect(address, size, PROT_READ | PROT_EXEC);
+#endif
+}
+
+
+// Create guard pages.
+void OS::Guard(void* address, const size_t size) {
+#if defined(__CYGWIN__)
+ DWORD oldprotect;
+ VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
+#else
+ mprotect(address, size, PROT_NONE);
+#endif
+}
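// Editor's sketch (not part of this patch): a guard page is simply a page
// with all access revoked, so any stray read or write faults immediately.
// The POSIX equivalent of OS::Guard() for one page of an existing mapping:
#include <sys/mman.h>

static void PlaceGuardPage(void* page, size_t page_size) {
  // After this call, touching 'page' raises SIGSEGV -- the behavior V8
  // relies on to catch overruns of stacks and memory regions.
  mprotect(page, page_size, PROT_NONE);
}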
+
+
+void* OS::GetRandomMmapAddr() {
+#if defined(__native_client__)
+ // TODO(bradchen): restore randomization once Native Client gets
+ // smarter about using mmap address hints.
+ // See http://code.google.com/p/nativeclient/issues/3341
+ return NULL;
+#endif
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ // Note that the current isolate isn't set up in a call path via
+ // CpuFeatures::Probe. We don't care about randomization in this case because
+ // the code page is immediately freed.
+ if (isolate != NULL) {
+ uintptr_t raw_addr;
+ isolate->random_number_generator()->NextBytes(&raw_addr, sizeof(raw_addr));
+#if V8_TARGET_ARCH_X64
+ // Currently available CPUs have 48 bits of virtual addressing. Truncate
+ // the hint address to 46 bits to give the kernel a fighting chance of
+ // fulfilling our placement request.
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+ raw_addr &= 0x3ffff000;
+
+# ifdef __sun
+ // For our Solaris/illumos mmap hint, we pick a random address in the bottom
+ // half of the top half of the address space (that is, the third quarter).
+ // Because we do not MAP_FIXED, this will be treated only as a hint -- the
+ // system will not fail to mmap() because something else happens to already
+ // be mapped at our random address. We deliberately set the hint high enough
+ // to get well above the system's break (that is, the heap); Solaris and
+ // illumos will try the hint and if that fails allocate as if there were
+ // no hint at all. The high hint prevents the break from getting hemmed in
+ // at low values, ceding half of the address space to the system heap.
+ raw_addr += 0x80000000;
+# else
+ // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+ // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on Mac OS X
+ // 10.6 and 10.7.
+ raw_addr += 0x20000000;
+# endif
+#endif
+ return reinterpret_cast<void*>(raw_addr);
+ }
+ return NULL;
+}
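// Editor's sketch (not part of this patch): on x64 the 0x3ffffffff000 mask
// above keeps any random hint both page-aligned and within 46 bits, which
// can be verified directly:
#include <assert.h>
#include <stdint.h>

static void CheckHintMask() {
  uint64_t raw_addr = UINT64_C(0xDEADBEEFCAFEBABE);  // arbitrary random bits
  raw_addr &= UINT64_C(0x3ffffffff000);              // same mask as above
  assert((raw_addr & 0xfff) == 0);                   // 4 KB page-aligned
  assert(raw_addr < (UINT64_C(1) << 46));            // fits in 46 bits
}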
+
+
+size_t OS::AllocateAlignment() {
+ return getpagesize();
+}
+
+
void OS::Sleep(int milliseconds) {
useconds_t ms = static_cast<useconds_t>(milliseconds);
usleep(1000 * ms);
}
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, getpagesize());
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
+ if (mbase == MAP_FAILED) {
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
+ return NULL;
+ }
+ *allocated = msize;
+ return mbase;
+}
+
+
void OS::DumpBacktrace() {
// Currently unsupported.
}
return walker.index;
}
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ Address base = static_cast<Address>(reservation);
+ Address aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(base,
+ size,
+ prot,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mmap(base,
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
} } // namespace v8::internal
#define _TRUNCATE 0
#define STRUNCATE 80
+inline void MemoryBarrier() {
+ int barrier = 0;
+ __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier));
+}
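// Editor's note (not part of this patch): xchg acts as a full hardware
// barrier only when one operand is in memory (it then carries an implicit
// LOCK prefix). With the "=r" constraint above the exchange is
// register-register, so this shim is primarily a compiler barrier; a
// portable C++11 replacement would be
// std::atomic_thread_fence(std::memory_order_seq_cst) from <atomic>.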
+
#endif // __MINGW64_VERSION_MAJOR
namespace v8 {
namespace internal {
+intptr_t OS::MaxVirtualMemory() {
+ return 0;
+}
+
+
double ceiling(double x) {
return ceil(x);
}
#undef STRUNCATE
+// Get the system's page size as used by VirtualAlloc(), or the next power
+// of two if it is not one already. The reason for always returning a power
+// of two is that the rounding up in OS::Allocate expects it.
+static size_t GetPageSize() {
+ static size_t page_size = 0;
+ if (page_size == 0) {
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ page_size = RoundUpToPowerOf2(info.dwPageSize);
+ }
+ return page_size;
+}
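// Editor's sketch (not part of this patch): the power-of-two requirement
// exists because mask-based rounding, (x + m - 1) & ~(m - 1), is only
// correct when m is a power of two. A quick check of the invariant:
#include <assert.h>
#include <stddef.h>

static size_t RoundUpPow2(size_t x, size_t m) {  // m must be a power of two
  return (x + m - 1) & ~(m - 1);
}

static void CheckRounding() {
  assert(RoundUpPow2(1, 4096) == 4096);
  assert(RoundUpPow2(4096, 4096) == 4096);
  assert(RoundUpPow2(4097, 4096) == 8192);
}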
+
+
+// The allocation alignment is the guaranteed alignment for
+// VirtualAlloc'ed blocks of memory.
+size_t OS::AllocateAlignment() {
+ static size_t allocate_alignment = 0;
+ if (allocate_alignment == 0) {
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ allocate_alignment = info.dwAllocationGranularity;
+ }
+ return allocate_alignment;
+}
+
+
+void* OS::GetRandomMmapAddr() {
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ // Note that the current isolate isn't set up in a call path via
+ // CpuFeatures::Probe. We don't care about randomization in this case because
+ // the code page is immediately freed.
+ if (isolate != NULL) {
+ // The address range used to randomize RWX allocations in OS::Allocate().
+ // Try not to map pages into the default range into which Windows loads
+ // DLLs. Use a multiple of 64k to prevent committing unused memory.
+ // Note: this does not guarantee that RWX regions will be within the
+ // range kAllocationRandomAddressMin to kAllocationRandomAddressMax.
+#ifdef V8_HOST_ARCH_64_BIT
+ static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+ static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+#else
+ static const intptr_t kAllocationRandomAddressMin = 0x04000000;
+ static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+#endif
+ uintptr_t address =
+ (isolate->random_number_generator()->NextInt() << kPageSizeBits) |
+ kAllocationRandomAddressMin;
+ address &= kAllocationRandomAddressMax;
+ return reinterpret_cast<void *>(address);
+ }
+ return NULL;
+}
+
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+ LPVOID base = NULL;
+
+ if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+ // For executable pages, try to randomize the allocation address.
+ for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
+ base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
+ }
+ }
+
+ // After three attempts give up and let the OS find an address to use.
+ if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+ return base;
+}
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ // VirtualAlloc rounds allocated size to page size automatically.
+ size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
+
+ // Windows XP SP2 allows Data Execution Prevention (DEP).
+ int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+
+ LPVOID mbase = RandomizedVirtualAlloc(msize,
+ MEM_COMMIT | MEM_RESERVE,
+ prot);
+
+ if (mbase == NULL) {
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "VirtualAlloc failed"));
+ return NULL;
+ }
+
+ ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
+
+ *allocated = msize;
+ return mbase;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+ // TODO(1240712): VirtualFree has a return value which is ignored here.
+ VirtualFree(address, 0, MEM_RELEASE);
+ USE(size);
+}
+
+
+intptr_t OS::CommitPageSize() {
+ return 4096;
+}
+
+
+void OS::ProtectCode(void* address, const size_t size) {
+ DWORD old_protect;
+ VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
+}
+
+
+void OS::Guard(void* address, const size_t size) {
+ DWORD oldprotect;
+ VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
+}
+
+
void OS::Sleep(int milliseconds) {
::Sleep(milliseconds);
}
}
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* address = ReserveRegion(request_size);
+ if (address == NULL) return;
+ Address base = RoundUp(static_cast<Address>(address), alignment);
+ // Try reducing the size by freeing and then reallocating a specific area.
+ bool result = ReleaseRegion(address, request_size);
+ USE(result);
+ ASSERT(result);
+ address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+ if (address != NULL) {
+ request_size = size;
+ ASSERT(base == static_cast<Address>(address));
+ } else {
+ // Resizing failed, just go with a bigger area.
+ address = ReserveRegion(request_size);
+ if (address == NULL) return;
+ }
+ address_ = address;
+ size_ = request_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ ASSERT(IsReserved());
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ if (NULL == VirtualAlloc(address,
+ OS::CommitPageSize(),
+ MEM_COMMIT,
+ PAGE_NOACCESS)) {
+ return false;
+ }
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+ if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return VirtualFree(base, size, MEM_DECOMMIT) != 0;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return VirtualFree(base, 0, MEM_RELEASE) != 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
// ----------------------------------------------------------------------------
// Win32 thread support.
static void PrintError(const char* format, ...);
static void VPrintError(const char* format, va_list args);
+ // Allocate/Free memory used by the JS heap. Pages are readable/writable,
+ // but they are not guaranteed to be executable unless 'is_executable' is
+ // true. Returns the address of the allocated memory, or NULL on failure.
+ static void* Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable);
+ static void Free(void* address, const size_t size);
+
+ // This is the granularity at which the ProtectCode(...) call can set page
+ // permissions.
+ static intptr_t CommitPageSize();
+
+ // Mark code segments non-writable.
+ static void ProtectCode(void* address, const size_t size);
+
+ // Assign memory as a guard page so that access will cause an exception.
+ static void Guard(void* address, const size_t size);
+
+ // Generate a random address to be used for hinting mmap().
+ static void* GetRandomMmapAddr();
+
+ // Get the alignment guaranteed by Allocate().
+ static size_t AllocateAlignment();
+
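  // Editor's sketch (not part of this patch): the Create*Function() JIT
  // helpers elsewhere in this patch drive the API above roughly like so:
  //
  //   size_t actual_size;
  //   byte* buffer = static_cast<byte*>(
  //       OS::Allocate(1 * KB, &actual_size, true));  // executable pages
  //   if (buffer != NULL) {
  //     // ... emit machine code into 'buffer' ...
  //     OS::ProtectCode(buffer, actual_size);  // drop write permission
  //     // ... and eventually OS::Free(buffer, actual_size);
  //   }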
// Sleep for a number of milliseconds.
static void Sleep(const int milliseconds);
// positions indicated by the members of the CpuFeature enum from globals.h
static uint64_t CpuFeaturesImpliedByPlatform();
+ // Maximum size of the virtual memory. 0 means there is no artificial
+ // limit.
+ static intptr_t MaxVirtualMemory();
+
// Returns the double constant NAN
static double nan_value();
DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
};
+// Represents and controls an area of reserved memory.
+// Control of the reserved memory can be assigned to another VirtualMemory
+// object by assignment or copy-constructing. This removes the reserved
+// memory from the original object.
+class VirtualMemory {
+ public:
+ // Empty VirtualMemory object, controlling no reserved memory.
+ VirtualMemory();
+
+ // Reserves virtual memory with size.
+ explicit VirtualMemory(size_t size);
+
+ // Reserves virtual memory containing an area of the given size that is
+ // aligned per the given alignment. The aligned area may not start at the
+ // position returned by address().
+ VirtualMemory(size_t size, size_t alignment);
+
+ // Releases the reserved memory, if any, controlled by this VirtualMemory
+ // object.
+ ~VirtualMemory();
+
+ // Returns whether the memory has been reserved.
+ bool IsReserved();
+
+ // Initializes or resets an embedded VirtualMemory object.
+ void Reset();
+
+ // Returns the start address of the reserved memory.
+ // If the memory was reserved with an alignment, this address is not
+ // necessarily aligned. The user might need to round it up to a multiple of
+ // the alignment to get the start of the aligned block.
+ void* address() {
+ ASSERT(IsReserved());
+ return address_;
+ }
+
+ // Returns the size of the reserved memory. The returned value is only
+ // meaningful when IsReserved() returns true.
+ // If the memory was reserved with an alignment, this size may be larger
+ // than the requested size.
+ size_t size() { return size_; }
+
+ // Commits real memory. Returns whether the operation succeeded.
+ bool Commit(void* address, size_t size, bool is_executable);
+
+ // Uncommits real memory. Returns whether the operation succeeded.
+ bool Uncommit(void* address, size_t size);
+
+ // Creates a single guard page at the given address.
+ bool Guard(void* address);
+
+ void Release() {
+ ASSERT(IsReserved());
+ // Notice: Order is important here. The VirtualMemory object might live
+ // inside the allocated region.
+ void* address = address_;
+ size_t size = size_;
+ Reset();
+ bool result = ReleaseRegion(address, size);
+ USE(result);
+ ASSERT(result);
+ }
+
+ // Assign control of the reserved region to a different VirtualMemory object.
+ // The old object is no longer functional (IsReserved() returns false).
+ void TakeControl(VirtualMemory* from) {
+ ASSERT(!IsReserved());
+ address_ = from->address_;
+ size_ = from->size_;
+ from->Reset();
+ }
+
+ static void* ReserveRegion(size_t size);
+
+ static bool CommitRegion(void* base, size_t size, bool is_executable);
+
+ static bool UncommitRegion(void* base, size_t size);
+
+ // Must be called with a base pointer that has been returned by ReserveRegion
+ // and the same size it was reserved with.
+ static bool ReleaseRegion(void* base, size_t size);
+
+ // Returns true if the OS performs lazy commits, i.e. the memory allocation
+ // call defers actual physical memory allocation until the first memory
+ // access. Otherwise returns false.
+ static bool HasLazyCommits();
+
+ private:
+ void* address_; // Start address of the virtual memory.
+ size_t size_; // Size of the virtual memory.
+};
+
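// Editor's sketch (not part of this patch): typical reserve-then-commit use
// of the class above; KB/MB are V8's usual size constants and the snippet is
// illustrative only:
//
//   VirtualMemory reservation(1 * MB);        // reserve address space
//   if (reservation.IsReserved()) {
//     void* start = reservation.address();
//     if (reservation.Commit(start, 64 * KB, false)) {  // not executable
//       // ... use the committed 64 KB ...
//       reservation.Uncommit(start, 64 * KB);
//     }
//   }  // ~VirtualMemory releases the whole reserved region.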
+
// ----------------------------------------------------------------------------
// Thread
//
}
private:
- static V8_INLINE TimeTicks Now() {
+ V8_INLINE(static TimeTicks Now()) {
TimeTicks now = TimeTicks::HighResNow();
ASSERT(!now.IsNull());
return now;
#if V8_OS_POSIX
-static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) {
+static V8_INLINE(void InitializeNativeHandle(pthread_mutex_t* mutex)) {
int result;
#if defined(DEBUG)
// Use an error checking mutex in debug mode.
}
-static V8_INLINE void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex) {
+static V8_INLINE(void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex)) {
pthread_mutexattr_t attr;
int result = pthread_mutexattr_init(&attr);
ASSERT_EQ(0, result);
}
-static V8_INLINE void DestroyNativeHandle(pthread_mutex_t* mutex) {
+static V8_INLINE(void DestroyNativeHandle(pthread_mutex_t* mutex)) {
int result = pthread_mutex_destroy(mutex);
ASSERT_EQ(0, result);
USE(result);
}
-static V8_INLINE void LockNativeHandle(pthread_mutex_t* mutex) {
+static V8_INLINE(void LockNativeHandle(pthread_mutex_t* mutex)) {
int result = pthread_mutex_lock(mutex);
ASSERT_EQ(0, result);
USE(result);
}
-static V8_INLINE void UnlockNativeHandle(pthread_mutex_t* mutex) {
+static V8_INLINE(void UnlockNativeHandle(pthread_mutex_t* mutex)) {
int result = pthread_mutex_unlock(mutex);
ASSERT_EQ(0, result);
USE(result);
}
-static V8_INLINE bool TryLockNativeHandle(pthread_mutex_t* mutex) {
+static V8_INLINE(bool TryLockNativeHandle(pthread_mutex_t* mutex)) {
int result = pthread_mutex_trylock(mutex);
if (result == EBUSY) {
return false;
#elif V8_OS_WIN
-static V8_INLINE void InitializeNativeHandle(PCRITICAL_SECTION cs) {
+static V8_INLINE(void InitializeNativeHandle(PCRITICAL_SECTION cs)) {
InitializeCriticalSection(cs);
}
-static V8_INLINE void InitializeRecursiveNativeHandle(PCRITICAL_SECTION cs) {
+static V8_INLINE(void InitializeRecursiveNativeHandle(PCRITICAL_SECTION cs)) {
InitializeCriticalSection(cs);
}
-static V8_INLINE void DestroyNativeHandle(PCRITICAL_SECTION cs) {
+static V8_INLINE(void DestroyNativeHandle(PCRITICAL_SECTION cs)) {
DeleteCriticalSection(cs);
}
-static V8_INLINE void LockNativeHandle(PCRITICAL_SECTION cs) {
+static V8_INLINE(void LockNativeHandle(PCRITICAL_SECTION cs)) {
EnterCriticalSection(cs);
}
-static V8_INLINE void UnlockNativeHandle(PCRITICAL_SECTION cs) {
+static V8_INLINE(void UnlockNativeHandle(PCRITICAL_SECTION cs)) {
LeaveCriticalSection(cs);
}
-static V8_INLINE bool TryLockNativeHandle(PCRITICAL_SECTION cs) {
+static V8_INLINE(bool TryLockNativeHandle(PCRITICAL_SECTION cs)) {
return TryEnterCriticalSection(cs);
}
int level_;
#endif
- V8_INLINE void AssertHeldAndUnmark() {
+ V8_INLINE(void AssertHeldAndUnmark()) {
#ifdef DEBUG
ASSERT_EQ(1, level_);
level_--;
#endif
}
- V8_INLINE void AssertUnheldAndMark() {
+ V8_INLINE(void AssertUnheldAndMark()) {
#ifdef DEBUG
ASSERT_EQ(0, level_);
level_++;
// Set the value of the SO_REUSEADDR socket option.
bool SetReuseAddress(bool reuse_address);
- V8_INLINE bool IsValid() const {
+ V8_INLINE(bool IsValid() const) {
return native_handle_ != kInvalidNativeHandle;
}
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "platform/virtual-memory.h"
-
-#if V8_OS_POSIX
-#include <sys/types.h>
-#include <sys/mman.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-
-#include <unistd.h>
-#endif
-
-#if V8_OS_MACOSX
-#include <mach/vm_statistics.h>
-#endif
-
-#include <cerrno>
-
-#include "platform/mutex.h"
-#include "utils.h"
-#include "utils/random-number-generator.h"
-#if V8_OS_CYGIN || V8_OS_WIN
-#include "win32-headers.h"
-#endif
-
-namespace v8 {
-namespace internal {
-
-class RandomAddressGenerator V8_FINAL {
- public:
- V8_INLINE uintptr_t NextAddress() {
- LockGuard<Mutex> lock_guard(&mutex_);
- uintptr_t address = rng_.NextInt();
-#if V8_HOST_ARCH_64_BIT
- address = (address << 32) + static_cast<uintptr_t>(rng_.NextInt());
-#endif
- return address;
- }
-
- private:
- Mutex mutex_;
- RandomNumberGenerator rng_;
-};
-
-typedef LazyInstance<RandomAddressGenerator,
- DefaultConstructTrait<RandomAddressGenerator>,
- ThreadSafeInitOnceTrait>::type LazyRandomAddressGenerator;
-
-#define LAZY_RANDOM_ADDRESS_GENERATOR_INITIALIZER LAZY_INSTANCE_INITIALIZER
-
-
-static V8_INLINE void* GenerateRandomAddress() {
-#if V8_OS_NACL
- // TODO(bradchen): Restore randomization once Native Client gets smarter
- // about using mmap address hints.
- // See http://code.google.com/p/nativeclient/issues/3341
- return NULL;
-#else // V8_OS_NACL
- LazyRandomAddressGenerator random_address_generator =
- LAZY_RANDOM_ADDRESS_GENERATOR_INITIALIZER;
- uintptr_t address = random_address_generator.Pointer()->NextAddress();
-
-# if V8_TARGET_ARCH_X64
-# if V8_OS_CYGWIN || V8_OS_WIN
- // Try not to map pages into the default range that windows loads DLLs.
- // Use a multiple of 64KiB to prevent committing unused memory.
- address += V8_UINT64_C(0x00080000000);
- address &= V8_UINT64_C(0x3ffffff0000);
-# else // V8_OS_CYGWIN || V8_OS_WIN
- // Currently available CPUs have 48 bits of virtual addressing. Truncate
- // the hint address to 46 bits to give the kernel a fighting chance of
- // fulfilling our placement request.
- address &= V8_UINT64_C(0x3ffffffff000);
-# endif // V8_OS_CYGWIN || V8_OS_WIN
-# else // V8_TARGET_ARCH_X64
-# if V8_OS_CYGWIN || V8_OS_WIN
- // Try not to map pages into the default range that windows loads DLLs.
- // Use a multiple of 64KiB to prevent committing unused memory.
- address += 0x04000000;
- address &= 0x3fff0000;
-# elif V8_OS_SOLARIS
- // For our Solaris/illumos mmap hint, we pick a random address in the bottom
- // half of the top half of the address space (that is, the third quarter).
- // Because we do not MAP_FIXED, this will be treated only as a hint -- the
- // system will not fail to mmap() because something else happens to already
- // be mapped at our random address. We deliberately set the hint high enough
- // to get well above the system's break (that is, the heap); Solaris and
- // illumos will try the hint and if that fails allocate as if there were
- // no hint at all. The high hint prevents the break from getting hemmed in
- // at low values, ceding half of the address space to the system heap.
- address &= 0x3ffff000;
- address += 0x80000000;
-# else // V8_OS_CYGWIN || V8_OS_WIN
- // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
- // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on Mac OS X
- // 10.6 and 10.7.
- address &= 0x3ffff000;
- address += 0x20000000;
-# endif // V8_OS_CYGIN || V8_OS_WIN
-# endif // V8_TARGET_ARCH_X64
- return reinterpret_cast<void*>(address);
-#endif // V8_OS_NACL
-}
-
-
-// static
-void* VirtualMemory::AllocateRegion(size_t size,
- size_t* size_return,
- Executability executability) {
- ASSERT_LT(0, size);
- ASSERT_NE(NULL, size_return);
- void* address = ReserveRegion(size, &size);
- if (address == NULL) return NULL;
- if (!CommitRegion(address, size, executability)) {
- bool result = ReleaseRegion(address, size);
- ASSERT(result);
- USE(result);
- return NULL;
- }
- *size_return = size;
- return address;
-}
-
-#if V8_OS_CYGWIN || V8_OS_WIN
-
-// static
-void* VirtualMemory::ReserveRegion(size_t size, size_t* size_return) {
- ASSERT_LT(0, size);
- ASSERT_NE(NULL, size_return);
- // The minimum size that can be reserved is 64KiB, see
- // http://msdn.microsoft.com/en-us/library/ms810627.aspx
- if (size < 64 * KB) {
- size = 64 * KB;
- }
- size = RoundUp(size, GetPageSize());
- LPVOID address = NULL;
- // Try and randomize the allocation address (up to three attempts).
- for (unsigned attempts = 0; address == NULL && attempts < 3; ++attempts) {
- address = VirtualAlloc(GenerateRandomAddress(),
- size,
- MEM_RESERVE,
- PAGE_NOACCESS);
- }
- if (address == NULL) {
- // After three attempts give up and let the kernel find an address.
- address = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
- }
- if (address == NULL) {
- return NULL;
- }
- ASSERT(IsAligned(reinterpret_cast<uintptr_t>(address),
- GetAllocationGranularity()));
- *size_return = size;
- return address;
-}
-
-
-// static
-void* VirtualMemory::ReserveRegion(size_t size,
- size_t* size_return,
- size_t alignment) {
- ASSERT_LT(0, size);
- ASSERT_NE(NULL, size_return);
- ASSERT(IsAligned(alignment, GetAllocationGranularity()));
-
- size_t reserved_size = RoundUp(size + alignment, GetAllocationGranularity());
- Address reserved_base = static_cast<Address>(
- ReserveRegion(reserved_size, &reserved_size));
- if (reserved_base == NULL) {
- return NULL;
- }
- ASSERT_LE(size, reserved_size);
- ASSERT_LE(size + alignment, reserved_size);
- ASSERT(IsAligned(reserved_size, GetPageSize()));
-
- // Try reducing the size by freeing and then reallocating a specific area.
- bool result = ReleaseRegion(reserved_base, reserved_size);
- USE(result);
- ASSERT(result);
- size_t aligned_size = RoundUp(size, GetPageSize());
- Address aligned_base = static_cast<Address>(
- VirtualAlloc(RoundUp(reserved_base, alignment),
- aligned_size,
- MEM_RESERVE,
- PAGE_NOACCESS));
- if (aligned_base != NULL) {
- ASSERT(aligned_base == RoundUp(reserved_base, alignment));
- ASSERT(IsAligned(reinterpret_cast<uintptr_t>(aligned_base),
- GetAllocationGranularity()));
- ASSERT(IsAligned(aligned_size, GetPageSize()));
- *size_return = aligned_size;
- return aligned_base;
- }
-
- // Resizing failed, just go with a bigger area.
- ASSERT(IsAligned(reserved_size, GetAllocationGranularity()));
- return ReserveRegion(reserved_size, size_return);
-}
-
-
-// static
-bool VirtualMemory::CommitRegion(void* address,
- size_t size,
- Executability executability) {
- ASSERT_NE(NULL, address);
- ASSERT_LT(0, size);
- DWORD protect = 0;
- switch (executability) {
- case NOT_EXECUTABLE:
- protect = PAGE_READWRITE;
- break;
-
- case EXECUTABLE:
- protect = PAGE_EXECUTE_READWRITE;
- break;
- }
- LPVOID result = VirtualAlloc(address, size, MEM_COMMIT, protect);
- if (result == NULL) {
- ASSERT(GetLastError() != ERROR_INVALID_ADDRESS);
- return false;
- }
- ASSERT_EQ(address, result);
- return true;
-}
-
-
-// static
-bool VirtualMemory::UncommitRegion(void* address, size_t size) {
- ASSERT_NE(NULL, address);
- ASSERT_LT(0, size);
- int result = VirtualFree(address, size, MEM_DECOMMIT);
- if (result == 0) {
- return false;
- }
- return true;
-}
-
-
-// static
-bool VirtualMemory::WriteProtectRegion(void* address, size_t size) {
- ASSERT_NE(NULL, address);
- ASSERT_LT(0, size);
- DWORD old_protect;
- return VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-}
-
-
-// static
-bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
- ASSERT_NE(NULL, address);
- ASSERT_LT(0, size);
- USE(size);
- int result = VirtualFree(address, 0, MEM_RELEASE);
- if (result == 0) {
- return false;
- }
- return true;
-}
-
-
-// static
-size_t VirtualMemory::GetAllocationGranularity() {
- static size_t allocation_granularity = 0;
- if (allocation_granularity == 0) {
- SYSTEM_INFO system_info;
- GetSystemInfo(&system_info);
- allocation_granularity = system_info.dwAllocationGranularity;
- MemoryBarrier();
- }
- ASSERT_GE(allocation_granularity, GetPageSize());
- return allocation_granularity;
-}
-
-
-// static
-size_t VirtualMemory::GetLimit() {
- return 0;
-}
-
-
-// static
-size_t VirtualMemory::GetPageSize() {
- static size_t page_size = 0;
- if (page_size == 0) {
- SYSTEM_INFO system_info;
- GetSystemInfo(&system_info);
- page_size = system_info.dwPageSize;
- MemoryBarrier();
- }
- return page_size;
-}
-
-
-#else // V8_OS_CYGIN || V8_OS_WIN
-
-
-// Constants used for mmap.
-#if V8_OS_MACOSX
-// kMmapFd is used to pass vm_alloc flags to tag the region with the user
-// defined tag 255 This helps identify V8-allocated regions in memory analysis
-// tools like vmmap(1).
-static const int kMmapFd = VM_MAKE_TAG(255);
-#else
-static const int kMmapFd = -1;
-#endif // V8_OS_MACOSX
-static const off_t kMmapFdOffset = 0;
-
-
-// static
-void* VirtualMemory::ReserveRegion(size_t size, size_t* size_return) {
- ASSERT_LT(0, size);
- ASSERT_NE(NULL, size_return);
-
- size = RoundUp(size, GetPageSize());
- void* address = mmap(GenerateRandomAddress(),
- size,
- PROT_NONE,
- MAP_ANON | MAP_NORESERVE | MAP_PRIVATE,
- kMmapFd,
- kMmapFdOffset);
- if (address == MAP_FAILED) {
- ASSERT_NE(EINVAL, errno);
- return NULL;
- }
- *size_return = size;
- return address;
-}
-
-
-// static
-void* VirtualMemory::ReserveRegion(size_t size,
- size_t* size_return,
- size_t alignment) {
- ASSERT_LT(0, size);
- ASSERT_NE(NULL, size_return);
- ASSERT(IsAligned(alignment, GetPageSize()));
-
- size_t reserved_size;
- Address reserved_base = static_cast<Address>(
- ReserveRegion(size + alignment, &reserved_size));
- if (reserved_base == NULL) {
- return NULL;
- }
-
- Address aligned_base = RoundUp(reserved_base, alignment);
- ASSERT_LE(reserved_base, aligned_base);
-
- // Unmap extra memory reserved before the aligned region.
- if (aligned_base != reserved_base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - reserved_base);
- bool result = ReleaseRegion(reserved_base, prefix_size);
- ASSERT(result);
- USE(result);
- reserved_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, GetPageSize());
- ASSERT_LE(aligned_size, reserved_size);
-
- // Unmap extra memory reserved after the aligned region.
- if (aligned_size != reserved_size) {
- size_t suffix_size = reserved_size - aligned_size;
- bool result = ReleaseRegion(aligned_base + aligned_size, suffix_size);
- ASSERT(result);
- USE(result);
- reserved_size -= suffix_size;
- }
-
- ASSERT(aligned_size == reserved_size);
- ASSERT_NE(NULL, aligned_base);
-
- *size_return = aligned_size;
- return aligned_base;
-}
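// The trimming arithmetic above, restated as a stand-alone sketch on plain
// integers (hypothetical 4 KB pages and 64 KB alignment): over-reserve by
// |alignment|, round the base up, then release the prefix and suffix so
// exactly RoundUp(size, page size) bytes remain.
#include <cassert>
#include <cstdint>

// Round |x| up to a multiple of |m|; |m| must be a power of two.
static uintptr_t RoundUpTo(uintptr_t x, uintptr_t m) {
  return (x + m - 1) & ~(m - 1);
}

int main() {
  const uintptr_t kPage = 0x1000, kAlign = 0x10000, kSize = 0x100000;
  uintptr_t reserved_base = 0x12345000;             // page- but not 64K-aligned
  uintptr_t reserved_size = RoundUpTo(kSize, kPage) + kAlign;
  uintptr_t aligned_base = RoundUpTo(reserved_base, kAlign);
  uintptr_t prefix = aligned_base - reserved_base;  // unmapped before the block
  uintptr_t aligned_size = RoundUpTo(kSize, kPage);
  uintptr_t suffix = reserved_size - prefix - aligned_size;  // unmapped after
  assert(aligned_base % kAlign == 0);
  assert(prefix + aligned_size + suffix == reserved_size);
  return 0;
}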
-
-
-// static
-bool VirtualMemory::CommitRegion(void* address,
- size_t size,
- Executability executability) {
- ASSERT_NE(NULL, address);
- ASSERT_LT(0, size);
- int prot = 0;
- // The Native Client port of V8 uses an interpreter,
- // so code pages don't need PROT_EXEC.
-#if V8_OS_NACL
- executability = NOT_EXECUTABLE;
-#endif
- switch (executability) {
- case NOT_EXECUTABLE:
- prot = PROT_READ | PROT_WRITE;
- break;
-
- case EXECUTABLE:
- prot = PROT_EXEC | PROT_READ | PROT_WRITE;
- break;
- }
- void* result = mmap(address,
- size,
- prot,
- MAP_ANON | MAP_FIXED | MAP_PRIVATE,
- kMmapFd,
- kMmapFdOffset);
- if (result == MAP_FAILED) {
- ASSERT_NE(EINVAL, errno);
- return false;
- }
- return true;
-}
-
-
-// static
-bool VirtualMemory::UncommitRegion(void* address, size_t size) {
- ASSERT_NE(NULL, address);
- ASSERT_LT(0, size);
- void* result = mmap(address,
- size,
- PROT_NONE,
- MAP_ANON | MAP_FIXED | MAP_NORESERVE | MAP_PRIVATE,
- kMmapFd,
- kMmapFdOffset);
- if (result == MAP_FAILED) {
- ASSERT_NE(EINVAL, errno);
- return false;
- }
- return true;
-}
-
-
-// static
-bool VirtualMemory::WriteProtectRegion(void* address, size_t size) {
- ASSERT_NE(NULL, address);
- ASSERT_LT(0, size);
-#if V8_OS_NACL
- // The Native Client port of V8 uses an interpreter,
- // so code pages don't need PROT_EXEC.
- int prot = PROT_READ;
-#else
- int prot = PROT_EXEC | PROT_READ;
-#endif
- int result = mprotect(address, size, prot);
- if (result < 0) {
- ASSERT_NE(EINVAL, errno);
- return false;
- }
- return true;
-}
-
-
-// static
-bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
- ASSERT_NE(NULL, address);
- ASSERT_LT(0, size);
- int result = munmap(address, size);
- if (result < 0) {
- ASSERT_NE(EINVAL, errno);
- return false;
- }
- return true;
-}
-
-
-// static
-size_t VirtualMemory::GetAllocationGranularity() {
- return GetPageSize();
-}
-
-
-// static
-size_t VirtualMemory::GetLimit() {
- struct rlimit rlim;
- int result = getrlimit(RLIMIT_DATA, &rlim);
- ASSERT_EQ(0, result);
- USE(result);
- return rlim.rlim_cur;
-}
-
-
-// static
-size_t VirtualMemory::GetPageSize() {
- static const size_t kPageSize = getpagesize();
- return kPageSize;
-}
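// Stand-alone sketch (POSIX) of the two queries above: GetLimit() reports
// the soft data-segment limit via getrlimit(), and GetPageSize() caches the
// VM page size from getpagesize().
#include <sys/resource.h>
#include <unistd.h>
#include <cstdio>

int main() {
  struct rlimit rlim;
  if (getrlimit(RLIMIT_DATA, &rlim) == 0) {
    std::printf("data segment soft limit: %llu\n",
                static_cast<unsigned long long>(rlim.rlim_cur));
  }
  std::printf("page size: %ld\n", static_cast<long>(getpagesize()));
  return 0;
}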
-
-#endif // V8_OS_CYGWIN || V8_OS_WIN
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PLATFORM_VIRTUAL_MEMORY_H_
-#define V8_PLATFORM_VIRTUAL_MEMORY_H_
-
-#include "checks.h"
-#include "globals.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// VirtualMemory
-//
-// This class represents and controls an area of reserved memory.
-// Control of the reserved memory can be transferred to another VirtualMemory
-// object with TakeControl(). This removes the reserved memory from the
-// original object.
-class VirtualMemory V8_FINAL {
- public:
- // The executability of a memory region.
- enum Executability { NOT_EXECUTABLE, EXECUTABLE };
-
- // Empty VirtualMemory object, controlling no reserved memory.
- VirtualMemory() : address_(NULL), size_(0) {}
-
- // Reserves virtual memory of the given size.
- explicit VirtualMemory(size_t size) : size_(0) {
- address_ = ReserveRegion(size, &size_);
- }
-
- // Reserves virtual memory containing an area of the given size that is
- // aligned to the given alignment. The aligned area may not start at the
- // position returned by address().
- VirtualMemory(size_t size, size_t alignment) : size_(0) {
- address_ = ReserveRegion(size, &size_, alignment);
- }
-
- // Releases the reserved memory, if any, controlled by this VirtualMemory
- // object.
- ~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address_, size_);
- ASSERT(result);
- USE(result);
- }
- }
-
- // Returns whether the memory contains the specified address.
- bool Contains(const void* address) const V8_WARN_UNUSED_RESULT {
- if (!IsReserved()) return false;
- if (address < address_) return false;
- if (address >= reinterpret_cast<uint8_t*>(address_) + size_) return false;
- return true;
- }
-
- // Returns whether the memory has been reserved.
- bool IsReserved() const V8_WARN_UNUSED_RESULT {
- return address_ != NULL;
- }
-
- // Initializes or resets an embedded VirtualMemory object.
- void Reset() {
- address_ = NULL;
- size_ = 0;
- }
-
- // Returns the start address of the reserved memory. The returned value is
- // only meaningful if |IsReserved()| returns true.
- // If the memory was reserved with an alignment, this address is not
- // necessarily aligned. The user might need to round it up to a multiple of
- // the alignment to get the start of the aligned block.
- void* address() const V8_WARN_UNUSED_RESULT { return address_; }
-
- // Returns the size of the reserved memory. The returned value is only
- // meaningful when |IsReserved()| returns true.
- // If the memory was reserved with an alignment, this size may be larger
- // than the requested size.
- size_t size() const V8_WARN_UNUSED_RESULT { return size_; }
-
- // Commits real memory. Returns whether the operation succeeded.
- bool Commit(void* address,
- size_t size,
- Executability executability) V8_WARN_UNUSED_RESULT {
- ASSERT(IsReserved());
- ASSERT(Contains(address));
- ASSERT(Contains(reinterpret_cast<uint8_t*>(address) + size - 1));
- return CommitRegion(address, size, executability);
- }
-
- // Uncommits real memory. Returns whether the operation succeeded.
- bool Uncommit(void* address, size_t size) V8_WARN_UNUSED_RESULT {
- ASSERT(IsReserved());
- ASSERT(Contains(address));
- ASSERT(Contains(reinterpret_cast<uint8_t*>(address) + size - 1));
- return UncommitRegion(address, size);
- }
-
- // Creates guard pages at the given address.
- bool Guard(void* address, size_t size) V8_WARN_UNUSED_RESULT {
- // We can simply uncommit the specified pages. Any access
- // to them will cause a processor exception.
- return Uncommit(address, size);
- }
-
- void Release() {
- ASSERT(IsReserved());
- // WARNING: Order is important here. The VirtualMemory
- // object might live inside the allocated region.
- void* address = address_;
- size_t size = size_;
- Reset();
- bool result = ReleaseRegion(address, size);
- USE(result);
- ASSERT(result);
- }
-
- // Assign control of the reserved region to a different VirtualMemory object.
- // The old object is no longer functional (IsReserved() returns false).
- void TakeControl(VirtualMemory* from) {
- ASSERT(!IsReserved());
- address_ = from->address_;
- size_ = from->size_;
- from->Reset();
- }
-
- // Allocates a region of memory pages. The pages are readable/writable,
- // but are not guaranteed to be executable unless explicitly requested.
- // Returns the base address of the allocated memory region, or NULL in
- // case of an error.
- static void* AllocateRegion(size_t size,
- size_t* size_return,
- Executability executability)
- V8_WARN_UNUSED_RESULT;
-
- static void* ReserveRegion(size_t size,
- size_t* size_return) V8_WARN_UNUSED_RESULT;
-
- static void* ReserveRegion(size_t size,
- size_t* size_return,
- size_t alignment) V8_WARN_UNUSED_RESULT;
-
- static bool CommitRegion(void* address,
- size_t size,
- Executability executability) V8_WARN_UNUSED_RESULT;
-
- static bool UncommitRegion(void* address, size_t size) V8_WARN_UNUSED_RESULT;
-
- // Mark code segments readable-executable.
- static bool WriteProtectRegion(void* address,
- size_t size) V8_WARN_UNUSED_RESULT;
-
- // Must be called with a base pointer that has been returned by ReserveRegion
- // and the same size it was reserved with.
- static bool ReleaseRegion(void* address, size_t size) V8_WARN_UNUSED_RESULT;
-
- // The granularity for the starting address at which virtual memory can be
- // reserved (or allocated, in terms of the underlying operating system).
- static size_t GetAllocationGranularity() V8_PURE;
-
- // The maximum size of the virtual memory. 0 means there is no artificial
- // limit.
- static size_t GetLimit() V8_PURE;
-
- // The page size and the granularity of page protection and commitment.
- static size_t GetPageSize() V8_PURE;
-
- // Returns true if the OS performs lazy commits, i.e. the memory allocation
- // call defers actual physical memory allocation until the first memory
- // access.
- // Otherwise returns false.
- static V8_INLINE bool HasLazyCommits() {
-#if V8_OS_LINUX
- return true;
-#else
- return false;
-#endif
- }
-
- private:
- void* address_; // Start address of the virtual memory.
- size_t size_; // Size of the virtual memory.
-};
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_VIRTUAL_MEMORY_H_
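// Stand-alone analogue (POSIX, hypothetical Region type) of the ordering
// rule in Release() above: copy the mapping parameters into locals before
// unmapping, because the object that holds them may itself live inside the
// mapping.
#include <sys/mman.h>
#include <cassert>
#include <cstddef>
#include <new>

struct Region {
  void* base;
  size_t size;
  void Release() {
    void* b = base;     // Copy members to locals first...
    size_t s = size;
    base = NULL;
    size = 0;
    munmap(b, s);       // ...because 'this' may point into [b, b + s).
  }
};

int main() {
  size_t size = 4096;
  void* base = mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_ANON | MAP_PRIVATE, -1, 0);
  assert(base != MAP_FAILED);
  Region* r = new (base) Region();  // The object lives inside its region.
  r->base = base;
  r->size = size;
  r->Release();  // Safe: Release() touches no members after the munmap.
  return 0;
}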
}
+// -----------------------------------------------------------------------------
+// MemoryAllocator
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void MemoryAllocator::Protect(Address start, size_t size) {
+ OS::Protect(start, size);
+}
+
+
+void MemoryAllocator::Unprotect(Address start,
+ size_t size,
+ Executability executable) {
+ OS::Unprotect(start, size, executable);
+}
+
+
+void MemoryAllocator::ProtectChunkFromPage(Page* page) {
+ int id = GetChunkId(page);
+ OS::Protect(chunks_[id].address(), chunks_[id].size());
+}
+
+
+void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
+ int id = GetChunkId(page);
+ OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
+ chunks_[id].owner()->executable() == EXECUTABLE);
+}
+
+#endif
+
+
// --------------------------------------------------------------------------
// PagedSpace
Page* Page::Initialize(Heap* heap,
MemoryChunk* chunk,
- VirtualMemory::Executability executability,
+ Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
ASSERT(page->area_size() <= kNonCodeObjectAreaSize);
bool CodeRange::CommitRawMemory(Address start, size_t length) {
- return isolate_->memory_allocator()->CommitMemory(
- start, length, VirtualMemory::EXECUTABLE);
+ return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
}
void CodeRange::FreeRawMemory(Address address, size_t length) {
ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
free_list_.Add(FreeBlock(address, length));
- bool result = code_range_->Uncommit(address, length);
- ASSERT(result);
- USE(result);
+ code_range_->Uncommit(address, length);
}
bool MemoryAllocator::CommitMemory(Address base,
size_t size,
- VirtualMemory::Executability executability) {
- if (!VirtualMemory::CommitRegion(base, size, executability)) {
+ Executability executable) {
+ if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
return false;
}
UpdateAllocatedSpaceLimits(base, base + size);
void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
- VirtualMemory::Executability executability) {
+ Executability executable) {
// TODO(gc) make code_range part of memory allocator?
ASSERT(reservation->IsReserved());
size_t size = reservation->size();
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
- if (executability == VirtualMemory::EXECUTABLE) {
+ if (executable == EXECUTABLE) {
ASSERT(size_executable_ >= size);
size_executable_ -= size;
}
// Code which is part of the code-range does not have its own VirtualMemory.
ASSERT(!isolate_->code_range()->contains(
static_cast<Address>(reservation->address())));
- ASSERT(executability == VirtualMemory::NOT_EXECUTABLE ||
- !isolate_->code_range()->exists());
+ ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
reservation->Release();
}
void MemoryAllocator::FreeMemory(Address base,
size_t size,
- VirtualMemory::Executability executability) {
+ Executability executable) {
// TODO(gc) make code_range part of memory allocator?
ASSERT(size_ >= size);
size_ -= size;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
- if (executability == VirtualMemory::EXECUTABLE) {
+ if (executable == EXECUTABLE) {
ASSERT(size_executable_ >= size);
size_executable_ -= size;
}
if (isolate_->code_range()->contains(static_cast<Address>(base))) {
- ASSERT(executability == VirtualMemory::EXECUTABLE);
+ ASSERT(executable == EXECUTABLE);
isolate_->code_range()->FreeRawMemory(base, size);
} else {
- ASSERT(executability == VirtualMemory::NOT_EXECUTABLE ||
- !isolate_->code_range()->exists());
+ ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
bool result = VirtualMemory::ReleaseRegion(base, size);
USE(result);
ASSERT(result);
}
-Address MemoryAllocator::AllocateAlignedMemory(
- size_t reserve_size,
- size_t commit_size,
- size_t alignment,
- VirtualMemory::Executability executability,
- VirtualMemory* controller) {
+Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
+ size_t commit_size,
+ size_t alignment,
+ Executability executable,
+ VirtualMemory* controller) {
ASSERT(commit_size <= reserve_size);
VirtualMemory reservation;
Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
if (base == NULL) return NULL;
- if (executability == VirtualMemory::EXECUTABLE) {
+ if (executable == EXECUTABLE) {
if (!CommitExecutableMemory(&reservation,
base,
commit_size,
base = NULL;
}
} else {
- if (reservation.Commit(base, commit_size, VirtualMemory::NOT_EXECUTABLE)) {
+ if (reservation.Commit(base, commit_size, false)) {
UpdateAllocatedSpaceLimits(base, base + commit_size);
} else {
base = NULL;
Page::kPageSize,
area_start,
area_end,
- VirtualMemory::NOT_EXECUTABLE,
+ NOT_EXECUTABLE,
semi_space);
chunk->set_next_chunk(NULL);
chunk->set_prev_chunk(NULL);
size_t size,
Address area_start,
Address area_end,
- VirtualMemory::Executability executability,
+ Executability executable,
Space* owner) {
MemoryChunk* chunk = FromAddress(base);
ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
- if (executability == VirtualMemory::EXECUTABLE) {
+ if (executable == EXECUTABLE) {
chunk->SetFlag(IS_EXECUTABLE);
}
size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
MemoryAllocator::CodePageGuardSize() : 0;
size_t header_size = area_start() - address() - guard_size;
- size_t commit_size = RoundUp(header_size + requested,
- VirtualMemory::GetPageSize());
+ size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
- VirtualMemory::GetPageSize());
+ OS::CommitPageSize());
if (commit_size > committed_size) {
// Commit size should be less or equal than the reserved size.
Address start = address() + committed_size + guard_size;
size_t length = commit_size - committed_size;
if (reservation_.IsReserved()) {
- VirtualMemory::Executability executability = IsFlagSet(IS_EXECUTABLE)
- ? VirtualMemory::EXECUTABLE : VirtualMemory::NOT_EXECUTABLE;
+ Executability executable = IsFlagSet(IS_EXECUTABLE)
+ ? EXECUTABLE : NOT_EXECUTABLE;
if (!heap()->isolate()->memory_allocator()->CommitMemory(
- start, length, executability)) {
+ start, length, executable)) {
return false;
}
} else {
}
-MemoryChunk* MemoryAllocator::AllocateChunk(
- intptr_t reserve_area_size,
- intptr_t commit_area_size,
- VirtualMemory::Executability executability,
- Space* owner) {
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
+ intptr_t commit_area_size,
+ Executability executable,
+ Space* owner) {
ASSERT(commit_area_size <= reserve_area_size);
size_t chunk_size;
// +----------------------------+<- base + chunk_size
//
- if (executability == VirtualMemory::EXECUTABLE) {
+ if (executable == EXECUTABLE) {
chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
- VirtualMemory::GetPageSize()) + CodePageGuardSize();
+ OS::CommitPageSize()) + CodePageGuardSize();
// Check executable memory limit.
if (size_executable_ + chunk_size > capacity_executable_) {
// Size of header (not executable) plus area (executable).
size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
- VirtualMemory::GetPageSize());
+ OS::CommitPageSize());
// Allocate executable memory either from code range or from the
// OS.
if (isolate_->code_range()->exists()) {
base = AllocateAlignedMemory(chunk_size,
commit_size,
MemoryChunk::kAlignment,
- executability,
+ executable,
&reservation);
if (base == NULL) return NULL;
// Update executable memory size.
area_end = area_start + commit_area_size;
} else {
chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
- VirtualMemory::GetPageSize());
- size_t commit_size = RoundUp(
- MemoryChunk::kObjectStartOffset + commit_area_size,
- VirtualMemory::GetPageSize());
+ OS::CommitPageSize());
+ size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
+ commit_area_size, OS::CommitPageSize());
base = AllocateAlignedMemory(chunk_size,
commit_size,
MemoryChunk::kAlignment,
- executability,
+ executable,
&reservation);
if (base == NULL) return NULL;
chunk_size,
area_start,
area_end,
- executability,
+ executable,
owner);
result->set_reserved_memory(&reservation);
return result;
}
-Page* MemoryAllocator::AllocatePage(
- intptr_t size,
- PagedSpace* owner,
- VirtualMemory::Executability executability) {
- MemoryChunk* chunk = AllocateChunk(size, size, executability, owner);
+Page* MemoryAllocator::AllocatePage(intptr_t size,
+ PagedSpace* owner,
+ Executability executable) {
+ MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
if (chunk == NULL) return NULL;
- return Page::Initialize(isolate_->heap(), chunk, executability, owner);
+ return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}
-LargePage* MemoryAllocator::AllocateLargePage(
- intptr_t object_size,
- Space* owner,
- VirtualMemory::Executability executability) {
+LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
+ Space* owner,
+ Executability executable) {
MemoryChunk* chunk = AllocateChunk(object_size,
object_size,
- executability,
+ executable,
owner);
if (chunk == NULL) return NULL;
return LargePage::Initialize(isolate_->heap(), chunk);
VirtualMemory* reservation = chunk->reserved_memory();
if (reservation->IsReserved()) {
- FreeMemory(reservation, chunk->executability());
+ FreeMemory(reservation, chunk->executable());
} else {
FreeMemory(chunk->address(),
chunk->size(),
- chunk->executability());
+ chunk->executable());
}
}
bool MemoryAllocator::CommitBlock(Address start,
size_t size,
- VirtualMemory::Executability executability) {
- if (!CommitMemory(start, size, executability)) return false;
+ Executability executable) {
+ if (!CommitMemory(start, size, executable)) return false;
if (Heap::ShouldZapGarbage()) {
ZapBlock(start, size);
int MemoryAllocator::CodePageGuardStartOffset() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
- return RoundUp(Page::kObjectStartOffset, VirtualMemory::GetPageSize());
+ return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
}
int MemoryAllocator::CodePageGuardSize() {
- return static_cast<int>(VirtualMemory::GetPageSize());
+ return static_cast<int>(OS::CommitPageSize());
}
int MemoryAllocator::CodePageAreaEndOffset() {
// We are guarding code pages: the last OS page will be protected as
// non-writable.
- return Page::kPageSize - static_cast<int>(VirtualMemory::GetPageSize());
+ return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
}
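// A stand-alone sketch of the code-page layout these offsets describe, with
// hypothetical sizes (4 KB commit pages, 8 KB Page::kObjectStartOffset, 1 MB
// Page::kPageSize): header, guard page, executable area, trailing guard.
#include <cassert>
#include <cstdio>

int main() {
  const unsigned kCommitPageSize = 0x1000;     // assumed OS commit page size
  const unsigned kObjectStartOffset = 0x2000;  // assumed page header size
  const unsigned kPageSize = 0x100000;         // assumed code page size
  unsigned guard_start = ((kObjectStartOffset + kCommitPageSize - 1) /
                          kCommitPageSize) * kCommitPageSize;
  unsigned area_start = guard_start + kCommitPageSize;  // first executable byte
  unsigned area_end = kPageSize - kCommitPageSize;      // trailing guard page
  std::printf("header [0,%#x) guard [%#x,%#x) code [%#x,%#x) guard to %#x\n",
              guard_start, guard_start, area_start, area_start, area_end,
              kPageSize);
  assert(area_start % kCommitPageSize == 0 && area_end % kCommitPageSize == 0);
  return 0;
}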
// Commit page header (not executable).
if (!vm->Commit(start,
CodePageGuardStartOffset(),
- VirtualMemory::NOT_EXECUTABLE)) {
+ false)) {
return false;
}
// Create guard page after the header.
- if (!vm->Guard(start + CodePageGuardStartOffset(),
- VirtualMemory::GetPageSize())) {
+ if (!vm->Guard(start + CodePageGuardStartOffset())) {
return false;
}
// Commit page body (executable).
if (!vm->Commit(start + CodePageAreaStartOffset(),
commit_size - CodePageGuardStartOffset(),
- VirtualMemory::EXECUTABLE)) {
+ true)) {
return false;
}
// Create guard page before the end.
- if (!vm->Guard(start + reserved_size - CodePageGuardSize(),
- VirtualMemory::GetPageSize())) {
+ if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
return false;
}
PagedSpace::PagedSpace(Heap* heap,
intptr_t max_capacity,
AllocationSpace id,
- VirtualMemory::Executability executability)
- : Space(heap, id, executability),
+ Executability executable)
+ : Space(heap, id, executable),
free_list_(this),
was_swept_conservatively_(false),
first_unswept_page_(Page::FromAddress(NULL)),
}
Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
- size, this, executability());
+ size, this, executable());
if (p == NULL) return false;
ASSERT(Capacity() <= max_capacity_);
LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
ASSERT(reservation_.IsReserved());
- heap()->isolate()->memory_allocator()->FreeMemory(
- &reservation_, VirtualMemory::NOT_EXECUTABLE);
+ heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
+ NOT_EXECUTABLE);
chunk_base_ = NULL;
chunk_size_ = 0;
}
Address start = end - pages * Page::kPageSize;
if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
capacity_,
- executability())) {
+ executable())) {
return false;
}
Address start = end - new_capacity;
size_t delta = new_capacity - capacity_;
- ASSERT(IsAligned(delta, VirtualMemory::GetAllocationGranularity()));
+ ASSERT(IsAligned(delta, OS::AllocateAlignment()));
if (!heap()->isolate()->memory_allocator()->CommitBlock(
- start, delta, executability())) {
+ start, delta, executable())) {
return false;
}
capacity_ = new_capacity;
Address space_end = start_ + maximum_capacity_;
Address old_start = space_end - capacity_;
size_t delta = capacity_ - new_capacity;
- ASSERT(IsAligned(delta, VirtualMemory::GetAllocationGranularity()));
+ ASSERT(IsAligned(delta, OS::AllocateAlignment()));
MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
if (!allocator->UncommitBlock(old_start, delta)) {
LargeObjectSpace::LargeObjectSpace(Heap* heap,
intptr_t max_capacity,
AllocationSpace id)
- // Managed on a per-allocation basis
- : Space(heap, id, VirtualMemory::NOT_EXECUTABLE),
+ : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
max_capacity_(max_capacity),
first_page_(NULL),
size_(0),
}
-MaybeObject* LargeObjectSpace::AllocateRaw(
- int object_size, VirtualMemory::Executability executability) {
+MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
+ Executability executable) {
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->always_allocate() &&
}
LargePage* page = heap()->isolate()->memory_allocator()->
- AllocateLargePage(object_size, this, executability);
+ AllocateLargePage(object_size, this, executable);
if (page == NULL) return Failure::RetryAfterGC(identity());
ASSERT(page->area_size() >= object_size);
#include "list.h"
#include "log.h"
#include "platform/mutex.h"
-#include "platform/virtual-memory.h"
#include "v8utils.h"
namespace v8 {
area_end_ = area_end;
}
- VirtualMemory::Executability executability() {
- return IsFlagSet(IS_EXECUTABLE)
- ? VirtualMemory::EXECUTABLE
- : VirtualMemory::NOT_EXECUTABLE;
+ Executability executable() {
+ return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
bool ContainsOnlyData() {
size_t size,
Address area_start,
Address area_end,
- VirtualMemory::Executability executability,
+ Executability executable,
Space* owner);
friend class MemoryAllocator;
static inline Page* Initialize(Heap* heap,
MemoryChunk* chunk,
- VirtualMemory::Executability executable,
+ Executability executable,
PagedSpace* owner);
void InitializeAsAnchor(PagedSpace* owner);
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
public:
- Space(Heap* heap,
- AllocationSpace id,
- VirtualMemory::Executability executability)
- : heap_(heap), id_(id), executability_(executability) {}
+ Space(Heap* heap, AllocationSpace id, Executability executable)
+ : heap_(heap), id_(id), executable_(executable) {}
virtual ~Space() {}
Heap* heap() const { return heap_; }
// Does the space need executable memory?
- VirtualMemory::Executability executability() { return executability_; }
+ Executability executable() { return executable_; }
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
private:
Heap* heap_;
AllocationSpace id_;
- VirtualMemory::Executability executability_;
+ Executability executable_;
};
void TearDown();
- Page* AllocatePage(intptr_t size,
- PagedSpace* owner,
- VirtualMemory::Executability executability);
+ Page* AllocatePage(
+ intptr_t size, PagedSpace* owner, Executability executable);
- LargePage* AllocateLargePage(intptr_t object_size,
- Space* owner,
- VirtualMemory::Executability executability);
+ LargePage* AllocateLargePage(
+ intptr_t object_size, Space* owner, Executability executable);
void Free(MemoryChunk* chunk);
// Returns an indication of whether a pointer is in a space that has
// been allocated by this MemoryAllocator.
- V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
+ V8_INLINE(bool IsOutsideAllocatedSpace(const void* address)) const {
return address < lowest_ever_allocated_ ||
address >= highest_ever_allocated_;
}
// could be committed later by calling MemoryChunk::CommitArea.
MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
intptr_t commit_area_size,
- VirtualMemory::Executability executability,
+ Executability executable,
Space* space);
Address ReserveAlignedMemory(size_t requested,
Address AllocateAlignedMemory(size_t reserve_size,
size_t commit_size,
size_t alignment,
- VirtualMemory::Executability executability,
+ Executability executable,
VirtualMemory* controller);
- bool CommitMemory(Address addr,
- size_t size,
- VirtualMemory::Executability executability);
+ bool CommitMemory(Address addr, size_t size, Executability executable);
- void FreeMemory(VirtualMemory* reservation,
- VirtualMemory::Executability executability);
- void FreeMemory(Address addr,
- size_t size,
- VirtualMemory::Executability executability);
+ void FreeMemory(VirtualMemory* reservation, Executability executable);
+ void FreeMemory(Address addr, size_t size, Executability executable);
// Commit a contiguous block of memory from the initial chunk. Assumes that
// the address is not NULL, the size is greater than zero, and that the
// block is contained in the initial chunk. Returns true if it succeeded
// and false otherwise.
- bool CommitBlock(Address start,
- size_t size,
- VirtualMemory::Executability executability);
+ bool CommitBlock(Address start, size_t size, Executability executable);
// Uncommit a contiguous block of memory [start..(start+size)[.
// start is not NULL, the size is greater than zero, and the
PagedSpace(Heap* heap,
intptr_t max_capacity,
AllocationSpace id,
- VirtualMemory::Executability executability);
+ Executability executable);
virtual ~PagedSpace() {}
public:
// Constructor.
SemiSpace(Heap* heap, SemiSpaceId semispace)
- : Space(heap, NEW_SPACE, VirtualMemory::NOT_EXECUTABLE),
+ : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
start_(NULL),
age_mark_(NULL),
id_(semispace),
public:
// Constructor.
explicit NewSpace(Heap* heap)
- : Space(heap, NEW_SPACE, VirtualMemory::NOT_EXECUTABLE),
+ : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
reservation_(),
OldSpace(Heap* heap,
intptr_t max_capacity,
AllocationSpace id,
- VirtualMemory::Executability executability)
- : PagedSpace(heap, max_capacity, id, executability) {
+ Executability executable)
+ : PagedSpace(heap, max_capacity, id, executable) {
page_extra_ = 0;
}
intptr_t max_capacity,
AllocationSpace id,
int object_size_in_bytes)
- : PagedSpace(heap, max_capacity, id, VirtualMemory::NOT_EXECUTABLE),
+ : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes) {
page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
}
// Shared implementation of AllocateRaw, AllocateRawCode and
// AllocateRawFixedArray.
- MUST_USE_RESULT MaybeObject* AllocateRaw(
- int object_size, VirtualMemory::Executability executability);
+ MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
+ Executability executable);
// Available bytes for objects in this space.
inline intptr_t Available();
// Don't know the alignment requirements of the OS, but it is certainly not
// less than 0xfff.
ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
- int initial_length =
- static_cast<int>(VirtualMemory::GetPageSize() / kPointerSize);
+ int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
ASSERT(initial_length > 0);
ASSERT(initial_length <= kOldStoreBufferLength);
old_limit_ = old_start_ + initial_length;
CHECK(old_virtual_memory_->Commit(
reinterpret_cast<void*>(old_start_),
(old_limit_ - old_start_) * kPointerSize,
- VirtualMemory::NOT_EXECUTABLE));
+ false));
ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
kStoreBufferSize,
- VirtualMemory::NOT_EXECUTABLE));
+ false)); // Not executable.
heap_->public_set_store_buffer_top(start_);
hash_set_1_ = new uintptr_t[kHashSetLength];
size_t grow = old_limit_ - old_start_; // Double size.
CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
grow * kPointerSize,
- VirtualMemory::NOT_EXECUTABLE));
+ false));
old_limit_ += grow;
}
// that one int value is pseudorandomly generated and returned.
// All 2^32 possible integer values are produced with (approximately) equal
// probability.
- V8_INLINE int NextInt() V8_WARN_UNUSED_RESULT {
+ V8_INLINE(int NextInt()) V8_WARN_UNUSED_RESULT {
return Next(32);
}
// |NextBoolean()| is that one boolean value is pseudorandomly generated and
// returned. The values true and false are produced with (approximately) equal
// probability.
- V8_INLINE bool NextBool() V8_WARN_UNUSED_RESULT {
+ V8_INLINE(bool NextBool()) V8_WARN_UNUSED_RESULT {
return Next(1) != 0;
}
enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
+enum Executability { NOT_EXECUTABLE, EXECUTABLE };
+
enum VisitMode {
VISIT_ALL,
VISIT_ALL_IN_SCAVENGE,
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
size_t actual_size;
// Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- 1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+ &actual_size,
+ true));
if (buffer == NULL) {
// Fallback to library function if function cannot be created.
switch (type) {
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
- bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
- ASSERT(result);
- USE(result);
+ OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &exp;
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- 1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &exp;
ExternalReference::InitializeMathExpData();
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
- bool ok = VirtualMemory::WriteProtectRegion(buffer, actual_size);
- ASSERT(ok);
- USE(ok);
+ OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
UnaryMathFunction CreateSqrtFunction() {
size_t actual_size;
// Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- 1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+ &actual_size,
+ true));
if (buffer == NULL) return &sqrt;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
- bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
- ASSERT(result);
- USE(result);
+ OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
CodeDesc desc;
masm.GetCode(&desc);
- bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
- ASSERT(result);
- USE(result);
+ OS::ProtectCode(buffer, actual_size);
// Call the function from C++ through this pointer.
return FUNCTION_CAST<ModuloFunction>(buffer);
}
'test-unbound-queue.cc',
'test-utils.cc',
'test-version.cc',
- 'test-virtual-memory.cc',
'test-weakmaps.cc',
'test-weaksets.cc',
'test-weaktypedarrays.cc'
typedef Persistent<T, CopyablePersistentTraits<T> > CopyablePersistent;
static const bool kResetInDestructor = true;
template<class S, class M>
- static V8_INLINE void Copy(const Persistent<S, M>& source,
- CopyablePersistent* dest) {
+ V8_INLINE(static void Copy(const Persistent<S, M>& source,
+ CopyablePersistent* dest)) {
// do nothing, just allow copy
}
};
#include "serialize.h"
#include "cctest.h"
-using namespace v8::internal;
+using v8::internal::Assembler;
+using v8::internal::Code;
+using v8::internal::CodeDesc;
+using v8::internal::FUNCTION_CAST;
+using v8::internal::Immediate;
+using v8::internal::Isolate;
+using v8::internal::Label;
+using v8::internal::OS;
+using v8::internal::Operand;
+using v8::internal::byte;
+using v8::internal::greater;
+using v8::internal::less_equal;
+using v8::internal::equal;
+using v8::internal::not_equal;
+using v8::internal::r13;
+using v8::internal::r15;
+using v8::internal::r8;
+using v8::internal::r9;
+using v8::internal::rax;
+using v8::internal::rbx;
+using v8::internal::rbp;
+using v8::internal::rcx;
+using v8::internal::rdi;
+using v8::internal::rdx;
+using v8::internal::rsi;
+using v8::internal::rsp;
+using v8::internal::times_1;
+using v8::internal::xmm0;
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
TEST(AssemblerX64ReturnOperation) {
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
TEST(AssemblerX64StackOperations) {
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
TEST(AssemblerX64ArithmeticOperations) {
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
TEST(AssemblerX64ImulOperation) {
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
TEST(AssemblerX64MemoryOperands) {
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
TEST(AssemblerX64ControlFlow) {
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
TEST(AssemblerX64LoopImmediates) {
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
// Assemble two loops using rax as counter, and verify the ending counts.
bool inline_fastpath) {
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
#include "serialize.h"
#include "cctest.h"
-using namespace v8::internal;
+using v8::internal::Assembler;
+using v8::internal::CodeDesc;
+using v8::internal::Condition;
+using v8::internal::FUNCTION_CAST;
+using v8::internal::HandleScope;
+using v8::internal::Immediate;
+using v8::internal::Isolate;
+using v8::internal::Label;
+using v8::internal::MacroAssembler;
+using v8::internal::OS;
+using v8::internal::Operand;
+using v8::internal::RelocInfo;
+using v8::internal::Smi;
+using v8::internal::SmiIndex;
+using v8::internal::byte;
+using v8::internal::carry;
+using v8::internal::greater;
+using v8::internal::greater_equal;
+using v8::internal::kIntSize;
+using v8::internal::kPointerSize;
+using v8::internal::kSmiTagMask;
+using v8::internal::kSmiValueSize;
+using v8::internal::less_equal;
+using v8::internal::negative;
+using v8::internal::not_carry;
+using v8::internal::not_equal;
+using v8::internal::not_zero;
+using v8::internal::positive;
+using v8::internal::r11;
+using v8::internal::r13;
+using v8::internal::r14;
+using v8::internal::r15;
+using v8::internal::r8;
+using v8::internal::r9;
+using v8::internal::rax;
+using v8::internal::rbp;
+using v8::internal::rbx;
+using v8::internal::rcx;
+using v8::internal::rdi;
+using v8::internal::rdx;
+using v8::internal::rsi;
+using v8::internal::rsp;
+using v8::internal::times_pointer_size;
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize * 2,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize * 2,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize * 2,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize * 2,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize * 3,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize * 4,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize * 3,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize * 2,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize * 4,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
- Assembler::kMinimalBufferSize * 2,
- &actual_size,
- VirtualMemory::EXECUTABLE));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ &actual_size,
+ true));
CHECK(buffer);
Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);
using namespace ::v8::internal;
+TEST(VirtualMemory) {
+ VirtualMemory* vm = new VirtualMemory(1 * MB);
+ CHECK(vm->IsReserved());
+ void* block_addr = vm->address();
+ size_t block_size = 4 * KB;
+ CHECK(vm->Commit(block_addr, block_size, false));
+ // Check whether we can write to memory.
+ int* addr = static_cast<int*>(block_addr);
+ addr[KB-1] = 2;
+ CHECK(vm->Uncommit(block_addr, block_size));
+ delete vm;
+}
+
+
TEST(GetCurrentProcessId) {
CHECK_EQ(static_cast<int>(getpid()), OS::GetCurrentProcessId());
}
using namespace ::v8::internal;
+TEST(VirtualMemory) {
+ VirtualMemory* vm = new VirtualMemory(1 * MB);
+ CHECK(vm->IsReserved());
+ void* block_addr = vm->address();
+ size_t block_size = 4 * KB;
+ CHECK(vm->Commit(block_addr, block_size, false));
+ // Check whether we can write to memory.
+ int* addr = static_cast<int*>(block_addr);
+ addr[KB-1] = 2;
+ CHECK(vm->Uncommit(block_addr, block_size));
+ delete vm;
+}
+
+
TEST(GetCurrentProcessId) {
CHECK_EQ(static_cast<int>(::GetCurrentProcessId()),
OS::GetCurrentProcessId());
size_t reserve_area_size,
size_t commit_area_size,
size_t second_commit_area_size,
- VirtualMemory::Executability executability) {
+ Executability executable) {
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
CHECK(memory_allocator->SetUp(heap->MaxReserved(),
heap->MaxExecutableSize()));
TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
TestCodeRangeScope test_code_range_scope(isolate, code_range);
- size_t header_size = (executability == VirtualMemory::EXECUTABLE)
+ size_t header_size = (executable == EXECUTABLE)
? MemoryAllocator::CodePageGuardStartOffset()
: MemoryChunk::kObjectStartOffset;
- size_t guard_size = (executability == VirtualMemory::EXECUTABLE)
+ size_t guard_size = (executable == EXECUTABLE)
? MemoryAllocator::CodePageGuardSize()
: 0;
MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
commit_area_size,
- executability,
+ executable,
NULL);
size_t alignment = code_range->exists() ?
- MemoryChunk::kAlignment : VirtualMemory::GetPageSize();
- size_t reserved_size = ((executability == VirtualMemory::EXECUTABLE))
+ MemoryChunk::kAlignment : OS::CommitPageSize();
+ size_t reserved_size = ((executable == EXECUTABLE))
? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
alignment)
- : RoundUp(header_size + reserve_area_size, VirtualMemory::GetPageSize());
+ : RoundUp(header_size + reserve_area_size, OS::CommitPageSize());
CHECK(memory_chunk->size() == reserved_size);
CHECK(memory_chunk->area_start() < memory_chunk->address() +
memory_chunk->size());
reserve_area_size,
initial_commit_area_size,
second_commit_area_size,
- VirtualMemory::EXECUTABLE);
+ EXECUTABLE);
VerifyMemoryChunk(isolate,
heap,
reserve_area_size,
initial_commit_area_size,
second_commit_area_size,
- VirtualMemory::NOT_EXECUTABLE);
+ NOT_EXECUTABLE);
delete code_range;
// Without CodeRange.
reserve_area_size,
initial_commit_area_size,
second_commit_area_size,
- VirtualMemory::EXECUTABLE);
+ EXECUTABLE);
VerifyMemoryChunk(isolate,
heap,
reserve_area_size,
initial_commit_area_size,
second_commit_area_size,
- VirtualMemory::NOT_EXECUTABLE);
+ NOT_EXECUTABLE);
}
}
OldSpace faked_space(heap,
heap->MaxReserved(),
OLD_POINTER_SPACE,
- VirtualMemory::NOT_EXECUTABLE);
+ NOT_EXECUTABLE);
Page* first_page = memory_allocator->AllocatePage(
- faked_space.AreaSize(), &faked_space, VirtualMemory::NOT_EXECUTABLE);
+ faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
first_page->InsertAfter(faked_space.anchor()->prev_page());
CHECK(first_page->is_valid());
// Again, we should get n or n - 1 pages.
Page* other = memory_allocator->AllocatePage(
- faked_space.AreaSize(), &faked_space, VirtualMemory::NOT_EXECUTABLE);
+ faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
CHECK(other->is_valid());
total_pages++;
other->InsertAfter(first_page);
OldSpace* s = new OldSpace(heap,
heap->MaxOldGenerationSize(),
OLD_POINTER_SPACE,
- VirtualMemory::NOT_EXECUTABLE);
+ NOT_EXECUTABLE);
CHECK(s != NULL);
CHECK(s->SetUp());
int lo_size = Page::kPageSize;
- Object* obj = lo->AllocateRaw(
- lo_size, VirtualMemory::NOT_EXECUTABLE)->ToObjectUnchecked();
+ Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->ToObjectUnchecked();
CHECK(obj->IsHeapObject());
HeapObject* ho = HeapObject::cast(obj);
while (true) {
intptr_t available = lo->Available();
- { MaybeObject* maybe_obj = lo->AllocateRaw(
- lo_size, VirtualMemory::NOT_EXECUTABLE);
+ { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
if (!maybe_obj->ToObject(&obj)) break;
}
CHECK(lo->Available() < available);
CHECK(!lo->IsEmpty());
- CHECK(lo->AllocateRaw(lo_size, VirtualMemory::NOT_EXECUTABLE)->IsFailure());
+ CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->IsFailure());
}
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cctest.h"
-#include "platform/virtual-memory.h"
-
-using namespace ::v8::internal;
-
-
-TEST(CommitAndUncommit) {
- static const size_t kSize = 1 * MB;
- static const size_t kBlockSize = 4 * KB;
- VirtualMemory vm(kSize);
- CHECK(vm.IsReserved());
- void* block_addr = vm.address();
- CHECK(vm.Commit(block_addr, kBlockSize, VirtualMemory::NOT_EXECUTABLE));
- // Check whether we can write to memory.
- int* addr = static_cast<int*>(block_addr);
- addr[5] = 2;
- CHECK(vm.Uncommit(block_addr, kBlockSize));
-}
-
-
-TEST(Release) {
- static const size_t kSize = 4 * KB;
- VirtualMemory vm(kSize);
- CHECK(vm.IsReserved());
- CHECK_LE(kSize, vm.size());
- CHECK_NE(NULL, vm.address());
- vm.Release();
- CHECK(!vm.IsReserved());
-}
-
-
-TEST(TakeControl) {
- static const size_t kSize = 64 * KB;
-
- VirtualMemory vm1(kSize);
- size_t size1 = vm1.size();
- CHECK(vm1.IsReserved());
- CHECK_LE(kSize, size1);
-
- VirtualMemory vm2;
- CHECK(!vm2.IsReserved());
-
- vm2.TakeControl(&vm1);
- CHECK(vm2.IsReserved());
- CHECK(!vm1.IsReserved());
- CHECK(vm2.size() == size1);
-}
-
-
-TEST(AllocationGranularityIsPowerOf2) {
- CHECK(IsPowerOf2(VirtualMemory::GetAllocationGranularity()));
-}
-
-
-TEST(PageSizeIsPowerOf2) {
- CHECK(IsPowerOf2(VirtualMemory::GetPageSize()));
-}
'../../src/platform/semaphore.h',
'../../src/platform/socket.cc',
'../../src/platform/socket.h',
- '../../src/platform/virtual-memory.cc',
- '../../src/platform/virtual-memory.h',
'../../src/preparse-data-format.h',
'../../src/preparse-data.cc',
'../../src/preparse-data.h',